diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 488e6c90cf7f..5a1d1df72611 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -20,11 +20,11 @@ crates/fs-util/ @onbjerg @emhane
 crates/metrics/ @onbjerg
 crates/net/ @emhane @mattsse @Rjected
 crates/net/downloaders/ @onbjerg @rkrasiuk @emhane
-crates/node/ @mattsse @Rjected @onbjerg
+crates/node/ @mattsse @Rjected @onbjerg @emhane @klkvr
 crates/optimism/ @mattsse @Rjected @fgimenez @emhane
 crates/payload/ @mattsse @Rjected
-crates/primitives/ @Rjected
-crates/primitives-traits/ @Rjected @joshieDo
+crates/primitives/ @Rjected @emhane @mattsse @klkvr
+crates/primitives-traits/ @Rjected @joshieDo @emhane @mattsse @klkvr
 crates/prune/ @shekhirin @joshieDo
 crates/revm/ @mattsse @rakita
 crates/rpc/ @mattsse @Rjected @emhane
diff --git a/.github/assets/check_rv32imac.sh b/.github/assets/check_rv32imac.sh
new file mode 100755
index 000000000000..0112e5cec175
--- /dev/null
+++ b/.github/assets/check_rv32imac.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+set +e # Disable immediate exit on error
+
+# Array of crates to check
+crates_to_check=(
+  reth-codecs-derive
+  # reth-evm
+  # reth-primitives
+  # reth-primitives-traits
+  # reth-optimism-forks
+  # reth-optimism-chainspec
+)
+
+# Array to hold the results
+results=()
+# Flag to track if any command fails
+any_failed=0
+
+for crate in "${crates_to_check[@]}"; do
+  cmd="cargo +stable build -p $crate --target riscv32imac-unknown-none-elf --no-default-features"
+
+  if [ -n "$CI" ]; then
+    echo "::group::$cmd"
+  else
+    printf "\n%s:\n %s\n" "$crate" "$cmd"
+  fi
+
+  set +e # Disable immediate exit on error
+  # Run the command and capture the return code
+  $cmd
+  ret_code=$?
+  set -e # Re-enable immediate exit on error
+
+  # Store the result in the dictionary
+  if [ $ret_code -eq 0 ]; then
+    results+=("1:✅:$crate")
+  else
+    results+=("2:❌:$crate")
+    any_failed=1
+  fi
+
+  if [ -n "$CI" ]; then
+    echo "::endgroup::"
+  fi
+done
+
+# Sort the results by status and then by crate name
+IFS=$'\n' sorted_results=($(sort <<<"${results[*]}"))
+unset IFS
+
+# Print summary
+echo -e "\nSummary of build results:"
+for result in "${sorted_results[@]}"; do
+  status="${result#*:}"
+  status="${status%%:*}"
+  crate="${result##*:}"
+  echo "$status $crate"
+done
+
+# Exit with a non-zero status if any command fails
+exit $any_failed
diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh
index c34f82d2e315..11e5b5e00b9e 100755
--- a/.github/assets/check_wasm.sh
+++ b/.github/assets/check_wasm.sh
@@ -15,7 +15,6 @@ exclude_crates=(
   reth-beacon-consensus
   reth-bench
   reth-blockchain-tree
-  reth-chain-state
   reth-cli
   reth-cli-commands
   reth-cli-runner
@@ -26,13 +25,11 @@ exclude_crates=(
   reth-dns-discovery
   reth-downloaders
   reth-e2e-test-utils
-  reth-engine-primitives
   reth-engine-service
   reth-engine-tree
   reth-engine-util
   reth-eth-wire
   reth-ethereum-cli
-  reth-ethereum-engine-primitives
   reth-ethereum-payload-builder
   reth-etl
   reth-exex
@@ -41,7 +38,6 @@ exclude_crates=(
   reth-net-nat
   reth-network
   reth-node-api
-  reth-node-types
   reth-node-builder
   reth-node-core
   reth-node-ethereum
@@ -51,8 +47,7 @@ exclude_crates=(
   reth-optimism-node
   reth-optimism-payload-builder
   reth-optimism-rpc
-  reth-payload-builder
-  reth-payload-primitives
+  reth-optimism-primitives
   reth-rpc
   reth-rpc-api
   reth-rpc-api-testing-util
@@ -74,6 +69,7 @@ exclude_crates=(
   reth-static-file # tokio
   reth-transaction-pool # c-kzg
   reth-trie-parallel # tokio
+  reth-testing-utils
 )
 
 # Array to hold the results
diff --git a/.github/assets/hive/Dockerfile b/.github/assets/hive/Dockerfile
index 9f75ba6f1cf2..25b71bf21872 100644
--- a/.github/assets/hive/Dockerfile
+++ b/.github/assets/hive/Dockerfile
@@ -5,4 +5,5 @@ COPY dist/reth /usr/local/bin
 COPY LICENSE-* ./
 
 EXPOSE 30303 30303/udp 9001 8545 8546
-ENTRYPOINT ["/usr/local/bin/reth"]
\ No newline at end of file
+ENV RUST_LOG=debug
+ENTRYPOINT ["/usr/local/bin/reth"]
diff --git a/.github/assets/kurtosis_network_params.yaml b/.github/assets/kurtosis_network_params.yaml
index 9c104de49500..e8cc1b51dc81 100644
--- a/.github/assets/kurtosis_network_params.yaml
+++ b/.github/assets/kurtosis_network_params.yaml
@@ -2,8 +2,6 @@ participants:
   - el_type: geth
     cl_type: lighthouse
   - el_type: reth
-    el_extra_params:
-      - --engine.experimental
     el_image: "ghcr.io/paradigmxyz/reth:kurtosis-ci"
     cl_type: teku
 additional_services:
diff --git a/.github/assets/kurtosis_op_network_params.yaml b/.github/assets/kurtosis_op_network_params.yaml
new file mode 100644
index 000000000000..0e1516cc8890
--- /dev/null
+++ b/.github/assets/kurtosis_op_network_params.yaml
@@ -0,0 +1,15 @@
+ethereum_package:
+  participants:
+    - el_type: reth
+      cl_type: lighthouse
+optimism_package:
+  chains:
+    - participants:
+        - el_type: op-geth
+          cl_type: op-node
+        - el_type: op-reth
+          el_image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci"
+          cl_type: op-node
+      batcher_params:
+        extra_params:
+          - "--throttle-interval=0"
diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml
index 6c50923d3e6b..b8d3f378fca4 100644
--- a/.github/workflows/hive.yml
+++ b/.github/workflows/hive.yml
@@ -5,8 +5,8 @@ name: hive
 on:
   workflow_dispatch:
   schedule:
-    # every day
-    - cron: "0 0 * * *"
+    # run every 12 hours
+    - cron: "0 */12 * * *"
 
 env:
   CARGO_TERM_COLOR: always
diff --git a/.github/workflows/kurtosis-op.yml b/.github/workflows/kurtosis-op.yml
new file mode 100644
index 000000000000..c7307d10c7bd
--- /dev/null
+++ b/.github/workflows/kurtosis-op.yml
@@ -0,0 +1,121 @@
+# Runs simple OP stack setup in Kurtosis
+
+name: kurtosis-op
+
+on:
+  workflow_dispatch:
+  schedule:
+    # every day
+    - cron: "0 1 * * *"
+
+env:
+  CARGO_TERM_COLOR: always
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  prepare-reth:
+    if: github.repository == 'paradigmxyz/reth'
+    timeout-minutes: 45
+    runs-on:
+      group: Reth
+    steps:
+      - uses: actions/checkout@v4
+      - run: mkdir artifacts
+      - uses: dtolnay/rust-toolchain@stable
+      - uses: Swatinem/rust-cache@v2
+        with:
+          cache-on-failure: true
+      - name: Build reth
+        run: |
+          cargo build --features optimism,asm-keccak --profile hivetests --bin op-reth --manifest-path crates/optimism/bin/Cargo.toml --locked
+          mkdir dist && cp ./target/hivetests/op-reth ./dist/reth
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Build and export reth image
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          file: .github/assets/hive/Dockerfile
+          tags: ghcr.io/paradigmxyz/op-reth:kurtosis-ci
+          outputs: type=docker,dest=./artifacts/reth_image.tar
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+
+      - name: Upload reth image
+        uses: actions/upload-artifact@v4
+        with:
+          name: artifacts
+          path: ./artifacts
+
+  test:
+    timeout-minutes: 60
+    strategy:
+      fail-fast: false
+    name: run kurtosis
+    runs-on:
+      group: Reth
+    needs:
+      - prepare-reth
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Download reth image
+        uses: actions/download-artifact@v4
+        with:
+          name: artifacts
+          path: /tmp
+
+      - name: Load Docker image
+        run: |
+          docker load -i /tmp/reth_image.tar &
+          wait
+          docker image ls -a
+
+      - name: Install Foundry
+        uses: foundry-rs/foundry-toolchain@v1
+
+      - name: Run kurtosis
+        run: |
+          echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
+          sudo apt update
+          sudo apt install kurtosis-cli
+          kurtosis engine start
+          kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params.yaml
+          ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]')
+          GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number')
+          RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number')
+          echo "GETH_RPC=http://127.0.0.1:$GETH_PORT" >> $GITHUB_ENV
+          echo "RETH_RPC=http://127.0.0.1:$RETH_PORT" >> $GITHUB_ENV
+
+      - name: Assert that clients advance
+        run: |
+          for i in {1..100}; do
+            sleep 5
+            BLOCK_GETH=$(cast bn --rpc-url $GETH_RPC)
+            BLOCK_RETH=$(cast bn --rpc-url $RETH_RPC)
+
+            if [ $BLOCK_GETH -ge 100 ] && [ $BLOCK_RETH -ge 100 ] ; then exit 0; fi
+            echo "Waiting for clients to advance..., Reth: $BLOCK_RETH Geth: $BLOCK_GETH"
+          done
+          kurtosis service logs -a op-devnet op-el-2-op-reth-op-node-op-kurtosis
+          kurtosis service logs -a op-devnet op-cl-2-op-node-op-reth-op-kurtosis
+          exit 1
+
+
+  notify-on-error:
+    needs: test
+    if: failure()
+    runs-on:
+      group: Reth
+    steps:
+      - name: Slack Webhook Action
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_COLOR: ${{ job.status }}
+          SLACK_MESSAGE: "Failed run: https://github.com/paradigmxyz/reth/actions/runs/${{ github.run_id }}"
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }}
diff --git a/.github/workflows/kurtosis.yml b/.github/workflows/kurtosis.yml
index 74d26dbd3eea..3e1b74321116 100644
--- a/.github/workflows/kurtosis.yml
+++ b/.github/workflows/kurtosis.yml
@@ -5,8 +5,8 @@ name: kurtosis
 on:
   workflow_dispatch:
   schedule:
-    # every day
-    - cron: "0 1 * * *"
+    # run every 12 hours
+    - cron: "0 */12 * * *"
 
 env:
   CARGO_TERM_COLOR: always
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index fa7b4f9f45c2..61ba54e95568 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -71,10 +71,26 @@ jobs:
       - uses: Swatinem/rust-cache@v2
         with:
          cache-on-failure: true
-      - uses: dcarbone/install-jq-action@v2
+      - uses: dcarbone/install-jq-action@v3
       - name: Run Wasm checks
        run: .github/assets/check_wasm.sh
 
+  riscv:
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+    steps:
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@stable
+        with:
+          target: riscv32imac-unknown-none-elf
+      - uses: taiki-e/install-action@cargo-hack
+      - uses: Swatinem/rust-cache@v2
+        with:
+          cache-on-failure: true
+      - uses: dcarbone/install-jq-action@v3
+      - name: Run RISC-V checks
+        run: .github/assets/check_rv32imac.sh
+
   crate-checks:
     runs-on: ubuntu-latest
     timeout-minutes: 30
diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml
index defd9a6f535d..4c927df8be00 100644
--- a/.github/workflows/unit.yml
+++ b/.github/workflows/unit.yml
@@ -35,11 +35,11 @@ jobs:
             partition: 2
             total_partitions: 2
           - type: optimism
-            args: --features "asm-keccak optimism" --locked
+            args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" --exclude "reth-ethereum-*" --exclude "*-ethereum"
             partition: 1
             total_partitions: 2
           - type: optimism
-            args: --features "asm-keccak optimism" --locked
+            args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" --exclude "reth-ethereum-*" --exclude "*-ethereum"
             partition: 2
             total_partitions: 2
           - type: book
@@ -61,7 +61,8 @@ jobs:
       - name: Run tests
         run: |
           cargo nextest run \
-            ${{ matrix.args }} --workspace --exclude ef-tests \
+            ${{ matrix.args }} --workspace \
+            --exclude ef-tests --no-tests=warn \
             --partition hash:${{ matrix.partition }}/2 \
             -E "!kind(test)"
 
diff --git a/Cargo.lock b/Cargo.lock
index 04d186d776c5..c0ad6f5b230b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -91,9 +91,9 @@ dependencies = [
 
 [[package]]
 name = "allocator-api2"
-version = "0.2.18"
+version = "0.2.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
+checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9"
 
 [[package]]
 name = "alloy-chains"
@@ -112,9 +112,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-consensus"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b19fd285b55dd39ae0dbc37481ad9f5f48898726f76335a2d6167a85a5fa41da"
+checksum = "ae09ffd7c29062431dd86061deefe4e3c6f07fa0d674930095f8dcedb0baf02c"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -131,9 +131,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-contract"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f42b1cb3fa8cba51b45795097a0d58a34569ca5db9eda48f63230e22fbc5cb5"
+checksum = "66430a72d5bf5edead101c8c2f0a24bada5ec9f3cf9909b3e08b6d6899b4803e"
 dependencies = [
  "alloy-dyn-abi",
  "alloy-json-abi",
  "alloy-network",
  "alloy-network-primitives",
  "alloy-primitives",
  "alloy-provider",
  "alloy-pubsub",
  "alloy-rpc-types-eth",
  "alloy-sol-types",
  "alloy-transport",
  "futures",
  "futures-util",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
 name = "alloy-dyn-abi"
-version = "0.8.11"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85132f2698b520fab3f54beed55a44389f7006a7b557a0261e1e69439dcc1572"
+checksum = "ef2364c782a245cf8725ea6dbfca5f530162702b5d685992ea03ce64529136cc"
 dependencies = [
  "alloy-json-abi",
  "alloy-primitives",
@@ -182,9 +182,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-eip7702"
-version = "0.4.0"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69fb9fd842fdf10a524bbf2c4de6942ad869c1c8c3d128a1b09e67ed5f7cedbd"
+checksum = "5f6cee6a35793f3db8a5ffe60e86c695f321d081a567211245f503e8c498fce8"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -198,9 +198,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-eips"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21aff0f2c921246398cad88e32a1d8ec14359b183afbc3dcb816873714cafc1a"
+checksum = "5b6aa3961694b30ba53d41006131a2fca3bdab22e4c344e46db2c639e7c2dfdd"
 dependencies = [
  "alloy-eip2930",
  "alloy-eip7702",
@@ -219,9 +219,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-genesis"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a76d899cfbfa13c5ed044383b7ae0e6a4d6ffcad3fd25e4acf71ff1c255ddae0"
+checksum = "e53f7877ded3921d18a0a9556d55bedf84535567198c9edab2aa23106da91855"
 dependencies = [
  "alloy-primitives",
  "alloy-serde",
@@ -230,9 +230,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-json-abi"
-version = "0.8.11"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ded610181f3dad5810f6ff12d1a99994cf9b42d2fcb7709029352398a5da5ae6"
+checksum = "b84c506bf264110fa7e90d9924f742f40ef53c6572ea56a0b0bd714a567ed389"
 dependencies = [
  "alloy-primitives",
  "alloy-sol-type-parser",
@@ -242,23 +242,23 @@ dependencies = [
 
 [[package]]
 name = "alloy-json-rpc"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e244937365749c09c403d3054de39cc7dd46e3c3a12e5b164106af4903011ab1"
+checksum = "3694b7e480728c0b3e228384f223937f14c10caef5a4c766021190fc8f283d35"
 dependencies = [
  "alloy-primitives",
  "alloy-sol-types",
  "serde",
  "serde_json",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
  "tracing",
 ]
 
 [[package]]
 name = "alloy-network"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a28811461dc37e28db92b6d3a8c03a5883f2100b270a6294af00710bf4a0be4"
+checksum = "ea94b8ceb5c75d7df0a93ba0acc53b55a22b47b532b600a800a87ef04eb5b0b4"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -274,14 +274,14 @@ dependencies = [
  "futures-utils-wasm",
  "serde",
  "serde_json",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
 name = "alloy-network-primitives"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3e517c44a97e753f10dc0736215ba4677da5e2fbc1451e3e76902e02cd6cff12"
+checksum = "df9f3e281005943944d15ee8491534a1c7b3cbf7a7de26f8c433b842b93eb5f9"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -292,9 +292,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-node-bindings"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "15bf1a4b35b071c2d6f21fd3d32b8c5466cb7ed31fd4a4473a4e2ce180729121"
+checksum = "c9805d126f24be459b958973c0569c73e1aadd27d4535eee82b2b6764aa03616"
 dependencies = [
  "alloy-genesis",
  "alloy-primitives",
@@ -302,16 +302,16 @@ dependencies = [
  "rand 0.8.5",
  "serde_json",
  "tempfile",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
  "tracing",
  "url",
 ]
 
 [[package]]
 name = "alloy-primitives"
-version = "0.8.11"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd58d377699e6cfeab52c4a9d28bdc4ef37e2bd235ff2db525071fe37a2e9af5"
+checksum = "9fce5dbd6a4f118eecc4719eaa9c7ffc31c315e6c5ccde3642db927802312425"
 dependencies = [
  "alloy-rlp",
  "arbitrary",
@@ -341,9 +341,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-provider"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56befb85784c7eb4f163b9aed7cdcaba09d5b07f8e59d6c12ad0ce1acf67c0fd"
+checksum = "40c1f9eede27bf4c13c099e8e64d54efd7ce80ef6ea47478aa75d5d74e2dba3b"
 dependencies = [
  "alloy-chains",
  "alloy-consensus",
@@ -373,7 +373,7 @@ dependencies = [
  "schnellru",
  "serde",
  "serde_json",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
  "tokio",
  "tracing",
  "url",
@@ -382,9 +382,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-pubsub"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a6480f9596064db2ca8e1a4b710ea9a4ef420534e68640296a461b71f6bfadc1"
+checksum = "90f1f34232f77341076541c405482e4ae12f0ee7153d8f9969fc1691201b2247"
 dependencies = [
  "alloy-json-rpc",
  "alloy-primitives",
@@ -418,14 +418,14 @@ checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
 name = "alloy-rpc-client"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb49d38b3279a07e864d973323534a2c4a845e16f2c0153a509a3abcc01da7b1"
+checksum = "374dbe0dc3abdc2c964f36b3d3edf9cdb3db29d16bda34aa123f03d810bec1dd"
 dependencies = [
  "alloy-json-rpc",
  "alloy-primitives",
@@ -448,9 +448,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90be9542c6c9bb0d21ac08104ca0a3d1fb83e56f1c704f5cdcf6fb9e01fcbd75"
+checksum = "c74832aa474b670309c20fffc2a869fa141edab7c79ff7963fad0a08de60bae1"
 dependencies = [
  "alloy-primitives",
  "alloy-rpc-types-engine",
@@ -461,9 +461,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-admin"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "410e7b9d67489d19ad52439b940fbf482e0823190d8245242bfff1eec44290d5"
+checksum = "6bfd9b2cc3a1985f1f6da5afc41120256f9f9316fcd89e054cea99dbb10172f6"
 dependencies = [
  "alloy-genesis",
  "alloy-primitives",
@@ -473,9 +473,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-anvil"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "951f9106bb02ad00a2dc2eb7b400041a2c073d7fb8f33e2f1f29b2f71564f3f7"
+checksum = "5ca97963132f78ddfc60e43a017348e6d52eea983925c23652f5b330e8e02291"
 dependencies = [
  "alloy-primitives",
  "alloy-rpc-types-eth",
@@ -485,23 +485,23 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-beacon"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dab9821d5a73f56512ddd8e3db89a5bbb285353129b271c4ad6803a37c4e00ce"
+checksum = "922fa76678d2f9f07ea1b19309b5cfbf244c6029dcba3515227b515fdd6ed4a7"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
  "alloy-rpc-types-engine",
  "serde",
  "serde_with",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
 name = "alloy-rpc-types-debug"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ebe68f35cafc465442862421ae2d123bb58c8df25f837d8866bf5fc278b74a52"
+checksum = "ba2253bee958658ebd614c07a61c40580e09dd1fad3f017684314442332ab753"
 dependencies = [
  "alloy-primitives",
  "serde",
@@ -509,9 +509,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-engine"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ed9e7b3233cb3e0aaeaedc4e21e1ea9d99e947a7206241a9f9521c138193978"
+checksum = "3f56294dce86af23ad6ee8df46cf8b0d292eb5d1ff67dc88a0886051e32b1faf"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -530,9 +530,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-eth"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be10f130b8be7c2351a3ea64b4bf07020fde5be8d1ac18db9a9a3496aa22bb19"
+checksum = "a8a477281940d82d29315846c7216db45b15e90bcd52309da9f54bcf7ad94a11"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -551,9 +551,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-mev"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "110f7dbee6f047915eb8915751d96402f6d02cb6e5f64286f10949eaa5bed841"
+checksum = "8647f8135ee3d5de1cf196706c905c05728a4e38bb4a5b61a7214bd1ba8f60a6"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -564,23 +564,23 @@ dependencies = [
 
 [[package]]
 name = "alloy-rpc-types-trace"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d4f7f183d06db1457b58c6d618ff7ab92c97810138c148e09edb14ed2001069"
+checksum = "ecd8b4877ef520c138af702097477cdd19504a8e1e4675ba37e92ba40f2d3c6f"
 dependencies = [
  "alloy-primitives",
  "alloy-rpc-types-eth",
  "alloy-serde",
  "serde",
  "serde_json",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
 name = "alloy-rpc-types-txpool"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f85580d4e78ffd765086ebf640004a773e3c335ebbfaa5666e13a0640c4957fe"
+checksum = "1d4ab49acf90a71f7fb894dc5fd485f1f07a1e348966c714c4d1e0b7478850a8"
 dependencies = [
  "alloy-primitives",
  "alloy-rpc-types-eth",
@@ -590,9 +590,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-serde"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1493df14770a23b1e32d22c66fa22508d09e0a99d6923a45f179ff7887ca0cef"
+checksum = "4dfa4a7ccf15b2492bb68088692481fd6b2604ccbee1d0d6c44c21427ae4df83"
 dependencies = [
  "alloy-primitives",
  "arbitrary",
@@ -602,23 +602,23 @@ dependencies = [
 
 [[package]]
 name = "alloy-signer"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ebff64a3b4062eba217404700d1517b9bf3ff9a7a5b2dd03f1cf8aeec3e9a6b8"
+checksum = "2e10aec39d60dc27edcac447302c7803d2371946fb737245320a05b78eb2fafd"
 dependencies = [
  "alloy-primitives",
  "async-trait",
  "auto_impl",
  "elliptic-curve",
  "k256",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
 name = "alloy-signer-local"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc1f6602be452e3bb5b6c2fe0fa0f966465f9e9bfd6ad7691bfe1bd8b74bf432"
+checksum = "d8396f6dff60700bc1d215ee03d86ff56de268af96e2bf833a14d0bafcab9882"
 dependencies = [
  "alloy-consensus",
  "alloy-network",
@@ -629,28 +629,28 @@ dependencies = [
  "coins-bip39",
  "k256",
  "rand 0.8.5",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
 name = "alloy-sol-macro"
-version = "0.8.11"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a1b42ac8f45e2f49f4bcdd72cbfde0bb148f5481d403774ffa546e48b83efc1"
+checksum = "9343289b4a7461ed8bab8618504c995c049c082b70c7332efd7b32125633dc05"
 dependencies = [
  "alloy-sol-macro-expander",
  "alloy-sol-macro-input",
  "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
 name = "alloy-sol-macro-expander"
-version = "0.8.11"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06318f1778e57f36333e850aa71bd1bb5e560c10279e236622faae0470c50412"
+checksum = "4222d70bec485ceccc5d8fd4f2909edd65b5d5e43d4aca0b5dcee65d519ae98f"
 dependencies = [
  "alloy-sol-macro-input",
  "const-hex",
@@ -659,31 +659,31 @@ dependencies = [
  "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
  "syn-solidity",
  "tiny-keccak",
 ]
 
 [[package]]
 name = "alloy-sol-macro-input"
-version = "0.8.11"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eaebb9b0ad61a41345a22c9279975c0cdd231b97947b10d7aad1cf0a7181e4a5"
+checksum = "2e17f2677369571b976e51ea1430eb41c3690d344fef567b840bfc0b01b6f83a"
 dependencies = [
  "const-hex",
  "dunce",
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
  "syn-solidity",
 ]
 
 [[package]]
 name = "alloy-sol-type-parser"
-version = "0.8.11"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12c71028bfbfec210e24106a542aad3def7caf1a70e2c05710e92a98481980d3"
+checksum = "aa64d80ae58ffaafdff9d5d84f58d03775f66c84433916dc9a64ed16af5755da"
 dependencies = [
  "serde",
  "winnow",
@@ -691,9 +691,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-sol-types"
-version = "0.8.11"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "374d7fb042d68ddfe79ccb23359de3007f6d4d53c13f703b64fb0db422132111"
+checksum = "6520d427d4a8eb7aa803d852d7a52ceb0c519e784c292f64bb339e636918cf27"
 dependencies = [
  "alloy-json-abi",
  "alloy-primitives",
@@ -704,9 +704,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-transport"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64534da7f71ecca86b3449adec19b7942fb0905b9f392f60054a02a5f686f71f"
+checksum = "f99acddb34000d104961897dbb0240298e8b775a7efffb9fda2a1a3efedd65b3"
 dependencies = [
  "alloy-json-rpc",
  "base64 0.22.1",
@@ -714,7 +714,7 @@ dependencies = [
  "futures-utils-wasm",
  "serde",
  "serde_json",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
  "tokio",
  "tower 0.5.1",
  "tracing",
@@ -724,9 +724,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-transport-http"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "617b5ab96f4fb64ef697a84c68ec8534c062baafbdb0529c34aaee43324f0d5a"
+checksum = "5dc013132e34eeadaa0add7e74164c1503988bfba8bae885b32e0918ba85a8a6"
 dependencies = [
  "alloy-json-rpc",
  "alloy-transport",
@@ -739,9 +739,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-transport-ipc"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10043df9ea36e3a38056cdfc3a70138343caef4eec6df66d6cbfdd348d245828"
+checksum = "063edc0660e81260653cc6a95777c29d54c2543a668aa5da2359fb450d25a1ba"
 dependencies = [
  "alloy-json-rpc",
  "alloy-pubsub",
@@ -758,9 +758,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-transport-ws"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6a43ecdbc8f79cb5d7f54e2118626f873ded93c8c040fb714ce6be47dc5b526"
+checksum = "abd170e600801116d5efe64f74a4fc073dbbb35c807013a7d0a388742aeebba0"
 dependencies = [
  "alloy-pubsub",
  "alloy-transport",
@@ -776,9 +776,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-trie"
-version = "0.7.3"
+version = "0.7.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40d8e28db02c006f7abb20f345ffb3cc99c465e36f676ba262534e654ae76042"
+checksum = "b6b2e366c0debf0af77766c23694a3f863b02633050e71e096e257ffbd395e50"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -881,7 +881,7 @@ dependencies = [
  "proc-macro-error2",
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -1057,9 +1057,9 @@ dependencies = [
 
 [[package]]
 name = "async-compression"
-version = "0.4.17"
+version = "0.4.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0cb8f1d480b0ea3783ab015936d2a55c87e219676f0c0b7dec61494043f21857"
+checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522"
 dependencies = [
  "brotli",
  "flate2",
@@ -1104,7 +1104,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -1115,7 +1115,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -1153,7 +1153,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -1164,9 +1164,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
 
 [[package]]
 name = "backon"
-version = "1.2.0"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e4fa97bb310c33c811334143cf64c5bb2b7b3c06e453db6b095d7061eff8f113"
+checksum = "ba5289ec98f68f28dd809fd601059e6aa908bb8f6108620930828283d4ee23d7"
 dependencies = [
  "fastrand 2.2.0",
  "tokio",
@@ -1259,7 +1259,7 @@ dependencies = [
  "regex",
  "rustc-hash 1.1.0",
  "shlex",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -1400,7 +1400,7 @@ dependencies = [
  "static_assertions",
  "tap",
  "thin-vec",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
  "time",
 ]
 
@@ -1441,7 +1441,7 @@ checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
  "synstructure",
 ]
 
@@ -1525,12 +1525,12 @@ dependencies = [
 
 [[package]]
 name = "bstr"
-version = "1.10.0"
+version = "1.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c"
+checksum = "1a68f1f47cdf0ec8ee4b941b2eee2a80cb796db73118c0dd09ac63fbe405be22"
 dependencies = [
  "memchr",
- "regex-automata 0.4.8",
+ "regex-automata 0.4.9",
  "serde",
 ]
 
@@ -1548,9 +1548,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c"
 
 [[package]]
 name = "bytemuck"
-version = "1.19.0"
+version = "1.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d"
+checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a"
 dependencies = [
  "bytemuck_derive",
 ]
 
@@ -1563,7 +1563,7 @@ checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -1625,7 +1625,7 @@ dependencies = [
  "semver 1.0.23",
  "serde",
  "serde_json",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
@@ -1651,9 +1651,9 @@ dependencies = [
 
 [[package]]
 name = "cc"
-version = "1.1.36"
+version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baee610e9452a8f6f0a1b6194ec09ff9e2d85dea54432acdae41aa0761c95d70"
+checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47"
 dependencies = [
  "jobserver",
  "libc",
@@ -1752,9 +1752,9 @@ dependencies = [
 
 [[package]]
 name = "clap"
-version = "4.5.20"
+version = "4.5.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8"
+checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f"
 dependencies = [
  "clap_builder",
  "clap_derive",
@@ -1762,9 +1762,9 @@ dependencies = [
 
 [[package]]
 name = "clap_builder"
-version = "4.5.20"
+version = "4.5.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54"
+checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec"
 dependencies = [
  "anstream",
  "anstyle",
@@ -1781,14 +1781,14 @@ dependencies = [
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
 name = "clap_lex"
-version = "0.7.2"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
+checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7"
 
 [[package]]
 name = "coins-bip32"
@@ -1803,7 +1803,7 @@ dependencies = [
  "k256",
  "serde",
  "sha2 0.10.8",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
@@ -1819,7 +1819,7 @@ dependencies = [
  "pbkdf2",
  "rand 0.8.5",
  "sha2 0.10.8",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
@@ -1838,7 +1838,7 @@ dependencies = [
  "serde",
  "sha2 0.10.8",
  "sha3",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
@@ -1859,14 +1859,14 @@ dependencies = [
 
 [[package]]
 name = "comfy-table"
-version = "7.1.1"
+version = "7.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7"
+checksum = "24f165e7b643266ea80cb858aed492ad9280e3e05ce24d4a99d7d7b889b6a4d9"
 dependencies = [
- "crossterm 0.27.0",
+ "crossterm",
  "strum",
  "strum_macros",
- "unicode-width",
+ "unicode-width 0.2.0",
 ]
 
 [[package]]
@@ -1915,9 +1915,9 @@ dependencies = [
 
 [[package]]
 name = "const-hex"
-version = "1.13.1"
+version = "1.13.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586"
+checksum = "487981fa1af147182687064d0a2c336586d337a606595ced9ffb0c685c250c73"
 dependencies = [
  "cfg-if",
  "cpufeatures",
@@ -1972,6 +1972,16 @@ dependencies = [
  "libc",
 ]
 
+[[package]]
+name = "core-foundation"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
 [[package]]
 name = "core-foundation-sys"
 version = "0.8.7"
@@ -1998,9 +2008,9 @@ dependencies = [
 
 [[package]]
 name = "cpufeatures"
-version = "0.2.14"
+version = "0.2.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0"
+checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3"
 dependencies = [
  "libc",
 ]
@@ -2107,19 +2117,6 @@ version = "0.8.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80"
 
-[[package]]
-name = "crossterm"
-version = "0.27.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df"
-dependencies = [
- "bitflags 2.6.0",
- "crossterm_winapi",
- "libc",
- "parking_lot",
- "winapi",
-]
-
 [[package]]
 name = "crossterm"
 version = "0.28.1"
@@ -2186,9 +2183,9 @@ dependencies = [
 
 [[package]]
 name = "csv"
-version = "1.3.0"
+version = "1.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe"
+checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf"
 dependencies = [
  "csv-core",
  "itoa",
@@ -2238,7 +2235,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -2262,7 +2259,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "strsim",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -2273,7 +2270,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806"
 dependencies = [
  "darling_core",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -2395,7 +2392,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -2406,7 +2403,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -2427,7 +2424,7 @@ dependencies = [
  "convert_case",
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
  "unicode-xid",
 ]
 
@@ -2541,7 +2538,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -2609,8 +2606,9 @@ dependencies = [
 
 [[package]]
 name = "ef-tests"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
+ "alloy-consensus",
  "alloy-eips",
  "alloy-primitives",
  "alloy-rlp",
@@ -2626,7 +2624,7 @@ dependencies = [
  "revm",
  "serde",
  "serde_json",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
  "walkdir",
 ]
 
@@ -2690,7 +2688,7 @@ dependencies = [
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -2701,7 +2699,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -2758,7 +2756,7 @@ dependencies = [
  "darling",
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -2771,6 +2769,7 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
 name = "example-beacon-api-sidecar-fetcher"
 version = "0.1.0"
 dependencies = [
+ "alloy-consensus",
  "alloy-primitives",
  "alloy-rpc-types-beacon",
  "clap",
@@ -2781,7 +2780,7 @@ dependencies = [
  "reth-node-ethereum",
  "serde",
  "serde_json",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
 ]
 
 [[package]]
@@ -2866,11 +2865,10 @@ dependencies = [
  "reth-node-core",
  "reth-node-ethereum",
  "reth-payload-builder",
- "reth-primitives",
  "reth-tracing",
  "reth-trie-db",
  "serde",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
  "tokio",
 ]
 
@@ -2878,6 +2876,7 @@ dependencies = [
 name = "example-custom-evm"
 version = "0.0.0"
 dependencies = [
+ "alloy-consensus",
  "alloy-genesis",
  "alloy-primitives",
  "eyre",
@@ -3063,6 +3062,7 @@ dependencies = [
 name = "example-stateful-precompile"
 version = "0.0.0"
 dependencies = [
+ "alloy-consensus",
  "alloy-genesis",
  "alloy-primitives",
  "eyre",
@@ -3139,7 +3139,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5"
 dependencies = [
  "libc",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
@@ -3196,9 +3196,9 @@ dependencies = [
 
 [[package]]
 name = "flate2"
-version = "1.0.34"
+version = "1.0.35"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0"
+checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c"
 dependencies = [
  "crc32fast",
  "miniz_oxide",
@@ -3317,7 +3317,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -3452,7 +3452,7 @@ dependencies = [
  "pin-project",
  "serde",
  "serde_json",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
  "wasm-bindgen",
  "wasm-bindgen-futures",
  "web-sys",
@@ -3496,9 +3496,9 @@ dependencies = [
 
 [[package]]
 name = "h2"
-version = "0.4.6"
+version = "0.4.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205"
+checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e"
 dependencies = [
  "atomic-waker",
  "bytes",
@@ -3761,9 +3761,9 @@ dependencies = [
 
 [[package]]
 name = "hyper"
-version = "1.5.0"
+version = "1.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a"
+checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f"
 dependencies = [
  "bytes",
  "futures-channel",
@@ -3792,7 +3792,7 @@ dependencies = [
  "hyper-util",
  "log",
  "rustls",
- "rustls-native-certs 0.8.0",
+ "rustls-native-certs 0.8.1",
  "rustls-pki-types",
  "tokio",
  "tokio-rustls",
@@ -3843,7 +3843,7 @@ dependencies = [
  "quote",
  "serde",
  "serde_json",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -3993,7 +3993,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -4054,13 +4054,13 @@ dependencies = [
 
 [[package]]
 name = "impl-trait-for-tuples"
-version = "0.2.2"
+version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb"
+checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 1.0.109",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -4111,6 +4111,12 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "indoc"
+version = "2.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5"
+
 [[package]]
 name = "infer"
 version = "0.2.3"
@@ -4167,12 +4173,16 @@ dependencies = [
 
 [[package]]
 name = "instability"
-version = "0.3.2"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c"
+checksum = "b829f37dead9dc39df40c2d3376c179fdfd2ac771f53f55d3c30dc096a3c0c6e"
 dependencies = [
+ "darling",
+ "indoc",
+ "pretty_assertions",
+ "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -4186,9 +4196,9 @@ dependencies = [
 
 [[package]]
 name = "interprocess"
-version = "2.2.1"
+version = "2.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2f4e4a06d42fab3e85ab1b419ad32b09eab58b901d40c57935ff92db3287a13"
+checksum = "894148491d817cb36b6f778017b8ac46b17408d522dd90f539d677ea938362eb"
 dependencies = [
  "doctest-file",
  "futures-core",
@@ -4273,9 +4283,9 @@ dependencies = [
 
 [[package]]
 name = "itoa"
-version = "1.0.11"
+version = "1.0.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
+checksum = "540654e97a3f4470a492cd30ff187bc95d89557a903a2bbf112e2fae98104ef2"
 
 [[package]]
 name = "jni"
@@ -4287,7 +4297,7 @@ dependencies = [
  "combine",
  "jni-sys",
  "log",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
  "walkdir",
 ]
 
@@ -4350,7 +4360,7 @@ dependencies = [
  "rustls-pki-types",
  "rustls-platform-verifier",
  "soketto",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
  "tokio",
  "tokio-rustls",
  "tokio-util",
@@ -4378,7 +4388,7 @@ dependencies = [
  "rustc-hash 2.0.0",
  "serde",
  "serde_json",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
  "tokio",
  "tokio-stream",
  "tracing",
@@ -4403,7 +4413,7 @@ dependencies = [
  "rustls-platform-verifier",
  "serde",
  "serde_json",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
  "tokio",
  "tower 0.4.13",
  "tracing",
@@ -4420,7 +4430,7 @@ dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -4442,7 +4452,7 @@ dependencies = [
  "serde",
  "serde_json",
  "soketto",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
  "tokio",
  "tokio-stream",
  "tokio-util",
@@ -4459,7 +4469,7 @@ dependencies = [
  "http",
  "serde",
  "serde_json",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
@@ -4580,9 +4590,9 @@ dependencies = [
 
 [[package]]
 name = "libc"
-version = "0.2.162"
+version = "0.2.165"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398"
+checksum = "fcb4d3d38eab6c5239a362fa8bae48c03baf980a6e7079f063942d563ef3533e"
 
 [[package]]
 name = "libloading"
@@ -4602,9 +4612,9 @@ checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa"
 
 [[package]]
 name = "libp2p-identity"
-version = "0.2.9"
+version = "0.2.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8"
+checksum = "257b5621d159b32282eac446bed6670c39c7dc68a200a992d8f056afa0066f6d"
 dependencies = [
  "asn1_der",
  "bs58",
@@ -4614,7 +4624,7 @@ dependencies = [
  "multihash",
  "quick-protobuf",
  "sha2 0.10.8",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
  "tracing",
  "zeroize",
 ]
@@ -4713,9 +4723,9 @@ checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"
 
 [[package]]
 name = "litemap"
-version = "0.7.3"
+version = "0.7.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704"
+checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104"
 
 [[package]]
 name = "lock_api"
@@ -4838,7 +4848,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "regex",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -4852,7 +4862,7 @@ dependencies = [
  "metrics",
  "metrics-util",
  "quanta",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
@@ -4902,7 +4912,7 @@ dependencies = [
  "reqwest",
  "serde",
  "serde_json",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
  "tokio",
  "tracing",
 ]
@@ -4965,9 +4975,9 @@ dependencies = [
 
 [[package]]
 name = "mockall"
-version = "0.13.0"
+version = "0.13.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a"
+checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2"
 dependencies = [
  "cfg-if",
  "downcast",
@@ -4979,14 +4989,14 @@ dependencies = [
 
 [[package]]
 name = "mockall_derive"
-version = "0.13.0"
+version = "0.13.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020"
+checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898"
 dependencies = [
  "cfg-if",
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -5234,7 +5244,7 @@ dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -5287,9 +5297,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9"
 
 [[package]]
 name = "op-alloy-consensus"
-version = "0.6.3"
+version = "0.6.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e33097177de330b1a83e0a882ae752ad55f23962b1e310176d1623655c18421e"
+checksum = "fce158d886815d419222daa67fcdf949a34f7950653a4498ebeb4963331f70ed"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -5300,14 +5310,14 @@ dependencies = [
  "derive_more 1.0.0",
  "serde",
  "serde_with",
- "spin",
+ "thiserror 2.0.3",
 ]
 
 [[package]]
 name = "op-alloy-genesis"
-version = "0.6.3"
+version = "0.6.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2232ff799352932fc5484e1c63ee7bb1e74a79ac7b94a4f7318560fba21167de"
+checksum = "2734e9a65efb90fe4520303f984c124766b7d2f2e5dd51cbe54d6269c85a3c91"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -5315,13 +5325,14 @@ dependencies = [
  "alloy-sol-types",
  "serde",
  "serde_repr",
+ "thiserror 2.0.3",
 ]
 
 [[package]]
 name = "op-alloy-network"
-version = "0.6.3"
+version = "0.6.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f1021b644a8f0bf8d7f878aa5328da67c7d697e476c8e097d09e05585067713"
+checksum = "87e4aef8ed017004a176ab1de49df419f59c0fb4a6ce3b693a10fe099fe1afe7"
 dependencies = [
  "alloy-consensus",
  "alloy-network",
@@ -5334,29 +5345,32 @@ dependencies = [
 
 [[package]]
 name = "op-alloy-protocol"
-version = "0.6.3"
+version = "0.6.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a566c421638a3b655a2aaf59fbbdee017a7dce6acfbacead219861e14654b98d"
+checksum = "6c68a3e2770890da3ad2fd20d7fe0c8e15672707577b4168a60e388c8eceaca0"
 dependencies = [
+ "alloc-no-stdlib",
  "alloy-consensus",
  "alloy-eips",
  "alloy-primitives",
  "alloy-rlp",
  "alloy-serde",
  "async-trait",
- "derive_more 1.0.0",
+ "brotli",
+ "miniz_oxide",
  "op-alloy-consensus",
  "op-alloy-genesis",
  "serde",
+ "thiserror 2.0.3",
  "tracing",
  "unsigned-varint",
 ]
 
 [[package]]
 name = "op-alloy-rpc-types"
-version = "0.6.3"
+version = "0.6.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72298f3f9084773dc3feaf88b08db82ceb3e3e13f98280459d869accb3f14234"
+checksum = "060ebeaea8c772e396215f69bb86d231ec8b7f36aca0dd6ce367ceaa9a8c33e6"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -5373,9 +5387,9 @@ dependencies = [
 
 [[package]]
 name = "op-alloy-rpc-types-engine"
-version = "0.6.3"
+version = "0.6.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2a270e6370a0fa8a673e29bcd436cbb67b5dc88cefc1d00fbf2382673894f71"
+checksum = "864dbd5511ef4ef00b6c2c980739259b25b24048007b7751ca0069b30b1e3fee"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -5387,11 +5401,12 @@ dependencies = [
  "op-alloy-protocol",
  "serde",
  "snap",
+ "thiserror 2.0.3",
 ]
 
 [[package]]
 name = "op-reth"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "clap",
  "reth-cli-util",
@@ -5466,9 +5481,9 @@ dependencies = [
 
 [[package]]
 name = "parity-scale-codec"
-version = "3.6.12"
+version = "3.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee"
+checksum = "8be4817d39f3272f69c59fe05d0535ae6456c2dc2fa1ba02910296c7e0a5c590"
 dependencies = [
  "arbitrary",
  "arrayvec",
@@ -5477,19 +5492,20 @@ dependencies = [
  "bytes",
  "impl-trait-for-tuples",
  "parity-scale-codec-derive",
+ "rustversion",
  "serde",
 ]
 
 [[package]]
 name = "parity-scale-codec-derive"
-version = "3.6.12"
+version = "3.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c"
+checksum = "8781a75c6205af67215f382092b6e0a4ff3734798523e69073d4bcd294ec767b"
 dependencies = [
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 1.0.109",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -5560,7 +5576,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442"
 dependencies = [
  "memchr",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
  "ucd-trie",
 ]
 
@@ -5604,7 +5620,7 @@ dependencies = [
  "phf_shared",
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -5633,7 +5649,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -5721,9 +5737,9 @@ dependencies = [
 
 [[package]]
 name = "portable-atomic"
-version = "1.9.0"
+version = "1.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2"
+checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6"
 
 [[package]]
 name = "powerfmt"
@@ -5750,7 +5766,7 @@ dependencies = [
  "smallvec",
  "symbolic-demangle",
  "tempfile",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
@@ -5805,7 +5821,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033"
 dependencies = [
  "proc-macro2",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -5856,14 +5872,14 @@ dependencies = [
  "proc-macro-error-attr2",
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.89"
+version = "1.0.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e"
+checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
 dependencies = [
  "unicode-ident",
 ]
@@ -5954,7 +5970,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
@@ -5998,9 +6014,9 @@ dependencies = [
 
 [[package]]
 name = "quinn"
-version = "0.11.5"
+version = "0.11.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684"
+checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef"
 dependencies = [
  "bytes",
  "pin-project-lite",
@@ -6009,26 +6025,29 @@ dependencies = [
  "rustc-hash 2.0.0",
  "rustls",
  "socket2",
- "thiserror 1.0.68",
+ "thiserror 2.0.3",
  "tokio",
  "tracing",
 ]
 
 [[package]]
 name = "quinn-proto"
-version = "0.11.8"
+version = "0.11.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6"
+checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d"
 dependencies = [
  "bytes",
+ "getrandom 0.2.15",
  "rand 0.8.5",
  "ring",
  "rustc-hash 2.0.0",
  "rustls",
+ "rustls-pki-types",
  "slab",
- "thiserror 1.0.68",
+ "thiserror 2.0.3",
  "tinyvec",
  "tracing",
+ "web-time",
 ]
 
 [[package]]
@@ -6150,7 +6169,7 @@ dependencies = [
  "bitflags 2.6.0",
  "cassowary",
  "compact_str",
- "crossterm 0.28.1",
+ "crossterm",
  "instability",
  "itertools 0.13.0",
  "lru",
@@ -6159,7 +6178,7 @@ dependencies = [
  "strum_macros",
  "unicode-segmentation",
  "unicode-truncate",
- "unicode-width",
+ "unicode-width 0.1.14",
 ]
 
 [[package]]
@@ -6214,7 +6233,7 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43"
 dependencies = [
  "getrandom 0.2.15",
  "libredox",
- "thiserror 1.0.68",
+ "thiserror 1.0.69",
 ]
 
 [[package]]
@@ -6225,7 +6244,7 @@ checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-automata 0.4.8",
+ "regex-automata 0.4.9",
  "regex-syntax 0.8.5",
 ]
 
@@ -6240,9 +6259,9 @@ dependencies = [
 
 [[package]]
 name = "regex-automata"
-version = "0.4.8"
+version = "0.4.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3"
+checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -6271,6 +6290,12 @@ dependencies = [
  "memchr",
 ]
 
+[[package]]
+name = "relative-path"
+version = "1.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2"
+
 [[package]]
 name = "reqwest"
 version = "0.12.9"
@@ -6296,13 +6321,13 @@ dependencies = [
  "pin-project-lite",
  "quinn",
  "rustls",
- "rustls-native-certs 0.8.0",
+ "rustls-native-certs 0.8.1",
  "rustls-pemfile",
  "rustls-pki-types",
  "serde",
  "serde_json",
  "serde_urlencoded",
- "sync_wrapper 1.0.1",
+ "sync_wrapper 1.0.2",
  "tokio",
  "tokio-rustls",
  "tokio-util",
@@ -6328,7 +6353,7 @@ dependencies = [
 
 [[package]]
 name = "reth"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -6375,6 +6400,7 @@ dependencies = [
  "reth-payload-primitives",
  "reth-payload-validator",
  "reth-primitives",
+ "reth-primitives-traits",
  "reth-provider",
  "reth-prune",
  "reth-revm",
@@ -6400,7 +6426,7 @@ dependencies = [
 
 [[package]]
 name = "reth-basic-payload-builder"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -6413,8 +6439,10 @@ dependencies = [
  "reth-evm",
  "reth-metrics",
  "reth-payload-builder",
+ "reth-payload-builder-primitives",
  "reth-payload-primitives",
  "reth-primitives",
+ "reth-primitives-traits",
  "reth-provider",
  "reth-revm",
  "reth-tasks",
@@ -6426,8 +6454,9 @@ dependencies = [
 
 [[package]]
 name = "reth-beacon-consensus"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
+ "alloy-consensus",
  "alloy-eips",
  "alloy-genesis",
  "alloy-primitives",
@@ -6439,6 +6468,7 @@ dependencies = [
  "reth-blockchain-tree",
  "reth-blockchain-tree-api",
  "reth-chainspec",
+ "reth-codecs",
  "reth-config",
  "reth-consensus",
  "reth-db",
@@ -6455,9 +6485,11 @@ dependencies = [
  "reth-network-p2p",
  "reth-node-types",
  "reth-payload-builder",
+ "reth-payload-builder-primitives",
  "reth-payload-primitives",
  "reth-payload-validator",
  "reth-primitives",
+ "reth-primitives-traits",
  "reth-provider",
  "reth-prune",
  "reth-prune-types",
@@ -6470,7 +6502,7 @@ dependencies = [
  "reth-tokio-util",
  "reth-tracing",
  "schnellru",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
  "tokio",
  "tokio-stream",
  "tracing",
@@ -6478,7 +6510,7 @@ dependencies = [
 
 [[package]]
 name = "reth-bench"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-eips",
  "alloy-json-rpc",
@@ -6502,10 +6534,11 @@ dependencies = [
  "reth-node-api",
  "reth-node-core",
  "reth-primitives",
+ "reth-primitives-traits",
  "reth-rpc-types-compat",
  "reth-tracing",
  "serde",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
  "tokio",
  "tower 0.4.13",
  "tracing",
@@ -6513,7 +6546,7 @@ dependencies = [
 
 [[package]]
 name = "reth-blockchain-tree"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -6551,7 +6584,7 @@ dependencies = [
 
 [[package]]
 name = "reth-blockchain-tree-api"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -6559,19 +6592,18 @@ dependencies = [
  "reth-execution-errors",
  "reth-primitives",
  "reth-storage-errors",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
 ]
 
 [[package]]
 name = "reth-chain-state"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
  "alloy-primitives",
  "alloy-signer",
  "alloy-signer-local",
- "auto_impl",
  "derive_more 1.0.0",
  "metrics",
  "parking_lot",
@@ -6582,6 +6614,7 @@ dependencies = [
  "reth-execution-types",
  "reth-metrics",
  "reth-primitives",
+ "reth-primitives-traits",
  "reth-storage-api",
  "reth-testing-utils",
  "reth-trie",
@@ -6593,7 +6626,7 @@ dependencies = [
 
 [[package]]
 name = "reth-chainspec"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-chains",
  "alloy-consensus",
@@ -6614,7 +6647,7 @@ dependencies = [
 
 [[package]]
 name = "reth-cli"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-genesis",
  "clap",
@@ -6627,9 +6660,10 @@ dependencies = [
 
 [[package]]
 name = "reth-cli-commands"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "ahash",
+ "alloy-consensus",
  "alloy-eips",
  "alloy-primitives",
  "alloy-rlp",
@@ -6637,7 +6671,7 @@ dependencies = [
  "backon",
  "clap",
  "comfy-table",
- "crossterm 0.28.1",
+ "crossterm",
  "eyre",
  "fdlimit",
  "futures",
@@ -6668,6 +6702,7 @@ dependencies = [
  "reth-network",
  "reth-network-p2p",
  "reth-network-peers",
+ "reth-node-api",
  "reth-node-builder",
  "reth-node-core",
  "reth-node-events",
@@ -6693,7 +6728,7 @@ dependencies = [
 
 [[package]]
 name = "reth-cli-runner"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "reth-tasks",
  "tokio",
@@ -6702,7 +6737,7 @@ dependencies = [
 
 [[package]]
 name = "reth-cli-util"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -6713,14 +6748,14 @@ dependencies = [
  "reth-fs-util",
  "secp256k1",
  "serde",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
  "tikv-jemallocator",
  "tracy-client",
 ]
 
 [[package]]
 name = "reth-codecs"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -6742,18 +6777,18 @@ dependencies = [
 
 [[package]]
 name = "reth-codecs-derive"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "convert_case",
  "proc-macro2",
  "quote",
  "similar-asserts",
- "syn 2.0.87",
+ "syn 2.0.89",
 ]
 
 [[package]]
 name = "reth-config"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-primitives",
  "eyre",
@@ -6769,18 +6804,20 @@ dependencies = [
 
 [[package]]
 name = "reth-consensus"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
+ "alloy-consensus",
  "alloy-eips",
  "alloy-primitives",
  "auto_impl",
  "derive_more 1.0.0",
  "reth-primitives",
+ "reth-primitives-traits",
 ]
 
 [[package]]
 name = "reth-consensus-common"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -6790,13 +6827,14 @@ dependencies = [
  "reth-chainspec",
  "reth-consensus",
  "reth-primitives",
+ "reth-primitives-traits",
  "reth-storage-api",
  "revm-primitives",
 ]
 
 [[package]]
 name = "reth-consensus-debug-client"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -6819,8 +6857,9 @@ dependencies = [
 
 [[package]]
 name = "reth-db"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
+ "alloy-consensus",
  "alloy-primitives",
  "arbitrary",
  "assert_matches",
@@ -6854,13 +6893,14 @@ dependencies = [
  "sysinfo",
  "tempfile",
  "test-fuzz",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
 ]
 
 [[package]]
 name = "reth-db-api"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
+ "alloy-consensus",
  "alloy-genesis",
  "alloy-primitives",
  "arbitrary",
@@ -6886,7 +6926,7 @@ dependencies = [
 
 [[package]]
 name = "reth-db-common"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-consensus",
  "alloy-genesis",
@@ -6909,13 +6949,13 @@ dependencies = [
  "reth-trie-db",
  "serde",
  "serde_json",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
  "tracing",
 ]
 
 [[package]]
 name = "reth-db-models"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-eips",
  "alloy-primitives",
@@ -6932,7 +6972,7 @@ dependencies = [
 
 [[package]]
 name = "reth-discv4"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -6951,7 +6991,7 @@ dependencies = [
  "schnellru",
  "secp256k1",
  "serde",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
  "tokio",
  "tokio-stream",
  "tracing",
@@ -6959,7 +6999,7 @@ dependencies = [
 
 [[package]]
 name = "reth-discv5"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-primitives",
  "alloy-rlp",
@@ -6976,14 +7016,14 @@ dependencies = [
  "reth-network-peers",
  "reth-tracing",
  "secp256k1",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
  "tokio",
  "tracing",
 ]
 
 [[package]]
 name = "reth-dns-discovery"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-chains",
  "alloy-primitives",
@@ -7002,7 +7042,7 @@ dependencies = [
  "secp256k1",
  "serde",
  "serde_with",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
  "tokio",
  "tokio-stream",
  "tracing",
@@ -7011,8 +7051,9 @@ dependencies = [
 
 [[package]]
 name = "reth-downloaders"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
+ "alloy-consensus",
  "alloy-eips",
  "alloy-primitives",
  "alloy-rlp",
@@ -7033,13 +7074,14 @@ dependencies = [
  "reth-network-p2p",
  "reth-network-peers",
  "reth-primitives",
+ "reth-primitives-traits",
  "reth-provider",
  "reth-storage-api",
  "reth-tasks",
  "reth-testing-utils",
  "reth-tracing",
  "tempfile",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
  "tokio",
  "tokio-stream",
  "tokio-util",
@@ -7048,12 +7090,13 @@ dependencies = [
 
 [[package]]
 name = "reth-e2e-test-utils"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
  "alloy-network",
  "alloy-primitives",
+ "alloy-rlp",
  "alloy-rpc-types-engine",
  "alloy-rpc-types-eth",
  "alloy-signer",
@@ -7063,17 +7106,27 @@ dependencies = [
  "futures-util",
  "jsonrpsee",
  "op-alloy-rpc-types-engine",
- "reth",
 "reth-chainspec",
  "reth-db",
  "reth-engine-local",
+ "reth-network",
+ "reth-network-api",
  "reth-network-peers",
+ "reth-node-api",
  "reth-node-builder",
+ "reth-node-core",
+ "reth-optimism-primitives",
  "reth-payload-builder",
+ "reth-payload-builder-primitives",
  "reth-payload-primitives",
+ "reth-primitives",
  "reth-provider",
+ "reth-rpc-api",
+ "reth-rpc-eth-api",
  "reth-rpc-layer",
+ "reth-rpc-server-types",
 "reth-stages-types",
+ "reth-tasks",
  "reth-tokio-util",
  "reth-tracing",
  "serde_json",
@@ -7085,7 +7138,7 @@ dependencies = [
 
 [[package]]
 name = "reth-ecies"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "aes",
  "alloy-primitives",
@@ -7105,7 +7158,7 @@ dependencies = [
  "secp256k1",
  "sha2 0.10.8",
  "sha3",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
  "tokio",
  "tokio-stream",
  "tokio-util",
@@ -7115,7 +7168,7 @@ dependencies = [
 
 [[package]]
 name = "reth-engine-local"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-primitives",
  "alloy-rpc-types-engine",
@@ -7130,9 +7183,10 @@ dependencies = [
  "reth-engine-tree",
  "reth-ethereum-engine-primitives",
  "reth-evm",
+ "reth-node-types",
  "reth-payload-builder",
+ "reth-payload-builder-primitives",
  "reth-payload-primitives",
- "reth-payload-validator",
  "reth-provider",
  "reth-prune",
  "reth-rpc-types-compat",
@@ -7145,25 +7199,34 @@ dependencies = [
 
 [[package]]
 name = "reth-engine-primitives"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
+ "alloy-consensus",
  "alloy-primitives",
- "reth-execution-types",
- "reth-payload-primitives",
+ "alloy-rpc-types-engine",
+ "futures",
+ "reth-errors",
+ "reth-execution-types",
+ "reth-payload-builder-primitives",
+ "reth-payload-primitives",
  "reth-primitives",
+ "reth-primitives-traits",
  "reth-trie",
  "serde",
+ "thiserror 2.0.3",
+ "tokio",
 ]
 
 [[package]]
 name = "reth-engine-service"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "futures",
  "pin-project",
  "reth-beacon-consensus",
  "reth-chainspec",
  "reth-consensus",
+ "reth-engine-primitives",
  "reth-engine-tree",
  "reth-ethereum-engine-primitives",
  "reth-evm",
@@ -7172,29 +7235,32 @@ dependencies = [
  "reth-network-p2p",
  "reth-node-types",
  "reth-payload-builder",
- "reth-payload-validator",
  "reth-primitives",
  "reth-provider",
  "reth-prune",
  "reth-stages-api",
  "reth-tasks",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
  "tokio",
  "tokio-stream",
 ]
 
 [[package]]
 name = "reth-engine-tree"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
+ "alloy-consensus",
  "alloy-eips",
  "alloy-primitives",
  "alloy-rlp",
  "alloy-rpc-types-engine",
  "assert_matches",
+ "criterion",
+ "crossbeam-channel",
  "futures",
  "metrics",
- "pin-project",
+ "rand 0.8.5",
+ "rayon",
 "reth-beacon-consensus",
  "reth-blockchain-tree",
  "reth-blockchain-tree-api",
@@ -7210,8 +7276,8 @@ dependencies = [
  "reth-metrics",
  "reth-network-p2p",
  "reth-payload-builder",
+ "reth-payload-builder-primitives",
  "reth-payload-primitives",
- "reth-payload-validator",
  "reth-primitives",
  "reth-provider",
  "reth-prune",
@@ -7222,19 +7288,21 @@ dependencies = [
  "reth-stages-api",
  "reth-static-file",
  "reth-tasks",
+ "reth-testing-utils",
  "reth-tracing",
  "reth-trie",
+ "reth-trie-db",
  "reth-trie-parallel",
+ "reth-trie-sparse",
  "revm-primitives",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
  "tokio",
- "tokio-stream",
  "tracing",
 ]
 
 [[package]]
 name = "reth-engine-util"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "alloy-consensus",
  "alloy-eips",
@@ -7244,7 +7312,6 @@ dependencies = [
  "futures",
  "itertools 0.13.0",
  "pin-project",
- "reth-beacon-consensus",
 "reth-engine-primitives",
  "reth-errors",
  "reth-ethereum-forks",
@@ -7266,20 +7333,21 @@ dependencies = [
 
 [[package]]
 name = "reth-errors"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
  "reth-blockchain-tree-api",
  "reth-consensus",
  "reth-execution-errors",
  "reth-fs-util",
  "reth-storage-errors",
- "thiserror 2.0.1",
+ "thiserror 2.0.3",
 ]
 
 [[package]]
 name = "reth-eth-wire"
-version = "1.1.1"
+version = "1.1.2"
 dependencies = [
+ "alloy-chains",
"alloy-eips", "alloy-primitives", "alloy-rlp", @@ -7292,19 +7360,20 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "reth-chainspec", "reth-codecs", "reth-ecies", "reth-eth-wire-types", + "reth-ethereum-forks", "reth-metrics", "reth-network-peers", "reth-primitives", + "reth-primitives-traits", "reth-tracing", "secp256k1", "serde", "snap", "test-fuzz", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", @@ -7313,7 +7382,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7330,13 +7399,14 @@ dependencies = [ "reth-chainspec", "reth-codecs-derive", "reth-primitives", + "reth-primitives-traits", "serde", - "thiserror 2.0.1", + "thiserror 2.0.3", ] [[package]] name = "reth-ethereum-cli" -version = "1.1.1" +version = "1.1.2" dependencies = [ "clap", "eyre", @@ -7347,7 +7417,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7356,12 +7426,13 @@ dependencies = [ "reth-consensus", "reth-consensus-common", "reth-primitives", + "reth-primitives-traits", "tracing", ] [[package]] name = "reth-ethereum-engine-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7371,6 +7442,7 @@ dependencies = [ "reth-chainspec", "reth-engine-primitives", "reth-payload-primitives", + "reth-payload-validator", "reth-primitives", "reth-rpc-types-compat", "serde", @@ -7380,7 +7452,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7395,12 +7467,12 @@ dependencies = [ "proptest-derive", "rustc-hash 2.0.0", "serde", - "thiserror 2.0.1", + "thiserror 2.0.3", ] [[package]] name = "reth-ethereum-payload-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7413,6 +7485,7 @@ dependencies = [ "reth-evm-ethereum", "reth-execution-types", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", "reth-provider", @@ -7420,13 +7493,12 @@ dependencies = [ "reth-transaction-pool", "reth-trie", "revm", - "revm-primitives", "tracing", ] [[package]] name = "reth-etl" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "rayon", @@ -7436,7 +7508,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7464,7 +7536,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7487,7 +7559,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7502,8 +7574,9 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "arbitrary", @@ -7511,7 +7584,9 @@ dependencies = [ "rand 0.8.5", "reth-execution-errors", "reth-primitives", + "reth-primitives-traits", "reth-trie", + "reth-trie-common", "revm", "serde", "serde_with", @@ -7519,7 +7594,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7562,7 +7637,7 @@ dependencies = [ 
[[package]] name = "reth-exex-test-utils" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "eyre", @@ -7589,13 +7664,13 @@ dependencies = [ "reth-transaction-pool", "reth-trie-db", "tempfile", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", ] [[package]] name = "reth-exex-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7605,23 +7680,25 @@ dependencies = [ "reth-chain-state", "reth-execution-types", "reth-primitives", + "reth-primitives-traits", "serde", "serde_with", ] [[package]] name = "reth-fs-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "serde", "serde_json", - "thiserror 2.0.1", + "thiserror 2.0.3", ] [[package]] name = "reth-invalid-block-hooks" -version = "1.1.1" +version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", @@ -7633,6 +7710,7 @@ dependencies = [ "reth-engine-primitives", "reth-evm", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-api", @@ -7644,7 +7722,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.1.1" +version = "1.1.2" dependencies = [ "async-trait", "bytes", @@ -7656,7 +7734,7 @@ dependencies = [ "rand 0.8.5", "reth-tracing", "serde_json", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", @@ -7666,7 +7744,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.1.1" +version = "1.1.2" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -7681,13 +7759,13 @@ dependencies = [ "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror 2.0.1", + "thiserror 2.0.3", "tracing", ] [[package]] name = "reth-mdbx-sys" -version = "1.1.1" +version = "1.1.2" dependencies = [ "bindgen", "cc", @@ -7695,7 +7773,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.1.1" +version = "1.1.2" dependencies = [ "futures", "metrics", @@ -7706,28 +7784,28 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.1.1" +version = "1.1.2" dependencies = [ "futures-util", "if-addrs", "reqwest", "reth-tracing", "serde_with", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tracing", ] [[package]] name = "reth-network" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7755,6 +7833,8 @@ dependencies = [ "reth-dns-discovery", "reth-ecies", "reth-eth-wire", + "reth-eth-wire-types", + "reth-ethereum-forks", "reth-fs-util", "reth-metrics", "reth-net-banlist", @@ -7764,6 +7844,7 @@ dependencies = [ "reth-network-peers", "reth-network-types", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-storage-api", "reth-tasks", @@ -7777,7 +7858,7 @@ dependencies = [ "serial_test", "smallvec", "tempfile", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tokio-stream", "tokio-util", @@ -7787,7 +7868,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -7802,15 +7883,16 @@ dependencies = [ "reth-network-types", "reth-tokio-util", "serde", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tokio-stream", ] [[package]] name = "reth-network-p2p" -version = "1.1.1" +version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "auto_impl", @@ -7822,6 +7904,7 @@ dependencies = [ "reth-network-peers", "reth-network-types", "reth-primitives", 
+ "reth-primitives-traits", "reth-storage-errors", "tokio", "tracing", @@ -7829,7 +7912,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7838,14 +7921,14 @@ dependencies = [ "secp256k1", "serde_json", "serde_with", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "url", ] [[package]] name = "reth-network-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "humantime-serde", "reth-ethereum-forks", @@ -7858,7 +7941,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.1.1" +version = "1.1.2" dependencies = [ "anyhow", "bincode", @@ -7869,15 +7952,16 @@ dependencies = [ "reth-fs-util", "serde", "tempfile", - "thiserror 2.0.1", + "thiserror 2.0.3", "tracing", "zstd", ] [[package]] name = "reth-node-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-rpc-types-engine", "eyre", "reth-beacon-consensus", @@ -7887,8 +7971,8 @@ dependencies = [ "reth-network-api", "reth-node-core", "reth-node-types", + "reth-payload-builder-primitives", "reth-payload-primitives", - "reth-primitives", "reth-provider", "reth-tasks", "reth-transaction-pool", @@ -7896,8 +7980,9 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types", "aquamarine", @@ -7934,7 +8019,6 @@ dependencies = [ "reth-node-events", "reth-node-metrics", "reth-payload-builder", - "reth-payload-primitives", "reth-payload-validator", "reth-primitives", "reth-provider", @@ -7951,6 +8035,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "reth-transaction-pool", + "revm-primitives", "secp256k1", "tempfile", "tokio", @@ -7960,7 +8045,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7978,15 +8063,17 @@ dependencies = [ "reth-chainspec", "reth-cli-util", "reth-config", - "reth-consensus-common", + "reth-consensus", "reth-db", "reth-discv4", "reth-discv5", + "reth-ethereum-forks", "reth-net-nat", "reth-network", "reth-network-p2p", "reth-network-peers", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-rpc-eth-types", "reth-rpc-server-types", @@ -8000,7 +8087,7 @@ dependencies = [ "serde", "shellexpand", "strum", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "toml", "tracing", @@ -8009,7 +8096,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-contract", @@ -8018,12 +8105,13 @@ dependencies = [ "alloy-primitives", "alloy-provider", "alloy-rpc-types-beacon", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", "alloy-signer", "alloy-sol-types", "eyre", "futures", "rand 0.8.5", - "reth", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-chainspec", @@ -8038,11 +8126,14 @@ dependencies = [ "reth-network", "reth-node-api", "reth-node-builder", + "reth-node-core", "reth-payload-builder", + "reth-payload-primitives", "reth-primitives", "reth-provider", "reth-revm", "reth-rpc", + "reth-rpc-eth-api", "reth-tasks", "reth-tracing", "reth-transaction-pool", @@ -8054,7 +8145,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8064,33 +8155,31 @@ dependencies = [ "humantime", "pin-project", "reth-beacon-consensus", - "reth-network", + 
"reth-engine-primitives", "reth-network-api", "reth-primitives-traits", - "reth-provider", - "reth-prune", + "reth-prune-types", "reth-stages", - "reth-static-file", + "reth-static-file-types", + "reth-storage-api", "tokio", "tracing", ] [[package]] name = "reth-node-metrics" -version = "1.1.1" +version = "1.1.2" dependencies = [ "eyre", "http", - "jsonrpsee", + "jsonrpsee-server", "metrics", "metrics-exporter-prometheus", "metrics-process", "metrics-util", "procfs 0.16.0", "reqwest", - "reth-db-api", "reth-metrics", - "reth-provider", "reth-tasks", "socket2", "tikv-jemalloc-ctl", @@ -8102,7 +8191,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "reth-chainspec", "reth-db-api", @@ -8113,7 +8202,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8133,11 +8222,14 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.1.1" +version = "1.1.2" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "clap", + "derive_more 1.0.0", "eyre", "futures-util", "op-alloy-consensus", @@ -8154,6 +8246,7 @@ dependencies = [ "reth-downloaders", "reth-errors", "reth-execution-types", + "reth-fs-util", "reth-network-p2p", "reth-node-builder", "reth-node-core", @@ -8171,6 +8264,7 @@ dependencies = [ "reth-static-file", "reth-static-file-types", "reth-tracing", + "serde", "tempfile", "tokio", "tokio-util", @@ -8179,10 +8273,11 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", + "alloy-trie", "reth-chainspec", "reth-consensus", "reth-consensus-common", @@ -8195,7 +8290,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8212,6 +8307,7 @@ dependencies = [ "reth-optimism-chainspec", "reth-optimism-consensus", "reth-optimism-forks", + "reth-optimism-primitives", "reth-primitives", "reth-prune-types", "reth-revm", @@ -8222,7 +8318,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8233,7 +8329,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8248,7 +8344,6 @@ dependencies = [ "op-alloy-consensus", "op-alloy-rpc-types-engine", "parking_lot", - "reth", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-chainspec", @@ -8260,17 +8355,23 @@ dependencies = [ "reth-network", "reth-node-api", "reth-node-builder", + "reth-node-core", "reth-optimism-chainspec", "reth-optimism-consensus", "reth-optimism-evm", "reth-optimism-forks", "reth-optimism-node", "reth-optimism-payload-builder", + "reth-optimism-primitives", "reth-optimism-rpc", "reth-payload-builder", + "reth-payload-util", + "reth-payload-validator", "reth-primitives", "reth-provider", "reth-revm", + "reth-rpc-server-types", + "reth-tasks", "reth-tracing", "reth-transaction-pool", "reth-trie-db", @@ -8282,12 +8383,13 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", + "alloy-rpc-types-debug", "alloy-rpc-types-engine", "op-alloy-consensus", "op-alloy-rpc-types-engine", @@ -8301,7 
+8403,9 @@ dependencies = [ "reth-optimism-evm", "reth-optimism-forks", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", + "reth-payload-util", "reth-primitives", "reth-provider", "reth-revm", @@ -8310,32 +8414,45 @@ dependencies = [ "reth-trie", "revm", "sha2 0.10.8", - "thiserror 2.0.1", + "thiserror 2.0.3", "tracing", ] [[package]] name = "reth-optimism-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", + "alloy-rlp", + "arbitrary", + "bytes", + "derive_more 1.0.0", + "op-alloy-consensus", + "reth-codecs", "reth-primitives", + "reth-primitives-traits", + "rstest", + "serde", ] [[package]] name = "reth-optimism-rpc" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", + "alloy-rpc-types-debug", "alloy-rpc-types-eth", "derive_more 1.0.0", + "jsonrpsee-core", "jsonrpsee-types", "op-alloy-consensus", "op-alloy-network", "op-alloy-rpc-types", + "op-alloy-rpc-types-engine", "parking_lot", "reqwest", "reth-chainspec", @@ -8347,9 +8464,12 @@ dependencies = [ "reth-optimism-consensus", "reth-optimism-evm", "reth-optimism-forks", + "reth-optimism-payload-builder", + "reth-optimism-primitives", "reth-primitives", "reth-provider", "reth-rpc", + "reth-rpc-api", "reth-rpc-eth-api", "reth-rpc-eth-types", "reth-rpc-server-types", @@ -8357,14 +8477,14 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tracing", ] [[package]] name = "reth-optimism-storage" -version = "1.1.1" +version = "1.1.2" dependencies = [ "reth-codecs", "reth-db-api", @@ -8375,8 +8495,9 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types", "async-trait", @@ -8385,6 +8506,7 @@ dependencies = [ "reth-chain-state", "reth-ethereum-engine-primitives", "reth-metrics", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", "revm", @@ -8393,31 +8515,49 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-payload-builder-primitives" +version = "1.1.2" +dependencies = [ + "alloy-rpc-types-engine", + "async-trait", + "pin-project", + "reth-payload-primitives", + "tokio", + "tokio-stream", + "tracing", +] + [[package]] name = "reth-payload-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", - "async-trait", "op-alloy-rpc-types-engine", - "pin-project", "reth-chain-state", "reth-chainspec", "reth-errors", "reth-primitives", - "reth-transaction-pool", + "revm-primitives", "serde", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", - "tokio-stream", - "tracing", +] + +[[package]] +name = "reth-payload-util" +version = "1.1.2" +dependencies = [ + "alloy-consensus", + "alloy-primitives", + "reth-primitives", ] [[package]] name = "reth-payload-validator" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-rpc-types", "reth-chainspec", @@ -8427,7 +8567,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8437,6 +8577,7 @@ dependencies = [ "alloy-rlp", "alloy-rpc-types", "alloy-serde", + "alloy-trie", "arbitrary", "assert_matches", "bincode", @@ -8462,6 +8603,7 @@ dependencies = [ "reth-testing-utils", "reth-trie-common", "revm-primitives", + "rstest", "secp256k1", "serde", "serde_json", 
@@ -8472,7 +8614,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8480,6 +8622,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "arbitrary", + "auto_impl", "bincode", "byteorder", "bytes", @@ -8489,7 +8632,6 @@ dependencies = [ "proptest-arbitrary-interop", "rand 0.8.5", "reth-codecs", - "reth-testing-utils", "revm-primitives", "roaring", "serde", @@ -8500,7 +8642,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8533,6 +8675,7 @@ dependencies = [ "reth-node-types", "reth-optimism-primitives", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-stages-types", "reth-storage-api", @@ -8549,8 +8692,10 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.1.1" +version = "1.1.2" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "assert_matches", "itertools 0.13.0", @@ -8563,6 +8708,7 @@ dependencies = [ "reth-errors", "reth-exex-types", "reth-metrics", + "reth-primitives-traits", "reth-provider", "reth-prune-types", "reth-stages", @@ -8571,14 +8717,14 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "rustc-hash 2.0.0", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tracing", ] [[package]] name = "reth-prune-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "arbitrary", @@ -8592,13 +8738,13 @@ dependencies = [ "serde", "serde_json", "test-fuzz", - "thiserror 2.0.1", + "thiserror 2.0.3", "toml", ] [[package]] name = "reth-revm" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8606,6 +8752,7 @@ dependencies = [ "reth-ethereum-forks", "reth-execution-errors", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-storage-api", "reth-storage-errors", @@ -8615,7 +8762,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -8660,6 +8807,7 @@ dependencies = [ "reth-network-types", "reth-payload-validator", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-api", @@ -8677,7 +8825,7 @@ dependencies = [ "revm-primitives", "serde", "serde_json", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tokio-stream", "tower 0.4.13", @@ -8687,7 +8835,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -8711,7 +8859,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8730,8 +8878,9 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.1.1" +version = "1.1.2" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", @@ -8770,7 +8919,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tokio-util", "tower 0.4.13", @@ -8780,7 +8929,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8799,6 +8948,7 @@ dependencies = [ "reth-evm", "reth-metrics", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", "reth-provider", @@ -8810,14 +8960,14 @@ 
dependencies = [ "reth-tokio-util", "reth-transaction-pool", "serde", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tracing", ] [[package]] name = "reth-rpc-eth-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -8859,7 +9009,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8880,6 +9030,7 @@ dependencies = [ "reth-execution-types", "reth-metrics", "reth-primitives", + "reth-primitives-traits", "reth-revm", "reth-rpc-server-types", "reth-rpc-types-compat", @@ -8893,7 +9044,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -8901,22 +9052,24 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-rpc-types-engine", "http", + "http-body-util", "jsonrpsee", "jsonrpsee-http-client", "pin-project", "reqwest", "tokio", "tower 0.4.13", + "tower-http", "tracing", ] [[package]] name = "reth-rpc-server-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8931,7 +9084,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8940,6 +9093,7 @@ dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-serde", + "jsonrpsee-types", "reth-primitives", "reth-trie-common", "serde", @@ -8948,8 +9102,10 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.1.1" +version = "1.1.2" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "assert_matches", @@ -8990,14 +9146,14 @@ dependencies = [ "reth-trie", "reth-trie-db", "tempfile", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tracing", ] [[package]] name = "reth-stages-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "aquamarine", @@ -9017,7 +9173,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -9025,7 +9181,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "arbitrary", @@ -9042,14 +9198,16 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "assert_matches", "parking_lot", "rayon", + "reth-codecs", "reth-db", "reth-db-api", + "reth-primitives-traits", "reth-provider", "reth-prune-types", "reth-stages", @@ -9064,7 +9222,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "clap", @@ -9075,26 +9233,30 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", + "alloy-rpc-types-engine", "auto_impl", "reth-chainspec", + "reth-db", "reth-db-api", "reth-db-models", "reth-execution-types", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-stages-types", "reth-storage-errors", "reth-trie", + "reth-trie-db", ] [[package]] name = "reth-storage-errors" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9106,7 +9268,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.1.1" 
+version = "1.1.2" dependencies = [ "auto_impl", "dyn-clone", @@ -9115,7 +9277,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tracing", "tracing-futures", @@ -9123,7 +9285,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9131,12 +9293,13 @@ dependencies = [ "alloy-primitives", "rand 0.8.5", "reth-primitives", + "reth-primitives-traits", "secp256k1", ] [[package]] name = "reth-tokio-util" -version = "1.1.1" +version = "1.1.2" dependencies = [ "tokio", "tokio-stream", @@ -9145,7 +9308,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.1.1" +version = "1.1.2" dependencies = [ "clap", "eyre", @@ -9159,7 +9322,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9184,7 +9347,9 @@ dependencies = [ "reth-execution-types", "reth-fs-util", "reth-metrics", + "reth-payload-util", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-storage-api", "reth-tasks", @@ -9196,7 +9361,7 @@ dependencies = [ "serde_json", "smallvec", "tempfile", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tokio-stream", "tracing", @@ -9204,13 +9369,13 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", "alloy-rlp", + "alloy-trie", "auto_impl", - "bincode", "criterion", "itertools 0.13.0", "metrics", @@ -9224,16 +9389,14 @@ dependencies = [ "reth-storage-errors", "reth-trie-common", "revm", - "serde", "serde_json", - "serde_with", "tracing", "triehash", ] [[package]] name = "reth-trie-common" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9241,7 +9404,9 @@ dependencies = [ "alloy-rlp", "alloy-trie", "arbitrary", + "bincode", "bytes", + "criterion", "derive_more 1.0.0", "hash-db", "itertools 0.13.0", @@ -9253,11 +9418,13 @@ dependencies = [ "reth-primitives-traits", "revm-primitives", "serde", + "serde_json", + "serde_with", ] [[package]] name = "reth-trie-db" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9286,7 +9453,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9306,29 +9473,32 @@ dependencies = [ "reth-trie", "reth-trie-common", "reth-trie-db", - "thiserror 2.0.1", + "thiserror 2.0.3", "tokio", "tracing", ] [[package]] name = "reth-trie-sparse" -version = "1.1.1" +version = "1.1.2" dependencies = [ "alloy-primitives", "alloy-rlp", + "arbitrary", "assert_matches", "criterion", "itertools 0.13.0", "pretty_assertions", "proptest", + "proptest-arbitrary-interop", "rand 0.8.5", + "reth-primitives-traits", "reth-testing-utils", "reth-tracing", "reth-trie", "reth-trie-common", "smallvec", - "thiserror 2.0.1", + "thiserror 2.0.3", ] [[package]] @@ -9362,7 +9532,7 @@ dependencies = [ "colorchoice", "revm", "serde_json", - "thiserror 1.0.68", + "thiserror 1.0.69", ] [[package]] @@ -9513,6 +9683,7 @@ checksum = "8f4b84ba6e838ceb47b41de5194a60244fac43d9fe03b71dbe8c5a201081d6d1" dependencies = [ "bytemuck", "byteorder", + "serde", ] [[package]] @@ -9530,6 +9701,36 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" 
+[[package]] +name = "rstest" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a2c585be59b6b5dd66a9d2084aa1d8bd52fbdb806eafdeffb52791147862035" +dependencies = [ + "futures", + "futures-timer", + "rstest_macros", + "rustc_version 0.4.1", +] + +[[package]] +name = "rstest_macros" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "825ea780781b15345a146be27eaefb05085e337e869bff01b4306a4fd4a9ad5a" +dependencies = [ + "cfg-if", + "glob", + "proc-macro-crate", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version 0.4.1", + "syn 2.0.89", + "unicode-ident", +] + [[package]] name = "ruint" version = "1.12.3" @@ -9608,9 +9809,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.39" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.6.0", "errno", @@ -9621,9 +9822,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.16" +version = "0.23.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" +checksum = "9c9cc1d47e243d655ace55ed38201c19ae02c148ae56412ab8750e8f0166ab7f" dependencies = [ "log", "once_cell", @@ -9644,20 +9845,19 @@ dependencies = [ "rustls-pemfile", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 2.11.1", ] [[package]] name = "rustls-native-certs" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" dependencies = [ "openssl-probe", - "rustls-pemfile", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 3.0.1", ] [[package]] @@ -9674,6 +9874,9 @@ name = "rustls-pki-types" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +dependencies = [ + "web-time", +] [[package]] name = "rustls-platform-verifier" @@ -9681,7 +9884,7 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afbb878bdfdf63a336a5e63561b1835e7a8c91524f51621db870169eac84b490" dependencies = [ - "core-foundation", + "core-foundation 0.9.4", "core-foundation-sys", "jni", "log", @@ -9690,7 +9893,7 @@ dependencies = [ "rustls-native-certs 0.7.3", "rustls-platform-verifier-android", "rustls-webpki", - "security-framework", + "security-framework 2.11.1", "security-framework-sys", "webpki-roots", "winapi", @@ -9754,18 +9957,18 @@ dependencies = [ [[package]] name = "scc" -version = "2.2.4" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8d25269dd3a12467afe2e510f69fb0b46b698e5afb296b59f2145259deaf8e8" +checksum = "66b202022bb57c049555430e11fc22fea12909276a80a4c3d368da36ac1d88ed" dependencies = [ "sdd", ] [[package]] name = "schannel" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ 
"windows-sys 0.59.0", ] @@ -9840,18 +10043,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.6.0", - "core-foundation", + "core-foundation 0.9.4", "core-foundation-sys", "libc", "num-bigint", "security-framework-sys", ] +[[package]] +name = "security-framework" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1415a607e92bec364ea2cf9264646dcce0f91e6d65281bd6f2819cca3bf39c8" +dependencies = [ + "bitflags 2.6.0", + "core-foundation 0.10.0", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + [[package]] name = "security-framework-sys" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" dependencies = [ "core-foundation-sys", "libc", @@ -9877,9 +10093,9 @@ dependencies = [ [[package]] name = "semver-parser" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" dependencies = [ "pest", ] @@ -9898,29 +10114,29 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "indexmap 2.6.0", "itoa", @@ -9937,7 +10153,7 @@ checksum = "c7715380eec75f029a4ef7de39a9200e0a63823176b759d055b613f5a87df6a6" dependencies = [ "percent-encoding", "serde", - "thiserror 1.0.68", + "thiserror 1.0.69", ] [[package]] @@ -9948,7 +10164,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -9999,14 +10215,14 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "serial_test" -version = "3.1.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" dependencies = [ "once_cell", "parking_lot", @@ -10016,13 +10232,13 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "3.1.1" +version = "3.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10173,7 +10389,7 @@ checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ "num-bigint", "num-traits", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", ] @@ -10245,9 +10461,6 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -dependencies = [ - "lock_api", -] [[package]] name = "spki" @@ -10308,7 +10521,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10366,9 +10579,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.87" +version = "2.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" +checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" dependencies = [ "proc-macro2", "quote", @@ -10377,14 +10590,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edf42e81491fb8871b74df3d222c64ae8cbc1269ea509fa768a3ed3e1b0ac8cb" +checksum = "f76fe0a3e1476bdaa0775b9aec5b869ed9520c2b2fedfe9c6df3618f8ea6290b" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10395,9 +10608,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sync_wrapper" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" dependencies = [ "futures-core", ] @@ -10410,14 +10623,14 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "sysinfo" -version = "0.31.4" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "355dbe4f8799b304b05e1b0f05fc59b2a18d36645cf169607da45bde2f69a1be" +checksum = "4c33cd241af0f2e9e3b5c32163b873b29956890b5342e6745b917ce9d490f4af" dependencies = [ "core-foundation-sys", "libc", @@ -10487,7 +10700,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10511,42 +10724,42 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl 1.0.68", + "thiserror-impl 1.0.69", ] [[package]] name = "thiserror" -version = "2.0.1" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07c1e40dd48a282ae8edc36c732cbc219144b87fb6a4c7316d611c6b1f06ec0c" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" dependencies = [ - 
"thiserror-impl 2.0.1", + "thiserror-impl 2.0.3", ] [[package]] name = "thiserror-impl" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7c61ec9a6f64d2793d8a45faba21efbe3ced62a886d44c36a009b2b519b4c7e" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "thiserror-impl" -version = "2.0.1" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "874aa7e446f1da8d9c3a5c95b1c5eb41d800045252121dc7f8e0ba370cee55f5" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10703,7 +10916,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -10831,12 +11044,12 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.5.2" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", - "base64 0.21.7", + "base64 0.22.1", "bitflags 2.6.0", "bytes", "futures-core", @@ -10853,7 +11066,7 @@ dependencies = [ "pin-project-lite", "tokio", "tokio-util", - "tower 0.4.13", + "tower 0.5.1", "tower-layer", "tower-service", "tracing", @@ -10891,7 +11104,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ "crossbeam-channel", - "thiserror 1.0.68", + "thiserror 1.0.69", "time", "tracing-subscriber", ] @@ -10904,7 +11117,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11042,7 +11255,7 @@ dependencies = [ "once_cell", "rand 0.8.5", "smallvec", - "thiserror 1.0.68", + "thiserror 1.0.69", "tinyvec", "tokio", "tracing", @@ -11065,7 +11278,7 @@ dependencies = [ "resolv-conf", "serde", "smallvec", - "thiserror 1.0.68", + "thiserror 1.0.69", "tokio", "tracing", "trust-dns-proto", @@ -11093,7 +11306,7 @@ dependencies = [ "rustls", "rustls-pki-types", "sha1", - "thiserror 1.0.68", + "thiserror 1.0.69", "utf-8", ] @@ -11153,9 +11366,9 @@ checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-normalization" @@ -11180,7 +11393,7 @@ checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf" dependencies = [ "itertools 0.13.0", "unicode-segmentation", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -11189,6 +11402,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + [[package]] name = "unicode-xid" version = "0.2.6" @@ -11219,9 +11438,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.3" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", "idna 1.0.3", @@ -11296,7 +11515,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11367,7 +11586,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "wasm-bindgen-shared", ] @@ -11401,7 +11620,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11427,9 +11646,9 @@ dependencies = [ [[package]] name = "wasmtimer" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb4f099acbc1043cc752b91615b24b02d7f6fcd975bd781fed9f50b3c3e15bf7" +checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" dependencies = [ "futures", "js-sys", @@ -11449,11 +11668,21 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "webpki-roots" -version = "0.26.6" +version = "0.26.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" +checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" dependencies = [ "rustls-pki-types", ] @@ -11557,7 +11786,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11568,7 +11797,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11579,7 +11808,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11590,7 +11819,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11824,7 +12053,7 @@ dependencies = [ "pharos", "rustc_version 0.4.1", "send_wrapper 0.6.0", - "thiserror 1.0.68", + "thiserror 1.0.69", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -11847,9 +12076,9 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" dependencies = [ "serde", "stable_deref_trait", @@ -11859,13 +12088,13 @@ dependencies = [ 
[[package]] name = "yoke-derive" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -11887,27 +12116,27 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "zerofrom" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -11928,7 +12157,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -11950,7 +12179,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 9368ed9e3bf4..da012d258da9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.1.1" +version = "1.1.2" edition = "2021" rust-version = "1.82" license = "MIT OR Apache-2.0" @@ -80,8 +80,10 @@ members = [ "crates/optimism/storage", "crates/payload/basic/", "crates/payload/builder/", + "crates/payload/builder-primitives/", "crates/payload/primitives/", "crates/payload/validator/", + "crates/payload/util/", "crates/primitives-traits/", "crates/primitives/", "crates/prune/prune", @@ -379,8 +381,10 @@ reth-optimism-primitives = { path = "crates/optimism/primitives" } reth-optimism-rpc = { path = "crates/optimism/rpc" } reth-optimism-storage = { path = "crates/optimism/storage" } reth-payload-builder = { path = "crates/payload/builder" } +reth-payload-builder-primitives = { path = "crates/payload/builder-primitives" } reth-payload-primitives = { path = "crates/payload/primitives" } reth-payload-validator = { path = "crates/payload/validator" } +reth-payload-util = { path = "crates/payload/util" } reth-primitives = { path = "crates/primitives", default-features = false, features = [ "std", ] } @@ -415,6 +419,7 @@ reth-trie = { path = "crates/trie/trie" } reth-trie-common = { path = "crates/trie/common" } reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } +reth-trie-sparse = { path = "crates/trie/sparse" } # revm revm = { version = "18.0.0", features = ["std"], default-features = false } @@ -431,46 +436,46 @@ alloy-rlp = "0.3.4" alloy-sol-types = "0.8.11" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.6.2", default-features = false } -alloy-contract = { version = "0.6.2", default-features = false } -alloy-eips = { version = "0.6.2", default-features = false } -alloy-genesis = { version = "0.6.2", 
default-features = false } -alloy-json-rpc = { version = "0.6.2", default-features = false } -alloy-network = { version = "0.6.2", default-features = false } -alloy-network-primitives = { version = "0.6.2", default-features = false } -alloy-node-bindings = { version = "0.6.2", default-features = false } -alloy-provider = { version = "0.6.2", features = [ +alloy-consensus = { version = "0.6.4", default-features = false } +alloy-contract = { version = "0.6.4", default-features = false } +alloy-eips = { version = "0.6.4", default-features = false } +alloy-genesis = { version = "0.6.4", default-features = false } +alloy-json-rpc = { version = "0.6.4", default-features = false } +alloy-network = { version = "0.6.4", default-features = false } +alloy-network-primitives = { version = "0.6.4", default-features = false } +alloy-node-bindings = { version = "0.6.4", default-features = false } +alloy-provider = { version = "0.6.4", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.6.2", default-features = false } -alloy-rpc-client = { version = "0.6.2", default-features = false } -alloy-rpc-types = { version = "0.6.2", features = [ +alloy-pubsub = { version = "0.6.4", default-features = false } +alloy-rpc-client = { version = "0.6.4", default-features = false } +alloy-rpc-types = { version = "0.6.4", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.6.2", default-features = false } -alloy-rpc-types-anvil = { version = "0.6.2", default-features = false } -alloy-rpc-types-beacon = { version = "0.6.2", default-features = false } -alloy-rpc-types-debug = { version = "0.6.2", default-features = false } -alloy-rpc-types-engine = { version = "0.6.2", default-features = false } -alloy-rpc-types-eth = { version = "0.6.2", default-features = false } -alloy-rpc-types-mev = { version = "0.6.2", default-features = false } -alloy-rpc-types-trace = { version = "0.6.2", default-features = false } -alloy-rpc-types-txpool = { version = "0.6.2", default-features = false } -alloy-serde = { version = "0.6.2", default-features = false } -alloy-signer = { version = "0.6.2", default-features = false } -alloy-signer-local = { version = "0.6.2", default-features = false } -alloy-transport = { version = "0.6.2" } -alloy-transport-http = { version = "0.6.2", features = [ +alloy-rpc-types-admin = { version = "0.6.4", default-features = false } +alloy-rpc-types-anvil = { version = "0.6.4", default-features = false } +alloy-rpc-types-beacon = { version = "0.6.4", default-features = false } +alloy-rpc-types-debug = { version = "0.6.4", default-features = false } +alloy-rpc-types-engine = { version = "0.6.4", default-features = false } +alloy-rpc-types-eth = { version = "0.6.4", default-features = false } +alloy-rpc-types-mev = { version = "0.6.4", default-features = false } +alloy-rpc-types-trace = { version = "0.6.4", default-features = false } +alloy-rpc-types-txpool = { version = "0.6.4", default-features = false } +alloy-serde = { version = "0.6.4", default-features = false } +alloy-signer = { version = "0.6.4", default-features = false } +alloy-signer-local = { version = "0.6.4", default-features = false } +alloy-transport = { version = "0.6.4" } +alloy-transport-http = { version = "0.6.4", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.6.2", default-features = false } -alloy-transport-ws = { version = "0.6.2", default-features = false } +alloy-transport-ipc = { version = "0.6.4", default-features = 
false } +alloy-transport-ws = { version = "0.6.4", default-features = false } # op -op-alloy-rpc-types = "0.6.3" -op-alloy-rpc-types-engine = "0.6.3" -op-alloy-network = "0.6.3" -op-alloy-consensus = "0.6.3" +op-alloy-rpc-types = "0.6.7" +op-alloy-rpc-types-engine = "0.6.7" +op-alloy-network = "0.6.7" +op-alloy-consensus = "0.6.7" # misc aquamarine = "0.6" @@ -523,6 +528,7 @@ tracing = "0.1.0" tracing-appender = "0.2" url = "2.3" zstd = "0.13" +byteorder = "1" # metrics metrics = "0.24.0" @@ -550,8 +556,10 @@ hyper = "1.3" hyper-util = "0.1.5" pin-project = "1.0.12" reqwest = { version = "0.12", default-features = false } +tracing-futures = "0.2" tower = "0.4" -tower-http = "0.5" +tower-http = "0.6" + # p2p discv5 = "0.8.0" @@ -560,12 +568,14 @@ if-addrs = "0.13" # rpc jsonrpsee = "0.24" jsonrpsee-core = "0.24" +jsonrpsee-server = "0.24" jsonrpsee-http-client = "0.24" jsonrpsee-types = "0.24" # http http = "1.0" http-body = "1.0" +http-body-util = "0.1.2" jsonwebtoken = "9" proptest-arbitrary-interop = "0.1.0" @@ -595,6 +605,7 @@ serial_test = { default-features = false, version = "3" } similar-asserts = { version = "1.5.0", features = ["serde"] } tempfile = "3.8" test-fuzz = "6" +rstest = "0.23.0" tikv-jemalloc-ctl = "0.6" tikv-jemallocator = "0.6" @@ -628,7 +639,7 @@ tracy-client = "0.17.3" #alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } #alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "a971b3a" } +#op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } +#op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } #op-alloy-rpc-types = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } #op-alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } -#op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } -#op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "6a042e7681b1" } diff --git a/HARDFORK-CHECKLIST.md b/HARDFORK-CHECKLIST.md index 80ebfc20c98f..17c639f0d5e4 100644 --- a/HARDFORK-CHECKLIST.md +++ b/HARDFORK-CHECKLIST.md @@ -17,5 +17,5 @@ ### Updates to the engine API - Add new endpoints to the `EngineApi` trait and implement endpoints. -- Update the `ExceuctionPayload` + `ExecutionPayloadSidecar` to `Block` conversion if there are any additional parameters. -- Update version specific validation checks in the `EngineValidator` trait. \ No newline at end of file +- Update the `ExecutionPayload` + `ExecutionPayloadSidecar` to `Block` conversion if there are any additional parameters. +- Update version specific validation checks in the `EngineValidator` trait. 
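Reviewer note on the checklist item above: the payload-to-block conversion is the one place where any new hardfork field has to be threaded through. A minimal, self-contained sketch of that shape, with stub types standing in for the real `ExecutionPayload`, `ExecutionPayloadSidecar`, and `Block` (the actual conversion lives in reth's engine / rpc-types-compat code; all fields below are illustrative):

```rust
// Illustrative stubs only; these are not reth's real types.
struct ExecutionPayload {
    transactions: Vec<Vec<u8>>,
}

struct ExecutionPayloadSidecar {
    // A fork that adds a new engine-API parameter extends the sidecar...
    parent_beacon_block_root: Option<[u8; 32]>,
}

struct Block {
    transactions: Vec<Vec<u8>>,
    // ...and the conversion must carry it into the block, or downstream
    // validation will see a default/missing value.
    parent_beacon_block_root: Option<[u8; 32]>,
}

fn payload_to_block(payload: ExecutionPayload, sidecar: ExecutionPayloadSidecar) -> Block {
    Block {
        transactions: payload.transactions,
        parent_beacon_block_root: sidecar.parent_beacon_block_root,
    }
}

fn main() {
    let block = payload_to_block(
        ExecutionPayload { transactions: vec![] },
        ExecutionPayloadSidecar { parent_beacon_block_root: Some([0u8; 32]) },
    );
    assert!(block.parent_beacon_block_root.is_some());
}
```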
diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index 03844633a926..0182076130cb 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -20,6 +20,7 @@ reth-node-core.workspace = true reth-node-api.workspace = true reth-rpc-types-compat.workspace = true reth-primitives = { workspace = true, features = ["alloy-compat"] } +reth-primitives-traits.workspace = true reth-tracing.workspace = true # alloy diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index dd2f863e2c94..9e573a8957e1 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -18,7 +18,7 @@ use clap::Parser; use csv::Writer; use reth_cli_runner::CliContext; use reth_node_core::args::BenchmarkArgs; -use reth_primitives::Block; +use reth_primitives::{Block, BlockExt}; use reth_rpc_types_compat::engine::payload::block_to_payload; use std::time::Instant; use tracing::{debug, info}; @@ -75,11 +75,11 @@ impl Command { while let Some((block, head, safe, finalized)) = receiver.recv().await { // just put gas used here - let gas_used = block.header.gas_used; + let gas_used = block.gas_used; let block_number = block.header.number; let versioned_hashes: Vec = - block.blob_versioned_hashes().into_iter().copied().collect(); + block.body.blob_versioned_hashes().into_iter().copied().collect(); let parent_beacon_block_root = block.parent_beacon_block_root; let payload = block_to_payload(block); diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index 68b2f76527df..0611faabf101 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -16,7 +16,7 @@ use clap::Parser; use csv::Writer; use reth_cli_runner::CliContext; use reth_node_core::args::BenchmarkArgs; -use reth_primitives::Block; +use reth_primitives::{Block, BlockExt}; use reth_rpc_types_compat::engine::payload::block_to_payload; use std::time::Instant; use tracing::{debug, info}; @@ -60,10 +60,10 @@ impl Command { while let Some(block) = receiver.recv().await { // just put gas used here - let gas_used = block.header.gas_used; + let gas_used = block.gas_used; let versioned_hashes: Vec = - block.blob_versioned_hashes().into_iter().copied().collect(); + block.body.blob_versioned_hashes().into_iter().copied().collect(); let parent_beacon_block_root = block.parent_beacon_block_root; let payload = block_to_payload(block); diff --git a/bin/reth-bench/src/bench/output.rs b/bin/reth-bench/src/bench/output.rs index 8f68dac45336..56343c6af641 100644 --- a/bin/reth-bench/src/bench/output.rs +++ b/bin/reth-bench/src/bench/output.rs @@ -1,7 +1,7 @@ //! Contains various benchmark output formats, either for logging or for //! serialization to / from files. 
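Aside on the `GIGAGAS` constant that the next hunk re-imports from `reth-primitives-traits`: the bench output reports execution throughput in giga-gas per second. A sketch of that arithmetic, assuming the conventional `GIGAGAS = 10^9` (not reth-bench's actual code):

```rust
use std::time::Duration;

// Conventional definition: one gigagas = 10^9 gas.
const GIGAGAS: u64 = 1_000_000_000;

/// Gas throughput in Ggas/s, the figure the benchmark output reports.
fn gigagas_per_second(total_gas_used: u64, elapsed: Duration) -> f64 {
    total_gas_used as f64 / GIGAGAS as f64 / elapsed.as_secs_f64()
}

fn main() {
    // e.g. a 30M-gas block executed in 25 ms => 1.2 Ggas/s
    let ggas = gigagas_per_second(30_000_000, Duration::from_millis(25));
    println!("{ggas:.2} Ggas/s");
}
```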
-use reth_primitives::constants::gas_units::GIGAGAS; +use reth_primitives_traits::constants::GIGAGAS; use serde::{ser::SerializeStruct, Serialize}; use std::time::Duration; diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index ffd1998b24ea..a152bea2681e 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -19,6 +19,7 @@ reth-ethereum-cli.workspace = true reth-chainspec.workspace = true reth-config.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-fs-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-db-api.workspace = true diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 30af4c61c536..dc00e07d8830 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -15,26 +15,29 @@ use reth_blockchain_tree::{ }; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_consensus::Consensus; use reth_errors::RethResult; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_fs_util as fs; -use reth_node_api::{ - EngineApiMessageVersion, NodeTypesWithDB, NodeTypesWithEngine, PayloadBuilderAttributes, -}; +use reth_node_api::{BlockTy, EngineApiMessageVersion, PayloadBuilderAttributes}; use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; use reth_primitives::{ - revm_primitives::KzgSettings, BlobTransaction, PooledTransactionsElement, SealedBlock, - SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, + BlobTransaction, BlockExt, PooledTransactionsElement, SealedBlockFor, SealedBlockWithSenders, + SealedHeader, Transaction, TransactionSigned, }; use reth_provider::{ - providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, - ProviderFactory, StageCheckpointReader, StateProviderFactory, + providers::{BlockchainProvider, ProviderNodeTypes}, + BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, + StageCheckpointReader, StateProviderFactory, +}; +use reth_revm::{ + cached::CachedReads, + database::StateProviderDatabase, + primitives::{EnvKzgSettings, KzgSettings}, }; -use reth_revm::{cached::CachedReads, database::StateProviderDatabase, primitives::EnvKzgSettings}; use reth_stages::StageId; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, BlobStore, EthPooledTransaction, PoolConfig, TransactionOrigin, @@ -84,10 +87,10 @@ impl> Command { /// Fetches the best block from the database. /// /// If the database is empty, returns the genesis block. 
- fn lookup_best_block>( + fn lookup_best_block>( &self, factory: ProviderFactory, - ) -> RethResult> { + ) -> RethResult>>> { let provider = factory.provider()?; let best_number = @@ -119,7 +122,7 @@ impl> Command { } /// Execute `debug in-memory-merkle` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { @@ -198,7 +201,7 @@ impl> Command { let encoded_length = pooled.encode_2718_len(); // insert the blob into the store - blob_store.insert(transaction.hash, sidecar)?; + blob_store.insert(transaction.hash(), sidecar)?; encoded_length } @@ -256,7 +259,7 @@ impl> Command { let senders = block.senders().expect("sender recovery failed"); let block_with_senders = - SealedBlockWithSenders::new(block.clone(), senders).unwrap(); + SealedBlockWithSenders::>::new(block.clone(), senders).unwrap(); let db = StateProviderDatabase::new(blockchain_db.latest()?); let executor = diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index cc584c892874..efe4a2f7c221 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -4,11 +4,11 @@ use crate::{args::NetworkArgs, utils::get_single_header}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use clap::Parser; -use futures::{stream::select as stream_select, StreamExt}; +use futures::StreamExt; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; @@ -19,13 +19,13 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_exex::ExExManagerHandle; -use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider, NetworkHandle}; +use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_network_p2p::{headers::client::HeadersClient, BlockClient}; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; +use reth_network_p2p::{headers::client::HeadersClient, EthBlockClient}; +use reth_node_api::NodeTypesWithDBAdapter; use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ - BlockExecutionWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, + providers::ProviderNodeTypes, ChainSpecProvider, ProviderFactory, StageCheckpointReader, }; use reth_prune::PruneModes; use reth_stages::{ @@ -58,7 +58,7 @@ pub struct Command { } impl> Command { - fn build_pipeline, Client>( + fn build_pipeline + CliNodeTypes, Client>( &self, config: &Config, client: Client, @@ -68,11 +68,11 @@ impl> Command { static_file_producer: StaticFileProducer>, ) -> eyre::Result> where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) - .build(client.clone(), Arc::clone(&consensus)) + .build(client.clone(), consensus.clone().as_header_validator()) .into_task_with(task_executor); let body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) @@ -116,7 +116,7 @@ impl> Command { Ok(pipeline) } - async fn build_network>( + async fn build_network>( &self, config: &Config, task_executor: 
TaskExecutor, @@ -137,11 +137,14 @@ impl> Command { Ok(network) } - async fn fetch_block_hash( + async fn fetch_block_hash( &self, client: Client, block: BlockNumber, - ) -> eyre::Result { + ) -> eyre::Result + where + Client: HeadersClient, + { info!(target: "reth::cli", ?block, "Fetching block from the network."); loop { match get_single_header(&client, BlockHashOrNumber::Number(block)).await { @@ -157,7 +160,7 @@ impl> Command { } /// Execute `execution-debug` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { @@ -203,17 +206,12 @@ impl> Command { return Ok(()) } - let pipeline_events = pipeline.events(); - let events = stream_select( - network.event_listener().map(Into::into), - pipeline_events.map(Into::into), - ); ctx.task_executor.spawn_critical( "events task", reth_node_events::node::handle_events( Some(Box::new(network)), latest_block_number, - events, + pipeline.events().map(Into::into), ), ); @@ -231,11 +229,7 @@ impl> Command { trace!(target: "reth::cli", from = next_block, to = target_block, tip = ?target_block_hash, ?result, "Pipeline finished"); // Unwind the pipeline without committing. - { - provider_factory - .provider_rw()? - .take_block_and_execution_range(next_block..=target_block)?; - } + provider_factory.provider_rw()?.unwind_trie_state_range(next_block..=target_block)?; // Update latest block current_max_block = target_block; diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 2c56da9b4cfc..870dc1ddf233 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -7,9 +7,10 @@ use crate::{ use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::Parser; +use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; @@ -18,12 +19,13 @@ use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; +use reth_node_api::{BlockTy, NodePrimitives}; use reth_node_ethereum::EthExecutorProvider; +use reth_primitives::BlockExt; use reth_provider::{ - writer::UnifiedStorageWriter, AccountExtReader, ChainSpecProvider, HashingWriter, - HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, - StageCheckpointReader, StateWriter, StaticFileProviderFactory, StorageReader, + providers::ProviderNodeTypes, AccountExtReader, ChainSpecProvider, DatabaseProviderFactory, + HashingWriter, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, + StageCheckpointReader, StateWriter, StorageLocation, StorageReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages::StageId; @@ -55,7 +57,15 @@ pub struct Command { } impl> Command { - async fn build_network>( + async fn build_network< + N: ProviderNodeTypes< + ChainSpec = C::ChainSpec, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, + >( &self, config: &Config, task_executor: TaskExecutor, @@ -77,7 
+87,7 @@ impl> Command { } /// Execute `debug in-memory-merkle` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { @@ -124,17 +134,15 @@ impl> Command { let client = fetch_client.clone(); let chain = provider_factory.chain_spec(); - let block = (move || get_single_body(client.clone(), Arc::clone(&chain), header.clone())) + let consensus = Arc::new(EthBeaconConsensus::new(chain.clone())); + let block = (move || get_single_body(client.clone(), header.clone(), consensus.clone())) .retry(backoff) .notify( |err, _| warn!(target: "reth::cli", "Error requesting body: {err}. Retrying..."), ) .await?; - let db = StateProviderDatabase::new(LatestStateProviderRef::new( - provider.tx_ref(), - provider_factory.static_file_provider(), - )); + let db = StateProviderDatabase::new(LatestStateProviderRef::new(&provider)); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db); @@ -144,7 +152,7 @@ impl> Command { ( &block .clone() - .unseal() + .unseal::>() .with_recovered_senders() .ok_or(BlockValidationError::SenderRecoveryError)?, merkle_block_td + block.difficulty, @@ -164,7 +172,7 @@ impl> Command { return Ok(()) } - let provider_rw = provider_factory.provider_rw()?; + let provider_rw = provider_factory.database_provider_rw()?; // Insert block, state and hashes provider_rw.insert_historical_block( @@ -173,8 +181,11 @@ impl> Command { .try_seal_with_senders() .map_err(|_| BlockValidationError::SenderRecoveryError)?, )?; - let mut storage_writer = UnifiedStorageWriter::from_database(&provider_rw.0); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; + provider_rw.write_state( + execution_outcome, + OriginalValuesKnown::No, + StorageLocation::Database, + )?; let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?; let storages = provider_rw.plain_state_storages(storage_lists)?; provider_rw.insert_storage_for_hashing(storages)?; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 3c6e38512c9e..78e32df52664 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -6,7 +6,7 @@ use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; @@ -17,12 +17,12 @@ use reth_evm::execute::{BatchExecutor, BlockExecutorProvider}; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_network_p2p::full_block::FullBlockClient; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; +use reth_node_api::{BlockTy, NodePrimitives}; use reth_node_ethereum::EthExecutorProvider; use reth_provider::{ - writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, + providers::ProviderNodeTypes, BlockNumReader, BlockWriter, ChainSpecProvider, DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, - ProviderError, ProviderFactory, StateWriter, StaticFileProviderFactory, + ProviderError, ProviderFactory, StateWriter, StorageLocation, }; use reth_revm::database::StateProviderDatabase; use reth_stages::{ @@ -56,7 +56,15 @@ pub struct Command { } impl> 
Command { - async fn build_network>( + async fn build_network< + N: ProviderNodeTypes< + ChainSpec = C::ChainSpec, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, + >( &self, config: &Config, task_executor: TaskExecutor, @@ -78,7 +86,7 @@ impl> Command { } /// Execute `merkle-debug` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { @@ -145,24 +153,24 @@ impl> Command { for block in blocks.into_iter().rev() { let block_number = block.number; let sealed_block = block - .try_seal_with_senders() + .try_seal_with_senders::>() .map_err(|block| eyre::eyre!("Error sealing block with senders: {block:?}"))?; trace!(target: "reth::cli", block_number, "Executing block"); - provider_rw.insert_block(sealed_block.clone())?; + provider_rw.insert_block(sealed_block.clone(), StorageLocation::Database)?; td += sealed_block.difficulty; let mut executor = executor_provider.batch_executor(StateProviderDatabase::new( - LatestStateProviderRef::new( - provider_rw.tx_ref(), - provider_rw.static_file_provider().clone(), - ), + LatestStateProviderRef::new(&provider_rw), )); executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?; let execution_outcome = executor.finalize(); - let mut storage_writer = UnifiedStorageWriter::from_database(&provider_rw); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + provider_rw.write_state( + execution_outcome, + OriginalValuesKnown::Yes, + StorageLocation::Database, + )?; let checkpoint = Some(StageCheckpoint::new( block_number diff --git a/bin/reth/src/commands/debug_cmd/mod.rs b/bin/reth/src/commands/debug_cmd/mod.rs index 51681e8c59e1..65329f414007 100644 --- a/bin/reth/src/commands/debug_cmd/mod.rs +++ b/bin/reth/src/commands/debug_cmd/mod.rs @@ -3,8 +3,8 @@ use clap::{Parser, Subcommand}; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::common::CliNodeTypes; use reth_cli_runner::CliContext; -use reth_node_api::NodeTypesWithEngine; use reth_node_ethereum::EthEngineTypes; mod build_block; @@ -37,9 +37,7 @@ pub enum Subcommands { impl> Command { /// Execute `debug` command - pub async fn execute< - N: NodeTypesWithEngine, - >( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 9314a439265d..04d3b5763aef 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -8,7 +8,7 @@ use reth_blockchain_tree::{ }; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; @@ -18,13 +18,12 @@ use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage} use reth_fs_util as fs; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_node_api::{ - EngineApiMessageVersion, NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine, -}; +use reth_node_api::{EngineApiMessageVersion, NodePrimitives, NodeTypesWithDBAdapter}; use reth_node_ethereum::{EthEngineTypes, EthEvmConfig, EthExecutorProvider}; use 
reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::{ - providers::BlockchainProvider, CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, + providers::{BlockchainProvider, ProviderNodeTypes}, + CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, }; use reth_prune::PruneModes; use reth_stages::Pipeline; @@ -56,7 +55,15 @@ pub struct Command { } impl> Command { - async fn build_network>( + async fn build_network< + N: ProviderNodeTypes< + ChainSpec = C::ChainSpec, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, + >( &self, config: &Config, task_executor: TaskExecutor, @@ -78,9 +85,7 @@ impl> Command { } /// Execute `debug replay-engine` command - pub async fn execute< - N: NodeTypesWithEngine, - >( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 6b71f48de123..53c592063eca 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -15,7 +15,7 @@ //! - `min-error-logs`: Disables all logs below `error` level. //! - `min-warn-logs`: Disables all logs below `warn` level. //! - `min-info-logs`: Disables all logs below `info` level. This can speed up the node, since fewer -//! calls to the logging component is made. +//! calls to the logging component are made. //! - `min-debug-logs`: Disables all logs below `debug` level. //! - `min-trace-logs`: Disables all logs below `trace` level. diff --git a/book/developers/profiling.md b/book/developers/profiling.md index f1fdf520eb2e..956bc5633030 100644 --- a/book/developers/profiling.md +++ b/book/developers/profiling.md @@ -25,7 +25,7 @@ In this tutorial, we will be reviewing: [Jemalloc](https://jemalloc.net/) is a general-purpose allocator that is used [across the industry in production](https://engineering.fb.com/2011/01/03/core-data/scalable-memory-allocation-using-jemalloc/), well known for its performance benefits, predictability, and profiling capabilities. We've seen significant performance benefits in reth when using jemalloc, but will be primarily focusing on its profiling capabilities. -Jemalloc also provides tools for analyzing and visualizing its the allocation profiles it generates, notably `jeprof`. +Jemalloc also provides tools for analyzing and visualizing the allocation profiles it generates, notably `jeprof`. #### Enabling jemalloc in reth diff --git a/book/installation/installation.md b/book/installation/installation.md index ebf6c8ef3f90..1df122d4d442 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -44,13 +44,13 @@ As of April 2024 at block number 19.6M: * Archive Node: At least 2.14TB is required * Full Node: At least 1.13TB is required -NVMe drives are recommended for the best performance, with SSDs being a cheaper alternative. HDDs are the cheapest option, but they will take the longest to sync, and are not recommended. +NVMe-based SSD drives are recommended for the best performance, with SATA SSDs being a cheaper alternative. HDDs are the cheapest option, but they will take the longest to sync, and are not recommended. As of February 2024, syncing an Ethereum mainnet node to block 19.3M on NVMe drives takes about 50 hours, while on a GCP "Persistent SSD" it takes around 5 days. > **Note** > -> It is highly recommended to choose a TLC drive when using NVMe, and not a QLC drive. See [the note](#qlc-and-tlc) above. 
A list of recommended drives can be found [here]( https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). +> It is highly recommended to choose a TLC drive when using an NVMe drive, and not a QLC drive. See [the note](#qlc-and-tlc) above. A list of recommended drives can be found [here]( https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). ### CPU diff --git a/book/run/config.md b/book/run/config.md index 10fd40ca7630..bb28d855de8d 100644 --- a/book/run/config.md +++ b/book/run/config.md @@ -36,7 +36,7 @@ The defaults shipped with Reth try to be relatively reasonable, but may not be o ### `headers` -The headers section controls both the behavior of the header stage, which download historical headers, as well as the primary downloader that fetches headers over P2P. +The headers section controls both the behavior of the header stage, which downloads historical headers, and the primary downloader that fetches headers over P2P. ```toml [stages.headers] @@ -65,7 +65,7 @@ commit_threshold = 10000 ``` ### `bodies` -The bodies section controls both the behavior of the bodies stage, which download historical block bodies, as well as the primary downloader that fetches block bodies over P2P. +The bodies section controls both the behavior of the bodies stage, which downloads historical block bodies, and the primary downloader that fetches block bodies over P2P. ```toml [stages.bodies] @@ -102,7 +102,7 @@ The sender recovery stage recovers the address of transaction senders using tran ```toml [stages.sender_recovery] -# The amount of transactions to recover senders for before +# The number of transactions to recover senders for before # writing the results to disk. # # Lower thresholds correspond to more frequent disk I/O (writes), diff --git a/book/run/private-testnet.md b/book/run/private-testnet.md index 3a987e52c73a..28253ca9f010 100644 --- a/book/run/private-testnet.md +++ b/book/run/private-testnet.md @@ -6,7 +6,7 @@ This guide uses [Kurtosis' ethereum-package](https://github.com/ethpandaops/ethe * Go [here](https://docs.kurtosis.com/install/) to install Kurtosis * Go [here](https://docs.docker.com/get-docker/) to install Docker -The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) is a [package](https://docs.kurtosis.com/advanced-concepts/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth various CL clients locally. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. +The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) is a [package](https://docs.kurtosis.com/advanced-concepts/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth and various CL clients. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. To see all possible configurations and flags you can use, including metrics and observability tools (e.g. Grafana, Prometheus, etc), go [here](https://github.com/ethpandaops/ethereum-package#configuration). 
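To make the `commit_threshold` snippets above concrete, here is a small sketch that parses the documented `[stages.headers]` key with the `serde` and `toml` crates (the mirror structs are hypothetical; reth's real `reth_config::Config` is richer):

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct HeadersConfig {
    // headers are flushed to disk once this many have been downloaded
    commit_threshold: u64,
}

#[derive(Deserialize)]
struct StagesConfig {
    headers: HeadersConfig,
}

#[derive(Deserialize)]
struct ConfigSketch {
    stages: StagesConfig,
}

fn main() {
    // the value shown in the config.md snippet above
    let raw = "[stages.headers]\ncommit_threshold = 10000\n";
    let cfg: ConfigSketch = toml::from_str(raw).unwrap();
    assert_eq!(cfg.stages.headers.commit_threshold, 10_000);
}
```

Lower thresholds trade throughput for more frequent, smaller commits, which is the knob the config section is describing.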
diff --git a/book/run/pruning.md b/book/run/pruning.md index da3bb07e2cdf..25d11b4e46e3 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -18,7 +18,7 @@ the steps for running Reth as a full node, what caveats to expect and how to con - Full Node – Reth node that has the latest state and historical data for only the last 10064 blocks available for querying in the same way as an archive node. -The node type that was chosen when first [running a node](./run-a-node.md) **can not** be changed after +The node type that was chosen when first [running a node](./run-a-node.md) **cannot** be changed after the initial sync. Turning Archive into Pruned, or Pruned into Full is not supported. ## Modes diff --git a/book/run/sync-op-mainnet.md b/book/run/sync-op-mainnet.md index 2a862314a1d5..0e2090acbcb5 100644 --- a/book/run/sync-op-mainnet.md +++ b/book/run/sync-op-mainnet.md @@ -1,6 +1,6 @@ # Sync OP Mainnet -To sync OP mainnet, bedrock state needs to be imported as a starting point. There are currently two ways: +To sync OP mainnet, Bedrock state needs to be imported as a starting point. There are currently two ways: * Minimal bootstrap **(recommended)**: only state snapshot at Bedrock block is imported without any OVM historical data. * Full bootstrap **(not recommended)**: state, blocks and receipts are imported. *Not recommended for now: [storage consistency issue](https://github.com/paradigmxyz/reth/pull/11099) tldr: sudden crash may break the node diff --git a/book/run/transactions.md b/book/run/transactions.md index 61327b57300a..edb3a24d76f2 100644 --- a/book/run/transactions.md +++ b/book/run/transactions.md @@ -38,7 +38,7 @@ Alongside the `accessList` parameter and legacy parameters (except `gasPrice`), The base fee is burned, while the priority fee is paid to the miner who includes the transaction, incentivizing miners to include transactions with higher priority fees per gas. -## EIP-4844 Transaction +## EIP-4844 Transactions [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844) transactions (type `0x3`) were introduced in Ethereum's Dencun fork. This provides a temporary but significant scaling relief for rollups by allowing them to initially scale to 0.375 MB per slot, with a separate fee market allowing fees to be very low while usage of this system is limited. diff --git a/book/run/troubleshooting.md b/book/run/troubleshooting.md index 7368b6631abb..cab39cb1165c 100644 --- a/book/run/troubleshooting.md +++ b/book/run/troubleshooting.md @@ -8,7 +8,7 @@ This page tries to answer how to deal with the most popular issues. If you're: 1. Running behind the tip -2. Have slow canonical commit time according to the `Canonical Commit Latency time` chart on [Grafana dashboard](./observability.md#prometheus--grafana) (more than 2-3 seconds) +2. Having a slow canonical commit time according to the `Canonical Commit Latency Time` chart on [Grafana dashboard](./observability.md#prometheus--grafana) (more than 2-3 seconds) 3. Seeing warnings in your logs such as ```console 2023-11-08T15:17:24.789731Z WARN providers::db: Transaction insertion took too long block_number=18528075 tx_num=2150227643 hash=0xb7de1d6620efbdd3aa8547c47a0ff09a7fd3e48ba3fd2c53ce94c6683ed66e7c elapsed=6.793759034s @@ -48,7 +48,7 @@ equal to the [freshly synced node](../installation/installation.md#hardware-requ mv reth_compact.dat $(reth db path)/mdbx.dat ``` 7. Start Reth -8. Confirm that the values on the `Freelist` chart is near zero and the values on the `Canonical Commit Latency time` chart +8. 
Confirm that the values on the `Freelist` chart are near zero and the values on the `Canonical Commit Latency Time` chart is less than 1 second. 9. Delete original database ```bash diff --git a/book/sources/exex/hello-world/src/bin/3.rs b/book/sources/exex/hello-world/src/bin/3.rs index 21bd25a56dbf..ebeaf6c84f19 100644 --- a/book/sources/exex/hello-world/src/bin/3.rs +++ b/book/sources/exex/hello-world/src/bin/3.rs @@ -1,10 +1,12 @@ use futures_util::TryStreamExt; -use reth::api::FullNodeComponents; +use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; -async fn my_exex(mut ctx: ExExContext) -> eyre::Result<()> { +async fn my_exex>>( + mut ctx: ExExContext, +) -> eyre::Result<()> { while let Some(notification) = ctx.notifications.try_next().await? { match ¬ification { ExExNotification::ChainCommitted { new } => { diff --git a/book/sources/exex/remote/src/exex.rs b/book/sources/exex/remote/src/exex.rs index 1ae4785db8b8..00392b4dad10 100644 --- a/book/sources/exex/remote/src/exex.rs +++ b/book/sources/exex/remote/src/exex.rs @@ -3,6 +3,7 @@ use remote_exex::proto::{ self, remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, }; +use reth::{primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; @@ -44,7 +45,7 @@ impl RemoteExEx for ExExService { } } -async fn remote_exex( +async fn remote_exex>>( mut ctx: ExExContext, notifications: Arc>, ) -> eyre::Result<()> { diff --git a/book/sources/exex/remote/src/exex_4.rs b/book/sources/exex/remote/src/exex_4.rs index 24c7bf2c2f11..c37f26d739dc 100644 --- a/book/sources/exex/remote/src/exex_4.rs +++ b/book/sources/exex/remote/src/exex_4.rs @@ -3,6 +3,7 @@ use remote_exex::proto::{ self, remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, }; +use reth::{primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; @@ -46,7 +47,7 @@ impl RemoteExEx for ExExService { // ANCHOR: snippet #[allow(dead_code)] -async fn remote_exex( +async fn remote_exex>>( mut ctx: ExExContext, notifications: Arc>, ) -> eyre::Result<()> { diff --git a/book/sources/exex/tracking-state/src/bin/1.rs b/book/sources/exex/tracking-state/src/bin/1.rs index 0d42e0791a17..2cf43bec3a17 100644 --- a/book/sources/exex/tracking-state/src/bin/1.rs +++ b/book/sources/exex/tracking-state/src/bin/1.rs @@ -5,7 +5,7 @@ use std::{ }; use futures_util::{FutureExt, TryStreamExt}; -use reth::api::FullNodeComponents; +use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; @@ -14,7 +14,7 @@ struct MyExEx { ctx: ExExContext, } -impl Future for MyExEx { +impl>> Future for MyExEx { type Output = eyre::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/book/sources/exex/tracking-state/src/bin/2.rs b/book/sources/exex/tracking-state/src/bin/2.rs index 9416810668f5..b58d2a39c85c 100644 --- a/book/sources/exex/tracking-state/src/bin/2.rs +++ b/book/sources/exex/tracking-state/src/bin/2.rs @@ -6,7 +6,7 @@ use std::{ use alloy_primitives::BlockNumber; use futures_util::{FutureExt, TryStreamExt}; -use 
reth::api::FullNodeComponents; +use reth::{api::FullNodeComponents, primitives::Block, providers::BlockReader}; use reth_exex::{ExExContext, ExExEvent}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; @@ -25,7 +25,7 @@ impl MyExEx { } } -impl Future for MyExEx { +impl>> Future for MyExEx { type Output = eyre::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index 0c48b3b9ce85..7778fb9262c5 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -377,8 +377,9 @@ impl BlockIndices { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Header; use alloy_primitives::B256; - use reth_primitives::{Header, SealedBlock, SealedHeader}; + use reth_primitives::{SealedBlock, SealedHeader}; #[test] fn pending_block_num_hash_returns_none_if_no_fork() { diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 20d1cfe9f1da..bbf1cb099617 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1,6 +1,7 @@ //! Implementation of [`BlockchainTree`] use crate::{ + externals::TreeNodeTypes, metrics::{MakeCanonicalAction, MakeCanonicalDurationsRecorder, TreeMetrics}, state::{SidechainId, TreeState}, AppendableChain, BlockIndices, BlockchainTreeConfig, ExecutionData, TreeExternals, @@ -21,10 +22,10 @@ use reth_primitives::{ SealedHeader, StaticFileSegment, }; use reth_provider::{ - providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, BlockWriter, - CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, - ChainSpecProvider, ChainSplit, ChainSplitTarget, DBProvider, DisplayBlocksChain, - HeaderProvider, ProviderError, StaticFileProviderFactory, + BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, + CanonStateNotificationSender, CanonStateNotifications, ChainSpecProvider, ChainSplit, + ChainSplitTarget, DBProvider, DisplayBlocksChain, HeaderProvider, ProviderError, + StaticFileProviderFactory, StorageLocation, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; @@ -93,7 +94,7 @@ impl BlockchainTree { impl BlockchainTree where - N: ProviderNodeTypes, + N: TreeNodeTypes, E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. @@ -1332,7 +1333,7 @@ where info!(target: "blockchain_tree", "REORG: revert canonical from database by unwinding chain blocks {:?}", revert_range); // read block and execution result from database. and remove traces of block from tables. 
let blocks_and_execution = provider_rw - .take_block_and_execution_range(revert_range) + .take_block_and_execution_above(revert_until, StorageLocation::Database) .map_err(|e| CanonicalError::CanonicalRevert(e.to_string()))?; provider_rw.commit()?; @@ -1374,10 +1375,10 @@ where #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{TxEip1559, EMPTY_ROOT_HASH}; + use alloy_consensus::{Header, TxEip1559, EMPTY_ROOT_HASH}; use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip4895::Withdrawals}; use alloy_genesis::{Genesis, GenesisAccount}; - use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, Sealable, B256}; + use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, B256}; use assert_matches::assert_matches; use linked_hash_set::LinkedHashSet; use reth_chainspec::{ChainSpecBuilder, MAINNET, MIN_TRANSACTION_GAS}; @@ -1386,18 +1387,20 @@ mod tests { use reth_db_api::transaction::DbTxMut; use reth_evm::test_utils::MockExecutorProvider; use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_node_types::FullNodePrimitives; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, - revm_primitives::AccountInfo, - Account, BlockBody, Header, Transaction, TransactionSigned, TransactionSignedEcRecovered, + Account, BlockBody, Transaction, TransactionSigned, TransactionSignedEcRecovered, }; use reth_provider::{ + providers::ProviderNodeTypes, test_utils::{ blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB, }, - ProviderFactory, + ProviderFactory, StorageLocation, }; + use reth_revm::primitives::AccountInfo; use reth_stages_api::StageCheckpoint; use reth_trie::{root::state_root_unhashed, StateRoot}; use std::collections::HashMap; @@ -1420,7 +1423,12 @@ mod tests { TreeExternals::new(provider_factory, consensus, executor_factory) } - fn setup_genesis(factory: &ProviderFactory, mut genesis: SealedBlock) { + fn setup_genesis< + N: ProviderNodeTypes>, + >( + factory: &ProviderFactory, + mut genesis: SealedBlock, + ) { // insert genesis to db. 
genesis.header.set_block_number(10); @@ -1551,6 +1559,7 @@ mod tests { SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default()) .try_seal_with_senders() .unwrap(), + StorageLocation::Database, ) .unwrap(); let account = Account { balance: initial_signer_balance, ..Default::default() }; @@ -1561,7 +1570,7 @@ mod tests { let single_tx_cost = U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS); let mock_tx = |nonce: u64| -> TransactionSignedEcRecovered { - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), nonce, @@ -1580,7 +1589,9 @@ mod tests { body: Vec, num_of_signer_txs: u64| -> SealedBlockWithSenders { - let transactions_root = calculate_transaction_root(&body); + let signed_body = + body.clone().into_iter().map(|tx| tx.into_signed()).collect::>(); + let transactions_root = calculate_transaction_root(&signed_body); let receipts = body .iter() .enumerate() @@ -1598,7 +1609,7 @@ mod tests { // receipts root computation is different for OP let receipts_root = calculate_receipt_root(&receipts); - let sealed = Header { + let header = Header { number, parent_hash: parent.unwrap_or_default(), gas_used: body.len() as u64 * MIN_TRANSACTION_GAS, @@ -1620,15 +1631,13 @@ mod tests { ), )])), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); + }; SealedBlockWithSenders::new( SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { - transactions: body.clone().into_iter().map(|tx| tx.into_signed()).collect(), + transactions: signed_body, ommers: Vec::new(), withdrawals: Some(Withdrawals::default()), }, diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 09ba5c3f851c..6ac39c316702 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -23,7 +23,7 @@ use reth_provider::{ }; use reth_revm::database::StateProviderDatabase; use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; -use reth_trie_parallel::parallel_root::ParallelStateRoot; +use reth_trie_parallel::root::ParallelStateRoot; use std::{ collections::BTreeMap, ops::{Deref, DerefMut}, diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 4e22fcb78b6b..2a825921f893 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -2,7 +2,7 @@ use alloy_primitives::{BlockHash, BlockNumber}; use reth_consensus::Consensus; -use reth_db::{static_file::HeaderMask, tables}; +use reth_db::{static_file::BlockHashMask, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_node_types::NodeTypesWithDB; use reth_primitives::StaticFileSegment; @@ -13,6 +13,8 @@ use reth_provider::{ use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; +pub use reth_provider::providers::{NodeTypesForTree, TreeNodeTypes}; + /// A container for external components. 
/// /// This is a simple container for external components used throughout the blockchain tree @@ -75,7 +77,7 @@ impl TreeExternals { hashes.extend(range.clone().zip(static_file_provider.fetch_range_with_predicate( StaticFileSegment::Headers, range, - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::(number.into()), |_| true, )?)); } diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 862b02e76070..f5d2ad8c6f78 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -6,10 +6,10 @@ use reth_blockchain_tree_api::{ BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk, }; -use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{EthPrimitives, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ BlockchainTreePendingStateProvider, CanonStateNotificationSender, CanonStateNotifications, - CanonStateSubscriptions, FullExecutionDataProvider, + CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, }; use reth_storage_errors::provider::ProviderResult; use std::collections::BTreeMap; @@ -126,6 +126,10 @@ impl BlockchainTreePendingStateProvider for NoopBlockchainTree { } } +impl NodePrimitivesProvider for NoopBlockchainTree { + type Primitives = EthPrimitives; +} + impl CanonStateSubscriptions for NoopBlockchainTree { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.canon_state_notification_sender diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 8e6cceccdd19..484b4b51869e 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -1,5 +1,7 @@ //! Wrapper around `BlockchainTree` that allows for it to be shared. 
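The `shareable.rs` hunks below all hinge on one pattern: the tree sits behind `Arc<RwLock<_>>`, so the wrapper is cheap to clone and every trait impl just takes the appropriate lock. A generic sketch of that pattern (reth uses `parking_lot`'s `RwLock` and its own `BlockchainTree`; the names below are illustrative):

```rust
use std::sync::{Arc, RwLock};

/// A cheap-to-clone handle that shares one tree across tasks.
#[derive(Debug, Clone)]
struct Shareable<T> {
    tree: Arc<RwLock<T>>,
}

impl<T> Shareable<T> {
    fn new(tree: T) -> Self {
        Self { tree: Arc::new(RwLock::new(tree)) }
    }

    /// Run a read-only closure under the shared read lock,
    /// mirroring calls like `self.tree.read().subscribe_canon_state()`.
    fn with_read<R>(&self, f: impl FnOnce(&T) -> R) -> R {
        f(&self.tree.read().expect("lock poisoned"))
    }
}
```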
+use crate::externals::TreeNodeTypes; + use super::BlockchainTree; use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; @@ -13,8 +15,8 @@ use reth_evm::execute::BlockExecutorProvider; use reth_node_types::NodeTypesWithDB; use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ - providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateSubscriptions, - FullExecutionDataProvider, ProviderError, + providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateNotifications, + CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, ProviderError, }; use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; @@ -36,7 +38,7 @@ impl ShareableBlockchainTree { impl BlockchainTreeEngine for ShareableBlockchainTree where - N: ProviderNodeTypes, + N: TreeNodeTypes, E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { @@ -107,7 +109,7 @@ where impl BlockchainTreeViewer for ShareableBlockchainTree where - N: ProviderNodeTypes, + N: TreeNodeTypes, E: BlockExecutorProvider, { fn header_by_hash(&self, hash: BlockHash) -> Option { @@ -170,7 +172,7 @@ where impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where - N: ProviderNodeTypes, + N: TreeNodeTypes, E: BlockExecutorProvider, { fn find_pending_state_provider( @@ -183,12 +185,20 @@ where } } -impl CanonStateSubscriptions for ShareableBlockchainTree +impl NodePrimitivesProvider for ShareableBlockchainTree where N: ProviderNodeTypes, E: Send + Sync, { - fn subscribe_to_canonical_state(&self) -> reth_provider::CanonStateNotifications { + type Primitives = N::Primitives; +} + +impl CanonStateSubscriptions for ShareableBlockchainTree +where + N: TreeNodeTypes, + E: Send + Sync, +{ + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { trace!(target: "blockchain_tree", "Registered subscriber for canonical state"); self.tree.read().subscribe_canon_state() } diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index ca8af6f9b581..a8e43240f4fa 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -184,7 +184,7 @@ mod tests { let mut tree_state = TreeState::new(0, vec![], 5); // Create a chain with two blocks - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); let block1_hash = B256::random(); let block2_hash = B256::random(); @@ -254,8 +254,8 @@ mod tests { let block1_hash = B256::random(); let block2_hash = B256::random(); - let mut block1 = SealedBlockWithSenders::default(); - let mut block2 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); + let mut block2: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(block1_hash); block1.block.header.set_block_number(9); @@ -296,8 +296,8 @@ mod tests { let block1_hash = B256::random(); let block2_hash = B256::random(); - let mut block1 = SealedBlockWithSenders::default(); - let mut block2 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); + let mut block2: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(block1_hash); block1.block.header.set_block_number(9); @@ -336,7 +336,7 @@ mod tests { // Create a block with a random hash and add it to the buffer let block_hash = B256::random(); - let mut 
block = SealedBlockWithSenders::default(); + let mut block: SealedBlockWithSenders = Default::default(); block.block.header.set_hash(block_hash); // Add the block to the buffered blocks in the TreeState @@ -363,8 +363,8 @@ mod tests { let ancestor_hash = B256::random(); let descendant_hash = B256::random(); - let mut ancestor_block = SealedBlockWithSenders::default(); - let mut descendant_block = SealedBlockWithSenders::default(); + let mut ancestor_block: SealedBlockWithSenders = Default::default(); + let mut descendant_block: SealedBlockWithSenders = Default::default(); ancestor_block.block.header.set_hash(ancestor_hash); descendant_block.block.header.set_hash(descendant_hash); @@ -397,7 +397,7 @@ mod tests { let receipt1 = Receipt::default(); let receipt2 = Receipt::default(); - let mut block = SealedBlockWithSenders::default(); + let mut block: SealedBlockWithSenders = Default::default(); block.block.header.set_hash(block_hash); let receipts = vec![receipt1, receipt2]; diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index 9a88a3c54bc6..d2ef5870947b 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -18,22 +18,23 @@ reth-errors.workspace = true reth-execution-types.workspace = true reth-metrics.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-storage-api.workspace = true reth-trie.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true # async -tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } +tokio = { workspace = true, default-features = false, features = ["sync", "macros"] } tokio-stream = { workspace = true, features = ["sync"] } # tracing tracing.workspace = true # misc -auto_impl.workspace = true derive_more.workspace = true metrics.workspace = true parking_lot.workspace = true @@ -42,7 +43,6 @@ pin-project.workspace = true # optional deps for test-utils alloy-signer = { workspace = true, optional = true } alloy-signer-local = { workspace = true, optional = true } -alloy-consensus = { workspace = true, optional = true } rand = { workspace = true, optional = true } revm = { workspace = true, optional = true } @@ -56,13 +56,13 @@ revm.workspace = true [features] test-utils = [ - "alloy-signer", - "alloy-signer-local", - "alloy-consensus", - "rand", - "revm", - "reth-chainspec/test-utils", - "reth-primitives/test-utils", - "reth-trie/test-utils", - "revm?/test-utils" + "alloy-signer", + "alloy-signer-local", + "rand", + "revm", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-trie/test-utils", + "revm?/test-utils", ] diff --git a/crates/chain-state/src/chain_info.rs b/crates/chain-state/src/chain_info.rs index 3c75544ac460..1b8575005c40 100644 --- a/crates/chain-state/src/chain_info.rs +++ b/crates/chain-state/src/chain_info.rs @@ -1,8 +1,9 @@ +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use alloy_primitives::BlockNumber; use parking_lot::RwLock; use reth_chainspec::ChainInfo; -use reth_primitives::SealedHeader; +use reth_primitives::{NodePrimitives, SealedHeader}; use std::{ sync::{ atomic::{AtomicU64, Ordering}, @@ -14,17 +15,21 @@ use tokio::sync::watch; /// Tracks the chain info: canonical head, safe block, finalized block. 
#[derive(Debug, Clone)] -pub struct ChainInfoTracker { - inner: Arc, +pub struct ChainInfoTracker { + inner: Arc>, } -impl ChainInfoTracker { +impl ChainInfoTracker +where + N: NodePrimitives, + N::BlockHeader: BlockHeader, +{ /// Create a new chain info container for the given canonical head and finalized header if it /// exists. pub fn new( - head: SealedHeader, - finalized: Option, - safe: Option, + head: SealedHeader, + finalized: Option>, + safe: Option>, ) -> Self { let (finalized_block, _) = watch::channel(finalized); let (safe_block, _) = watch::channel(safe); @@ -33,7 +38,7 @@ impl ChainInfoTracker { inner: Arc::new(ChainInfoInner { last_forkchoice_update: RwLock::new(None), last_transition_configuration_exchange: RwLock::new(None), - canonical_head_number: AtomicU64::new(head.number), + canonical_head_number: AtomicU64::new(head.number()), canonical_head: RwLock::new(head), safe_block, finalized_block, @@ -44,7 +49,7 @@ impl ChainInfoTracker { /// Returns the [`ChainInfo`] for the canonical head. pub fn chain_info(&self) -> ChainInfo { let inner = self.inner.canonical_head.read(); - ChainInfo { best_hash: inner.hash(), best_number: inner.number } + ChainInfo { best_hash: inner.hash(), best_number: inner.number() } } /// Update the timestamp when we received a forkchoice update. @@ -68,17 +73,17 @@ impl ChainInfoTracker { } /// Returns the canonical head of the chain. - pub fn get_canonical_head(&self) -> SealedHeader { + pub fn get_canonical_head(&self) -> SealedHeader { self.inner.canonical_head.read().clone() } /// Returns the safe header of the chain. - pub fn get_safe_header(&self) -> Option { + pub fn get_safe_header(&self) -> Option> { self.inner.safe_block.borrow().clone() } /// Returns the finalized header of the chain. - pub fn get_finalized_header(&self) -> Option { + pub fn get_finalized_header(&self) -> Option> { self.inner.finalized_block.borrow().clone() } @@ -104,8 +109,8 @@ impl ChainInfoTracker { } /// Sets the canonical head of the chain. - pub fn set_canonical_head(&self, header: SealedHeader) { - let number = header.number; + pub fn set_canonical_head(&self, header: SealedHeader) { + let number = header.number(); *self.inner.canonical_head.write() = header; // also update the atomic number. @@ -113,7 +118,7 @@ impl ChainInfoTracker { } /// Sets the safe header of the chain. - pub fn set_safe(&self, header: SealedHeader) { + pub fn set_safe(&self, header: SealedHeader) { self.inner.safe_block.send_if_modified(|current_header| { if current_header.as_ref().map(SealedHeader::hash) != Some(header.hash()) { let _ = current_header.replace(header); @@ -125,7 +130,7 @@ impl ChainInfoTracker { } /// Sets the finalized header of the chain. - pub fn set_finalized(&self, header: SealedHeader) { + pub fn set_finalized(&self, header: SealedHeader) { self.inner.finalized_block.send_if_modified(|current_header| { if current_header.as_ref().map(SealedHeader::hash) != Some(header.hash()) { let _ = current_header.replace(header); @@ -137,19 +142,21 @@ impl ChainInfoTracker { } /// Subscribe to the finalized block. - pub fn subscribe_finalized_block(&self) -> watch::Receiver> { + pub fn subscribe_finalized_block( + &self, + ) -> watch::Receiver>> { self.inner.finalized_block.subscribe() } /// Subscribe to the safe block. 
- pub fn subscribe_safe_block(&self) -> watch::Receiver> { + pub fn subscribe_safe_block(&self) -> watch::Receiver>> { self.inner.safe_block.subscribe() } } /// Container type for all chain info fields #[derive(Debug)] -struct ChainInfoInner { +struct ChainInfoInner { /// Timestamp when we received the last fork choice update. /// /// This is mainly used to track if we're connected to a beacon node. @@ -161,16 +168,17 @@ struct ChainInfoInner { /// Tracks the number of the `canonical_head`. canonical_head_number: AtomicU64, /// The canonical head of the chain. - canonical_head: RwLock, + canonical_head: RwLock>, /// The block that the beacon node considers safe. - safe_block: watch::Sender>, + safe_block: watch::Sender>>, /// The block that the beacon node considers finalized. - finalized_block: watch::Sender>, + finalized_block: watch::Sender>>, } #[cfg(test)] mod tests { use super::*; + use reth_primitives::EthPrimitives; use reth_testing_utils::{generators, generators::random_header}; #[test] @@ -180,7 +188,8 @@ mod tests { let header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the header - let tracker = ChainInfoTracker::new(header.clone(), None, None); + let tracker: ChainInfoTracker = + ChainInfoTracker::new(header.clone(), None, None); // Fetch the chain information from the tracker let chain_info = tracker.chain_info(); @@ -197,7 +206,7 @@ mod tests { let header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the header - let tracker = ChainInfoTracker::new(header, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header, None, None); // Assert that there has been no forkchoice update yet (the timestamp is None) assert!(tracker.last_forkchoice_update_received_at().is_none()); @@ -216,7 +225,7 @@ mod tests { let header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the header - let tracker = ChainInfoTracker::new(header, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header, None, None); // Assert that there has been no transition configuration exchange yet (the timestamp is // None) @@ -239,7 +248,7 @@ mod tests { let header2 = random_header(&mut rng, 20, None); // Create a new chain info tracker with the first header - let tracker = ChainInfoTracker::new(header1, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header1, None, None); // Set the second header as the canonical head of the tracker tracker.set_canonical_head(header2.clone()); @@ -260,7 +269,7 @@ mod tests { let header2 = random_header(&mut rng, 20, None); // Create a new chain info tracker with the first header (header1) - let tracker = ChainInfoTracker::new(header1, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header1, None, None); // Call the set_safe method with the second header (header2) tracker.set_safe(header2.clone()); @@ -306,7 +315,7 @@ mod tests { let header3 = random_header(&mut rng, 30, None); // Create a new chain info tracker with the first header - let tracker = ChainInfoTracker::new(header1, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header1, None, None); // Initial state: finalize header should be None assert!(tracker.get_finalized_header().is_none()); @@ -343,7 +352,7 @@ mod tests { let finalized_header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the finalized header - let tracker = + let tracker: ChainInfoTracker = ChainInfoTracker::new(finalized_header.clone(), 
Some(finalized_header.clone()), None); // Assert that the BlockNumHash returned matches the finalized header @@ -357,7 +366,8 @@ mod tests { let safe_header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the safe header - let tracker = ChainInfoTracker::new(safe_header.clone(), None, None); + let tracker: ChainInfoTracker = + ChainInfoTracker::new(safe_header.clone(), None, None); tracker.set_safe(safe_header.clone()); // Assert that the BlockNumHash returned matches the safe header diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 6bef197bea98..f43aae562e00 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -4,16 +4,18 @@ use crate::{ CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, ChainInfoTracker, MemoryOverlayStateProvider, }; -use alloy_eips::{BlockHashOrNumber, BlockNumHash}; +use alloy_consensus::BlockHeader; +use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber, BlockNumHash}; use alloy_primitives::{map::HashMap, Address, TxHash, B256}; use parking_lot::RwLock; use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::{ - BlockWithSenders, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, - TransactionMeta, TransactionSigned, + BlockWithSenders, HeaderExt, NodePrimitives, Receipts, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, TransactionMeta, }; +use reth_primitives_traits::{Block, BlockBody as _, SignedTransaction}; use reth_storage_api::StateProviderBox; use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::{collections::BTreeMap, sync::Arc, time::Instant}; @@ -49,22 +51,22 @@ pub(crate) struct InMemoryStateMetrics { /// This holds, because only lookup by number functions need to acquire the numbers lock first to /// get the block hash. #[derive(Debug, Default)] -pub(crate) struct InMemoryState { +pub(crate) struct InMemoryState { /// All canonical blocks that are not on disk yet. - blocks: RwLock>>, + blocks: RwLock>>>, /// Mapping of block numbers to block hashes. numbers: RwLock>, /// The pending block that has not yet been made canonical. - pending: watch::Sender>, + pending: watch::Sender>>, /// Metrics for the in-memory state. metrics: InMemoryStateMetrics, } -impl InMemoryState { +impl InMemoryState { pub(crate) fn new( - blocks: HashMap>, + blocks: HashMap>>, numbers: BTreeMap, - pending: Option, + pending: Option>, ) -> Self { let (pending, _) = watch::channel(pending); let this = Self { @@ -94,12 +96,12 @@ impl InMemoryState { } /// Returns the state for a given block hash. - pub(crate) fn state_by_hash(&self, hash: B256) -> Option> { + pub(crate) fn state_by_hash(&self, hash: B256) -> Option>> { self.blocks.read().get(&hash).cloned() } /// Returns the state for a given block number. - pub(crate) fn state_by_number(&self, number: u64) -> Option> { + pub(crate) fn state_by_number(&self, number: u64) -> Option>> { let hash = self.hash_by_number(number)?; self.state_by_hash(hash) } @@ -110,14 +112,14 @@ impl InMemoryState { } /// Returns the current chain head state. 
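The by-number lookups above always take the `numbers` lock before the `blocks` lock, which is what the safety comment on the struct refers to. A standalone sketch of that pattern, using simplified stand-in types rather than the reth structs themselves:

use parking_lot::RwLock;
use std::{
    collections::{BTreeMap, HashMap},
    sync::Arc,
};

struct TwoIndex<S> {
    // hash (stand-in: u64) -> state
    blocks: RwLock<HashMap<u64, Arc<S>>>,
    // number -> hash
    numbers: RwLock<BTreeMap<u64, u64>>,
}

impl<S> TwoIndex<S> {
    fn state_by_number(&self, number: u64) -> Option<Arc<S>> {
        // Take the numbers lock first, then the blocks lock; by-hash lookups
        // never touch `numbers`, so this ordering cannot deadlock with them.
        let hash = *self.numbers.read().get(&number)?;
        self.blocks.read().get(&hash).cloned()
    }
}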
- pub(crate) fn head_state(&self) -> Option> { + pub(crate) fn head_state(&self) -> Option>> { let hash = *self.numbers.read().last_key_value()?.1; self.state_by_hash(hash) } /// Returns the pending state corresponding to the current head plus one, /// from the payload received in newPayload that does not have a FCU yet. - pub(crate) fn pending_state(&self) -> Option { + pub(crate) fn pending_state(&self) -> Option> { self.pending.borrow().clone() } @@ -130,17 +132,17 @@ impl InMemoryState { /// Inner type to provide in memory state. It includes a chain tracker to be /// advanced internally by the tree. #[derive(Debug)] -pub(crate) struct CanonicalInMemoryStateInner { +pub(crate) struct CanonicalInMemoryStateInner { /// Tracks certain chain information, such as the canonical head, safe head, and finalized /// head. - pub(crate) chain_info_tracker: ChainInfoTracker, + pub(crate) chain_info_tracker: ChainInfoTracker, /// Tracks blocks at the tip of the chain that have not been persisted to disk yet. - pub(crate) in_memory_state: InMemoryState, + pub(crate) in_memory_state: InMemoryState, /// A broadcast stream that emits events when the canonical chain is updated. - pub(crate) canon_state_notification_sender: CanonStateNotificationSender, + pub(crate) canon_state_notification_sender: CanonStateNotificationSender, } -impl CanonicalInMemoryStateInner { +impl CanonicalInMemoryStateInner { /// Clears all entries in the in memory state. fn clear(&self) { { @@ -157,23 +159,26 @@ impl CanonicalInMemoryStateInner { } } +type PendingBlockAndReceipts = + (SealedBlockFor<::Block>, Vec>); + /// This type is responsible for providing the blocks, receipts, and state for /// all canonical blocks not on disk yet and keeps track of the block range that /// is in memory. #[derive(Debug, Clone)] -pub struct CanonicalInMemoryState { - pub(crate) inner: Arc, +pub struct CanonicalInMemoryState { + pub(crate) inner: Arc>, } -impl CanonicalInMemoryState { +impl CanonicalInMemoryState { /// Create a new in-memory state with the given blocks, numbers, pending state, and optional /// finalized header. pub fn new( - blocks: HashMap>, + blocks: HashMap>>, numbers: BTreeMap, - pending: Option, - finalized: Option, - safe: Option, + pending: Option>, + finalized: Option>, + safe: Option>, ) -> Self { let in_memory_state = InMemoryState::new(blocks, numbers, pending); let header = in_memory_state @@ -200,9 +205,9 @@ impl CanonicalInMemoryState { /// Create a new in memory state with the given local head and finalized header /// if it exists. pub fn with_head( - head: SealedHeader, - finalized: Option, - safe: Option, + head: SealedHeader, + finalized: Option>, + safe: Option>, ) -> Self { let chain_info_tracker = ChainInfoTracker::new(head, finalized, safe); let in_memory_state = InMemoryState::default(); @@ -223,7 +228,7 @@ impl CanonicalInMemoryState { } /// Returns the header corresponding to the given hash. - pub fn header_by_hash(&self, hash: B256) -> Option { + pub fn header_by_hash(&self, hash: B256) -> Option> { self.state_by_hash(hash).map(|block| block.block_ref().block.header.clone()) } @@ -235,9 +240,9 @@ impl CanonicalInMemoryState { /// Updates the pending block with the given block. /// /// Note: This assumes that the parent block of the pending block is canonical. 
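A sketch of seeding the canonical state and staging a pending block via the setter that follows. Hedged assumptions: `CanonicalInMemoryState` and `test_utils::TestBlockBuilder` are reachable under these paths (the latter behind the crate's `test-utils` feature), as the tests in this diff suggest.

use reth_chain_state::{test_utils::TestBlockBuilder, CanonicalInMemoryState};
use reth_primitives::EthPrimitives;
use reth_testing_utils::generators::{self, random_header};

fn stage_pending() {
    let mut rng = generators::rng();
    let head = random_header(&mut rng, 0, None);
    let state: CanonicalInMemoryState<EthPrimitives> =
        CanonicalInMemoryState::with_head(head, None, None);

    // Build an executed block on top of the canonical head and stage it as pending.
    let mut builder: TestBlockBuilder<EthPrimitives> = TestBlockBuilder::default();
    let pending = builder.get_executed_block_with_number(1, state.get_canonical_head().hash());
    state.set_pending_block(pending);
    assert!(state.pending_block().is_some());
}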
- pub fn set_pending_block(&self, pending: ExecutedBlock) { + pub fn set_pending_block(&self, pending: ExecutedBlock) { // fetch the state of the pending block's parent block - let parent = self.state_by_hash(pending.block().parent_hash); + let parent = self.state_by_hash(pending.block().parent_hash()); let pending = BlockState::with_parent(pending, parent); self.inner.in_memory_state.pending.send_modify(|p| { p.replace(pending); @@ -251,7 +256,7 @@ impl CanonicalInMemoryState { /// them to their parent blocks. fn update_blocks(&self, new_blocks: I, reorged: I) where - I: IntoIterator, + I: IntoIterator>, { { // acquire locks, starting with the numbers lock @@ -261,15 +266,15 @@ impl CanonicalInMemoryState { // we first remove the blocks from the reorged chain for block in reorged { let hash = block.block().hash(); - let number = block.block().number; + let number = block.block().number(); blocks.remove(&hash); numbers.remove(&number); } // insert the new blocks for block in new_blocks { - let parent = blocks.get(&block.block().parent_hash).cloned(); - let block_state = BlockState::with_parent(block.clone(), parent); + let parent = blocks.get(&block.block().parent_hash()).cloned(); + let block_state = BlockState::with_parent(block, parent); let hash = block_state.hash(); let number = block_state.number(); @@ -287,7 +292,7 @@ impl CanonicalInMemoryState { } /// Update the in memory state with the given chain update. - pub fn update_chain(&self, new_chain: NewCanonicalChain) { + pub fn update_chain(&self, new_chain: NewCanonicalChain) { match new_chain { NewCanonicalChain::Commit { new } => { self.update_blocks(new, vec![]); @@ -328,17 +333,17 @@ impl CanonicalInMemoryState { // height) let mut old_blocks = blocks .drain() - .filter(|(_, b)| b.block_ref().block().number > persisted_height) + .filter(|(_, b)| b.block_ref().block().number() > persisted_height) .map(|(_, b)| b.block.clone()) .collect::>(); // sort the blocks by number so we can insert them back in natural order (low -> high) - old_blocks.sort_unstable_by_key(|block| block.block().number); + old_blocks.sort_unstable_by_key(|block| block.block().number()); // re-insert the blocks in natural order and connect them to their parent blocks for block in old_blocks { - let parent = blocks.get(&block.block().parent_hash).cloned(); - let block_state = BlockState::with_parent(block.clone(), parent); + let parent = blocks.get(&block.block().parent_hash()).cloned(); + let block_state = BlockState::with_parent(block, parent); let hash = block_state.hash(); let number = block_state.number(); @@ -350,7 +355,7 @@ impl CanonicalInMemoryState { // also shift the pending state if it exists self.inner.in_memory_state.pending.send_modify(|p| { if let Some(p) = p.as_mut() { - p.parent = blocks.get(&p.block_ref().block.parent_hash).cloned(); + p.parent = blocks.get(&p.block_ref().block.parent_hash()).cloned(); } }); } @@ -358,22 +363,22 @@ impl CanonicalInMemoryState { } /// Returns in memory state corresponding the given hash. - pub fn state_by_hash(&self, hash: B256) -> Option> { + pub fn state_by_hash(&self, hash: B256) -> Option>> { self.inner.in_memory_state.state_by_hash(hash) } /// Returns in memory state corresponding the block number. - pub fn state_by_number(&self, number: u64) -> Option> { + pub fn state_by_number(&self, number: u64) -> Option>> { self.inner.in_memory_state.state_by_number(number) } /// Returns the in memory head state. 
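And a commit through `update_chain`, mirroring the chain-update test further below and ending at the head-state accessor that follows (same `test-utils` and re-export assumptions as the previous sketch):

use alloy_primitives::B256;
use reth_chain_state::{test_utils::TestBlockBuilder, CanonicalInMemoryState, NewCanonicalChain};
use reth_primitives::EthPrimitives;

fn commit_block() {
    let state: CanonicalInMemoryState<EthPrimitives> = CanonicalInMemoryState::empty();
    let mut builder: TestBlockBuilder<EthPrimitives> = TestBlockBuilder::default();

    let block = builder.get_executed_block_with_number(0, B256::random());
    // A commit appends to the head: the block is linked to its in-memory parent
    // (if any), and the canonical head, number index and metrics are updated.
    state.update_chain(NewCanonicalChain::Commit { new: vec![block.clone()] });
    assert_eq!(state.head_state().unwrap().hash(), block.block().hash());
}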
- pub fn head_state(&self) -> Option> { + pub fn head_state(&self) -> Option>> { self.inner.in_memory_state.head_state() } /// Returns the in memory pending state. - pub fn pending_state(&self) -> Option { + pub fn pending_state(&self) -> Option> { self.inner.in_memory_state.pending_state() } @@ -426,81 +431,86 @@ impl CanonicalInMemoryState { } /// Canonical head setter. - pub fn set_canonical_head(&self, header: SealedHeader) { + pub fn set_canonical_head(&self, header: SealedHeader) { self.inner.chain_info_tracker.set_canonical_head(header); } /// Safe head setter. - pub fn set_safe(&self, header: SealedHeader) { + pub fn set_safe(&self, header: SealedHeader) { self.inner.chain_info_tracker.set_safe(header); } /// Finalized head setter. - pub fn set_finalized(&self, header: SealedHeader) { + pub fn set_finalized(&self, header: SealedHeader) { self.inner.chain_info_tracker.set_finalized(header); } /// Canonical head getter. - pub fn get_canonical_head(&self) -> SealedHeader { + pub fn get_canonical_head(&self) -> SealedHeader { self.inner.chain_info_tracker.get_canonical_head() } /// Finalized header getter. - pub fn get_finalized_header(&self) -> Option { + pub fn get_finalized_header(&self) -> Option> { self.inner.chain_info_tracker.get_finalized_header() } /// Safe header getter. - pub fn get_safe_header(&self) -> Option { + pub fn get_safe_header(&self) -> Option> { self.inner.chain_info_tracker.get_safe_header() } /// Returns the `SealedHeader` corresponding to the pending state. - pub fn pending_sealed_header(&self) -> Option { + pub fn pending_sealed_header(&self) -> Option> { self.pending_state().map(|h| h.block_ref().block().header.clone()) } /// Returns the `Header` corresponding to the pending state. - pub fn pending_header(&self) -> Option
{ + pub fn pending_header(&self) -> Option { self.pending_sealed_header().map(|sealed_header| sealed_header.unseal()) } /// Returns the `SealedBlock` corresponding to the pending state. - pub fn pending_block(&self) -> Option { + pub fn pending_block(&self) -> Option> { self.pending_state().map(|block_state| block_state.block_ref().block().clone()) } /// Returns the `SealedBlockWithSenders` corresponding to the pending state. - pub fn pending_block_with_senders(&self) -> Option { + pub fn pending_block_with_senders(&self) -> Option> + where + N::SignedTx: SignedTransaction, + { self.pending_state() .and_then(|block_state| block_state.block_ref().block().clone().seal_with_senders()) } /// Returns a tuple with the `SealedBlock` corresponding to the pending /// state and a vector of its `Receipt`s. - pub fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { + pub fn pending_block_and_receipts(&self) -> Option> { self.pending_state().map(|block_state| { (block_state.block_ref().block().clone(), block_state.executed_block_receipts()) }) } /// Subscribe to new blocks events. - pub fn subscribe_canon_state(&self) -> CanonStateNotifications { + pub fn subscribe_canon_state(&self) -> CanonStateNotifications { self.inner.canon_state_notification_sender.subscribe() } /// Subscribe to new safe block events. - pub fn subscribe_safe_block(&self) -> watch::Receiver> { + pub fn subscribe_safe_block(&self) -> watch::Receiver>> { self.inner.chain_info_tracker.subscribe_safe_block() } /// Subscribe to new finalized block events. - pub fn subscribe_finalized_block(&self) -> watch::Receiver> { + pub fn subscribe_finalized_block( + &self, + ) -> watch::Receiver>> { self.inner.chain_info_tracker.subscribe_finalized_block() } /// Attempts to send a new [`CanonStateNotification`] to all active Receiver handles. - pub fn notify_canon_state(&self, event: CanonStateNotification) { + pub fn notify_canon_state(&self, event: CanonStateNotification) { self.inner.canon_state_notification_sender.send(event).ok(); } @@ -512,7 +522,7 @@ impl CanonicalInMemoryState { &self, hash: B256, historical: StateProviderBox, - ) -> MemoryOverlayStateProvider { + ) -> MemoryOverlayStateProvider { let in_memory = if let Some(state) = self.state_by_hash(hash) { state.chain().map(|block_state| block_state.block()).collect() } else { @@ -526,15 +536,23 @@ impl CanonicalInMemoryState { /// oldest (highest to lowest). /// /// This iterator contains a snapshot of the in-memory state at the time of the call. - pub fn canonical_chain(&self) -> impl Iterator> { + pub fn canonical_chain(&self) -> impl Iterator>> { self.inner.in_memory_state.head_state().into_iter().flat_map(|head| head.iter()) } /// Returns a `TransactionSigned` for the given `TxHash` if found. 
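A hedged call-site sketch for the two hash-based lookups defined next; the `trie_hash` comparison they use comes from `Encodable2718`, which is exactly the bound the new signatures add.

use alloy_primitives::TxHash;
use reth_chain_state::CanonicalInMemoryState; // assumed re-export path
use reth_primitives::EthPrimitives;

fn lookup_tx(state: &CanonicalInMemoryState<EthPrimitives>, hash: TxHash) {
    // Scans the unpersisted canonical chain newest-to-oldest.
    if let Some((_tx, meta)) = state.transaction_by_hash_with_meta(hash) {
        println!(
            "tx {hash} at index {} in block {} (#{})",
            meta.index, meta.block_hash, meta.block_number
        );
    } else {
        assert!(state.transaction_by_hash(hash).is_none());
    }
}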
- pub fn transaction_by_hash(&self, hash: TxHash) -> Option { + pub fn transaction_by_hash(&self, hash: TxHash) -> Option + where + N::SignedTx: Encodable2718, + { for block_state in self.canonical_chain() { - if let Some(tx) = - block_state.block_ref().block().body.transactions().find(|tx| tx.hash() == hash) + if let Some(tx) = block_state + .block_ref() + .block() + .body + .transactions() + .iter() + .find(|tx| tx.trie_hash() == hash) { return Some(tx.clone()) } @@ -547,24 +565,28 @@ impl CanonicalInMemoryState { pub fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> Option<(TransactionSigned, TransactionMeta)> { + ) -> Option<(N::SignedTx, TransactionMeta)> + where + N::SignedTx: Encodable2718, + { for block_state in self.canonical_chain() { if let Some((index, tx)) = block_state .block_ref() .block() .body .transactions() + .iter() .enumerate() - .find(|(_, tx)| tx.hash() == tx_hash) + .find(|(_, tx)| tx.trie_hash() == tx_hash) { let meta = TransactionMeta { tx_hash, index: index as u64, block_hash: block_state.hash(), - block_number: block_state.block_ref().block.number, - base_fee: block_state.block_ref().block.header.base_fee_per_gas, - timestamp: block_state.block_ref().block.timestamp, - excess_blob_gas: block_state.block_ref().block.excess_blob_gas, + block_number: block_state.block_ref().block.number(), + base_fee: block_state.block_ref().block.header.base_fee_per_gas(), + timestamp: block_state.block_ref().block.timestamp(), + excess_blob_gas: block_state.block_ref().block.excess_blob_gas(), }; return Some((tx.clone(), meta)) } @@ -576,22 +598,22 @@ impl CanonicalInMemoryState { /// State after applying the given block, this block is part of the canonical chain that partially /// stored in memory and can be traced back to a canonical block on disk. #[derive(Debug, PartialEq, Eq, Clone)] -pub struct BlockState { +pub struct BlockState { /// The executed block that determines the state after this block has been executed. - block: ExecutedBlock, + block: ExecutedBlock, /// The block's parent block if it exists. - parent: Option>, + parent: Option>>, } #[allow(dead_code)] -impl BlockState { +impl BlockState { /// [`BlockState`] constructor. - pub const fn new(block: ExecutedBlock) -> Self { + pub const fn new(block: ExecutedBlock) -> Self { Self { block, parent: None } } /// [`BlockState`] constructor with parent. - pub const fn with_parent(block: ExecutedBlock, parent: Option>) -> Self { + pub const fn with_parent(block: ExecutedBlock, parent: Option>) -> Self { Self { block, parent } } @@ -605,24 +627,25 @@ impl BlockState { } /// Returns the executed block that determines the state. - pub fn block(&self) -> ExecutedBlock { + pub fn block(&self) -> ExecutedBlock { self.block.clone() } /// Returns a reference to the executed block that determines the state. - pub const fn block_ref(&self) -> &ExecutedBlock { + pub const fn block_ref(&self) -> &ExecutedBlock { &self.block } /// Returns the block with senders for the state. - pub fn block_with_senders(&self) -> BlockWithSenders { + pub fn block_with_senders(&self) -> BlockWithSenders { let block = self.block.block().clone(); let senders = self.block.senders().clone(); - BlockWithSenders { block: block.unseal(), senders } + let (header, body) = block.split(); + BlockWithSenders::new_unchecked(N::Block::new(header.unseal(), body), senders) } /// Returns the sealed block with senders for the state. 
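Stepping back to the constructors above: a sketch of how `BlockState` values link to their parents (assuming `BlockState` is re-exported alongside the other in-memory types, plus the `test-utils` helper as before).

use alloy_primitives::B256;
use reth_chain_state::{test_utils::TestBlockBuilder, BlockState};
use reth_primitives::EthPrimitives;
use std::sync::Arc;

fn link_states() {
    let mut builder: TestBlockBuilder<EthPrimitives> = TestBlockBuilder::default();

    let parent = Arc::new(BlockState::new(
        builder.get_executed_block_with_number(1, B256::random()),
    ));
    // The child holds an `Arc` to its parent; `chain()` and
    // `parent_state_chain()` walk these links back toward the disk tip.
    let child = BlockState::with_parent(
        builder.get_executed_block_with_number(2, parent.hash()),
        Some(parent),
    );
    assert_eq!(child.number(), 2);
}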
- pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { + pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { let block = self.block.block().clone(); let senders = self.block.senders().clone(); SealedBlockWithSenders { block, senders } @@ -635,17 +658,17 @@ impl BlockState { /// Returns the block number of executed block that determines the state. pub fn number(&self) -> u64 { - self.block.block().number + self.block.block().number() } /// Returns the state root after applying the executed block that determines /// the state. pub fn state_root(&self) -> B256 { - self.block.block().header.state_root + self.block.block().header.state_root() } /// Returns the `Receipts` of executed block that determines the state. - pub fn receipts(&self) -> &Receipts { + pub fn receipts(&self) -> &Receipts { &self.block.execution_outcome().receipts } @@ -653,7 +676,7 @@ impl BlockState { /// We assume that the `Receipts` in the executed block `ExecutionOutcome` /// has only one element corresponding to the executed block associated to /// the state. - pub fn executed_block_receipts(&self) -> Vec { + pub fn executed_block_receipts(&self) -> Vec { let receipts = self.receipts(); debug_assert!( @@ -712,7 +735,7 @@ impl BlockState { /// /// This merges the state of all blocks that are part of the chain that the this block is /// the head of. This includes all blocks that connect back to the canonical block on disk. - pub fn state_provider(&self, historical: StateProviderBox) -> MemoryOverlayStateProvider { + pub fn state_provider(&self, historical: StateProviderBox) -> MemoryOverlayStateProvider { let in_memory = self.chain().map(|block_state| block_state.block()).collect(); MemoryOverlayStateProvider::new(historical, in_memory) @@ -727,14 +750,18 @@ impl BlockState { } /// Tries to find a transaction by [`TxHash`] in the chain ending at this block. - pub fn transaction_on_chain(&self, hash: TxHash) -> Option { + pub fn transaction_on_chain(&self, hash: TxHash) -> Option + where + N::SignedTx: Encodable2718, + { self.chain().find_map(|block_state| { block_state .block_ref() .block() .body .transactions() - .find(|tx| tx.hash() == hash) + .iter() + .find(|tx| tx.trie_hash() == hash) .cloned() }) } @@ -743,24 +770,28 @@ impl BlockState { pub fn transaction_meta_on_chain( &self, tx_hash: TxHash, - ) -> Option<(TransactionSigned, TransactionMeta)> { + ) -> Option<(N::SignedTx, TransactionMeta)> + where + N::SignedTx: Encodable2718, + { self.chain().find_map(|block_state| { block_state .block_ref() .block() .body .transactions() + .iter() .enumerate() - .find(|(_, tx)| tx.hash() == tx_hash) + .find(|(_, tx)| tx.trie_hash() == tx_hash) .map(|(index, tx)| { let meta = TransactionMeta { tx_hash, index: index as u64, block_hash: block_state.hash(), - block_number: block_state.block_ref().block.number, - base_fee: block_state.block_ref().block.header.base_fee_per_gas, - timestamp: block_state.block_ref().block.timestamp, - excess_blob_gas: block_state.block_ref().block.excess_blob_gas, + block_number: block_state.block_ref().block.number(), + base_fee: block_state.block_ref().block.header.base_fee_per_gas(), + timestamp: block_state.block_ref().block.timestamp(), + excess_blob_gas: block_state.block_ref().block.excess_blob_gas(), }; (tx.clone(), meta) }) @@ -770,25 +801,25 @@ impl BlockState { /// Represents an executed block stored in-memory. #[derive(Clone, Debug, PartialEq, Eq, Default)] -pub struct ExecutedBlock { +pub struct ExecutedBlock { /// Sealed block the rest of fields refer to. 
- pub block: Arc, + pub block: Arc>, /// Block's senders. pub senders: Arc>, /// Block's execution outcome. - pub execution_output: Arc, + pub execution_output: Arc>, /// Block's hashed state. pub hashed_state: Arc, /// Trie updates that result of applying the block. pub trie: Arc, } -impl ExecutedBlock { +impl ExecutedBlock { /// [`ExecutedBlock`] constructor. pub const fn new( - block: Arc, + block: Arc>, senders: Arc>, - execution_output: Arc, + execution_output: Arc>, hashed_state: Arc, trie: Arc, ) -> Self { @@ -796,7 +827,7 @@ impl ExecutedBlock { } /// Returns a reference to the executed block. - pub fn block(&self) -> &SealedBlock { + pub fn block(&self) -> &SealedBlockFor { &self.block } @@ -808,12 +839,12 @@ impl ExecutedBlock { /// Returns a [`SealedBlockWithSenders`] /// /// Note: this clones the block and senders. - pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { + pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { SealedBlockWithSenders { block: (*self.block).clone(), senders: (*self.senders).clone() } } /// Returns a reference to the block's execution outcome - pub fn execution_outcome(&self) -> &ExecutionOutcome { + pub fn execution_outcome(&self) -> &ExecutionOutcome { &self.execution_output } @@ -830,23 +861,23 @@ impl ExecutedBlock { /// Non-empty chain of blocks. #[derive(Debug)] -pub enum NewCanonicalChain { +pub enum NewCanonicalChain { /// A simple append to the current canonical head Commit { /// all blocks that lead back to the canonical head - new: Vec, + new: Vec>, }, /// A reorged chain consists of two chains that trace back to a shared ancestor block at which /// point they diverge. Reorg { /// All blocks of the _new_ chain - new: Vec, + new: Vec>, /// All blocks of the _old_ chain - old: Vec, + old: Vec>, }, } -impl NewCanonicalChain { +impl> NewCanonicalChain { /// Returns the length of the new chain. pub fn new_block_count(&self) -> usize { match self { @@ -863,7 +894,7 @@ impl NewCanonicalChain { } /// Converts the new chain into a notification that will be emitted to listeners - pub fn to_chain_notification(&self) -> CanonStateNotification { + pub fn to_chain_notification(&self) -> CanonStateNotification { match self { Self::Commit { new } => { let new = Arc::new(new.iter().fold(Chain::default(), |mut chain, exec| { @@ -899,7 +930,7 @@ impl NewCanonicalChain { /// /// Returns the new tip for [`Self::Reorg`] and [`Self::Commit`] variants which commit at least /// 1 new block. - pub fn tip(&self) -> &SealedBlock { + pub fn tip(&self) -> &SealedBlockFor { match self { Self::Commit { new } | Self::Reorg { new, .. 
} => { new.last().expect("non empty blocks").block() @@ -916,15 +947,17 @@ mod tests { use alloy_primitives::{map::HashSet, BlockNumber, Bytes, StorageKey, StorageValue}; use rand::Rng; use reth_errors::ProviderResult; - use reth_primitives::{Account, Bytecode, Receipt}; + use reth_primitives::{Account, Bytecode, EthPrimitives, Receipt}; use reth_storage_api::{ AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; - use reth_trie::{AccountProof, HashedStorage, MultiProof, StorageProof, TrieInput}; + use reth_trie::{ + AccountProof, HashedStorage, MultiProof, StorageMultiProof, StorageProof, TrieInput, + }; fn create_mock_state( - test_block_builder: &mut TestBlockBuilder, + test_block_builder: &mut TestBlockBuilder, block_number: u64, parent_hash: B256, ) -> BlockState { @@ -934,7 +967,7 @@ mod tests { } fn create_mock_state_chain( - test_block_builder: &mut TestBlockBuilder, + test_block_builder: &mut TestBlockBuilder, num_blocks: u64, ) -> Vec { let mut chain = Vec::with_capacity(num_blocks as usize); @@ -1031,6 +1064,15 @@ mod tests { ) -> ProviderResult { Ok(StorageProof::new(slot)) } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(StorageMultiProof::empty()) + } } impl StateProofProvider for MockStateProvider { @@ -1064,7 +1106,7 @@ mod tests { fn test_in_memory_state_impl_state_by_hash() { let mut state_by_hash = HashMap::default(); let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let state = Arc::new(create_mock_state(&mut test_block_builder, number, B256::random())); state_by_hash.insert(state.hash(), state.clone()); @@ -1080,7 +1122,7 @@ mod tests { let mut hash_by_number = BTreeMap::new(); let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let state = Arc::new(create_mock_state(&mut test_block_builder, number, B256::random())); let hash = state.hash(); @@ -1097,7 +1139,7 @@ mod tests { fn test_in_memory_state_impl_head_state() { let mut state_by_hash = HashMap::default(); let mut hash_by_number = BTreeMap::new(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let state1 = Arc::new(create_mock_state(&mut test_block_builder, 1, B256::random())); let hash1 = state1.hash(); let state2 = Arc::new(create_mock_state(&mut test_block_builder, 2, hash1)); @@ -1117,7 +1159,7 @@ mod tests { #[test] fn test_in_memory_state_impl_pending_state() { let pending_number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let pending_state = create_mock_state(&mut test_block_builder, pending_number, B256::random()); let pending_hash = pending_state.hash(); @@ -1134,7 +1176,8 @@ mod tests { #[test] fn test_in_memory_state_impl_no_pending_state() { - let in_memory_state = InMemoryState::new(HashMap::default(), BTreeMap::new(), None); + let in_memory_state: InMemoryState = + InMemoryState::new(HashMap::default(), BTreeMap::new(), None); assert_eq!(in_memory_state.pending_state(), None); } @@ -1142,7 +1185,7 @@ mod tests { #[test] fn test_state_new() { let number = rand::thread_rng().gen::(); - let mut 
test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1153,7 +1196,7 @@ mod tests { #[test] fn test_state_block() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1164,7 +1207,7 @@ mod tests { #[test] fn test_state_hash() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1175,7 +1218,7 @@ mod tests { #[test] fn test_state_number() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block); @@ -1186,7 +1229,7 @@ mod tests { #[test] fn test_state_state_root() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1197,7 +1240,7 @@ mod tests { #[test] fn test_state_receipts() { let receipts = Receipts { receipt_vec: vec![vec![Some(Receipt::default())]] }; - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_receipts(receipts.clone(), B256::random()); @@ -1208,8 +1251,8 @@ mod tests { #[test] fn test_in_memory_state_chain_update() { - let state = CanonicalInMemoryState::empty(); - let mut test_block_builder = TestBlockBuilder::default(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block1 = test_block_builder.get_executed_block_with_number(0, B256::random()); let block2 = test_block_builder.get_executed_block_with_number(0, B256::random()); let chain = NewCanonicalChain::Commit { new: vec![block1.clone()] }; @@ -1233,8 +1276,8 @@ mod tests { #[test] fn test_in_memory_state_set_pending_block() { - let state = CanonicalInMemoryState::empty(); - let mut test_block_builder = TestBlockBuilder::default(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); // First random block let block1 = test_block_builder.get_executed_block_with_number(0, B256::random()); @@ -1285,7 +1328,7 @@ mod tests { #[test] fn test_canonical_in_memory_state_state_provider() { - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block1 = test_block_builder.get_executed_block_with_number(1, B256::random()); let block2 = test_block_builder.get_executed_block_with_number(2, 
block1.block().hash()); let block3 = test_block_builder.get_executed_block_with_number(3, block2.block().hash()); @@ -1332,14 +1375,15 @@ mod tests { #[test] fn test_canonical_in_memory_state_canonical_chain_empty() { - let state = CanonicalInMemoryState::empty(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); let chain: Vec<_> = state.canonical_chain().collect(); assert!(chain.is_empty()); } #[test] fn test_canonical_in_memory_state_canonical_chain_single_block() { - let block = TestBlockBuilder::default().get_executed_block_with_number(1, B256::random()); + let block = TestBlockBuilder::::default() + .get_executed_block_with_number(1, B256::random()); let hash = block.block().hash(); let mut blocks = HashMap::default(); blocks.insert(hash, Arc::new(BlockState::new(block))); @@ -1358,7 +1402,7 @@ mod tests { fn test_canonical_in_memory_state_canonical_chain_multiple_blocks() { let mut parent_hash = B256::random(); let mut block_builder = TestBlockBuilder::default(); - let state = CanonicalInMemoryState::empty(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); for i in 1..=3 { let block = block_builder.get_executed_block_with_number(i, parent_hash); @@ -1380,7 +1424,7 @@ mod tests { fn test_canonical_in_memory_state_canonical_chain_with_pending_block() { let mut parent_hash = B256::random(); let mut block_builder = TestBlockBuilder::default(); - let state = CanonicalInMemoryState::empty(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); for i in 1..=2 { let block = block_builder.get_executed_block_with_number(i, parent_hash); @@ -1400,7 +1444,7 @@ mod tests { #[test] fn test_block_state_parent_blocks() { - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let chain = create_mock_state_chain(&mut test_block_builder, 4); let parents = chain[3].parent_state_chain(); @@ -1421,7 +1465,7 @@ mod tests { #[test] fn test_block_state_single_block_state_chain() { let single_block_number = 1; - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let single_block = create_mock_state(&mut test_block_builder, single_block_number, B256::random()); let single_block_hash = single_block.block().block.hash(); @@ -1437,7 +1481,7 @@ mod tests { #[test] fn test_block_state_chain() { - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let chain = create_mock_state_chain(&mut test_block_builder, 3); let block_state_chain = chain[2].chain().collect::>(); @@ -1459,7 +1503,7 @@ mod tests { #[test] fn test_to_chain_notification() { // Generate 4 blocks - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block0 = test_block_builder.get_executed_block_with_number(0, B256::random()); let block1 = test_block_builder.get_executed_block_with_number(1, block0.block.hash()); let block1a = test_block_builder.get_executed_block_with_number(1, block0.block.hash()); diff --git a/crates/chain-state/src/lib.rs b/crates/chain-state/src/lib.rs index bd9b43a59eae..519469d67f60 100644 --- a/crates/chain-state/src/lib.rs +++ b/crates/chain-state/src/lib.rs @@ -27,3 +27,6 @@ pub use memory_overlay::{MemoryOverlayStateProvider, MemoryOverlayStateProviderR #[cfg(any(test, feature = "test-utils"))] /// Common test helpers pub mod 
test_utils; + +// todo: remove when generic data prim integration complete +pub use reth_primitives::EthPrimitives; diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index ada0faee4907..c84bd8c93f06 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -1,28 +1,30 @@ use super::ExecutedBlock; +use alloy_consensus::BlockHeader; use alloy_primitives::{ keccak256, map::{HashMap, HashSet}, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, }; use reth_errors::ProviderResult; -use reth_primitives::{Account, Bytecode}; +use reth_primitives::{Account, Bytecode, NodePrimitives}; use reth_storage_api::{ AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, TrieInput, }; use std::sync::OnceLock; /// A state provider that stores references to in-memory blocks along with their state as well as a /// reference of the historical state provider for fallback lookups. #[allow(missing_debug_implementations)] -pub struct MemoryOverlayStateProviderRef<'a> { +pub struct MemoryOverlayStateProviderRef<'a, N: NodePrimitives = reth_primitives::EthPrimitives> { /// Historical state provider for state lookups that are not found in in-memory blocks. pub(crate) historical: Box, /// The collection of executed parent blocks. Expected order is newest to oldest. - pub(crate) in_memory: Vec, + pub(crate) in_memory: Vec>, /// Lazy-loaded in-memory trie data. pub(crate) trie_state: OnceLock, } @@ -30,11 +32,11 @@ pub struct MemoryOverlayStateProviderRef<'a> { /// A state provider that stores references to in-memory blocks along with their state as well as /// the historical state provider for fallback lookups. #[allow(missing_debug_implementations)] -pub struct MemoryOverlayStateProvider { +pub struct MemoryOverlayStateProvider { /// Historical state provider for state lookups that are not found in in-memory blocks. pub(crate) historical: Box, /// The collection of executed parent blocks. Expected order is newest to oldest. - pub(crate) in_memory: Vec, + pub(crate) in_memory: Vec>, /// Lazy-loaded in-memory trie data. pub(crate) trie_state: OnceLock, } @@ -49,7 +51,7 @@ macro_rules! impl_state_provider { /// - `in_memory` - the collection of executed ancestor blocks in reverse. /// - `historical` - a historical state provider for the latest ancestor block stored in the /// database. - pub fn new(historical: $historical_type, in_memory: Vec) -> Self { + pub fn new(historical: $historical_type, in_memory: Vec>) -> Self { Self { historical, in_memory, trie_state: OnceLock::new() } } @@ -74,7 +76,7 @@ macro_rules! impl_state_provider { impl $($tokens)* BlockHashReader for $type { fn block_hash(&self, number: BlockNumber) -> ProviderResult> { for block in &self.in_memory { - if block.block.number == number { + if block.block.number() == number { return Ok(Some(block.block.hash())) } } @@ -91,9 +93,9 @@ macro_rules! 
impl_state_provider { let mut earliest_block_number = None; let mut in_memory_hashes = Vec::new(); for block in &self.in_memory { - if range.contains(&block.block.number) { + if range.contains(&block.block.number()) { in_memory_hashes.insert(0, block.block.hash()); - earliest_block_number = Some(block.block.number); + earliest_block_number = Some(block.block.number()); } } @@ -167,6 +169,20 @@ macro_rules! impl_state_provider { hashed_storage.extend(&storage); self.historical.storage_proof(address, slot, hashed_storage) } + + // TODO: Currently this does not reuse available in-memory trie nodes. + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + storage: HashedStorage, + ) -> ProviderResult { + let state = &self.trie_state().state; + let mut hashed_storage = + state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + hashed_storage.extend(&storage); + self.historical.storage_multiproof(address, slots, hashed_storage) + } } impl $($tokens)* StateProofProvider for $type { @@ -230,8 +246,8 @@ macro_rules! impl_state_provider { }; } -impl_state_provider!([], MemoryOverlayStateProvider, Box); -impl_state_provider!([<'a>], MemoryOverlayStateProviderRef<'a>, Box); +impl_state_provider!([], MemoryOverlayStateProvider, Box); +impl_state_provider!([<'a, N: NodePrimitives>], MemoryOverlayStateProviderRef<'a, N>, Box); /// The collection of data necessary for trie-related operations for [`MemoryOverlayStateProvider`]. #[derive(Clone, Default, Debug)] diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index 582e1d2a05d4..c4e0415436a5 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -1,9 +1,10 @@ //! Canonical chain state notification trait and types. -use auto_impl::auto_impl; +use alloy_eips::eip2718::Encodable2718; use derive_more::{Deref, DerefMut}; use reth_execution_types::{BlockReceipts, Chain}; -use reth_primitives::{SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; +use reth_storage_api::NodePrimitivesProvider; use std::{ pin::Pin, sync::Arc, @@ -17,37 +18,48 @@ use tokio_stream::{ use tracing::debug; /// Type alias for a receiver that receives [`CanonStateNotification`] -pub type CanonStateNotifications = broadcast::Receiver; +pub type CanonStateNotifications = + broadcast::Receiver>; /// Type alias for a sender that sends [`CanonStateNotification`] -pub type CanonStateNotificationSender = broadcast::Sender; +pub type CanonStateNotificationSender = + broadcast::Sender>; /// A type that allows to register chain related event subscriptions. -#[auto_impl(&, Arc)] -pub trait CanonStateSubscriptions: Send + Sync { +pub trait CanonStateSubscriptions: NodePrimitivesProvider + Send + Sync { /// Get notified when a new canonical chain was imported. /// /// A canonical chain be one or more blocks, a reorg or a revert. - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications; + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications; /// Convenience method to get a stream of [`CanonStateNotification`]. 
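A consumption sketch for the stream helper named above, generic over any `CanonStateSubscriptions` implementor. Hedged assumptions: `futures::StreamExt` for `next`, and the `Chain::tip`/`Chain::blocks` accessors from `reth_execution_types`.

use futures::StreamExt;
use reth_chain_state::{CanonStateNotification, CanonStateSubscriptions};

async fn follow<T: CanonStateSubscriptions>(subscriptions: T) {
    let mut stream = subscriptions.canonical_state_stream();
    while let Some(notification) = stream.next().await {
        match notification {
            CanonStateNotification::Commit { new } => {
                println!("committed segment, tip {:?}", new.tip().num_hash());
            }
            CanonStateNotification::Reorg { old, new } => {
                // `old` holds the reverted segment; `new` what replaced it.
                println!("reorg: {} reverted, tip {:?}", old.blocks().len(), new.tip().num_hash());
            }
        }
    }
}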
- fn canonical_state_stream(&self) -> CanonStateNotificationStream { + fn canonical_state_stream(&self) -> CanonStateNotificationStream { CanonStateNotificationStream { st: BroadcastStream::new(self.subscribe_to_canonical_state()), } } } +impl CanonStateSubscriptions for &T { + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { + (*self).subscribe_to_canonical_state() + } + + fn canonical_state_stream(&self) -> CanonStateNotificationStream { + (*self).canonical_state_stream() + } +} + /// A Stream of [`CanonStateNotification`]. #[derive(Debug)] #[pin_project::pin_project] -pub struct CanonStateNotificationStream { +pub struct CanonStateNotificationStream { #[pin] - st: BroadcastStream, + st: BroadcastStream>, } -impl Stream for CanonStateNotificationStream { - type Item = CanonStateNotification; +impl Stream for CanonStateNotificationStream { + type Item = CanonStateNotification; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { loop { @@ -68,11 +80,11 @@ impl Stream for CanonStateNotificationStream { /// The notification contains at least one [`Chain`] with the imported segment. If some blocks were /// reverted (e.g. during a reorg), the old chain is also returned. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum CanonStateNotification { +pub enum CanonStateNotification { /// The canonical chain was extended. Commit { /// The newly added chain segment. - new: Arc, + new: Arc>, }, /// A chain segment was reverted or reorged. /// @@ -82,18 +94,18 @@ pub enum CanonStateNotification { /// chain segment. Reorg { /// The chain segment that was reverted. - old: Arc, + old: Arc>, /// The chain segment that was added on top of the canonical chain, minus the reverted /// blocks. /// /// In the case of a revert, not a reorg, this chain segment is empty. - new: Arc, + new: Arc>, }, } -impl CanonStateNotification { +impl CanonStateNotification { /// Get the chain segment that was reverted, if any. - pub fn reverted(&self) -> Option> { + pub fn reverted(&self) -> Option>> { match self { Self::Commit { .. } => None, Self::Reorg { old, .. } => Some(old.clone()), @@ -101,7 +113,7 @@ impl CanonStateNotification { } /// Get the newly imported chain segment, if any. - pub fn committed(&self) -> Arc { + pub fn committed(&self) -> Arc> { match self { Self::Commit { new } | Self::Reorg { new, .. } => new.clone(), } @@ -111,7 +123,7 @@ impl CanonStateNotification { /// /// Returns the new tip for [`Self::Reorg`] and [`Self::Commit`] variants which commit at least /// 1 new block. - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &SealedBlockWithSenders { match self { Self::Commit { new } | Self::Reorg { new, .. } => new.tip(), } @@ -122,7 +134,10 @@ impl CanonStateNotification { /// /// The boolean in the tuple (2nd element) denotes whether the receipt was from the reverted /// chain segment. - pub fn block_receipts(&self) -> Vec<(BlockReceipts, bool)> { + pub fn block_receipts(&self) -> Vec<(BlockReceipts, bool)> + where + N::SignedTx: Encodable2718, + { let mut receipts = Vec::new(); // get old receipts @@ -140,7 +155,9 @@ impl CanonStateNotification { /// Wrapper around a broadcast receiver that receives fork choice notifications. #[derive(Debug, Deref, DerefMut)] -pub struct ForkChoiceNotifications(pub watch::Receiver>); +pub struct ForkChoiceNotifications( + pub watch::Receiver>>, +); /// A trait that allows to register to fork choice related events /// and get notified when a new fork choice is available. 
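Before the fork-choice stream below, a sketch for the `block_receipts` helper above, assuming `EthPrimitives` (whose signed transaction type satisfies the new `Encodable2718` bound):

use reth_chain_state::CanonStateNotification;
use reth_primitives::EthPrimitives;

fn log_receipts(notification: &CanonStateNotification<EthPrimitives>) {
    // The second tuple element flags receipts from the reverted segment.
    for (block_receipts, reverted) in notification.block_receipts() {
        let tag = if reverted { "reverted" } else { "committed" };
        println!(
            "{tag}: block {:?}, {} receipt(s)",
            block_receipts.block,
            block_receipts.tx_receipts.len()
        );
    }
}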
@@ -194,13 +211,13 @@ impl Stream for ForkChoiceStream { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::B256; + use alloy_primitives::{b256, B256}; use reth_execution_types::ExecutionOutcome; use reth_primitives::{Receipt, Receipts, TransactionSigned, TxType}; #[test] fn test_commit_notification() { - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); let block1_hash = B256::new([0x01; 32]); let block2_hash = B256::new([0x02; 32]); @@ -212,7 +229,7 @@ mod tests { block2.set_block_number(2); block2.set_hash(block2_hash); - let chain = Arc::new(Chain::new( + let chain: Arc = Arc::new(Chain::new( vec![block1.clone(), block2.clone()], ExecutionOutcome::default(), None, @@ -233,7 +250,7 @@ mod tests { #[test] fn test_reorg_notification() { - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); let block1_hash = B256::new([0x01; 32]); let block2_hash = B256::new([0x02; 32]); let block3_hash = B256::new([0x03; 32]); @@ -250,7 +267,7 @@ mod tests { block3.set_block_number(3); block3.set_hash(block3_hash); - let old_chain = + let old_chain: Arc = Arc::new(Chain::new(vec![block1.clone()], ExecutionOutcome::default(), None)); let new_chain = Arc::new(Chain::new( vec![block2.clone(), block3.clone()], @@ -275,7 +292,7 @@ mod tests { #[test] fn test_block_receipts_commit() { // Create a default block instance for use in block definitions. - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); // Define unique hashes for two blocks to differentiate them in the chain. let block1_hash = B256::new([0x01; 32]); @@ -313,7 +330,7 @@ mod tests { let execution_outcome = ExecutionOutcome { receipts, ..Default::default() }; // Create a new chain segment with `block1` and `block2` and the execution outcome. - let new_chain = + let new_chain: Arc = Arc::new(Chain::new(vec![block1.clone(), block2.clone()], execution_outcome, None)); // Create a commit notification containing the new chain segment. @@ -330,7 +347,11 @@ mod tests { block_receipts[0].0, BlockReceipts { block: block1.num_hash(), - tx_receipts: vec![(B256::default(), receipt1)] + tx_receipts: vec![( + // Transaction hash of a Transaction::default() + b256!("20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), + receipt1 + )] } ); @@ -341,7 +362,7 @@ mod tests { #[test] fn test_block_receipts_reorg() { // Define block1 for the old chain segment, which will be reverted. - let mut old_block1 = SealedBlockWithSenders::default(); + let mut old_block1: SealedBlockWithSenders = Default::default(); old_block1.set_block_number(1); old_block1.set_hash(B256::new([0x01; 32])); old_block1.block.body.transactions.push(TransactionSigned::default()); @@ -361,10 +382,11 @@ mod tests { ExecutionOutcome { receipts: old_receipts, ..Default::default() }; // Create an old chain segment to be reverted, containing `old_block1`. - let old_chain = Arc::new(Chain::new(vec![old_block1.clone()], old_execution_outcome, None)); + let old_chain: Arc = + Arc::new(Chain::new(vec![old_block1.clone()], old_execution_outcome, None)); // Define block2 for the new chain segment, which will be committed. 
- let mut new_block1 = SealedBlockWithSenders::default(); + let mut new_block1: SealedBlockWithSenders = Default::default(); new_block1.set_block_number(2); new_block1.set_hash(B256::new([0x02; 32])); new_block1.block.body.transactions.push(TransactionSigned::default()); @@ -400,7 +422,11 @@ mod tests { block_receipts[0].0, BlockReceipts { block: old_block1.num_hash(), - tx_receipts: vec![(B256::default(), old_receipt)] + tx_receipts: vec![( + // Transaction hash of a Transaction::default() + b256!("20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), + old_receipt + )] } ); // Confirm this is from the reverted segment. @@ -412,7 +438,11 @@ mod tests { block_receipts[1].0, BlockReceipts { block: new_block1.num_hash(), - tx_receipts: vec![(B256::default(), new_receipt)] + tx_receipts: vec![( + // Transaction hash of a Transaction::default() + b256!("20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), + new_receipt + )] } ); // Confirm this is from the committed segment. diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 564df9fe341a..f6b0a4f17723 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -1,10 +1,12 @@ +use core::marker::PhantomData; + use crate::{ in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, }; -use alloy_consensus::{Transaction as _, TxEip1559, EMPTY_ROOT_HASH}; +use alloy_consensus::{Header, Transaction as _, TxEip1559, EMPTY_ROOT_HASH}; use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip7685::Requests}; -use alloy_primitives::{Address, BlockNumber, Sealable, B256, U256}; +use alloy_primitives::{Address, BlockNumber, B256, U256}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use rand::{thread_rng, Rng}; @@ -12,9 +14,11 @@ use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, - BlockBody, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, - Transaction, TransactionSigned, TransactionSignedEcRecovered, + BlockBody, EthPrimitives, NodePrimitives, Receipt, Receipts, SealedBlock, + SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, + TransactionSignedEcRecovered, }; +use reth_storage_api::NodePrimitivesProvider; use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; use revm::{db::BundleState, primitives::AccountInfo}; use std::{ @@ -27,7 +31,7 @@ use tokio::sync::broadcast::{self, Sender}; /// Functionality to build blocks for tests and help with assertions about /// their execution. #[derive(Debug)] -pub struct TestBlockBuilder { +pub struct TestBlockBuilder { /// The account that signs all the block's transactions. pub signer: Address, /// Private key for signing. 
@@ -40,9 +44,10 @@ pub struct TestBlockBuilder { pub signer_build_account_info: AccountInfo, /// Chain spec of the blocks generated by this builder pub chain_spec: ChainSpec, + _prims: PhantomData, } -impl Default for TestBlockBuilder { +impl Default for TestBlockBuilder { fn default() -> Self { let initial_account_info = AccountInfo::from_balance(U256::from(10).pow(U256::from(18))); let signer_pk = PrivateKeySigner::random(); @@ -53,6 +58,7 @@ impl Default for TestBlockBuilder { signer_pk, signer_execute_account_info: initial_account_info.clone(), signer_build_account_info: initial_account_info, + _prims: PhantomData, } } } @@ -98,8 +104,7 @@ impl TestBlockBuilder { let signature_hash = tx.signature_hash(); let signature = self.signer_pk.sign_hash_sync(&signature_hash).unwrap(); - TransactionSigned::from_transaction_and_signature(tx, signature) - .with_signer(self.signer) + TransactionSigned::new_unhashed(tx, signature).with_signer(self.signer) }; let num_txs = rng.gen_range(0..5); @@ -136,7 +141,9 @@ impl TestBlockBuilder { gas_limit: self.chain_spec.max_gas_limit, mix_hash: B256::random(), base_fee_per_gas: Some(INITIAL_BASE_FEE), - transactions_root: calculate_transaction_root(&transactions), + transactions_root: calculate_transaction_root( + &transactions.clone().into_iter().map(|tx| tx.into_signed()).collect::>(), + ), receipts_root: calculate_receipt_root(&receipts), beneficiary: Address::random(), state_root: state_root_unhashed(HashMap::from([( @@ -160,11 +167,8 @@ impl TestBlockBuilder { ..Default::default() }; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - let block = SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { transactions: transactions.into_iter().map(|tx| tx.into_signed()).collect(), ommers: Vec::new(), @@ -292,8 +296,8 @@ impl TestBlockBuilder { } /// A test `ChainEventSubscriptions` #[derive(Clone, Debug, Default)] -pub struct TestCanonStateSubscriptions { - canon_notif_tx: Arc>>>, +pub struct TestCanonStateSubscriptions { + canon_notif_tx: Arc>>>>, } impl TestCanonStateSubscriptions { @@ -312,6 +316,10 @@ impl TestCanonStateSubscriptions { } } +impl NodePrimitivesProvider for TestCanonStateSubscriptions { + type Primitives = EthPrimitives; +} + impl CanonStateSubscriptions for TestCanonStateSubscriptions { /// Sets up a broadcast channel with a buffer size of 100. fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index ee25f72bae8d..94b4285f92dd 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -1,12 +1,12 @@ use crate::{ChainSpec, DepositContract}; use alloc::{boxed::Box, vec::Vec}; use alloy_chains::Chain; +use alloy_consensus::Header; use alloy_eips::eip1559::BaseFeeParams; use alloy_genesis::Genesis; use alloy_primitives::B256; use core::fmt::{Debug, Display}; use reth_network_peers::NodeRecord; -use reth_primitives_traits::Header; /// Trait representing type configuring a chain spec. 
#[auto_impl::auto_impl(&, Arc)] @@ -109,6 +109,6 @@ impl EthChainSpec for ChainSpec { } fn is_optimism(&self) -> bool { - self.chain.is_optimism() + false } } diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 779eb8a37577..1f8ebd45f45d 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -11,7 +11,12 @@ use alloy_genesis::Genesis; use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; use derive_more::From; -use alloy_consensus::constants::{DEV_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; +use alloy_consensus::{ + constants::{ + DEV_GENESIS_HASH, HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, + }, + Header, +}; use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use reth_ethereum_forks::{ ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition, @@ -21,7 +26,7 @@ use reth_network_peers::{ base_nodes, base_testnet_nodes, holesky_nodes, mainnet_nodes, op_nodes, op_testnet_nodes, sepolia_nodes, NodeRecord, }; -use reth_primitives_traits::{constants::HOLESKY_GENESIS_HASH, Header, SealedHeader}; +use reth_primitives_traits::SealedHeader; use reth_trie_common::root::state_root_ref_unhashed; use crate::{constants::MAINNET_DEPOSIT_CONTRACT, once_cell_set, EthChainSpec, LazyLock, OnceLock}; diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index a0bc5147700d..90acb82d71d7 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -32,6 +32,7 @@ reth-fs-util.workspace = true reth-network = { workspace = true, features = ["serde"] } reth-network-p2p.workspace = true reth-network-peers = { workspace = true, features = ["secp256k1"] } +reth-node-api.workspace = true reth-node-builder.workspace = true reth-node-core.workspace = true reth-node-events.workspace = true @@ -52,6 +53,7 @@ reth-trie-common = { workspace = true, optional = true } alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true +alloy-consensus.workspace = true itertools.workspace = true futures.workspace = true @@ -94,22 +96,23 @@ reth-discv4.workspace = true [features] default = [] arbitrary = [ - "dep:proptest", - "dep:arbitrary", - "dep:proptest-arbitrary-interop", - "reth-primitives/arbitrary", - "reth-db-api/arbitrary", - "reth-eth-wire/arbitrary", - "reth-db/arbitrary", - "reth-chainspec/arbitrary", - "alloy-eips/arbitrary", - "alloy-primitives/arbitrary", - "reth-codecs/test-utils", - "reth-prune-types/test-utils", - "reth-stages-types/test-utils", - "reth-trie-common/test-utils", - "reth-codecs?/arbitrary", - "reth-prune-types?/arbitrary", - "reth-stages-types?/arbitrary", - "reth-trie-common?/arbitrary" + "dep:proptest", + "dep:arbitrary", + "dep:proptest-arbitrary-interop", + "reth-primitives/arbitrary", + "reth-db-api/arbitrary", + "reth-eth-wire/arbitrary", + "reth-db/arbitrary", + "reth-chainspec/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "reth-codecs/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", + "reth-trie-common/test-utils", + "reth-codecs?/arbitrary", + "reth-prune-types?/arbitrary", + "reth-stages-types?/arbitrary", + "reth-trie-common?/arbitrary", + "alloy-consensus/arbitrary", ] diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 49fee347ed45..b2ad1452aa46 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -15,7 +15,11 @@ use reth_node_core::{ args::{DatabaseArgs, 
DatadirArgs}, dirs::{ChainPath, DataDirPath}, }; -use reth_provider::{providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory}; +use reth_primitives::EthPrimitives; +use reth_provider::{ + providers::{NodeTypesForProvider, StaticFileProvider}, + ProviderFactory, StaticFileProviderFactory, +}; use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; @@ -53,7 +57,7 @@ pub struct EnvironmentArgs { impl> EnvironmentArgs { /// Initializes environment according to [`AccessRights`] and returns an instance of /// [`Environment`]. - pub fn init>( + pub fn init>( &self, access: AccessRights, ) -> eyre::Result> { @@ -105,13 +109,13 @@ impl> Environmen /// If it's a read-write environment and an issue is found, it will attempt to heal (including a /// pipeline unwind). Otherwise, it will print out a warning, advising the user to restart the /// node to heal. - fn create_provider_factory>( + fn create_provider_factory>( &self, config: &Config, db: Arc, - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, ) -> eyre::Result>>> { - let has_receipt_pruning = config.prune.as_ref().map_or(false, |a| a.has_receipts_pruning()); + let has_receipt_pruning = config.prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); let prune_modes = config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(); let factory = ProviderFactory::>>::new( @@ -188,3 +192,14 @@ impl AccessRights { matches!(self, Self::RW) } } + +/// Helper trait with a common set of requirements for the +/// [`NodeTypes`](reth_node_builder::NodeTypes) in CLI. +pub trait CliNodeTypes: + NodeTypesWithEngine + NodeTypesForProvider +{ +} +impl CliNodeTypes for N where + N: NodeTypesWithEngine + NodeTypesForProvider +{ +} diff --git a/crates/cli/commands/src/db/checksum.rs b/crates/cli/commands/src/db/checksum.rs index 9aa48e0e865d..76d92962f724 100644 --- a/crates/cli/commands/src/db/checksum.rs +++ b/crates/cli/commands/src/db/checksum.rs @@ -1,11 +1,14 @@ -use crate::db::get::{maybe_json_value_parser, table_key}; +use crate::{ + common::CliNodeTypes, + db::get::{maybe_json_value_parser, table_key}, +}; use ahash::RandomState; use clap::Parser; use reth_chainspec::EthereumHardforks; use reth_db::{DatabaseEnv, RawKey, RawTable, RawValue, TableViewer, Tables}; use reth_db_api::{cursor::DbCursorRO, table::Table, transaction::DbTx}; use reth_db_common::DbTool; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; +use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_provider::{providers::ProviderNodeTypes, DBProvider}; use std::{ hash::{BuildHasher, Hasher}, @@ -36,7 +39,7 @@ pub struct Command { impl Command { /// Execute `db checksum` command - pub fn execute>( + pub fn execute>( self, tool: &DbTool>>, ) -> eyre::Result<()> { @@ -79,17 +82,17 @@ impl TableViewer<(u64, Duration)> for ChecksumViewer<'_, N let mut cursor = tx.cursor_read::>()?; let walker = match (self.start_key.as_deref(), self.end_key.as_deref()) { (Some(start), Some(end)) => { - let start_key = table_key::(start).map(RawKey::::new)?; - let end_key = table_key::(end).map(RawKey::::new)?; + let start_key = table_key::(start).map(RawKey::new)?; + let end_key = table_key::(end).map(RawKey::new)?; cursor.walk_range(start_key..=end_key)?
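The `CliNodeTypes` helper added in `common.rs` above follows the trait-alias-via-blanket-impl pattern: the trait carries no items of its own, and a blanket impl makes every type that already satisfies the supertrait bounds implement it automatically, so the CLI commands below can spell a single bound instead of repeating the full list. A minimal, self-contained sketch of the pattern, with illustrative names (`BoundA`, `BoundB`, `Alias` are not reth APIs):

trait BoundA {}
trait BoundB {}

// The "alias": no items of its own, only supertrait bounds.
trait Alias: BoundA + BoundB {}

// Blanket impl: satisfying the bounds is enough; no per-type opt-in needed.
impl<T> Alias for T where T: BoundA + BoundB {}

struct Node;
impl BoundA for Node {}
impl BoundB for Node {}

fn use_alias<N: Alias>(_node: N) {}

fn main() {
    use_alias(Node); // compiles: `Node: BoundA + BoundB` implies `Node: Alias`
}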
} (None, Some(end)) => { - let end_key = table_key::(end).map(RawKey::::new)?; + let end_key = table_key::(end).map(RawKey::new)?; cursor.walk_range(..=end_key)? } (Some(start), None) => { - let start_key = table_key::(start).map(RawKey::::new)?; + let start_key = table_key::(start).map(RawKey::new)?; cursor.walk_range(start_key..)? } (None, None) => cursor.walk_range(..)?, diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 4006d1660aa3..13b7b70347e2 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -1,13 +1,16 @@ +use alloy_consensus::Header; use alloy_primitives::{hex, BlockHash}; use clap::Parser; use reth_db::{ - static_file::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask, ReceiptMask, TransactionMask}, + static_file::{ + ColumnSelectorOne, ColumnSelectorTwo, HeaderWithHashMask, ReceiptMask, TransactionMask, + }, tables, RawKey, RawTable, Receipts, TableViewer, Transactions, }; use reth_db_api::table::{Decompress, DupSort, Table}; use reth_db_common::DbTool; +use reth_node_api::{ReceiptTy, TxTy}; use reth_node_builder::NodeTypesWithDB; -use reth_primitives::Header; use reth_provider::{providers::ProviderNodeTypes, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; use tracing::error; @@ -61,16 +64,14 @@ impl Command { Subcommand::StaticFile { segment, key, raw } => { let (key, mask): (u64, _) = match segment { StaticFileSegment::Headers => { - (table_key::(&key)?, >::MASK) + (table_key::(&key)?, >::MASK) + } + StaticFileSegment::Transactions => { + (table_key::(&key)?, >>::MASK) + } + StaticFileSegment::Receipts => { + (table_key::(&key)?, >>::MASK) } - StaticFileSegment::Transactions => ( - table_key::(&key)?, - ::Value>>::MASK, - ), - StaticFileSegment::Receipts => ( - table_key::(&key)?, - ::Value>>::MASK, - ), }; let content = tool.provider_factory.static_file_provider().find_static_file( @@ -128,12 +129,12 @@ impl Command { /// Get an instance of key for given table pub(crate) fn table_key(key: &str) -> Result { - serde_json::from_str::(key).map_err(|e| eyre::eyre!(e)) + serde_json::from_str(key).map_err(|e| eyre::eyre!(e)) } /// Get an instance of subkey for given dupsort table fn table_subkey(subkey: Option<&str>) -> Result { - serde_json::from_str::(subkey.unwrap_or_default()).map_err(|e| eyre::eyre!(e)) + serde_json::from_str(subkey.unwrap_or_default()).map_err(|e| eyre::eyre!(e)) } struct GetValueViewer<'a, N: NodeTypesWithDB> { diff --git a/crates/cli/commands/src/db/mod.rs b/crates/cli/commands/src/db/mod.rs index e1a9a90bacc3..e80b51160e20 100644 --- a/crates/cli/commands/src/db/mod.rs +++ b/crates/cli/commands/src/db/mod.rs @@ -1,10 +1,9 @@ -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::{Parser, Subcommand}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_db::version::{get_db_version, DatabaseVersionError, DB_VERSION}; use reth_db_common::DbTool; -use reth_node_builder::NodeTypesWithEngine; use std::io::{self, Write}; mod checksum; @@ -65,9 +64,7 @@ macro_rules! 
db_ro_exec { impl> Command { /// Execute `db` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { let data_dir = self.env.datadir.clone().resolve_datadir(self.env.chain.chain()); let db_path = data_dir.db(); let static_files_path = data_dir.static_files(); diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index ac36b866b07a..71ea995800fc 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -1,4 +1,4 @@ -use crate::db::checksum::ChecksumViewer; +use crate::{common::CliNodeTypes, db::checksum::ChecksumViewer}; use clap::Parser; use comfy_table::{Cell, Row, Table as ComfyTable}; use eyre::WrapErr; @@ -9,7 +9,7 @@ use reth_db::{mdbx, static_file::iter_static_files, DatabaseEnv, TableViewer, Ta use reth_db_api::database::Database; use reth_db_common::DbTool; use reth_fs_util as fs; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; +use reth_node_builder::{NodePrimitives, NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::providers::{ProviderNodeTypes, StaticFileProvider}; use reth_static_file_types::SegmentRangeInclusive; @@ -38,7 +38,7 @@ pub struct Command { impl Command { /// Execute `db stats` command - pub fn execute>( + pub fn execute>( self, data_dir: ChainPath, tool: &DbTool>>, @@ -49,7 +49,7 @@ impl Command { println!("\n"); } - let static_files_stats_table = self.static_files_stats_table(data_dir)?; + let static_files_stats_table = self.static_files_stats_table::(data_dir)?; println!("{static_files_stats_table}"); println!("\n"); @@ -143,7 +143,7 @@ impl Command { Ok(table) } - fn static_files_stats_table( + fn static_files_stats_table( &self, data_dir: ChainPath, ) -> eyre::Result { @@ -173,7 +173,8 @@ impl Command { } let static_files = iter_static_files(data_dir.static_files())?; - let static_file_provider = StaticFileProvider::read_only(data_dir.static_files(), false)?; + let static_file_provider = + StaticFileProvider::::read_only(data_dir.static_files(), false)?; let mut total_data_size = 0; let mut total_index_size = 0; diff --git a/crates/cli/commands/src/db/tui.rs b/crates/cli/commands/src/db/tui.rs index 240ca376970c..1a9fae7f8918 100644 --- a/crates/cli/commands/src/db/tui.rs +++ b/crates/cli/commands/src/db/tui.rs @@ -365,7 +365,7 @@ where .map(|(i, k)| { ListItem::new(format!("[{:0>width$}]: {k:?}", i + app.skip, width = key_length)) }) - .collect::>>(); + .collect::>(); let key_list = List::new(formatted_keys) .block(Block::default().borders(Borders::ALL).title(format!( diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index a7c81e53052b..c1f6408b49b0 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -1,5 +1,5 @@ //! Command that initializes the node by importing a chain from a file. 
-use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_primitives::B256; use clap::Parser; use futures::{Stream, StreamExt}; @@ -20,7 +20,6 @@ use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; -use reth_node_builder::NodeTypesWithEngine; use reth_node_core::version::SHORT_VERSION; use reth_node_events::node::NodeEvent; use reth_provider::{ @@ -60,7 +59,7 @@ impl> ImportComm /// Execute `import` command pub async fn execute(self, executor: F) -> eyre::Result<()> where - N: NodeTypesWithEngine, + N: CliNodeTypes, E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { @@ -168,7 +167,7 @@ pub fn build_import_pipeline( executor: E, ) -> eyre::Result<(Pipeline, impl Stream)> where - N: ProviderNodeTypes, + N: ProviderNodeTypes + CliNodeTypes, C: Consensus + 'static, E: BlockExecutorProvider, { @@ -203,7 +202,7 @@ where let max_block = file_client.max_block().unwrap_or(0); - let pipeline = Pipeline::::builder() + let pipeline = Pipeline::builder() .with_tip_sender(tip_tx) // we want to sync all blocks the file client provides or 0 if empty .with_max_block(max_block) diff --git a/crates/cli/commands/src/init_cmd.rs b/crates/cli/commands/src/init_cmd.rs index 5fde9ac0d0ba..83f471d629db 100644 --- a/crates/cli/commands/src/init_cmd.rs +++ b/crates/cli/commands/src/init_cmd.rs @@ -1,10 +1,9 @@ //! Command that initializes the node from a genesis file. -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; -use reth_node_builder::NodeTypesWithEngine; use reth_provider::BlockHashReader; use tracing::info; @@ -17,9 +16,7 @@ pub struct InitCommand { impl> InitCommand { /// Execute the `init` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { info!(target: "reth::cli", "reth init starting"); let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index adaec3e8be37..bdade252a668 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -1,18 +1,17 @@ //! Command that initializes the node from a genesis file. -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_primitives::{B256, U256}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_db_common::init::init_from_state_dump; -use reth_node_builder::NodeTypesWithEngine; use reth_primitives::SealedHeader; use reth_provider::{ BlockNumReader, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; -use std::{fs::File, io::BufReader, path::PathBuf, str::FromStr}; +use std::{io::BufReader, path::PathBuf, str::FromStr}; use tracing::info; pub mod without_evm; @@ -68,9 +67,7 @@ pub struct InitStateCommand { impl> InitStateCommand { /// Execute the `init` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { info!(target: "reth::cli", "Reth init-state starting"); let Environment { config, provider_factory, .. 
} = self.env.init::(AccessRights::RW)?; @@ -97,7 +94,6 @@ impl> InitStateC if last_block_number == 0 { without_evm::setup_without_evm( &provider_rw, - &static_file_provider, // &header, // header_hash, SealedHeader::new(header, header_hash), @@ -119,8 +115,7 @@ impl> InitStateC info!(target: "reth::cli", "Initiating state dump"); - let file = File::open(self.state)?; - let reader = BufReader::new(file); + let reader = BufReader::new(reth_fs_util::open(self.state)?); let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?; diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index 187996653c30..22236d14c76b 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -1,11 +1,12 @@ use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rlp::Decodable; -use reth_primitives::{ - BlockBody, Header, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, -}; +use alloy_consensus::Header; +use reth_node_builder::NodePrimitives; +use reth_primitives::{SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment}; use reth_provider::{ - providers::StaticFileProvider, BlockWriter, StageCheckpointWriter, StaticFileWriter, + providers::StaticFileProvider, BlockWriter, StageCheckpointWriter, StaticFileProviderFactory, + StaticFileWriter, StorageLocation, }; use reth_stages::{StageCheckpoint, StageId}; @@ -26,21 +27,23 @@ pub(crate) fn read_header_from_file(path: PathBuf) -> Result( provider_rw: &Provider, - static_file_provider: &StaticFileProvider, header: SealedHeader, total_difficulty: U256, ) -> Result<(), eyre::Error> where - Provider: StageCheckpointWriter + BlockWriter, + Provider: StaticFileProviderFactory + + StageCheckpointWriter + + BlockWriter>, { info!(target: "reth::cli", "Setting up dummy EVM chain before importing state."); + let static_file_provider = provider_rw.static_file_provider(); // Write EVM dummy data up to `header - 1` block - append_dummy_chain(static_file_provider, header.number - 1)?; + append_dummy_chain(&static_file_provider, header.number - 1)?; info!(target: "reth::cli", "Appending first valid block."); - append_first_block(provider_rw, static_file_provider, &header, total_difficulty)?; + append_first_block(provider_rw, &header, total_difficulty)?; for stage in StageId::ALL { provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(header.number))?; @@ -55,17 +58,23 @@ where /// /// By appending it, static file writer also verifies that all segments are at the same /// height. -fn append_first_block( - provider_rw: impl BlockWriter, - sf_provider: &StaticFileProvider, +fn append_first_block( + provider_rw: &Provider, header: &SealedHeader, total_difficulty: U256, -) -> Result<(), eyre::Error> { +) -> Result<(), eyre::Error> +where + Provider: BlockWriter> + + StaticFileProviderFactory, +{ provider_rw.insert_block( - SealedBlockWithSenders::new(SealedBlock::new(header.clone(), BlockBody::default()), vec![]) + SealedBlockWithSenders::new(SealedBlock::new(header.clone(), Default::default()), vec![]) .expect("no senders or txes"), + StorageLocation::Database, )?; + let sf_provider = provider_rw.static_file_provider(); + sf_provider.latest_writer(StaticFileSegment::Headers)?.append_header( header, total_difficulty, @@ -84,8 +93,8 @@ fn append_first_block( /// * Headers: It will push an empty block. /// * Transactions: It will not push any tx, only increments the end block range. 
/// * Receipts: It will not push any receipt, only increments the end block range. -fn append_dummy_chain( - sf_provider: &StaticFileProvider, +fn append_dummy_chain( + sf_provider: &StaticFileProvider, target_height: BlockNumber, ) -> Result<(), eyre::Error> { let (tx, rx) = std::sync::mpsc::channel(); diff --git a/crates/cli/commands/src/prune.rs b/crates/cli/commands/src/prune.rs index 7dbb66fc2faf..37f0637b0a5c 100644 --- a/crates/cli/commands/src/prune.rs +++ b/crates/cli/commands/src/prune.rs @@ -1,9 +1,8 @@ //! Command that runs pruning without any limits. -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; -use reth_node_builder::NodeTypesWithEngine; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; use tracing::info; @@ -17,9 +16,7 @@ pub struct PruneCommand { impl> PruneCommand { /// Execute the `prune` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { let Environment { config, provider_factory, .. } = self.env.init::(AccessRights::RW)?; let prune_config = config.prune.unwrap_or_default(); diff --git a/crates/cli/commands/src/recover/mod.rs b/crates/cli/commands/src/recover/mod.rs index 3216449e49b6..a2d943602279 100644 --- a/crates/cli/commands/src/recover/mod.rs +++ b/crates/cli/commands/src/recover/mod.rs @@ -1,10 +1,10 @@ //! `reth recover` command. +use crate::common::CliNodeTypes; use clap::{Parser, Subcommand}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; -use reth_node_builder::NodeTypesWithEngine; mod storage_tries; @@ -24,7 +24,7 @@ pub enum Subcommands { impl> Command { /// Execute `recover` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { diff --git a/crates/cli/commands/src/recover/storage_tries.rs b/crates/cli/commands/src/recover/storage_tries.rs index 794058fac1d8..f879c393c6b1 100644 --- a/crates/cli/commands/src/recover/storage_tries.rs +++ b/crates/cli/commands/src/recover/storage_tries.rs @@ -1,4 +1,4 @@ -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; @@ -8,7 +8,6 @@ use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRW}, transaction::DbTx, }; -use reth_node_builder::NodeTypesWithEngine; use reth_provider::{BlockNumReader, HeaderProvider, ProviderError}; use reth_trie::StateRoot; use reth_trie_db::DatabaseStateRoot; @@ -23,7 +22,7 @@ pub struct Command { impl> Command { /// Execute `storage-tries` recovery command - pub async fn execute>( + pub async fn execute>( self, _ctx: CliContext, ) -> eyre::Result<()> { diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index 3a277cabd185..49bbc55ec241 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -1,5 +1,5 @@ //! 
Database debugging tool -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use itertools::Itertools; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -10,9 +10,10 @@ use reth_db_common::{ init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}, DbTool, }; -use reth_node_builder::NodeTypesWithEngine; use reth_node_core::args::StageEnum; -use reth_provider::{writer::UnifiedStorageWriter, StaticFileProviderFactory}; +use reth_provider::{ + writer::UnifiedStorageWriter, DatabaseProviderFactory, StaticFileProviderFactory, +}; use reth_prune::PruneSegment; use reth_stages::StageId; use reth_static_file_types::StaticFileSegment; @@ -28,13 +29,9 @@ pub struct Command { impl> Command { /// Execute `db` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; - let static_file_provider = provider_factory.static_file_provider(); - let tool = DbTool::new(provider_factory)?; let static_file_segment = match self.stage { @@ -60,7 +57,7 @@ impl> Command } } - let provider_rw = tool.provider_factory.provider_rw()?; + let provider_rw = tool.provider_factory.database_provider_rw()?; let tx = provider_rw.tx_ref(); match self.stage { @@ -71,7 +68,7 @@ impl> Command tx.clear::()?; reset_stage_checkpoint(tx, StageId::Headers)?; - insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; + insert_genesis_header(&provider_rw, &self.env.chain)?; } StageEnum::Bodies => { tx.clear::()?; @@ -83,7 +80,7 @@ impl> Command tx.clear::()?; reset_stage_checkpoint(tx, StageId::Bodies)?; - insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; + insert_genesis_header(&provider_rw, &self.env.chain)?; } StageEnum::Senders => { tx.clear::()?; @@ -104,7 +101,7 @@ impl> Command reset_stage_checkpoint(tx, StageId::Execution)?; let alloc = &self.env.chain.genesis().alloc; - insert_genesis_state(&provider_rw.0, alloc.iter())?; + insert_genesis_state(&provider_rw, alloc.iter())?; } StageEnum::AccountHashing => { tx.clear::()?; @@ -142,20 +139,20 @@ impl> Command reset_stage_checkpoint(tx, StageId::IndexAccountHistory)?; reset_stage_checkpoint(tx, StageId::IndexStorageHistory)?; - insert_genesis_history(&provider_rw.0, self.env.chain.genesis().alloc.iter())?; + insert_genesis_history(&provider_rw, self.env.chain.genesis().alloc.iter())?; } StageEnum::TxLookup => { tx.clear::()?; reset_prune_checkpoint(tx, PruneSegment::TransactionLookup)?; reset_stage_checkpoint(tx, StageId::TransactionLookup)?; - insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; + insert_genesis_header(&provider_rw, &self.env.chain)?; } } tx.put::(StageId::Finish.to_string(), Default::default())?; - UnifiedStorageWriter::commit_unwind(provider_rw, static_file_provider)?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; Ok(()) } diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 709fc59190d4..000c1b542dbf 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -7,7 +7,8 @@ use reth_db_api::{ }; use reth_db_common::DbTool; use reth_evm::{execute::BlockExecutorProvider, noop::NoopBlockExecutorProvider}; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; +use 
reth_node_api::NodePrimitives; +use reth_node_builder::NodeTypesWithDB; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, @@ -25,7 +26,13 @@ pub(crate) async fn dump_execution_stage( executor: E, ) -> eyre::Result<()> where - N: ProviderNodeTypes, + N: ProviderNodeTypes< + DB = Arc, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, E: BlockExecutorProvider, { let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; @@ -36,7 +43,7 @@ where if should_run { dry_run( - ProviderFactory::>>::new( + ProviderFactory::::new( Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, @@ -131,7 +138,14 @@ fn import_tables_with_range( /// Dry-run an unwind to FROM block, so we can get the `PlainStorageState` and /// `PlainAccountState` safely. There might be some state dependency from an address /// which hasn't been changed in the given range. -fn unwind_and_copy( +fn unwind_and_copy< + N: ProviderNodeTypes< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, +>( db_tool: &DbTool, from: u64, tip_block_number: u64, @@ -168,7 +182,12 @@ fn dry_run( executor: E, ) -> eyre::Result<()> where - N: ProviderNodeTypes, + N: ProviderNodeTypes< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, E: BlockExecutorProvider, { info!(target: "reth::cli", "Executing stage. [dry-run]"); diff --git a/crates/cli/commands/src/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs index 738dcabafa70..97452cee8920 100644 --- a/crates/cli/commands/src/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -6,7 +6,6 @@ use eyre::Result; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; -use reth_node_builder::NodeTypesWithDBAdapter; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, @@ -15,7 +14,7 @@ use reth_provider::{ use reth_stages::{stages::AccountHashingStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; -pub(crate) async fn dump_hashing_account_stage( +pub(crate) async fn dump_hashing_account_stage>>( db_tool: &DbTool, from: BlockNumber, to: BlockNumber, @@ -37,7 +36,7 @@ pub(crate) async fn dump_hashing_account_stage( if should_run { dry_run( - ProviderFactory::>>::new( + ProviderFactory::::new( Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, diff --git a/crates/cli/commands/src/stage/dump/hashing_storage.rs b/crates/cli/commands/src/stage/dump/hashing_storage.rs index 204c087a234d..06b064bc02f7 100644 --- a/crates/cli/commands/src/stage/dump/hashing_storage.rs +++ b/crates/cli/commands/src/stage/dump/hashing_storage.rs @@ -5,7 +5,6 @@ use eyre::Result; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; -use reth_node_builder::NodeTypesWithDBAdapter; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, @@ -14,7 +13,7 @@ use reth_provider::{ use reth_stages::{stages::StorageHashingStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; -pub(crate) async fn 
dump_hashing_storage_stage( +pub(crate) async fn dump_hashing_storage_stage>>( db_tool: &DbTool, from: u64, to: u64, @@ -27,7 +26,7 @@ pub(crate) async fn dump_hashing_storage_stage( if should_run { dry_run( - ProviderFactory::>>::new( + ProviderFactory::::new( Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index f7e9e2fc1afc..ce187437218a 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -9,7 +9,7 @@ use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; -use reth_node_builder::NodeTypesWithDBAdapter; +use reth_node_api::NodePrimitives; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, @@ -25,7 +25,15 @@ use reth_stages::{ }; use tracing::info; -pub(crate) async fn dump_merkle_stage( +pub(crate) async fn dump_merkle_stage< + N: ProviderNodeTypes< + DB = Arc, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, +>( db_tool: &DbTool, from: BlockNumber, to: BlockNumber, @@ -54,7 +62,7 @@ pub(crate) async fn dump_merkle_stage( if should_run { dry_run( - ProviderFactory::>>::new( + ProviderFactory::::new( Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, @@ -68,7 +76,14 @@ pub(crate) async fn dump_merkle_stage( } /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. -fn unwind_and_copy( +fn unwind_and_copy< + N: ProviderNodeTypes< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, +>( db_tool: &DbTool, range: (u64, u64), tip_block_number: u64, diff --git a/crates/cli/commands/src/stage/dump/mod.rs b/crates/cli/commands/src/stage/dump/mod.rs index 6fd2f23aa0e5..36b8fb122580 100644 --- a/crates/cli/commands/src/stage/dump/mod.rs +++ b/crates/cli/commands/src/stage/dump/mod.rs @@ -1,5 +1,5 @@ //! 
Database debugging tool -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; @@ -10,7 +10,7 @@ use reth_db_api::{ }; use reth_db_common::DbTool; use reth_evm::execute::BlockExecutorProvider; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; +use reth_node_builder::NodeTypesWithDB; use reth_node_core::{ args::DatadirArgs, dirs::{DataDirPath, PlatformPath}, @@ -92,7 +92,7 @@ impl> Command /// Execute `dump-stage` command pub async fn execute(self, executor: F) -> eyre::Result<()> where - N: NodeTypesWithEngine, + N: CliNodeTypes, E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { diff --git a/crates/cli/commands/src/stage/mod.rs b/crates/cli/commands/src/stage/mod.rs index 562bd73a28d7..b9e0725428a9 100644 --- a/crates/cli/commands/src/stage/mod.rs +++ b/crates/cli/commands/src/stage/mod.rs @@ -2,12 +2,12 @@ use std::sync::Arc; +use crate::common::CliNodeTypes; use clap::{Parser, Subcommand}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_evm::execute::BlockExecutorProvider; -use reth_node_builder::NodeTypesWithEngine; pub mod drop; pub mod dump; @@ -43,7 +43,7 @@ impl> Command /// Execute `stage` command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where - N: NodeTypesWithEngine, + N: CliNodeTypes, E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index 23d6f6f28ac6..c852eea05a7e 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -2,7 +2,7 @@ //! //! 
Stage debugging tool -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_eips::BlockHashOrNumber; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; @@ -11,6 +11,7 @@ use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::config::{HashingConfig, SenderRecoveryConfig, TransactionLookupConfig}; +use reth_db_api::database_metrics::DatabaseMetrics; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, @@ -19,7 +20,6 @@ use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_network::BlockDownloaderProvider; use reth_network_p2p::HeadersClient; -use reth_node_builder::NodeTypesWithEngine; use reth_node_core::{ args::{NetworkArgs, StageEnum}, version::{ @@ -106,7 +106,7 @@ impl> Command /// Execute `stage` command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where - N: NodeTypesWithEngine, + N: CliNodeTypes, E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { @@ -133,10 +133,20 @@ impl> Command }, ChainSpecInfo { name: provider_factory.chain_spec().chain().to_string() }, ctx.task_executor, - Hooks::new( - provider_factory.db_ref().clone(), - provider_factory.static_file_provider(), - ), + Hooks::builder() + .with_hook({ + let db = provider_factory.db_ref().clone(); + move || db.report_metrics() + }) + .with_hook({ + let sfp = provider_factory.static_file_provider(); + move || { + if let Err(error) = sfp.report_metrics() { + error!(%error, "Failed to report metrics from static file provider"); + } + } + }) + .build(), ); MetricServer::new(config).serve().await?; @@ -329,10 +339,7 @@ impl> Command } if self.commit { - UnifiedStorageWriter::commit_unwind( - provider_rw, - provider_factory.static_file_provider(), - )?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; provider_rw = provider_factory.database_provider_rw()?; } } @@ -355,7 +362,7 @@ impl> Command provider_rw.save_stage_checkpoint(exec_stage.id(), checkpoint)?; } if self.commit { - UnifiedStorageWriter::commit(provider_rw, provider_factory.static_file_provider())?; + UnifiedStorageWriter::commit(provider_rw)?; provider_rw = provider_factory.database_provider_rw()?; } diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index a5c9956c95b2..2d29121d0698 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -1,8 +1,8 @@ //! 
Unwinding a certain block range -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, B256}; +use alloy_primitives::B256; use clap::{Parser, Subcommand}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -13,11 +13,11 @@ use reth_db::DatabaseEnv; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::args::NetworkArgs; use reth_provider::{ providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, ChainSpecProvider, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, + StorageLocation, }; use reth_prune::PruneModes; use reth_stages::{ @@ -26,7 +26,7 @@ use reth_stages::{ ExecutionStageThresholds, Pipeline, StageSet, }; use reth_static_file::StaticFileProducer; -use std::{ops::RangeInclusive, sync::Arc}; +use std::sync::Arc; use tokio::sync::watch; use tracing::info; @@ -50,21 +50,16 @@ pub struct Command { impl> Command { /// Execute `db stage unwind` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { let Environment { provider_factory, config, .. } = self.env.init::(AccessRights::RW)?; - let range = self.command.unwind_range(provider_factory.clone())?; - if *range.start() == 0 { - eyre::bail!("Cannot unwind genesis block") - } + let target = self.command.unwind_target(provider_factory.clone())?; let highest_static_file_block = provider_factory .static_file_provider() .get_highest_static_files() .max() - .filter(|highest_static_file_block| highest_static_file_block >= range.start()); + .filter(|highest_static_file_block| *highest_static_file_block > target); // Execute a pipeline unwind if the start of the range overlaps the existing static // files. If that's the case, then copy all available data from MDBX to static files, and @@ -78,9 +73,9 @@ impl> Command } if let Some(highest_static_file_block) = highest_static_file_block { - info!(target: "reth::cli", ?range, ?highest_static_file_block, "Executing a pipeline unwind."); + info!(target: "reth::cli", ?target, ?highest_static_file_block, "Executing a pipeline unwind."); } else { - info!(target: "reth::cli", ?range, "Executing a pipeline unwind."); + info!(target: "reth::cli", ?target, "Executing a pipeline unwind."); } // This will build an offline-only pipeline if the `offline` flag is enabled @@ -89,34 +84,30 @@ impl> Command // Move all applicable data from database to static files. 
pipeline.move_to_static_files()?; - pipeline.unwind((*range.start()).saturating_sub(1), None)?; + pipeline.unwind(target, None)?; } else { - info!(target: "reth::cli", ?range, "Executing a database unwind."); + info!(target: "reth::cli", ?target, "Executing a database unwind."); let provider = provider_factory.provider_rw()?; - let _ = provider - .take_block_and_execution_range(range.clone()) + provider + .remove_block_and_execution_above(target, StorageLocation::Both) .map_err(|err| eyre::eyre!("Transaction error on unwind: {err}"))?; // update finalized block if needed let last_saved_finalized_block_number = provider.last_finalized_block_number()?; - let range_min = - range.clone().min().ok_or(eyre::eyre!("Could not fetch lower range end"))?; - if last_saved_finalized_block_number.is_none() || - Some(range_min) < last_saved_finalized_block_number - { - provider.save_finalized_block_number(BlockNumber::from(range_min))?; + if last_saved_finalized_block_number.is_none_or(|f| f > target) { + provider.save_finalized_block_number(target)?; } provider.commit()?; } - info!(target: "reth::cli", range=?range.clone(), count=range.count(), "Unwound blocks"); + info!(target: "reth::cli", ?target, "Unwound blocks"); Ok(()) } - fn build_pipeline>( + fn build_pipeline + CliNodeTypes>( self, config: Config, provider_factory: ProviderFactory, @@ -186,13 +177,11 @@ enum Subcommands { } impl Subcommands { - /// Returns the block range to unwind. - /// - /// This returns an inclusive range: [target..=latest] - fn unwind_range>>( + /// Returns the block to unwind to. The returned block will stay in the database. + fn unwind_target>>( &self, factory: ProviderFactory, - ) -> eyre::Result> { + ) -> eyre::Result { let provider = factory.provider()?; let last = provider.last_block_number()?; let target = match self { @@ -203,11 +192,11 @@ impl Subcommands { BlockHashOrNumber::Number(num) => *num, }, Self::NumBlocks { amount } => last.saturating_sub(*amount), - } + 1; + }; if target > last { eyre::bail!("Target block number is higher than the latest block number") } - Ok(target..=last) + Ok(target) } } diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs index c498718e9fcb..5490f568d3a8 100644 --- a/crates/cli/commands/src/test_vectors/compact.rs +++ b/crates/cli/commands/src/test_vectors/compact.rs @@ -22,8 +22,7 @@ use reth_db::{ }; use reth_fs_util as fs; use reth_primitives::{ - Account, Log, LogData, Receipt, ReceiptWithBloom, StorageEntry, Transaction, - TransactionSignedNoHash, TxType, + Account, Log, LogData, Receipt, StorageEntry, Transaction, TransactionSignedNoHash, TxType, }; use reth_prune_types::{PruneCheckpoint, PruneMode}; use reth_stages_types::{ @@ -76,7 +75,6 @@ compact_types!( // reth-primitives Account, Receipt, - ReceiptWithBloom, // reth_codecs::alloy Authorization, GenesisAccount, diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs index 6b523c6edd19..fd7d3b3799d8 100644 --- a/crates/cli/commands/src/test_vectors/tables.rs +++ b/crates/cli/commands/src/test_vectors/tables.rs @@ -1,3 +1,4 @@ +use alloy_consensus::Header; use alloy_primitives::{hex, private::getrandom::getrandom}; use arbitrary::Arbitrary; use eyre::Result; @@ -10,7 +11,7 @@ use proptest_arbitrary_interop::arb; use reth_db::tables; use reth_db_api::table::{DupSort, Table, TableRow}; use reth_fs_util as fs; -use reth_primitives::{Header, TransactionSignedNoHash}; +use reth_primitives::TransactionSignedNoHash; use 
std::collections::HashSet; use tracing::error; diff --git a/crates/cli/util/src/load_secret_key.rs b/crates/cli/util/src/load_secret_key.rs index 25da0e066761..8b3bee09c8ca 100644 --- a/crates/cli/util/src/load_secret_key.rs +++ b/crates/cli/util/src/load_secret_key.rs @@ -41,10 +41,7 @@ pub fn get_secret_key(secret_key_path: &Path) -> Result { let contents = fs::read_to_string(secret_key_path)?; - Ok(contents - .as_str() - .parse::() - .map_err(SecretKeyError::SecretKeyDecodeError)?) + Ok(contents.as_str().parse().map_err(SecretKeyError::SecretKeyDecodeError)?) } Ok(false) => { if let Some(dir) = secret_key_path.parent() { diff --git a/crates/cli/util/src/parsers.rs b/crates/cli/util/src/parsers.rs index 9bb803bcca89..fb27e1420c0a 100644 --- a/crates/cli/util/src/parsers.rs +++ b/crates/cli/util/src/parsers.rs @@ -23,11 +23,11 @@ pub fn parse_duration_from_secs_or_ms( arg: &str, ) -> eyre::Result { if arg.ends_with("ms") { - arg.trim_end_matches("ms").parse::().map(Duration::from_millis) + arg.trim_end_matches("ms").parse().map(Duration::from_millis) } else if arg.ends_with('s') { - arg.trim_end_matches('s').parse::().map(Duration::from_secs) + arg.trim_end_matches('s').parse().map(Duration::from_secs) } else { - arg.parse::().map(Duration::from_secs) + arg.parse().map(Duration::from_secs) } } @@ -75,7 +75,7 @@ pub fn parse_socket_address(value: &str) -> eyre::Result() { + if let Ok(port) = value.parse() { return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port)) } value diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index d3aa51246683..a7e326848391 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -14,12 +14,16 @@ workspace = true # reth reth-ethereum-consensus.workspace = true reth-blockchain-tree-api.workspace = true +reth-codecs.workspace = true +reth-db-api.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-stages-api.workspace = true reth-errors.workspace = true reth-provider.workspace = true reth-tasks.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-payload-validator.workspace = true reth-prune.workspace = true @@ -34,6 +38,7 @@ reth-chainspec = { workspace = true, optional = true } alloy-primitives.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["std"] } alloy-eips.workspace = true +alloy-consensus.workspace = true # async tokio = { workspace = true, features = ["sync"] } @@ -73,15 +78,17 @@ reth-exex-types.workspace = true reth-prune-types.workspace = true reth-chainspec.workspace = true alloy-genesis.workspace = true - assert_matches.workspace = true [features] optimism = [ + "reth-blockchain-tree/optimism", + "reth-codecs/optimism", "reth-chainspec", + "reth-db-api/optimism", + "reth-db/optimism", + "reth-downloaders/optimism", "reth-primitives/optimism", "reth-provider/optimism", - "reth-blockchain-tree/optimism", - "reth-db/optimism", - "reth-db-api/optimism", ] diff --git a/crates/consensus/beacon/src/engine/error.rs b/crates/consensus/beacon/src/engine/error.rs index 5fc6df2b884d..2092ea49f779 100644 --- a/crates/consensus/beacon/src/engine/error.rs +++ b/crates/consensus/beacon/src/engine/error.rs @@ -77,24 +77,3 @@ impl From for BeaconForkChoiceUpdateError { Self::internal(e) } } - -/// Represents all error cases when handling a new payload. 
-/// -/// This represents all possible error cases that must be returned as JSON RCP errors back to the -/// beacon node. -#[derive(Debug, thiserror::Error)] -pub enum BeaconOnNewPayloadError { - /// Thrown when the engine task is unavailable/stopped. - #[error("beacon consensus engine task stopped")] - EngineUnavailable, - /// An internal error occurred, not necessarily related to the payload. - #[error(transparent)] - Internal(Box), -} - -impl BeaconOnNewPayloadError { - /// Create a new internal error. - pub fn internal(e: E) -> Self { - Self::Internal(Box::new(e)) - } -} diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index 975085a32f35..b503e1e102af 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -1,7 +1,8 @@ -use crate::engine::forkchoice::ForkchoiceStatus; +use alloy_consensus::BlockHeader; use alloy_primitives::B256; use alloy_rpc_types_engine::ForkchoiceState; -use reth_primitives::{SealedBlock, SealedHeader}; +use reth_engine_primitives::ForkchoiceStatus; +use reth_primitives::{EthPrimitives, NodePrimitives, SealedBlock, SealedHeader}; use std::{ fmt::{Display, Formatter, Result}, sync::Arc, @@ -10,23 +11,23 @@ use std::{ /// Events emitted by [`crate::BeaconConsensusEngine`]. #[derive(Clone, Debug)] -pub enum BeaconConsensusEngineEvent { +pub enum BeaconConsensusEngineEvent { /// The fork choice state was updated, and the current fork choice status ForkchoiceUpdated(ForkchoiceState, ForkchoiceStatus), /// A block was added to the fork chain. - ForkBlockAdded(Arc, Duration), + ForkBlockAdded(Arc>, Duration), /// A block was added to the canonical chain, and the elapsed time validating the block - CanonicalBlockAdded(Arc, Duration), + CanonicalBlockAdded(Arc>, Duration), /// A canonical chain was committed, and the elapsed time committing the data - CanonicalChainCommitted(Box, Duration), + CanonicalChainCommitted(Box>, Duration), /// The consensus engine is involved in live sync, and has specific progress LiveSyncProgress(ConsensusEngineLiveSyncProgress), } -impl BeaconConsensusEngineEvent { +impl BeaconConsensusEngineEvent { /// Returns the canonical header if the event is a /// [`BeaconConsensusEngineEvent::CanonicalChainCommitted`]. - pub const fn canonical_header(&self) -> Option<&SealedHeader> { + pub const fn canonical_header(&self) -> Option<&SealedHeader> { match self { Self::CanonicalChainCommitted(header, _) => Some(header), _ => None, @@ -34,7 +35,10 @@ impl BeaconConsensusEngineEvent { } } -impl Display for BeaconConsensusEngineEvent { +impl Display for BeaconConsensusEngineEvent +where + N: NodePrimitives, +{ fn fmt(&self, f: &mut Formatter<'_>) -> Result { match self { Self::ForkchoiceUpdated(state, status) => { diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index f8840cf78abb..339f2fb067f7 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -1,14 +1,14 @@ //! 
`BeaconConsensusEngine` external API -use crate::{ - engine::message::OnForkChoiceUpdated, BeaconConsensusEngineEvent, BeaconEngineMessage, - BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, -}; +use crate::{BeaconConsensusEngineEvent, BeaconForkChoiceUpdateError}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; use futures::TryFutureExt; -use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; +use reth_engine_primitives::{ + BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, + OnForkChoiceUpdated, +}; use reth_errors::RethResult; use reth_tokio_util::{EventSender, EventStream}; use tokio::sync::{mpsc::UnboundedSender, oneshot}; diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 89231ed55825..7cd286f659c3 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -6,8 +6,10 @@ use crate::{ }; use alloy_primitives::BlockNumber; use futures::FutureExt; +use reth_codecs::Compact; +use reth_db_api::table::Value; use reth_errors::RethResult; -use reth_primitives::static_file::HighestStaticFiles; +use reth_primitives::{static_file::HighestStaticFiles, NodePrimitives}; use reth_provider::{ BlockReader, ChainStateBlockReader, DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory, @@ -33,7 +35,11 @@ impl StaticFileHook where Provider: StaticFileProviderFactory + DatabaseProviderFactory< - Provider: StageCheckpointReader + BlockReader + ChainStateBlockReader, + Provider: StaticFileProviderFactory< + Primitives: NodePrimitives, + > + StageCheckpointReader + + BlockReader + + ChainStateBlockReader, > + 'static, { /// Create a new instance @@ -145,7 +151,11 @@ impl EngineHook for StaticFileHook where Provider: StaticFileProviderFactory + DatabaseProviderFactory< - Provider: StageCheckpointReader + BlockReader + ChainStateBlockReader, + Provider: StaticFileProviderFactory< + Primitives: NodePrimitives, + > + StageCheckpointReader + + BlockReader + + ChainStateBlockReader, > + 'static, { fn name(&self) -> &'static str { diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index 8a1c95d73ceb..0a72129a6274 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -1,11 +1,12 @@ +use alloy_consensus::Header; use alloy_primitives::B256; use reth_metrics::{ metrics::{Counter, Gauge}, Metrics, }; -use reth_primitives::{Header, SealedHeader}; +use reth_primitives::SealedHeader; use schnellru::{ByLength, LruMap}; -use std::sync::Arc; +use std::{fmt::Debug, sync::Arc}; use tracing::warn; /// The max hit counter for invalid headers in the cache before it is forcefully evicted. @@ -16,20 +17,20 @@ const INVALID_HEADER_HIT_EVICTION_THRESHOLD: u8 = 128; /// Keeps track of invalid headers. #[derive(Debug)] -pub struct InvalidHeaderCache { +pub struct InvalidHeaderCache { /// This maps a header hash to a reference to its invalid ancestor. - headers: LruMap, + headers: LruMap>, /// Metrics for the cache. metrics: InvalidHeaderCacheMetrics, } -impl InvalidHeaderCache { +impl InvalidHeaderCache { /// Invalid header cache constructor. 
pub fn new(max_length: u32) -> Self { Self { headers: LruMap::new(ByLength::new(max_length)), metrics: Default::default() } } - fn insert_entry(&mut self, hash: B256, header: Arc<Header>) { + fn insert_entry(&mut self, hash: B256, header: Arc<H>) { self.headers.insert(hash, HeaderEntry { header, hit_count: 0 }); }
@@ -37,7 +38,7 @@ impl InvalidHeaderCache { /// /// If this is called, the hit count for the entry is incremented. /// If the hit count exceeds the threshold, the entry is evicted and `None` is returned. - pub fn get(&mut self, hash: &B256) -> Option<Arc<Header>> { + pub fn get(&mut self, hash: &B256) -> Option<Arc<H>> { { let entry = self.headers.get(hash)?; entry.hit_count += 1;
@@ -52,11 +53,7 @@ } /// Inserts an invalid block into the cache, with a given invalid ancestor. - pub fn insert_with_invalid_ancestor( - &mut self, - header_hash: B256, - invalid_ancestor: Arc<Header>, - ) { + pub fn insert_with_invalid_ancestor(&mut self, header_hash: B256, invalid_ancestor: Arc<H>) { if self.get(&header_hash).is_none() { warn!(target: "consensus::engine", hash=?header_hash, ?invalid_ancestor, "Bad block with existing invalid ancestor"); self.insert_entry(header_hash, invalid_ancestor);
@@ -68,7 +65,7 @@ } /// Inserts an invalid ancestor into the map. - pub fn insert(&mut self, invalid_ancestor: SealedHeader) { + pub fn insert(&mut self, invalid_ancestor: SealedHeader<H>) { if self.get(&invalid_ancestor.hash()).is_none() { let hash = invalid_ancestor.hash(); let header = invalid_ancestor.unseal();
@@ -82,11 +79,11 @@ } } -struct HeaderEntry { +struct HeaderEntry<H> { /// Keeps track of how many times this header has been hit. hit_count: u8, /// The actual header entry - header: Arc<Header>, + header: Arc<H>, } /// Metrics for the invalid headers cache. @@ -106,14 +103,12 @@ struct InvalidHeaderCacheMetrics { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::Sealable; #[test] fn test_hit_eviction() { let mut cache = InvalidHeaderCache::new(10); - let sealed = Header::default().seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + let header = Header::default(); + let header = SealedHeader::seal(header); cache.insert(header.clone()); assert_eq!(cache.headers.get(&header.hash()).unwrap().hit_count, 0); diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 65904196e1c1..7a894f08e1c7 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,3 +1,4 @@ +use alloy_consensus::{BlockHeader, Header}; use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash}; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ @@ -10,17 +11,22 @@ use reth_blockchain_tree_api::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, }; -use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes, PayloadTypes}; +use reth_engine_primitives::{ + BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, + ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus, OnForkChoiceUpdated, + PayloadTypes, +}; use reth_errors::{BlockValidationError, ProviderResult, RethError, RethResult}; use reth_network_p2p::{ sync::{NetworkSyncUpdater, SyncState}, - BlockClient, + EthBlockClient, }; -use reth_node_types::NodeTypesWithEngine; +use reth_node_types::{Block, BlockTy, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_primitives::{PayloadAttributes, PayloadBuilder, PayloadBuilderAttributes}; +use reth_payload_builder_primitives::PayloadBuilder; +use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{Head, Header, SealedBlock, SealedHeader}; +use reth_primitives::{EthPrimitives, Head, SealedBlock, SealedHeader}; use reth_provider::{ providers::ProviderNodeTypes, BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, StageCheckpointReader, @@ -41,14 +47,8 @@ use tokio::sync::{ use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; -mod message; -pub use message::{BeaconEngineMessage, OnForkChoiceUpdated}; - mod error; -pub use error::{ - BeaconConsensusEngineError, BeaconEngineResult, BeaconForkChoiceUpdateError, - BeaconOnNewPayloadError, -}; +pub use error::{BeaconConsensusEngineError, BeaconEngineResult, BeaconForkChoiceUpdateError}; mod invalid_headers; pub use invalid_headers::InvalidHeaderCache; @@ -59,9 +59,6 @@ pub use event::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; mod handle; pub use handle::BeaconConsensusEngineHandle; -mod forkchoice; -pub use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus}; - mod metrics; use metrics::EngineMetrics; @@ -87,9 +84,15 @@ const MAX_INVALID_HEADERS: u32 = 512u32; pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; /// Helper trait expressing requirements for node types to be used in engine. 
-pub trait EngineNodeTypes: ProviderNodeTypes + NodeTypesWithEngine {} +pub trait EngineNodeTypes: + ProviderNodeTypes + NodeTypesWithEngine +{ +} -impl EngineNodeTypes for T where T: ProviderNodeTypes + NodeTypesWithEngine {} +impl EngineNodeTypes for T where + T: ProviderNodeTypes + NodeTypesWithEngine +{ +} /// Represents a pending forkchoice update. /// @@ -174,7 +177,7 @@ type PendingForkchoiceUpdate = pub struct BeaconConsensusEngine where N: EngineNodeTypes, - Client: BlockClient, + Client: EthBlockClient, BT: BlockchainTreeEngine + BlockReader + BlockIdReader @@ -231,13 +234,13 @@ impl BeaconConsensusEngine where N: EngineNodeTypes, BT: BlockchainTreeEngine - + BlockReader + + BlockReader> + BlockIdReader + CanonChainTracker + StageCheckpointReader + ChainSpecProvider + 'static, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Create a new instance of the [`BeaconConsensusEngine`]. #[allow(clippy::too_many_arguments)] @@ -949,7 +952,7 @@ where .blockchain .find_block_by_hash(safe_block_hash, BlockSource::Any)? .ok_or(ProviderError::UnknownBlockHash(safe_block_hash))?; - self.blockchain.set_safe(SealedHeader::new(safe.header, safe_block_hash)); + self.blockchain.set_safe(SealedHeader::new(safe.split().0, safe_block_hash)); } Ok(()) } @@ -969,9 +972,9 @@ where .blockchain .find_block_by_hash(finalized_block_hash, BlockSource::Any)? .ok_or(ProviderError::UnknownBlockHash(finalized_block_hash))?; - self.blockchain.finalize_block(finalized.number)?; + self.blockchain.finalize_block(finalized.header().number())?; self.blockchain - .set_finalized(SealedHeader::new(finalized.header, finalized_block_hash)); + .set_finalized(SealedHeader::new(finalized.split().0, finalized_block_hash)); } Ok(()) } @@ -1799,9 +1802,9 @@ where impl Future for BeaconConsensusEngine where N: EngineNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, BT: BlockchainTreeEngine - + BlockReader + + BlockReader> + BlockIdReader + CanonChainTracker + StageCheckpointReader @@ -1994,7 +1997,9 @@ mod tests { use alloy_rpc_types_engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; use assert_matches::assert_matches; use reth_chainspec::{ChainSpecBuilder, MAINNET}; - use reth_provider::{BlockWriter, ProviderFactory}; + use reth_node_types::FullNodePrimitives; + use reth_primitives::BlockExt; + use reth_provider::{BlockWriter, ProviderFactory, StorageLocation}; use reth_rpc_types_compat::engine::payload::block_to_payload_v1; use reth_stages::{ExecOutput, PipelineError, StageError}; use reth_stages_api::StageCheckpoint; @@ -2172,7 +2177,10 @@ mod tests { assert_matches!(rx.await, Ok(Ok(()))); } - fn insert_blocks<'a, N: ProviderNodeTypes>( + fn insert_blocks< + 'a, + N: ProviderNodeTypes>, + >( provider_factory: ProviderFactory, mut blocks: impl Iterator, ) { @@ -2182,6 +2190,7 @@ mod tests { provider .insert_block( b.clone().try_seal_with_senders().expect("invalid tx signature in block"), + StorageLocation::Database, ) .map(drop) }) @@ -2876,7 +2885,7 @@ mod tests { block1.header.set_difficulty( MAINNET.fork(EthereumHardfork::Paris).ttd().unwrap() - U256::from(1), ); - block1 = block1.unseal().seal_slow(); + block1 = block1.unseal::().seal_slow(); let (block2, exec_result2) = data.blocks[1].clone(); let mut block2 = block2.unseal().block; block2.body.withdrawals = None; diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 9426ca19712f..861aeebf1eb8 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ 
b/crates/consensus/beacon/src/engine/sync.rs @@ -4,13 +4,14 @@ use crate::{ engine::metrics::EngineSyncMetrics, BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, EthBeaconConsensus, }; +use alloy_consensus::Header; use alloy_primitives::{BlockNumber, B256}; use futures::FutureExt; use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, - BlockClient, + EthBlockClient, }; -use reth_primitives::SealedBlock; +use reth_primitives::{BlockBody, EthPrimitives, NodePrimitives, SealedBlock}; use reth_provider::providers::ProviderNodeTypes; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; use reth_tasks::TaskSpawner; @@ -34,7 +35,7 @@ use tracing::trace; pub(crate) struct EngineSyncController where N: ProviderNodeTypes, - Client: BlockClient, + Client: EthBlockClient, { /// A downloader that can download full blocks from the network. full_block_client: FullBlockClient, @@ -64,7 +65,7 @@ where impl EngineSyncController where N: ProviderNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Create a new instance pub(crate) fn new( @@ -345,25 +346,33 @@ where /// A wrapper type around [`SealedBlock`] that implements the [Ord] trait by block number. #[derive(Debug, Clone, PartialEq, Eq)] -struct OrderedSealedBlock(SealedBlock); +struct OrderedSealedBlock(SealedBlock); -impl PartialOrd for OrderedSealedBlock { +impl PartialOrd for OrderedSealedBlock +where + H: reth_primitives_traits::BlockHeader + 'static, + B: reth_primitives_traits::BlockBody + 'static, +{ fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedSealedBlock { +impl Ord for OrderedSealedBlock +where + H: reth_primitives_traits::BlockHeader + 'static, + B: reth_primitives_traits::BlockBody + 'static, +{ fn cmp(&self, other: &Self) -> Ordering { - self.0.number.cmp(&other.0.number) + self.0.number().cmp(&other.0.number()) } } /// The event type emitted by the [`EngineSyncController`]. #[derive(Debug)] -pub(crate) enum EngineSyncEvent { +pub(crate) enum EngineSyncEvent { /// A full block has been downloaded from the network. - FetchedFullBlock(SealedBlock), + FetchedFullBlock(SealedBlock), /// Pipeline started syncing /// /// This is none if the pipeline is triggered without a specific target. 
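The `OrderedSealedBlock` change above widens the wrapper from the concrete `Header` to any header type exposing `number()`, while keeping the by-height ordering. A self-contained sketch of the same pattern, with simplified stand-ins for the reth traits and types:

```rust
use std::cmp::{Ordering, Reverse};
use std::collections::BinaryHeap;

// Simplified stand-ins for the reth traits/types; only `number()` matters here.
trait BlockHeader {
    fn number(&self) -> u64;
}

struct SealedBlock<H, B> {
    header: H,
    body: B,
}

/// Mirrors `OrderedSealedBlock<H, B>`: ordering is by block number only.
struct OrderedSealedBlock<H, B>(SealedBlock<H, B>);

impl<H: BlockHeader, B> PartialEq for OrderedSealedBlock<H, B> {
    fn eq(&self, other: &Self) -> bool {
        // The real type derives structural equality; comparing numbers keeps
        // this sketch's `Eq` consistent with its `Ord`.
        self.0.header.number() == other.0.header.number()
    }
}
impl<H: BlockHeader, B> Eq for OrderedSealedBlock<H, B> {}

impl<H: BlockHeader, B> PartialOrd for OrderedSealedBlock<H, B> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl<H: BlockHeader, B> Ord for OrderedSealedBlock<H, B> {
    fn cmp(&self, other: &Self) -> Ordering {
        self.0.header.number().cmp(&other.0.header.number())
    }
}

impl BlockHeader for u64 {
    fn number(&self) -> u64 {
        *self
    }
}

fn main() {
    // Wrapped in `Reverse`, a max-heap pops the lowest block number first,
    // which is the order a range sync wants to feed blocks downstream.
    let mut heap = BinaryHeap::new();
    for n in [3u64, 1, 2] {
        heap.push(Reverse(OrderedSealedBlock(SealedBlock { header: n, body: () })));
    }
    let order: Vec<u64> = std::iter::from_fn(|| heap.pop())
        .map(|Reverse(b)| b.0.header.number())
        .collect();
    assert_eq!(order, vec![1, 2, 3]);
}
```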
@@ -410,12 +419,12 @@ impl PipelineState { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::Sealable; + use alloy_consensus::Header; use assert_matches::assert_matches; use futures::poll; use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET}; use reth_network_p2p::{either::Either, test_utils::TestFullBlockClient}; - use reth_primitives::{BlockBody, Header, SealedHeader}; + use reth_primitives::{BlockBody, SealedHeader}; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, ExecutionOutcome, @@ -522,7 +531,7 @@ mod tests { ) -> EngineSyncController> where N: ProviderNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { let client = self .client @@ -599,9 +608,7 @@ mod tests { header.parent_hash = hash; header.number += 1; header.timestamp += 1; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - sealed_header = SealedHeader::new(header, seal); + sealed_header = SealedHeader::seal(header); client.insert(sealed_header.clone(), body.clone()); } } @@ -617,14 +624,12 @@ mod tests { ); let client = TestFullBlockClient::default(); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(7), gas_limit: chain_spec.max_gas_limit, ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); insert_headers_into_client(&client, header, 0..10); // set up a pipeline diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 6e03aebfa8db..0ebef1efe6e6 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -2,9 +2,9 @@ use crate::{ engine::hooks::PruneHook, hooks::EngineHooks, BeaconConsensusEngine, BeaconConsensusEngineError, BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, - BeaconOnNewPayloadError, EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, + EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, }; -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; @@ -19,12 +19,14 @@ use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; -use reth_engine_primitives::EngineApiMessageVersion; +use reth_engine_primitives::{BeaconOnNewPayloadError, EngineApiMessageVersion}; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm::{either::Either, test_utils::MockExecutorProvider}; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex_types::FinishedExExHeight; -use reth_network_p2p::{sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, BlockClient}; +use reth_network_p2p::{ + sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, EthBlockClient, +}; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::SealedHeader; use reth_provider::{ @@ -237,7 +239,7 @@ impl TestConsensusEngineBuilder { client: Client, ) -> NetworkedTestConsensusEngineBuilder where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { NetworkedTestConsensusEngineBuilder { base_config: self, client: Some(client) } } @@ -264,7 +266,7 @@ pub struct NetworkedTestConsensusEngineBuilder { impl NetworkedTestConsensusEngineBuilder where - Client: 
BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Set the pipeline execution outputs to use for the test consensus engine. #[allow(dead_code)] @@ -319,7 +321,7 @@ where client: ClientType, ) -> NetworkedTestConsensusEngineBuilder where - ClientType: BlockClient + 'static, + ClientType: EthBlockClient + 'static, { NetworkedTestConsensusEngineBuilder { base_config: self.base_config, client: Some(client) } } @@ -368,7 +370,7 @@ where .with_tip_sender(tip_tx), TestPipelineConfig::Real => { let header_downloader = ReverseHeadersDownloaderBuilder::default() - .build(client.clone(), consensus.clone()) + .build(client.clone(), consensus.clone().as_header_validator()) .into_task(); let body_downloader = BodiesDownloaderBuilder::default() @@ -400,9 +402,8 @@ where BlockchainTree::new(externals, BlockchainTreeConfig::new(1, 2, 3, 2)) .expect("failed to create tree"), )); - let sealed = self.base_config.chain_spec.genesis_header().clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - let genesis_block = SealedHeader::new(header, seal); + let header = self.base_config.chain_spec.genesis_header().clone(); + let genesis_block = SealedHeader::seal(header); let blockchain_provider = BlockchainProvider::with_blocks( provider_factory.clone(), @@ -450,7 +451,7 @@ pub fn spawn_consensus_engine( engine: TestBeaconConsensusEngine, ) -> oneshot::Receiver> where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { let (tx, rx) = oneshot::channel(); tokio::spawn(async move { diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index c83312577e9e..272adbb9297a 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -19,6 +19,7 @@ reth-consensus.workspace = true # ethereum alloy-primitives.workspace = true revm-primitives.workspace = true +reth-primitives-traits.workspace = true alloy-consensus.workspace = true alloy-eips.workspace = true diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 5a74433e58bb..9e7f8d451fff 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,19 +1,24 @@ //! Collection of methods for block validation. -use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE; -use alloy_eips::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader}; +use alloy_eips::{ + calc_next_block_base_fee, + eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, +}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_primitives::{EthereumHardfork, GotExpected, Header, SealedBlock, SealedHeader}; +use reth_primitives::{ + BlockBody, BlockBodyTxExt, EthereumHardfork, GotExpected, SealedBlock, SealedHeader, +}; use revm_primitives::calc_excess_blob_gas; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. 
#[inline] -pub const fn validate_header_gas(header: &Header) -> Result<(), ConsensusError> { - if header.gas_used > header.gas_limit { +pub fn validate_header_gas(header: &H) -> Result<(), ConsensusError> { + if header.gas_used() > header.gas_limit() { return Err(ConsensusError::HeaderGasUsedExceedsGasLimit { - gas_used: header.gas_used, - gas_limit: header.gas_limit, + gas_used: header.gas_used(), + gas_limit: header.gas_limit(), }) } Ok(()) @@ -21,12 +26,12 @@ pub const fn validate_header_gas(header: &Header) -> Result<(), ConsensusError> /// Ensure the EIP-1559 base fee is set if the London hardfork is active. #[inline] -pub fn validate_header_base_fee( - header: &Header, +pub fn validate_header_base_fee( + header: &H, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { - if chain_spec.is_fork_active_at_block(EthereumHardfork::London, header.number) && - header.base_fee_per_gas.is_none() + if chain_spec.is_fork_active_at_block(EthereumHardfork::London, header.number()) && + header.base_fee_per_gas().is_none() { return Err(ConsensusError::BaseFeeMissing) } @@ -39,15 +44,16 @@ pub fn validate_header_base_fee( /// /// [EIP-4895]: https://eips.ethereum.org/EIPS/eip-4895 #[inline] -pub fn validate_shanghai_withdrawals(block: &SealedBlock) -> Result<(), ConsensusError> { - let withdrawals = - block.body.withdrawals.as_ref().ok_or(ConsensusError::BodyWithdrawalsMissing)?; +pub fn validate_shanghai_withdrawals( + block: &SealedBlock, +) -> Result<(), ConsensusError> { + let withdrawals = block.body.withdrawals().ok_or(ConsensusError::BodyWithdrawalsMissing)?; let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); let header_withdrawals_root = - block.withdrawals_root.as_ref().ok_or(ConsensusError::WithdrawalsRootMissing)?; + block.withdrawals_root().ok_or(ConsensusError::WithdrawalsRootMissing)?; if withdrawals_root != *header_withdrawals_root { return Err(ConsensusError::BodyWithdrawalsRootDiff( - GotExpected { got: withdrawals_root, expected: *header_withdrawals_root }.into(), + GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), )); } Ok(()) @@ -73,6 +79,49 @@ pub fn validate_cancun_gas(block: &SealedBlock) -> Result<(), ConsensusError> { Ok(()) } +/// Ensures the block response data matches the header. 
+/// +/// This ensures the body response items match the header's hashes: +/// - ommer hash +/// - transaction root +/// - withdrawals root +pub fn validate_body_against_header( + body: &BlockBody, + header: &SealedHeader, +) -> Result<(), ConsensusError> { + let ommers_hash = body.calculate_ommers_root(); + if header.ommers_hash != ommers_hash { + return Err(ConsensusError::BodyOmmersHashDiff( + GotExpected { got: ommers_hash, expected: header.ommers_hash }.into(), + )) + } + + let tx_root = body.calculate_tx_root(); + if header.transactions_root != tx_root { + return Err(ConsensusError::BodyTransactionRootDiff( + GotExpected { got: tx_root, expected: header.transactions_root }.into(), + )) + } + + match (header.withdrawals_root, &body.withdrawals) { + (Some(header_withdrawals_root), Some(withdrawals)) => { + let withdrawals = withdrawals.as_slice(); + let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); + if withdrawals_root != header_withdrawals_root { + return Err(ConsensusError::BodyWithdrawalsRootDiff( + GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), + )) + } + } + (None, None) => { + // this is ok because we assume the fork is not active in this case + } + _ => return Err(ConsensusError::WithdrawalsRootUnexpected), + } + + Ok(()) +} + /// Validate a block without regard for state: /// /// - Compares the ommer hash in the block header to the block body @@ -116,11 +165,11 @@ pub fn validate_block_pre_execution( /// * `blob_gas_used` is less than or equal to `MAX_DATA_GAS_PER_BLOCK` /// * `blob_gas_used` is a multiple of `DATA_GAS_PER_BLOB` /// * `excess_blob_gas` is a multiple of `DATA_GAS_PER_BLOB` -pub fn validate_4844_header_standalone(header: &Header) -> Result<(), ConsensusError> { - let blob_gas_used = header.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)?; - let excess_blob_gas = header.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)?; +pub fn validate_4844_header_standalone(header: &H) -> Result<(), ConsensusError> { + let blob_gas_used = header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + let excess_blob_gas = header.excess_blob_gas().ok_or(ConsensusError::ExcessBlobGasMissing)?; - if header.parent_beacon_block_root.is_none() { + if header.parent_beacon_block_root().is_none() { return Err(ConsensusError::ParentBeaconBlockRootMissing) } @@ -155,8 +204,8 @@ pub fn validate_4844_header_standalone(header: &Header) -> Result<(), ConsensusE /// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block. /// This must be 32 bytes or fewer; formally Hx. #[inline] -pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> { - let extradata_len = header.extra_data.len(); +pub fn validate_header_extradata(header: &H) -> Result<(), ConsensusError> { + let extradata_len = header.extra_data().len(); if extradata_len > MAXIMUM_EXTRA_DATA_SIZE { Err(ConsensusError::ExtraDataExceedsMax { len: extradata_len }) } else { @@ -169,21 +218,21 @@ pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> /// This function ensures that the header block number is sequential and that the hash of the parent /// header matches the parent hash in the header. #[inline] -pub fn validate_against_parent_hash_number( - header: &Header, +pub fn validate_against_parent_hash_number( + header: &H, parent: &SealedHeader, ) -> Result<(), ConsensusError> { // Parent number is consistent. 
- if parent.number + 1 != header.number { + if parent.number + 1 != header.number() { return Err(ConsensusError::ParentBlockNumberMismatch { parent_block_number: parent.number, - block_number: header.number, + block_number: header.number(), }) } - if parent.hash() != header.parent_hash { + if parent.hash() != header.parent_hash() { return Err(ConsensusError::ParentHashMismatch( - GotExpected { got: header.parent_hash, expected: parent.hash() }.into(), + GotExpected { got: header.parent_hash(), expected: parent.hash() }.into(), )) } @@ -192,23 +241,30 @@ pub fn validate_against_parent_hash_number( /// Validates the base fee against the parent and EIP-1559 rules. #[inline] -pub fn validate_against_parent_eip1559_base_fee( - header: &Header, - parent: &Header, +pub fn validate_against_parent_eip1559_base_fee< + H: BlockHeader, + ChainSpec: EthChainSpec + EthereumHardforks, +>( + header: &H, + parent: &H, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { - if chain_spec.fork(EthereumHardfork::London).active_at_block(header.number) { - let base_fee = header.base_fee_per_gas.ok_or(ConsensusError::BaseFeeMissing)?; + if chain_spec.fork(EthereumHardfork::London).active_at_block(header.number()) { + let base_fee = header.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; let expected_base_fee = - if chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number) { + if chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number()) { alloy_eips::eip1559::INITIAL_BASE_FEE } else { // This BaseFeeMissing will not happen as previous blocks are checked to have // them. - parent - .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(header.timestamp)) - .ok_or(ConsensusError::BaseFeeMissing)? + let base_fee = parent.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; + calc_next_block_base_fee( + parent.gas_used(), + parent.gas_limit(), + base_fee, + chain_spec.base_fee_params_at_timestamp(header.timestamp()), + ) }; if expected_base_fee != base_fee { return Err(ConsensusError::BaseFeeDiff(GotExpected { @@ -223,14 +279,14 @@ pub fn validate_against_parent_eip1559_base_fee( + header: &H, + parent: &H, ) -> Result<(), ConsensusError> { - if header.timestamp <= parent.timestamp { + if header.timestamp() <= parent.timestamp() { return Err(ConsensusError::TimestampIsInPast { - parent_timestamp: parent.timestamp, - timestamp: header.timestamp, + parent_timestamp: parent.timestamp(), + timestamp: header.timestamp(), }) } Ok(()) @@ -240,9 +296,9 @@ pub const fn validate_against_parent_timestamp( /// ensures that the `blob_gas_used` and `excess_blob_gas` fields exist in the child header, and /// that the `excess_blob_gas` field matches the expected `excess_blob_gas` calculated from the /// parent header fields. -pub fn validate_against_parent_4844( - header: &Header, - parent: &Header, +pub fn validate_against_parent_4844( + header: &H, + parent: &H, ) -> Result<(), ConsensusError> { // From [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#header-extension): // @@ -250,13 +306,13 @@ pub fn validate_against_parent_4844( // > are evaluated as 0. // // This means in the first post-fork block, calc_excess_blob_gas will return 0. 
- let parent_blob_gas_used = parent.blob_gas_used.unwrap_or(0); - let parent_excess_blob_gas = parent.excess_blob_gas.unwrap_or(0); + let parent_blob_gas_used = parent.blob_gas_used().unwrap_or(0); + let parent_excess_blob_gas = parent.excess_blob_gas().unwrap_or(0); - if header.blob_gas_used.is_none() { + if header.blob_gas_used().is_none() { return Err(ConsensusError::BlobGasUsedMissing) } - let excess_blob_gas = header.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)?; + let excess_blob_gas = header.excess_blob_gas().ok_or(ConsensusError::ExcessBlobGasMissing)?; let expected_excess_blob_gas = calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used); @@ -274,14 +330,14 @@ pub fn validate_against_parent_4844( #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{TxEip4844, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; + use alloy_consensus::{Header, TxEip4844, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, }; use alloy_primitives::{ hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, PrimitiveSignature as Signature, - Sealable, U256, + U256, }; use mockall::mock; use rand::Rng; @@ -407,7 +463,7 @@ mod tests { let signature = Signature::new(U256::default(), U256::default(), true); - TransactionSigned::from_transaction_and_signature(request, signature) + TransactionSigned::new_unhashed(request, signature) } /// got test block @@ -450,12 +506,9 @@ mod tests { let ommers = Vec::new(); let transactions = Vec::new(); - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - ( SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { transactions, ommers, withdrawals: None }, }, parent, @@ -474,15 +527,13 @@ mod tests { .collect(), ); - let sealed = Header { + let header = Header { withdrawals_root: Some(proofs::calculate_withdrawals_root(&withdrawals)), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); + }; SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { withdrawals: Some(withdrawals), ..Default::default() }, } }; @@ -513,16 +564,14 @@ mod tests { // create a tx with 10 blobs let transaction = mock_blob_tx(1, 10); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(1337), withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), blob_gas_used: Some(1), transactions_root: proofs::calculate_transaction_root(&[transaction.clone()]), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); let body = BlockBody { transactions: vec![transaction], diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index 2faf3f2ac719..55188dd8472b 100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -13,10 +13,12 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true +reth-primitives-traits.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true # misc auto_impl.workspace = true @@ -26,9 +28,13 @@ derive_more.workspace = true default = ["std"] std = [ "reth-primitives/std", + "reth-primitives-traits/std", "alloy-primitives/std", - "alloy-eips/std" + "alloy-eips/std", + "alloy-consensus/std", + "reth-primitives-traits/std" ] 
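For reference, the EIP-4844 parent check that `validate_against_parent_4844` performs above boils down to recomputing the child's `excess_blob_gas` from the parent's fields. A minimal sketch with plain integers instead of header types (the constant is the Cancun mainnet value and the error type is simplified, both assumptions of this sketch):

```rust
/// Cancun mainnet target: 3 blobs * 131_072 data gas per blob.
const TARGET_DATA_GAS_PER_BLOCK: u64 = 393_216;

/// Mirror of the `calc_excess_blob_gas` helper used above; on the first
/// post-fork block the missing parent fields are treated as zero, so this
/// returns 0 there.
fn calc_excess_blob_gas(parent_excess_blob_gas: u64, parent_blob_gas_used: u64) -> u64 {
    (parent_excess_blob_gas + parent_blob_gas_used).saturating_sub(TARGET_DATA_GAS_PER_BLOCK)
}

/// Sketch of the parent check: both fields must exist on the child and the
/// child's `excess_blob_gas` must equal the value derived from the parent.
fn validate_against_parent_4844(
    header_blob_gas_used: Option<u64>,
    header_excess_blob_gas: Option<u64>,
    parent_blob_gas_used: Option<u64>,
    parent_excess_blob_gas: Option<u64>,
) -> Result<(), String> {
    if header_blob_gas_used.is_none() {
        return Err("blob gas used missing".to_string());
    }
    let excess_blob_gas = header_excess_blob_gas.ok_or("excess blob gas missing")?;
    let expected = calc_excess_blob_gas(
        parent_excess_blob_gas.unwrap_or(0),
        parent_blob_gas_used.unwrap_or(0),
    );
    if excess_blob_gas != expected {
        return Err(format!("excess blob gas diff: got {excess_blob_gas}, expected {expected}"));
    }
    Ok(())
}

fn main() {
    // First post-Cancun block: parent has no blob fields, so excess must be 0.
    assert!(validate_against_parent_4844(Some(0), Some(0), None, None).is_ok());
    // A parent that used 6 blobs on top of 0 excess pushes the child's excess up.
    assert!(validate_against_parent_4844(Some(0), Some(393_216), Some(786_432), Some(0)).is_ok());
}
```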
test-utils = [ - "reth-primitives/test-utils" + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils" ] diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 4bf5da3b152f..3ad53456cbdf 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -11,13 +11,15 @@ extern crate alloc; -use alloc::{fmt::Debug, vec::Vec}; +use alloc::{fmt::Debug, sync::Arc, vec::Vec}; +use alloy_consensus::Header; use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; use reth_primitives::{ - constants::MINIMUM_GAS_LIMIT, BlockWithSenders, GotExpected, GotExpectedBoxed, Header, - InvalidTransactionError, Receipt, SealedBlock, SealedHeader, + BlockBody, BlockWithSenders, GotExpected, GotExpectedBoxed, InvalidTransactionError, Receipt, + SealedBlock, SealedHeader, }; +use reth_primitives_traits::constants::MINIMUM_GAS_LIMIT; /// A consensus implementation that does nothing. pub mod noop; @@ -28,27 +30,64 @@ pub mod test_utils; /// Post execution input passed to [`Consensus::validate_block_post_execution`]. #[derive(Debug)] -pub struct PostExecutionInput<'a> { +pub struct PostExecutionInput<'a, R = Receipt> { /// Receipts of the block. - pub receipts: &'a [Receipt], + pub receipts: &'a [R], /// EIP-7685 requests of the block. pub requests: &'a Requests, } -impl<'a> PostExecutionInput<'a> { +impl<'a, R> PostExecutionInput<'a, R> { /// Creates a new instance of `PostExecutionInput`. - pub const fn new(receipts: &'a [Receipt], requests: &'a Requests) -> Self { + pub const fn new(receipts: &'a [R], requests: &'a Requests) -> Self { Self { receipts, requests } } } /// Consensus is a protocol that chooses canonical chain. #[auto_impl::auto_impl(&, Arc)] -pub trait Consensus: Debug + Send + Sync { +pub trait Consensus: + AsHeaderValidator + HeaderValidator + Debug + Send + Sync +{ + /// Ensures that body field values match the header. + fn validate_body_against_header( + &self, + body: &B, + header: &SealedHeader, + ) -> Result<(), ConsensusError>; + + /// Validate a block disregarding world state, i.e. things that can be checked before sender + /// recovery and execution. + /// + /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and + /// 11.1 "Ommer Validation". + /// + /// **This should not be called for the genesis block**. + /// + /// Note: validating blocks does not include other validations of the Consensus + fn validate_block_pre_execution(&self, block: &SealedBlock) + -> Result<(), ConsensusError>; + + /// Validate a block considering world state, i.e. things that can not be checked before + /// execution. + /// + /// See the Yellow Paper sections 4.3.2 "Holistic Validity". + /// + /// Note: validating blocks does not include other validations of the Consensus + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + input: PostExecutionInput<'_>, + ) -> Result<(), ConsensusError>; +} + +/// HeaderValidator is a protocol that validates headers and their relationships. +#[auto_impl::auto_impl(&, Arc)] +pub trait HeaderValidator: Debug + Send + Sync { /// Validate if header is correct and follows consensus specification. /// /// This is called on standalone header to check if all hashes are correct. 
- fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError>; + fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError>; /// Validate that the header information regarding parent are correct. /// This checks the block number, timestamp, basefee and gas limit increment. @@ -58,11 +97,12 @@ pub trait Consensus: Debug + Send + Sync { /// /// **This should not be called for the genesis block**. /// - /// Note: Validating header against its parent does not include other Consensus validations. + /// Note: Validating header against its parent does not include other HeaderValidator + /// validations. fn validate_header_against_parent( &self, - header: &SealedHeader, - parent: &SealedHeader, + header: &SealedHeader, + parent: &SealedHeader, ) -> Result<(), ConsensusError>; /// Validates the given headers @@ -71,7 +111,13 @@ pub trait Consensus: Debug + Send + Sync { /// on its own and valid against its parent. /// /// Note: this expects that the headers are in natural order (ascending block number) - fn validate_header_range(&self, headers: &[SealedHeader]) -> Result<(), HeaderConsensusError> { + fn validate_header_range( + &self, + headers: &[SealedHeader], + ) -> Result<(), HeaderConsensusError> + where + H: Clone, + { if let Some((initial_header, remaining_headers)) = headers.split_first() { self.validate_header(initial_header) .map_err(|e| HeaderConsensusError(e, initial_header.clone()))?; @@ -91,35 +137,29 @@ pub trait Consensus: Debug + Send + Sync { /// /// Some consensus engines may want to do additional checks here. /// - /// Note: validating headers with TD does not include other Consensus validation. + /// Note: validating headers with TD does not include other HeaderValidator validation. fn validate_header_with_total_difficulty( &self, - header: &Header, + header: &H, total_difficulty: U256, ) -> Result<(), ConsensusError>; +} - /// Validate a block disregarding world state, i.e. things that can be checked before sender - /// recovery and execution. - /// - /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and - /// 11.1 "Ommer Validation". - /// - /// **This should not be called for the genesis block**. - /// - /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError>; +/// Helper trait to cast `Arc` to `Arc` +pub trait AsHeaderValidator: HeaderValidator { + /// Converts the [`Arc`] of self to [`Arc`] of [`HeaderValidator`] + fn as_header_validator<'a>(self: Arc) -> Arc + 'a> + where + Self: 'a; +} - /// Validate a block considering world state, i.e. things that can not be checked before - /// execution. - /// - /// See the Yellow Paper sections 4.3.2 "Holistic Validity". - /// - /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError>; +impl, H> AsHeaderValidator for T { + fn as_header_validator<'a>(self: Arc) -> Arc + 'a> + where + Self: 'a, + { + self + } } /// Consensus Errors @@ -407,4 +447,4 @@ impl From for ConsensusError { /// `HeaderConsensusError` combines a `ConsensusError` with the `SealedHeader` it relates to. 
#[derive(derive_more::Display, derive_more::Error, Debug)] #[display("Consensus error: {_0}, Invalid header: {_1:?}")] -pub struct HeaderConsensusError(ConsensusError, SealedHeader); +pub struct HeaderConsensusError(ConsensusError, SealedHeader); diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index 53bdb72afb2d..6d12af08d519 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -1,34 +1,47 @@ -use crate::{Consensus, ConsensusError, PostExecutionInput}; +use crate::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; use alloy_primitives::U256; -use reth_primitives::{BlockWithSenders, Header, SealedBlock, SealedHeader}; +use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; /// A Consensus implementation that does nothing. #[derive(Debug, Copy, Clone, Default)] #[non_exhaustive] pub struct NoopConsensus; -impl Consensus for NoopConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { +impl HeaderValidator for NoopConsensus { + fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { Ok(()) } fn validate_header_against_parent( &self, - _header: &SealedHeader, - _parent: &SealedHeader, + _header: &SealedHeader, + _parent: &SealedHeader, ) -> Result<(), ConsensusError> { Ok(()) } fn validate_header_with_total_difficulty( &self, - _header: &Header, + _header: &H, _total_difficulty: U256, ) -> Result<(), ConsensusError> { Ok(()) } +} + +impl Consensus for NoopConsensus { + fn validate_body_against_header( + &self, + _body: &B, + _header: &SealedHeader, + ) -> Result<(), ConsensusError> { + Ok(()) + } - fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + fn validate_block_pre_execution( + &self, + _block: &SealedBlock, + ) -> Result<(), ConsensusError> { Ok(()) } diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index 436947209178..ba683dd255f8 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -1,18 +1,25 @@ -use crate::{Consensus, ConsensusError, PostExecutionInput}; +use crate::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; use alloy_primitives::U256; use core::sync::atomic::{AtomicBool, Ordering}; -use reth_primitives::{BlockWithSenders, Header, SealedBlock, SealedHeader}; +use reth_primitives::{BlockWithSenders, SealedBlock, SealedHeader}; /// Consensus engine implementation for testing #[derive(Debug)] pub struct TestConsensus { /// Flag whether the header validation should purposefully fail fail_validation: AtomicBool, + /// Separate flag for setting whether `validate_body_against_header` should fail. It is needed + /// for testing networking logic, where a body that fails this check is rejected outright, + /// while higher-level failures are handled by the sync logic. + fail_body_against_header: AtomicBool, } impl Default for TestConsensus { fn default() -> Self { - Self { fail_validation: AtomicBool::new(false) } + Self { + fail_validation: AtomicBool::new(false), + fail_body_against_header: AtomicBool::new(false), + } } } @@ -24,23 +31,37 @@ impl TestConsensus { /// Update the validation flag.
pub fn set_fail_validation(&self, val: bool) { - self.fail_validation.store(val, Ordering::SeqCst) + self.fail_validation.store(val, Ordering::SeqCst); + self.fail_body_against_header.store(val, Ordering::SeqCst); + } + + /// Returns the body validation flag. + pub fn fail_body_against_header(&self) -> bool { + self.fail_body_against_header.load(Ordering::SeqCst) + } + + /// Update the body validation flag. + pub fn set_fail_body_against_header(&self, val: bool) { + self.fail_body_against_header.store(val, Ordering::SeqCst); } } -impl Consensus for TestConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { - if self.fail_validation() { +impl Consensus for TestConsensus { + fn validate_body_against_header( + &self, + _body: &B, + _header: &SealedHeader, + ) -> Result<(), ConsensusError> { + if self.fail_body_against_header() { Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } } - fn validate_header_against_parent( + fn validate_block_pre_execution( &self, - _header: &SealedHeader, - _parent: &SealedHeader, + _block: &SealedBlock, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) @@ -49,10 +70,10 @@ impl Consensus for TestConsensus { } } - fn validate_header_with_total_difficulty( + fn validate_block_post_execution( &self, - _header: &Header, - _total_difficulty: U256, + _block: &BlockWithSenders, + _input: PostExecutionInput<'_>, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) @@ -60,8 +81,22 @@ impl Consensus for TestConsensus { Ok(()) } } +} + +impl HeaderValidator for TestConsensus { + fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } - fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + fn validate_header_against_parent( + &self, + _header: &SealedHeader, + _parent: &SealedHeader, + ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { @@ -69,10 +104,10 @@ impl Consensus for TestConsensus { } } - fn validate_block_post_execution( + fn validate_header_with_total_difficulty( &self, - _block: &BlockWithSenders, - _input: PostExecutionInput<'_>, + _header: &H, + _total_difficulty: U256, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index e56449551bbc..bedacbecd759 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -11,19 +11,31 @@ repository.workspace = true workspace = true [dependencies] -reth.workspace = true reth-chainspec.workspace = true reth-tracing.workspace = true reth-db = { workspace = true, features = ["test-utils"] } reth-rpc-layer.workspace = true +reth-rpc-server-types.workspace = true +reth-rpc-eth-api.workspace = true +reth-rpc-api = { workspace = true, features = ["client"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true +reth-primitives.workspace = true reth-provider.workspace = true +reth-network-api.workspace = true +reth-network.workspace = true +reth-node-api.workspace = true +reth-node-core.workspace = true reth-node-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true 
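The `AsHeaderValidator` helper added in `crates/consensus/consensus/src/lib.rs` above exists because an `Arc` of a concrete consensus type cannot be coerced to `Arc<dyn HeaderValidator<_>>` at call sites like `consensus.clone().as_header_validator()` in the test utils. A minimal sketch of the upcast pattern, with a simplified stand-in trait (not the reth definitions):

```rust
use std::fmt::Debug;
use std::sync::Arc;

/// Simplified stand-in for the `HeaderValidator<H>` trait above.
trait HeaderValidator<H>: Debug + Send + Sync {
    fn validate_header(&self, header: &H) -> Result<(), String>;
}

/// Blanket helper that converts `Arc<Self>` into the trait object through a
/// generic method, where the unsized coercion from a sized `Self` is legal.
trait AsHeaderValidator<H>: HeaderValidator<H> {
    fn as_header_validator<'a>(self: Arc<Self>) -> Arc<dyn HeaderValidator<H> + 'a>
    where
        Self: 'a;
}

impl<T: HeaderValidator<H>, H> AsHeaderValidator<H> for T {
    fn as_header_validator<'a>(self: Arc<Self>) -> Arc<dyn HeaderValidator<H> + 'a>
    where
        Self: 'a,
    {
        // `Self` is a concrete sized type here, so `Arc<Self>` coerces to the
        // trait object once, instead of failing at every call site.
        self
    }
}

#[derive(Debug)]
struct NoopValidator;

impl<H> HeaderValidator<H> for NoopValidator {
    fn validate_header(&self, _header: &H) -> Result<(), String> {
        Ok(())
    }
}

fn main() {
    let validator: Arc<dyn HeaderValidator<u64>> =
        Arc::new(NoopValidator).as_header_validator();
    assert!(validator.validate_header(&1u64).is_ok());
}
```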
reth-stages-types.workspace = true reth-network-peers.workspace = true reth-engine-local.workspace = true +reth-tasks.workspace = true + +# currently need to enable this for workspace level +reth-optimism-primitives = { workspace = true, features = ["arbitrary"] } # rpc jsonrpsee.workspace = true @@ -32,6 +44,7 @@ url.workspace = true # ethereum alloy-primitives.workspace = true alloy-eips.workspace = true +alloy-rlp.workspace = true op-alloy-rpc-types-engine.workspace = true futures-util.workspace = true diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index cfa245e1de01..8c0f03bafd3a 100644 --- a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -1,20 +1,17 @@ use crate::traits::PayloadEnvelopeExt; use alloy_primitives::B256; +use alloy_rpc_types_engine::{ForkchoiceState, PayloadStatusEnum}; use jsonrpsee::{ core::client::ClientT, http_client::{transport::HttpBackend, HttpClient}, }; -use reth::{ - api::{EngineTypes, PayloadBuilderAttributes}, - providers::CanonStateNotificationStream, - rpc::{ - api::EngineApiClient, - types::engine::{ForkchoiceState, PayloadStatusEnum}, - }, -}; use reth_chainspec::EthereumHardforks; +use reth_node_api::EngineTypes; use reth_node_builder::BuiltPayload; use reth_payload_builder::PayloadId; +use reth_payload_primitives::PayloadBuilderAttributes; +use reth_provider::CanonStateNotificationStream; +use reth_rpc_api::EngineApiClient; use reth_rpc_layer::AuthClientService; use std::{marker::PhantomData, sync::Arc}; @@ -83,7 +80,7 @@ impl EngineApiTestContext( attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, ) -> eyre::Result<(Vec>, TaskManager, Wallet)> where - N: Default + Node> + NodeTypesWithEngine, + N: Default + Node> + NodeTypesForTree + NodeTypesWithEngine, N::ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter, Components: NodeComponents, Network: PeersHandleProvider>, @@ -120,7 +122,8 @@ pub async fn setup_engine( where N: Default + Node>>> - + NodeTypesWithEngine, + + NodeTypesWithEngine + + NodeTypesForProvider, N::ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter>>, Components: NodeComponents< @@ -128,7 +131,11 @@ where Network: PeersHandleProvider, >, >, - N::AddOns: RethRpcAddOns>>>, + N::AddOns: RethRpcAddOns>>> + + EngineValidatorAddOn< + Adapter>>, + Validator: EngineValidator, + >, LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, diff --git a/crates/e2e-test-utils/src/network.rs b/crates/e2e-test-utils/src/network.rs index 3f25915b35b4..2efc8d47f2d7 100644 --- a/crates/e2e-test-utils/src/network.rs +++ b/crates/e2e-test-utils/src/network.rs @@ -1,5 +1,7 @@ use futures_util::StreamExt; -use reth::network::{NetworkEvent, NetworkEventListenerProvider, PeersHandleProvider, PeersInfo}; +use reth_network_api::{ + test_utils::PeersHandleProvider, NetworkEvent, NetworkEventListenerProvider, PeersInfo, +}; use reth_network_peers::{NodeRecord, PeerId}; use reth_tokio_util::EventStream; use reth_tracing::tracing::info; diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index c3dff527eb20..b3eb641c1371 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -1,30 +1,28 @@ -use std::{marker::PhantomData, pin::Pin}; - +use crate::{ + engine_api::EngineApiTestContext, network::NetworkTestContext, payload::PayloadTestContext, + rpc::RpcTestContext, traits::PayloadEnvelopeExt, +}; 
+use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; +use alloy_rpc_types_engine::PayloadStatusEnum; use alloy_rpc_types_eth::BlockNumberOrTag; use eyre::Ok; use futures_util::Future; -use reth::{ - api::{BuiltPayload, EngineTypes, FullNodeComponents, PayloadBuilderAttributes}, - builder::FullNode, - network::PeersHandleProvider, - providers::{BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader}, - rpc::{ - api::eth::helpers::{EthApiSpec, EthTransactions, TraceExt}, - types::engine::PayloadStatusEnum, - }, -}; use reth_chainspec::EthereumHardforks; -use reth_node_builder::{rpc::RethRpcAddOns, NodeTypes, NodeTypesWithEngine}; +use reth_network_api::test_utils::PeersHandleProvider; +use reth_node_api::{Block, EngineTypes, FullNodeComponents}; +use reth_node_builder::{rpc::RethRpcAddOns, FullNode, NodeTypes, NodeTypesWithEngine}; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; +use reth_primitives::EthPrimitives; +use reth_provider::{ + BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader, +}; +use reth_rpc_eth_api::helpers::{EthApiSpec, EthTransactions, TraceExt}; use reth_stages_types::StageId; +use std::{marker::PhantomData, pin::Pin}; use tokio_stream::StreamExt; use url::Url; -use crate::{ - engine_api::EngineApiTestContext, network::NetworkTestContext, payload::PayloadTestContext, - rpc::RpcTestContext, traits::PayloadEnvelopeExt, -}; - /// An helper struct to handle node actions #[allow(missing_debug_implementations)] pub struct NodeTestContext @@ -51,7 +49,11 @@ impl NodeTestContext where Engine: EngineTypes, Node: FullNodeComponents, - Node::Types: NodeTypesWithEngine, + Node::Types: NodeTypesWithEngine< + ChainSpec: EthereumHardforks, + Engine = Engine, + Primitives = EthPrimitives, + >, Node::Network: PeersHandleProvider, AddOns: RethRpcAddOns, { @@ -178,7 +180,7 @@ where if check { if let Some(latest_block) = self.inner.provider.block_by_number(number)? { - assert_eq!(latest_block.hash_slow(), expected_block_hash); + assert_eq!(latest_block.header().hash_slow(), expected_block_hash); break } assert!( @@ -216,7 +218,7 @@ where // get head block from notifications stream and verify the tx has been pushed to the // pool is actually present in the canonical block let head = self.engine_api.canonical_stream.next().await.unwrap(); - let tx = head.tip().transactions().next(); + let tx = head.tip().transactions().first(); assert_eq!(tx.unwrap().hash().as_slice(), tip_tx_hash.as_slice()); loop { @@ -225,10 +227,10 @@ where if let Some(latest_block) = self.inner.provider.block_by_number_or_tag(BlockNumberOrTag::Latest)? 
{ - if latest_block.number == block_number { + if latest_block.header().number() == block_number { // make sure the block hash we submitted via FCU engine api is the new latest // block using an RPC call - assert_eq!(latest_block.hash_slow(), block_hash); + assert_eq!(latest_block.header().hash_slow(), block_hash); break } } diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 29aa11895b7d..45889a171c1a 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -1,7 +1,7 @@ use futures_util::StreamExt; -use reth::api::{BuiltPayload, PayloadBuilderAttributes}; use reth_payload_builder::{PayloadBuilderHandle, PayloadId}; -use reth_payload_primitives::{Events, PayloadBuilder, PayloadTypes}; +use reth_payload_builder_primitives::{Events, PayloadBuilder}; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadTypes}; use tokio_stream::wrappers::BroadcastStream; /// Helper for payload operations diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 7b7dabdf2404..8399a482dfd6 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -1,18 +1,15 @@ use alloy_consensus::TxEnvelope; use alloy_network::eip2718::Decodable2718; use alloy_primitives::{Bytes, B256}; -use reth::{ - builder::{rpc::RpcRegistry, FullNodeComponents}, - rpc::api::{ - eth::{ - helpers::{EthApiSpec, EthTransactions, TraceExt}, - EthApiTypes, - }, - DebugApiServer, - }, -}; +use alloy_rlp::Encodable; use reth_chainspec::EthereumHardforks; -use reth_node_builder::NodeTypes; +use reth_node_api::{FullNodeComponents, NodePrimitives}; +use reth_node_builder::{rpc::RpcRegistry, NodeTypes}; +use reth_rpc_api::DebugApiServer; +use reth_rpc_eth_api::{ + helpers::{EthApiSpec, EthTransactions, TraceExt}, + EthApiTypes, +}; #[allow(missing_debug_implementations)] pub struct RpcTestContext { @@ -21,7 +18,12 @@ pub struct RpcTestContext { impl RpcTestContext where - Node: FullNodeComponents>, + Node: FullNodeComponents< + Types: NodeTypes< + ChainSpec: EthereumHardforks, + Primitives: NodePrimitives, + >, + >, EthApi: EthApiSpec + EthTransactions + TraceExt, { /// Injects a raw transaction into the node tx pool via RPC server diff --git a/crates/e2e-test-utils/src/traits.rs b/crates/e2e-test-utils/src/traits.rs index d14445370d41..6d9bf14dbc12 100644 --- a/crates/e2e-test-utils/src/traits.rs +++ b/crates/e2e-test-utils/src/traits.rs @@ -1,6 +1,7 @@ -use alloy_rpc_types_engine::ExecutionPayloadEnvelopeV4; +use alloy_rpc_types_engine::{ + ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, ExecutionPayloadV3, +}; use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; -use reth::rpc::types::engine::{ExecutionPayloadEnvelopeV3, ExecutionPayloadV3}; /// The execution payload envelope type. 
pub trait PayloadEnvelopeExt: Send + Sync + std::fmt::Debug { diff --git a/crates/engine/invalid-block-hooks/Cargo.toml b/crates/engine/invalid-block-hooks/Cargo.toml index b33b8c00a1cd..a7b0153d0d4b 100644 --- a/crates/engine/invalid-block-hooks/Cargo.toml +++ b/crates/engine/invalid-block-hooks/Cargo.toml @@ -16,16 +16,18 @@ reth-chainspec.workspace = true reth-engine-primitives.workspace = true reth-evm.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-revm = { workspace = true, features = ["serde"] } reth-rpc-api = { workspace = true, features = ["client"] } reth-tracing.workspace = true -reth-trie = { workspace = true, features = ["serde"] } +reth-trie.workspace = true # alloy alloy-primitives.workspace = true alloy-rlp.workspace = true alloy-rpc-types-debug.workspace = true +alloy-consensus.workspace = true # async futures.workspace = true diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 416c4adb40f8..98ee8dd2d137 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -1,5 +1,4 @@ -use std::{collections::HashMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; - +use alloy_consensus::Header; use alloy_primitives::{keccak256, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use eyre::OptionExt; @@ -9,18 +8,18 @@ use reth_engine_primitives::InvalidBlockHook; use reth_evm::{ state_change::post_block_balance_increments, system_calls::SystemCaller, ConfigureEvm, }; -use reth_primitives::{Header, Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives_traits::SignedTransaction; use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ - database::StateProviderDatabase, - db::states::bundle_state::BundleRetention, - primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg}, - DatabaseCommit, StateBuilder, + database::StateProviderDatabase, db::states::bundle_state::BundleRetention, + primitives::EnvWithHandlerCfg, DatabaseCommit, StateBuilder, }; use reth_rpc_api::DebugApiClient; use reth_tracing::tracing::warn; use reth_trie::{updates::TrieUpdates, HashedPostState, HashedStorage}; use serde::Serialize; +use std::{collections::HashMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; /// Generates a witness for the given block and saves it to a file. #[derive(Debug)] @@ -75,9 +74,7 @@ where .build(); // Setup environment for the execution. 
- let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, block.header(), U256::MAX); + let (cfg, block_env) = self.evm_config.cfg_and_block_env(block.header(), U256::MAX); // Setup EVM let mut evm = self.evm_config.evm_with_env( diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index 2ab448e3bbfc..d8a66e65e04c 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -16,11 +16,12 @@ reth-consensus.workspace = true reth-engine-primitives.workspace = true reth-engine-service.workspace = true reth-engine-tree.workspace = true +reth-node-types.workspace = true reth-evm.workspace = true reth-ethereum-engine-primitives.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true -reth-payload-validator.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-rpc-types-compat.workspace = true diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index 7cebd3063097..a5c7cf4d4c60 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -4,13 +4,11 @@ use alloy_primitives::{TxHash, B256}; use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar, ForkchoiceState}; use eyre::OptionExt; use futures_util::{stream::Fuse, StreamExt}; -use reth_beacon_consensus::BeaconEngineMessage; use reth_chainspec::EthereumHardforks; -use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; +use reth_engine_primitives::{BeaconEngineMessage, EngineApiMessageVersion, EngineTypes}; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_primitives::{ - BuiltPayload, PayloadAttributesBuilder, PayloadBuilder, PayloadKind, PayloadTypes, -}; +use reth_payload_builder_primitives::PayloadBuilder; +use reth_payload_primitives::{BuiltPayload, PayloadAttributesBuilder, PayloadKind, PayloadTypes}; use reth_provider::{BlockReader, ChainSpecProvider}; use reth_rpc_types_compat::engine::payload::block_to_payload; use reth_transaction_pool::TransactionPool; @@ -212,12 +210,13 @@ where let block = payload.block(); - let cancun_fields = - self.provider.chain_spec().is_cancun_active_at_timestamp(block.timestamp).then(|| { - CancunPayloadFields { - parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), - versioned_hashes: block.blob_versioned_hashes().into_iter().copied().collect(), - } + let cancun_fields = self + .provider + .chain_spec() + .is_cancun_active_at_timestamp(block.timestamp) + .then(|| CancunPayloadFields { + parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), + versioned_hashes: block.body.blob_versioned_hashes().into_iter().copied().collect(), }); let (tx, rx) = oneshot::channel(); diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 93a9cf11ecc4..5838cb89116b 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -16,9 +16,10 @@ use std::{ use crate::miner::{LocalMiner, MiningMode}; use futures_util::{Stream, StreamExt}; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineNodeTypes}; +use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; use reth_consensus::Consensus; +use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; use 
reth_engine_service::service::EngineMessageStream; use reth_engine_tree::{ chain::{ChainEvent, HandlerEvent}, @@ -26,13 +27,13 @@ EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineRequestHandler, FromEngine, RequestHandlerEvent, }, - persistence::PersistenceHandle, + persistence::{PersistenceHandle, PersistenceNodeTypes}, tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; use reth_evm::execute::BlockExecutorProvider; +use reth_node_types::BlockTy; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{PayloadAttributesBuilder, PayloadTypes}; -use reth_payload_validator::ExecutionPayloadValidator; use reth_provider::{providers::BlockchainProvider2, ChainSpecProvider, ProviderFactory}; use reth_prune::PrunerWithFactory; use reth_stages_api::MetricEventsSender; @@ -58,17 +59,18 @@ where impl LocalEngineService where - N: EngineNodeTypes, + N: EngineNodeTypes + PersistenceNodeTypes, { /// Constructor for [`LocalEngineService`]. #[allow(clippy::too_many_arguments)] - pub fn new( + pub fn new( consensus: Arc, executor_factory: impl BlockExecutorProvider, provider: ProviderFactory, blockchain_db: BlockchainProvider2, pruner: PrunerWithFactory>, payload_builder: PayloadBuilderHandle, + payload_validator: V, tree_config: TreeConfig, invalid_block_hook: Box, sync_metrics_tx: MetricEventsSender, @@ -79,6 +81,7 @@ where ) -> Self where B: PayloadAttributesBuilder<::PayloadAttributes>, + V: EngineValidator>, { let chain_spec = provider.chain_spec(); let engine_kind = @@ -86,11 +89,9 @@ where let persistence_handle = PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); - let payload_validator = ExecutionPayloadValidator::new(chain_spec); - let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); - let (to_tree_tx, from_tree) = EngineApiTreeHandler::spawn_new( + let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( blockchain_db.clone(), executor_factory, consensus, diff --git a/crates/engine/primitives/Cargo.toml b/crates/engine/primitives/Cargo.toml index 008af4503327..2da1be9c928e 100644 --- a/crates/engine/primitives/Cargo.toml +++ b/crates/engine/primitives/Cargo.toml @@ -14,11 +14,21 @@ workspace = true # reth reth-execution-types.workspace = true reth-payload-primitives.workspace = true +reth-payload-builder-primitives.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-trie.workspace = true +reth-errors.workspace = true # alloy alloy-primitives.workspace = true +alloy-consensus.workspace = true +alloy-rpc-types-engine.workspace = true + +# async
tokio = { workspace = true, features = ["sync"] } +futures.workspace = true # misc serde.workspace = true +thiserror.workspace = true diff --git a/crates/engine/primitives/src/error.rs b/crates/engine/primitives/src/error.rs new file mode 100644 index 000000000000..b7deb607bcf9 --- /dev/null +++ b/crates/engine/primitives/src/error.rs @@ -0,0 +1,20 @@ +/// Represents all error cases when handling a new payload. +/// +/// This represents all possible error cases that must be returned as JSON-RPC errors back to the +/// beacon node. +#[derive(Debug, thiserror::Error)] +pub enum BeaconOnNewPayloadError { + /// Thrown when the engine task is unavailable/stopped. + #[error("beacon consensus engine task stopped")] + EngineUnavailable, + /// An internal error occurred, not necessarily related to the payload.
+ #[error(transparent)] + Internal(Box), +} + +impl BeaconOnNewPayloadError { + /// Create a new internal error. + pub fn internal(e: E) -> Self { + Self::Internal(Box::new(e)) + } +} diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/engine/primitives/src/forkchoice.rs similarity index 97% rename from crates/consensus/beacon/src/engine/forkchoice.rs rename to crates/engine/primitives/src/forkchoice.rs index a9d9301738f7..9d680d5a1241 100644 --- a/crates/consensus/beacon/src/engine/forkchoice.rs +++ b/crates/engine/primitives/src/forkchoice.rs @@ -47,24 +47,24 @@ impl ForkchoiceStateTracker { /// Returns whether the latest received FCU is valid: [`ForkchoiceStatus::Valid`] #[allow(dead_code)] pub(crate) fn is_latest_valid(&self) -> bool { - self.latest_status().map_or(false, |s| s.is_valid()) + self.latest_status().is_some_and(|s| s.is_valid()) } /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Syncing`] #[allow(dead_code)] pub(crate) fn is_latest_syncing(&self) -> bool { - self.latest_status().map_or(false, |s| s.is_syncing()) + self.latest_status().is_some_and(|s| s.is_syncing()) } /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Invalid`] #[allow(dead_code)] - pub(crate) fn is_latest_invalid(&self) -> bool { - self.latest_status().map_or(false, |s| s.is_invalid()) + pub fn is_latest_invalid(&self) -> bool { + self.latest_status().is_some_and(|s| s.is_invalid()) } /// Returns the last valid head hash. #[allow(dead_code)] - pub(crate) fn last_valid_head(&self) -> Option { + pub fn last_valid_head(&self) -> Option { self.last_valid.as_ref().map(|s| s.head_block_hash) } @@ -188,7 +188,7 @@ pub enum ForkchoiceStateHash { impl ForkchoiceStateHash { /// Tries to find a matching hash in the given [`ForkchoiceState`]. - pub(crate) fn find(state: &ForkchoiceState, hash: B256) -> Option { + pub fn find(state: &ForkchoiceState, hash: B256) -> Option { if state.head_block_hash == hash { Some(Self::Head(hash)) } else if state.safe_block_hash == hash { @@ -201,7 +201,7 @@ impl ForkchoiceStateHash { } /// Returns true if this is the head hash of the [`ForkchoiceState`] - pub(crate) const fn is_head(&self) -> bool { + pub const fn is_head(&self) -> bool { matches!(self, Self::Head(_)) } } diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 949ebf0155c4..89fb7459b7de 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -8,6 +8,18 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +mod error; + +use alloy_consensus::BlockHeader; +use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; +pub use error::BeaconOnNewPayloadError; + +mod forkchoice; +pub use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus}; + +mod message; +pub use message::{BeaconEngineMessage, OnForkChoiceUpdated}; + mod invalid_block_hook; pub use invalid_block_hook::InvalidBlockHook; @@ -15,6 +27,9 @@ pub use reth_payload_primitives::{ BuiltPayload, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes, PayloadTypes, }; +use reth_payload_primitives::{InvalidPayloadAttributesError, PayloadAttributes}; +use reth_primitives::SealedBlockFor; +use reth_primitives_traits::Block; use serde::{de::DeserializeOwned, ser::Serialize}; /// This type defines the versioned types of the engine API. 
@@ -65,8 +80,11 @@ pub trait EngineTypes: + 'static; } -/// Type that validates the payloads sent to the engine. +/// Type that validates the payloads processed by the engine. pub trait EngineValidator: Clone + Send + Sync + Unpin + 'static { + /// The block type used by the engine. + type Block: Block; + /// Validates the presence or exclusion of fork-specific fields based on the payload attributes /// and the message version. fn validate_version_specific_fields( @@ -81,4 +99,38 @@ pub trait EngineValidator: Clone + Send + Sync + Unpin + 'st version: EngineApiMessageVersion, attributes: &::PayloadAttributes, ) -> Result<(), EngineObjectValidationError>; + + /// Ensures that the given payload does not violate any consensus rules that concern the block's + /// layout. + /// + /// This function must convert the payload into the executable block and pre-validate its + /// fields. + /// + /// Implementers should ensure that the checks are done in the order that conforms with the + /// engine-API specification. + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError>; + + /// Validates the payload attributes with respect to the header. + /// + /// By default, this enforces that the payload attributes timestamp is greater than the + /// timestamp according to: + /// > 7. Client software MUST ensure that payloadAttributes.timestamp is greater than + /// > timestamp + /// > of a block referenced by forkchoiceState.headBlockHash. + /// + /// See also [engine api spec](https://github.com/ethereum/execution-apis/tree/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine) + fn validate_payload_attributes_against_header( + &self, + attr: &::PayloadAttributes, + header: &::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + if attr.timestamp() <= header.timestamp() { + return Err(InvalidPayloadAttributesError::InvalidTimestamp); + } + Ok(()) + } } diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/engine/primitives/src/message.rs similarity index 97% rename from crates/consensus/beacon/src/engine/message.rs rename to crates/engine/primitives/src/message.rs index fa7457c1225d..d8a4c1322ad0 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/engine/primitives/src/message.rs @@ -1,12 +1,11 @@ -use crate::engine::{error::BeaconOnNewPayloadError, forkchoice::ForkchoiceStatus}; +use crate::{BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, ForkchoiceStatus}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadSidecar, ForkChoiceUpdateResult, ForkchoiceState, ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, }; use futures::{future::Either, FutureExt}; -use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; use reth_errors::RethResult; -use reth_payload_primitives::PayloadBuilderError; +use reth_payload_builder_primitives::PayloadBuilderError; use std::{ fmt::Display, future::Future, diff --git a/crates/engine/service/Cargo.toml b/crates/engine/service/Cargo.toml index c6098bfe6671..8854fd18879d 100644 --- a/crates/engine/service/Cargo.toml +++ b/crates/engine/service/Cargo.toml @@ -18,13 +18,13 @@ reth-engine-tree.workspace = true reth-evm.workspace = true reth-network-p2p.workspace = true reth-payload-builder.workspace = true -reth-payload-validator.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-stages-api.workspace = true reth-tasks.workspace = true reth-node-types.workspace = 
true reth-chainspec.workspace = true +reth-engine-primitives.workspace = true # async futures.workspace = true diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 198438d457f1..a54a2ef9e1a1 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -1,13 +1,14 @@ use futures::{Stream, StreamExt}; use pin_project::pin_project; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineNodeTypes}; +use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; use reth_consensus::Consensus; +use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; use reth_engine_tree::{ backfill::PipelineSync, download::BasicBlockDownloader, engine::{EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineHandler}, - persistence::PersistenceHandle, + persistence::{PersistenceHandle, PersistenceNodeTypes}, tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; pub use reth_engine_tree::{ @@ -15,10 +16,9 @@ pub use reth_engine_tree::{ engine::EngineApiEvent, }; use reth_evm::execute::BlockExecutorProvider; -use reth_network_p2p::BlockClient; -use reth_node_types::NodeTypesWithEngine; +use reth_network_p2p::EthBlockClient; +use reth_node_types::{BlockTy, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_validator::ExecutionPayloadValidator; use reth_provider::{providers::BlockchainProvider2, ProviderFactory}; use reth_prune::PrunerWithFactory; use reth_stages_api::{MetricEventsSender, Pipeline}; @@ -49,7 +49,7 @@ type EngineServiceType = ChainOrchestrator< pub struct EngineService where N: EngineNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { orchestrator: EngineServiceType, @@ -58,13 +58,13 @@ where impl EngineService where - N: EngineNodeTypes, - Client: BlockClient + 'static, + N: EngineNodeTypes + PersistenceNodeTypes, + Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { /// Constructor for `EngineService`. 
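The `EngineValidator` trait introduced above ships a default `validate_payload_attributes_against_header` that reduces to a strict timestamp comparison (rule 7 of the engine API spec). A self-contained restatement of that check, with plain integers standing in for the attribute and header types:

```rust
/// Mirrors the default check: payload attribute timestamps must be strictly
/// greater than the timestamp of the head block they build on.
fn attributes_timestamp_is_valid(attr_timestamp: u64, head_timestamp: u64) -> bool {
    attr_timestamp > head_timestamp
}

fn main() {
    assert!(attributes_timestamp_is_valid(13, 12));
    // Equal timestamps are rejected, which maps to `InvalidTimestamp` in the trait.
    assert!(!attributes_timestamp_is_valid(12, 12));
}
```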
#[allow(clippy::too_many_arguments)] - pub fn new( + pub fn new( consensus: Arc, executor_factory: E, chain_spec: Arc, @@ -76,10 +76,14 @@ where blockchain_db: BlockchainProvider2, pruner: PrunerWithFactory>, payload_builder: PayloadBuilderHandle, + payload_validator: V, tree_config: TreeConfig, invalid_block_hook: Box, sync_metrics_tx: MetricEventsSender, - ) -> Self { + ) -> Self + where + V: EngineValidator>, + { let engine_kind = if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum }; @@ -87,11 +91,10 @@ where let persistence_handle = PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); - let payload_validator = ExecutionPayloadValidator::new(chain_spec); let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); - let (to_tree_tx, from_tree) = EngineApiTreeHandler::spawn_new( + let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( blockchain_db, executor_factory, consensus, @@ -124,7 +127,7 @@ where impl Stream for EngineService where N: EngineNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { type Item = ChainEvent; @@ -145,8 +148,9 @@ mod tests { use super::*; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{ChainSpecBuilder, MAINNET}; + use reth_engine_primitives::BeaconEngineMessage; use reth_engine_tree::{test_utils::TestPipelineBuilder, tree::NoopInvalidBlockHook}; - use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex_types::FinishedExExHeight; use reth_network_p2p::test_utils::TestFullBlockClient; @@ -184,7 +188,7 @@ mod tests { let blockchain_db = BlockchainProvider2::with_latest(provider_factory.clone(), SealedHeader::default()) .unwrap(); - + let engine_payload_validator = EthereumEngineValidator::new(chain_spec.clone()); let (_tx, rx) = watch::channel(FinishedExExHeight::NoExExs); let pruner = Pruner::new_with_factory(provider_factory.clone(), vec![], 0, 0, None, rx); @@ -202,6 +206,7 @@ mod tests { blockchain_db, pruner, PayloadBuilderHandle::new(tx), + engine_payload_validator, TreeConfig::default(), Box::new(NoopInvalidBlockHook::default()), sync_metrics_tx, diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 2ce18aa0e7d1..680b6933ebe6 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -13,46 +13,49 @@ workspace = true [dependencies] # reth reth-beacon-consensus.workspace = true -reth-blockchain-tree.workspace = true reth-blockchain-tree-api.workspace = true +reth-blockchain-tree.workspace = true reth-chain-state.workspace = true +reth-chainspec = { workspace = true, optional = true } reth-consensus.workspace = true -reth-chainspec.workspace = true reth-engine-primitives.workspace = true reth-errors.workspace = true reth-evm.workspace = true reth-network-p2p.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true -reth-payload-validator.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-revm.workspace = true reth-stages-api.workspace = true reth-tasks.workspace = true -reth-trie.workspace = true +reth-trie-db.workspace = true reth-trie-parallel.workspace = true +reth-trie-sparse.workspace = true +reth-trie.workspace = true # alloy 
-alloy-primitives.workspace = true +alloy-consensus.workspace = true alloy-eips.workspace = true +alloy-primitives.workspace = true +alloy-rlp.workspace = true alloy-rpc-types-engine.workspace = true revm-primitives.workspace = true # common futures.workspace = true -pin-project.workspace = true -tokio = { workspace = true, features = ["macros", "sync"] } -tokio-stream.workspace = true thiserror.workspace = true +tokio = { workspace = true, features = ["macros", "sync"] } # metrics metrics.workspace = true reth-metrics = { workspace = true, features = ["common"] } # misc +rayon.workspace = true tracing.workspace = true # optional deps for test-utils @@ -63,42 +66,53 @@ reth-tracing = { workspace = true, optional = true } [dev-dependencies] # reth -reth-db = { workspace = true, features = ["test-utils"] } reth-chain-state = { workspace = true, features = ["test-utils"] } +reth-chainspec.workspace = true +reth-db = { workspace = true, features = ["test-utils"] } reth-ethereum-engine-primitives.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } reth-exex-types.workspace = true reth-network-p2p = { workspace = true, features = ["test-utils"] } -reth-prune.workspace = true reth-prune-types.workspace = true +reth-prune.workspace = true reth-rpc-types-compat.workspace = true reth-stages = { workspace = true, features = ["test-utils"] } reth-static-file.workspace = true +reth-testing-utils.workspace = true reth-tracing.workspace = true -reth-chainspec.workspace = true +# alloy alloy-rlp.workspace = true assert_matches.workspace = true +criterion.workspace = true +crossbeam-channel = "0.5.13" +rand.workspace = true + +[[bench]] +name = "channel_perf" +harness = false [features] test-utils = [ - "reth-db/test-utils", - "reth-chain-state/test-utils", - "reth-network-p2p/test-utils", - "reth-prune-types", - "reth-stages/test-utils", - "reth-static-file", - "reth-tracing", - "reth-blockchain-tree/test-utils", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-evm/test-utils", - "reth-payload-builder/test-utils", - "reth-primitives/test-utils", - "reth-revm/test-utils", - "reth-stages-api/test-utils", - "reth-provider/test-utils", - "reth-trie/test-utils", - "reth-prune-types?/test-utils" + "reth-blockchain-tree/test-utils", + "reth-chain-state/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-db/test-utils", + "reth-evm/test-utils", + "reth-network-p2p/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-provider/test-utils", + "reth-prune-types", + "reth-prune-types?/test-utils", + "reth-revm/test-utils", + "reth-stages-api/test-utils", + "reth-stages/test-utils", + "reth-static-file", + "reth-tracing", + "reth-trie/test-utils", + "reth-trie-db/test-utils", ] diff --git a/crates/engine/tree/benches/channel_perf.rs b/crates/engine/tree/benches/channel_perf.rs new file mode 100644 index 000000000000..c1c65e0a68e1 --- /dev/null +++ b/crates/engine/tree/benches/channel_perf.rs @@ -0,0 +1,132 @@ +//! Benchmark comparing `std::sync::mpsc` and `crossbeam` channels for `StateRootTask`.
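The new benchmark relies on `std::sync::mpsc` and the `crossbeam-channel` dependency added above exposing the same blocking send/recv shape, which is why the two simulated tasks below can stay structurally identical. A minimal sketch of that API equivalence:

```rust
fn main() {
    // std::sync::mpsc: unbounded channel with blocking recv.
    let (tx, rx) = std::sync::mpsc::channel::<u64>();
    tx.send(1).unwrap();
    assert_eq!(rx.recv().unwrap(), 1);

    // crossbeam-channel: same surface for this use case, different internals.
    let (tx, rx) = crossbeam_channel::unbounded::<u64>();
    tx.send(1).unwrap();
    assert_eq!(rx.recv().unwrap(), 1);
}
```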
+ +#![allow(missing_docs)] + +use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; +use revm_primitives::{ + Account, AccountInfo, AccountStatus, Address, EvmState, EvmStorage, EvmStorageSlot, HashMap, + B256, U256, +}; +use std::thread; + +/// Creates a mock state with the specified number of accounts for benchmarking +fn create_bench_state(num_accounts: usize) -> EvmState { + let mut state_changes = HashMap::default(); + + for i in 0..num_accounts { + let storage = + EvmStorage::from_iter([(U256::from(i), EvmStorageSlot::new(U256::from(i + 1)))]); + + let account = Account { + info: AccountInfo { + balance: U256::from(100), + nonce: 10, + code_hash: B256::random(), + code: Default::default(), + }, + storage, + status: AccountStatus::Loaded, + }; + + let address = Address::random(); + state_changes.insert(address, account); + } + + state_changes +} + +/// Simulated `StateRootTask` with `std::sync::mpsc` +struct StdStateRootTask { + rx: std::sync::mpsc::Receiver, +} + +impl StdStateRootTask { + const fn new(rx: std::sync::mpsc::Receiver) -> Self { + Self { rx } + } + + fn run(self) { + while let Ok(state) = self.rx.recv() { + criterion::black_box(state); + } + } +} + +/// Simulated `StateRootTask` with `crossbeam-channel` +struct CrossbeamStateRootTask { + rx: crossbeam_channel::Receiver, +} + +impl CrossbeamStateRootTask { + const fn new(rx: crossbeam_channel::Receiver) -> Self { + Self { rx } + } + + fn run(self) { + while let Ok(state) = self.rx.recv() { + criterion::black_box(state); + } + } +} + +/// Benchmarks the performance of different channel implementations for state streaming +fn bench_state_stream(c: &mut Criterion) { + let mut group = c.benchmark_group("state_stream_channels"); + group.sample_size(10); + + for size in &[1, 10, 100] { + let bench_setup = || { + let states: Vec<_> = (0..100).map(|_| create_bench_state(*size)).collect(); + states + }; + + group.bench_with_input(BenchmarkId::new("std_channel", size), size, |b, _| { + b.iter_batched( + bench_setup, + |states| { + let (tx, rx) = std::sync::mpsc::channel(); + let task = StdStateRootTask::new(rx); + + let processor = thread::spawn(move || { + task.run(); + }); + + for state in states { + tx.send(state).unwrap(); + } + drop(tx); + + processor.join().unwrap(); + }, + BatchSize::LargeInput, + ); + }); + + group.bench_with_input(BenchmarkId::new("crossbeam_channel", size), size, |b, _| { + b.iter_batched( + bench_setup, + |states| { + let (tx, rx) = crossbeam_channel::unbounded(); + let task = CrossbeamStateRootTask::new(rx); + + let processor = thread::spawn(move || { + task.run(); + }); + + for state in states { + tx.send(state).unwrap(); + } + drop(tx); + + processor.join().unwrap(); + }, + BatchSize::LargeInput, + ); + }); + } + + group.finish(); +} + +criterion_group!(benches, bench_state_stream); +criterion_main!(benches); diff --git a/crates/engine/tree/src/backfill.rs b/crates/engine/tree/src/backfill.rs index 78e21a7b5efc..2ed0e758d505 100644 --- a/crates/engine/tree/src/backfill.rs +++ b/crates/engine/tree/src/backfill.rs @@ -230,12 +230,13 @@ impl PipelineState { mod tests { use super::*; use crate::test_utils::{insert_headers_into_client, TestPipelineBuilder}; - use alloy_primitives::{BlockNumber, Sealable, B256}; + use alloy_consensus::Header; + use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use futures::poll; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_network_p2p::test_utils::TestFullBlockClient; - use reth_primitives::{Header, 
SealedHeader}; + use reth_primitives::SealedHeader; use reth_provider::test_utils::MockNodeTypesWithDB; use reth_stages::ExecOutput; use reth_stages_api::StageCheckpoint; @@ -267,14 +268,12 @@ mod tests { let pipeline_sync = PipelineSync::new(pipeline, Box::::default()); let client = TestFullBlockClient::default(); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(7), gas_limit: chain_spec.max_gas_limit, ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); insert_headers_into_client(&client, header, 0..total_blocks); let tip = client.highest_block().expect("there should be blocks here").hash(); diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs index 9ecec70ae369..8a7ea583f0fc 100644 --- a/crates/engine/tree/src/download.rs +++ b/crates/engine/tree/src/download.rs @@ -6,12 +6,13 @@ use futures::FutureExt; use reth_consensus::Consensus; use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, - BlockClient, + BlockClient, EthBlockClient, }; use reth_primitives::{SealedBlock, SealedBlockWithSenders}; use std::{ cmp::{Ordering, Reverse}, collections::{binary_heap::PeekMut, BinaryHeap, HashSet, VecDeque}, + fmt::Debug, sync::Arc, task::{Context, Poll}, }; @@ -72,10 +73,13 @@ where impl BasicBlockDownloader where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Create a new instance - pub fn new(client: Client, consensus: Arc) -> Self { + pub fn new( + client: Client, + consensus: Arc>, + ) -> Self { Self { full_block_client: FullBlockClient::new(client, consensus), inflight_full_block_requests: Vec::new(), @@ -182,7 +186,7 @@ where impl BlockDownloader for BasicBlockDownloader where - Client: BlockClient + 'static, + Client: EthBlockClient, { /// Handles incoming download actions. 
fn on_action(&mut self, action: DownloadAction) { @@ -305,12 +309,12 @@ impl BlockDownloader for NoopBlockDownloader { mod tests { use super::*; use crate::test_utils::insert_headers_into_client; - use alloy_primitives::Sealable; + use alloy_consensus::Header; use assert_matches::assert_matches; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_network_p2p::test_utils::TestFullBlockClient; - use reth_primitives::{Header, SealedHeader}; + use reth_primitives::SealedHeader; use std::{future::poll_fn, sync::Arc}; struct TestHarness { @@ -329,14 +333,12 @@ mod tests { ); let client = TestFullBlockClient::default(); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(7), gas_limit: chain_spec.max_gas_limit, ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); insert_headers_into_client(&client, header, 0..total_blocks); let consensus = Arc::new(EthBeaconConsensus::new(chain_spec)); diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index 914121adce51..947d025e9ab6 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -7,10 +7,10 @@ use crate::{ }; use alloy_primitives::B256; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; +use reth_beacon_consensus::BeaconConsensusEngineEvent; use reth_chain_state::ExecutedBlock; -use reth_engine_primitives::EngineTypes; -use reth_primitives::SealedBlockWithSenders; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders}; use std::{ collections::HashSet, fmt::Display, @@ -270,25 +270,25 @@ impl From> for FromEngine { /// Event from the consensus engine. // TODO(mattsse): find a more appropriate name for this variant, consider phasing it out. - BeaconConsensus(BeaconConsensusEngineEvent), + BeaconConsensus(BeaconConsensusEngineEvent), /// Backfill action is needed. BackfillAction(BackfillAction), /// Block download is needed. Download(DownloadRequest), } -impl EngineApiEvent { +impl EngineApiEvent { /// Returns `true` if the event is a backfill action. pub const fn is_backfill_action(&self) -> bool { matches!(self, Self::BackfillAction(_)) } } -impl From for EngineApiEvent { - fn from(event: BeaconConsensusEngineEvent) -> Self { +impl From> for EngineApiEvent { + fn from(event: BeaconConsensusEngineEvent) -> Self { Self::BeaconConsensus(event) } } diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index f4650a047b4f..950310b170f7 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -2,6 +2,7 @@ use crate::metrics::PersistenceMetrics; use alloy_eips::BlockNumHash; use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; +use reth_primitives::EthPrimitives; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, StaticFileProviderFactory, @@ -16,6 +17,11 @@ use thiserror::Error; use tokio::sync::oneshot; use tracing::{debug, error}; +/// A helper trait with requirements for [`ProviderNodeTypes`] to be used within +/// [`PersistenceService`]. 
+pub trait PersistenceNodeTypes: ProviderNodeTypes {} +impl PersistenceNodeTypes for T where T: ProviderNodeTypes {} + /// Writes parts of reth's in memory tree state to the database and static files. /// /// This is meant to be a spawned service that listens for various incoming persistence operations, @@ -60,7 +66,7 @@ impl PersistenceService { } } -impl PersistenceService { +impl PersistenceService { /// This is the main loop, that will listen to database events and perform the requested /// database actions pub fn run(mut self) -> Result<(), PersistenceError> { @@ -120,7 +126,7 @@ impl PersistenceService { let new_tip_hash = provider_rw.block_hash(new_tip_num)?; UnifiedStorageWriter::from(&provider_rw, &sf_provider).remove_blocks_above(new_tip_num)?; - UnifiedStorageWriter::commit_unwind(provider_rw, sf_provider)?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; debug!(target: "engine::persistence", ?new_tip_num, ?new_tip_hash, "Removed blocks from disk"); self.metrics.remove_blocks_above_duration_seconds.record(start_time.elapsed()); @@ -141,8 +147,8 @@ impl PersistenceService { let provider_rw = self.provider.database_provider_rw()?; let static_file_provider = self.provider.static_file_provider(); - UnifiedStorageWriter::from(&provider_rw, &static_file_provider).save_blocks(&blocks)?; - UnifiedStorageWriter::commit(provider_rw, static_file_provider)?; + UnifiedStorageWriter::from(&provider_rw, &static_file_provider).save_blocks(blocks)?; + UnifiedStorageWriter::commit(provider_rw)?; } self.metrics.save_blocks_duration_seconds.record(start_time.elapsed()); Ok(last_block_hash_num) @@ -198,7 +204,7 @@ impl PersistenceHandle { } /// Create a new [`PersistenceHandle`], and spawn the persistence service. - pub fn spawn_service( + pub fn spawn_service( provider_factory: ProviderFactory, pruner: PrunerWithFactory>, sync_metrics_tx: MetricEventsSender, diff --git a/crates/engine/tree/src/test_utils.rs b/crates/engine/tree/src/test_utils.rs index f17766a43ed7..c1b534ebf5eb 100644 --- a/crates/engine/tree/src/test_utils.rs +++ b/crates/engine/tree/src/test_utils.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{Sealable, B256}; +use alloy_primitives::B256; use reth_chainspec::ChainSpec; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives::{BlockBody, SealedHeader}; @@ -76,9 +76,7 @@ pub fn insert_headers_into_client( header.parent_hash = hash; header.number += 1; header.timestamp += 1; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - sealed_header = SealedHeader::new(header, seal); + sealed_header = SealedHeader::seal(header); client.insert(sealed_header.clone(), body.clone()); } } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 11dd95e55839..fd0e5aeec838 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1,9 +1,11 @@ use crate::{ backfill::{BackfillAction, BackfillSyncState}, chain::FromOrchestrator, - engine::{DownloadRequest, EngineApiEvent, FromEngine}, + engine::{DownloadRequest, EngineApiEvent, EngineApiKind, EngineApiRequest, FromEngine}, persistence::PersistenceHandle, + tree::metrics::EngineApiMetrics, }; +use alloy_consensus::{BlockHeader, Header}; use alloy_eips::BlockNumHash; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -14,8 +16,7 @@ use alloy_rpc_types_engine::{ PayloadValidationError, }; use reth_beacon_consensus::{ - BeaconConsensusEngineEvent, BeaconEngineMessage, ForkchoiceStateTracker, InvalidHeaderCache, - OnForkChoiceUpdated, 
MIN_BLOCKS_FOR_PIPELINE_RUN, + BeaconConsensusEngineEvent, InvalidHeaderCache, MIN_BLOCKS_FOR_PIPELINE_RUN, }; use reth_blockchain_tree::{ error::{InsertBlockErrorKindTwo, InsertBlockErrorTwo, InsertBlockFatalError}, @@ -24,16 +25,19 @@ use reth_blockchain_tree::{ use reth_chain_state::{ CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain, }; -use reth_chainspec::EthereumHardforks; use reth_consensus::{Consensus, PostExecutionInput}; -use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; +use reth_engine_primitives::{ + BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, + EngineValidator, ForkchoiceStateTracker, OnForkChoiceUpdated, +}; use reth_errors::{ConsensusError, ProviderResult}; use reth_evm::execute::BlockExecutorProvider; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_primitives::{PayloadAttributes, PayloadBuilder, PayloadBuilderAttributes}; -use reth_payload_validator::ExecutionPayloadValidator; +use reth_payload_builder_primitives::PayloadBuilder; +use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ - Block, GotExpected, Header, SealedBlock, SealedBlockWithSenders, SealedHeader, + Block, EthPrimitives, GotExpected, NodePrimitives, SealedBlock, SealedBlockWithSenders, + SealedHeader, }; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, @@ -43,12 +47,13 @@ use reth_provider::{ use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; -use reth_trie_parallel::parallel_root::{ParallelStateRoot, ParallelStateRootError}; +use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; use revm_primitives::ResultAndState; use std::{ cmp::Ordering, collections::{btree_map, hash_map, BTreeMap, VecDeque}, fmt::Debug, + marker::PhantomData, ops::Bound, sync::{ mpsc::{Receiver, RecvError, RecvTimeoutError, Sender}, @@ -66,10 +71,6 @@ pub mod config; mod invalid_block_hook; mod metrics; mod persistence_state; -use crate::{ - engine::{EngineApiKind, EngineApiRequest}, - tree::metrics::EngineApiMetrics, -}; pub use config::TreeConfig; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; @@ -84,17 +85,17 @@ mod root; /// - This only stores blocks that are connected to the canonical chain. /// - All executed blocks are valid and have been executed. #[derive(Debug, Default)] -pub struct TreeState { +pub struct TreeState { /// __All__ unique executed blocks by block hash that are connected to the canonical chain. /// /// This includes blocks of all forks. - blocks_by_hash: HashMap, + blocks_by_hash: HashMap>, /// Executed blocks grouped by their respective block number. /// /// This maps unique block number to all known blocks for that height. /// /// Note: there can be multiple blocks at the same height due to forks. - blocks_by_number: BTreeMap>, + blocks_by_number: BTreeMap>>, /// Map of any parent block hash to its children. parent_to_child: HashMap>, /// Map of hash to trie updates for canonical blocks that are persisted but not finalized. @@ -105,7 +106,7 @@ pub struct TreeState { current_canonical_head: BlockNumHash, } -impl TreeState { +impl TreeState { /// Returns a new, empty tree state that points to the given canonical head. 
fn new(current_canonical_head: BlockNumHash) -> Self { Self { @@ -123,12 +124,12 @@ impl TreeState { } /// Returns the [`ExecutedBlock`] by hash. - fn executed_block_by_hash(&self, hash: B256) -> Option<&ExecutedBlock> { + fn executed_block_by_hash(&self, hash: B256) -> Option<&ExecutedBlock> { self.blocks_by_hash.get(&hash) } /// Returns the block by hash. - fn block_by_hash(&self, hash: B256) -> Option> { + fn block_by_hash(&self, hash: B256) -> Option>> { self.blocks_by_hash.get(&hash).map(|b| b.block.clone()) } @@ -136,12 +137,12 @@ impl TreeState { /// newest to oldest. And the parent hash of the oldest block that is missing from the buffer. /// /// Returns `None` if the block for the given hash is not found. - fn blocks_by_hash(&self, hash: B256) -> Option<(B256, Vec)> { + fn blocks_by_hash(&self, hash: B256) -> Option<(B256, Vec>)> { let block = self.blocks_by_hash.get(&hash).cloned()?; - let mut parent_hash = block.block().parent_hash; + let mut parent_hash = block.block().parent_hash(); let mut blocks = vec![block]; while let Some(executed) = self.blocks_by_hash.get(&parent_hash) { - parent_hash = executed.block.parent_hash; + parent_hash = executed.block.parent_hash(); blocks.push(executed.clone()); } @@ -149,10 +150,10 @@ impl TreeState { } /// Insert executed block into the state. - fn insert_executed(&mut self, executed: ExecutedBlock) { + fn insert_executed(&mut self, executed: ExecutedBlock) { let hash = executed.block.hash(); - let parent_hash = executed.block.parent_hash; - let block_number = executed.block.number; + let parent_hash = executed.block.parent_hash(); + let block_number = executed.block.number(); if self.blocks_by_hash.contains_key(&hash) { return; @@ -180,11 +181,11 @@ impl TreeState { /// ## Returns /// /// The removed block and the block hashes of its children. - fn remove_by_hash(&mut self, hash: B256) -> Option<(ExecutedBlock, HashSet)> { + fn remove_by_hash(&mut self, hash: B256) -> Option<(ExecutedBlock, HashSet)> { let executed = self.blocks_by_hash.remove(&hash)?; // Remove this block from collection of children of its parent block. - let parent_entry = self.parent_to_child.entry(executed.block.parent_hash); + let parent_entry = self.parent_to_child.entry(executed.block.parent_hash()); if let hash_map::Entry::Occupied(mut entry) = parent_entry { entry.get_mut().remove(&hash); @@ -197,7 +198,7 @@ impl TreeState { let children = self.parent_to_child.remove(&hash).unwrap_or_default(); // Remove this block from `blocks_by_number`. 
- let block_number_entry = self.blocks_by_number.entry(executed.block.number); + let block_number_entry = self.blocks_by_number.entry(executed.block.number()); if let btree_map::Entry::Occupied(mut entry) = block_number_entry { // We have to find the index of the block since it exists in a vec if let Some(index) = entry.get().iter().position(|b| b.block.hash() == hash) { @@ -221,7 +222,7 @@ } while let Some(executed) = self.blocks_by_hash.get(&current_block) { - current_block = executed.block.parent_hash; + current_block = executed.block.parent_hash(); if current_block == hash { return true } @@ -249,14 +250,14 @@ // upper bound let mut current_block = self.current_canonical_head.hash; while let Some(executed) = self.blocks_by_hash.get(&current_block) { - current_block = executed.block.parent_hash; - if executed.block.number <= upper_bound { + current_block = executed.block.parent_hash(); + if executed.block.number() <= upper_bound { debug!(target: "engine::tree", num_hash=?executed.block.num_hash(), "Attempting to remove block walking back from the head"); if let Some((removed, _)) = self.remove_by_hash(executed.block.hash()) { debug!(target: "engine::tree", num_hash=?removed.block.num_hash(), "Removed block walking back from the head"); // finally, move the trie updates self.persisted_trie_updates - .insert(removed.block.hash(), (removed.block.number, removed.trie)); + .insert(removed.block.hash(), (removed.block.number(), removed.trie)); } } } @@ -466,11 +467,14 @@ pub enum TreeAction { /// /// This type is responsible for processing engine API requests, maintaining the canonical state and /// emitting events. -pub struct EngineApiTreeHandler { +pub struct EngineApiTreeHandler +where + T: EngineTypes, +{ provider: P, executor_provider: E, consensus: Arc, - payload_validator: ExecutionPayloadValidator, + payload_validator: V, /// Keeps track of internals such as executed and buffered blocks. state: EngineApiTreeState, /// The half for sending messages to the engine. @@ -506,10 +510,12 @@ pub struct EngineApiTreeHandler { invalid_block_hook: Box, /// The engine API variant of this handler engine_kind: EngineApiKind, + /// Captures the types the engine operates on. + _primitives: PhantomData, } -impl std::fmt::Debug - for EngineApiTreeHandler +impl std::fmt::Debug + for EngineApiTreeHandler { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EngineApiTreeHandler") @@ -532,13 +538,19 @@ impl std::fmt::Debug } } -impl EngineApiTreeHandler +impl EngineApiTreeHandler where - P: DatabaseProviderFactory + BlockReader + StateProviderFactory + StateReader + Clone + 'static, + N: NodePrimitives, + P: DatabaseProviderFactory + + BlockReader + + StateProviderFactory + + StateReader + + Clone + + 'static,

::Provider: BlockReader, E: BlockExecutorProvider, T: EngineTypes, - Spec: Send + Sync + EthereumHardforks + 'static, + V: EngineValidator, { /// Creates a new [`EngineApiTreeHandler`]. #[allow(clippy::too_many_arguments)] @@ -546,7 +558,7 @@ where provider: P, executor_provider: E, consensus: Arc, - payload_validator: ExecutionPayloadValidator, + payload_validator: V, outgoing: UnboundedSender, state: EngineApiTreeState, canonical_in_memory_state: CanonicalInMemoryState, @@ -576,6 +588,7 @@ incoming_tx, invalid_block_hook: Box::new(NoopInvalidBlockHook), engine_kind, + _primitives: Default::default(), } } @@ -594,7 +607,7 @@ where provider: P, executor_provider: E, consensus: Arc, - payload_validator: ExecutionPayloadValidator, + payload_validator: V, persistence: PersistenceHandle, payload_builder: PayloadBuilderHandle, canonical_in_memory_state: CanonicalInMemoryState, @@ -1205,8 +1218,17 @@ match request { EngineApiRequest::InsertExecutedBlock(block) => { debug!(target: "engine::tree", block=?block.block().num_hash(), "inserting already executed block"); + let now = Instant::now(); + let sealed_block = block.block.clone(); self.state.tree_state.insert_executed(block); self.metrics.engine.inserted_already_executed_blocks.increment(1); + + self.emit_event(EngineApiEvent::BeaconConsensus( + BeaconConsensusEngineEvent::CanonicalBlockAdded( + sealed_block, + now.elapsed(), + ), + )); } EngineApiRequest::Beacon(request) => { match request { @@ -1247,11 +1269,11 @@ } BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { let output = self.on_new_payload(payload, sidecar); - if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(|e| { - reth_beacon_consensus::BeaconOnNewPayloadError::Internal( - Box::new(e), - ) - })) { + if let Err(err) = + tx.send(output.map(|o| o.outcome).map_err(|e| { + BeaconOnNewPayloadError::Internal(Box::new(e)) + })) + { error!(target: "engine::tree", "Failed to send event: {err:?}"); self.metrics .engine @@ -1537,8 +1559,8 @@ .ok_or_else(|| ProviderError::HeaderNotFound(hash.into()))?; let execution_output = self .provider - .get_state(block.number)? - .ok_or_else(|| ProviderError::StateForNumberNotFound(block.number))?; + .get_state(block.number())? + .ok_or_else(|| ProviderError::StateForNumberNotFound(block.number()))?; let hashed_state = execution_output.hash_state_slow(); Ok(Some(ExecutedBlock { @@ -2265,7 +2287,7 @@ self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); debug!(target: "engine::tree", ?root_elapsed, block=?sealed_block.num_hash(), "Calculated state root"); - let executed = ExecutedBlock { + let executed: ExecutedBlock = ExecutedBlock { block: sealed_block.clone(), senders: Arc::new(block.senders), execution_output: Arc::new(ExecutionOutcome::from((output, block_number))), @@ -2510,12 +2532,10 @@ state: ForkchoiceState, version: EngineApiMessageVersion, ) -> OnForkChoiceUpdated { - // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp - // of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held - // client software MUST respond with -38003: `Invalid payload attributes` and MUST NOT - // begin a payload build process. In such an event, the forkchoiceState update MUST NOT - be rolled back.
- if attrs.timestamp() <= head.timestamp { + if let Err(err) = + self.payload_validator.validate_payload_attributes_against_header(&attrs, head) + { + warn!(target: "engine::tree", %err, ?head, "Invalid payload attributes"); return OnForkChoiceUpdated::invalid_payload_attributes() } @@ -2597,15 +2617,17 @@ pub enum AdvancePersistenceError { mod tests { use super::*; use crate::persistence::PersistenceAction; - use alloy_primitives::{Bytes, Sealable}; + use alloy_primitives::Bytes; use alloy_rlp::Decodable; use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar}; use assert_matches::assert_matches; - use reth_beacon_consensus::{EthBeaconConsensus, ForkchoiceStatus}; + use reth_beacon_consensus::EthBeaconConsensus; use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; - use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_engine_primitives::ForkchoiceStatus; + use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm::test_utils::MockExecutorProvider; + use reth_primitives::{BlockExt, EthPrimitives}; use reth_provider::test_utils::MockEthProvider; use reth_rpc_types_compat::engine::{block_to_payload_v1, payload::block_to_payload_v3}; use reth_trie::updates::TrieUpdates; @@ -2670,8 +2692,13 @@ mod tests { } struct TestHarness { - tree: - EngineApiTreeHandler, + tree: EngineApiTreeHandler< + EthPrimitives, + MockEthProvider, + MockExecutorProvider, + EthEngineTypes, + EthereumEngineValidator, + >, to_tree_tx: Sender>>, from_tree_rx: UnboundedReceiver, blocks: Vec, @@ -2705,13 +2732,12 @@ mod tests { let provider = MockEthProvider::default(); let executor_provider = MockExecutorProvider::default(); - let payload_validator = ExecutionPayloadValidator::new(chain_spec.clone()); + let payload_validator = EthereumEngineValidator::new(chain_spec.clone()); let (from_tree_tx, from_tree_rx) = unbounded_channel(); - let sealed = chain_spec.genesis_header().clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + let header = chain_spec.genesis_header().clone(); + let header = SealedHeader::seal(header); let engine_api_tree_state = EngineApiTreeState::new(10, 10, header.num_hash()); let canonical_in_memory_state = CanonicalInMemoryState::with_head(header, None, None); @@ -2931,7 +2957,7 @@ mod tests { EngineApiEvent::BeaconConsensus( BeaconConsensusEngineEvent::CanonicalBlockAdded(block, _), ) => { - assert!(block.hash() == expected_hash); + assert_eq!(block.hash(), expected_hash); } _ => panic!("Unexpected event: {:#?}", event), } diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index dc039d418eba..602e87a63dbe 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -1,19 +1,29 @@ //! State root task related functionality. 
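The rewrite of `root.rs` below replaces the polled `Future` with a dedicated thread draining a single message channel. A toy model of that control flow, with simplified stand-ins for `StateRootMessage` and the trie types (the real variants carry proofs and state updates):

```rust
use std::{sync::mpsc, thread};

// Simplified stand-ins for the StateRootMessage variants defined below.
enum Msg {
    StateUpdate(u64),
    ProofCalculated { sequence: u64 },
    RootCalculated,
}

fn main() {
    let (tx, rx) = mpsc::channel();

    // The real task owns the receiver on its own spawned thread.
    let worker = thread::spawn(move || {
        while let Ok(msg) = rx.recv() {
            match msg {
                Msg::StateUpdate(n) => println!("dispatch proof for update {n}"),
                Msg::ProofCalculated { sequence } => println!("proof {sequence} ready"),
                Msg::RootCalculated => println!("intermediate root computed"),
            }
        }
        // All senders dropped: the loop ends and the final root is returned.
    });

    tx.send(Msg::StateUpdate(0)).unwrap();
    tx.send(Msg::ProofCalculated { sequence: 0 }).unwrap();
    tx.send(Msg::RootCalculated).unwrap();
    drop(tx);
    worker.join().unwrap();
}
```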
-use futures::Stream; -use pin_project::pin_project; -use reth_provider::providers::ConsistentDbView; -use reth_trie::{updates::TrieUpdates, TrieInput}; -use reth_trie_parallel::parallel_root::ParallelStateRootError; -use revm_primitives::{EvmState, B256}; +use alloy_primitives::map::{HashMap, HashSet}; +use reth_provider::{ + providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, +}; +use reth_trie::{ + proof::Proof, updates::TrieUpdates, HashedPostState, HashedStorage, MultiProof, Nibbles, + TrieInput, +}; +use reth_trie_db::DatabaseProof; +use reth_trie_parallel::root::ParallelStateRootError; +use reth_trie_sparse::{SparseStateTrie, SparseStateTrieResult, SparseTrieError}; +use revm_primitives::{keccak256, EvmState, B256}; use std::{ - future::Future, - pin::Pin, - sync::{mpsc, Arc}, - task::{Context, Poll}, + collections::BTreeMap, + sync::{ + mpsc::{self, Receiver, Sender}, + Arc, + }, + time::{Duration, Instant}, }; -use tokio_stream::wrappers::UnboundedReceiverStream; -use tracing::debug; +use tracing::{debug, error, trace}; + +/// The level below which the sparse trie hashes are calculated in [`update_sparse_trie`]. +const SPARSE_TRIE_INCREMENTAL_LEVEL: usize = 2; /// Result of the state root calculation pub(crate) type StateRootResult = Result<(B256, TrieUpdates), ParallelStateRootError>; @@ -28,12 +38,116 @@ pub(crate) struct StateRootHandle { #[allow(dead_code)] impl StateRootHandle { + /// Creates a new handle from a receiver. + pub(crate) const fn new(rx: mpsc::Receiver) -> Self { + Self { rx } + } + /// Waits for the state root calculation to complete. pub(crate) fn wait_for_result(self) -> StateRootResult { self.rx.recv().expect("state root task was dropped without sending result") } } +/// Common configuration for state root tasks +#[derive(Debug)] +pub(crate) struct StateRootConfig { + /// View over the state in the database. + pub consistent_view: ConsistentDbView, + /// Latest trie input. + pub input: Arc, +} + +/// Messages used internally by the state root task +#[derive(Debug)] +#[allow(dead_code)] +pub(crate) enum StateRootMessage { + /// New state update from transaction execution + StateUpdate(EvmState), + /// Proof calculation completed for a specific state update + ProofCalculated { + /// The calculated proof + proof: MultiProof, + /// The state update that was used to calculate the proof + state_update: HashedPostState, + /// The index of this proof in the sequence of state updates + sequence_number: u64, + }, + /// State root calculation completed + RootCalculated { + /// The updated sparse trie + trie: Box, + /// Time taken to calculate the root + elapsed: Duration, + }, +} + +/// Handle to track proof calculation ordering +#[derive(Debug, Default)] +pub(crate) struct ProofSequencer { + /// The next proof sequence number to be produced. + next_sequence: u64, + /// The next sequence number expected to be delivered. 
+ next_to_deliver: u64, + /// Buffer for out-of-order proofs and corresponding state updates + pending_proofs: BTreeMap, +} + +impl ProofSequencer { + /// Creates a new proof sequencer + pub(crate) fn new() -> Self { + Self::default() + } + + /// Gets the next sequence number and increments the counter + pub(crate) fn next_sequence(&mut self) -> u64 { + let seq = self.next_sequence; + self.next_sequence += 1; + seq + } + + /// Adds a proof with the corresponding state update and returns all sequential proofs and state + /// updates if we have a continuous sequence + pub(crate) fn add_proof( + &mut self, + sequence: u64, + proof: MultiProof, + state_update: HashedPostState, + ) -> Vec<(MultiProof, HashedPostState)> { + if sequence >= self.next_to_deliver { + self.pending_proofs.insert(sequence, (proof, state_update)); + } + + // return early if we don't have the next expected proof + if !self.pending_proofs.contains_key(&self.next_to_deliver) { + return Vec::new() + } + + let mut consecutive_proofs = Vec::with_capacity(self.pending_proofs.len()); + let mut current_sequence = self.next_to_deliver; + + // keep collecting proofs and state updates as long as we have consecutive sequence numbers + while let Some((proof, state_update)) = self.pending_proofs.remove(¤t_sequence) { + consecutive_proofs.push((proof, state_update)); + current_sequence += 1; + + // if we don't have the next number, stop collecting + if !self.pending_proofs.contains_key(¤t_sequence) { + break; + } + } + + self.next_to_deliver += consecutive_proofs.len() as u64; + + consecutive_proofs + } + + /// Returns true if we still have pending proofs + pub(crate) fn has_pending(&self) -> bool { + !self.pending_proofs.is_empty() + } +} + /// Standalone task that receives a transaction state stream and updates relevant /// data structures to calculate state root. /// @@ -42,83 +156,631 @@ impl StateRootHandle { /// fetches the proofs for relevant accounts from the database and reveal them /// to the tree. /// Then it updates relevant leaves according to the result of the transaction. -#[pin_project] +#[derive(Debug)] pub(crate) struct StateRootTask { - /// View over the state in the database. - consistent_view: ConsistentDbView, - /// Incoming state updates. - #[pin] - state_stream: UnboundedReceiverStream, - /// Latest trie input. - input: Arc, + /// Task configuration. + config: StateRootConfig, + /// Receiver for state root related messages. + rx: Receiver, + /// Sender for state root related messages. + tx: Sender, + /// Proof targets that have been already fetched. + fetched_proof_targets: HashMap>, + /// Proof sequencing handler. + proof_sequencer: ProofSequencer, + /// The sparse trie used for the state root calculation. If [`None`], then update is in + /// progress. + sparse_trie: Option>, } #[allow(dead_code)] impl StateRootTask where - Factory: Send + 'static, + Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, { - /// Creates a new `StateRootTask`. 
- pub(crate) const fn new( - consistent_view: ConsistentDbView, - input: Arc, - state_stream: UnboundedReceiverStream, + /// Creates a new state root task with the unified message channel + pub(crate) fn new( + config: StateRootConfig, + tx: Sender, + rx: Receiver, ) -> Self { - Self { consistent_view, state_stream, input } + Self { + config, + rx, + tx, + fetched_proof_targets: Default::default(), + proof_sequencer: ProofSequencer::new(), + sparse_trie: Some(Box::new(SparseStateTrie::default().with_updates(true))), + } } /// Spawns the state root task and returns a handle to await its result. pub(crate) fn spawn(self) -> StateRootHandle { - let (tx, rx) = mpsc::channel(); + let (tx, rx) = mpsc::sync_channel(1); + std::thread::Builder::new() + .name("State Root Task".to_string()) + .spawn(move || { + debug!(target: "engine::tree", "Starting state root task"); + let result = self.run(); + let _ = tx.send(result); + }) + .expect("failed to spawn state root thread"); - // Spawn the task that will process state updates and calculate the root - tokio::spawn(async move { - debug!(target: "engine::tree", "Starting state root task"); - let result = self.await; - let _ = tx.send(result); - }); - - StateRootHandle { rx } + StateRootHandle::new(rx) } /// Handles state updates. + /// + /// Returns proof targets derived from the state update. fn on_state_update( - _view: &ConsistentDbView, - _input: &Arc, - _state: EvmState, - ) { - // TODO: calculate hashed state update and dispatch proof gathering for it. + view: ConsistentDbView, + input: Arc, + update: EvmState, + fetched_proof_targets: &HashMap>, + proof_sequence_number: u64, + state_root_message_sender: Sender, + ) -> HashMap> { + let mut hashed_state_update = HashedPostState::default(); + for (address, account) in update { + if account.is_touched() { + let hashed_address = keccak256(address); + + let destroyed = account.is_selfdestructed(); + let info = if account.is_empty() { None } else { Some(account.info.into()) }; + hashed_state_update.accounts.insert(hashed_address, info); + + let mut changed_storage_iter = account + .storage + .into_iter() + .filter_map(|(slot, value)| { + value + .is_changed() + .then(|| (keccak256(B256::from(slot)), value.present_value)) + }) + .peekable(); + if destroyed || changed_storage_iter.peek().is_some() { + hashed_state_update.storages.insert( + hashed_address, + HashedStorage::from_iter(destroyed, changed_storage_iter), + ); + } + } + } + + let proof_targets = get_proof_targets(&hashed_state_update, fetched_proof_targets); + + // Dispatch proof gathering for this state update + let targets = proof_targets.clone(); + rayon::spawn(move || { + let provider = match view.provider_ro() { + Ok(provider) => provider, + Err(error) => { + error!(target: "engine::root", ?error, "Could not get provider"); + return; + } + }; + + // TODO: replace with parallel proof + let result = Proof::overlay_multiproof( + provider.tx_ref(), + // TODO(alexey): this clone can be expensive, we should avoid it + input.as_ref().clone(), + targets, + ); + match result { + Ok(proof) => { + let _ = state_root_message_sender.send(StateRootMessage::ProofCalculated { + proof, + state_update: hashed_state_update, + sequence_number: proof_sequence_number, + }); + } + Err(e) => { + error!(target: "engine::root", error = ?e, "Could not calculate multiproof"); + } + } + }); + + proof_targets } -} -impl Future for StateRootTask -where - Factory: Send + 'static, -{ - type Output = StateRootResult; + /// Handler for new proof calculated, aggregates all 
the existing sequential proofs. + fn on_proof( + &mut self, + sequence_number: u64, + proof: MultiProof, + state_update: HashedPostState, + ) -> Option<(MultiProof, HashedPostState)> { + let ready_proofs = self.proof_sequencer.add_proof(sequence_number, proof, state_update); + + if ready_proofs.is_empty() { + None + } else { + // Merge all ready proofs and state updates + ready_proofs.into_iter().reduce(|mut acc, (proof, state_update)| { + acc.0.extend(proof); + acc.1.extend(state_update); + acc + }) + } + } + + /// Spawns root calculation with the current state and proofs. + fn spawn_root_calculation(&mut self, state: HashedPostState, multiproof: MultiProof) { + let Some(trie) = self.sparse_trie.take() else { return }; + + trace!( + target: "engine::root", + account_proofs = multiproof.account_subtree.len(), + storage_proofs = multiproof.storages.len(), + "Spawning root calculation" + ); + + // TODO(alexey): store proof targets in `ProofSequencer` to avoid recomputing them + let targets = get_proof_targets(&state, &HashMap::default()); - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); + let tx = self.tx.clone(); + rayon::spawn(move || { + let result = update_sparse_trie(trie, multiproof, targets, state); + match result { + Ok((trie, elapsed)) => { + trace!( + target: "engine::root", + ?elapsed, + "Root calculation completed, sending result" + ); + let _ = tx.send(StateRootMessage::RootCalculated { trie, elapsed }); + } + Err(e) => { + error!(target: "engine::root", error = ?e, "Could not calculate state root"); + } + } + }); + } + + fn run(mut self) -> StateRootResult { + let mut current_state_update = HashedPostState::default(); + let mut current_multiproof = MultiProof::default(); + let mut updates_received = 0; + let mut proofs_processed = 0; + let mut roots_calculated = 0; - // Process all items until the stream is closed loop { - match this.state_stream.as_mut().poll_next(cx) { - Poll::Ready(Some(state)) => { - Self::on_state_update(this.consistent_view, this.input, state); + match self.rx.recv() { + Ok(message) => match message { + StateRootMessage::StateUpdate(update) => { + updates_received += 1; + trace!( + target: "engine::root", + len = update.len(), + total_updates = updates_received, + "Received new state update" + ); + let targets = Self::on_state_update( + self.config.consistent_view.clone(), + self.config.input.clone(), + update, + &self.fetched_proof_targets, + self.proof_sequencer.next_sequence(), + self.tx.clone(), + ); + for (address, slots) in targets { + self.fetched_proof_targets.entry(address).or_default().extend(slots) + } + } + StateRootMessage::ProofCalculated { proof, state_update, sequence_number } => { + proofs_processed += 1; + trace!( + target: "engine::root", + sequence = sequence_number, + total_proofs = proofs_processed, + "Processing calculated proof" + ); + + if let Some((combined_proof, combined_state_update)) = + self.on_proof(sequence_number, proof, state_update) + { + if self.sparse_trie.is_none() { + current_multiproof.extend(combined_proof); + current_state_update.extend(combined_state_update); + } else { + self.spawn_root_calculation(combined_state_update, combined_proof); + } + } + } + StateRootMessage::RootCalculated { trie, elapsed } => { + roots_calculated += 1; + trace!( + target: "engine::root", + ?elapsed, + roots_calculated, + proofs = proofs_processed, + updates = updates_received, + "Computed intermediate root" + ); + self.sparse_trie = Some(trie); + + let has_new_proofs =
!current_multiproof.account_subtree.is_empty() || + !current_multiproof.storages.is_empty(); + let all_proofs_received = proofs_processed >= updates_received; + let no_pending = !self.proof_sequencer.has_pending(); + + trace!( + target: "engine::root", + has_new_proofs, + all_proofs_received, + no_pending, + "State check" + ); + + // only spawn new calculation if we have accumulated new proofs + if has_new_proofs { + trace!( + target: "engine::root", + account_proofs = current_multiproof.account_subtree.len(), + storage_proofs = current_multiproof.storages.len(), + "Spawning subsequent root calculation" + ); + self.spawn_root_calculation( + std::mem::take(&mut current_state_update), + std::mem::take(&mut current_multiproof), + ); + } else if all_proofs_received && no_pending { + debug!( + target: "engine::root", + total_updates = updates_received, + total_proofs = proofs_processed, + roots_calculated, + "All proofs processed, ending calculation" + ); + let mut trie = self + .sparse_trie + .take() + .expect("sparse trie update should not be in progress"); + let root = trie.root().expect("sparse trie should be revealed"); + let trie_updates = trie + .take_trie_updates() + .expect("sparse trie should have updates retention enabled"); + return Ok((root, trie_updates)); + } + } + }, + Err(_) => { + // this means our internal message channel is closed, which shouldn't happen + // in normal operation since we hold both ends + error!( + target: "engine::root", + "Internal message channel closed unexpectedly" + ); + return Err(ParallelStateRootError::Other( + "Internal message channel closed unexpectedly".into(), + )); } - Poll::Ready(None) => { - // stream closed, return final result - return Poll::Ready(Ok((B256::default(), TrieUpdates::default()))); + } + } + } +} + +fn get_proof_targets( + state_update: &HashedPostState, + fetched_proof_targets: &HashMap>, +) -> HashMap> { + state_update + .accounts + .keys() + .filter(|hashed_address| !fetched_proof_targets.contains_key(*hashed_address)) + .map(|hashed_address| (*hashed_address, HashSet::default())) + .chain(state_update.storages.iter().map(|(hashed_address, storage)| { + let fetched_storage_proof_targets = fetched_proof_targets.get(hashed_address); + ( + *hashed_address, + storage + .storage + .keys() + .filter(|slot| { + !fetched_storage_proof_targets + .is_some_and(|targets| targets.contains(*slot)) + }) + .copied() + .collect(), + ) + })) + .collect() +} + +/// Updates the sparse trie with the given proofs and state, and returns the updated trie and the +/// time it took. +fn update_sparse_trie( + mut trie: Box, + multiproof: MultiProof, + targets: HashMap>, + state: HashedPostState, +) -> SparseStateTrieResult<(Box, Duration)> { + let started_at = Instant::now(); + + // Reveal new accounts and storage slots. + trie.reveal_multiproof(targets, multiproof)?; + + // Update storage slots with new values and calculate storage roots. 
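+ // Note: `wiped` means the account was self-destructed, so the whole storage trie is cleared before any surviving writes are applied. + // Zero values below are leaf removals, non-zero values are leaf updates, and the trailing `root()` call recomputes the per-account storage root.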
+ for (address, storage) in state.storages { + let storage_trie = trie.storage_trie_mut(&address).ok_or(SparseTrieError::Blind)?; + + if storage.wiped { + storage_trie.wipe(); + } + + for (slot, value) in storage.storage { + let slot_nibbles = Nibbles::unpack(slot); + if value.is_zero() { + // TODO: handle blinded node error + storage_trie.remove_leaf(&slot_nibbles)?; + } else { + storage_trie + .update_leaf(slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec())?; + } + } + + storage_trie.root(); + } + + // Update accounts with new values + for (address, account) in state.accounts { + trie.update_account(address, account.unwrap_or_default())?; + } + + trie.calculate_below_level(SPARSE_TRIE_INCREMENTAL_LEVEL); + let elapsed = started_at.elapsed(); + + Ok((trie, elapsed)) +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_primitives::{Account as RethAccount, StorageEntry}; + use reth_provider::{ + providers::ConsistentDbView, test_utils::create_test_provider_factory, HashingWriter, + }; + use reth_testing_utils::generators::{self, Rng}; + use reth_trie::{test_utils::state_root, TrieInput}; + use revm_primitives::{ + Account as RevmAccount, AccountInfo, AccountStatus, Address, EvmState, EvmStorageSlot, + HashMap, B256, KECCAK_EMPTY, U256, + }; + use std::sync::Arc; + + fn convert_revm_to_reth_account(revm_account: &RevmAccount) -> RethAccount { + RethAccount { + balance: revm_account.info.balance, + nonce: revm_account.info.nonce, + bytecode_hash: if revm_account.info.code_hash == KECCAK_EMPTY { + None + } else { + Some(revm_account.info.code_hash) + }, + } + } + + fn create_mock_state_updates(num_accounts: usize, updates_per_account: usize) -> Vec<EvmState> { + let mut rng = generators::rng(); + let all_addresses: Vec<Address> = (0..num_accounts).map(|_| rng.gen()).collect();
+ let mut updates = Vec::new(); + + for _ in 0..updates_per_account { + let num_accounts_in_update = rng.gen_range(1..=num_accounts); + let mut state_update = EvmState::default(); + + let selected_addresses = &all_addresses[0..num_accounts_in_update]; + + for &address in selected_addresses { + let mut storage = HashMap::default(); + if rng.gen_bool(0.7) { + for _ in 0..rng.gen_range(1..10) { + let slot = U256::from(rng.gen::<u64>()); + storage.insert( + slot, + EvmStorageSlot::new_changed(U256::ZERO, U256::from(rng.gen::<u64>())), + ); + } } - Poll::Pending => { - return Poll::Pending; + + let account = RevmAccount { + info: AccountInfo { + balance: U256::from(rng.gen::<u64>()), + nonce: rng.gen::<u64>(), + code_hash: KECCAK_EMPTY, + code: Some(Default::default()), + }, + storage, + status: AccountStatus::Touched, + }; + + state_update.insert(address, account); + } + + updates.push(state_update); + } + + updates + } + + #[test] + fn test_state_root_task() { + reth_tracing::init_test_tracing(); + + let factory = create_test_provider_factory(); + let (tx, rx) = std::sync::mpsc::channel(); + + let state_updates = create_mock_state_updates(10, 10); + let mut hashed_state = HashedPostState::default(); + let mut accumulated_state: HashMap<Address, (RethAccount, HashMap<B256, U256>)> = + HashMap::default(); + + { + let provider_rw = factory.provider_rw().expect("failed to get provider"); + + for update in &state_updates { + let account_updates = update.iter().map(|(address, account)| { + (*address, Some(convert_revm_to_reth_account(account))) + }); + provider_rw + .insert_account_for_hashing(account_updates) + .expect("failed to insert accounts"); + + let storage_updates = update.iter().map(|(address, account)| { + let storage_entries = account.storage.iter().map(|(slot, value)| { + StorageEntry { key: B256::from(*slot), value: value.present_value } + }); + (*address, storage_entries) + }); + provider_rw + .insert_storage_for_hashing(storage_updates) + .expect("failed to insert storage"); + } + provider_rw.commit().expect("failed to commit changes"); + } + + for update in &state_updates { + for (address, account) in update { + let hashed_address = keccak256(*address); + + if account.is_touched() { + let destroyed = account.is_selfdestructed(); + hashed_state.accounts.insert( + hashed_address, + if destroyed || account.is_empty() { + None + } else { + Some(account.info.clone().into()) + }, + ); + + if destroyed || !account.storage.is_empty() { + let storage = account + .storage + .iter() + .filter(|&(_slot, value)| (!destroyed && value.is_changed())) + .map(|(slot, value)| { + (keccak256(B256::from(*slot)), value.present_value) + }); + hashed_state + .storages + .insert(hashed_address, HashedStorage::from_iter(destroyed, storage)); + } } + + let storage: HashMap<B256, U256> = account + .storage + .iter() + .map(|(k, v)| (B256::from(*k), v.present_value)) + .collect(); + + let entry = accumulated_state.entry(*address).or_default(); + entry.0 = convert_revm_to_reth_account(account); + entry.1.extend(storage); } } - // TODO: - // * keep track of proof calculation - // * keep track of intermediate root computation - // * return final state root result + let config = StateRootConfig { + consistent_view: ConsistentDbView::new(factory, None), + input: Arc::new(TrieInput::from_state(hashed_state)), + }; + let task = StateRootTask::new(config, tx.clone(), rx); + let handle = task.spawn(); + + for update in state_updates { + tx.send(StateRootMessage::StateUpdate(update)).expect("failed to send state"); + } + drop(tx); + + let
(root_from_task, _) = handle.wait_for_result().expect("task failed"); + let root_from_base = state_root(accumulated_state); + + assert_eq!( + root_from_task, root_from_base, + "State root mismatch: task={root_from_task:?}, base={root_from_base:?}" + ); + } + + #[test] + fn test_add_proof_in_sequence() { + let mut sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof2 = MultiProof::default(); + sequencer.next_sequence = 2; + + let ready = sequencer.add_proof(0, proof1, HashedPostState::default()); + assert_eq!(ready.len(), 1); + assert!(!sequencer.has_pending()); + + let ready = sequencer.add_proof(1, proof2, HashedPostState::default()); + assert_eq!(ready.len(), 1); + assert!(!sequencer.has_pending()); + } + + #[test] + fn test_add_proof_out_of_order() { + let mut sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof2 = MultiProof::default(); + let proof3 = MultiProof::default(); + sequencer.next_sequence = 3; + + let ready = sequencer.add_proof(2, proof3, HashedPostState::default()); + assert_eq!(ready.len(), 0); + assert!(sequencer.has_pending()); + + let ready = sequencer.add_proof(0, proof1, HashedPostState::default()); + assert_eq!(ready.len(), 1); + assert!(sequencer.has_pending()); + + let ready = sequencer.add_proof(1, proof2, HashedPostState::default()); + assert_eq!(ready.len(), 2); + assert!(!sequencer.has_pending()); + } + + #[test] + fn test_add_proof_with_gaps() { + let mut sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof3 = MultiProof::default(); + sequencer.next_sequence = 3; + + let ready = sequencer.add_proof(0, proof1, HashedPostState::default()); + assert_eq!(ready.len(), 1); + + let ready = sequencer.add_proof(2, proof3, HashedPostState::default()); + assert_eq!(ready.len(), 0); + assert!(sequencer.has_pending()); + } + + #[test] + fn test_add_proof_duplicate_sequence() { + let mut sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof2 = MultiProof::default(); + + let ready = sequencer.add_proof(0, proof1, HashedPostState::default()); + assert_eq!(ready.len(), 1); + + let ready = sequencer.add_proof(0, proof2, HashedPostState::default()); + assert_eq!(ready.len(), 0); + assert!(!sequencer.has_pending()); + } + + #[test] + fn test_add_proof_batch_processing() { + let mut sequencer = ProofSequencer::new(); + let proofs: Vec<_> = (0..5).map(|_| MultiProof::default()).collect(); + sequencer.next_sequence = 5; + + sequencer.add_proof(4, proofs[4].clone(), HashedPostState::default()); + sequencer.add_proof(2, proofs[2].clone(), HashedPostState::default()); + sequencer.add_proof(1, proofs[1].clone(), HashedPostState::default()); + sequencer.add_proof(3, proofs[3].clone(), HashedPostState::default()); + + let ready = sequencer.add_proof(0, proofs[0].clone(), HashedPostState::default()); + assert_eq!(ready.len(), 5); + assert!(!sequencer.has_pending()); } } diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml index 07aa40165e2b..6eb22340ec10 100644 --- a/crates/engine/util/Cargo.toml +++ b/crates/engine/util/Cargo.toml @@ -17,7 +17,6 @@ reth-errors.workspace = true reth-fs-util.workspace = true reth-rpc-types-compat.workspace = true reth-engine-primitives.workspace = true -reth-beacon-consensus.workspace = true reth-payload-validator.workspace = true reth-evm.workspace = true reth-revm.workspace = true @@ -51,8 +50,7 @@ tracing.workspace = true [features] optimism = [ - "reth-beacon-consensus/optimism", -
"reth-primitives/optimism", - "reth-provider/optimism", - "revm-primitives/optimism" + "reth-primitives/optimism", + "reth-provider/optimism", + "revm-primitives/optimism", ] diff --git a/crates/engine/util/src/engine_store.rs b/crates/engine/util/src/engine_store.rs index 6b584f0c1f55..efed83159b31 100644 --- a/crates/engine/util/src/engine_store.rs +++ b/crates/engine/util/src/engine_store.rs @@ -2,8 +2,7 @@ use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState}; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::BeaconEngineMessage; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; use reth_fs_util as fs; use serde::{Deserialize, Serialize}; use std::{ diff --git a/crates/engine/util/src/lib.rs b/crates/engine/util/src/lib.rs index 26dc817fc958..42746c376cf1 100644 --- a/crates/engine/util/src/lib.rs +++ b/crates/engine/util/src/lib.rs @@ -1,8 +1,7 @@ //! Collection of various stream utilities for consensus engine. use futures::Stream; -use reth_beacon_consensus::BeaconEngineMessage; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; use reth_payload_validator::ExecutionPayloadValidator; use std::path::PathBuf; use tokio_util::either::Either; diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 69831389a658..20e2b21446ae 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -1,14 +1,16 @@ //! Stream wrapper that simulates reorgs. -use alloy_consensus::Transaction; +use alloy_consensus::{Header, Transaction}; use alloy_primitives::U256; use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, }; use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; use itertools::Either; -use reth_beacon_consensus::{BeaconEngineMessage, BeaconOnNewPayloadError, OnForkChoiceUpdated}; -use reth_engine_primitives::{EngineApiMessageVersion, EngineTypes}; +use reth_engine_primitives::{ + BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, + OnForkChoiceUpdated, +}; use reth_errors::{BlockExecutionError, BlockValidationError, RethError, RethResult}; use reth_ethereum_forks::EthereumHardforks; use reth_evm::{ @@ -16,7 +18,7 @@ use reth_evm::{ ConfigureEvm, }; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{proofs, Block, BlockBody, Header, Receipt, Receipts}; +use reth_primitives::{proofs, Block, BlockBody, BlockExt, Receipt, Receipts}; use reth_provider::{BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, @@ -25,9 +27,7 @@ use reth_revm::{ }; use reth_rpc_types_compat::engine::payload::block_to_payload; use reth_trie::HashedPostState; -use revm_primitives::{ - calc_excess_blob_gas, BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, -}; +use revm_primitives::{calc_excess_blob_gas, EVMError, EnvWithHandlerCfg}; use std::{ collections::VecDeque, future::Future, @@ -107,7 +107,7 @@ impl<S, Engine, Provider, Evm, Spec> Stream for EngineReorg<S, Engine, Provider, Evm, Spec> S: Stream<Item = BeaconEngineMessage<Engine>>, Engine: EngineTypes, - Provider: BlockReader + StateProviderFactory, + Provider: BlockReader<Block = reth_primitives::Block> + StateProviderFactory, Evm: ConfigureEvm<Header = Header>
, Spec: EthereumHardforks, { @@ -254,7 +254,7 @@ fn create_reorg_head( next_sidecar: ExecutionPayloadSidecar, ) -> RethResult<(ExecutionPayload, ExecutionPayloadSidecar)> where - Provider: BlockReader + StateProviderFactory, + Provider: BlockReader<Block = reth_primitives::Block> + StateProviderFactory, Evm: ConfigureEvm<Header = Header>
, Spec: EthereumHardforks, { @@ -296,9 +296,7 @@ where .build(); // Configure environments - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, &reorg_target.header, U256::MAX); + let (cfg, block_env) = evm_config.cfg_and_block_env(&reorg_target.header, U256::MAX); let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); let mut evm = evm_config.evm_with_env(&mut state, env); @@ -337,7 +335,7 @@ where // Treat error as fatal Err(error) => { return Err(RethError::Execution(BlockExecutionError::Validation( - BlockValidationError::EVM { hash: tx.hash, error: Box::new(error) }, + BlockValidationError::EVM { hash: tx.hash(), error: Box::new(error) }, ))) } }; @@ -375,7 +373,7 @@ where // and 4788 contract call state.merge_transitions(BundleRetention::PlainState); - let outcome = ExecutionOutcome::new( + let outcome: ExecutionOutcome = ExecutionOutcome::new( state.take_bundle(), Receipts::from(vec![receipts]), reorg_target.number, diff --git a/crates/engine/util/src/skip_fcu.rs b/crates/engine/util/src/skip_fcu.rs index adadfb595f89..daa39ad572d9 100644 --- a/crates/engine/util/src/skip_fcu.rs +++ b/crates/engine/util/src/skip_fcu.rs @@ -1,8 +1,7 @@ //! Stream wrapper that skips specified number of FCUs. use futures::{Stream, StreamExt}; -use reth_beacon_consensus::{BeaconEngineMessage, OnForkChoiceUpdated}; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes, OnForkChoiceUpdated}; use std::{ pin::Pin, task::{ready, Context, Poll}, diff --git a/crates/engine/util/src/skip_new_payload.rs b/crates/engine/util/src/skip_new_payload.rs index 16f2e98197c9..ea89bdf6d106 100644 --- a/crates/engine/util/src/skip_new_payload.rs +++ b/crates/engine/util/src/skip_new_payload.rs @@ -2,8 +2,7 @@ use alloy_rpc_types_engine::{PayloadStatus, PayloadStatusEnum}; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::BeaconEngineMessage; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; use std::{ pin::Pin, task::{ready, Context, Poll}, diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml index 226989f319db..60572b459791 100644 --- a/crates/ethereum-forks/Cargo.toml +++ b/crates/ethereum-forks/Cargo.toml @@ -23,7 +23,7 @@ crc = "3" # misc serde = { workspace = true, features = ["derive"], optional = true } -thiserror = { workspace = true, default-features = false } +thiserror.workspace = true dyn-clone.workspace = true rustc-hash = { workspace = true, optional = true } diff --git a/crates/ethereum/consensus/Cargo.toml b/crates/ethereum/consensus/Cargo.toml index bace4195ca63..8e6158ff46cb 100644 --- a/crates/ethereum/consensus/Cargo.toml +++ b/crates/ethereum/consensus/Cargo.toml @@ -15,6 +15,7 @@ workspace = true reth-chainspec.workspace = true reth-consensus-common.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-consensus.workspace = true # alloy diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 07c2a71e8cf2..96dfbae3f166 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -8,19 +8,18 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_consensus::{Header, 
EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; -use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; +use reth_consensus::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; use reth_consensus_common::validation::{ validate_4844_header_standalone, validate_against_parent_4844, validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, - validate_against_parent_timestamp, validate_block_pre_execution, validate_header_base_fee, - validate_header_extradata, validate_header_gas, -}; -use reth_primitives::{ - constants::MINIMUM_GAS_LIMIT, BlockWithSenders, Header, SealedBlock, SealedHeader, + validate_against_parent_timestamp, validate_block_pre_execution, validate_body_against_header, + validate_header_base_fee, validate_header_extradata, validate_header_gas, }; +use reth_primitives::{BlockBody, BlockWithSenders, SealedBlock, SealedHeader}; +use reth_primitives_traits::constants::MINIMUM_GAS_LIMIT; use std::{fmt::Debug, sync::Arc, time::SystemTime}; /// The bound divisor of the gas limit, used in update calculations. @@ -93,10 +92,34 @@ impl EthBeaconConsensus impl Consensus for EthBeaconConsensus +{ + fn validate_body_against_header( + &self, + body: &BlockBody, + header: &SealedHeader, + ) -> Result<(), ConsensusError> { + validate_body_against_header(body, header) + } + + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { + validate_block_pre_execution(block, &self.chain_spec) + } + + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + input: PostExecutionInput<'_>, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) + } +} + +impl HeaderValidator + for EthBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { - validate_header_gas(header)?; - validate_header_base_fee(header, &self.chain_spec)?; + validate_header_gas(header.header())?; + validate_header_base_fee(header.header(), &self.chain_spec)?; // EIP-4895: Beacon chain push withdrawals as operations if self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) && @@ -111,7 +134,7 @@ impl Consensu // Ensures that EIP-4844 fields are valid once cancun is active. if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - validate_4844_header_standalone(header)?; + validate_4844_header_standalone(header.header())?; } else if header.blob_gas_used.is_some() { return Err(ConsensusError::BlobGasUsedUnexpected) } else if header.excess_blob_gas.is_some() { @@ -136,19 +159,23 @@ impl Consensu header: &SealedHeader, parent: &SealedHeader, ) -> Result<(), ConsensusError> { - validate_against_parent_hash_number(header, parent)?; + validate_against_parent_hash_number(header.header(), parent)?; - validate_against_parent_timestamp(header, parent)?; + validate_against_parent_timestamp(header.header(), parent.header())?; // TODO Check difficulty increment between parent and self // Ace age did increment it by some formula that we need to follow. 
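+ // the gas limit may change by at most parent.gas_limit / GAS_LIMIT_BOUND_DIVISOR (1024) per block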
self.validate_against_parent_gas_limit(header, parent)?; - validate_against_parent_eip1559_base_fee(header, parent, &self.chain_spec)?; + validate_against_parent_eip1559_base_fee( + header.header(), + parent.header(), + &self.chain_spec, + )?; // ensure that the blob gas fields for this block if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - validate_against_parent_4844(header, parent)?; + validate_against_parent_4844(header.header(), parent.header())?; } Ok(()) @@ -211,24 +238,12 @@ impl Consensus Ok(()) } - - fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { - validate_block_pre_execution(block, &self.chain_spec) - } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) - } } #[cfg(test)] mod tests { use super::*; - use alloy_primitives::{Sealable, B256}; + use alloy_primitives::B256; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use reth_primitives::proofs; @@ -313,16 +328,14 @@ mod tests { // that the header is valid let chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build()); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(1337), withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); + }; assert_eq!( - EthBeaconConsensus::new(chain_spec).validate_header(&SealedHeader::new(header, seal)), + EthBeaconConsensus::new(chain_spec).validate_header(&SealedHeader::seal(header)), Ok(()) ); } diff --git a/crates/ethereum/engine-primitives/Cargo.toml b/crates/ethereum/engine-primitives/Cargo.toml index e9bcd4256865..f019f6e5f2a6 100644 --- a/crates/ethereum/engine-primitives/Cargo.toml +++ b/crates/ethereum/engine-primitives/Cargo.toml @@ -16,6 +16,7 @@ reth-chainspec.workspace = true reth-primitives.workspace = true reth-engine-primitives.workspace = true reth-payload-primitives.workspace = true +reth-payload-validator.workspace = true reth-rpc-types-compat.workspace = true alloy-rlp.workspace = true reth-chain-state.workspace = true diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 5addf2a18c51..beefd54ca05b 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -11,6 +11,7 @@ mod payload; use std::sync::Arc; +use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; pub use alloy_rpc_types_engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, ExecutionPayloadV1, PayloadAttributes as EthPayloadAttributes, @@ -22,6 +23,8 @@ use reth_payload_primitives::{ validate_version_specific_fields, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes, PayloadTypes, }; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::{Block, SealedBlock}; /// The types used in the default mainnet ethereum beacon consensus engine. #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] @@ -63,13 +66,19 @@ impl PayloadTypes for EthPayloadTypes { /// Validator for the ethereum engine API. #[derive(Debug, Clone)] pub struct EthereumEngineValidator { - chain_spec: Arc<ChainSpec>, + inner: ExecutionPayloadValidator<ChainSpec>, } impl EthereumEngineValidator { /// Instantiates a new validator.
pub const fn new(chain_spec: Arc<ChainSpec>) -> Self { - Self { chain_spec } + Self { inner: ExecutionPayloadValidator::new(chain_spec) } + } + + /// Returns the chain spec used by the validator. + #[inline] + fn chain_spec(&self) -> &ChainSpec { + self.inner.chain_spec() } } @@ -77,12 +86,14 @@ impl<Types> EngineValidator<Types> for EthereumEngineValidator where Types: EngineTypes, { + type Block = Block; + fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, payload_or_attrs: PayloadOrAttributes<'_, EthPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, payload_or_attrs) + validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs) } fn ensure_well_formed_attributes( &self, version: EngineApiMessageVersion, attributes: &EthPayloadAttributes, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, attributes.into()) + validate_version_specific_fields(self.chain_spec(), version, attributes.into()) + } + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result<SealedBlock, PayloadError> { + self.inner.ensure_well_formed_payload(payload, sidecar) } } diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index fa14e260d651..8642df89698d 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -18,13 +18,13 @@ use reth_evm::{ }, state_change::post_block_balance_increments, system_calls::{OnStateHook, SystemCaller}, - ConfigureEvm, + ConfigureEvm, TxEnvOverrides, }; use reth_primitives::{BlockWithSenders, Receipt}; use reth_revm::db::State; use revm_primitives::{ db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, + EnvWithHandlerCfg, ResultAndState, U256, }; /// Factory for [`EthExecutionStrategy`]. @@ -83,6 +83,8 @@ where chain_spec: Arc<ChainSpec>, /// How to create an EVM. evm_config: EvmConfig, + /// Optional overrides for the transactions environment. + tx_env_overrides: Option<Box<dyn TxEnvOverrides>>, /// Current state for block execution. state: State<DB>, /// Utility to call system smart contracts. @@ -96,7 +98,7 @@ where /// Creates a new [`EthExecutionStrategy`] pub fn new(state: State<DB>, chain_spec: Arc<ChainSpec>, evm_config: EvmConfig) -> Self { let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); - Self { state, chain_spec, evm_config, system_caller } + Self { state, chain_spec, evm_config, system_caller, tx_env_overrides: None } } } @@ -115,10 +117,7 @@ where header: &alloy_consensus::Header, total_difficulty: U256, ) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); - + let (cfg, block_env) = self.evm_config.cfg_and_block_env(header, total_difficulty); EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) } } @@ -130,6 +129,10 @@ where { type Error = BlockExecutionError; + fn init(&mut self, tx_env_overrides: Box<dyn TxEnvOverrides>) { + self.tx_env_overrides = Some(tx_env_overrides); + } + fn apply_pre_execution_changes( &mut self, block: &BlockWithSenders, @@ -172,6 +175,10 @@ where self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); + if let Some(tx_env_overrides) = &mut self.tx_env_overrides { + tx_env_overrides.apply(evm.tx_mut()); + } + // Execute transaction.
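+ // database errors are converted and boxed here so a failing transaction surfaces as a block validation error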
let result_and_state = evm.transact().map_err(move |err| { let new_err = err.map_db_err(|e| e.into()); @@ -309,7 +316,9 @@ mod tests { BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider, Executor, }; use reth_execution_types::BlockExecutionOutput; - use reth_primitives::{public_key_to_address, Account, Block, BlockBody, Transaction}; + use reth_primitives::{ + public_key_to_address, Account, Block, BlockBody, BlockExt, Transaction, + }; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, }; diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 1c340c0927ba..8042562357f4 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -20,10 +20,11 @@ extern crate alloc; use core::convert::Infallible; use alloc::{sync::Arc, vec::Vec}; +use alloy_consensus::Header; use alloy_primitives::{Address, Bytes, TxKind, U256}; use reth_chainspec::{ChainSpec, Head}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; -use reth_primitives::{transaction::FillTxEnv, Header, TransactionSigned}; +use reth_primitives::{transaction::FillTxEnv, TransactionSigned}; use revm_primitives::{ AnalysisKind, BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, Env, SpecId, TxEnv, }; @@ -54,7 +55,7 @@ impl EthEvmConfig { } /// Returns the chain spec associated with this configuration. - pub fn chain_spec(&self) -> &ChainSpec { + pub const fn chain_spec(&self) -> &Arc<ChainSpec> { &self.chain_spec } } @@ -195,31 +196,22 @@ impl ConfigureEvm for EthEvmConfig { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::constants::KECCAK_EMPTY; + use alloy_consensus::{constants::KECCAK_EMPTY, Header}; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; use reth_chainspec::{Chain, ChainSpec, MAINNET}; use reth_evm::execute::ProviderError; - use reth_primitives::{ - revm_primitives::{BlockEnv, CfgEnv, SpecId}, - Header, - }; use reth_revm::{ db::{CacheDB, EmptyDBTyped}, inspectors::NoOpInspector, + primitives::{BlockEnv, CfgEnv, SpecId}, JournaledState, }; - use revm_primitives::{CfgEnvWithHandlerCfg, EnvWithHandlerCfg, HandlerCfg}; + use revm_primitives::{EnvWithHandlerCfg, HandlerCfg}; use std::collections::HashSet; #[test] fn test_fill_cfg_and_block_env() { - // Create a new configuration environment - let mut cfg_env = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); - - // Create a default block environment - let mut block_env = BlockEnv::default(); - // Create a default header let header = Header::default(); @@ -238,12 +230,8 @@ mod tests { // Use the `EthEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec, // Header, and total difficulty - EthEvmConfig::new(Arc::new(chain_spec.clone())).fill_cfg_and_block_env( - &mut cfg_env, - &mut block_env, - &header, - total_difficulty, - ); + let (cfg_env, _) = EthEvmConfig::new(Arc::new(chain_spec.clone())) + .cfg_and_block_env(&header, total_difficulty); // Assert that the chain ID in the `cfg_env` is correctly set to the chain ID of the // ChainSpec diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 69bbeeb5b433..e6f47483b586 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -32,6 +32,8 @@ reth-primitives.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-trie-db.workspace = true +alloy-consensus.workspace = true + # revm with required ethereum features revm = { workspace = true, features =
["secp256k1", "blst", "c-kzg"] } @@ -39,41 +41,47 @@ revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } eyre.workspace = true [dev-dependencies] -reth.workspace = true reth-chainspec.workspace = true reth-db.workspace = true reth-exex.workspace = true reth-node-api.workspace = true +reth-node-core.workspace = true +reth-payload-primitives.workspace = true reth-e2e-test-utils.workspace = true +reth-rpc-eth-api.workspace = true reth-tasks.workspace = true -futures.workspace = true + alloy-primitives.workspace = true -alloy-genesis.workspace = true -tokio.workspace = true -serde_json.workspace = true alloy-consensus.workspace = true alloy-provider.workspace = true -rand.workspace = true +alloy-genesis.workspace = true alloy-signer.workspace = true alloy-eips.workspace = true alloy-sol-types.workspace = true alloy-contract.workspace = true alloy-rpc-types-beacon.workspace = true +alloy-rpc-types-engine.workspace = true +alloy-rpc-types-eth.workspace = true + +futures.workspace = true +tokio.workspace = true +serde_json.workspace = true +rand.workspace = true [features] default = [] test-utils = [ - "reth-node-builder/test-utils", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-network/test-utils", - "reth-payload-builder/test-utils", - "reth-primitives/test-utils", - "reth-revm/test-utils", - "reth-db/test-utils", - "reth-provider/test-utils", - "reth-transaction-pool/test-utils", - "reth-trie-db/test-utils", - "revm/test-utils", - "reth-evm/test-utils" + "reth-node-builder/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-network/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils", + "reth-evm/test-utils", ] diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index e545a3c73c4d..a536b9dff907 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -2,6 +2,7 @@ use std::sync::Arc; +use alloy_consensus::Header; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; @@ -12,8 +13,7 @@ use reth_evm::execute::BasicBlockExecutorProvider; use reth_evm_ethereum::execute::EthExecutionStrategyFactory; use reth_network::{NetworkHandle, PeersInfo}; use reth_node_api::{ - AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodePrimitives, - NodeTypesWithDB, + AddOnsContext, ConfigureEvm, EngineValidator, FullNodeComponents, NodeTypesWithDB, }; use reth_node_builder::{ components::{ @@ -25,8 +25,8 @@ use reth_node_builder::{ BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::{Block, Header, Receipt, TransactionSigned}; -use reth_provider::CanonStateSubscriptions; +use reth_primitives::EthPrimitives; +use reth_provider::{CanonStateSubscriptions, EthStorage}; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ @@ -37,16 +37,6 @@ use reth_trie_db::MerklePatriciaTrie; use crate::{EthEngineTypes, EthEvmConfig}; -/// Ethereum primitive types. 
-#[derive(Debug)] -pub struct EthPrimitives; - -impl NodePrimitives for EthPrimitives { - type Block = Block; - type SignedTx = TransactionSigned; - type Receipt = Receipt; -} - /// Type configuration for a regular Ethereum node. #[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] @@ -63,7 +53,7 @@ impl EthereumNode { EthereumConsensusBuilder, > where - Node: FullNodeTypes<Types: NodeTypesWithEngine<Engine = EthEngineTypes, ChainSpec = ChainSpec>>, + Node: FullNodeTypes<Types: NodeTypesWithEngine<Engine = EthEngineTypes, ChainSpec = ChainSpec, Primitives = EthPrimitives>>, <Node::Types as NodeTypesWithEngine>::Engine: PayloadTypes< BuiltPayload = EthBuiltPayload, PayloadAttributes = EthPayloadAttributes, @@ -84,6 +74,7 @@ impl NodeTypes for EthereumNode { type Primitives = EthPrimitives; type ChainSpec = ChainSpec; type StateCommitment = MerklePatriciaTrie; + type Storage = EthStorage; } impl NodeTypesWithEngine for EthereumNode { @@ -104,7 +95,13 @@ pub type EthereumAddOns = RpcAddOns< impl<Types, N> Node<N> for EthereumNode where - Types: NodeTypesWithDB + NodeTypesWithEngine<Engine = EthEngineTypes, ChainSpec = ChainSpec>, + Types: NodeTypesWithDB + + NodeTypesWithEngine< + Engine = EthEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Storage = EthStorage, + >, N: FullNodeTypes<Types = Types>, { type ComponentsBuilder = ComponentsBuilder< @@ -167,7 +164,7 @@ pub struct EthereumPoolBuilder { impl<Types, Node> PoolBuilder<Node> for EthereumPoolBuilder where - Types: NodeTypesWithEngine<ChainSpec = ChainSpec>, + Types: NodeTypesWithEngine<ChainSpec = ChainSpec, Primitives = EthPrimitives>, Node: FullNodeTypes<Types = Types>, { type Pool = EthTransactionPool<Node::Provider, DiskFileBlobStore>; @@ -243,7 +240,7 @@ impl EthereumPayloadBuilder { pool: Pool, ) -> eyre::Result<PayloadBuilderHandle<Types::Engine>> where - Types: NodeTypesWithEngine<ChainSpec = ChainSpec>, + Types: NodeTypesWithEngine<ChainSpec = ChainSpec, Primitives = EthPrimitives>, Node: FullNodeTypes<Types = Types>, Evm: ConfigureEvm<Header = Header>
, Pool: TransactionPool + Unpin + 'static, @@ -281,7 +278,7 @@ impl EthereumPayloadBuilder { impl<Types, Node, Pool> PayloadServiceBuilder<Node, Pool> for EthereumPayloadBuilder where - Types: NodeTypesWithEngine<ChainSpec = ChainSpec>, + Types: NodeTypesWithEngine<ChainSpec = ChainSpec, Primitives = EthPrimitives>, Node: FullNodeTypes<Types = Types>, Pool: TransactionPool + Unpin + 'static, Types::Engine: PayloadTypes< @@ -307,7 +304,7 @@ pub struct EthereumNetworkBuilder { impl<Node, Pool> NetworkBuilder<Node, Pool> for EthereumNetworkBuilder where - Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec>>, + Node: FullNodeTypes<Types: NodeTypes<ChainSpec = ChainSpec, Primitives = EthPrimitives>>, Pool: TransactionPool + Unpin + 'static, { async fn build_network( diff --git a/crates/ethereum/node/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs index 976727bc8158..111810514504 100644 --- a/crates/ethereum/node/tests/e2e/blobs.rs +++ b/crates/ethereum/node/tests/e2e/blobs.rs @@ -1,21 +1,17 @@ -use std::sync::Arc; - +use crate::utils::eth_payload_attributes; use alloy_consensus::constants::MAINNET_GENESIS_HASH; use alloy_genesis::Genesis; -use reth::{ - args::RpcServerArgs, - builder::{NodeBuilder, NodeConfig, NodeHandle}, - rpc::types::engine::PayloadStatusEnum, - tasks::TaskManager, -}; +use alloy_rpc_types_engine::PayloadStatusEnum; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{ node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, }; +use reth_node_builder::{NodeBuilder, NodeHandle}; +use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; +use reth_tasks::TaskManager; use reth_transaction_pool::TransactionPool; - -use crate::utils::eth_payload_attributes; +use std::sync::Arc; #[tokio::test] async fn can_handle_blobs() -> eyre::Result<()> { diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index b6d0ffcfaaaf..325575998c26 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -1,17 +1,18 @@ -use std::sync::Arc; - +use alloy_eips::eip2718::Encodable2718; use alloy_genesis::Genesis; use alloy_primitives::{b256, hex}; use futures::StreamExt; -use reth::{args::DevArgs, rpc::api::eth::helpers::EthTransactions}; use reth_chainspec::ChainSpec; -use reth_node_api::FullNodeComponents; +use reth_node_api::{FullNodeComponents, FullNodePrimitives, NodeTypes}; use reth_node_builder::{ rpc::RethRpcAddOns, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, }; +use reth_node_core::args::DevArgs; use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; use reth_provider::{providers::BlockchainProvider2, CanonStateSubscriptions}; +use reth_rpc_eth_api::helpers::EthTransactions; use reth_tasks::TaskManager; +use std::sync::Arc; #[tokio::test] async fn can_run_dev_node() -> eyre::Result<()> { @@ -46,6 +47,7 @@ async fn assert_chain_advances<N, AddOns>(node: FullNode<N, AddOns>) where N: FullNodeComponents, AddOns: RethRpcAddOns<N>, + N::Types: NodeTypes<Primitives: FullNodePrimitives>, { let mut notifications = node.provider.canonical_state_stream(); @@ -63,8 +65,8 @@ where let head = notifications.next().await.unwrap(); - let tx = head.tip().transactions().next().unwrap(); - assert_eq!(tx.hash(), hash); + let tx = &head.tip().transactions()[0]; + assert_eq!(tx.trie_hash(), hash); println!("mined transaction: {hash}"); } diff --git a/crates/ethereum/node/tests/e2e/eth.rs b/crates/ethereum/node/tests/e2e/eth.rs index cb7517c0c932..a91ccf6e391b 100644 --- a/crates/ethereum/node/tests/e2e/eth.rs +++ b/crates/ethereum/node/tests/e2e/eth.rs @@ -1,15 +1,13 @@ use crate::utils::eth_payload_attributes; use alloy_genesis::Genesis; -use reth::{ - args::RpcServerArgs, - builder::{NodeBuilder,
NodeConfig, NodeHandle}, - tasks::TaskManager, -}; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{ node::NodeTestContext, setup, transaction::TransactionTestContext, wallet::Wallet, }; +use reth_node_builder::{NodeBuilder, NodeHandle}; +use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; +use reth_tasks::TaskManager; use std::sync::Arc; #[tokio::test] diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index 5b2a6654fbbd..f8680f47ae3e 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -7,9 +7,9 @@ use alloy_provider::{ }, Provider, ProviderBuilder, SendableTx, }; +use alloy_rpc_types_eth::TransactionRequest; use alloy_signer::SignerSync; use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; -use reth::rpc::types::TransactionRequest; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{setup, setup_engine, transaction::TransactionTestContext}; use reth_node_ethereum::EthereumNode; diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs index b1a11b1b5eb6..54bfbc8205e5 100644 --- a/crates/ethereum/node/tests/e2e/rpc.rs +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -6,17 +6,14 @@ use alloy_rpc_types_beacon::relay::{ BidTrace, BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, SignedBidSubmissionV3, SignedBidSubmissionV4, }; +use alloy_rpc_types_engine::BlobsBundleV1; +use alloy_rpc_types_eth::TransactionRequest; use rand::{rngs::StdRng, Rng, SeedableRng}; -use reth::{ - payload::BuiltPayload, - rpc::{ - compat::engine::payload::block_to_payload_v3, - types::{engine::BlobsBundleV1, TransactionRequest}, - }, -}; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::setup_engine; +use reth_node_core::rpc::compat::engine::payload::block_to_payload_v3; use reth_node_ethereum::EthereumNode; +use reth_payload_primitives::BuiltPayload; use std::sync::Arc; alloy_sol_types::sol! 
{ diff --git a/crates/ethereum/node/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs index 6e534f5dc0ed..c3743de185f5 100644 --- a/crates/ethereum/node/tests/e2e/utils.rs +++ b/crates/ethereum/node/tests/e2e/utils.rs @@ -1,5 +1,5 @@ use alloy_primitives::{Address, B256}; -use reth::rpc::types::engine::PayloadAttributes; +use alloy_rpc_types_engine::PayloadAttributes; use reth_payload_builder::EthPayloadBuilderAttributes; /// Helper function to create a new eth payload attributes diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml index 443e837b2ed1..4e0880d1d153 100644 --- a/crates/ethereum/payload/Cargo.toml +++ b/crates/ethereum/payload/Cargo.toml @@ -18,6 +18,7 @@ reth-revm.workspace = true reth-transaction-pool.workspace = true reth-provider.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-execution-types.workspace = true reth-basic-payload-builder.workspace = true @@ -30,7 +31,6 @@ reth-chainspec.workspace = true # ethereum revm.workspace = true -revm-primitives.workspace = true # alloy alloy-eips.workspace = true diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 87ceb4200b15..43bb04504884 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -9,12 +9,12 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow(clippy::useless_let_if_seq)] -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::Requests, merge::BEACON_NONCE}; use alloy_primitives::U256; use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, - PayloadConfig, WithdrawalsOutcome, + PayloadConfig, }; use reth_chain_state::ExecutedBlock; use reth_chainspec::ChainSpec; @@ -23,25 +23,27 @@ use reth_evm::{system_calls::SystemCaller, ConfigureEvm, NextBlockEnvAttributes} use reth_evm_ethereum::{eip6110::parse_deposits_from_receipts, EthEvmConfig}; use reth_execution_types::ExecutionOutcome; use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes}; -use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ proofs::{self}, - revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, EthereumHardforks, Header, Receipt, + Block, BlockBody, BlockExt, EthereumHardforks, InvalidTransactionError, Receipt, }; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ - noop::NoopTransactionPool, BestTransactions, BestTransactionsAttributes, TransactionPool, - ValidPoolTransaction, + error::InvalidPoolTransactionError, noop::NoopTransactionPool, BestTransactions, + BestTransactionsAttributes, TransactionPool, ValidPoolTransaction, }; use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, State}, - primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState}, + primitives::{ + calc_excess_blob_gas, BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, + InvalidTransaction, ResultAndState, TxEnv, + }, DatabaseCommit, }; -use revm_primitives::{calc_excess_blob_gas, TxEnv}; use std::sync::Arc; use tracing::{debug, trace, warn}; @@ 
-226,7 +228,10 @@ where // we can't fit this transaction into the block, so we need to mark it as invalid // which also removes all dependent transaction from the iterator before we can // continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit(pool_tx.gas_limit(), block_gas_limit), + ); continue } @@ -248,7 +253,13 @@ where // the iterator. This is similar to the gas limit condition // for regular transactions above. trace!(target: "payload_builder", tx=?tx.hash, ?sum_blob_gas_used, ?tx_blob_gas, "skipping blob transaction because it would exceed the max data gas per block"); - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit( + tx_blob_gas, + MAX_DATA_GAS_PER_BLOCK, + ), + ); continue } } @@ -268,7 +279,12 @@ where // if the transaction is invalid, we can skip it and all of its // descendants trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants"); - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported, + ), + ); } continue @@ -354,8 +370,8 @@ where None }; - let WithdrawalsOutcome { withdrawals_root, withdrawals } = - commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals)?; + let withdrawals_root = + commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, &attributes.withdrawals)?; // merge all transitions into bundle state, this would apply the withdrawal balance changes // and 4788 contract call @@ -395,9 +411,11 @@ where // only determine cancun fields when active if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp) { // grab the blob sidecars from the executed txs - blob_sidecars = pool.get_all_blobs_exact( - executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash).collect(), - )?; + blob_sidecars = pool + .get_all_blobs_exact( + executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash()).collect(), + ) + .map_err(PayloadBuilderError::other)?; excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_header.timestamp) { let parent_excess_blob_gas = parent_header.excess_blob_gas.unwrap_or_default(); @@ -436,6 +454,10 @@ where requests_hash, }; + let withdrawals = chain_spec + .is_shanghai_active_at_timestamp(attributes.timestamp) + .then(|| attributes.withdrawals.clone()); + // seal the block let block = Block { header, @@ -443,7 +465,7 @@ where }; let sealed_block = Arc::new(block.seal_slow()); - debug!(target: "payload_builder", ?sealed_block, "sealed built block"); + debug!(target: "payload_builder", id=%attributes.id, sealed_block_header = ?sealed_block.header, "sealed built block"); // create the executed block data let executed = ExecutedBlock { diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index c895110209b6..9d6a616af983 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -30,6 +30,7 @@ revm-primitives.workspace = true # alloy alloy-primitives.workspace = true alloy-eips.workspace = true +alloy-consensus.workspace = true auto_impl.workspace = true futures-util.workspace = true diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index b6af3dee9afd..c7fbad673db1 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -12,12 +12,15 @@ workspace = true [dependencies] reth-primitives.workspace = true 
+reth-primitives-traits.workspace = true reth-execution-errors.workspace = true +reth-trie-common = { workspace = true, optional = true } reth-trie.workspace = true revm.workspace = true # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true @@ -35,22 +38,30 @@ default = ["std"] optimism = ["reth-primitives/optimism", "revm/optimism"] serde = [ "dep:serde", - "reth-trie/serde", + "rand/serde", "revm/serde", "alloy-eips/serde", "alloy-primitives/serde", - "rand/serde" + "reth-primitives-traits/serde", + "alloy-consensus/serde", + "reth-trie/serde", + "reth-trie-common?/serde" ] serde-bincode-compat = [ + "serde", + "reth-trie-common/serde-bincode-compat", "reth-primitives/serde-bincode-compat", - "reth-trie/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", "serde_with", - "alloy-eips/serde-bincode-compat" + "alloy-eips/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat", ] std = [ "reth-primitives/std", "alloy-eips/std", "alloy-primitives/std", "revm/std", - "serde?/std" + "serde?/std", + "reth-primitives-traits/std", + "alloy-consensus/std", ] diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index dc633e2d7ab7..1767a7f43f65 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -2,14 +2,16 @@ use crate::ExecutionOutcome; use alloc::{borrow::Cow, collections::BTreeMap}; -use alloy_eips::{eip1898::ForkBlock, BlockNumHash}; +use alloy_consensus::BlockHeader; +use alloy_eips::{eip1898::ForkBlock, eip2718::Encodable2718, BlockNumHash}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash}; use core::{fmt, ops::RangeInclusive}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::{ - Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionSigned, - TransactionSignedEcRecovered, + transaction::SignedTransactionIntoRecoveredExt, SealedBlockFor, SealedBlockWithSenders, + SealedHeader, TransactionSignedEcRecovered, }; +use reth_primitives_traits::{Block, BlockBody, NodePrimitives, SignedTransaction}; use reth_trie::updates::TrieUpdates; use revm::db::BundleState; @@ -25,34 +27,34 @@ use revm::db::BundleState; /// A chain of blocks should not be empty. #[derive(Clone, Debug, Default, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct Chain { +pub struct Chain { /// All blocks in this chain. - blocks: BTreeMap, + blocks: BTreeMap>, /// The outcome of block execution for this chain. /// /// This field contains the state of all accounts after the execution of all blocks in this /// chain, ranging from the [`Chain::first`] block to the [`Chain::tip`] block, inclusive. /// /// Additionally, it includes the individual state changes that led to the current state. - execution_outcome: ExecutionOutcome, + execution_outcome: ExecutionOutcome, /// State trie updates after block is added to the chain. /// NOTE: Currently, trie updates are present only for /// single-block chains that extend the canonical chain. trie_updates: Option, } -impl Chain { +impl Chain { /// Create new Chain from blocks and state. /// /// # Warning /// /// A chain of blocks should not be empty. 
pub fn new( - blocks: impl IntoIterator, - execution_outcome: ExecutionOutcome, + blocks: impl IntoIterator>, + execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { - let blocks = blocks.into_iter().map(|b| (b.number, b)).collect::>(); + let blocks = blocks.into_iter().map(|b| (b.number(), b)).collect::>(); debug_assert!(!blocks.is_empty(), "Chain should have at least one block"); Self { blocks, execution_outcome, trie_updates } @@ -60,25 +62,25 @@ impl Chain { /// Create new Chain from a single block and its state. pub fn from_block( - block: SealedBlockWithSenders, - execution_outcome: ExecutionOutcome, + block: SealedBlockWithSenders, + execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { Self::new([block], execution_outcome, trie_updates) } /// Get the blocks in this chain. - pub const fn blocks(&self) -> &BTreeMap { + pub const fn blocks(&self) -> &BTreeMap> { &self.blocks } /// Consumes the type and only returns the blocks in this chain. - pub fn into_blocks(self) -> BTreeMap { + pub fn into_blocks(self) -> BTreeMap> { self.blocks } /// Returns an iterator over all headers in the block with increasing block numbers. - pub fn headers(&self) -> impl Iterator + '_ { + pub fn headers(&self) -> impl Iterator> + '_ { self.blocks.values().map(|block| block.header.clone()) } @@ -93,12 +95,12 @@ impl Chain { } /// Get execution outcome of this chain - pub const fn execution_outcome(&self) -> &ExecutionOutcome { + pub const fn execution_outcome(&self) -> &ExecutionOutcome { &self.execution_outcome } /// Get mutable execution outcome of this chain - pub fn execution_outcome_mut(&mut self) -> &mut ExecutionOutcome { + pub fn execution_outcome_mut(&mut self) -> &mut ExecutionOutcome { &mut self.execution_outcome } @@ -119,12 +121,15 @@ impl Chain { } /// Returns the block with matching hash. - pub fn block(&self, block_hash: BlockHash) -> Option<&SealedBlock> { + pub fn block(&self, block_hash: BlockHash) -> Option<&SealedBlockFor> { self.block_with_senders(block_hash).map(|block| &block.block) } /// Returns the block with matching hash. - pub fn block_with_senders(&self, block_hash: BlockHash) -> Option<&SealedBlockWithSenders> { + pub fn block_with_senders( + &self, + block_hash: BlockHash, + ) -> Option<&SealedBlockWithSenders> { self.blocks.iter().find_map(|(_num, block)| (block.hash() == block_hash).then_some(block)) } @@ -132,8 +137,8 @@ impl Chain { pub fn execution_outcome_at_block( &self, block_number: BlockNumber, - ) -> Option { - if self.tip().number == block_number { + ) -> Option> { + if self.tip().number() == block_number { return Some(self.execution_outcome.clone()) } @@ -149,31 +154,34 @@ impl Chain { /// 1. The blocks contained in the chain. /// 2. The execution outcome representing the final state. /// 3. The optional trie updates. - pub fn into_inner(self) -> (ChainBlocks<'static>, ExecutionOutcome, Option) { + pub fn into_inner( + self, + ) -> (ChainBlocks<'static, N::Block>, ExecutionOutcome, Option) { (ChainBlocks { blocks: Cow::Owned(self.blocks) }, self.execution_outcome, self.trie_updates) } /// Destructure the chain into its inner components: /// 1. A reference to the blocks contained in the chain. /// 2. A reference to the execution outcome representing the final state. 
- pub const fn inner(&self) -> (ChainBlocks<'_>, &ExecutionOutcome) { + pub const fn inner(&self) -> (ChainBlocks<'_, N::Block>, &ExecutionOutcome) { (ChainBlocks { blocks: Cow::Borrowed(&self.blocks) }, &self.execution_outcome) } /// Returns an iterator over all the receipts of the blocks in the chain. - pub fn block_receipts_iter(&self) -> impl Iterator>> + '_ { + pub fn block_receipts_iter(&self) -> impl Iterator>> + '_ { self.execution_outcome.receipts().iter() } /// Returns an iterator over all blocks in the chain with increasing block number. - pub fn blocks_iter(&self) -> impl Iterator + '_ { + pub fn blocks_iter(&self) -> impl Iterator> + '_ { self.blocks().iter().map(|block| block.1) } /// Returns an iterator over all blocks and their receipts in the chain. pub fn blocks_and_receipts( &self, - ) -> impl Iterator>)> + '_ { + ) -> impl Iterator, &Vec>)> + '_ + { self.blocks_iter().zip(self.block_receipts_iter()) } @@ -181,7 +189,7 @@ impl Chain { #[track_caller] pub fn fork_block(&self) -> ForkBlock { let first = self.first(); - ForkBlock { number: first.number.saturating_sub(1), hash: first.parent_hash } + ForkBlock { number: first.number().saturating_sub(1), hash: first.parent_hash() } } /// Get the first block in this chain. @@ -190,7 +198,7 @@ impl Chain { /// /// If chain doesn't have any blocks. #[track_caller] - pub fn first(&self) -> &SealedBlockWithSenders { + pub fn first(&self) -> &SealedBlockWithSenders { self.blocks.first_key_value().expect("Chain should have at least one block").1 } @@ -200,7 +208,7 @@ impl Chain { /// /// If chain doesn't have any blocks. #[track_caller] - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &SealedBlockWithSenders { self.blocks.last_key_value().expect("Chain should have at least one block").1 } @@ -215,11 +223,11 @@ impl Chain { /// /// If chain doesn't have any blocks. pub fn range(&self) -> RangeInclusive { - self.first().number..=self.tip().number + self.first().number()..=self.tip().number() } /// Get all receipts for the given block. - pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { + pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { let num = self.block_number(block_hash)?; self.execution_outcome.receipts_by_block(num).iter().map(Option::as_ref).collect() } @@ -227,15 +235,18 @@ impl Chain { /// Get all receipts with attachment. /// /// Attachment includes block number, block hash, transaction hash and transaction index. - pub fn receipts_with_attachment(&self) -> Vec { + pub fn receipts_with_attachment(&self) -> Vec> + where + N::SignedTx: Encodable2718, + { let mut receipt_attach = Vec::with_capacity(self.blocks().len()); for ((block_num, block), receipts) in self.blocks().iter().zip(self.execution_outcome.receipts().iter()) { let mut tx_receipts = Vec::with_capacity(receipts.len()); - for (tx, receipt) in block.body.transactions().zip(receipts.iter()) { + for (tx, receipt) in block.body.transactions().iter().zip(receipts.iter()) { tx_receipts.push(( - tx.hash(), + tx.trie_hash(), receipt.as_ref().expect("receipts have not been pruned").clone(), )); } @@ -249,10 +260,10 @@ impl Chain { /// This method assumes that blocks attachment to the chain has already been validated. 
pub fn append_block( &mut self, - block: SealedBlockWithSenders, - execution_outcome: ExecutionOutcome, + block: SealedBlockWithSenders, + execution_outcome: ExecutionOutcome, ) { - self.blocks.insert(block.number, block); + self.blocks.insert(block.number(), block); self.execution_outcome.extend(execution_outcome); self.trie_updates.take(); // reset } @@ -300,7 +311,7 @@ impl Chain { /// /// If chain doesn't have any blocks. #[track_caller] - pub fn split(mut self, split_at: ChainSplitTarget) -> ChainSplit { + pub fn split(mut self, split_at: ChainSplitTarget) -> ChainSplit { let chain_tip = *self.blocks.last_entry().expect("chain is never empty").key(); let block_number = match split_at { ChainSplitTarget::Hash(block_hash) => { @@ -372,22 +383,22 @@ impl fmt::Display for DisplayBlocksChain<'_> { /// All blocks in the chain #[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct ChainBlocks<'a> { - blocks: Cow<'a, BTreeMap>, +pub struct ChainBlocks<'a, B: Block> { + blocks: Cow<'a, BTreeMap>>, } -impl ChainBlocks<'_> { +impl>> ChainBlocks<'_, B> { /// Creates a consuming iterator over all blocks in the chain with increasing block number. /// /// Note: this always yields at least one block. #[inline] - pub fn into_blocks(self) -> impl Iterator { + pub fn into_blocks(self) -> impl Iterator> { self.blocks.into_owned().into_values() } /// Creates an iterator over all blocks in the chain with increasing block number. #[inline] - pub fn iter(&self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator)> { self.blocks.iter() } @@ -397,7 +408,7 @@ impl ChainBlocks<'_> { /// /// Chains always have at least one block. #[inline] - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &SealedBlockWithSenders { self.blocks.last_key_value().expect("Chain should have at least one block").1 } @@ -407,21 +418,21 @@ impl ChainBlocks<'_> { /// /// Chains always have at least one block. #[inline] - pub fn first(&self) -> &SealedBlockWithSenders { + pub fn first(&self) -> &SealedBlockWithSenders { self.blocks.first_key_value().expect("Chain should have at least one block").1 } /// Returns an iterator over all transactions in the chain. #[inline] - pub fn transactions(&self) -> impl Iterator + '_ { - self.blocks.values().flat_map(|block| block.body.transactions()) + pub fn transactions(&self) -> impl Iterator::Transaction> + '_ { + self.blocks.values().flat_map(|block| block.body.transactions().iter()) } /// Returns an iterator over all transactions and their senders. 
#[inline] pub fn transactions_with_sender( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator::Transaction)> + '_ { self.blocks.values().flat_map(|block| block.transactions_with_sender()) } @@ -431,20 +442,21 @@ impl ChainBlocks<'_> { #[inline] pub fn transactions_ecrecovered( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator::Transaction>> + '_ + { self.transactions_with_sender().map(|(signer, tx)| tx.clone().with_signer(*signer)) } /// Returns an iterator over all transaction hashes in the block #[inline] pub fn transaction_hashes(&self) -> impl Iterator + '_ { - self.blocks.values().flat_map(|block| block.transactions().map(|tx| tx.hash)) + self.blocks.values().flat_map(|block| block.transactions().iter().map(|tx| tx.trie_hash())) } } -impl IntoIterator for ChainBlocks<'_> { - type Item = (BlockNumber, SealedBlockWithSenders); - type IntoIter = std::collections::btree_map::IntoIter; +impl IntoIterator for ChainBlocks<'_, B> { + type Item = (BlockNumber, SealedBlockWithSenders); + type IntoIter = std::collections::btree_map::IntoIter>; fn into_iter(self) -> Self::IntoIter { #[allow(clippy::unnecessary_to_owned)] @@ -454,11 +466,11 @@ impl IntoIterator for ChainBlocks<'_> { /// Used to hold receipts and their attachment. #[derive(Default, Clone, Debug, PartialEq, Eq)] -pub struct BlockReceipts { +pub struct BlockReceipts { /// Block identifier pub block: BlockNumHash, /// Transaction identifier and receipt. - pub tx_receipts: Vec<(TxHash, Receipt)>, + pub tx_receipts: Vec<(TxHash, T)>, } /// The target block where the chain should be split. @@ -484,42 +496,40 @@ impl From for ChainSplitTarget { /// Result of a split chain. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum ChainSplit { +pub enum ChainSplit { /// Chain is not split. Pending chain is returned. /// Given block split is higher than last block. /// Or in case of split by hash when hash is unknown. - NoSplitPending(Chain), + NoSplitPending(Chain), /// Chain is not split. Canonical chain is returned. /// Given block split is lower than first block. - NoSplitCanonical(Chain), + NoSplitCanonical(Chain), /// Chain is split into two: `[canonical]` and `[pending]` /// The target of this chain split [`ChainSplitTarget`] belongs to the `canonical` chain. Split { /// Contains lower block numbers that are considered canonicalized. It ends with /// the [`ChainSplitTarget`] block. The state of this chain is now empty and no longer /// usable. - canonical: Chain, + canonical: Chain, /// Right contains all subsequent blocks __after__ the [`ChainSplitTarget`] that are still /// pending. /// /// The state of the original chain is moved here. - pending: Chain, + pending: Chain, }, } /// Bincode-compatible [`Chain`] serde implementation. -#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +#[cfg(feature = "serde-bincode-compat")] pub(super) mod serde_bincode_compat { - use std::collections::BTreeMap; - + use crate::ExecutionOutcome; use alloc::borrow::Cow; use alloy_primitives::BlockNumber; use reth_primitives::serde_bincode_compat::SealedBlockWithSenders; - use reth_trie::serde_bincode_compat::updates::TrieUpdates; + use reth_trie_common::serde_bincode_compat::updates::TrieUpdates; use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; - - use crate::ExecutionOutcome; + use std::collections::BTreeMap; /// Bincode-compatible [`super::Chain`] serde implementation. 
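`ChainBlocks` wraps its map in a `Cow`, so read-only views borrow the chain's blocks while `into_blocks` pays for a clone only if the data is still borrowed. A self-contained sketch of that pattern with simplified stand-in types:

```rust
use std::borrow::Cow;
use std::collections::BTreeMap;

struct View<'a> {
    blocks: Cow<'a, BTreeMap<u64, String>>,
}

impl View<'_> {
    /// Cheap read on the borrowed map.
    fn tip(&self) -> &str {
        self.blocks.last_key_value().expect("never empty").1
    }

    /// Consuming iteration; clones the map only if it was still borrowed.
    fn into_blocks(self) -> impl Iterator<Item = (u64, String)> {
        self.blocks.into_owned().into_iter()
    }
}

fn main() {
    let mut map = BTreeMap::new();
    map.insert(1u64, "block 1".to_string());
    map.insert(2, "block 2".to_string());

    let view = View { blocks: Cow::Borrowed(&map) };
    assert_eq!(view.tip(), "block 2");
    assert_eq!(view.into_blocks().count(), 2);
}
```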
/// @@ -660,7 +670,7 @@ mod tests { #[test] fn chain_append() { - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); let block1_hash = B256::new([0x01; 32]); let block2_hash = B256::new([0x02; 32]); let block3_hash = B256::new([0x03; 32]); @@ -678,7 +688,7 @@ mod tests { block3.set_parent_hash(block2_hash); - let mut chain1 = + let mut chain1: Chain = Chain { blocks: BTreeMap::from([(1, block1), (2, block2)]), ..Default::default() }; let chain2 = @@ -692,7 +702,7 @@ mod tests { #[test] fn test_number_split() { - let execution_outcome1 = ExecutionOutcome::new( + let execution_outcome1: ExecutionOutcome = ExecutionOutcome::new( BundleState::new( vec![( Address::new([2; 20]), @@ -724,13 +734,13 @@ mod tests { vec![], ); - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); let block1_hash = B256::new([15; 32]); block1.set_block_number(1); block1.set_hash(block1_hash); block1.senders.push(Address::new([4; 20])); - let mut block2 = SealedBlockWithSenders::default(); + let mut block2: SealedBlockWithSenders = Default::default(); let block2_hash = B256::new([16; 32]); block2.set_block_number(2); block2.set_hash(block2_hash); @@ -739,7 +749,8 @@ mod tests { let mut block_state_extended = execution_outcome1; block_state_extended.extend(execution_outcome2); - let chain = Chain::new(vec![block1.clone(), block2.clone()], block_state_extended, None); + let chain: Chain = + Chain::new(vec![block1.clone(), block2.clone()], block_state_extended, None); let (split1_execution_outcome, split2_execution_outcome) = chain.execution_outcome.clone().split_at(2); @@ -793,7 +804,7 @@ mod tests { use reth_primitives::{Receipt, Receipts, TxType}; // Create a default SealedBlockWithSenders object - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); // Define block hashes for block1 and block2 let block1_hash = B256::new([0x01; 32]); @@ -838,7 +849,7 @@ mod tests { // Create a Chain object with a BTreeMap of blocks mapped to their block numbers, // including block1_hash and block2_hash, and the execution_outcome - let chain = Chain { + let chain: Chain = Chain { blocks: BTreeMap::from([(10, block1), (11, block2)]), execution_outcome: execution_outcome.clone(), ..Default::default() diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 026e6b37c42c..412269ace9cd 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -1,13 +1,16 @@ -use crate::BlockExecutionOutput; +use std::collections::HashMap; + use alloy_eips::eip7685::Requests; use alloy_primitives::{Address, BlockNumber, Bloom, Log, B256, U256}; -use reth_primitives::{logs_bloom, Account, Bytecode, Receipt, Receipts, StorageEntry}; +use reth_primitives::{logs_bloom, Account, Bytecode, Receipts, StorageEntry}; +use reth_primitives_traits::{receipt::ReceiptExt, Receipt}; use reth_trie::HashedPostState; use revm::{ db::{states::BundleState, BundleAccount}, primitives::AccountInfo, }; -use std::collections::HashMap; + +use crate::BlockExecutionOutput; /// Represents a changed account #[derive(Clone, Copy, Debug, PartialEq, Eq)] @@ -33,7 +36,7 @@ impl ChangedAccount { /// blocks, capturing the resulting state, receipts, and requests following the execution. 
#[derive(Default, Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct ExecutionOutcome { +pub struct ExecutionOutcome { /// Bundle state with reverts. pub bundle: BundleState, /// The collection of receipts. @@ -41,7 +44,7 @@ pub struct ExecutionOutcome { /// The inner vector stores receipts ordered by transaction number. /// /// If receipt is None it means it is pruned. - pub receipts: Receipts, + pub receipts: Receipts, /// First block of bundle state. pub first_block: BlockNumber, /// The collection of EIP-7685 requests. @@ -63,14 +66,14 @@ pub type AccountRevertInit = (Option>, Vec); /// Type used to initialize revms reverts. pub type RevertsInit = HashMap>; -impl ExecutionOutcome { +impl ExecutionOutcome { /// Creates a new `ExecutionOutcome`. /// /// This constructor initializes a new `ExecutionOutcome` instance with the provided /// bundle state, receipts, first block number, and EIP-7685 requests. pub const fn new( bundle: BundleState, - receipts: Receipts, + receipts: Receipts, first_block: BlockNumber, requests: Vec, ) -> Self { @@ -85,7 +88,7 @@ impl ExecutionOutcome { state_init: BundleStateInit, revert_init: RevertsInit, contracts_init: impl IntoIterator, - receipts: Receipts, + receipts: Receipts, first_block: BlockNumber, requests: Vec, ) -> Self { @@ -179,53 +182,29 @@ impl ExecutionOutcome { Some(index as usize) } - /// Returns an iterator over all block logs. - pub fn logs(&self, block_number: BlockNumber) -> Option> { - let index = self.block_number_to_index(block_number)?; - Some(self.receipts[index].iter().filter_map(|r| Some(r.as_ref()?.logs.iter())).flatten()) - } - - /// Return blocks logs bloom - pub fn block_logs_bloom(&self, block_number: BlockNumber) -> Option { - Some(logs_bloom(self.logs(block_number)?)) - } - - /// Returns the receipt root for all recorded receipts. - /// Note: this function calculated Bloom filters for every receipt and created merkle trees - /// of receipt. This is a expensive operation. - pub fn receipts_root_slow(&self, _block_number: BlockNumber) -> Option { - #[cfg(feature = "optimism")] - panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); - #[cfg(not(feature = "optimism"))] - self.receipts.root_slow( - self.block_number_to_index(_block_number)?, - reth_primitives::proofs::calculate_receipt_root_no_memo, - ) - } - /// Returns the receipt root for all recorded receipts. /// Note: this function calculated Bloom filters for every receipt and created merkle trees /// of receipt. This is a expensive operation. pub fn generic_receipts_root_slow( &self, block_number: BlockNumber, - f: impl FnOnce(&[&Receipt]) -> B256, + f: impl FnOnce(&[&T]) -> B256, ) -> Option { self.receipts.root_slow(self.block_number_to_index(block_number)?, f) } /// Returns reference to receipts. - pub const fn receipts(&self) -> &Receipts { + pub const fn receipts(&self) -> &Receipts { &self.receipts } /// Returns mutable reference to receipts. 
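Parameterizing `ExecutionOutcome` with a defaulted type (`T = reth_primitives::Receipt`) keeps existing call sites compiling while letting other node types plug in their own receipt. A self-contained sketch of the default-type-parameter pattern itself, with hypothetical receipt types:

```rust
#[derive(Debug, Default)]
struct EthReceipt {
    cumulative_gas_used: u64,
}

#[derive(Debug, Default)]
struct OpReceipt {
    cumulative_gas_used: u64,
    deposit_nonce: Option<u64>,
}

#[derive(Debug, Default)]
struct Outcome<T = EthReceipt> {
    /// Receipts per block; `None` marks a pruned receipt.
    receipts: Vec<Vec<Option<T>>>,
    first_block: u64,
}

fn main() {
    // Uses the default parameter: this is `Outcome<EthReceipt>`.
    let eth = Outcome { receipts: vec![vec![Some(EthReceipt::default())]], first_block: 1 };
    // Same container, different receipt shape, no duplicated code.
    let op: Outcome<OpReceipt> = Outcome::default();
    println!("{eth:?}\n{op:?}");
}
```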
-    pub fn receipts_mut(&mut self) -> &mut Receipts {
+    pub fn receipts_mut(&mut self) -> &mut Receipts<T> {
         &mut self.receipts
     }

     /// Returns all block receipts
-    pub fn receipts_by_block(&self, block_number: BlockNumber) -> &[Option<Receipt>] {
+    pub fn receipts_by_block(&self, block_number: BlockNumber) -> &[Option<T>] {
         let Some(index) = self.block_number_to_index(block_number) else { return &[] };
         &self.receipts[index]
     }
@@ -277,7 +256,10 @@ impl ExecutionOutcome {
     /// # Panics
     ///
     /// If the target block number is not included in the state block range.
-    pub fn split_at(self, at: BlockNumber) -> (Option<Self>, Self) {
+    pub fn split_at(self, at: BlockNumber) -> (Option<Self>, Self)
+    where
+        T: Clone,
+    {
         if at == self.first_block {
             return (None, self)
         }
@@ -329,7 +311,7 @@ impl ExecutionOutcome {
     }

     /// Create a new instance with updated receipts.
-    pub fn with_receipts(mut self, receipts: Receipts) -> Self {
+    pub fn with_receipts(mut self, receipts: Receipts<T>) -> Self {
         self.receipts = receipts;
         self
     }
@@ -352,8 +334,34 @@ impl ExecutionOutcome {
     }
 }

-impl From<(BlockExecutionOutput<Receipt>, BlockNumber)> for ExecutionOutcome {
-    fn from(value: (BlockExecutionOutput<Receipt>, BlockNumber)) -> Self {
+impl<T: Receipt> ExecutionOutcome<T> {
+    /// Returns an iterator over all block logs.
+    pub fn logs(&self, block_number: BlockNumber) -> Option<impl Iterator<Item = &Log>> {
+        let index = self.block_number_to_index(block_number)?;
+        Some(self.receipts[index].iter().filter_map(|r| Some(r.as_ref()?.logs().iter())).flatten())
+    }
+
+    /// Returns the logs bloom for the given block.
+    pub fn block_logs_bloom(&self, block_number: BlockNumber) -> Option<Bloom> {
+        Some(logs_bloom(self.logs(block_number)?))
+    }
+
+    /// Returns the receipt root for all recorded receipts.
+    /// Note: this function calculates a Bloom filter for every receipt and builds a merkle tree
+    /// of receipts, so it is an expensive operation.
+    pub fn receipts_root_slow(&self, _block_number: BlockNumber) -> Option<B256>
+    where
+        T: ReceiptExt,
+    {
+        #[cfg(feature = "optimism")]
+        panic!("This should not be called in optimism mode.
Use `optimism_receipts_root_slow` instead."); + #[cfg(not(feature = "optimism"))] + self.receipts.root_slow(self.block_number_to_index(_block_number)?, T::receipts_root) + } +} + +impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { + fn from(value: (BlockExecutionOutput, BlockNumber)) -> Self { Self { bundle: value.0.state, receipts: Receipts::from(value.0.receipts), @@ -385,7 +393,7 @@ mod tests { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], @@ -447,7 +455,7 @@ mod tests { fn test_block_number_to_index() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], @@ -482,7 +490,7 @@ mod tests { fn test_get_logs() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], @@ -514,7 +522,7 @@ mod tests { fn test_receipts_by_block() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], @@ -540,7 +548,7 @@ mod tests { // Assert that the receipts for block number 123 match the expected receipts assert_eq!( receipts_by_block, - vec![&Some(Receipt { + vec![&Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], @@ -554,7 +562,7 @@ mod tests { fn test_receipts_len() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], @@ -563,7 +571,7 @@ mod tests { }; // Create an empty Receipts object - let receipts_empty = Receipts { receipt_vec: vec![] }; + let receipts_empty: Receipts = Receipts { receipt_vec: vec![] }; // Define the first block number let first_block = 123; @@ -602,7 +610,7 @@ mod tests { #[cfg(not(feature = "optimism"))] fn test_revert_to() { // Create a random receipt object - let receipt = Receipt { + let receipt = reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], @@ -651,7 +659,7 @@ mod tests { #[cfg(not(feature = "optimism"))] fn test_extend_execution_outcome() { // Create a Receipt object with specific attributes. 
- let receipt = Receipt { + let receipt = reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], @@ -695,7 +703,7 @@ mod tests { #[cfg(not(feature = "optimism"))] fn test_split_at_execution_outcome() { // Create a random receipt object - let receipt = Receipt { + let receipt = reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], @@ -803,7 +811,7 @@ mod tests { }, ); - let execution_outcome = ExecutionOutcome { + let execution_outcome: ExecutionOutcome = ExecutionOutcome { bundle: bundle_state, receipts: Receipts::default(), first_block: 0, diff --git a/crates/evm/execution-types/src/lib.rs b/crates/evm/execution-types/src/lib.rs index f98ebfe73a5f..fb872cd596e4 100644 --- a/crates/evm/execution-types/src/lib.rs +++ b/crates/evm/execution-types/src/lib.rs @@ -26,7 +26,7 @@ pub use execution_outcome::*; /// all fields are serialized. /// /// Read more: -#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +#[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { pub use super::chain::serde_bincode_compat::*; } diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index 82f84301f03a..85bc7e7f9a79 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -6,6 +6,7 @@ use crate::{ execute::{BatchExecutor, BlockExecutorProvider, Executor}, system_calls::OnStateHook, }; +use alloc::boxed::Box; use alloy_primitives::BlockNumber; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; @@ -70,6 +71,13 @@ where type Output = BlockExecutionOutput; type Error = BlockExecutionError; + fn init(&mut self, tx_env_overrides: Box) { + match self { + Self::Left(a) => a.init(tx_env_overrides), + Self::Right(b) => b.init(tx_env_overrides), + } + } + fn execute(self, input: Self::Input<'_>) -> Result { match self { Self::Left(a) => a.execute(input), diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 677a15dfa1b4..42c756f4d93f 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -6,9 +6,8 @@ pub use reth_execution_errors::{ }; pub use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; pub use reth_storage_errors::provider::ProviderError; -use revm::db::states::bundle_state::BundleRetention; -use crate::system_calls::OnStateHook; +use crate::{system_calls::OnStateHook, TxEnvOverrides}; use alloc::{boxed::Box, vec::Vec}; use alloy_eips::eip7685::Requests; use alloy_primitives::BlockNumber; @@ -17,7 +16,10 @@ use reth_consensus::ConsensusError; use reth_primitives::{BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_revm::batch::BlockBatchRecord; -use revm::{db::BundleState, State}; +use revm::{ + db::{states::bundle_state::BundleRetention, BundleState}, + State, +}; use revm_primitives::{db::Database, U256}; /// A general purpose executor trait that executes an input (e.g. block) and produces an output @@ -32,6 +34,9 @@ pub trait Executor { /// The error type returned by the executor. type Error; + /// Initialize the executor with the given transaction environment overrides. + fn init(&mut self, _tx_env_overrides: Box) {} + /// Consumes the type and executes the block. /// /// # Note @@ -184,6 +189,9 @@ where /// The error type returned by this strategy's methods. type Error: From + core::error::Error; + /// Initialize the strategy with the given transaction environment overrides. 
+ fn init(&mut self, _tx_env_overrides: Box) {} + /// Applies any necessary changes before executing the block's transactions. fn apply_pre_execution_changes( &mut self, @@ -329,6 +337,10 @@ where type Output = BlockExecutionOutput; type Error = S::Error; + fn init(&mut self, env_overrides: Box) { + self.strategy.init(env_overrides); + } + fn execute(mut self, input: Self::Input<'_>) -> Result { let BlockExecutionInput { block, total_difficulty } = input; @@ -480,7 +492,7 @@ mod tests { use alloy_primitives::U256; use reth_chainspec::{ChainSpec, MAINNET}; use revm::db::{CacheDB, EmptyDBTyped}; - use revm_primitives::bytes; + use revm_primitives::{bytes, TxEnv}; use std::sync::Arc; #[derive(Clone, Default)] @@ -703,4 +715,28 @@ mod tests { assert_eq!(block_execution_output.requests, expected_apply_post_execution_changes_result); assert_eq!(block_execution_output.state, expected_finish_result); } + + #[test] + fn test_tx_env_overrider() { + let strategy_factory = TestExecutorStrategyFactory { + execute_transactions_result: ExecuteOutput { + receipts: vec![Receipt::default()], + gas_used: 10, + }, + apply_post_execution_changes_result: Requests::new(vec![bytes!("deadbeef")]), + finish_result: BundleState::default(), + }; + let provider = BasicBlockExecutorProvider::new(strategy_factory); + let db = CacheDB::>::default(); + + // if we want to apply tx env overrides the executor must be mut. + let mut executor = provider.executor(db); + // execute consumes the executor, so we can only call it once. + // let result = executor.execute(BlockExecutionInput::new(&Default::default(), U256::ZERO)); + executor.init(Box::new(|tx_env: &mut TxEnv| { + tx_env.nonce.take(); + })); + let result = executor.execute(BlockExecutionInput::new(&Default::default(), U256::ZERO)); + assert!(result.is_ok()); + } } diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index e30ff9b1a7ad..ae884bdd5f86 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -18,6 +18,7 @@ extern crate alloc; use crate::builder::RethEvmBuilder; +use alloy_consensus::BlockHeader as _; use alloy_primitives::{Address, Bytes, B256, U256}; use reth_primitives::TransactionSigned; use reth_primitives_traits::BlockHeader; @@ -33,7 +34,6 @@ pub mod noop; pub mod provider; pub mod state_change; pub mod system_calls; - #[cfg(any(test, feature = "test-utils"))] /// test helpers for mocking executor pub mod test_utils; @@ -138,9 +138,16 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { data: Bytes, ); + /// Returns a [`CfgEnvWithHandlerCfg`] for the given header. + fn cfg_env(&self, header: &Self::Header, total_difficulty: U256) -> CfgEnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + self.fill_cfg_env(&mut cfg, header, total_difficulty); + cfg + } + /// Fill [`CfgEnvWithHandlerCfg`] fields according to the chain spec and given header. 
/// - /// This must set the corresponding spec id in the handler cfg, based on timestamp or total + /// This __must__ set the corresponding spec id in the handler cfg, based on timestamp or total /// difficulty fn fill_cfg_env( &self, @@ -155,7 +162,7 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { block_env.coinbase = header.beneficiary(); block_env.timestamp = U256::from(header.timestamp()); if after_merge { - block_env.prevrandao = Some(header.mix_hash()); + block_env.prevrandao = header.mix_hash(); block_env.difficulty = U256::ZERO; } else { block_env.difficulty = header.difficulty(); @@ -170,6 +177,18 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { } } + /// Creates a new [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the given header. + fn cfg_and_block_env( + &self, + header: &Self::Header, + total_difficulty: U256, + ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); + (cfg, block_env) + } + /// Convenience function to call both [`fill_cfg_env`](ConfigureEvmEnv::fill_cfg_env) and /// [`ConfigureEvmEnv::fill_block_env`]. /// @@ -211,3 +230,18 @@ pub struct NextBlockEnvAttributes { /// The randomness value for the next block. pub prev_randao: B256, } + +/// Function hook that allows to modify a transaction environment. +pub trait TxEnvOverrides { + /// Apply the overrides by modifying the given `TxEnv`. + fn apply(&mut self, env: &mut TxEnv); +} + +impl TxEnvOverrides for F +where + F: FnMut(&mut TxEnv), +{ + fn apply(&mut self, env: &mut TxEnv) { + self(env) + } +} diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index 84c38db0dc5f..0d4f45c4d9d7 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -1,8 +1,8 @@ //! Provider trait for populating the EVM environment. 
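Because of the blanket impl above, any `FnMut(&mut TxEnv)` closure is already a `TxEnvOverrides`. A minimal sketch under that assumption, re-declaring the trait locally so only revm's `TxEnv` (with its `Option<u64>` nonce field) is external; it mirrors the override used in `test_tx_env_overrider`:

```rust
use revm_primitives::TxEnv;

/// Local stand-in for the trait introduced in this diff.
trait TxEnvOverrides {
    fn apply(&mut self, env: &mut TxEnv);
}

// Blanket impl: every mutable closure over `TxEnv` is an override hook.
impl<F: FnMut(&mut TxEnv)> TxEnvOverrides for F {
    fn apply(&mut self, env: &mut TxEnv) {
        self(env)
    }
}

fn main() {
    let mut env = TxEnv { nonce: Some(7), ..Default::default() };
    // Drop the nonce check for simulation, as in the test above.
    let mut overrides = |tx_env: &mut TxEnv| {
        tx_env.nonce.take();
    };
    overrides.apply(&mut env);
    assert_eq!(env.nonce, None);
}
```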
use crate::ConfigureEvmEnv; +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; -use reth_primitives::Header; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; diff --git a/crates/evm/src/system_calls/eip2935.rs b/crates/evm/src/system_calls/eip2935.rs index edb71c8b4e06..4848feb7281c 100644 --- a/crates/evm/src/system_calls/eip2935.rs +++ b/crates/evm/src/system_calls/eip2935.rs @@ -4,10 +4,10 @@ use alloc::{boxed::Box, string::ToString}; use alloy_eips::eip2935::HISTORY_STORAGE_ADDRESS; use crate::ConfigureEvm; +use alloy_consensus::Header; use alloy_primitives::B256; use reth_chainspec::EthereumHardforks; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::ResultAndState; diff --git a/crates/evm/src/system_calls/eip4788.rs b/crates/evm/src/system_calls/eip4788.rs index bc535809680f..2ad02c26eb90 100644 --- a/crates/evm/src/system_calls/eip4788.rs +++ b/crates/evm/src/system_calls/eip4788.rs @@ -2,11 +2,11 @@ use alloc::{boxed::Box, string::ToString}; use crate::ConfigureEvm; +use alloy_consensus::Header; use alloy_eips::eip4788::BEACON_ROOTS_ADDRESS; use alloy_primitives::B256; use reth_chainspec::EthereumHardforks; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::ResultAndState; diff --git a/crates/evm/src/system_calls/eip7002.rs b/crates/evm/src/system_calls/eip7002.rs index 5e36f2bdeb93..f20b7a54c089 100644 --- a/crates/evm/src/system_calls/eip7002.rs +++ b/crates/evm/src/system_calls/eip7002.rs @@ -1,10 +1,10 @@ //! [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) system call implementation. use crate::ConfigureEvm; use alloc::{boxed::Box, format}; +use alloy_consensus::Header; use alloy_eips::eip7002::WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS; use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::{ExecutionResult, ResultAndState}; diff --git a/crates/evm/src/system_calls/eip7251.rs b/crates/evm/src/system_calls/eip7251.rs index 7a55c7a5aeab..112f724df764 100644 --- a/crates/evm/src/system_calls/eip7251.rs +++ b/crates/evm/src/system_calls/eip7251.rs @@ -1,10 +1,10 @@ //! [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) system call implementation. 
use crate::ConfigureEvm; use alloc::{boxed::Box, format}; +use alloy_consensus::Header; use alloy_eips::eip7251::CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS; use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::{ExecutionResult, ResultAndState}; diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index 7fdb31d967dd..47fd59d735fc 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -2,12 +2,13 @@ use crate::ConfigureEvm; use alloc::{boxed::Box, sync::Arc, vec}; +use alloy_consensus::Header; use alloy_eips::eip7685::Requests; use alloy_primitives::Bytes; use core::fmt::Display; use reth_chainspec::EthereumHardforks; use reth_execution_errors::BlockExecutionError; -use reth_primitives::{Block, Header}; +use reth_primitives::Block; use revm::{Database, DatabaseCommit, Evm}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, B256}; diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index f7ab4fce5df0..b70fb921599e 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -35,6 +35,7 @@ reth-tasks.workspace = true reth-tracing.workspace = true # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true @@ -78,5 +79,6 @@ serde = [ "alloy-primitives/serde", "parking_lot/serde", "rand/serde", - "secp256k1/serde" + "secp256k1/serde", + "reth-primitives-traits/serde", ] diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 77a7b50477b0..7e670620472c 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -4,12 +4,14 @@ use std::{ time::{Duration, Instant}, }; +use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use reth_evm::execute::{ BatchExecutor, BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor, }; -use reth_primitives::{Block, BlockWithSenders, Receipt}; -use reth_primitives_traits::format_gas_throughput; +use reth_node_api::{Block as _, BlockBody as _}; +use reth_primitives::{BlockExt, BlockWithSenders, Receipt}; +use reth_primitives_traits::{format_gas_throughput, SignedTransaction}; use reth_provider::{ BlockReader, Chain, HeaderProvider, ProviderError, StateProviderFactory, TransactionVariant, }; @@ -37,7 +39,9 @@ pub struct BackfillJob { impl Iterator for BackfillJob where E: BlockExecutorProvider, - P: HeaderProvider + BlockReader + StateProviderFactory, + P: HeaderProvider + + BlockReader + + StateProviderFactory, { type Item = BackfillJobResult; @@ -53,7 +57,9 @@ where impl BackfillJob where E: BlockExecutorProvider, - P: BlockReader + HeaderProvider + StateProviderFactory, + P: BlockReader + + HeaderProvider + + StateProviderFactory, { /// Converts the backfill job into a single block backfill job. pub fn into_single_blocks(self) -> SingleBlockBackfillJob { @@ -100,10 +106,10 @@ where fetch_block_duration += fetch_block_start.elapsed(); - cumulative_gas += block.gas_used; + cumulative_gas += block.gas_used(); // Configure the executor to use the current state. 
- trace!(target: "exex::backfill", number = block_number, txs = block.body.transactions.len(), "Executing block"); + trace!(target: "exex::backfill", number = block_number, txs = block.body.transactions().len(), "Executing block"); // Execute the block let execute_start = Instant::now(); @@ -111,8 +117,7 @@ where // Unseal the block for execution let (block, senders) = block.into_components(); let (unsealed_header, hash) = block.header.split(); - let block = - Block { header: unsealed_header, body: block.body }.with_senders_unchecked(senders); + let block = P::Block::new(unsealed_header, block.body).with_senders_unchecked(senders); executor.execute_and_verify_one((&block, td).into())?; execution_duration += execute_start.elapsed(); @@ -134,7 +139,7 @@ where } } - let last_block_number = blocks.last().expect("blocks should not be empty").number; + let last_block_number = blocks.last().expect("blocks should not be empty").number(); debug!( target: "exex::backfill", range = ?*self.range.start()..=last_block_number, @@ -165,7 +170,7 @@ pub struct SingleBlockBackfillJob { impl Iterator for SingleBlockBackfillJob where E: BlockExecutorProvider, - P: HeaderProvider + BlockReader + StateProviderFactory, + P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult<(BlockWithSenders, BlockExecutionOutput)>; @@ -177,7 +182,7 @@ where impl SingleBlockBackfillJob where E: BlockExecutorProvider, - P: HeaderProvider + BlockReader + StateProviderFactory, + P: HeaderProvider + BlockReader + StateProviderFactory, { /// Converts the single block backfill job into a stream. pub fn into_stream( @@ -189,7 +194,7 @@ where pub(crate) fn execute_block( &self, block_number: u64, - ) -> BackfillJobResult<(BlockWithSenders, BlockExecutionOutput)> { + ) -> BackfillJobResult<(BlockWithSenders, BlockExecutionOutput)> { let td = self .provider .header_td_by_number(block_number)? 
@@ -206,7 +211,7 @@ where self.provider.history_by_block_number(block_number.saturating_sub(1))?, )); - trace!(target: "exex::backfill", number = block_number, txs = block_with_senders.block.body.transactions.len(), "Executing block"); + trace!(target: "exex::backfill", number = block_number, txs = block_with_senders.block.body().transactions().len(), "Executing block"); let block_execution_output = executor.execute((&block_with_senders, td).into())?; diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index c55b8651daf1..46177ceda122 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -103,7 +103,13 @@ where impl Stream for StreamBackfillJob where E: BlockExecutorProvider + Clone + Send + 'static, - P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + Unpin + 'static, + P: HeaderProvider + + BlockReader + + StateProviderFactory + + Clone + + Send + + Unpin + + 'static, { type Item = BackfillJobResult; @@ -136,7 +142,13 @@ where impl Stream for StreamBackfillJob where E: BlockExecutorProvider + Clone + Send + 'static, - P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + Unpin + 'static, + P: HeaderProvider + + BlockReader + + StateProviderFactory + + Clone + + Send + + Unpin + + 'static, { type Item = BackfillJobResult; diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 0a8bde242457..6d93314e22bd 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use alloy_consensus::{constants::ETH_TO_WEI, TxEip2930}; +use alloy_consensus::{constants::ETH_TO_WEI, Header, TxEip2930}; use alloy_genesis::{Genesis, GenesisAccount}; use alloy_primitives::{b256, Address, TxKind, U256}; use eyre::OptionExt; @@ -9,12 +9,13 @@ use reth_evm::execute::{ BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, }; use reth_evm_ethereum::execute::EthExecutorProvider; +use reth_node_api::FullNodePrimitives; use reth_primitives::{ - Block, BlockBody, BlockWithSenders, Header, Receipt, SealedBlockWithSenders, Transaction, + Block, BlockBody, BlockExt, BlockWithSenders, Receipt, SealedBlockWithSenders, Transaction, }; use reth_provider::{ providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, - ProviderFactory, StaticFileProviderFactory, + ProviderFactory, }; use reth_revm::database::StateProviderDatabase; use reth_testing_utils::generators::sign_tx_with_key_pair; @@ -57,16 +58,19 @@ pub(crate) fn execute_block_and_commit_to_database( block: &BlockWithSenders, ) -> eyre::Result> where - N: ProviderNodeTypes, + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, + >, { let provider = provider_factory.provider()?; // Execute the block to produce a block execution output let mut block_execution_output = EthExecutorProvider::ethereum(chain_spec) - .executor(StateProviderDatabase::new(LatestStateProviderRef::new( - provider.tx_ref(), - provider.static_file_provider(), - ))) + .executor(StateProviderDatabase::new(LatestStateProviderRef::new(&provider))) .execute(BlockExecutionInput { block, total_difficulty: U256::ZERO })?; block_execution_output.state.reverts.sort(); @@ -164,7 +168,13 @@ pub(crate) fn blocks_and_execution_outputs( key_pair: Keypair, ) -> 
eyre::Result)>> where - N: ProviderNodeTypes, + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, + >, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; @@ -186,15 +196,15 @@ pub(crate) fn blocks_and_execution_outcome( ) -> eyre::Result<(Vec, ExecutionOutcome)> where N: ProviderNodeTypes, + N::Primitives: + FullNodePrimitives, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; let provider = provider_factory.provider()?; - let executor = - EthExecutorProvider::ethereum(chain_spec).batch_executor(StateProviderDatabase::new( - LatestStateProviderRef::new(provider.tx_ref(), provider.static_file_provider()), - )); + let executor = EthExecutorProvider::ethereum(chain_spec) + .batch_executor(StateProviderDatabase::new(LatestStateProviderRef::new(&provider))); let mut execution_outcome = executor.execute_and_verify_batch(vec![ (&block1, U256::ZERO).into(), diff --git a/crates/exex/exex/src/context.rs b/crates/exex/exex/src/context.rs index 4e0d9f5956c7..3d303c9bbac0 100644 --- a/crates/exex/exex/src/context.rs +++ b/crates/exex/exex/src/context.rs @@ -3,6 +3,7 @@ use reth_exex_types::ExExHead; use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_node_core::node_config::NodeConfig; use reth_primitives::Head; +use reth_provider::BlockReader; use reth_tasks::TaskExecutor; use std::fmt::Debug; use tokio::sync::mpsc::UnboundedSender; @@ -56,7 +57,7 @@ where impl ExExContext where Node: FullNodeComponents, - Node::Provider: Debug, + Node::Provider: Debug + BlockReader, Node::Executor: Debug, { /// Returns dynamic version of the context @@ -106,13 +107,19 @@ where /// Sets notifications stream to [`crate::ExExNotificationsWithoutHead`], a stream of /// notifications without a head. - pub fn set_notifications_without_head(&mut self) { + pub fn set_notifications_without_head(&mut self) + where + Node::Provider: BlockReader, + { self.notifications.set_without_head(); } /// Sets notifications stream to [`crate::ExExNotificationsWithHead`], a stream of notifications /// with the provided head. 
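The widened `Node::Provider: BlockReader` bounds in these hunks all use the same device: constrain an associated type at the method that needs it rather than on the whole impl. A self-contained sketch with illustrative stand-in traits (none of these names are the real reth definitions):

```rust
use std::collections::BTreeMap;

trait BlockReader {
    fn block(&self, number: u64) -> Option<String>;
}

trait FullNodeComponents {
    type Provider;
    fn provider(&self) -> &Self::Provider;
}

struct ExExContext<Node: FullNodeComponents> {
    node: Node,
}

impl<Node: FullNodeComponents> ExExContext<Node> {
    // Defined for every node, but callable only when the provider reads blocks.
    fn block(&self, number: u64) -> Option<String>
    where
        Node::Provider: BlockReader,
    {
        self.node.provider().block(number)
    }
}

struct MapProvider(BTreeMap<u64, String>);

impl BlockReader for MapProvider {
    fn block(&self, number: u64) -> Option<String> {
        self.0.get(&number).cloned()
    }
}

struct TestNode(MapProvider);

impl FullNodeComponents for TestNode {
    type Provider = MapProvider;
    fn provider(&self) -> &MapProvider {
        &self.0
    }
}

fn main() {
    let provider = MapProvider([(1u64, "first".to_string())].into());
    let ctx = ExExContext { node: TestNode(provider) };
    assert_eq!(ctx.block(1).as_deref(), Some("first"));
}
```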
- pub fn set_notifications_with_head(&mut self, head: ExExHead) { + pub fn set_notifications_with_head(&mut self, head: ExExHead) + where + Node::Provider: BlockReader, + { self.notifications.set_with_head(head); } } @@ -121,6 +128,7 @@ where mod tests { use reth_exex_types::ExExHead; use reth_node_api::FullNodeComponents; + use reth_provider::BlockReader; use crate::ExExContext; @@ -132,7 +140,10 @@ mod tests { ctx: ExExContext, } - impl ExEx { + impl ExEx + where + Node::Provider: BlockReader, + { async fn _test_bounds(mut self) -> eyre::Result<()> { self.ctx.pool(); self.ctx.block_executor(); diff --git a/crates/exex/exex/src/dyn_context.rs b/crates/exex/exex/src/dyn_context.rs index b48a6ebc951f..3ce0f488f40c 100644 --- a/crates/exex/exex/src/dyn_context.rs +++ b/crates/exex/exex/src/dyn_context.rs @@ -6,6 +6,7 @@ use std::fmt::Debug; use reth_chainspec::{EthChainSpec, Head}; use reth_node_api::FullNodeComponents; use reth_node_core::node_config::NodeConfig; +use reth_provider::BlockReader; use tokio::sync::mpsc; use crate::{ExExContext, ExExEvent, ExExNotificationsStream}; @@ -51,7 +52,7 @@ impl Debug for ExExContextDyn { impl From> for ExExContextDyn where Node: FullNodeComponents, - Node::Provider: Debug, + Node::Provider: Debug + BlockReader, Node::Executor: Debug, { fn from(ctx: ExExContext) -> Self { diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index a17de660862b..ea5ddf2e8c62 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -10,7 +10,7 @@ use reth_chainspec::Head; use reth_metrics::{metrics::Counter, Metrics}; use reth_primitives::SealedHeader; use reth_provider::HeaderProvider; -use reth_tracing::tracing::debug; +use reth_tracing::tracing::{debug, warn}; use std::{ collections::VecDeque, fmt::Debug, @@ -35,6 +35,12 @@ use tokio_util::sync::{PollSendError, PollSender, ReusableBoxFuture}; /// or 17 minutes of 1-second blocks. pub const DEFAULT_EXEX_MANAGER_CAPACITY: usize = 1024; +/// The maximum number of blocks allowed in the WAL before emitting a warning. +/// +/// This constant defines the threshold for the Write-Ahead Log (WAL) size. If the number of blocks +/// in the WAL exceeds this limit, a warning is logged to indicate potential issues. +pub const WAL_BLOCKS_WARNING: usize = 128; + /// The source of the notification. /// /// This distinguishment is needed to not commit any pipeline notificatations to [WAL](`Wal`), @@ -377,6 +383,13 @@ where .unwrap(); self.wal.finalize(lowest_finished_height)?; + if self.wal.num_blocks() > WAL_BLOCKS_WARNING { + warn!( + target: "exex::manager", + blocks = ?self.wal.num_blocks(), + "WAL contains too many blocks and is not getting cleared. That will lead to increased disk space usage. Check that you emit the FinishedHeight event from your ExExes." 
+ ); + } } else { let unfinalized_exexes = exex_finished_heights .into_iter() @@ -644,7 +657,7 @@ mod tests { use reth_primitives::SealedBlockWithSenders; use reth_provider::{ providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockReader, - BlockWriter, Chain, DatabaseProviderFactory, TransactionVariant, + BlockWriter, Chain, DatabaseProviderFactory, StorageLocation, TransactionVariant, }; use reth_testing_utils::generators::{self, random_block, BlockParams}; @@ -723,7 +736,7 @@ mod tests { ExExManager::new((), vec![exex_handle], 10, wal, empty_finalized_header_stream()); // Define the notification for testing - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); @@ -741,7 +754,7 @@ mod tests { assert_eq!(exex_manager.next_id, 1); // Push another notification - let mut block2 = SealedBlockWithSenders::default(); + let mut block2: SealedBlockWithSenders = Default::default(); block2.block.header.set_hash(B256::new([0x02; 32])); block2.block.header.set_block_number(20); @@ -779,7 +792,7 @@ mod tests { ); // Push some notifications to fill part of the buffer - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); @@ -1038,11 +1051,11 @@ mod tests { assert_eq!(exex_handle.next_notification_id, 0); // Setup two blocks for the chain commit notification - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); - let mut block2 = SealedBlockWithSenders::default(); + let mut block2: SealedBlockWithSenders = Default::default(); block2.block.header.set_hash(B256::new([0x02; 32])); block2.block.header.set_block_number(11); @@ -1091,7 +1104,7 @@ mod tests { // Set finished_height to a value higher than the block tip exex_handle.finished_height = Some(BlockNumHash::new(15, B256::random())); - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); @@ -1222,10 +1235,10 @@ mod tests { genesis_block.number + 1, BlockParams { parent: Some(genesis_hash), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .unwrap(); let provider_rw = provider_factory.database_provider_rw().unwrap(); - provider_rw.insert_block(block.clone()).unwrap(); + provider_rw.insert_block(block.clone(), StorageLocation::Database).unwrap(); provider_rw.commit().unwrap(); let provider = BlockchainProvider2::new(provider_factory).unwrap(); diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 14cfe9be4d92..954a057fc09c 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -90,7 +90,12 @@ impl ExExNotifications { impl ExExNotificationsStream for ExExNotifications where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + P: BlockReader + + HeaderProvider + + StateProviderFactory + + Clone + + Unpin + + 'static, E: BlockExecutorProvider + Clone + Unpin + 'static, { fn set_without_head(&mut self) { @@ -139,7 +144,12 @@ where impl Stream for ExExNotifications where - P: 
BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + P: BlockReader + + HeaderProvider + + StateProviderFactory + + Clone + + Unpin + + 'static, E: BlockExecutorProvider + Clone + Unpin + 'static, { type Item = eyre::Result; @@ -262,7 +272,12 @@ impl ExExNotificationsWithHead { impl ExExNotificationsWithHead where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + P: BlockReader + + HeaderProvider + + StateProviderFactory + + Clone + + Unpin + + 'static, E: BlockExecutorProvider + Clone + Unpin + 'static, { /// Checks if the ExEx head is on the canonical chain. @@ -339,7 +354,12 @@ where impl Stream for ExExNotificationsWithHead where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + P: BlockReader + + HeaderProvider + + StateProviderFactory + + Clone + + Unpin + + 'static, E: BlockExecutorProvider + Clone + Unpin + 'static, { type Item = eyre::Result; @@ -400,10 +420,10 @@ mod tests { use futures::StreamExt; use reth_db_common::init::init_genesis; use reth_evm_ethereum::execute::EthExecutorProvider; - use reth_primitives::Block; + use reth_primitives::{Block, BlockExt}; use reth_provider::{ providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockWriter, - Chain, DatabaseProviderFactory, + Chain, DatabaseProviderFactory, StorageLocation, }; use reth_testing_utils::generators::{self, random_block, BlockParams}; use tokio::sync::mpsc; @@ -431,6 +451,7 @@ mod tests { let provider_rw = provider_factory.provider_rw()?; provider_rw.insert_block( node_head_block.clone().seal_with_senders().ok_or_eyre("failed to recover senders")?, + StorageLocation::Database, )?; provider_rw.commit()?; @@ -566,7 +587,7 @@ mod tests { genesis_block.number + 1, BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .ok_or_eyre("failed to recover senders")?; let node_head = Head { number: node_head_block.number, @@ -574,7 +595,7 @@ mod tests { ..Default::default() }; let provider_rw = provider.database_provider_rw()?; - provider_rw.insert_block(node_head_block)?; + provider_rw.insert_block(node_head_block, StorageLocation::Database)?; provider_rw.commit()?; let node_head_notification = ExExNotification::ChainCommitted { new: Arc::new( diff --git a/crates/exex/exex/src/wal/cache.rs b/crates/exex/exex/src/wal/cache.rs index 882b65e15892..86943f33cfa0 100644 --- a/crates/exex/exex/src/wal/cache.rs +++ b/crates/exex/exex/src/wal/cache.rs @@ -35,6 +35,11 @@ impl BlockCache { self.notification_max_blocks.is_empty() } + /// Returns the number of blocks in the cache. + pub(super) fn num_blocks(&self) -> usize { + self.committed_blocks.len() + } + /// Removes all files from the cache that has notifications with a tip block less than or equal /// to the given block number. /// diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index 00b0ea919ef6..066fbe1b58c1 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -66,6 +66,11 @@ impl Wal { ) -> eyre::Result> + '_>> { self.inner.iter_notifications() } + + /// Returns the number of blocks in the WAL. + pub fn num_blocks(&self) -> usize { + self.inner.block_cache().num_blocks() + } } /// Inner type for the WAL. @@ -231,13 +236,13 @@ mod tests { use crate::wal::{cache::CachedBlock, Wal}; fn read_notifications(wal: &Wal) -> eyre::Result> { - let Some(files_range) = wal.inner.storage.files_range()? 
else { return Ok(Vec::new()) }; - - wal.inner - .storage - .iter_notifications(files_range) - .map(|entry| Ok(entry?.2)) - .collect::>() + wal.inner.storage.files_range()?.map_or(Ok(Vec::new()), |range| { + wal.inner + .storage + .iter_notifications(range) + .map(|entry| entry.map(|(_, _, n)| n)) + .collect() + }) } fn sort_committed_blocks( @@ -263,21 +268,25 @@ mod tests { // Create 4 canonical blocks and one reorged block with number 2 let blocks = random_block_range(&mut rng, 0..=3, BlockRangeParams::default()) .into_iter() - .map(|block| block.seal_with_senders().ok_or_eyre("failed to recover senders")) + .map(|block| { + block + .seal_with_senders::() + .ok_or_eyre("failed to recover senders") + }) .collect::>>()?; let block_1_reorged = random_block( &mut rng, 1, BlockParams { parent: Some(blocks[0].hash()), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .ok_or_eyre("failed to recover senders")?; let block_2_reorged = random_block( &mut rng, 2, BlockParams { parent: Some(blocks[1].hash()), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .ok_or_eyre("failed to recover senders")?; // Create notifications for the above blocks. diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 5c3468a3c1c7..ca0ea46551c5 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -45,10 +45,10 @@ use reth_node_ethereum::{ EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; -use reth_primitives::{Head, SealedBlockWithSenders}; +use reth_primitives::{BlockExt, EthPrimitives, Head, SealedBlockWithSenders}; use reth_provider::{ providers::{BlockchainProvider, StaticFileProvider}, - BlockReader, ProviderFactory, + BlockReader, EthStorage, ProviderFactory, }; use reth_tasks::TaskManager; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; @@ -118,9 +118,10 @@ where pub struct TestNode; impl NodeTypes for TestNode { - type Primitives = (); + type Primitives = EthPrimitives; type ChainSpec = ChainSpec; type StateCommitment = reth_trie_db::MerklePatriciaTrie; + type Storage = EthStorage; } impl NodeTypesWithEngine for TestNode { @@ -129,7 +130,14 @@ impl NodeTypesWithEngine for TestNode { impl Node for TestNode where - N: FullNodeTypes>, + N: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = EthEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Storage = EthStorage, + >, + >, { type ComponentsBuilder = ComponentsBuilder< N, @@ -257,7 +265,7 @@ pub async fn test_exex_context_with_chain_spec( let (static_dir, _) = create_test_static_files_dir(); let db = create_test_rw_db(); - let provider_factory = ProviderFactory::new( + let provider_factory = ProviderFactory::>::new( db, chain_spec.clone(), StaticFileProvider::read_write(static_dir.into_path()).expect("static file provider"), @@ -281,7 +289,7 @@ pub async fn test_exex_context_with_chain_spec( let (_, payload_builder) = NoopPayloadBuilderService::::new(); - let components = NodeAdapter::, _>, _> { + let components = NodeAdapter::, _> { components: Components { transaction_pool, evm_config, @@ -298,7 +306,7 @@ pub async fn test_exex_context_with_chain_spec( .block_by_hash(genesis_hash)? .ok_or_else(|| eyre::eyre!("genesis block not found"))? 
.seal_slow() - .seal_with_senders() + .seal_with_senders::() .ok_or_else(|| eyre::eyre!("failed to recover senders"))?; let head = Head { diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index 51097d6109c1..3b67fd5aa500 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-chain-state.workspace = true reth-execution-types.workspace = true +reth-primitives-traits.workspace = true # reth alloy-primitives.workspace = true @@ -38,11 +39,13 @@ serde = [ "reth-execution-types/serde", "alloy-eips/serde", "alloy-primitives/serde", - "rand/serde" + "rand/serde", + "reth-primitives-traits/serde", ] serde-bincode-compat = [ "reth-execution-types/serde-bincode-compat", "serde_with", "reth-primitives/serde-bincode-compat", - "alloy-eips/serde-bincode-compat" + "alloy-eips/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", ] diff --git a/crates/exex/types/src/notification.rs b/crates/exex/types/src/notification.rs index 61d42a3319be..fb0762f04b3e 100644 --- a/crates/exex/types/src/notification.rs +++ b/crates/exex/types/src/notification.rs @@ -2,27 +2,28 @@ use std::sync::Arc; use reth_chain_state::CanonStateNotification; use reth_execution_types::Chain; +use reth_primitives_traits::NodePrimitives; /// Notifications sent to an `ExEx`. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub enum ExExNotification { +pub enum ExExNotification { /// Chain got committed without a reorg, and only the new chain is returned. ChainCommitted { /// The new chain after commit. - new: Arc, + new: Arc>, }, /// Chain got reorged, and both the old and the new chains are returned. ChainReorged { /// The old chain before reorg. - old: Arc, + old: Arc>, /// The new chain after reorg. - new: Arc, + new: Arc>, }, /// Chain got reverted, and only the old chain is returned. ChainReverted { /// The old chain before reversion. - old: Arc, + old: Arc>, }, } @@ -60,8 +61,8 @@ impl ExExNotification { } } -impl From for ExExNotification { - fn from(notification: CanonStateNotification) -> Self { +impl From> for ExExNotification

{
+    fn from(notification: CanonStateNotification<P>
) -> Self { match notification { CanonStateNotification::Commit { new } => Self::ChainCommitted { new }, CanonStateNotification::Reorg { old, new } => Self::ChainReorged { old, new }, diff --git a/crates/fs-util/src/lib.rs b/crates/fs-util/src/lib.rs index d242ecc98e2d..c1aa4900e03f 100644 --- a/crates/fs-util/src/lib.rs +++ b/crates/fs-util/src/lib.rs @@ -210,6 +210,12 @@ impl FsPathError { } } +/// Wrapper for [`File::open`]. +pub fn open(path: impl AsRef) -> Result { + let path = path.as_ref(); + File::open(path).map_err(|err| FsPathError::open(err, path)) +} + /// Wrapper for `std::fs::read_to_string` pub fn read_to_string(path: impl AsRef) -> Result { let path = path.as_ref(); diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 4a534afbef53..61ab94b4f2fc 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -412,11 +412,13 @@ pub fn discv5_sockets_wrt_rlpx_addr( discv5_addr_ipv6.map(|ip| SocketAddrV6::new(ip, discv5_port_ipv6, 0, 0)); if let Some(discv5_addr) = discv5_addr_ipv4 { - warn!(target: "net::discv5", - %discv5_addr, - %rlpx_addr, - "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" - ); + if discv5_addr != rlpx_addr { + warn!(target: "net::discv5", + %discv5_addr, + %rlpx_addr, + "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" + ); + } } // overwrite discv5 ipv4 addr with RLPx address. this is since there is no @@ -429,11 +431,13 @@ pub fn discv5_sockets_wrt_rlpx_addr( discv5_addr_ipv4.map(|ip| SocketAddrV4::new(ip, discv5_port_ipv4)); if let Some(discv5_addr) = discv5_addr_ipv6 { - warn!(target: "net::discv5", - %discv5_addr, - %rlpx_addr, - "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" - ); + if discv5_addr != rlpx_addr { + warn!(target: "net::discv5", + %discv5_addr, + %rlpx_addr, + "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" + ); + } } // overwrite discv5 ipv6 addr with RLPx address. 
this is since there is no diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 272db6fc6d1c..f4cc134ec484 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -18,6 +18,7 @@ reth-consensus.workspace = true reth-network-p2p.workspace = true reth-network-peers.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-storage-api.workspace = true reth-tasks.workspace = true @@ -27,6 +28,7 @@ reth-db-api = { workspace = true, optional = true } reth-testing-utils = { workspace = true, optional = true } # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true @@ -44,9 +46,9 @@ reth-metrics.workspace = true metrics.workspace = true # misc -tracing.workspace = true rayon.workspace = true thiserror.workspace = true +tracing.workspace = true tempfile = { workspace = true, optional = true } itertools.workspace = true @@ -70,9 +72,16 @@ rand.workspace = true tempfile.workspace = true [features] +optimism = [ + "reth-primitives/optimism", + "reth-db?/optimism", + "reth-db-api?/optimism", + "reth-provider/optimism" +] + test-utils = [ - "dep:tempfile", - "dep:reth-db-api", + "tempfile", + "reth-db-api", "reth-db/test-utils", "reth-consensus/test-utils", "reth-network-p2p/test-utils", @@ -80,5 +89,6 @@ test-utils = [ "reth-chainspec/test-utils", "reth-primitives/test-utils", "reth-db-api?/test-utils", - "reth-provider/test-utils" + "reth-provider/test-utils", + "reth-primitives-traits/test-utils" ] diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 314f3a09084c..82f45dd23bfe 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -14,11 +14,13 @@ use reth_network_p2p::{ error::{DownloadError, DownloadResult}, }; use reth_primitives::SealedHeader; +use reth_primitives_traits::size::InMemorySize; use reth_storage_api::HeaderProvider; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ cmp::Ordering, collections::BinaryHeap, + fmt::Debug, mem, ops::RangeInclusive, pin::Pin, @@ -36,7 +38,7 @@ pub struct BodiesDownloader { /// The bodies client client: Arc, /// The consensus client - consensus: Arc, + consensus: Arc>, /// The database handle provider: Provider, /// The maximum number of non-empty blocks per one request @@ -56,16 +58,16 @@ pub struct BodiesDownloader { /// Requests in progress in_progress_queue: BodiesRequestQueue, /// Buffered responses - buffered_responses: BinaryHeap, + buffered_responses: BinaryHeap>, /// Queued body responses that can be returned for insertion into the database. - queued_bodies: Vec, + queued_bodies: Vec>, /// The bodies downloader metrics. metrics: BodyDownloaderMetrics, } impl BodiesDownloader where - B: BodiesClient + 'static, + B: BodiesClient + 'static, Provider: HeaderProvider + Unpin + 'static, { /// Returns the next contiguous request. @@ -190,14 +192,14 @@ where } /// Queues bodies and sets the latest queued block number - fn queue_bodies(&mut self, bodies: Vec) { + fn queue_bodies(&mut self, bodies: Vec>) { self.latest_queued_block_number = Some(bodies.last().expect("is not empty").block_number()); self.queued_bodies.extend(bodies); self.metrics.queued_blocks.set(self.queued_bodies.len() as f64); } /// Removes the next response from the buffer. 
- fn pop_buffered_response(&mut self) -> Option { + fn pop_buffered_response(&mut self) -> Option> { let resp = self.buffered_responses.pop()?; self.metrics.buffered_responses.decrement(1.); self.buffered_blocks_size_bytes -= resp.size(); @@ -207,10 +209,10 @@ where } /// Adds a new response to the internal buffer - fn buffer_bodies_response(&mut self, response: Vec) { + fn buffer_bodies_response(&mut self, response: Vec>) { // take into account capacity let size = response.iter().map(BlockResponse::size).sum::() + - response.capacity() * mem::size_of::(); + response.capacity() * mem::size_of::>(); let response = OrderedBodiesResponse { resp: response, size }; let response_len = response.len(); @@ -224,7 +226,7 @@ where } /// Returns a response if its first block number matches the next expected. - fn try_next_buffered(&mut self) -> Option> { + fn try_next_buffered(&mut self) -> Option>> { if let Some(next) = self.buffered_responses.peek() { let expected = self.next_expected_block_number(); let next_block_range = next.block_range(); @@ -250,7 +252,7 @@ where /// Returns the next batch of block bodies that can be returned if we have enough buffered /// bodies - fn try_split_next_batch(&mut self) -> Option> { + fn try_split_next_batch(&mut self) -> Option>> { if self.queued_bodies.len() >= self.stream_batch_size { let next_batch = self.queued_bodies.drain(..self.stream_batch_size).collect::>(); self.queued_bodies.shrink_to_fit(); @@ -282,12 +284,12 @@ where Self: BodyDownloader + 'static, { /// Spawns the downloader task via [`tokio::task::spawn`] - pub fn into_task(self) -> TaskDownloader { + pub fn into_task(self) -> TaskDownloader<::Body> { self.into_task_with(&TokioTaskExecutor::default()) } /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given spawner. - pub fn into_task_with(self, spawner: &S) -> TaskDownloader + pub fn into_task_with(self, spawner: &S) -> TaskDownloader<::Body> where S: TaskSpawner, { @@ -297,9 +299,11 @@ where impl BodyDownloader for BodiesDownloader where - B: BodiesClient + 'static, + B: BodiesClient + 'static, Provider: HeaderProvider + Unpin + 'static, { + type Body = B::Body; + /// Set a new download range (exclusive).
/// /// This method will drain all queued bodies, filter out ones outside the range and put them @@ -345,10 +349,10 @@ where impl Stream for BodiesDownloader where - B: BodiesClient + 'static, + B: BodiesClient + 'static, Provider: HeaderProvider + Unpin + 'static, { - type Item = BodyDownloaderResult; + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -430,13 +434,13 @@ where } #[derive(Debug)] -struct OrderedBodiesResponse { - resp: Vec, +struct OrderedBodiesResponse { + resp: Vec>, /// The total size of the response in bytes size: usize, } -impl OrderedBodiesResponse { +impl OrderedBodiesResponse { /// Returns the block number of the first element /// /// # Panics @@ -467,21 +471,21 @@ impl OrderedBodiesResponse { } } -impl PartialEq for OrderedBodiesResponse { +impl PartialEq for OrderedBodiesResponse { fn eq(&self, other: &Self) -> bool { self.first_block_number() == other.first_block_number() } } -impl Eq for OrderedBodiesResponse {} +impl Eq for OrderedBodiesResponse {} -impl PartialOrd for OrderedBodiesResponse { +impl PartialOrd for OrderedBodiesResponse { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedBodiesResponse { +impl Ord for OrderedBodiesResponse { fn cmp(&self, other: &Self) -> Ordering { self.first_block_number().cmp(&other.first_block_number()).reverse() } @@ -561,7 +565,7 @@ impl BodiesDownloaderBuilder { pub fn build( self, client: B, - consensus: Arc, + consensus: Arc>, provider: Provider, ) -> BodiesDownloader where diff --git a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs index e70c534a0e39..494a5f2ef2ec 100644 --- a/crates/net/downloaders/src/bodies/noop.rs +++ b/crates/net/downloaders/src/bodies/noop.rs @@ -4,6 +4,7 @@ use reth_network_p2p::{ bodies::{downloader::BodyDownloader, response::BlockResponse}, error::{DownloadError, DownloadResult}, }; +use reth_primitives::BlockBody; use std::ops::RangeInclusive; /// A [`BodyDownloader`] implementation that does nothing. 
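The `buffer_bodies_response` hunk above keeps the byte-accounting formula intact while making it generic: the tracked size is the sum of each element's self-reported size plus the heap footprint implied by the `Vec`'s capacity. A rough sketch of that bookkeeping, with a plain `size()` method standing in for reth's `InMemorySize` trait:

```rust
use std::mem;

/// Stand-in response type; `size()` mimics the per-element size query.
struct Response {
    payload: Vec<u8>,
}

impl Response {
    fn size(&self) -> usize {
        mem::size_of::<Self>() + self.payload.len()
    }
}

/// Mirrors the formula in `buffer_bodies_response`: element sizes plus the
/// allocation implied by the vector's capacity. Takes `&Vec` (not a slice)
/// because `capacity()` only exists on the owning vector.
fn buffered_size(responses: &Vec<Response>) -> usize {
    responses.iter().map(Response::size).sum::<usize>()
        + responses.capacity() * mem::size_of::<Response>()
}

fn main() {
    let batch = vec![
        Response { payload: vec![0; 1024] },
        Response { payload: vec![0; 512] },
    ];
    println!("tracking ~{} bytes for this batch", buffered_size(&batch));
}
```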
@@ -12,13 +13,15 @@ use std::ops::RangeInclusive; pub struct NoopBodiesDownloader; impl BodyDownloader for NoopBodiesDownloader { + type Body = BlockBody; + fn set_download_range(&mut self, _: RangeInclusive) -> DownloadResult<()> { Ok(()) } } impl Stream for NoopBodiesDownloader { - type Item = Result, DownloadError>; + type Item = Result>, DownloadError>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index db7ff71cfc9e..aa6ec9e4af0f 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -9,6 +9,7 @@ use reth_network_p2p::{ error::DownloadResult, }; use reth_primitives::SealedHeader; +use reth_primitives_traits::InMemorySize; use std::{ pin::Pin, sync::Arc, @@ -57,7 +58,7 @@ where pub(crate) fn push_new_request( &mut self, client: Arc, - consensus: Arc, + consensus: Arc>, request: Vec, ) { // Set last max requested block number @@ -77,9 +78,9 @@ where impl Stream for BodiesRequestQueue where - B: BodiesClient + 'static, + B: BodiesClient + 'static, { - type Item = DownloadResult>; + type Item = DownloadResult>>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.get_mut().inner.poll_next_unpin(cx) diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index c2b36732b51d..66287624f890 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -9,6 +9,7 @@ use reth_network_p2p::{ }; use reth_network_peers::{PeerId, WithPeerId}; use reth_primitives::{BlockBody, GotExpected, SealedBlock, SealedHeader}; +use reth_primitives_traits::InMemorySize; use std::{ collections::VecDeque, mem, @@ -26,7 +27,7 @@ use std::{ /// It then proceeds to verify the downloaded bodies. In case of a validation error, /// the future will start over. /// -/// The future will filter out any empty headers (see [`reth_primitives::Header::is_empty`]) from +/// The future will filter out any empty headers (see [`alloy_consensus::Header::is_empty`]) from /// the request. If [`BodiesRequestFuture`] was initialized with all empty headers, no request will /// be dispatched and they will be immediately returned upon polling. /// @@ -38,7 +39,7 @@ use std::{ /// and eventually disconnected. pub(crate) struct BodiesRequestFuture { client: Arc, - consensus: Arc, + consensus: Arc>, metrics: BodyDownloaderMetrics, /// Metrics for individual responses. This can be used to observe how the size (in bytes) of /// responses changes while bodies are being downloaded. @@ -46,7 +47,7 @@ pub(crate) struct BodiesRequestFuture { // Headers to download. The collection is shrunk as responses are buffered. pending_headers: VecDeque, /// Internal buffer for all blocks - buffer: Vec, + buffer: Vec>, fut: Option, /// Tracks how many bodies we requested in the last request. last_request_len: Option, @@ -59,7 +60,7 @@ where /// Returns an empty future. Use [`BodiesRequestFuture::with_headers`] to set the request. pub(crate) fn new( client: Arc, - consensus: Arc, + consensus: Arc>, metrics: BodyDownloaderMetrics, ) -> Self { Self { @@ -114,7 +115,10 @@ where /// Process block response. /// Returns an error if the response is invalid.
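The doc comment above describes the empty-header shortcut: headers whose bodies are provably empty are answered locally and never requested from a peer. A simplified sketch of that split; the types are illustrative stand-ins for `SealedHeader` and `BlockResponse`, and in reth emptiness is derived from the header's transactions/ommers roots rather than stored as a flag:

```rust
/// Illustrative stand-in for a sealed header.
struct Header {
    number: u64,
    is_empty: bool,
}

enum BlockResponse {
    /// Header with a known-empty body; produced without any network request.
    Empty(Header),
    /// Header whose body still has to be downloaded.
    Pending(Header),
}

/// Split a request into immediately resolvable empty blocks and headers that
/// actually need a peer round-trip.
fn split_request(headers: Vec<Header>) -> (Vec<BlockResponse>, Vec<Header>) {
    let mut resolved = Vec::new();
    let mut to_request = Vec::new();
    for header in headers {
        if header.is_empty {
            resolved.push(BlockResponse::Empty(header));
        } else {
            to_request.push(header);
        }
    }
    (resolved, to_request)
}

fn main() {
    let headers = vec![
        Header { number: 1, is_empty: true },
        Header { number: 2, is_empty: false },
    ];
    let (resolved, to_request) = split_request(headers);
    for r in &resolved {
        if let BlockResponse::Empty(h) = r {
            println!("block {} resolved locally", h.number);
        }
    }
    println!("{} headers still need a peer request", to_request.len());
}
```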
- fn on_block_response(&mut self, response: WithPeerId>) -> DownloadResult<()> { + fn on_block_response(&mut self, response: WithPeerId>) -> DownloadResult<()> + where + B::Body: InMemorySize, + { let (peer_id, bodies) = response.split(); let request_len = self.last_request_len.unwrap_or_default(); let response_len = bodies.len(); @@ -157,7 +161,10 @@ where /// /// This method removes headers from the internal collection. /// If the response fails validation, then the header will be put back. - fn try_buffer_blocks(&mut self, bodies: Vec) -> DownloadResult<()> + fn try_buffer_blocks(&mut self, bodies: Vec) -> DownloadResult<()> + where + B::Body: InMemorySize, + { let bodies_capacity = bodies.capacity(); let bodies_len = bodies.len(); let mut bodies = bodies.into_iter().peekable(); @@ -207,9 +214,9 @@ where impl Future for BodiesRequestFuture where - B: BodiesClient + 'static, + B: BodiesClient + 'static, { - type Output = DownloadResult>; + type Output = DownloadResult>>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index eeafb7ab121b..a2b63c8ed186 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -8,6 +8,7 @@ use reth_network_p2p::{ }; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ + fmt::Debug, future::Future, ops::RangeInclusive, pin::Pin, @@ -23,15 +24,15 @@ pub const BODIES_TASK_BUFFER_SIZE: usize = 4; /// A [BodyDownloader] that drives a spawned [BodyDownloader] on a spawned task. #[derive(Debug)] #[pin_project] -pub struct TaskDownloader { +pub struct TaskDownloader { #[pin] - from_downloader: ReceiverStream, + from_downloader: ReceiverStream>, to_downloader: UnboundedSender>, } // === impl TaskDownloader === -impl TaskDownloader { +impl TaskDownloader { /// Spawns the given `downloader` via [`tokio::task::spawn`] and returns a [`TaskDownloader`] that's /// connected to that task. /// @@ -45,12 +46,16 @@ impl TaskDownloader { /// use reth_consensus::Consensus; /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader}; /// use reth_network_p2p::bodies::client::BodiesClient; + /// use reth_primitives_traits::InMemorySize; /// use reth_storage_api::HeaderProvider; - /// use std::sync::Arc; + /// use std::{fmt::Debug, sync::Arc}; /// - /// fn t( + /// fn t< + /// B: BodiesClient + 'static, + /// Provider: HeaderProvider + Unpin + 'static, + /// >( /// client: Arc, - /// consensus: Arc, + /// consensus: Arc>, /// provider: Provider, /// ) { /// let downloader = BodiesDownloaderBuilder::default().build(client, consensus, provider); @@ -59,7 +64,7 @@ impl TaskDownloader { /// ``` pub fn spawn(downloader: T) -> Self where - T: BodyDownloader + 'static, + T: BodyDownloader + 'static, { Self::spawn_with(downloader, &TokioTaskExecutor::default()) } @@ -68,7 +73,7 @@ impl TaskDownloader { /// that's connected to that task.
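Both buffering methods above now bound `B::Body: InMemorySize`, so the downloader can meter its buffers in bytes for any body type. A guess at the shape of such a byte-accounting trait, with one hand-rolled implementation; this is an assumed sketch, not reth's actual `reth_primitives_traits::InMemorySize` definition:

```rust
use std::mem;

/// Assumed shape of a byte-accounting trait like `InMemorySize`.
trait InMemorySize {
    /// Approximate in-memory footprint of the value, in bytes.
    fn size(&self) -> usize;
}

struct Body {
    transactions: Vec<Vec<u8>>,
    ommers: Vec<[u8; 32]>,
}

impl InMemorySize for Body {
    fn size(&self) -> usize {
        // Fixed-size part of the struct plus the heap data it owns.
        mem::size_of::<Self>()
            + self.transactions.iter().map(|tx| tx.len()).sum::<usize>()
            + self.ommers.len() * mem::size_of::<[u8; 32]>()
    }
}

fn main() {
    let body = Body {
        transactions: vec![vec![0; 100], vec![0; 200]],
        ommers: vec![[0; 32]],
    };
    println!("~{} bytes in memory", body.size());
}
```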
pub fn spawn_with(downloader: T, spawner: &S) -> Self where - T: BodyDownloader + 'static, + T: BodyDownloader + 'static, S: TaskSpawner, { let (bodies_tx, bodies_rx) = mpsc::channel(BODIES_TASK_BUFFER_SIZE); @@ -86,15 +91,17 @@ impl TaskDownloader { } } -impl BodyDownloader for TaskDownloader { +impl BodyDownloader for TaskDownloader { + type Body = B; + fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()> { let _ = self.to_downloader.send(range); Ok(()) } } -impl Stream for TaskDownloader { - type Item = BodyDownloaderResult; +impl Stream for TaskDownloader { + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.project().from_downloader.poll_next(cx) @@ -102,9 +109,9 @@ impl Stream for TaskDownloader { } /// A [`BodyDownloader`] that runs on its own task -struct SpawnedDownloader { +struct SpawnedDownloader { updates: UnboundedReceiverStream>, - bodies_tx: PollSender, + bodies_tx: PollSender>, downloader: T, } diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 5b21c82fb3f8..ff352bc23049 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, io, path::Path}; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockHash, BlockNumber, Sealable, B256}; use futures::Future; @@ -12,16 +13,16 @@ use reth_network_p2p::{ priority::Priority, }; use reth_network_peers::PeerId; -use reth_primitives::{BlockBody, Header, SealedHeader}; +use reth_primitives::SealedHeader; +use reth_primitives_traits::{Block, BlockBody, FullBlock}; use thiserror::Error; use tokio::{fs::File, io::AsyncReadExt}; use tokio_stream::StreamExt; use tokio_util::codec::FramedRead; use tracing::{debug, trace, warn}; -use crate::receipt_file_client::FromReceiptReader; - use super::file_codec::BlockFileCodec; +use crate::receipt_file_client::FromReceiptReader; /// Default byte length of chunk to read from chain file. /// @@ -40,15 +41,15 @@ pub const DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE: u64 = 1_000_000_000; /// /// This reads the entire file into memory, so it is not suitable for large files. #[derive(Debug)] -pub struct FileClient { +pub struct FileClient { /// The buffered headers retrieved when fetching new bodies. - headers: HashMap, + headers: HashMap, /// A mapping between block hash and number. hash_to_number: HashMap, /// The buffered bodies retrieved when fetching new headers. - bodies: HashMap, + bodies: HashMap, } /// An error that can occur when constructing and using a [`FileClient`]. @@ -73,7 +74,7 @@ impl From<&'static str> for FileClientError { } } -impl FileClient { +impl FileClient { /// Create a new file client from a file path. pub async fn new>(path: P) -> Result { let file = File::open(path).await?; @@ -114,12 +115,8 @@ impl FileClient { /// Clones and returns the highest header of this client has or `None` if empty. Seals header /// before returning. - pub fn tip_header(&self) -> Option { - self.headers.get(&self.max_block()?).map(|h| { - let sealed = h.clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) + pub fn tip_header(&self) -> Option> { + self.headers.get(&self.max_block()?).map(|h| SealedHeader::seal(h.clone())) } /// Returns true if all blocks are canonical (no gaps) @@ -141,13 +138,13 @@ impl FileClient { } /// Use the provided bodies as the file client's block body buffer. 
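`TaskDownloader` wraps the real downloader in a spawned task and talks to it over channels: range updates go in on an unbounded sender, and finished batches come back on a bounded stream. A std-threads sketch of that shape; reth uses tokio tasks, `ReceiverStream`, and `PollSender`, so this only shows the design, not the real implementation:

```rust
use std::{ops::RangeInclusive, sync::mpsc, thread};

/// Spawn a "downloader" on its own thread; callers keep only the two
/// channel endpoints, like `TaskDownloader`'s sender/stream pair.
fn spawn_downloader() -> (mpsc::Sender<RangeInclusive<u64>>, mpsc::Receiver<u64>) {
    let (range_tx, range_rx) = mpsc::channel::<RangeInclusive<u64>>();
    let (body_tx, body_rx) = mpsc::channel::<u64>();

    thread::spawn(move || {
        // Drain range updates; "download" each block and stream it back.
        while let Ok(range) = range_rx.recv() {
            for number in range {
                if body_tx.send(number).is_err() {
                    return; // the frontend was dropped, shut the task down
                }
            }
        }
    });

    (range_tx, body_rx)
}

fn main() {
    let (ranges, bodies) = spawn_downloader();
    ranges.send(10..=12).unwrap();
    drop(ranges); // closing the update channel lets the task exit
    for number in bodies {
        println!("downloaded body for block {number}");
    }
}
```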
- pub fn with_bodies(mut self, bodies: HashMap) -> Self { + pub fn with_bodies(mut self, bodies: HashMap) -> Self { self.bodies = bodies; self } /// Use the provided headers as the file client's block header buffer. - pub fn with_headers(mut self, headers: HashMap) -> Self { + pub fn with_headers(mut self, headers: HashMap) -> Self { self.headers = headers; for (number, header) in &self.headers { self.hash_to_number.insert(header.hash_slow(), *number); @@ -166,14 +163,14 @@ impl FileClient { } /// Returns an iterator over headers in the client. - pub fn headers_iter(&self) -> impl Iterator { + pub fn headers_iter(&self) -> impl Iterator { self.headers.values() } /// Returns a mutable iterator over bodies in the client. /// /// Panics if file client headers and bodies do not map 1-1. - pub fn bodies_iter_mut(&mut self) -> impl Iterator { + pub fn bodies_iter_mut(&mut self) -> impl Iterator { let bodies = &mut self.bodies; let numbers = &self.hash_to_number; bodies.iter_mut().map(|(hash, body)| (numbers[hash], body)) @@ -181,27 +178,28 @@ impl FileClient { /// Returns the current number of transactions in the client. pub fn total_transactions(&self) -> usize { - self.bodies.iter().fold(0, |acc, (_, body)| acc + body.transactions.len()) + self.bodies.iter().fold(0, |acc, (_, body)| acc + body.transactions().len()) } } -impl FromReader for FileClient { +impl FromReader for FileClient { type Error = FileClientError; /// Initialize the [`FileClient`] from bytes that have been read from file. - fn from_reader( - reader: B, + fn from_reader( + reader: R, num_bytes: u64, ) -> impl Future, Self::Error>> where - B: AsyncReadExt + Unpin, + R: AsyncReadExt + Unpin, { let mut headers = HashMap::default(); let mut hash_to_number = HashMap::default(); let mut bodies = HashMap::default(); // use with_capacity to make sure the internal buffer contains the entire chunk - let mut stream = FramedRead::with_capacity(reader, BlockFileCodec, num_bytes as usize); + let mut stream = + FramedRead::with_capacity(reader, BlockFileCodec::::default(), num_bytes as usize); trace!(target: "downloaders::file", target_num_bytes=num_bytes, @@ -229,13 +227,13 @@ impl FromReader for FileClient { } Err(err) => return Err(err), }; - let block_number = block.header.number; - let block_hash = block.header.hash_slow(); + let block_number = block.header().number(); + let block_hash = block.header().hash_slow(); // add to the internal maps - headers.insert(block.header.number, block.header.clone()); - hash_to_number.insert(block_hash, block.header.number); - bodies.insert(block_hash, block.into()); + headers.insert(block.header().number(), block.header().clone()); + hash_to_number.insert(block_hash, block.header().number()); + bodies.insert(block_hash, block.body().clone()); if log_interval == 0 { trace!(target: "downloaders::file", @@ -264,8 +262,9 @@ impl FromReader for FileClient { } } -impl HeadersClient for FileClient { - type Output = HeadersFut; +impl HeadersClient for FileClient { + type Header = B::Header; + type Output = HeadersFut; fn get_headers_with_priority( &self, @@ -314,8 +313,9 @@ impl HeadersClient for FileClient { } } -impl BodiesClient for FileClient { - type Output = BodiesFut; +impl BodiesClient for FileClient { + type Body = B::Body; + type Output = BodiesFut; fn get_block_bodies_with_priority( &self, @@ -338,7 +338,7 @@ impl BodiesClient for FileClient { } } -impl DownloadClient for FileClient { +impl DownloadClient for FileClient { fn report_bad_message(&self, _peer_id: PeerId) { warn!("Reported a bad
message on a file client, the file may be corrupted or invalid"); // noop @@ -544,7 +544,7 @@ mod tests { // create an empty file let file = tempfile::tempfile().unwrap(); - let client = + let client: Arc = Arc::new(FileClient::from_file(file.into()).await.unwrap().with_bodies(bodies.clone())); let mut downloader = BodiesDownloaderBuilder::default().build( client.clone(), @@ -569,14 +569,14 @@ mod tests { let p0 = child_header(&p1); let file = tempfile::tempfile().unwrap(); - let client = Arc::new(FileClient::from_file(file.into()).await.unwrap().with_headers( - HashMap::from([ + let client: Arc = Arc::new( + FileClient::from_file(file.into()).await.unwrap().with_headers(HashMap::from([ (0u64, p0.clone().unseal()), (1, p1.clone().unseal()), (2, p2.clone().unseal()), (3, p3.clone().unseal()), - ]), - )); + ])), + ); let mut downloader = ReverseHeadersDownloaderBuilder::default() .stream_batch_size(3) @@ -598,7 +598,7 @@ mod tests { // Generate some random blocks let (file, headers, _) = generate_bodies_file(0..=19).await; // now try to read them back - let client = Arc::new(FileClient::from_file(file).await.unwrap()); + let client: Arc = Arc::new(FileClient::from_file(file).await.unwrap()); // construct headers downloader and use first header let mut header_downloader = ReverseHeadersDownloaderBuilder::default() @@ -623,7 +623,7 @@ mod tests { let (file, headers, mut bodies) = generate_bodies_file(0..=19).await; // now try to read them back - let client = Arc::new(FileClient::from_file(file).await.unwrap()); + let client: Arc = Arc::new(FileClient::from_file(file).await.unwrap()); // insert headers in db for the bodies downloader insert_headers(factory.db_ref().db(), &headers); diff --git a/crates/net/downloaders/src/file_codec.rs b/crates/net/downloaders/src/file_codec.rs index 3e754f9cf49b..57a15b6c888c 100644 --- a/crates/net/downloaders/src/file_codec.rs +++ b/crates/net/downloaders/src/file_codec.rs @@ -3,7 +3,6 @@ use crate::file_client::FileClientError; use alloy_primitives::bytes::{Buf, BytesMut}; use alloy_rlp::{Decodable, Encodable}; -use reth_primitives::Block; use tokio_util::codec::{Decoder, Encoder}; /// Codec for reading raw block bodies from a file. @@ -19,10 +18,16 @@ use tokio_util::codec::{Decoder, Encoder}; /// /// It's recommended to use [`with_capacity`](tokio_util::codec::FramedRead::with_capacity) to set /// the capacity of the framed reader to the size of the file. 
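The `BlockFileCodec` rewrite below swaps the unit struct for a generic one. Since the codec holds no runtime data, a `PhantomData` field carries the block type, and a manual `Default` impl keeps construction as cheap as the old unit struct. The pattern in isolation:

```rust
use std::marker::PhantomData;

/// Zero-sized codec generic over the block type it decodes.
struct FileCodec<B>(PhantomData<B>);

impl<B> Default for FileCodec<B> {
    // Written by hand: `#[derive(Default)]` would needlessly require
    // `B: Default` even though no `B` value is ever stored.
    fn default() -> Self {
        Self(PhantomData)
    }
}

fn main() {
    // Turbofish picks the block type at the call site, as in
    // `BlockFileCodec::<B>::default()` in the diff below.
    let _codec = FileCodec::<String>::default();
}
```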
-pub(crate) struct BlockFileCodec; +pub(crate) struct BlockFileCodec(std::marker::PhantomData); -impl Decoder for BlockFileCodec { - type Item = Block; +impl Default for BlockFileCodec { + fn default() -> Self { + Self(std::marker::PhantomData) + } +} + +impl Decoder for BlockFileCodec { + type Item = B; type Error = FileClientError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { @@ -31,18 +36,17 @@ impl Decoder for BlockFileCodec { } let buf_slice = &mut src.as_ref(); - let body = - Block::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?; + let body = B::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?; src.advance(src.len() - buf_slice.len()); Ok(Some(body)) } } -impl Encoder for BlockFileCodec { +impl Encoder for BlockFileCodec { type Error = FileClientError; - fn encode(&mut self, item: Block, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: B, dst: &mut BytesMut) -> Result<(), Self::Error> { item.encode(dst); Ok(()) } diff --git a/crates/net/downloaders/src/headers/noop.rs b/crates/net/downloaders/src/headers/noop.rs index 210655f7e26e..58da73123878 100644 --- a/crates/net/downloaders/src/headers/noop.rs +++ b/crates/net/downloaders/src/headers/noop.rs @@ -1,3 +1,4 @@ +use alloy_consensus::Header; use futures::Stream; use reth_network_p2p::headers::{ downloader::{HeaderDownloader, SyncTarget}, @@ -11,6 +12,8 @@ use reth_primitives::SealedHeader; pub struct NoopHeaderDownloader; impl HeaderDownloader for NoopHeaderDownloader { + type Header = Header; + fn update_local_head(&mut self, _: SealedHeader) {} fn update_sync_target(&mut self, _: SyncTarget) {} @@ -19,7 +22,7 @@ impl HeaderDownloader for NoopHeaderDownloader { } impl Stream for NoopHeaderDownloader { - type Item = Result, HeadersDownloaderError>; + type Item = Result, HeadersDownloaderError
>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 941d140b39d4..63a20ff27f5b 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -2,24 +2,25 @@ use super::task::TaskDownloader; use crate::metrics::HeaderDownloaderMetrics; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use futures::{stream::Stream, FutureExt}; use futures_util::{stream::FuturesUnordered, StreamExt}; use rayon::prelude::*; use reth_config::config::HeadersConfig; -use reth_consensus::Consensus; +use reth_consensus::HeaderValidator; use reth_network_p2p::{ error::{DownloadError, DownloadResult, PeerRequestResult}, headers::{ - client::{HeadersClient, HeadersDirection, HeadersRequest}, + client::{HeadersClient, HeadersRequest}, downloader::{validate_header_download, HeaderDownloader, SyncTarget}, error::{HeadersDownloaderError, HeadersDownloaderResult}, }, priority::Priority, }; use reth_network_peers::PeerId; -use reth_primitives::{GotExpected, Header, SealedHeader}; +use reth_primitives::{GotExpected, SealedHeader}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ cmp::{Ordering, Reverse}, @@ -39,14 +40,14 @@ const REQUESTS_PER_PEER_MULTIPLIER: usize = 5; /// Wrapper for internal downloader errors. #[derive(Error, Debug)] -enum ReverseHeadersDownloaderError { +enum ReverseHeadersDownloaderError { #[error(transparent)] - Downloader(#[from] HeadersDownloaderError), + Downloader(#[from] HeadersDownloaderError), #[error(transparent)] Response(#[from] Box), } -impl From for ReverseHeadersDownloaderError { +impl From for ReverseHeadersDownloaderError { fn from(value: HeadersResponseError) -> Self { Self::Response(Box::new(value)) } @@ -59,24 +60,25 @@ impl From for ReverseHeadersDownloaderError { /// tries to fill the gap between the local head of the node and the chain tip by issuing multiple /// requests at a time but yielding them in batches on [`Stream::poll_next`]. /// -/// **Note:** This downloader downloads in reverse, see also [`HeadersDirection::Falling`], this -/// means the batches of headers that this downloader yields will start at the chain tip and move -/// towards the local head: falling block numbers. +/// **Note:** This downloader downloads in reverse, see also +/// [`reth_network_p2p::headers::client::HeadersDirection`], this means the batches of headers that +/// this downloader yields will start at the chain tip and move towards the local head: falling +/// block numbers. #[must_use = "Stream does nothing unless polled"] #[derive(Debug)] pub struct ReverseHeadersDownloader { /// Consensus client used to validate headers - consensus: Arc, + consensus: Arc>, /// Client used to download headers. client: Arc, /// The local head of the chain. - local_head: Option, + local_head: Option>, /// Block we want to close the gap to. sync_target: Option, /// The block number to use for requests. next_request_block_number: u64, /// Keeps track of the block we need to validate next. 
- lowest_validated_header: Option, + lowest_validated_header: Option>, /// Tip block number to start validating from (in reverse) next_chain_tip_block_number: u64, /// The batch size per one request @@ -97,11 +99,11 @@ pub struct ReverseHeadersDownloader { /// requests in progress in_progress_queue: FuturesUnordered>, /// Buffered, unvalidated responses - buffered_responses: BinaryHeap, + buffered_responses: BinaryHeap>, /// Buffered, _sorted_ and validated headers ready to be returned. /// /// Note: headers are sorted from high to low - queued_validated_headers: Vec, + queued_validated_headers: Vec>, /// Header downloader metrics. metrics: HeaderDownloaderMetrics, } @@ -110,7 +112,7 @@ pub struct ReverseHeadersDownloader { impl ReverseHeadersDownloader where - H: HeadersClient + 'static, + H: HeadersClient + 'static, { /// Convenience method to create a [`ReverseHeadersDownloaderBuilder`] without importing it pub fn builder() -> ReverseHeadersDownloaderBuilder { @@ -120,7 +122,7 @@ where /// Returns the block number the local node is at. #[inline] fn local_block_number(&self) -> Option { - self.local_head.as_ref().map(|h| h.number) + self.local_head.as_ref().map(|h| h.number()) } /// Returns the existing local head block number @@ -130,7 +132,7 @@ where /// If the local head has not been set. #[inline] fn existing_local_block_number(&self) -> BlockNumber { - self.local_head.as_ref().expect("is initialized").number + self.local_head.as_ref().expect("is initialized").number() } /// Returns the existing sync target. @@ -197,14 +199,14 @@ where /// `lowest_validated_header`. /// /// This only returns `None` if we haven't fetched the initial chain tip yet. - fn lowest_validated_header(&self) -> Option<&SealedHeader> { + fn lowest_validated_header(&self) -> Option<&SealedHeader> { self.queued_validated_headers.last().or(self.lowest_validated_header.as_ref()) } /// Validate that the received header matches the expected sync target. fn validate_sync_target( &self, - header: &SealedHeader, + header: &SealedHeader, request: HeadersRequest, peer_id: PeerId, ) -> Result<(), Box> { @@ -220,12 +222,12 @@ where ), })) } - SyncTargetBlock::Number(number) if header.number != number => { + SyncTargetBlock::Number(number) if header.number() != number => { Err(Box::new(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::InvalidTipNumber(GotExpected { - got: header.number, + got: header.number(), expected: number, }), })) @@ -244,20 +246,12 @@ where fn process_next_headers( &mut self, request: HeadersRequest, - headers: Vec
, + headers: Vec, peer_id: PeerId, - ) -> Result<(), ReverseHeadersDownloaderError> { + ) -> Result<(), ReverseHeadersDownloaderError> { let mut validated = Vec::with_capacity(headers.len()); - let sealed_headers = headers - .into_par_iter() - .map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - - SealedHeader::new(header, seal) - }) - .collect::>(); + let sealed_headers = headers.into_par_iter().map(SealedHeader::seal).collect::>(); for parent in sealed_headers { // Validate that the header is the parent header of the last validated header. if let Some(validated_header) = @@ -280,17 +274,17 @@ where if let Some((last_header, head)) = validated .last_mut() .zip(self.local_head.as_ref()) - .filter(|(last, head)| last.number == head.number + 1) + .filter(|(last, head)| last.number() == head.number() + 1) { // Every header must be valid on its own - if let Err(error) = self.consensus.validate_header(last_header) { + if let Err(error) = self.consensus.validate_header(&*last_header) { trace!(target: "downloaders::headers", %error, "Failed to validate header"); return Err(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::HeaderValidation { hash: head.hash(), - number: head.number, + number: head.number(), error: Box::new(error), }, } @@ -299,9 +293,9 @@ where // If the header is valid on its own, but not against its parent, we return it as // detached head error. - if let Err(error) = self.consensus.validate_header_against_parent(last_header, head) { + if let Err(error) = self.consensus.validate_header_against_parent(&*last_header, head) { // Replace the last header with a detached variant - error!(target: "downloaders::headers", %error, number = last_header.number, hash = ?last_header.hash(), "Header cannot be attached to known canonical chain"); + error!(target: "downloaders::headers", %error, number = last_header.number(), hash = ?last_header.hash(), "Header cannot be attached to known canonical chain"); return Err(HeadersDownloaderError::DetachedHead { local_head: Box::new(head.clone()), header: Box::new(last_header.clone()), @@ -313,7 +307,7 @@ where // update tracked block info (falling block number) self.next_chain_tip_block_number = - validated.last().expect("exists").number.saturating_sub(1); + validated.last().expect("exists").number().saturating_sub(1); self.queued_validated_headers.extend(validated); Ok(()) @@ -345,7 +339,7 @@ where let skip = self .queued_validated_headers .iter() - .take_while(|last| last.number > target_block_number) + .take_while(|last| last.number() > target_block_number) .count(); // removes all headers that are higher than current target self.queued_validated_headers.drain(..skip); @@ -360,8 +354,8 @@ where /// Handles the response for the request for the sync target fn on_sync_target_outcome( &mut self, - response: HeadersRequestOutcome, - ) -> Result<(), ReverseHeadersDownloaderError> { + response: HeadersRequestOutcome, + ) -> Result<(), ReverseHeadersDownloaderError> { let sync_target = self.existing_sync_target(); let HeadersRequestOutcome { request, outcome } = response; match outcome { @@ -372,7 +366,7 @@ where self.metrics.total_downloaded.increment(headers.len() as u64); // sort headers from highest to lowest block number - headers.sort_unstable_by_key(|h| Reverse(h.number)); + headers.sort_unstable_by_key(|h| Reverse(h.number())); if headers.is_empty() { return Err(HeadersResponseError { @@ -383,9 +377,8 @@ where .into()) } - let sealed_target = headers.swap_remove(0).seal_slow(); - let 
(header, seal) = sealed_target.into_parts(); - let target = SealedHeader::new(header, seal); + let header = headers.swap_remove(0); + let target = SealedHeader::seal(header); match sync_target { SyncTargetBlock::Hash(hash) | SyncTargetBlock::HashAndNumber { hash, .. } => { @@ -401,12 +394,12 @@ where } } SyncTargetBlock::Number(number) => { - if target.number != number { + if target.number() != number { return Err(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::InvalidTipNumber(GotExpected { - got: target.number, + got: target.number(), expected: number, }), } @@ -415,17 +408,17 @@ where } } - trace!(target: "downloaders::headers", head=?self.local_block_number(), hash=?target.hash(), number=%target.number, "Received sync target"); + trace!(target: "downloaders::headers", head=?self.local_block_number(), hash=?target.hash(), number=%target.number(), "Received sync target"); // This is the next block we need to start issuing requests from - let parent_block_number = target.number.saturating_sub(1); - self.on_block_number_update(target.number, parent_block_number); + let parent_block_number = target.number().saturating_sub(1); + self.on_block_number_update(target.number(), parent_block_number); self.queued_validated_headers.push(target); // try to validate all buffered responses blocked by this successful response self.try_validate_buffered() - .map(Err::<(), ReverseHeadersDownloaderError>) + .map(Err::<(), ReverseHeadersDownloaderError>) .transpose()?; Ok(()) @@ -439,8 +432,8 @@ where /// Invoked when we received a response fn on_headers_outcome( &mut self, - response: HeadersRequestOutcome, - ) -> Result<(), ReverseHeadersDownloaderError> { + response: HeadersRequestOutcome, + ) -> Result<(), ReverseHeadersDownloaderError> { let requested_block_number = response.block_number(); let HeadersRequestOutcome { request, outcome } = response; @@ -475,19 +468,19 @@ where } // sort headers from highest to lowest block number - headers.sort_unstable_by_key(|h| Reverse(h.number)); + headers.sort_unstable_by_key(|h| Reverse(h.number())); // validate the response let highest = &headers[0]; - trace!(target: "downloaders::headers", requested_block_number, highest=?highest.number, "Validating non-empty headers response"); + trace!(target: "downloaders::headers", requested_block_number, highest=?highest.number(), "Validating non-empty headers response"); - if highest.number != requested_block_number { + if highest.number() != requested_block_number { return Err(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::HeadersResponseStartBlockMismatch(GotExpected { - got: highest.number, + got: highest.number(), expected: requested_block_number, }), } @@ -495,14 +488,14 @@ where } // check if the response is the next expected - if highest.number == self.next_chain_tip_block_number { + if highest.number() == self.next_chain_tip_block_number { // is next response, validate it self.process_next_headers(request, headers, peer_id)?; // try to validate all buffered responses blocked by this successful response self.try_validate_buffered() - .map(Err::<(), ReverseHeadersDownloaderError>) + .map(Err::<(), ReverseHeadersDownloaderError>) .transpose()?; - } else if highest.number > self.existing_local_block_number() { + } else if highest.number() > self.existing_local_block_number() { self.metrics.buffered_responses.increment(1.); // can't validate yet self.buffered_responses.push(OrderedHeadersResponse { @@ -549,7 +542,7 @@ where /// Attempts to validate the 
buffered responses /// /// Returns an error if the next expected response was popped, but failed validation. - fn try_validate_buffered(&mut self) -> Option { + fn try_validate_buffered(&mut self) -> Option> { loop { // Check to see if we've already received the next value let next_response = self.buffered_responses.peek_mut()?; @@ -575,7 +568,7 @@ where /// Returns the request for the `sync_target` header. const fn get_sync_target_request(&self, start: BlockHashOrNumber) -> HeadersRequest { - HeadersRequest { start, limit: 1, direction: HeadersDirection::Falling } + HeadersRequest::falling(start, 1) } /// Starts a request future @@ -598,7 +591,11 @@ where } /// Validate whether the header is valid in relation to its parent - fn validate(&self, header: &SealedHeader, parent: &SealedHeader) -> DownloadResult<()> { + fn validate( + &self, + header: &SealedHeader, + parent: &SealedHeader, + ) -> DownloadResult<()> { validate_header_download(&self.consensus, header, parent) } @@ -614,7 +611,7 @@ where } /// Splits off the next batch of headers - fn split_next_batch(&mut self) -> Vec { + fn split_next_batch(&mut self) -> Vec> { let batch_size = self.stream_batch_size.min(self.queued_validated_headers.len()); let mut rem = self.queued_validated_headers.split_off(batch_size); std::mem::swap(&mut rem, &mut self.queued_validated_headers); @@ -644,12 +641,15 @@ where Self: HeaderDownloader + 'static, { /// Spawns the downloader task via [`tokio::task::spawn`] - pub fn into_task(self) -> TaskDownloader { + pub fn into_task(self) -> TaskDownloader<::Header> { self.into_task_with(&TokioTaskExecutor::default()) } /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given `spawner`. - pub fn into_task_with(self, spawner: &S) -> TaskDownloader + pub fn into_task_with( + self, + spawner: &S, + ) -> TaskDownloader<::Header> where S: TaskSpawner, { @@ -659,11 +659,17 @@ where impl HeaderDownloader for ReverseHeadersDownloader where - H: HeadersClient + 'static, + H: HeadersClient + 'static, { - fn update_local_head(&mut self, head: SealedHeader) { + type Header = H::Header; + + fn update_local_head(&mut self, head: SealedHeader) { // ensure we're only yielding headers that are in range and follow the current local head.
- while self.queued_validated_headers.last().is_some_and(|last| last.number <= head.number) { + while self + .queued_validated_headers + .last() + .is_some_and(|last| last.number() <= head.number()) + { // headers are sorted high to low self.queued_validated_headers.pop(); } @@ -686,7 +692,7 @@ where .queued_validated_headers .first() .filter(|h| h.hash() == tip) - .map(|h| h.number) + .map(|h| h.number()) { self.sync_target = Some(new_sync_target.with_number(target_number)); return @@ -701,13 +707,13 @@ where } } SyncTarget::Gap(existing) => { - let target = existing.parent_hash; + let target = existing.parent; if Some(target) != current_tip { // there could be a sync target request in progress self.sync_target_request.take(); // If the target has changed, update the request pointers based on the new // targeted block number - let parent_block_number = existing.number.saturating_sub(1); + let parent_block_number = existing.block.number.saturating_sub(1); trace!(target: "downloaders::headers", current=?current_tip, new=?target, %parent_block_number, "Updated sync target"); @@ -740,9 +746,9 @@ where impl Stream for ReverseHeadersDownloader where - H: HeadersClient + 'static, + H: HeadersClient + 'static, { - type Item = HeadersDownloaderResult>; + type Item = HeadersDownloaderResult>, H::Header>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -883,18 +889,18 @@ where } } -/// A future that returns a list of [`Header`] on success. +/// A future that returns a list of headers on success. #[derive(Debug)] struct HeadersRequestFuture { request: Option, fut: F, } -impl Future for HeadersRequestFuture +impl Future for HeadersRequestFuture where - F: Future>> + Sync + Send + Unpin, + F: Future>> + Sync + Send + Unpin, { - type Output = HeadersRequestOutcome; + type Output = HeadersRequestOutcome; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -906,14 +912,14 @@ where } /// The outcome of the [`HeadersRequestFuture`] -struct HeadersRequestOutcome { +struct HeadersRequestOutcome { request: HeadersRequest, - outcome: PeerRequestResult>, + outcome: PeerRequestResult>, } // === impl OrderedHeadersResponse === -impl HeadersRequestOutcome { +impl HeadersRequestOutcome { fn block_number(&self) -> u64 { self.request.start.as_number().expect("is number") } @@ -921,35 +927,35 @@ impl HeadersRequestOutcome { /// Wrapper type to order responses #[derive(Debug)] -struct OrderedHeadersResponse { - headers: Vec
, +struct OrderedHeadersResponse { + headers: Vec, request: HeadersRequest, peer_id: PeerId, } // === impl OrderedHeadersResponse === -impl OrderedHeadersResponse { +impl OrderedHeadersResponse { fn block_number(&self) -> u64 { self.request.start.as_number().expect("is number") } } -impl PartialEq for OrderedHeadersResponse { +impl PartialEq for OrderedHeadersResponse { fn eq(&self, other: &Self) -> bool { self.block_number() == other.block_number() } } -impl Eq for OrderedHeadersResponse {} +impl Eq for OrderedHeadersResponse {} -impl PartialOrd for OrderedHeadersResponse { +impl PartialOrd for OrderedHeadersResponse { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedHeadersResponse { +impl Ord for OrderedHeadersResponse { fn cmp(&self, other: &Self) -> Ordering { self.block_number().cmp(&other.block_number()) } @@ -1156,7 +1162,11 @@ impl ReverseHeadersDownloaderBuilder { /// Build [`ReverseHeadersDownloader`] with provided consensus /// and header client implementations - pub fn build(self, client: H, consensus: Arc) -> ReverseHeadersDownloader + pub fn build( + self, + client: H, + consensus: Arc>, + ) -> ReverseHeadersDownloader where H: HeadersClient + 'static, { @@ -1207,16 +1217,19 @@ fn calc_next_request( let diff = next_request_block_number - local_head; let limit = diff.min(request_limit); let start = next_request_block_number; - HeadersRequest { start: start.into(), limit, direction: HeadersDirection::Falling } + HeadersRequest::falling(start.into(), limit) } #[cfg(test)] mod tests { use super::*; use crate::headers::test_utils::child_header; + use alloy_consensus::Header; + use alloy_eips::BlockNumHash; use assert_matches::assert_matches; use reth_consensus::test_utils::TestConsensus; use reth_network_p2p::test_utils::TestHeadersClient; + use reth_primitives_traits::BlockWithParent; /// Tests that `replace_number` works the same way as `Option::replace` #[test] @@ -1296,7 +1309,10 @@ mod tests { assert!(downloader.sync_target_request.is_some()); downloader.sync_target_request.take(); - let target = SyncTarget::Gap(SealedHeader::new(Header::default(), B256::random())); + let target = SyncTarget::Gap(BlockWithParent { + block: BlockNumHash::new(0, B256::random()), + parent: Default::default(), + }); downloader.update_sync_target(target); assert!(downloader.sync_target_request.is_none()); assert_matches!( @@ -1310,7 +1326,7 @@ mod tests { fn test_head_update() { let client = Arc::new(TestHeadersClient::default()); - let header = SealedHeader::default(); + let header: SealedHeader = SealedHeader::default(); let mut downloader = ReverseHeadersDownloaderBuilder::default() .build(Arc::clone(&client), Arc::new(TestConsensus::default())); @@ -1373,7 +1389,7 @@ mod tests { fn test_resp_order() { let mut heap = BinaryHeap::new(); let hi = 1u64; - heap.push(OrderedHeadersResponse { + heap.push(OrderedHeadersResponse::
{ headers: vec![], request: HeadersRequest { start: hi.into(), limit: 0, direction: Default::default() }, peer_id: Default::default(), diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs index b3fa27fde59c..3dbfd5e3615e 100644 --- a/crates/net/downloaders/src/headers/task.rs +++ b/crates/net/downloaders/src/headers/task.rs @@ -8,6 +8,7 @@ use reth_network_p2p::headers::{ use reth_primitives::SealedHeader; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ + fmt::Debug, future::Future, pin::Pin, task::{ready, Context, Poll}, @@ -22,15 +23,15 @@ pub const HEADERS_TASK_BUFFER_SIZE: usize = 8; /// A [HeaderDownloader] that drives a spawned [HeaderDownloader] on a spawned task. #[derive(Debug)] #[pin_project] -pub struct TaskDownloader { +pub struct TaskDownloader { #[pin] - from_downloader: ReceiverStream>>, - to_downloader: UnboundedSender, + from_downloader: ReceiverStream>, H>>, + to_downloader: UnboundedSender>, } // === impl TaskDownloader === -impl TaskDownloader { +impl TaskDownloader { /// Spawns the given `downloader` via [`tokio::task::spawn`] and returns a [`TaskDownloader`] /// that's connected to that task. /// @@ -44,9 +45,10 @@ impl TaskDownloader { /// # use std::sync::Arc; /// # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloader; /// # use reth_downloaders::headers::task::TaskDownloader; - /// # use reth_consensus::Consensus; + /// # use reth_consensus::HeaderValidator; /// # use reth_network_p2p::headers::client::HeadersClient; - /// # fn t(consensus:Arc, client: Arc) { + /// # use reth_primitives_traits::BlockHeader; + /// # fn t + 'static>(consensus:Arc>, client: Arc) { /// let downloader = ReverseHeadersDownloader::::builder().build( /// client, /// consensus @@ -55,7 +57,7 @@ impl TaskDownloader { /// # } pub fn spawn(downloader: T) -> Self where - T: HeaderDownloader + 'static, + T: HeaderDownloader
+ 'static, { Self::spawn_with(downloader, &TokioTaskExecutor::default()) } @@ -64,7 +66,7 @@ impl TaskDownloader { /// that's connected to that task. pub fn spawn_with(downloader: T, spawner: &S) -> Self where - T: HeaderDownloader + 'static, + T: HeaderDownloader
+ 'static, S: TaskSpawner, { let (headers_tx, headers_rx) = mpsc::channel(HEADERS_TASK_BUFFER_SIZE); @@ -81,12 +83,14 @@ impl TaskDownloader { } } -impl HeaderDownloader for TaskDownloader { - fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { +impl HeaderDownloader for TaskDownloader { + type Header = H; + + fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { let _ = self.to_downloader.send(DownloaderUpdates::UpdateSyncGap(head, target)); } - fn update_local_head(&mut self, head: SealedHeader) { + fn update_local_head(&mut self, head: SealedHeader) { let _ = self.to_downloader.send(DownloaderUpdates::UpdateLocalHead(head)); } @@ -99,8 +103,8 @@ impl HeaderDownloader for TaskDownloader { } } -impl Stream for TaskDownloader { - type Item = HeadersDownloaderResult>; +impl Stream for TaskDownloader { + type Item = HeadersDownloaderResult>, H>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.project().from_downloader.poll_next(cx) @@ -108,9 +112,10 @@ impl Stream for TaskDownloader { } /// A [`HeaderDownloader`] that runs on its own task -struct SpawnedDownloader { - updates: UnboundedReceiverStream, - headers_tx: PollSender>>, +#[expect(clippy::complexity)] +struct SpawnedDownloader { + updates: UnboundedReceiverStream>, + headers_tx: PollSender>, T::Header>>, downloader: T, } @@ -170,9 +175,9 @@ impl Future for SpawnedDownloader { } /// Commands delegated to the spawned [`HeaderDownloader`] -enum DownloaderUpdates { - UpdateSyncGap(SealedHeader, SyncTarget), - UpdateLocalHead(SealedHeader), +enum DownloaderUpdates { + UpdateSyncGap(SealedHeader, SyncTarget), + UpdateLocalHead(SealedHeader), UpdateSyncTarget(SyncTarget), SetBatchSize(usize), } diff --git a/crates/net/downloaders/src/headers/test_utils.rs b/crates/net/downloaders/src/headers/test_utils.rs index 923ad9969373..baea409f20e7 100644 --- a/crates/net/downloaders/src/headers/test_utils.rs +++ b/crates/net/downloaders/src/headers/test_utils.rs @@ -2,7 +2,6 @@ #![allow(dead_code)] -use alloy_primitives::Sealable; use reth_primitives::SealedHeader; /// Returns a new [`SealedHeader`] that's the child header of the given `parent`.
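Several call sites in this diff (including the `child_header` helper just below) collapse the `seal_slow()` + `into_parts()` + `SealedHeader::new(...)` sequence into a single `SealedHeader::seal(header)`. A toy version of what such a constructor does, with `DefaultHasher` standing in for the real keccak256-based sealing:

```rust
use std::{
    collections::hash_map::DefaultHasher,
    hash::{Hash, Hasher},
};

#[derive(Clone, Hash)]
struct Header {
    number: u64,
    parent_hash: u64,
}

/// A header stored together with its hash so the hash is computed only once.
struct SealedHeader {
    hash: u64,
    header: Header,
}

impl SealedHeader {
    /// One-step seal: hash the header and keep both parts together.
    fn seal(header: Header) -> Self {
        let mut hasher = DefaultHasher::new();
        header.hash(&mut hasher);
        Self { hash: hasher.finish(), header }
    }
}

fn main() {
    let sealed = SealedHeader::seal(Header { number: 1, parent_hash: 0 });
    println!("header {} sealed as {:#x}", sealed.header.number, sealed.hash);
}
```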
@@ -10,7 +9,5 @@ pub(crate) fn child_header(parent: &SealedHeader) -> SealedHeader { let mut child = parent.as_ref().clone(); child.number += 1; child.parent_hash = parent.hash_slow(); - let sealed = child.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) + SealedHeader::seal(child) } diff --git a/crates/net/downloaders/src/test_utils/bodies_client.rs b/crates/net/downloaders/src/test_utils/bodies_client.rs index be8373f8235d..d84d92363ee2 100644 --- a/crates/net/downloaders/src/test_utils/bodies_client.rs +++ b/crates/net/downloaders/src/test_utils/bodies_client.rs @@ -78,6 +78,7 @@ impl DownloadClient for TestBodiesClient { } impl BodiesClient for TestBodiesClient { + type Body = BlockBody; type Output = BodiesFut; fn get_block_bodies_with_priority( diff --git a/crates/net/downloaders/src/test_utils/mod.rs b/crates/net/downloaders/src/test_utils/mod.rs index 7755c5e6017c..635383ce3f34 100644 --- a/crates/net/downloaders/src/test_utils/mod.rs +++ b/crates/net/downloaders/src/test_utils/mod.rs @@ -43,7 +43,7 @@ pub(crate) async fn generate_bodies_file( let raw_block_bodies = create_raw_bodies(headers.iter().cloned(), &mut bodies.clone()); let file: File = tempfile::tempfile().unwrap().into(); - let mut writer = FramedWrite::new(file, BlockFileCodec); + let mut writer = FramedWrite::new(file, BlockFileCodec::default()); // rlp encode one after the other for block in raw_block_bodies { diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 1d2b54872455..8b89603167d4 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -16,12 +16,14 @@ workspace = true reth-chainspec.workspace = true reth-codecs-derive.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true # ethereum alloy-chains = { workspace = true, features = ["rlp"] } alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } +alloy-consensus.workspace = true bytes.workspace = true derive_more.workspace = true @@ -41,26 +43,27 @@ arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true rand.workspace = true -alloy-consensus.workspace = true [features] arbitrary = [ - "reth-primitives/arbitrary", - "alloy-chains/arbitrary", - "dep:arbitrary", - "dep:proptest", - "dep:proptest-arbitrary-interop", - "reth-chainspec/arbitrary", - "alloy-consensus/arbitrary", - "alloy-eips/arbitrary", - "alloy-primitives/arbitrary" + "reth-primitives/arbitrary", + "alloy-chains/arbitrary", + "dep:arbitrary", + "dep:proptest", + "dep:proptest-arbitrary-interop", + "reth-chainspec/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "reth-primitives-traits/arbitrary", ] serde = [ - "dep:serde", - "alloy-chains/serde", - "alloy-consensus/serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "bytes/serde", - "rand/serde" + "dep:serde", + "alloy-chains/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde", + "reth-primitives-traits/serde", ] diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index c24fc45022fa..97bbe36b3d61 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -5,8 +5,7 @@ use crate::HeadersDirection; use alloy_eips::BlockHashOrNumber; use 
alloy_primitives::B256; use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; -use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{BlockBody, Header}; +use reth_codecs_derive::{add_arbitrary_tests, generate_tests}; /// A request for a peer to return block headers starting at the requested block. /// The peer must return at most [`limit`](#structfield.limit) headers. @@ -41,34 +40,16 @@ pub struct GetBlockHeaders { /// The response to [`GetBlockHeaders`], containing headers if any headers were found. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(rlp, 10)] -pub struct BlockHeaders( +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +pub struct BlockHeaders( /// The requested headers. - pub Vec
, + pub Vec, ); -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for BlockHeaders { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let headers_count: usize = u.int_in_range(0..=10)?; - let mut headers = Vec::with_capacity(headers_count); - - for _ in 0..headers_count { - headers.push(reth_primitives::generate_valid_header( - u.arbitrary()?, - u.arbitrary()?, - u.arbitrary()?, - u.arbitrary()?, - u.arbitrary()?, - )) - } - - Ok(Self(headers)) - } -} +generate_tests!(#[rlp, 10] BlockHeaders, EthBlockHeadersTests); -impl From> for BlockHeaders { - fn from(headers: Vec
) -> Self { +impl From> for BlockHeaders { + fn from(headers: Vec) -> Self { Self(headers) } } @@ -94,14 +75,15 @@ impl From> for GetBlockBodies { #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(rlp, 16)] -pub struct BlockBodies( +pub struct BlockBodies( /// The requested block bodies, each of which should correspond to a hash in the request. - pub Vec, + pub Vec, ); -impl From> for BlockBodies { - fn from(bodies: Vec) -> Self { +generate_tests!(#[rlp, 16] BlockBodies, EthBlockBodiesTests); + +impl From> for BlockBodies { + fn from(bodies: Vec) -> Self { Self(bodies) } } @@ -112,15 +94,13 @@ mod tests { message::RequestPair, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, HeadersDirection, }; - use alloy_consensus::TxLegacy; + use alloy_consensus::{Header, TxLegacy}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{hex, PrimitiveSignature as Signature, TxKind, U256}; use alloy_rlp::{Decodable, Encodable}; - use reth_primitives::{Header, Transaction, TransactionSigned}; + use reth_primitives::{BlockBody, Transaction, TransactionSigned}; use std::str::FromStr; - use super::BlockBody; - #[test] fn decode_hash() { // this is a valid 32 byte rlp string @@ -218,7 +198,7 @@ mod tests { fn encode_get_block_header_number() { let expected = hex!("ca820457c682270f050580"); let mut data = vec![]; - RequestPair:: { + RequestPair { request_id: 1111, message: GetBlockHeaders { start_block: BlockHashOrNumber::Number(9999), @@ -235,7 +215,7 @@ mod tests { #[test] fn decode_get_block_header_number() { let data = hex!("ca820457c682270f050580"); - let expected = RequestPair:: { + let expected = RequestPair { request_id: 1111, message: GetBlockHeaders { start_block: BlockHashOrNumber::Number(9999), @@ -254,7 +234,7 @@ mod tests { // [ (f90202) 0x0457 = 1111, [ (f901fc) [ (f901f9) header ] ] ] let expected = hex!("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); let mut data = vec![]; - RequestPair:: { + RequestPair { request_id: 1111, message: BlockHeaders(vec![ Header { @@ -289,7 +269,7 @@ mod tests { #[test] fn decode_block_header() { let data = 
hex!("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); - let expected = RequestPair:: { + let expected = RequestPair { request_id: 1111, message: BlockHeaders(vec![ Header { @@ -326,7 +306,7 @@ mod tests { fn encode_get_block_bodies() { let expected = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"); let mut data = vec![]; - RequestPair:: { + RequestPair { request_id: 1111, message: GetBlockBodies(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -341,7 +321,7 @@ mod tests { #[test] fn decode_get_block_bodies() { let data = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"); - let expected = RequestPair:: { + let expected = RequestPair { request_id: 1111, message: GetBlockBodies(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -357,12 +337,12 @@ mod tests { fn encode_block_bodies() { let expected = 
hex!("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: BlockBodies(vec![ BlockBody { transactions: vec![ - TransactionSigned::from_transaction_and_signature(Transaction::Legacy(TxLegacy { + TransactionSigned::new_unhashed(Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x8u64, gas_price: 0x4a817c808, @@ -376,7 +356,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature(Transaction::Legacy(TxLegacy { + TransactionSigned::new_unhashed(Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x9u64, gas_price: 0x4a817c809, @@ -428,12 +408,12 @@ mod tests { #[test] fn decode_block_bodies() { let data = 
hex!("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); - let expected = RequestPair:: { + let expected = RequestPair { request_id: 1111, message: BlockBodies(vec![ BlockBody { transactions: vec![ - TransactionSigned::from_transaction_and_signature(Transaction::Legacy( + TransactionSigned::new_unhashed(Transaction::Legacy( TxLegacy { chain_id: Some(1), nonce: 0x8u64, @@ -443,13 +423,13 @@ mod tests { value: U256::from(0x200u64), input: Default::default(), }), - Signature::new( + Signature::new( U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12").unwrap(), U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10").unwrap(), false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x9u64, @@ -504,7 +484,7 @@ mod tests { let body = BlockBodies::default(); let mut buf = Vec::new(); body.encode(&mut buf); - let decoded = BlockBodies::decode(&mut buf.as_slice()).unwrap(); + let decoded = BlockBodies::::decode(&mut buf.as_slice()).unwrap(); assert_eq!(body, decoded); } } diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 2ef6083a5001..b54fd0df2db2 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -1,15 +1,14 @@ //! Types for broadcasting new data. 
-use crate::{EthMessage, EthVersion}; +use crate::{EthMessage, EthVersion, NetworkPrimitives}; +use alloy_primitives::{Bytes, TxHash, B256, U128}; use alloy_rlp::{ Decodable, Encodable, RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper, }; - -use alloy_primitives::{Bytes, TxHash, B256, U128}; use derive_more::{Constructor, Deref, DerefMut, From, IntoIterator}; -use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{Block, PooledTransactionsElement, TransactionSigned}; - +use reth_codecs_derive::{add_arbitrary_tests, generate_tests}; +use reth_primitives::TransactionSigned; +use reth_primitives_traits::SignedTransaction; use std::{ collections::{HashMap, HashSet}, mem, @@ -75,23 +74,24 @@ impl From for Vec { #[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(rlp, 25)] -pub struct NewBlock { +pub struct NewBlock { /// A new block. - pub block: Block, + pub block: B, /// The current total difficulty. pub td: U128, } +generate_tests!(#[rlp, 25] NewBlock, EthNewBlockTests); + /// This informs peers of transactions that have appeared on the network and are not yet included /// in a block. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(rlp, 10)] -pub struct Transactions( +pub struct Transactions( /// New transactions for the peer to include in its mempool. - pub Vec, + pub Vec, ); impl Transactions { @@ -101,14 +101,14 @@ impl Transactions { } } -impl From> for Transactions { - fn from(txs: Vec) -> Self { +impl From> for Transactions { + fn from(txs: Vec) -> Self { Self(txs) } } -impl From for Vec { - fn from(txs: Transactions) -> Self { +impl From> for Vec { + fn from(txs: Transactions) -> Self { txs.0 } } @@ -120,9 +120,9 @@ impl From for Vec { #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(rlp, 20)] -pub struct SharedTransactions( +pub struct SharedTransactions( /// New transactions for the peer to include in its mempool. - pub Vec>, + pub Vec>, ); /// A wrapper type for all different new pooled transaction types @@ -269,7 +269,7 @@ impl NewPooledTransactionHashes { } } -impl From for EthMessage { +impl From for EthMessage { fn from(value: NewPooledTransactionHashes) -> Self { match value { NewPooledTransactionHashes::Eth66(msg) => Self::NewPooledTransactionHashes66(msg), @@ -309,7 +309,7 @@ impl From> for NewPooledTransactionHashes66 { } } -/// Same as [`NewPooledTransactionHashes66`] but extends that that beside the transaction hashes, +/// Same as [`NewPooledTransactionHashes66`] but extends that beside the transaction hashes, /// the node sends the transaction types and their sizes (as defined in EIP-2718) as well. 
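The `From` conversions above are what keep call sites ergonomic now that the broadcast types are generic. A small sketch, assuming the stock `TransactionSigned` instantiation shown in this diff:

```rust
use reth_eth_wire_types::Transactions;
use reth_primitives::TransactionSigned;

fn broadcast_roundtrip(signed: Vec<TransactionSigned>) -> Vec<TransactionSigned> {
    // Wrap the signed transactions into a `Transactions` broadcast message...
    let msg: Transactions<TransactionSigned> = signed.into();
    // ...and unwrap them again on the receiving side.
    msg.into()
}
```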
#[derive(Clone, Debug, PartialEq, Eq, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] @@ -554,7 +554,7 @@ pub trait HandleVersionedMempoolData { fn msg_version(&self) -> EthVersion; } -impl HandleMempoolData for Vec { +impl HandleMempoolData for Vec { fn is_empty(&self) -> bool { self.is_empty() } @@ -564,7 +564,7 @@ impl HandleMempoolData for Vec { } fn retain_by_hash(&mut self, mut f: impl FnMut(&TxHash) -> bool) { - self.retain(|tx| f(tx.hash())) + self.retain(|tx| f(tx.tx_hash())) } } @@ -732,7 +732,7 @@ impl RequestTxHashes { impl FromIterator<(TxHash, Eth68TxMetadata)> for RequestTxHashes { fn from_iter>(iter: I) -> Self { - Self::new(iter.into_iter().map(|(hash, _)| hash).collect::>()) + Self::new(iter.into_iter().map(|(hash, _)| hash).collect()) } } diff --git a/crates/net/eth-wire-types/src/header.rs b/crates/net/eth-wire-types/src/header.rs index 8c11bfa82bb6..9fa3b150d9e1 100644 --- a/crates/net/eth-wire-types/src/header.rs +++ b/crates/net/eth-wire-types/src/header.rs @@ -87,10 +87,9 @@ impl From for bool { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; + use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{address, b256, bloom, bytes, hex, Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; - use reth_primitives::Header; use std::str::FromStr; // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 diff --git a/crates/net/eth-wire-types/src/lib.rs b/crates/net/eth-wire-types/src/lib.rs index 0e8fd5df98ae..ac7ea55d0b90 100644 --- a/crates/net/eth-wire-types/src/lib.rs +++ b/crates/net/eth-wire-types/src/lib.rs @@ -40,3 +40,6 @@ pub use disconnect_reason::*; pub mod capability; pub use capability::*; + +pub mod primitives; +pub use primitives::*; diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index 8546bfe14c8f..9a8667203108 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -11,8 +11,7 @@ use super::{ GetNodeData, GetPooledTransactions, GetReceipts, NewBlock, NewPooledTransactionHashes66, NewPooledTransactionHashes68, NodeData, PooledTransactions, Receipts, Status, Transactions, }; -use crate::{EthVersion, SharedTransactions}; - +use crate::{EthNetworkPrimitives, EthVersion, NetworkPrimitives, SharedTransactions}; use alloy_primitives::bytes::{Buf, BufMut}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use std::{fmt::Debug, sync::Arc}; @@ -35,14 +34,18 @@ pub enum MessageError { /// An `eth` protocol message, containing a message ID and payload. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct ProtocolMessage { +pub struct ProtocolMessage { /// The unique identifier representing the type of the Ethereum message. pub message_type: EthMessageID, /// The content of the message, including specific data based on the message type. - pub message: EthMessage, + #[cfg_attr( + feature = "serde", + serde(bound = "EthMessage: serde::Serialize + serde::de::DeserializeOwned") + )] + pub message: EthMessage, } -impl ProtocolMessage { +impl ProtocolMessage { /// Create a new `ProtocolMessage` from a message type and message rlp bytes. pub fn decode_message(version: EthVersion, buf: &mut &[u8]) -> Result { let message_type = EthMessageID::decode(buf)?; @@ -73,58 +76,36 @@ impl ProtocolMessage { )?) 
} } - EthMessageID::GetBlockHeaders => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetBlockHeaders(request_pair) - } - EthMessageID::BlockHeaders => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::BlockHeaders(request_pair) - } - EthMessageID::GetBlockBodies => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetBlockBodies(request_pair) - } - EthMessageID::BlockBodies => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::BlockBodies(request_pair) - } + EthMessageID::GetBlockHeaders => EthMessage::GetBlockHeaders(RequestPair::decode(buf)?), + EthMessageID::BlockHeaders => EthMessage::BlockHeaders(RequestPair::decode(buf)?), + EthMessageID::GetBlockBodies => EthMessage::GetBlockBodies(RequestPair::decode(buf)?), + EthMessageID::BlockBodies => EthMessage::BlockBodies(RequestPair::decode(buf)?), EthMessageID::GetPooledTransactions => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetPooledTransactions(request_pair) + EthMessage::GetPooledTransactions(RequestPair::decode(buf)?) } EthMessageID::PooledTransactions => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::PooledTransactions(request_pair) + EthMessage::PooledTransactions(RequestPair::decode(buf)?) } EthMessageID::GetNodeData => { if version >= EthVersion::Eth67 { return Err(MessageError::Invalid(version, EthMessageID::GetNodeData)) } - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetNodeData(request_pair) + EthMessage::GetNodeData(RequestPair::decode(buf)?) } EthMessageID::NodeData => { if version >= EthVersion::Eth67 { return Err(MessageError::Invalid(version, EthMessageID::GetNodeData)) } - let request_pair = RequestPair::::decode(buf)?; - EthMessage::NodeData(request_pair) - } - EthMessageID::GetReceipts => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetReceipts(request_pair) - } - EthMessageID::Receipts => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::Receipts(request_pair) + EthMessage::NodeData(RequestPair::decode(buf)?) } + EthMessageID::GetReceipts => EthMessage::GetReceipts(RequestPair::decode(buf)?), + EthMessageID::Receipts => EthMessage::Receipts(RequestPair::decode(buf)?), }; Ok(Self { message_type, message }) } } -impl Encodable for ProtocolMessage { +impl Encodable for ProtocolMessage { /// Encodes the protocol message into bytes. The message type is encoded as a single byte and /// prepended to the message. fn encode(&self, out: &mut dyn BufMut) { @@ -136,23 +117,23 @@ impl Encodable for ProtocolMessage { } } -impl From for ProtocolMessage { - fn from(message: EthMessage) -> Self { +impl From> for ProtocolMessage { + fn from(message: EthMessage) -> Self { Self { message_type: message.message_id(), message } } } /// Represents messages that can be sent to multiple peers. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ProtocolBroadcastMessage { +#[derive(Clone, Debug)] +pub struct ProtocolBroadcastMessage { /// The unique identifier representing the type of the Ethereum message. pub message_type: EthMessageID, /// The content of the message to be broadcasted, including specific data based on the message /// type. - pub message: EthBroadcastMessage, + pub message: EthBroadcastMessage, } -impl Encodable for ProtocolBroadcastMessage { +impl Encodable for ProtocolBroadcastMessage { /// Encodes the protocol message into bytes. The message type is encoded as a single byte and /// prepended to the message. 
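With `ProtocolMessage` generic over `NetworkPrimitives`, decoding needs the primitives named explicitly whenever type inference has nothing to work with, as the updated tests later in this file do. A hedged sketch; the `message::MessageError` import path is an assumption based on the `message::RequestPair` imports elsewhere in this diff:

```rust
use reth_eth_wire_types::{
    message::MessageError, EthNetworkPrimitives, EthVersion, ProtocolMessage,
};

// Decode a message-id-prefixed eth payload for the stock Ethereum types.
fn decode_eth68(buf: &[u8]) -> Result<ProtocolMessage<EthNetworkPrimitives>, MessageError> {
    ProtocolMessage::<EthNetworkPrimitives>::decode_message(EthVersion::Eth68, &mut &buf[..])
}
```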
fn encode(&self, out: &mut dyn BufMut) { @@ -164,8 +145,8 @@ impl Encodable for ProtocolBroadcastMessage { } } -impl From for ProtocolBroadcastMessage { - fn from(message: EthBroadcastMessage) -> Self { +impl From> for ProtocolBroadcastMessage { + fn from(message: EthBroadcastMessage) -> Self { Self { message_type: message.message_id(), message } } } @@ -189,15 +170,23 @@ impl From for ProtocolBroadcastMessage { /// [`NewPooledTransactionHashes68`] is defined. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub enum EthMessage { +pub enum EthMessage { /// Represents a Status message required for the protocol handshake. Status(Status), /// Represents a `NewBlockHashes` message broadcast to the network. NewBlockHashes(NewBlockHashes), /// Represents a `NewBlock` message broadcast to the network. - NewBlock(Box), + #[cfg_attr( + feature = "serde", + serde(bound = "N::Block: serde::Serialize + serde::de::DeserializeOwned") + )] + NewBlock(Box>), /// Represents a Transactions message broadcast to the network. - Transactions(Transactions), + #[cfg_attr( + feature = "serde", + serde(bound = "N::BroadcastedTransaction: serde::Serialize + serde::de::DeserializeOwned") + )] + Transactions(Transactions), /// Represents a `NewPooledTransactionHashes` message for eth/66 version. NewPooledTransactionHashes66(NewPooledTransactionHashes66), /// Represents a `NewPooledTransactionHashes` message for eth/68 version. @@ -206,15 +195,27 @@ pub enum EthMessage { /// Represents a `GetBlockHeaders` request-response pair. GetBlockHeaders(RequestPair), /// Represents a `BlockHeaders` request-response pair. - BlockHeaders(RequestPair), + #[cfg_attr( + feature = "serde", + serde(bound = "N::BlockHeader: serde::Serialize + serde::de::DeserializeOwned") + )] + BlockHeaders(RequestPair>), /// Represents a `GetBlockBodies` request-response pair. GetBlockBodies(RequestPair), /// Represents a `BlockBodies` request-response pair. - BlockBodies(RequestPair), + #[cfg_attr( + feature = "serde", + serde(bound = "N::BlockBody: serde::Serialize + serde::de::DeserializeOwned") + )] + BlockBodies(RequestPair>), /// Represents a `GetPooledTransactions` request-response pair. GetPooledTransactions(RequestPair), /// Represents a `PooledTransactions` request-response pair. - PooledTransactions(RequestPair), + #[cfg_attr( + feature = "serde", + serde(bound = "N::PooledTransaction: serde::Serialize + serde::de::DeserializeOwned") + )] + PooledTransactions(RequestPair>), /// Represents a `GetNodeData` request-response pair. GetNodeData(RequestPair), /// Represents a `NodeData` request-response pair. @@ -225,7 +226,7 @@ pub enum EthMessage { Receipts(RequestPair), } -impl EthMessage { +impl EthMessage { /// Returns the message's ID. pub const fn message_id(&self) -> EthMessageID { match self { @@ -250,7 +251,7 @@ impl EthMessage { } } -impl Encodable for EthMessage { +impl Encodable for EthMessage { fn encode(&self, out: &mut dyn BufMut) { match self { Self::Status(status) => status.encode(out), @@ -301,16 +302,16 @@ impl Encodable for EthMessage { /// /// Note: This is only useful for outgoing messages. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum EthBroadcastMessage { +pub enum EthBroadcastMessage { /// Represents a new block broadcast message. - NewBlock(Arc), + NewBlock(Arc>), /// Represents a transactions broadcast message. 
- Transactions(SharedTransactions), + Transactions(SharedTransactions), } // === impl EthBroadcastMessage === -impl EthBroadcastMessage { +impl EthBroadcastMessage { /// Returns the message's ID. pub const fn message_id(&self) -> EthMessageID { match self { @@ -320,7 +321,7 @@ impl EthBroadcastMessage { } } -impl Encodable for EthBroadcastMessage { +impl Encodable for EthBroadcastMessage { fn encode(&self, out: &mut dyn BufMut) { match self { Self::NewBlock(new_block) => new_block.encode(out), @@ -502,8 +503,8 @@ where mod tests { use super::MessageError; use crate::{ - message::RequestPair, EthMessage, EthMessageID, EthVersion, GetNodeData, NodeData, - ProtocolMessage, + message::RequestPair, EthMessage, EthMessageID, EthNetworkPrimitives, EthVersion, + GetNodeData, NodeData, ProtocolMessage, }; use alloy_primitives::hex; use alloy_rlp::{Decodable, Encodable, Error}; @@ -516,20 +517,30 @@ mod tests { #[test] fn test_removed_message_at_eth67() { - let get_node_data = - EthMessage::GetNodeData(RequestPair { request_id: 1337, message: GetNodeData(vec![]) }); + let get_node_data = EthMessage::::GetNodeData(RequestPair { + request_id: 1337, + message: GetNodeData(vec![]), + }); let buf = encode(ProtocolMessage { message_type: EthMessageID::GetNodeData, message: get_node_data, }); - let msg = ProtocolMessage::decode_message(crate::EthVersion::Eth67, &mut &buf[..]); + let msg = ProtocolMessage::::decode_message( + crate::EthVersion::Eth67, + &mut &buf[..], + ); assert!(matches!(msg, Err(MessageError::Invalid(..)))); - let node_data = - EthMessage::NodeData(RequestPair { request_id: 1337, message: NodeData(vec![]) }); + let node_data = EthMessage::::NodeData(RequestPair { + request_id: 1337, + message: NodeData(vec![]), + }); let buf = encode(ProtocolMessage { message_type: EthMessageID::NodeData, message: node_data }); - let msg = ProtocolMessage::decode_message(crate::EthVersion::Eth67, &mut &buf[..]); + let msg = ProtocolMessage::::decode_message( + crate::EthVersion::Eth67, + &mut &buf[..], + ); assert!(matches!(msg, Err(MessageError::Invalid(..)))); } @@ -578,10 +589,11 @@ mod tests { #[test] fn empty_block_bodies_protocol() { - let empty_block_bodies = ProtocolMessage::from(EthMessage::BlockBodies(RequestPair { - request_id: 0, - message: Default::default(), - })); + let empty_block_bodies = + ProtocolMessage::from(EthMessage::::BlockBodies(RequestPair { + request_id: 0, + message: Default::default(), + })); let mut buf = Vec::new(); empty_block_bodies.encode(&mut buf); let decoded = diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs new file mode 100644 index 000000000000..ff7ab1c801bd --- /dev/null +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -0,0 +1,98 @@ +//! Abstraction over primitive types in network messages. + +use alloy_rlp::{Decodable, Encodable}; +use reth_primitives_traits::{Block, BlockHeader}; +use std::fmt::Debug; + +/// Abstraction over primitive types which might appear in network messages. See +/// [`crate::EthMessage`] for more context. +pub trait NetworkPrimitives: + Send + Sync + Unpin + Clone + Debug + PartialEq + Eq + 'static +{ + /// The block header type. + type BlockHeader: BlockHeader + + Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; + /// The block body type. + type BlockBody: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; + /// Full block type. + type Block: Block
+ + Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; + + /// The transaction type which peers announce in `Transactions` messages. It is different from + /// `PooledTransactions` to account for Ethereum case where EIP-4844 transactions are not being + /// announced and can only be explicitly requested from peers. + type BroadcastedTransaction: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; + /// The transaction type which peers return in `PooledTransactions` messages. + type PooledTransaction: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; + + /// The transaction type which peers return in `GetReceipts` messages. + type Receipt: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; +} + +/// Primitive types used by Ethereum network. +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] +#[non_exhaustive] +pub struct EthNetworkPrimitives; + +impl NetworkPrimitives for EthNetworkPrimitives { + type BlockHeader = alloy_consensus::Header; + type BlockBody = reth_primitives::BlockBody; + type Block = reth_primitives::Block; + type BroadcastedTransaction = reth_primitives::TransactionSigned; + type PooledTransaction = reth_primitives::PooledTransactionsElement; + type Receipt = reth_primitives::Receipt; +} diff --git a/crates/net/eth-wire-types/src/receipts.rs b/crates/net/eth-wire-types/src/receipts.rs index db9d6f871e41..ca5e85a146f8 100644 --- a/crates/net/eth-wire-types/src/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -54,7 +54,7 @@ mod tests { fn encode_get_receipts() { let expected = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: GetReceipts(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -72,7 +72,7 @@ mod tests { let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!( request, - RequestPair:: { + RequestPair { request_id: 1111, message: GetReceipts(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -88,7 +88,7 @@ mod tests { fn encode_receipts() { let expected = hex!("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: Receipts(vec![vec![ ReceiptWithBloom { @@ -124,7 +124,7 @@ mod tests { let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!( request, - RequestPair:: { + RequestPair { request_id: 1111, message: 
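The new `primitives.rs` above is the heart of this change: one trait bundling every type that can appear in a wire message. A hedged sketch of what a downstream implementation could look like; `CustomNetworkPrimitives` is a hypothetical placeholder, and it simply reuses the Ethereum types so the example stays self-contained, where a real alternative network would substitute its own header/body/transaction types satisfying the trait's `Encodable + Decodable` (plus marker) bounds:

```rust
use reth_eth_wire_types::NetworkPrimitives;

// Hypothetical placeholder; not a type introduced by this PR.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash)]
#[non_exhaustive]
pub struct CustomNetworkPrimitives;

impl NetworkPrimitives for CustomNetworkPrimitives {
    // Mirrors the `EthNetworkPrimitives` impl above to stay compilable.
    type BlockHeader = alloy_consensus::Header;
    type BlockBody = reth_primitives::BlockBody;
    type Block = reth_primitives::Block;
    type BroadcastedTransaction = reth_primitives::TransactionSigned;
    type PooledTransaction = reth_primitives::PooledTransactionsElement;
    type Receipt = reth_primitives::Receipt;
}
```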
Receipts(vec![ vec![ diff --git a/crates/net/eth-wire-types/src/response.rs b/crates/net/eth-wire-types/src/response.rs deleted file mode 100644 index dfcf5ed56a8c..000000000000 --- a/crates/net/eth-wire-types/src/response.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::{ - BlockBodies, BlockHeaders, NodeData, PooledTransactions, Receipts, RequestPair, Status, -}; - -// This type is analogous to the `zebra_network::Response` type. -/// An ethereum network response for version 66. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum Response { - /// The request does not have a response. - Nil, - - /// The [`Status`](super::Status) message response in the eth protocol handshake. - Status(Status), - - /// The response to a [`Request::GetBlockHeaders`](super::Request::GetBlockHeaders) request. - BlockHeaders(RequestPair), - - /// The response to a [`Request::GetBlockBodies`](super::Request::GetBlockBodies) request. - BlockBodies(RequestPair), - - /// The response to a [`Request::GetPooledTransactions`](super::Request::GetPooledTransactions) request. - PooledTransactions(RequestPair), - - /// The response to a [`Request::GetNodeData`](super::Request::GetNodeData) request. - NodeData(RequestPair), - - /// The response to a [`Request::GetReceipts`](super::Request::GetReceipts) request. - Receipts(RequestPair), -} diff --git a/crates/net/eth-wire-types/src/state.rs b/crates/net/eth-wire-types/src/state.rs index 16a2959b338e..57273adc6b11 100644 --- a/crates/net/eth-wire-types/src/state.rs +++ b/crates/net/eth-wire-types/src/state.rs @@ -36,7 +36,7 @@ mod tests { fn encode_get_node_data() { let expected = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: GetNodeData(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -54,7 +54,7 @@ mod tests { let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!( request, - RequestPair:: { + RequestPair { request_id: 1111, message: GetNodeData(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -69,7 +69,7 @@ mod tests { fn encode_node_data() { let expected = hex!("ce820457ca84deadc0de84feedbeef"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: NodeData(vec![ hex!("deadc0de").as_slice().into(), @@ -87,7 +87,7 @@ mod tests { let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!( request, - RequestPair:: { + RequestPair { request_id: 1111, message: NodeData(vec![ hex!("deadc0de").as_slice().into(), diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index d9e8d4319b5b..fa73d0907feb 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -338,7 +338,7 @@ mod tests { let total_difficulty = U256::from(rng.gen::()); // create a genesis that has a random part, so we can check that the hash is preserved - let genesis = Genesis { nonce: rng.gen::(), ..Default::default() }; + let genesis = Genesis { nonce: rng.gen(), ..Default::default() }; // build head let head = Head { diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index 97d18001f13c..ca76f0a8c7ed 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ 
b/crates/net/eth-wire-types/src/transactions.rs @@ -1,12 +1,11 @@ //! Implements the `GetPooledTransactions` and `PooledTransactions` message types. +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use derive_more::{Constructor, Deref, IntoIterator}; use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{ - transaction::TransactionConversionError, PooledTransactionsElement, TransactionSigned, -}; +use reth_primitives::PooledTransactionsElement; /// A list of transaction hashes that the peer would like transaction bodies for. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] @@ -42,38 +41,46 @@ where Eq, RlpEncodableWrapper, RlpDecodableWrapper, - Default, IntoIterator, Deref, Constructor, )] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct PooledTransactions( +pub struct PooledTransactions( /// The transaction bodies, each of which should correspond to a requested hash. - pub Vec, + pub Vec, ); -impl PooledTransactions { +impl PooledTransactions { /// Returns an iterator over the transaction hashes in this response. - pub fn hashes(&self) -> impl Iterator + '_ { - self.0.iter().map(|tx| tx.hash()) + pub fn hashes(&self) -> impl Iterator + '_ { + self.0.iter().map(|tx| tx.trie_hash()) } } -impl TryFrom> for PooledTransactions { - type Error = TransactionConversionError; +impl TryFrom> for PooledTransactions +where + T: TryFrom, +{ + type Error = T::Error; - fn try_from(txs: Vec) -> Result { - txs.into_iter().map(PooledTransactionsElement::try_from).collect() + fn try_from(txs: Vec) -> Result { + txs.into_iter().map(T::try_from).collect() } } -impl FromIterator for PooledTransactions { - fn from_iter>(iter: I) -> Self { +impl FromIterator for PooledTransactions { + fn from_iter>(iter: I) -> Self { Self(iter.into_iter().collect()) } } +impl Default for PooledTransactions { + fn default() -> Self { + Self(Default::default()) + } +} + #[cfg(test)] mod tests { use crate::{message::RequestPair, GetPooledTransactions, PooledTransactions}; @@ -89,7 +96,7 @@ mod tests { fn encode_get_pooled_transactions() { let expected = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: GetPooledTransactions(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -107,7 +114,7 @@ mod tests { let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!( request, - RequestPair:: { + RequestPair { request_id: 1111, message: GetPooledTransactions(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -123,7 +130,7 @@ mod tests { let expected = hex!("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"); let mut data = vec![]; let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), 
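`PooledTransactions` now converts through any `T: TryFrom<TransactionSigned>` instead of hard-coding `PooledTransactionsElement`. A sketch of the conversion for the stock element type, matching what the tests below do with `expect`:

```rust
use reth_eth_wire_types::PooledTransactions;
use reth_primitives::{PooledTransactionsElement, TransactionSigned};

fn into_pooled(
    signed: Vec<TransactionSigned>,
) -> Result<
    PooledTransactions<PooledTransactionsElement>,
    <PooledTransactionsElement as TryFrom<TransactionSigned>>::Error,
> {
    // Element-wise conversion via the blanket `TryFrom` impl above; it fails
    // on the first transaction that has no pooled representation (e.g. an
    // EIP-4844 transaction without its blob sidecar).
    signed.try_into()
}
```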
nonce: 0x8u64, @@ -145,7 +152,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x09u64, @@ -175,7 +182,7 @@ mod tests { .expect("Failed to convert TransactionSigned to PooledTransactionsElement") }) .collect(); - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: PooledTransactions(message), /* Assuming PooledTransactions wraps a * Vec */ @@ -189,7 +196,7 @@ mod tests { fn decode_pooled_transactions() { let data = hex!("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"); let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x8u64, @@ -211,7 +218,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x09u64, @@ -241,10 +248,7 @@ mod tests { .expect("Failed to convert TransactionSigned to PooledTransactionsElement") }) .collect(); - let expected = RequestPair:: { - request_id: 1111, - message: PooledTransactions(message), - }; + let expected = RequestPair { request_id: 1111, message: PooledTransactions(message) }; let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!(request, expected); @@ -256,7 +260,7 @@ mod tests { let decoded_transactions = RequestPair::::decode(&mut &data[..]).unwrap(); let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 15u64, @@ -278,7 +282,7 @@ mod tests { true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: 4, nonce: 26u64, @@ -302,7 +306,7 @@ mod tests { true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 3u64, @@ -324,7 +328,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 1u64, @@ -346,7 +350,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 2u64, @@ -376,10 +380,8 @@ mod tests { .expect("Failed to convert TransactionSigned to PooledTransactionsElement") }) .collect(); - let expected_transactions = RequestPair:: { - request_id: 0, - message: PooledTransactions(message), - }; + let expected_transactions = + RequestPair { request_id: 0, message: PooledTransactions(message) }; // checking tx by tx for easier debugging if there are any regressions for (decoded, expected) in @@ -395,7 +397,7 @@ mod tests { fn encode_pooled_transactions_network() { let expected = 
hex!("f9022980f90225f8650f84832156008287fb94cf7f9e66af820a19257a2108375b180b0ec491678204d2802ca035b7bfeb9ad9ece2cbafaaf8e202e706b4cfaeb233f46198f00b44d4a566a981a0612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860b87502f872041a8459682f008459682f0d8252089461815774383099e24810ab832a5b2a5425c154d58829a2241af62c000080c001a059e6b67f48fb32e7e570dfb11e042b5ad2e55e3ce3ce9cd989c7e06e07feeafda0016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469f86b0384773594008398968094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba0ce6834447c0a4193c40382e6c57ae33b241379c5418caac9cdc18d786fd12071a03ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88f86b01843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac3960468702769bb01b2a00802ba0e24d8bd32ad906d6f8b8d7741e08d1959df021698b19ee232feba15361587d0aa05406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631daf86b02843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba00eb96ca19e8a77102767a41fc85a36afd5c61ccb09911cec5d3e86e193d9c5aea03a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18"); let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 15u64, @@ -417,7 +419,7 @@ mod tests { true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: 4, nonce: 26u64, @@ -441,7 +443,7 @@ mod tests { true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 3u64, @@ -463,7 +465,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 1u64, @@ -485,7 +487,7 @@ mod tests { false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 2u64, @@ -515,10 +517,7 @@ mod tests { .expect("Failed to convert TransactionSigned to PooledTransactionsElement") }) .collect(); - let transactions = RequestPair:: { - request_id: 0, - message: PooledTransactions(message), - }; + let transactions = RequestPair { request_id: 0, message: PooledTransactions(message) }; let mut encoded = vec![]; transactions.encode(&mut encoded); diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 83a3e163ebc1..ffbd3017fa62 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -13,16 +13,17 @@ workspace = true [dependencies] # reth -reth-chainspec.workspace = true reth-codecs.workspace = true -reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-ecies.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } reth-eth-wire-types.workspace = true reth-network-peers.workspace = true +reth-ethereum-forks.workspace = true # ethereum alloy-primitives.workspace = true +alloy-chains.workspace = true # metrics reth-metrics.workspace = true @@ -44,6 +45,7 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] reth-primitives = { workspace = true, features = ["arbitrary"] } +reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-eth-wire-types = { workspace = true, features = ["arbitrary"] } reth-tracing.workspace = true @@ -66,13 +68,15 @@ alloy-eips.workspace = true [features] 
arbitrary = [ - "reth-primitives/arbitrary", "reth-eth-wire-types/arbitrary", "dep:arbitrary", - "reth-chainspec/arbitrary", "alloy-eips/arbitrary", "alloy-primitives/arbitrary", - "reth-codecs/arbitrary" + "reth-codecs/arbitrary", + "alloy-chains/arbitrary", + "reth-primitives-traits/arbitrary", + "reth-ethereum-forks/arbitrary", + "reth-primitives/arbitrary" ] serde = [ "dep:serde", @@ -82,7 +86,9 @@ serde = [ "bytes/serde", "rand/serde", "secp256k1/serde", - "reth-codecs/serde" + "reth-codecs/serde", + "alloy-chains/serde", + "reth-primitives-traits/serde", ] [[test]] diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index d60e500744c0..625971e0e7bd 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -5,10 +5,11 @@ use crate::{ p2pstream::MAX_RESERVED_MESSAGE_ID, protocol::{ProtoVersion, Protocol}, version::ParseVersionError, - Capability, EthMessage, EthMessageID, EthVersion, + Capability, EthMessageID, EthVersion, }; use alloy_primitives::bytes::Bytes; use derive_more::{Deref, DerefMut}; +use reth_eth_wire_types::{EthMessage, EthNetworkPrimitives, NetworkPrimitives}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::{ @@ -30,9 +31,13 @@ pub struct RawCapabilityMessage { /// network. #[derive(Debug)] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum CapabilityMessage { +pub enum CapabilityMessage { /// Eth sub-protocol message. - Eth(EthMessage), + #[cfg_attr( + feature = "serde", + serde(bound = "EthMessage: Serialize + serde::de::DeserializeOwned") + )] + Eth(EthMessage), /// Any other capability message. Other(RawCapabilityMessage), } diff --git a/crates/net/eth-wire/src/errors/eth.rs b/crates/net/eth-wire/src/errors/eth.rs index 1f8b995afda4..499ff8089bfc 100644 --- a/crates/net/eth-wire/src/errors/eth.rs +++ b/crates/net/eth-wire/src/errors/eth.rs @@ -3,10 +3,11 @@ use crate::{ errors::P2PStreamError, message::MessageError, version::ParseVersionError, DisconnectReason, }; +use alloy_chains::Chain; use alloy_primitives::B256; -use reth_chainspec::Chain; use reth_eth_wire_types::EthVersion; -use reth_primitives::{GotExpected, GotExpectedBoxed, ValidationError}; +use reth_ethereum_forks::ValidationError; +use reth_primitives_traits::{GotExpected, GotExpectedBoxed}; use std::io; /// Errors when sending/receiving messages diff --git a/crates/net/eth-wire/src/errors/p2p.rs b/crates/net/eth-wire/src/errors/p2p.rs index 2cfef926984b..f24e2cebc784 100644 --- a/crates/net/eth-wire/src/errors/p2p.rs +++ b/crates/net/eth-wire/src/errors/p2p.rs @@ -3,7 +3,7 @@ use std::io; use reth_eth_wire_types::{DisconnectReason, UnknownDisconnectReason}; -use reth_primitives::GotExpected; +use reth_primitives_traits::GotExpected; use crate::{capability::SharedCapabilityError, ProtocolVersion}; diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index 8ae599b6792d..675ea19a5cec 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -2,12 +2,15 @@ use crate::{ errors::{EthHandshakeError, EthStreamError}, message::{EthBroadcastMessage, ProtocolBroadcastMessage}, p2pstream::HANDSHAKE_TIMEOUT, - CanDisconnect, DisconnectReason, EthMessage, EthVersion, ProtocolMessage, Status, + CanDisconnect, DisconnectReason, EthMessage, EthNetworkPrimitives, EthVersion, ProtocolMessage, + Status, }; use alloy_primitives::bytes::{Bytes, BytesMut}; use futures::{ready, Sink, SinkExt, StreamExt}; use 
pin_project::pin_project; -use reth_primitives::{ForkFilter, GotExpected}; +use reth_eth_wire_types::NetworkPrimitives; +use reth_ethereum_forks::ForkFilter; +use reth_primitives_traits::GotExpected; use std::{ pin::Pin, task::{Context, Poll}, @@ -53,32 +56,32 @@ where /// Consumes the [`UnauthedEthStream`] and returns an [`EthStream`] after the `Status` /// handshake is completed successfully. This also returns the `Status` message sent by the /// remote peer. - pub async fn handshake( + pub async fn handshake( self, status: Status, fork_filter: ForkFilter, - ) -> Result<(EthStream, Status), EthStreamError> { + ) -> Result<(EthStream, Status), EthStreamError> { self.handshake_with_timeout(status, fork_filter, HANDSHAKE_TIMEOUT).await } /// Wrapper around handshake which enforces a timeout. - pub async fn handshake_with_timeout( + pub async fn handshake_with_timeout( self, status: Status, fork_filter: ForkFilter, timeout_limit: Duration, - ) -> Result<(EthStream, Status), EthStreamError> { + ) -> Result<(EthStream, Status), EthStreamError> { timeout(timeout_limit, Self::handshake_without_timeout(self, status, fork_filter)) .await .map_err(|_| EthStreamError::StreamTimeout)? } /// Handshake with no timeout - pub async fn handshake_without_timeout( + pub async fn handshake_without_timeout( mut self, status: Status, fork_filter: ForkFilter, - ) -> Result<(EthStream, Status), EthStreamError> { + ) -> Result<(EthStream, Status), EthStreamError> { trace!( %status, "sending eth status to peer" @@ -87,7 +90,10 @@ where // we need to encode and decode here on our own because we don't have an `EthStream` yet // The max length for a status with TTD is: + self.inner - .send(alloy_rlp::encode(ProtocolMessage::from(EthMessage::Status(status))).into()) + .send( + alloy_rlp::encode(ProtocolMessage::::from(EthMessage::::Status(status))) + .into(), + ) .await?; let their_msg_res = self.inner.next().await; @@ -106,7 +112,7 @@ where } let version = status.version; - let msg = match ProtocolMessage::decode_message(version, &mut their_msg.as_ref()) { + let msg = match ProtocolMessage::::decode_message(version, &mut their_msg.as_ref()) { Ok(m) => m, Err(err) => { debug!("decode error in eth handshake: msg={their_msg:x}"); @@ -187,19 +193,21 @@ where /// compatible with eth-networking protocol messages, which get RLP encoded/decoded. #[pin_project] #[derive(Debug)] -pub struct EthStream { +pub struct EthStream { /// Negotiated eth version. version: EthVersion, #[pin] inner: S, + + _pd: std::marker::PhantomData, } -impl EthStream { +impl EthStream { /// Creates a new unauthed [`EthStream`] from a provided stream. You will need /// to manually handshake a peer. #[inline] pub const fn new(version: EthVersion, inner: S) -> Self { - Self { version, inner } + Self { version, inner, _pd: std::marker::PhantomData } } /// Returns the eth version. @@ -227,15 +235,16 @@ impl EthStream { } } -impl EthStream +impl EthStream where S: Sink + Unpin, EthStreamError: From, + N: NetworkPrimitives, { /// Same as [`Sink::start_send`] but accepts a [`EthBroadcastMessage`] instead. 
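`EthStream` gains a `_pd: PhantomData<N>` field because `N` never appears in a runtime field, only in the trait impls, and Rust requires every declared type parameter to be used. The pattern in isolation:

```rust
use std::marker::PhantomData;

// A stream wrapper whose extra parameter `N` only selects trait impls.
// Without the zero-sized marker, the compiler rejects the unused parameter.
struct TypedStream<S, N> {
    inner: S,
    _pd: PhantomData<N>,
}

impl<S, N> TypedStream<S, N> {
    const fn new(inner: S) -> Self {
        Self { inner, _pd: PhantomData }
    }
}
```

This is also why the handshake call sites in the tests below grow an explicit `handshake::<EthNetworkPrimitives>(...)` turbofish: the primitives can no longer be inferred from the arguments alone.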
pub fn start_send_broadcast( &mut self, - item: EthBroadcastMessage, + item: EthBroadcastMessage, ) -> Result<(), EthStreamError> { self.inner.start_send_unpin(Bytes::from(alloy_rlp::encode( ProtocolBroadcastMessage::from(item), @@ -245,12 +254,13 @@ where } } -impl Stream for EthStream +impl Stream for EthStream where S: Stream> + Unpin, EthStreamError: From, + N: NetworkPrimitives, { - type Item = Result; + type Item = Result, EthStreamError>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.project(); @@ -292,10 +302,11 @@ where } } -impl Sink for EthStream +impl Sink> for EthStream where S: CanDisconnect + Unpin, EthStreamError: From<>::Error>, + N: NetworkPrimitives, { type Error = EthStreamError; @@ -303,7 +314,7 @@ where self.project().inner.poll_ready(cx).map_err(Into::into) } - fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { + fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { if matches!(item, EthMessage::Status(_)) { // TODO: to disconnect here we would need to do something similar to P2PStream's // start_disconnect, which would ideally be a part of the CanDisconnect trait, or at @@ -333,10 +344,11 @@ where } } -impl CanDisconnect for EthStream +impl CanDisconnect> for EthStream where S: CanDisconnect + Send, EthStreamError: From<>::Error>, + N: NetworkPrimitives, { async fn disconnect(&mut self, reason: DisconnectReason) -> Result<(), EthStreamError> { self.inner.disconnect(reason).await.map_err(Into::into) @@ -354,12 +366,13 @@ mod tests { EthMessage, EthStream, EthVersion, HelloMessageWithProtocols, PassthroughCodec, ProtocolVersion, Status, }; + use alloy_chains::NamedChain; use alloy_primitives::{B256, U256}; use futures::{SinkExt, StreamExt}; - use reth_chainspec::NamedChain; use reth_ecies::stream::ECIESStream; + use reth_eth_wire_types::EthNetworkPrimitives; + use reth_ethereum_forks::{ForkFilter, Head}; use reth_network_peers::pk2id; - use reth_primitives::{ForkFilter, Head}; use secp256k1::{SecretKey, SECP256K1}; use std::time::Duration; use tokio::net::{TcpListener, TcpStream}; @@ -390,7 +403,7 @@ mod tests { let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); let (_, their_status) = UnauthedEthStream::new(stream) - .handshake(status_clone, fork_filter_clone) + .handshake::(status_clone, fork_filter_clone) .await .unwrap(); @@ -402,8 +415,10 @@ mod tests { let sink = PassthroughCodec::default().framed(outgoing); // try to connect - let (_, their_status) = - UnauthedEthStream::new(sink).handshake(status, fork_filter).await.unwrap(); + let (_, their_status) = UnauthedEthStream::new(sink) + .handshake::(status, fork_filter) + .await + .unwrap(); // their status is a clone of our status, these should be equal assert_eq!(their_status, status); @@ -437,7 +452,7 @@ mod tests { let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); let (_, their_status) = UnauthedEthStream::new(stream) - .handshake(status_clone, fork_filter_clone) + .handshake::(status_clone, fork_filter_clone) .await .unwrap(); @@ -449,8 +464,10 @@ mod tests { let sink = PassthroughCodec::default().framed(outgoing); // try to connect - let (_, their_status) = - UnauthedEthStream::new(sink).handshake(status, fork_filter).await.unwrap(); + let (_, their_status) = UnauthedEthStream::new(sink) + .handshake::(status, fork_filter) + .await + .unwrap(); // their status is a clone of our status, 
these should be equal assert_eq!(their_status, status); @@ -483,8 +500,9 @@ mod tests { // roughly based off of the design of tokio::net::TcpListener let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); - let handshake_res = - UnauthedEthStream::new(stream).handshake(status_clone, fork_filter_clone).await; + let handshake_res = UnauthedEthStream::new(stream) + .handshake::(status_clone, fork_filter_clone) + .await; // make sure the handshake fails due to td too high assert!(matches!( @@ -499,7 +517,9 @@ mod tests { let sink = PassthroughCodec::default().framed(outgoing); // try to connect - let handshake_res = UnauthedEthStream::new(sink).handshake(status, fork_filter).await; + let handshake_res = UnauthedEthStream::new(sink) + .handshake::(status, fork_filter) + .await; // this handshake should also fail due to td too high assert!(matches!( @@ -517,7 +537,7 @@ mod tests { async fn can_write_and_read_cleartext() { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); - let test_msg = EthMessage::NewBlockHashes( + let test_msg: EthMessage = EthMessage::NewBlockHashes( vec![ BlockHashNumber { hash: B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, @@ -552,7 +572,7 @@ mod tests { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); let server_key = SecretKey::new(&mut rand::thread_rng()); - let test_msg = EthMessage::NewBlockHashes( + let test_msg: EthMessage = EthMessage::NewBlockHashes( vec![ BlockHashNumber { hash: B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, @@ -594,7 +614,7 @@ mod tests { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); let server_key = SecretKey::new(&mut rand::thread_rng()); - let test_msg = EthMessage::NewBlockHashes( + let test_msg: EthMessage = EthMessage::NewBlockHashes( vec![ BlockHashNumber { hash: B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, @@ -698,7 +718,7 @@ mod tests { let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); let (_, their_status) = UnauthedEthStream::new(stream) - .handshake(status_clone, fork_filter_clone) + .handshake::(status_clone, fork_filter_clone) .await .unwrap(); @@ -711,7 +731,11 @@ mod tests { // try to connect let handshake_result = UnauthedEthStream::new(sink) - .handshake_with_timeout(status, fork_filter, Duration::from_secs(1)) + .handshake_with_timeout::( + status, + fork_filter, + Duration::from_secs(1), + ) .await; // Assert that a timeout error occurred diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index 2eb42eaeb49d..5d7650b4b7bb 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -2,7 +2,7 @@ use crate::{Capability, EthVersion, ProtocolVersion}; use alloy_rlp::{RlpDecodable, RlpEncodable}; use reth_codecs::add_arbitrary_tests; use reth_network_peers::PeerId; -use reth_primitives::constants::RETH_CLIENT_VERSION; +use reth_primitives_traits::constants::RETH_CLIENT_VERSION; /// The default tcp port for p2p. 
/// diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index d1d977aba784..e46563cad48f 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -24,7 +24,8 @@ use crate::{ }; use bytes::{Bytes, BytesMut}; use futures::{Sink, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; -use reth_primitives::ForkFilter; +use reth_eth_wire_types::NetworkPrimitives; +use reth_ethereum_forks::ForkFilter; use tokio::sync::{mpsc, mpsc::UnboundedSender}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -204,11 +205,11 @@ impl RlpxProtocolMultiplexer { /// Converts this multiplexer into a [`RlpxSatelliteStream`] with eth protocol as the given /// primary protocol. - pub async fn into_eth_satellite_stream( + pub async fn into_eth_satellite_stream( self, status: Status, fork_filter: ForkFilter, - ) -> Result<(RlpxSatelliteStream>, Status), EthStreamError> + ) -> Result<(RlpxSatelliteStream>, Status), EthStreamError> where St: Stream> + Sink + Unpin, { @@ -674,6 +675,7 @@ mod tests { }, UnauthedP2PStream, }; + use reth_eth_wire_types::EthNetworkPrimitives; use tokio::{net::TcpListener, sync::oneshot}; use tokio_util::codec::Decoder; @@ -693,7 +695,7 @@ mod tests { UnauthedP2PStream::new(stream).handshake(server_hello).await.unwrap(); let (_eth_stream, _) = UnauthedEthStream::new(p2p_stream) - .handshake(other_status, other_fork_filter) + .handshake::(other_status, other_fork_filter) .await .unwrap(); @@ -708,7 +710,9 @@ mod tests { .into_satellite_stream_with_handshake( eth.capability().as_ref(), move |proxy| async move { - UnauthedEthStream::new(proxy).handshake(status, fork_filter).await + UnauthedEthStream::new(proxy) + .handshake::(status, fork_filter) + .await }, ) .await @@ -731,7 +735,7 @@ mod tests { let (conn, _) = UnauthedP2PStream::new(stream).handshake(server_hello).await.unwrap(); let (mut st, _their_status) = RlpxProtocolMultiplexer::new(conn) - .into_eth_satellite_stream(other_status, other_fork_filter) + .into_eth_satellite_stream::(other_status, other_fork_filter) .await .unwrap(); @@ -762,7 +766,7 @@ mod tests { let conn = connect_passthrough(local_addr, test_hello().0).await; let (mut st, _their_status) = RlpxProtocolMultiplexer::new(conn) - .into_eth_satellite_stream(status, fork_filter) + .into_eth_satellite_stream::(status, fork_filter) .await .unwrap(); diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index 76075838bc76..0ae546daafb1 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -14,7 +14,7 @@ use futures::{Sink, SinkExt, StreamExt}; use pin_project::pin_project; use reth_codecs::add_arbitrary_tests; use reth_metrics::metrics::counter; -use reth_primitives::GotExpected; +use reth_primitives_traits::GotExpected; use std::{ collections::VecDeque, io, diff --git a/crates/net/eth-wire/src/test_utils.rs b/crates/net/eth-wire/src/test_utils.rs index d7a3aa582b72..56656d60e94a 100644 --- a/crates/net/eth-wire/src/test_utils.rs +++ b/crates/net/eth-wire/src/test_utils.rs @@ -6,10 +6,10 @@ use crate::{ hello::DEFAULT_TCP_PORT, EthVersion, HelloMessageWithProtocols, P2PStream, ProtocolVersion, Status, UnauthedP2PStream, }; +use alloy_chains::Chain; use alloy_primitives::{B256, U256}; -use reth_chainspec::Chain; +use reth_ethereum_forks::{ForkFilter, Head}; use reth_network_peers::pk2id; -use reth_primitives::{ForkFilter, Head}; use secp256k1::{SecretKey, SECP256K1}; use std::net::SocketAddr; use tokio::net::TcpStream; diff 
--git a/crates/net/eth-wire/tests/new_block.rs b/crates/net/eth-wire/tests/new_block.rs index 266752b74abf..366bf26a3a20 100644 --- a/crates/net/eth-wire/tests/new_block.rs +++ b/crates/net/eth-wire/tests/new_block.rs @@ -11,7 +11,7 @@ fn decode_new_block_network() { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/new_block_network_rlp"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = NewBlock::decode(&mut &hex_data[..]).unwrap(); + let _txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap(); } #[test] @@ -20,7 +20,7 @@ fn decode_new_block_network_bsc_one() { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/bsc_new_block_network_one"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = NewBlock::decode(&mut &hex_data[..]).unwrap(); + let _txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap(); } #[test] @@ -29,5 +29,5 @@ fn decode_new_block_network_bsc_two() { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/bsc_new_block_network_two"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = NewBlock::decode(&mut &hex_data[..]).unwrap(); + let _txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap(); } diff --git a/crates/net/eth-wire/tests/pooled_transactions.rs b/crates/net/eth-wire/tests/pooled_transactions.rs index 6690f42631a6..3b17d04cba51 100644 --- a/crates/net/eth-wire/tests/pooled_transactions.rs +++ b/crates/net/eth-wire/tests/pooled_transactions.rs @@ -12,10 +12,7 @@ use test_fuzz::test_fuzz; #[test_fuzz] fn roundtrip_pooled_transactions(hex_data: Vec) -> Result<(), alloy_rlp::Error> { let input_rlp = &mut &hex_data[..]; - let txs = match PooledTransactions::decode(input_rlp) { - Ok(txs) => txs, - Err(e) => return Err(e), - }; + let txs: PooledTransactions = PooledTransactions::decode(input_rlp)?; // get the amount of bytes decoded in `decode` by subtracting the length of the original buf, // from the length of the remaining bytes @@ -28,7 +25,7 @@ fn roundtrip_pooled_transactions(hex_data: Vec) -> Result<(), alloy_rlp::Err assert_eq!(expected_encoding, buf); // now do another decoding, on what we encoded - this should succeed - let txs2 = PooledTransactions::decode(&mut &buf[..]).unwrap(); + let txs2: PooledTransactions = PooledTransactions::decode(&mut &buf[..]).unwrap(); // ensure that the payload length is the same assert_eq!(txs.length(), txs2.length()); @@ -54,7 +51,8 @@ fn decode_request_pair_pooled_blob_transactions() { .join("testdata/request_pair_pooled_blob_transactions"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = ProtocolMessage::decode_message(EthVersion::Eth68, &mut &hex_data[..]).unwrap(); + let _txs: ProtocolMessage = + ProtocolMessage::decode_message(EthVersion::Eth68, &mut &hex_data[..]).unwrap(); } #[test] diff --git a/crates/net/nat/src/lib.rs b/crates/net/nat/src/lib.rs index 600ba97cd2dd..962f1e49efd9 100644 --- a/crates/net/nat/src/lib.rs +++ b/crates/net/nat/src/lib.rs @@ -111,7 +111,7 @@ impl FromStr for NatResolver { "Unknown Nat Resolver: {s}" ))) }; - Self::ExternalIp(ip.parse::()?) + Self::ExternalIp(ip.parse()?) 
diff --git a/crates/net/nat/src/lib.rs b/crates/net/nat/src/lib.rs
index 600ba97cd2dd..962f1e49efd9 100644
--- a/crates/net/nat/src/lib.rs
+++ b/crates/net/nat/src/lib.rs
@@ -111,7 +111,7 @@ impl FromStr for NatResolver {
                     "Unknown Nat Resolver: {s}"
                 )))
             };
-                Self::ExternalIp(ip.parse::<IpAddr>()?)
+                Self::ExternalIp(ip.parse()?)
             }
         };
         Ok(r)
diff --git a/crates/net/network-api/src/downloaders.rs b/crates/net/network-api/src/downloaders.rs
index f081c16ed815..cbfe816134ea 100644
--- a/crates/net/network-api/src/downloaders.rs
+++ b/crates/net/network-api/src/downloaders.rs
@@ -1,5 +1,7 @@
 //! API related to syncing blocks.
 
+use std::fmt::Debug;
+
 use futures::Future;
 use reth_network_p2p::BlockClient;
 use tokio::sync::oneshot;
@@ -7,10 +9,13 @@ use tokio::sync::oneshot;
 
 /// Provides client for downloading blocks.
 #[auto_impl::auto_impl(&, Arc)]
 pub trait BlockDownloaderProvider {
+    /// The client this type can provide.
+    type Client: BlockClient<Header: Debug, Body: Debug> + Send + Sync + Clone + 'static;
+
     /// Returns a new [`BlockClient`], used for fetching blocks from peers.
     ///
     /// The client is the entrypoint for sending block requests to the network.
     fn fetch_client(
         &self,
-    ) -> impl Future<Output = Result<impl BlockClient, oneshot::error::RecvError>> + Send;
+    ) -> impl Future<Output = Result<Self::Client, oneshot::error::RecvError>> + Send;
 }
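// Illustrative sketch (added for clarity, not part of the diff): the new `Client`
// associated type names the concrete client a provider hands out, instead of an
// anonymous `impl BlockClient`, so callers can clone and store it. Assumes the
// trait shape shown in the hunk above.
use reth_network_api::BlockDownloaderProvider;

async fn with_client<P: BlockDownloaderProvider>(provider: &P) {
    // `fetch_client` now resolves to the provider's concrete `P::Client`
    if let Ok(client) = provider.fetch_client().await {
        // the `Clone` bound on `Client` allows handing copies to downloaders
        let _for_headers = client.clone();
        let _for_bodies = client;
    }
}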
diff --git a/crates/net/network-api/src/events.rs b/crates/net/network-api/src/events.rs
index d2bd66d1fdd9..624c43f5e1ba 100644
--- a/crates/net/network-api/src/events.rs
+++ b/crates/net/network-api/src/events.rs
@@ -4,8 +4,9 @@ use std::{fmt, net::SocketAddr, sync::Arc};
 
 use reth_eth_wire_types::{
     message::RequestPair, BlockBodies, BlockHeaders, Capabilities, DisconnectReason, EthMessage,
-    EthVersion, GetBlockBodies, GetBlockHeaders, GetNodeData, GetPooledTransactions, GetReceipts,
-    NodeData, PooledTransactions, Receipts, Status,
+    EthNetworkPrimitives, EthVersion, GetBlockBodies, GetBlockHeaders, GetNodeData,
+    GetPooledTransactions, GetReceipts, NetworkPrimitives, NodeData, PooledTransactions, Receipts,
+    Status,
 };
 use reth_ethereum_forks::ForkId;
 use reth_network_p2p::error::{RequestError, RequestResult};
@@ -30,8 +31,8 @@ pub trait NetworkEventListenerProvider: Send + Sync {
 ///
 /// This includes any event types that may be relevant to tasks, for metrics, keep track of peers
 /// etc.
-#[derive(Debug, Clone)]
-pub enum NetworkEvent {
+#[derive(Debug)]
+pub enum NetworkEvent<R = PeerRequest> {
     /// Closed the peer session.
     SessionClosed {
         /// The identifier of the peer to which a session was closed.
@@ -50,7 +51,7 @@ pub enum NetworkEvent {
         /// Capabilities the peer announced
         capabilities: Arc<Capabilities>,
         /// A request channel to the session task.
-        messages: PeerRequestSender,
+        messages: PeerRequestSender<R>,
         /// The status of the peer to which a session was established.
         status: Arc<Status>,
         /// negotiated eth version of the session
@@ -62,6 +63,35 @@ pub enum NetworkEvent {
     PeerRemoved(PeerId),
 }
 
+impl<R> Clone for NetworkEvent<R> {
+    fn clone(&self) -> Self {
+        match self {
+            Self::SessionClosed { peer_id, reason } => {
+                Self::SessionClosed { peer_id: *peer_id, reason: *reason }
+            }
+            Self::SessionEstablished {
+                peer_id,
+                remote_addr,
+                client_version,
+                capabilities,
+                messages,
+                status,
+                version,
+            } => Self::SessionEstablished {
+                peer_id: *peer_id,
+                remote_addr: *remote_addr,
+                client_version: client_version.clone(),
+                capabilities: capabilities.clone(),
+                messages: messages.clone(),
+                status: status.clone(),
+                version: *version,
+            },
+            Self::PeerAdded(peer) => Self::PeerAdded(*peer),
+            Self::PeerRemoved(peer) => Self::PeerRemoved(*peer),
+        }
+    }
+}
+
 /// Events produced by the `Discovery` manager.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum DiscoveryEvent {
@@ -98,7 +128,7 @@ pub enum DiscoveredEvent {
 
 /// Protocol related request messages that expect a response
 #[derive(Debug)]
-pub enum PeerRequest {
+pub enum PeerRequest<N: NetworkPrimitives = EthNetworkPrimitives> {
     /// Requests block headers from the peer.
     ///
     /// The response should be sent through the channel.
@@ -106,7 +136,7 @@ pub enum PeerRequest {
     /// The request for block headers.
         request: GetBlockHeaders,
         /// The channel to send the response for block headers.
-        response: oneshot::Sender<RequestResult<BlockHeaders>>,
+        response: oneshot::Sender<RequestResult<BlockHeaders<N::BlockHeader>>>,
     },
     /// Requests block bodies from the peer.
     ///
@@ -115,7 +145,7 @@ pub enum PeerRequest {
         /// The request for block bodies.
         request: GetBlockBodies,
         /// The channel to send the response for block bodies.
-        response: oneshot::Sender<RequestResult<BlockBodies>>,
+        response: oneshot::Sender<RequestResult<BlockBodies<N::BlockBody>>>,
     },
     /// Requests pooled transactions from the peer.
     ///
@@ -124,7 +154,7 @@ pub enum PeerRequest {
         /// The request for pooled transactions.
         request: GetPooledTransactions,
         /// The channel to send the response for pooled transactions.
-        response: oneshot::Sender<RequestResult<PooledTransactions>>,
+        response: oneshot::Sender<RequestResult<PooledTransactions<N::PooledTransaction>>>,
     },
     /// Requests `NodeData` from the peer.
    ///
@@ -148,7 +178,7 @@ pub enum PeerRequest {
 
 // === impl PeerRequest ===
 
-impl PeerRequest {
+impl<N: NetworkPrimitives> PeerRequest<N> {
     /// Invoked if we received a response which does not match the request
     pub fn send_bad_response(self) {
         self.send_err_response(RequestError::BadResponse)
@@ -166,7 +196,7 @@ impl PeerRequest {
     }
 
     /// Returns the [`EthMessage`] for this type
-    pub fn create_request_message(&self, request_id: u64) -> EthMessage {
+    pub fn create_request_message(&self, request_id: u64) -> EthMessage<N> {
         match self {
             Self::GetBlockHeaders { request, .. } => {
                 EthMessage::GetBlockHeaders(RequestPair { request_id, message: *request })
@@ -199,24 +229,29 @@ impl PeerRequest {
 }
 
 /// A Cloneable connection for sending _requests_ directly to the session of a peer.
-#[derive(Clone)]
-pub struct PeerRequestSender {
+pub struct PeerRequestSender<R = PeerRequest> {
     /// id of the remote node.
     pub peer_id: PeerId,
     /// The Sender half connected to a session.
-    pub to_session_tx: mpsc::Sender<PeerRequest>,
+    pub to_session_tx: mpsc::Sender<R>,
+}
+
+impl<R> Clone for PeerRequestSender<R> {
+    fn clone(&self) -> Self {
+        Self { peer_id: self.peer_id, to_session_tx: self.to_session_tx.clone() }
+    }
 }
 
 // === impl PeerRequestSender ===
 
-impl PeerRequestSender {
+impl<R> PeerRequestSender<R> {
     /// Constructs a new sender instance that's wired to a session
-    pub const fn new(peer_id: PeerId, to_session_tx: mpsc::Sender<PeerRequest>) -> Self {
+    pub const fn new(peer_id: PeerId, to_session_tx: mpsc::Sender<R>) -> Self {
         Self { peer_id, to_session_tx }
     }
 
     /// Attempts to immediately send a message on this Sender
-    pub fn try_send(&self, req: PeerRequest) -> Result<(), mpsc::error::TrySendError<PeerRequest>> {
+    pub fn try_send(&self, req: R) -> Result<(), mpsc::error::TrySendError<R>> {
         self.to_session_tx.try_send(req)
     }
 
@@ -226,7 +261,7 @@ impl PeerRequestSender {
     }
 }
 
-impl fmt::Debug for PeerRequestSender {
+impl<R> fmt::Debug for PeerRequestSender<R> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("PeerRequestSender").field("peer_id", &self.peer_id).finish_non_exhaustive()
     }
diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs
index 6163c8730033..986d490c34f9 100644
--- a/crates/net/network-api/src/lib.rs
+++ b/crates/net/network-api/src/lib.rs
@@ -36,6 +36,7 @@ pub use events::{
 use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant};
 
 use reth_eth_wire_types::{capability::Capabilities, DisconnectReason, EthVersion, Status};
+use reth_network_p2p::EthBlockClient;
 use reth_network_peers::NodeRecord;
 
 /// The `PeerId` type.
@@ -43,7 +44,7 @@ pub type PeerId = alloy_primitives::B512;
 
 /// Helper trait that unifies network API needed to launch node.
pub trait FullNetwork: - BlockDownloaderProvider + BlockDownloaderProvider + NetworkSyncUpdater + NetworkInfo + NetworkEventListenerProvider @@ -55,7 +56,7 @@ pub trait FullNetwork: } impl FullNetwork for T where - T: BlockDownloaderProvider + T: BlockDownloaderProvider + NetworkSyncUpdater + NetworkInfo + NetworkEventListenerProvider diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index f444aa7fe27c..ab9e89c2ca84 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -16,13 +16,16 @@ workspace = true reth-chainspec.workspace = true reth-fs-util.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives-traits.workspace = true reth-net-banlist.workspace = true reth-network-api.workspace = true reth-network-p2p.workspace = true reth-discv4.workspace = true reth-discv5.workspace = true reth-dns-discovery.workspace = true +reth-ethereum-forks.workspace = true reth-eth-wire.workspace = true +reth-eth-wire-types.workspace = true reth-ecies.workspace = true reth-tasks.workspace = true reth-transaction-pool.workspace = true @@ -34,6 +37,7 @@ reth-network-peers = { workspace = true, features = ["net"] } reth-network-types.workspace = true # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true @@ -109,6 +113,7 @@ serde = [ "reth-dns-discovery/serde", "reth-eth-wire/serde", "reth-provider?/serde", + "reth-eth-wire-types/serde", "alloy-consensus/serde", "alloy-eips/serde", "alloy-primitives/serde", @@ -116,7 +121,8 @@ serde = [ "parking_lot/serde", "rand/serde", "smallvec/serde", - "url/serde" + "url/serde", + "reth-primitives-traits/serde", ] test-utils = [ "dep:reth-provider", @@ -129,7 +135,8 @@ test-utils = [ "reth-discv4/test-utils", "reth-network/test-utils", "reth-network-p2p/test-utils", - "reth-primitives/test-utils" + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", ] [[bench]] diff --git a/crates/net/network/src/builder.rs b/crates/net/network/src/builder.rs index e6a5d9566419..da003a2e2907 100644 --- a/crates/net/network/src/builder.rs +++ b/crates/net/network/src/builder.rs @@ -1,14 +1,14 @@ //! Builder support for configuring the entire setup. -use reth_network_api::test_utils::PeersHandleProvider; -use reth_transaction_pool::TransactionPool; -use tokio::sync::mpsc; - use crate::{ eth_requests::EthRequestHandler, transactions::{TransactionsManager, TransactionsManagerConfig}, NetworkHandle, NetworkManager, }; +use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; +use reth_network_api::test_utils::PeersHandleProvider; +use reth_transaction_pool::TransactionPool; +use tokio::sync::mpsc; /// We set the max channel capacity of the `EthRequestHandler` to 256 /// 256 requests with malicious 10MB body requests is 2.6GB which can be absorbed by the node. @@ -16,8 +16,8 @@ pub(crate) const ETH_REQUEST_CHANNEL_CAPACITY: usize = 256; /// A builder that can configure all components of the network. #[allow(missing_debug_implementations)] -pub struct NetworkBuilder { - pub(crate) network: NetworkManager, +pub struct NetworkBuilder { + pub(crate) network: NetworkManager, pub(crate) transactions: Tx, pub(crate) request_handler: Eth, } diff --git a/crates/net/network/src/cache.rs b/crates/net/network/src/cache.rs index 758b49167908..32389ec4b7b1 100644 --- a/crates/net/network/src/cache.rs +++ b/crates/net/network/src/cache.rs @@ -1,11 +1,10 @@ //! 
Network cache support use core::hash::BuildHasher; -use std::{fmt, hash::Hash}; - use derive_more::{Deref, DerefMut}; use itertools::Itertools; use schnellru::{ByLength, Limiter, RandomState, Unlimited}; +use std::{fmt, hash::Hash}; /// A minimal LRU cache based on a [`LruMap`](schnellru::LruMap) with limited capacity. /// diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 96aef249d9f9..a7d8a98fae6d 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -1,25 +1,25 @@ //! Network config support -use std::{collections::HashSet, net::SocketAddr, sync::Arc}; - +use crate::{ + error::NetworkError, + import::{BlockImport, ProofOfStakeBlockImport}, + transactions::TransactionsManagerConfig, + NetworkHandle, NetworkManager, +}; use reth_chainspec::{ChainSpecProvider, EthChainSpec, Hardforks}; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, NatResolver, DEFAULT_DISCOVERY_ADDRESS}; use reth_discv5::NetworkStackId; use reth_dns_discovery::DnsDiscoveryConfig; -use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; +use reth_eth_wire::{ + EthNetworkPrimitives, HelloMessage, HelloMessageWithProtocols, NetworkPrimitives, Status, +}; +use reth_ethereum_forks::{ForkFilter, Head}; use reth_network_peers::{mainnet_nodes, pk2id, sepolia_nodes, PeerId, TrustedPeer}; use reth_network_types::{PeersConfig, SessionsConfig}; -use reth_primitives::{ForkFilter, Head}; use reth_storage_api::{noop::NoopBlockReader, BlockNumReader, BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use secp256k1::SECP256K1; - -use crate::{ - error::NetworkError, - import::{BlockImport, ProofOfStakeBlockImport}, - transactions::TransactionsManagerConfig, - NetworkHandle, NetworkManager, -}; +use std::{collections::HashSet, net::SocketAddr, sync::Arc}; // re-export for convenience use crate::protocol::{IntoRlpxSubProtocol, RlpxSubProtocols}; @@ -32,7 +32,7 @@ pub fn rng_secret_key() -> SecretKey { /// All network related initialization settings. #[derive(Debug)] -pub struct NetworkConfig { +pub struct NetworkConfig { /// The client type that can interact with the chain. /// /// This type is used to fetch the block number after we established a session and received the @@ -66,7 +66,7 @@ pub struct NetworkConfig { /// first hardfork, `Frontier` for mainnet. pub fork_filter: ForkFilter, /// The block importer type. - pub block_import: Box, + pub block_import: Box>, /// The default mode of the network. pub network_mode: NetworkMode, /// The executor to use for spawning tasks. @@ -87,9 +87,9 @@ pub struct NetworkConfig { // === impl NetworkConfig === -impl NetworkConfig<()> { +impl NetworkConfig<(), N> { /// Convenience method for creating the corresponding builder type - pub fn builder(secret_key: SecretKey) -> NetworkConfigBuilder { + pub fn builder(secret_key: SecretKey) -> NetworkConfigBuilder { NetworkConfigBuilder::new(secret_key) } @@ -99,7 +99,7 @@ impl NetworkConfig<()> { } } -impl NetworkConfig { +impl NetworkConfig { /// Create a new instance with all mandatory fields set, rest is field with defaults. pub fn new(client: C, secret_key: SecretKey) -> Self where @@ -134,19 +134,24 @@ impl NetworkConfig { } } -impl NetworkConfig +impl NetworkConfig where C: BlockNumReader + 'static, + N: NetworkPrimitives, { /// Convenience method for calling [`NetworkManager::new`]. 
- pub async fn manager(self) -> Result { + pub async fn manager(self) -> Result, NetworkError> { NetworkManager::new(self).await } } impl NetworkConfig where - C: BlockReader + HeaderProvider + Clone + Unpin + 'static, + C: BlockReader + + HeaderProvider + + Clone + + Unpin + + 'static, { /// Starts the networking stack given a [`NetworkConfig`] and returns a handle to the network. pub async fn start_network(self) -> Result { @@ -164,7 +169,7 @@ where /// Builder for [`NetworkConfig`](struct.NetworkConfig.html). #[derive(Debug)] -pub struct NetworkConfigBuilder { +pub struct NetworkConfigBuilder { /// The node's secret key, from which the node's identity is derived. secret_key: SecretKey, /// How to configure discovery over DNS. @@ -196,7 +201,7 @@ pub struct NetworkConfigBuilder { /// Whether tx gossip is disabled tx_gossip_disabled: bool, /// The block importer type - block_import: Option>, + block_import: Option>>, /// How to instantiate transactions manager. transactions_manager_config: TransactionsManagerConfig, /// The NAT resolver for external IP @@ -206,7 +211,7 @@ pub struct NetworkConfigBuilder { // === impl NetworkConfigBuilder === #[allow(missing_docs)] -impl NetworkConfigBuilder { +impl NetworkConfigBuilder { /// Create a new builder instance with a random secret key. pub fn with_rng_secret_key() -> Self { Self::new(rng_secret_key()) @@ -480,7 +485,7 @@ impl NetworkConfigBuilder { } /// Sets the block import type. - pub fn block_import(mut self, block_import: Box) -> Self { + pub fn block_import(mut self, block_import: Box>) -> Self { self.block_import = Some(block_import); self } @@ -490,7 +495,7 @@ impl NetworkConfigBuilder { pub fn build_with_noop_provider( self, chain_spec: Arc, - ) -> NetworkConfig> + ) -> NetworkConfig, N> where ChainSpec: EthChainSpec + Hardforks + 'static, { @@ -509,7 +514,7 @@ impl NetworkConfigBuilder { /// The given client is to be used for interacting with the chain, for example fetching the /// corresponding block for a given block hash we receive from a peer in the status message when /// establishing a connection. - pub fn build(self, client: C) -> NetworkConfig + pub fn build(self, client: C) -> NetworkConfig where C: ChainSpecProvider, { diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index 5b2bb788f478..c0b9ffa7630b 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -1,13 +1,9 @@ //! Discovery support for the network. 
-use std::{ - collections::VecDeque, - net::{IpAddr, SocketAddr}, - pin::Pin, - sync::Arc, - task::{ready, Context, Poll}, +use crate::{ + cache::LruMap, + error::{NetworkError, ServiceKind}, }; - use enr::Enr; use futures::StreamExt; use reth_discv4::{DiscoveryUpdate, Discv4, Discv4Config}; @@ -15,20 +11,22 @@ use reth_discv5::{DiscoveredPeer, Discv5}; use reth_dns_discovery::{ DnsDiscoveryConfig, DnsDiscoveryHandle, DnsDiscoveryService, DnsNodeRecordUpdate, DnsResolver, }; +use reth_ethereum_forks::{EnrForkIdEntry, ForkId}; use reth_network_api::{DiscoveredEvent, DiscoveryEvent}; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::PeerAddr; -use reth_primitives::{EnrForkIdEntry, ForkId}; use secp256k1::SecretKey; +use std::{ + collections::VecDeque, + net::{IpAddr, SocketAddr}, + pin::Pin, + sync::Arc, + task::{ready, Context, Poll}, +}; use tokio::{sync::mpsc, task::JoinHandle}; use tokio_stream::{wrappers::ReceiverStream, Stream}; use tracing::trace; -use crate::{ - cache::LruMap, - error::{NetworkError, ServiceKind}, -}; - /// Default max capacity for cache of discovered peers. /// /// Default is 10 000 peers. diff --git a/crates/net/network/src/error.rs b/crates/net/network/src/error.rs index 2709c4a29075..8156392b22f1 100644 --- a/crates/net/network/src/error.rs +++ b/crates/net/network/src/error.rs @@ -1,7 +1,6 @@ //! Possible errors when interacting with the network. -use std::{fmt, io, io::ErrorKind, net::SocketAddr}; - +use crate::session::PendingSessionHandshakeError; use reth_dns_discovery::resolver::ResolveError; use reth_ecies::ECIESErrorImpl; use reth_eth_wire::{ @@ -9,8 +8,7 @@ use reth_eth_wire::{ DisconnectReason, }; use reth_network_types::BackoffKind; - -use crate::session::PendingSessionHandshakeError; +use std::{fmt, io, io::ErrorKind, net::SocketAddr}; /// Service kind. #[derive(Debug, PartialEq, Eq, Copy, Clone)] diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index f0c355b174a4..bb45507bdbdb 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -1,32 +1,31 @@ //! Blocks/Headers management for the p2p network. -use std::{ - future::Future, - pin::Pin, - task::{Context, Poll}, - time::Duration, +use crate::{ + budget::DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS, metered_poll_nested_stream_with_budget, + metrics::EthRequestHandlerMetrics, }; - +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_rlp::Encodable; use futures::StreamExt; use reth_eth_wire::{ - BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, GetNodeData, GetReceipts, - HeadersDirection, NodeData, Receipts, + BlockBodies, BlockHeaders, EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, GetNodeData, + GetReceipts, HeadersDirection, NetworkPrimitives, NodeData, Receipts, }; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::error::RequestResult; use reth_network_peers::PeerId; -use reth_primitives::{BlockBody, Header}; +use reth_primitives_traits::Block; use reth_storage_api::{BlockReader, HeaderProvider, ReceiptProvider}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; use tokio::sync::{mpsc::Receiver, oneshot}; use tokio_stream::wrappers::ReceiverStream; -use crate::{ - budget::DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS, metered_poll_nested_stream_with_budget, - metrics::EthRequestHandlerMetrics, -}; - // Limits: /// Maximum number of receipts to serve. 
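// Illustrative sketch (added for clarity, not part of the diff): body responses
// are now produced by splitting a generic `Block` into its header and body parts
// instead of converting into the concrete `reth_primitives::BlockBody`. This
// mirrors the `block.split()` call introduced further down in this file; the
// exact shape of the `reth_primitives_traits::Block` trait is assumed here.
use reth_primitives_traits::Block;

fn body_of<B: Block>(block: B) -> B::Body {
    // `split` consumes the block and returns its (header, body) pair
    let (_header, body) = block.split();
    body
}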
@@ -53,7 +52,7 @@ const SOFT_RESPONSE_LIMIT: usize = 2 * 1024 * 1024; /// This can be spawned to another task and is supposed to be run as background service. #[derive(Debug)] #[must_use = "Manager does nothing unless polled."] -pub struct EthRequestHandler { +pub struct EthRequestHandler { /// The client type that can interact with the chain. client: C, /// Used for reporting peers. @@ -61,15 +60,15 @@ pub struct EthRequestHandler { #[allow(dead_code)] peers: PeersHandle, /// Incoming request from the [`NetworkManager`](crate::NetworkManager). - incoming_requests: ReceiverStream, + incoming_requests: ReceiverStream>, /// Metrics for the eth request handler. metrics: EthRequestHandlerMetrics, } // === impl EthRequestHandler === -impl EthRequestHandler { +impl EthRequestHandler { /// Create a new instance - pub fn new(client: C, peers: PeersHandle, incoming: Receiver) -> Self { + pub fn new(client: C, peers: PeersHandle, incoming: Receiver>) -> Self { Self { client, peers, @@ -81,7 +80,7 @@ impl EthRequestHandler { impl EthRequestHandler where - C: BlockReader + HeaderProvider + ReceiptProvider, + C: BlockReader + HeaderProvider + ReceiptProvider, { /// Returns the list of requested headers fn get_headers_response(&self, request: GetBlockHeaders) -> Vec
{ @@ -147,7 +146,7 @@ where &self, _peer_id: PeerId, request: GetBlockHeaders, - response: oneshot::Sender>, + response: oneshot::Sender>>, ) { self.metrics.eth_headers_requests_received_total.increment(1); let headers = self.get_headers_response(request); @@ -158,7 +157,9 @@ where &self, _peer_id: PeerId, request: GetBlockBodies, - response: oneshot::Sender>, + response: oneshot::Sender< + RequestResult::Body>>, + >, ) { self.metrics.eth_bodies_requests_received_total.increment(1); let mut bodies = Vec::new(); @@ -167,8 +168,7 @@ where for hash in request.0 { if let Some(block) = self.client.block_by_hash(hash).unwrap_or_default() { - let body: BlockBody = block.into(); - + let (_, body) = block.split(); total_bytes += body.length(); bodies.push(body); @@ -224,7 +224,9 @@ where /// This should be spawned or used as part of `tokio::select!`. impl Future for EthRequestHandler where - C: BlockReader + HeaderProvider + Unpin, + C: BlockReader + + HeaderProvider + + Unpin, { type Output = (); @@ -271,7 +273,7 @@ where /// All `eth` request related to blocks delegated by the network. #[derive(Debug)] -pub enum IncomingEthRequest { +pub enum IncomingEthRequest { /// Request Block headers from the peer. /// /// The response should be sent through the channel. @@ -281,7 +283,7 @@ pub enum IncomingEthRequest { /// The specific block headers requested. request: GetBlockHeaders, /// The channel sender for the response containing block headers. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Request Block bodies from the peer. /// @@ -292,7 +294,7 @@ pub enum IncomingEthRequest { /// The specific block bodies requested. request: GetBlockBodies, /// The channel sender for the response containing block bodies. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Request Node Data from the peer. /// diff --git a/crates/net/network/src/fetch/client.rs b/crates/net/network/src/fetch/client.rs index c47ee5d234fc..e24ea167f5fe 100644 --- a/crates/net/network/src/fetch/client.rs +++ b/crates/net/network/src/fetch/client.rs @@ -1,12 +1,9 @@ //! A client implementation that can interact with the network and download data. -use std::sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, -}; - +use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse}; use alloy_primitives::B256; use futures::{future, future::Either}; +use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, @@ -17,11 +14,12 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::Header; +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; use tokio::sync::{mpsc::UnboundedSender, oneshot}; -use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse}; - #[cfg_attr(doc, aquamarine::aquamarine)] /// Front-end API for fetching data from the network. /// @@ -30,16 +28,16 @@ use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse}; /// /// include_mmd!("docs/mermaid/fetch-client.mmd") #[derive(Debug, Clone)] -pub struct FetchClient { +pub struct FetchClient { /// Sender half of the request channel. - pub(crate) request_tx: UnboundedSender, + pub(crate) request_tx: UnboundedSender>, /// The handle to the peers pub(crate) peers_handle: PeersHandle, /// Number of active peer sessions the node's currently handling. 
pub(crate) num_active_peers: Arc, } -impl DownloadClient for FetchClient { +impl DownloadClient for FetchClient { fn report_bad_message(&self, peer_id: PeerId) { self.peers_handle.reputation_change(peer_id, ReputationChangeKind::BadMessage); } @@ -53,8 +51,9 @@ impl DownloadClient for FetchClient { // or an error. type HeadersClientFuture = Either, future::Ready>; -impl HeadersClient for FetchClient { - type Output = HeadersClientFuture>>; +impl HeadersClient for FetchClient { + type Header = N::BlockHeader; + type Output = HeadersClientFuture>>; /// Sends a `GetBlockHeaders` request to an available peer. fn get_headers_with_priority( @@ -75,8 +74,9 @@ impl HeadersClient for FetchClient { } } -impl BodiesClient for FetchClient { - type Output = BodiesFut; +impl BodiesClient for FetchClient { + type Body = N::BlockBody; + type Output = BodiesFut; /// Sends a `GetBlockBodies` request to an available peer. fn get_block_bodies_with_priority( diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index f5c0006bc3a4..c5474587adfb 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -4,18 +4,10 @@ mod client; pub use client::FetchClient; -use std::{ - collections::{HashMap, VecDeque}, - sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, - Arc, - }, - task::{Context, Poll}, -}; - +use crate::message::BlockRequest; use alloy_primitives::B256; use futures::StreamExt; -use reth_eth_wire::{GetBlockBodies, GetBlockHeaders}; +use reth_eth_wire::{EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives}; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::{ error::{EthResponseValidator, PeerRequestResult, RequestError, RequestResult}, @@ -24,11 +16,19 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{BlockBody, Header}; +use std::{ + collections::{HashMap, VecDeque}, + sync::{ + atomic::{AtomicU64, AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, +}; use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; -use crate::message::BlockRequest; +type InflightHeadersRequest = Request>>; +type InflightBodiesRequest = Request, PeerRequestResult>>; /// Manages data fetching operations. /// @@ -37,13 +37,11 @@ use crate::message::BlockRequest; /// /// This type maintains a list of connected peers that are available for requests. #[derive(Debug)] -pub struct StateFetcher { +pub struct StateFetcher { /// Currently active [`GetBlockHeaders`] requests - inflight_headers_requests: - HashMap>>>, + inflight_headers_requests: HashMap>, /// Currently active [`GetBlockBodies`] requests - inflight_bodies_requests: - HashMap, PeerRequestResult>>>, + inflight_bodies_requests: HashMap>, /// The list of _available_ peers for requests. peers: HashMap, /// The handle to the peers manager @@ -51,16 +49,16 @@ pub struct StateFetcher { /// Number of active peer sessions the node's currently handling. 
num_active_peers: Arc, /// Requests queued for processing - queued_requests: VecDeque, + queued_requests: VecDeque>, /// Receiver for new incoming download requests - download_requests_rx: UnboundedReceiverStream, + download_requests_rx: UnboundedReceiverStream>, /// Sender for download requests, used to detach a [`FetchClient`] - download_requests_tx: UnboundedSender, + download_requests_tx: UnboundedSender>, } // === impl StateSyncer === -impl StateFetcher { +impl StateFetcher { pub(crate) fn new(peers_handle: PeersHandle, num_active_peers: Arc) -> Self { let (download_requests_tx, download_requests_rx) = mpsc::unbounded_channel(); Self { @@ -217,7 +215,7 @@ impl StateFetcher { /// Handles a new request to a peer. /// /// Caution: this assumes the peer exists and is idle - fn prepare_block_request(&mut self, peer_id: PeerId, req: DownloadRequest) -> BlockRequest { + fn prepare_block_request(&mut self, peer_id: PeerId, req: DownloadRequest) -> BlockRequest { // update the peer's state if let Some(peer) = self.peers.get_mut(&peer_id) { peer.state = req.peer_state(); @@ -260,7 +258,7 @@ impl StateFetcher { pub(crate) fn on_block_headers_response( &mut self, peer_id: PeerId, - res: RequestResult>, + res: RequestResult>, ) -> Option { let is_error = res.is_err(); let maybe_reputation_change = res.reputation_change_err(); @@ -296,7 +294,7 @@ impl StateFetcher { pub(crate) fn on_block_bodies_response( &mut self, peer_id: PeerId, - res: RequestResult>, + res: RequestResult>, ) -> Option { let is_likely_bad_response = res.as_ref().map_or(true, |bodies| bodies.is_empty()); @@ -315,7 +313,7 @@ impl StateFetcher { } /// Returns a new [`FetchClient`] that can send requests to this type. - pub(crate) fn client(&self) -> FetchClient { + pub(crate) fn client(&self) -> FetchClient { FetchClient { request_tx: self.download_requests_tx.clone(), peers_handle: self.peers_handle.clone(), @@ -405,24 +403,24 @@ struct Request { /// Requests that can be sent to the Syncer from a [`FetchClient`] #[derive(Debug)] -pub(crate) enum DownloadRequest { +pub(crate) enum DownloadRequest { /// Download the requested headers and send response through channel GetBlockHeaders { request: HeadersRequest, - response: oneshot::Sender>>, + response: oneshot::Sender>>, priority: Priority, }, /// Download the requested headers and send response through channel GetBlockBodies { request: Vec, - response: oneshot::Sender>>, + response: oneshot::Sender>>, priority: Priority, }, } // === impl DownloadRequest === -impl DownloadRequest { +impl DownloadRequest { /// Returns the corresponding state for a peer that handles the request. 
const fn peer_state(&self) -> PeerState { match self { @@ -472,14 +470,14 @@ pub(crate) enum BlockResponseOutcome { mod tests { use super::*; use crate::{peers::PeersManager, PeersConfig}; + use alloy_consensus::Header; use alloy_primitives::B512; - use reth_primitives::SealedHeader; use std::future::poll_fn; #[tokio::test(flavor = "multi_thread")] async fn test_poll_fetcher() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); poll_fn(move |cx| { assert!(fetcher.poll(cx).is_pending()); @@ -499,7 +497,7 @@ mod tests { #[tokio::test] async fn test_peer_rotation() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); @@ -522,7 +520,7 @@ mod tests { #[tokio::test] async fn test_peer_prioritization() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); @@ -547,7 +545,7 @@ mod tests { #[tokio::test] async fn test_on_block_headers_response() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); let peer_id = B512::random(); assert_eq!(fetcher.on_block_headers_response(peer_id, Ok(vec![Header::default()])), None); @@ -577,7 +575,7 @@ mod tests { #[tokio::test] async fn test_header_response_outcome() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher: StateFetcher = StateFetcher::new(manager.handle(), Default::default()); let peer_id = B512::random(); let request_pair = || { @@ -590,8 +588,7 @@ mod tests { }, response: tx, }; - let mut header = SealedHeader::default().unseal(); - header.number = 0u64; + let header = Header { number: 0, ..Default::default() }; (req, header) }; @@ -612,7 +609,10 @@ mod tests { let outcome = fetcher.on_block_headers_response(peer_id, Err(RequestError::Timeout)).unwrap(); - assert!(EthResponseValidator::reputation_change_err(&Err(RequestError::Timeout)).is_some()); + assert!(EthResponseValidator::reputation_change_err(&Err::, _>( + RequestError::Timeout + )) + .is_some()); match outcome { BlockResponseOutcome::BadResponse(peer, _) => { diff --git a/crates/net/network/src/flattened_response.rs b/crates/net/network/src/flattened_response.rs index 78c3c35f5981..61dae9c7c724 100644 --- a/crates/net/network/src/flattened_response.rs +++ b/crates/net/network/src/flattened_response.rs @@ -1,10 +1,9 @@ +use futures::Future; +use pin_project::pin_project; use std::{ pin::Pin, task::{Context, Poll}, }; - -use futures::Future; -use pin_project::pin_project; use tokio::sync::oneshot::{error::RecvError, Receiver}; /// Flatten a [Receiver] message in order to get rid of the [RecvError] result @@ -24,10 +23,7 @@ where fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.project(); - this.receiver.poll(cx).map(|r| 
match r { - Ok(r) => r, - Err(err) => Err(err.into()), - }) + this.receiver.poll(cx).map(|r| r.unwrap_or_else(|err| Err(err.into()))) } } diff --git a/crates/net/network/src/import.rs b/crates/net/network/src/import.rs index 201dc3e4f786..f63bf2dd7a8c 100644 --- a/crates/net/network/src/import.rs +++ b/crates/net/network/src/import.rs @@ -1,13 +1,11 @@ //! This module provides an abstraction over block import in the form of the `BlockImport` trait. -use std::task::{Context, Poll}; - -use reth_network_peers::PeerId; - use crate::message::NewBlockMessage; +use reth_network_peers::PeerId; +use std::task::{Context, Poll}; /// Abstraction over block import. -pub trait BlockImport: std::fmt::Debug + Send + Sync { +pub trait BlockImport: std::fmt::Debug + Send + Sync { /// Invoked for a received `NewBlock` broadcast message from the peer. /// /// > When a `NewBlock` announcement message is received from a peer, the client first verifies @@ -15,35 +13,35 @@ pub trait BlockImport: std::fmt::Debug + Send + Sync { /// /// This is supposed to start verification. The results are then expected to be returned via /// [`BlockImport::poll`]. - fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockMessage); + fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockMessage); /// Returns the results of a [`BlockImport::on_new_block`] - fn poll(&mut self, cx: &mut Context<'_>) -> Poll; + fn poll(&mut self, cx: &mut Context<'_>) -> Poll>; } /// Outcome of the [`BlockImport`]'s block handling. #[derive(Debug)] -pub struct BlockImportOutcome { +pub struct BlockImportOutcome { /// Sender of the `NewBlock` message. pub peer: PeerId, /// The result after validating the block - pub result: Result, + pub result: Result, BlockImportError>, } /// Represents the successful validation of a received `NewBlock` message. #[derive(Debug)] -pub enum BlockValidation { +pub enum BlockValidation { /// Basic Header validity check, after which the block should be relayed to peers via a /// `NewBlock` message ValidHeader { /// received block - block: NewBlockMessage, + block: NewBlockMessage, }, /// Successfully imported: state-root matches after execution. The block should be relayed via /// `NewBlockHashes` ValidBlock { /// validated block. - block: NewBlockMessage, + block: NewBlockMessage, }, } @@ -62,10 +60,10 @@ pub enum BlockImportError { #[non_exhaustive] pub struct ProofOfStakeBlockImport; -impl BlockImport for ProofOfStakeBlockImport { - fn on_new_block(&mut self, _peer_id: PeerId, _incoming_block: NewBlockMessage) {} +impl BlockImport for ProofOfStakeBlockImport { + fn on_new_block(&mut self, _peer_id: PeerId, _incoming_block: NewBlockMessage) {} - fn poll(&mut self, _cx: &mut Context<'_>) -> Poll { + fn poll(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Pending } } diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index 0e433a388628..0eae99e7c50a 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -46,7 +46,9 @@ //! //! ``` //! # async fn launch() { -//! use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; +//! use reth_network::{ +//! config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager, +//! }; //! use reth_network_peers::mainnet_nodes; //! use reth_provider::test_utils::NoopProvider; //! @@ -59,7 +61,7 @@ //! let config = NetworkConfig::builder(local_key).boot_nodes(mainnet_nodes()).build(client); //! //! // create the network instance -//! 
let network = NetworkManager::new(config).await.unwrap(); +//! let network = NetworkManager::::new(config).await.unwrap(); //! //! // keep a handle to the network and spawn it //! let handle = network.handle().clone(); @@ -138,6 +140,7 @@ mod state; mod swarm; pub use reth_eth_wire::{DisconnectReason, HelloMessageWithProtocols}; +pub use reth_eth_wire_types::{EthNetworkPrimitives, NetworkPrimitives}; pub use reth_network_api::{ BlockDownloaderProvider, DiscoveredEvent, DiscoveryEvent, NetworkEvent, NetworkEventListenerProvider, NetworkInfo, PeerRequest, PeerRequestSender, Peers, PeersInfo, diff --git a/crates/net/network/src/listener.rs b/crates/net/network/src/listener.rs index e5094f689481..9fcc15a104b5 100644 --- a/crates/net/network/src/listener.rs +++ b/crates/net/network/src/listener.rs @@ -1,13 +1,12 @@ //! Contains connection-oriented interfaces. +use futures::{ready, Stream}; use std::{ io, net::SocketAddr, pin::Pin, task::{Context, Poll}, }; - -use futures::{ready, Stream}; use tokio::net::{TcpListener, TcpStream}; /// A tcp connection listener. diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 3a7f94985fc9..c1db91773e38 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -15,36 +15,6 @@ //! (IP+port) of our node is published via discovery, remote peers can initiate inbound connections //! to the local node. Once a (tcp) connection is established, both peers start to authenticate a [RLPx session](https://github.com/ethereum/devp2p/blob/master/rlpx.md) via a handshake. If the handshake was successful, both peers announce their capabilities and are now ready to exchange sub-protocol messages via the `RLPx` session. -use std::{ - net::SocketAddr, - path::Path, - pin::Pin, - sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, - Arc, - }, - task::{Context, Poll}, - time::{Duration, Instant}, -}; - -use futures::{Future, StreamExt}; -use parking_lot::Mutex; -use reth_eth_wire::{capability::CapabilityMessage, Capabilities, DisconnectReason}; -use reth_fs_util::{self as fs, FsPathError}; -use reth_metrics::common::mpsc::UnboundedMeteredSender; -use reth_network_api::{ - test_utils::PeersHandle, EthProtocolInfo, NetworkEvent, NetworkStatus, PeerInfo, PeerRequest, -}; -use reth_network_peers::{NodeRecord, PeerId}; -use reth_network_types::ReputationChangeKind; -use reth_storage_api::BlockNumReader; -use reth_tasks::shutdown::GracefulShutdown; -use reth_tokio_util::EventSender; -use secp256k1::SecretKey; -use tokio::sync::mpsc::{self, error::TrySendError}; -use tokio_stream::wrappers::UnboundedReceiverStream; -use tracing::{debug, error, trace, warn}; - use crate::{ budget::{DEFAULT_BUDGET_TRY_DRAIN_NETWORK_HANDLE_CHANNEL, DEFAULT_BUDGET_TRY_DRAIN_SWARM}, config::NetworkConfig, @@ -65,6 +35,37 @@ use crate::{ transactions::NetworkTransactionEvent, FetchClient, NetworkBuilder, }; +use futures::{Future, StreamExt}; +use parking_lot::Mutex; +use reth_eth_wire::{ + capability::CapabilityMessage, Capabilities, DisconnectReason, EthNetworkPrimitives, + NetworkPrimitives, +}; +use reth_fs_util::{self as fs, FsPathError}; +use reth_metrics::common::mpsc::UnboundedMeteredSender; +use reth_network_api::{ + test_utils::PeersHandle, EthProtocolInfo, NetworkEvent, NetworkStatus, PeerInfo, PeerRequest, +}; +use reth_network_peers::{NodeRecord, PeerId}; +use reth_network_types::ReputationChangeKind; +use reth_storage_api::BlockNumReader; +use reth_tasks::shutdown::GracefulShutdown; +use reth_tokio_util::EventSender; +use 
secp256k1::SecretKey; +use std::{ + net::SocketAddr, + path::Path, + pin::Pin, + sync::{ + atomic::{AtomicU64, AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, + time::{Duration, Instant}, +}; +use tokio::sync::mpsc::{self, error::TrySendError}; +use tokio_stream::wrappers::UnboundedReceiverStream; +use tracing::{debug, error, trace, warn}; #[cfg_attr(doc, aquamarine::aquamarine)] /// Manages the _entire_ state of the network. @@ -76,20 +77,20 @@ use crate::{ /// include_mmd!("docs/mermaid/network-manager.mmd") #[derive(Debug)] #[must_use = "The NetworkManager does nothing unless polled"] -pub struct NetworkManager { +pub struct NetworkManager { /// The type that manages the actual network part, which includes connections. - swarm: Swarm, + swarm: Swarm, /// Underlying network handle that can be shared. - handle: NetworkHandle, + handle: NetworkHandle, /// Receiver half of the command channel set up between this type and the [`NetworkHandle`] - from_handle_rx: UnboundedReceiverStream, + from_handle_rx: UnboundedReceiverStream>, /// Handles block imports according to the `eth` protocol. - block_import: Box, + block_import: Box>, /// Sender for high level network events. - event_sender: EventSender, + event_sender: EventSender>>, /// Sender half to send events to the /// [`TransactionsManager`](crate::transactions::TransactionsManager) task, if configured. - to_transactions_manager: Option>, + to_transactions_manager: Option>>, /// Sender half to send events to the /// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler) task, if configured. /// @@ -103,7 +104,7 @@ pub struct NetworkManager { /// Thus, we use a bounded channel here to avoid unbounded build up if the node is flooded with /// requests. This channel size is set at /// [`ETH_REQUEST_CHANNEL_CAPACITY`](crate::builder::ETH_REQUEST_CHANNEL_CAPACITY) - to_eth_request_handler: Option>, + to_eth_request_handler: Option>>, /// Tracks the number of active session (connected peers). /// /// This is updated via internal events and shared via `Arc` with the [`NetworkHandle`] @@ -116,17 +117,17 @@ pub struct NetworkManager { } // === impl NetworkManager === -impl NetworkManager { +impl NetworkManager { /// Sets the dedicated channel for events indented for the /// [`TransactionsManager`](crate::transactions::TransactionsManager). - pub fn set_transactions(&mut self, tx: mpsc::UnboundedSender) { + pub fn set_transactions(&mut self, tx: mpsc::UnboundedSender>) { self.to_transactions_manager = Some(UnboundedMeteredSender::new(tx, NETWORK_POOL_TRANSACTIONS_SCOPE)); } /// Sets the dedicated channel for events indented for the /// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler). - pub fn set_eth_request_handler(&mut self, tx: mpsc::Sender) { + pub fn set_eth_request_handler(&mut self, tx: mpsc::Sender>) { self.to_eth_request_handler = Some(tx); } @@ -138,7 +139,7 @@ impl NetworkManager { /// Returns the [`NetworkHandle`] that can be cloned and shared. /// /// The [`NetworkHandle`] can be used to interact with this [`NetworkManager`] - pub const fn handle(&self) -> &NetworkHandle { + pub const fn handle(&self) -> &NetworkHandle { &self.handle } @@ -165,7 +166,7 @@ impl NetworkManager { /// The [`NetworkManager`] is an endless future that needs to be polled in order to advance the /// state of the entire network. 
pub async fn new( - config: NetworkConfig, + config: NetworkConfig, ) -> Result { let NetworkConfig { client, @@ -253,7 +254,7 @@ impl NetworkManager { let (to_manager_tx, from_handle_rx) = mpsc::unbounded_channel(); - let event_sender: EventSender = Default::default(); + let event_sender: EventSender>> = Default::default(); let handle = NetworkHandle::new( Arc::clone(&num_active_peers), @@ -314,14 +315,14 @@ impl NetworkManager { /// } /// ``` pub async fn builder( - config: NetworkConfig, - ) -> Result, NetworkError> { + config: NetworkConfig, + ) -> Result, NetworkError> { let network = Self::new(config).await?; Ok(network.into_builder()) } /// Create a [`NetworkBuilder`] to configure all components of the network - pub const fn into_builder(self) -> NetworkBuilder<(), ()> { + pub const fn into_builder(self) -> NetworkBuilder<(), (), N> { NetworkBuilder { network: self, transactions: (), request_handler: () } } @@ -369,7 +370,7 @@ impl NetworkManager { /// Returns a new [`FetchClient`] that can be cloned and shared. /// /// The [`FetchClient`] is the entrypoint for sending requests to the network. - pub fn fetch_client(&self) -> FetchClient { + pub fn fetch_client(&self) -> FetchClient { self.swarm.state().fetch_client() } @@ -408,7 +409,7 @@ impl NetworkManager { /// Sends an event to the [`TransactionsManager`](crate::transactions::TransactionsManager) if /// configured. - fn notify_tx_manager(&self, event: NetworkTransactionEvent) { + fn notify_tx_manager(&self, event: NetworkTransactionEvent) { if let Some(ref tx) = self.to_transactions_manager { let _ = tx.send(event); } @@ -416,7 +417,7 @@ impl NetworkManager { /// Sends an event to the [`EthRequestManager`](crate::eth_requests::EthRequestHandler) if /// configured. - fn delegate_eth_request(&self, event: IncomingEthRequest) { + fn delegate_eth_request(&self, event: IncomingEthRequest) { if let Some(ref reqs) = self.to_eth_request_handler { let _ = reqs.try_send(event).map_err(|e| { if let TrySendError::Full(_) = e { @@ -428,7 +429,7 @@ impl NetworkManager { } /// Handle an incoming request from the peer - fn on_eth_request(&self, peer_id: PeerId, req: PeerRequest) { + fn on_eth_request(&self, peer_id: PeerId, req: PeerRequest) { match req { PeerRequest::GetBlockHeaders { request, response } => { self.delegate_eth_request(IncomingEthRequest::GetBlockHeaders { @@ -469,7 +470,7 @@ impl NetworkManager { } /// Invoked after a `NewBlock` message from the peer was validated - fn on_block_import_result(&mut self, outcome: BlockImportOutcome) { + fn on_block_import_result(&mut self, outcome: BlockImportOutcome) { let BlockImportOutcome { peer, result } = outcome; match result { Ok(validated_block) => match validated_block { @@ -511,7 +512,7 @@ impl NetworkManager { } /// Handles a received Message from the peer's session. 
- fn on_peer_message(&mut self, peer_id: PeerId, msg: PeerMessage) { + fn on_peer_message(&mut self, peer_id: PeerId, msg: PeerMessage) { match msg { PeerMessage::NewBlockHashes(hashes) => { self.within_pow_or_disconnect(peer_id, |this| { @@ -551,7 +552,7 @@ impl NetworkManager { } /// Handler for received messages from a handle - fn on_handle_message(&mut self, msg: NetworkHandleMessage) { + fn on_handle_message(&mut self, msg: NetworkHandleMessage) { match msg { NetworkHandleMessage::DiscoveryListener(tx) => { self.swarm.state_mut().discovery_mut().add_listener(tx); @@ -646,7 +647,7 @@ impl NetworkManager { } } - fn on_swarm_event(&mut self, event: SwarmEvent) { + fn on_swarm_event(&mut self, event: SwarmEvent) { // handle event match event { SwarmEvent::ValidMessage { peer_id, message } => self.on_peer_message(peer_id, message), @@ -981,7 +982,7 @@ impl NetworkManager { } } -impl Future for NetworkManager { +impl Future for NetworkManager { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 6b8287fe51cf..199498b0b4c1 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -3,57 +3,58 @@ //! An `RLPx` stream is multiplexed via the prepended message-id of a framed message. //! Capabilities are exchanged via the `RLPx` `Hello` message as pairs of `(id, version)`, -use std::{ - sync::Arc, - task::{ready, Context, Poll}, -}; - +use alloy_consensus::BlockHeader; use alloy_primitives::{Bytes, B256}; use futures::FutureExt; use reth_eth_wire::{ capability::RawCapabilityMessage, message::RequestPair, BlockBodies, BlockHeaders, EthMessage, - GetBlockBodies, GetBlockHeaders, NewBlock, NewBlockHashes, NewPooledTransactionHashes, - NodeData, PooledTransactions, Receipts, SharedTransactions, Transactions, + EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives, NewBlock, + NewBlockHashes, NewPooledTransactionHashes, NodeData, PooledTransactions, Receipts, + SharedTransactions, Transactions, }; use reth_network_api::PeerRequest; use reth_network_p2p::error::{RequestError, RequestResult}; -use reth_primitives::{BlockBody, Header, PooledTransactionsElement, ReceiptWithBloom}; +use reth_primitives::ReceiptWithBloom; +use std::{ + sync::Arc, + task::{ready, Context, Poll}, +}; use tokio::sync::oneshot; /// Internal form of a `NewBlock` message #[derive(Debug, Clone)] -pub struct NewBlockMessage { +pub struct NewBlockMessage { /// Hash of the block pub hash: B256, /// Raw received message - pub block: Arc, + pub block: Arc>, } // === impl NewBlockMessage === -impl NewBlockMessage { +impl NewBlockMessage { /// Returns the block number of the block pub fn number(&self) -> u64 { - self.block.block.header.number + self.block.block.header().number() } } /// All Bi-directional eth-message variants that can be sent to a session or received from a /// session. #[derive(Debug)] -pub enum PeerMessage { +pub enum PeerMessage { /// Announce new block hashes NewBlockHashes(NewBlockHashes), /// Broadcast new block. - NewBlock(NewBlockMessage), + NewBlock(NewBlockMessage), /// Received transactions _from_ the peer - ReceivedTransaction(Transactions), + ReceivedTransaction(Transactions), /// Broadcast transactions _from_ local _to_ a peer. - SendTransactions(SharedTransactions), + SendTransactions(SharedTransactions), /// Send new pooled transactions PooledTransactions(NewPooledTransactionHashes), /// All `eth` request variants. 
- EthRequest(PeerRequest), + EthRequest(PeerRequest), /// Other than eth namespace message Other(RawCapabilityMessage), } @@ -74,21 +75,21 @@ pub enum BlockRequest { /// Corresponding variant for [`PeerRequest`]. #[derive(Debug)] -pub enum PeerResponse { +pub enum PeerResponse { /// Represents a response to a request for block headers. BlockHeaders { /// The receiver channel for the response to a block headers request. - response: oneshot::Receiver>, + response: oneshot::Receiver>>, }, /// Represents a response to a request for block bodies. BlockBodies { /// The receiver channel for the response to a block bodies request. - response: oneshot::Receiver>, + response: oneshot::Receiver>>, }, /// Represents a response to a request for pooled transactions. PooledTransactions { /// The receiver channel for the response to a pooled transactions request. - response: oneshot::Receiver>, + response: oneshot::Receiver>>, }, /// Represents a response to a request for `NodeData`. NodeData { @@ -104,9 +105,9 @@ pub enum PeerResponse { // === impl PeerResponse === -impl PeerResponse { +impl PeerResponse { /// Polls the type to completion. - pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { macro_rules! poll_request { ($response:ident, $item:ident, $cx:ident) => { match ready!($response.poll_unpin($cx)) { @@ -139,13 +140,13 @@ impl PeerResponse { /// All response variants for [`PeerResponse`] #[derive(Debug)] -pub enum PeerResponseResult { +pub enum PeerResponseResult { /// Represents a result containing block headers or an error. - BlockHeaders(RequestResult>), + BlockHeaders(RequestResult>), /// Represents a result containing block bodies or an error. - BlockBodies(RequestResult>), + BlockBodies(RequestResult>), /// Represents a result containing pooled transactions or an error. - PooledTransactions(RequestResult>), + PooledTransactions(RequestResult>), /// Represents a result containing node data or an error. NodeData(RequestResult>), /// Represents a result containing receipts or an error. @@ -154,9 +155,9 @@ pub enum PeerResponseResult { // === impl PeerResponseResult === -impl PeerResponseResult { +impl PeerResponseResult { /// Converts this response into an [`EthMessage`] - pub fn try_into_message(self, id: u64) -> RequestResult { + pub fn try_into_message(self, id: u64) -> RequestResult> { macro_rules! 
to_message { ($response:ident, $item:ident, $request_id:ident) => { match $response { diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 594ad4d155de..eadeccb15493 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -1,61 +1,59 @@ -use std::{ - net::SocketAddr, - sync::{ - atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, - Arc, - }, +use crate::{ + config::NetworkMode, protocol::RlpxSubProtocol, swarm::NetworkConnectionState, + transactions::TransactionsHandle, FetchClient, }; - use alloy_primitives::B256; use enr::Enr; use parking_lot::Mutex; use reth_discv4::{Discv4, NatResolver}; use reth_discv5::Discv5; -use reth_eth_wire::{DisconnectReason, NewBlock, NewPooledTransactionHashes, SharedTransactions}; +use reth_eth_wire::{ + DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, NewBlock, + NewPooledTransactionHashes, SharedTransactions, +}; +use reth_ethereum_forks::Head; use reth_network_api::{ test_utils::{PeersHandle, PeersHandleProvider}, BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent, NetworkEventListenerProvider, NetworkInfo, NetworkStatus, PeerInfo, PeerRequest, Peers, PeersInfo, }; -use reth_network_p2p::{ - sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}, - BlockClient, -}; +use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::{PeerAddr, PeerKind, Reputation, ReputationChangeKind}; -use reth_primitives::{Head, TransactionSigned}; use reth_tokio_util::{EventSender, EventStream}; use secp256k1::SecretKey; +use std::{ + net::SocketAddr, + sync::{ + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, + Arc, + }, +}; use tokio::sync::{ mpsc::{self, UnboundedSender}, oneshot, }; use tokio_stream::wrappers::UnboundedReceiverStream; -use crate::{ - config::NetworkMode, protocol::RlpxSubProtocol, swarm::NetworkConnectionState, - transactions::TransactionsHandle, FetchClient, -}; - /// A _shareable_ network frontend. Used to interact with the network. /// /// See also [`NetworkManager`](crate::NetworkManager). #[derive(Clone, Debug)] -pub struct NetworkHandle { +pub struct NetworkHandle { /// The Arc'ed delegate that contains the state. - inner: Arc, + inner: Arc>, } // === impl NetworkHandle === -impl NetworkHandle { +impl NetworkHandle { /// Creates a single new instance. #[allow(clippy::too_many_arguments)] pub(crate) fn new( num_active_peers: Arc, listener_address: Arc>, - to_manager_tx: UnboundedSender, + to_manager_tx: UnboundedSender>, secret_key: SecretKey, local_peer_id: PeerId, peers: PeersHandle, @@ -64,7 +62,7 @@ impl NetworkHandle { tx_gossip_disabled: bool, discv4: Option, discv5: Option, - event_sender: EventSender, + event_sender: EventSender>>, nat: Option, ) -> Self { let inner = NetworkInner { @@ -92,7 +90,7 @@ impl NetworkHandle { &self.inner.local_peer_id } - fn manager(&self) -> &UnboundedSender { + fn manager(&self) -> &UnboundedSender> { &self.inner.to_manager_tx } @@ -102,7 +100,7 @@ impl NetworkHandle { } /// Sends a [`NetworkHandleMessage`] to the manager - pub(crate) fn send_message(&self, msg: NetworkHandleMessage) { + pub(crate) fn send_message(&self, msg: NetworkHandleMessage) { let _ = self.inner.to_manager_tx.send(msg); } @@ -116,12 +114,12 @@ impl NetworkHandle { /// Caution: in `PoS` this is a noop because new blocks are no longer announced over devp2p. /// Instead they are sent to the node by CL and can be requested over devp2p. 
/// Broadcasting new blocks is considered a protocol violation. - pub fn announce_block(&self, block: NewBlock, hash: B256) { + pub fn announce_block(&self, block: NewBlock, hash: B256) { self.send_message(NetworkHandleMessage::AnnounceBlock(block, hash)) } /// Sends a [`PeerRequest`] to the given peer's session. - pub fn send_request(&self, peer_id: PeerId, request: PeerRequest) { + pub fn send_request(&self, peer_id: PeerId, request: PeerRequest) { self.send_message(NetworkHandleMessage::EthRequest { peer_id, request }) } @@ -131,7 +129,7 @@ impl NetworkHandle { } /// Send full transactions to the peer - pub fn send_transactions(&self, peer_id: PeerId, msg: Vec>) { + pub fn send_transactions(&self, peer_id: PeerId, msg: Vec>) { self.send_message(NetworkHandleMessage::SendTransaction { peer_id, msg: SharedTransactions(msg), @@ -141,7 +139,7 @@ impl NetworkHandle { /// Send message to get the [`TransactionsHandle`]. /// /// Returns `None` if no transaction task is installed. - pub async fn transactions_handle(&self) -> Option { + pub async fn transactions_handle(&self) -> Option> { let (tx, rx) = oneshot::channel(); let _ = self.manager().send(NetworkHandleMessage::GetTransactionsHandle(tx)); rx.await.unwrap() @@ -189,8 +187,8 @@ impl NetworkHandle { // === API Implementations === -impl NetworkEventListenerProvider for NetworkHandle { - fn event_listener(&self) -> EventStream { +impl NetworkEventListenerProvider for NetworkHandle { + fn event_listener(&self) -> EventStream>> { self.inner.event_sender.new_listener() } @@ -201,13 +199,13 @@ impl NetworkEventListenerProvider for NetworkHandle { } } -impl NetworkProtocols for NetworkHandle { +impl NetworkProtocols for NetworkHandle { fn add_rlpx_sub_protocol(&self, protocol: RlpxSubProtocol) { self.send_message(NetworkHandleMessage::AddRlpxSubProtocol(protocol)) } } -impl PeersInfo for NetworkHandle { +impl PeersInfo for NetworkHandle { fn num_connected_peers(&self) -> usize { self.inner.num_active_peers.load(Ordering::Relaxed) } @@ -252,7 +250,7 @@ impl PeersInfo for NetworkHandle { } } -impl Peers for NetworkHandle { +impl Peers for NetworkHandle { fn add_trusted_peer_id(&self, peer: PeerId) { self.send_message(NetworkHandleMessage::AddTrustedPeerId(peer)); } @@ -340,13 +338,13 @@ impl Peers for NetworkHandle { } } -impl PeersHandleProvider for NetworkHandle { +impl PeersHandleProvider for NetworkHandle { fn peers_handle(&self) -> &PeersHandle { &self.inner.peers } } -impl NetworkInfo for NetworkHandle { +impl NetworkInfo for NetworkHandle { fn local_addr(&self) -> SocketAddr { *self.inner.listener_address.lock() } @@ -370,7 +368,7 @@ impl NetworkInfo for NetworkHandle { } } -impl SyncStateProvider for NetworkHandle { +impl SyncStateProvider for NetworkHandle { fn is_syncing(&self) -> bool { self.inner.is_syncing.load(Ordering::Relaxed) } @@ -383,7 +381,7 @@ impl SyncStateProvider for NetworkHandle { } } -impl NetworkSyncUpdater for NetworkHandle { +impl NetworkSyncUpdater for NetworkHandle { fn update_sync_state(&self, state: SyncState) { let future_state = state.is_syncing(); let prev_state = self.inner.is_syncing.swap(future_state, Ordering::Relaxed); @@ -399,8 +397,10 @@ impl NetworkSyncUpdater for NetworkHandle { } } -impl BlockDownloaderProvider for NetworkHandle { - async fn fetch_client(&self) -> Result { +impl BlockDownloaderProvider for NetworkHandle { + type Client = FetchClient; + + async fn fetch_client(&self) -> Result { let (tx, rx) = oneshot::channel(); let _ = self.manager().send(NetworkHandleMessage::FetchClient(tx)); 
rx.await @@ -408,11 +408,11 @@ impl BlockDownloaderProvider for NetworkHandle { } #[derive(Debug)] -struct NetworkInner { +struct NetworkInner { /// Number of active peer sessions the node's currently handling. num_active_peers: Arc, /// Sender half of the message channel to the [`crate::NetworkManager`]. - to_manager_tx: UnboundedSender, + to_manager_tx: UnboundedSender>, /// The local address that accepts incoming connections. listener_address: Arc>, /// The secret key used for authenticating sessions. @@ -436,7 +436,7 @@ struct NetworkInner { /// The instance of the discv5 service discv5: Option, /// Sender for high level network events. - event_sender: EventSender, + event_sender: EventSender>>, /// The NAT resolver nat: Option, } @@ -449,7 +449,7 @@ pub trait NetworkProtocols: Send + Sync { /// Internal messages that can be passed to the [`NetworkManager`](crate::NetworkManager). #[derive(Debug)] -pub(crate) enum NetworkHandleMessage { +pub(crate) enum NetworkHandleMessage { /// Marks a peer as trusted. AddTrustedPeerId(PeerId), /// Adds an address for a peer, including its ID, kind, and socket address. @@ -459,13 +459,13 @@ pub(crate) enum NetworkHandleMessage { /// Disconnects a connection to a peer if it exists, optionally providing a disconnect reason. DisconnectPeer(PeerId, Option), /// Broadcasts an event to announce a new block to all nodes. - AnnounceBlock(NewBlock, B256), + AnnounceBlock(NewBlock, B256), /// Sends a list of transactions to the given peer. SendTransaction { /// The ID of the peer to which the transactions are sent. peer_id: PeerId, /// The shared transactions to send. - msg: SharedTransactions, + msg: SharedTransactions, }, /// Sends a list of transaction hashes to the given peer. SendPooledTransactionHashes { @@ -479,12 +479,12 @@ pub(crate) enum NetworkHandleMessage { /// The peer to send the request to. peer_id: PeerId, /// The request to send to the peer's sessions. - request: PeerRequest, + request: PeerRequest, }, /// Applies a reputation change to the given peer. ReputationChange(PeerId, ReputationChangeKind), /// Returns the client that can be used to interact with the network. - FetchClient(oneshot::Sender), + FetchClient(oneshot::Sender>), /// Applies a status update. StatusUpdate { /// The head status to apply. @@ -503,7 +503,7 @@ pub(crate) enum NetworkHandleMessage { /// Gets the reputation for a specific peer via a oneshot sender. GetReputationById(PeerId, oneshot::Sender>), /// Retrieves the `TransactionsHandle` via a oneshot sender. - GetTransactionsHandle(oneshot::Sender>), + GetTransactionsHandle(oneshot::Sender>>), /// Initiates a graceful shutdown of the network via a oneshot sender. Shutdown(oneshot::Sender<()>), /// Sets the network state between hibernation and active. diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index 4855ff5e7431..f8d18e159946 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -1,16 +1,13 @@ //! 
Peer related implementations -use std::{ - collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, - fmt::Display, - io::{self}, - net::{IpAddr, SocketAddr}, - task::{Context, Poll}, - time::Duration, +use crate::{ + error::SessionError, + session::{Direction, PendingSessionHandshakeError}, + swarm::NetworkConnectionState, }; - use futures::StreamExt; use reth_eth_wire::{errors::EthStreamError, DisconnectReason}; +use reth_ethereum_forks::ForkId; use reth_net_banlist::BanList; use reth_network_api::test_utils::{PeerCommand, PeersHandle}; use reth_network_peers::{NodeRecord, PeerId}; @@ -22,7 +19,14 @@ use reth_network_types::{ ConnectionsConfig, Peer, PeerAddr, PeerConnectionState, PeerKind, PeersConfig, ReputationChangeKind, ReputationChangeOutcome, ReputationChangeWeights, }; -use reth_primitives::ForkId; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, + fmt::Display, + io::{self}, + net::{IpAddr, SocketAddr}, + task::{Context, Poll}, + time::Duration, +}; use thiserror::Error; use tokio::{ sync::mpsc, @@ -31,12 +35,6 @@ use tokio::{ use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{trace, warn}; -use crate::{ - error::SessionError, - session::{Direction, PendingSessionHandshakeError}, - swarm::NetworkConnectionState, -}; - /// Maintains the state of _all_ the peers known to the network. /// /// This is supposed to be owned by the network itself, but can be reached via the [`PeersHandle`]. @@ -377,7 +375,7 @@ impl PeersManager { if peer.is_trusted() || peer.is_static() { // For misbehaving trusted or static peers, we provide a bit more leeway when // penalizing them. - ban_duration = self.backoff_durations.medium; + ban_duration = self.backoff_durations.low / 2; } } diff --git a/crates/net/network/src/protocol.rs b/crates/net/network/src/protocol.rs index eeffd1c95f4f..aa0749c2c7b9 100644 --- a/crates/net/network/src/protocol.rs +++ b/crates/net/network/src/protocol.rs @@ -2,19 +2,18 @@ //! //! See also -use std::{ - fmt, - net::SocketAddr, - ops::{Deref, DerefMut}, - pin::Pin, -}; - use alloy_primitives::bytes::BytesMut; use futures::Stream; use reth_eth_wire::{ capability::SharedCapabilities, multiplex::ProtocolConnection, protocol::Protocol, }; use reth_network_api::{Direction, PeerId}; +use std::{ + fmt, + net::SocketAddr, + ops::{Deref, DerefMut}, + pin::Pin, +}; /// A trait that allows to offer additional RLPx-based application-level protocols when establishing /// a peer-to-peer connection. 
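// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the diff): the network.rs changes above
// thread a `NetworkPrimitives` type parameter through `NetworkHandle` and its
// `NetworkHandleMessage` channel. The underlying shape is the common tokio
// command-channel pattern: a cheaply cloneable handle pushes typed commands to
// a single manager task, embedding a oneshot sender in the command for
// request/response calls (compare `FetchClient(oneshot::Sender<..>)`). A
// minimal, self-contained sketch of that shape follows; the names `Primitives`,
// `Command`, and `Handle` are hypothetical, not reth APIs.

use tokio::sync::{mpsc, oneshot};

pub trait Primitives: Send + Sync + 'static {
    type BroadcastedTransaction: Send;
}

enum Command<P: Primitives> {
    // fire-and-forget, like `NetworkHandleMessage::AnnounceBlock`
    Broadcast(P::BroadcastedTransaction),
    // request/response via a oneshot sender, like `FetchClient(..)`
    PeerCount(oneshot::Sender<usize>),
}

struct Handle<P: Primitives> {
    to_manager: mpsc::UnboundedSender<Command<P>>,
}

// manual impl so cloning the handle does not require `P: Clone`
impl<P: Primitives> Clone for Handle<P> {
    fn clone(&self) -> Self {
        Self { to_manager: self.to_manager.clone() }
    }
}

impl<P: Primitives> Handle<P> {
    fn broadcast(&self, tx: P::BroadcastedTransaction) {
        // an unbounded send never blocks; it only fails if the manager is gone
        let _ = self.to_manager.send(Command::Broadcast(tx));
    }

    async fn peer_count(&self) -> Option<usize> {
        let (tx, rx) = oneshot::channel();
        self.to_manager.send(Command::PeerCount(tx)).ok()?;
        rx.await.ok()
    }
}
// ---------------------------------------------------------------------------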
diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 10048823c549..76701f7e2abf 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -11,18 +11,28 @@ use std::{ time::{Duration, Instant}, }; +use crate::{ + message::{NewBlockMessage, PeerMessage, PeerResponse, PeerResponseResult}, + session::{ + conn::EthRlpxConnection, + handle::{ActiveSessionMessage, SessionCommand}, + SessionId, + }, +}; +use alloy_primitives::Sealable; use futures::{stream::Fuse, SinkExt, StreamExt}; use metrics::Gauge; use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError, P2PStreamError}, message::{EthBroadcastMessage, RequestPair}, - Capabilities, DisconnectP2P, DisconnectReason, EthMessage, + Capabilities, DisconnectP2P, DisconnectReason, EthMessage, NetworkPrimitives, }; use reth_metrics::common::mpsc::MeteredPollSender; use reth_network_api::PeerRequest; use reth_network_p2p::error::RequestError; use reth_network_peers::PeerId; use reth_network_types::session::config::INITIAL_REQUEST_TIMEOUT; +use reth_primitives_traits::Block; use rustc_hash::FxHashMap; use tokio::{ sync::{mpsc::error::TrySendError, oneshot}, @@ -32,15 +42,6 @@ use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::PollSender; use tracing::{debug, trace}; -use crate::{ - message::{NewBlockMessage, PeerMessage, PeerResponse, PeerResponseResult}, - session::{ - conn::EthRlpxConnection, - handle::{ActiveSessionMessage, SessionCommand}, - SessionId, - }, -}; - // Constants for timeout updating. /// Minimum timeout value @@ -62,11 +63,11 @@ const TIMEOUT_SCALING: u32 = 3; /// - incoming requests/broadcasts _from remote_ via the connection /// - responses for handled ETH requests received from the remote peer. #[allow(dead_code)] -pub(crate) struct ActiveSession { +pub(crate) struct ActiveSession { /// Keeps track of request ids. pub(crate) next_id: u64, /// The underlying connection. - pub(crate) conn: EthRlpxConnection, + pub(crate) conn: EthRlpxConnection, /// Identifier of the node we're connected to. pub(crate) remote_peer_id: PeerId, /// The address we're connected to. @@ -76,19 +77,19 @@ pub(crate) struct ActiveSession { /// Internal identifier of this session pub(crate) session_id: SessionId, /// Incoming commands from the manager - pub(crate) commands_rx: ReceiverStream, + pub(crate) commands_rx: ReceiverStream>, /// Sink to send messages to the [`SessionManager`](super::SessionManager). - pub(crate) to_session_manager: MeteredPollSender, + pub(crate) to_session_manager: MeteredPollSender>, /// A message that needs to be delivered to the session manager - pub(crate) pending_message_to_session: Option, + pub(crate) pending_message_to_session: Option>, /// Incoming internal requests which are delegated to the remote peer. - pub(crate) internal_request_tx: Fuse>, + pub(crate) internal_request_tx: Fuse>>, /// All requests sent to the remote peer we're waiting on a response - pub(crate) inflight_requests: FxHashMap, + pub(crate) inflight_requests: FxHashMap>>, /// All requests that were sent by the remote peer and we're waiting on an internal response - pub(crate) received_requests_from_remote: Vec, + pub(crate) received_requests_from_remote: Vec>, /// Buffered messages that should be handled and sent to the peer. - pub(crate) queued_outgoing: QueuedOutgoingMessages, + pub(crate) queued_outgoing: QueuedOutgoingMessages, /// The maximum time we wait for a response from a peer. 
pub(crate) internal_request_timeout: Arc, /// Interval when to check for timed out requests. @@ -97,10 +98,11 @@ pub(crate) struct ActiveSession { /// considered a protocol violation and the session will initiate a drop. pub(crate) protocol_breach_request_timeout: Duration, /// Used to reserve a slot to guarantee that the termination message is delivered - pub(crate) terminate_message: Option<(PollSender, ActiveSessionMessage)>, + pub(crate) terminate_message: + Option<(PollSender>, ActiveSessionMessage)>, } -impl ActiveSession { +impl ActiveSession { /// Returns `true` if the session is currently in the process of disconnecting fn is_disconnecting(&self) -> bool { self.conn.inner().is_disconnecting() @@ -122,7 +124,7 @@ impl ActiveSession { /// Handle a message read from the connection. /// /// Returns an error if the message is considered to be in violation of the protocol. - fn on_incoming_message(&mut self, msg: EthMessage) -> OnIncomingMessageOutcome { + fn on_incoming_message(&mut self, msg: EthMessage) -> OnIncomingMessageOutcome { /// A macro that handles an incoming request /// This creates a new channel and tries to send the sender half to the session while /// storing the receiver half internally so the pending response can be polled. @@ -182,7 +184,7 @@ impl ActiveSession { } EthMessage::NewBlock(msg) => { let block = - NewBlockMessage { hash: msg.block.header.hash_slow(), block: Arc::new(*msg) }; + NewBlockMessage { hash: msg.block.header().hash_slow(), block: Arc::new(*msg) }; self.try_emit_broadcast(PeerMessage::NewBlock(block)).into() } EthMessage::Transactions(msg) => { @@ -238,7 +240,7 @@ impl ActiveSession { } /// Handle an internal peer request that will be sent to the remote. - fn on_internal_peer_request(&mut self, request: PeerRequest, deadline: Instant) { + fn on_internal_peer_request(&mut self, request: PeerRequest, deadline: Instant) { let request_id = self.next_id(); let msg = request.create_request_message(request_id); self.queued_outgoing.push_back(msg.into()); @@ -251,7 +253,7 @@ impl ActiveSession { } /// Handle a message received from the internal network - fn on_internal_peer_message(&mut self, msg: PeerMessage) { + fn on_internal_peer_message(&mut self, msg: PeerMessage) { match msg { PeerMessage::NewBlockHashes(msg) => { self.queued_outgoing.push_back(EthMessage::NewBlockHashes(msg).into()); @@ -289,7 +291,7 @@ impl ActiveSession { /// Handle a Response to the peer /// /// This will queue the response to be sent to the peer - fn handle_outgoing_response(&mut self, id: u64, resp: PeerResponseResult) { + fn handle_outgoing_response(&mut self, id: u64, resp: PeerResponseResult) { match resp.try_into_message(id) { Ok(msg) => { self.queued_outgoing.push_back(msg.into()); @@ -304,7 +306,7 @@ impl ActiveSession { /// /// Returns the message if the bounded channel is currently unable to handle this message. #[allow(clippy::result_large_err)] - fn try_emit_broadcast(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { + fn try_emit_broadcast(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { let Some(sender) = self.to_session_manager.inner().get_ref() else { return Ok(()) }; match sender @@ -330,7 +332,7 @@ impl ActiveSession { /// /// Returns the message if the bounded channel is currently unable to handle this message. 
#[allow(clippy::result_large_err)] - fn try_emit_request(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { + fn try_emit_request(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { let Some(sender) = self.to_session_manager.inner().get_ref() else { return Ok(()) }; match sender @@ -470,7 +472,7 @@ impl ActiveSession { } } -impl Future for ActiveSession { +impl Future for ActiveSession { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { @@ -656,20 +658,20 @@ impl Future for ActiveSession { } /// Tracks a request received from the peer -pub(crate) struct ReceivedRequest { +pub(crate) struct ReceivedRequest { /// Protocol Identifier request_id: u64, /// Receiver half of the channel that's supposed to receive the proper response. - rx: PeerResponse, + rx: PeerResponse, /// Timestamp when we read this msg from the wire. #[allow(dead_code)] received: Instant, } /// A request that waits for a response from the peer -pub(crate) struct InflightRequest { +pub(crate) struct InflightRequest { /// Request we sent to peer and the internal response channel - request: RequestState, + request: RequestState, /// Instant when the request was sent timestamp: Instant, /// Time limit for the response @@ -678,7 +680,7 @@ pub(crate) struct InflightRequest { // === impl InflightRequest === -impl InflightRequest { +impl InflightRequest> { /// Returns true if the request is timedout #[inline] fn is_timed_out(&self, now: Instant) -> bool { @@ -703,17 +705,19 @@ impl InflightRequest { } /// All outcome variants when handling an incoming message -enum OnIncomingMessageOutcome { +enum OnIncomingMessageOutcome { /// Message successfully handled. Ok, /// Message is considered to be in violation of the protocol - BadMessage { error: EthStreamError, message: EthMessage }, + BadMessage { error: EthStreamError, message: EthMessage }, /// Currently no capacity to handle the message - NoCapacity(ActiveSessionMessage), + NoCapacity(ActiveSessionMessage), } -impl From> for OnIncomingMessageOutcome { - fn from(res: Result<(), ActiveSessionMessage>) -> Self { +impl From>> + for OnIncomingMessageOutcome +{ + fn from(res: Result<(), ActiveSessionMessage>) -> Self { match res { Ok(_) => Self::Ok, Err(msg) => Self::NoCapacity(msg), @@ -721,29 +725,29 @@ impl From> for OnIncomingMessageOutcome { } } -enum RequestState { +enum RequestState { /// Waiting for the response - Waiting(PeerRequest), + Waiting(R), /// Request already timed out TimedOut, } /// Outgoing messages that can be sent over the wire. -pub(crate) enum OutgoingMessage { +pub(crate) enum OutgoingMessage { /// A message that is owned. - Eth(EthMessage), + Eth(EthMessage), /// A message that may be shared by multiple sessions. 
- Broadcast(EthBroadcastMessage), + Broadcast(EthBroadcastMessage), } -impl From for OutgoingMessage { - fn from(value: EthMessage) -> Self { +impl From> for OutgoingMessage { + fn from(value: EthMessage) -> Self { Self::Eth(value) } } -impl From for OutgoingMessage { - fn from(value: EthBroadcastMessage) -> Self { +impl From> for OutgoingMessage { + fn from(value: EthBroadcastMessage) -> Self { Self::Broadcast(value) } } @@ -760,22 +764,22 @@ fn calculate_new_timeout(current_timeout: Duration, estimated_rtt: Duration) -> } /// A helper struct that wraps the queue of outgoing messages and a metric to track their count -pub(crate) struct QueuedOutgoingMessages { - messages: VecDeque, +pub(crate) struct QueuedOutgoingMessages { + messages: VecDeque>, count: Gauge, } -impl QueuedOutgoingMessages { +impl QueuedOutgoingMessages { pub(crate) const fn new(metric: Gauge) -> Self { Self { messages: VecDeque::new(), count: metric } } - pub(crate) fn push_back(&mut self, message: OutgoingMessage) { + pub(crate) fn push_back(&mut self, message: OutgoingMessage) { self.messages.push_back(message); self.count.increment(1); } - pub(crate) fn pop_front(&mut self) -> Option { + pub(crate) fn pop_front(&mut self) -> Option> { self.messages.pop_front().inspect(|_| self.count.decrement(1)) } @@ -791,8 +795,8 @@ mod tests { use reth_chainspec::MAINNET; use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ - EthStream, GetBlockBodies, HelloMessageWithProtocols, P2PStream, Status, StatusBuilder, - UnauthedEthStream, UnauthedP2PStream, + EthNetworkPrimitives, EthStream, GetBlockBodies, HelloMessageWithProtocols, P2PStream, + Status, StatusBuilder, UnauthedEthStream, UnauthedP2PStream, }; use reth_network_peers::pk2id; use reth_network_types::session::config::PROTOCOL_BREACH_REQUEST_TIMEOUT; @@ -808,11 +812,11 @@ mod tests { HelloMessageWithProtocols::builder(pk2id(&server_key.public_key(SECP256K1))).build() } - struct SessionBuilder { + struct SessionBuilder { _remote_capabilities: Arc, - active_session_tx: mpsc::Sender, - active_session_rx: ReceiverStream, - to_sessions: Vec>, + active_session_tx: mpsc::Sender>, + active_session_rx: ReceiverStream>, + to_sessions: Vec>>, secret_key: SecretKey, local_peer_id: PeerId, hello: HelloMessageWithProtocols, @@ -821,7 +825,7 @@ mod tests { next_id: usize, } - impl SessionBuilder { + impl SessionBuilder { fn next_id(&mut self) -> SessionId { let id = self.next_id; self.next_id += 1; @@ -858,7 +862,7 @@ mod tests { }) } - async fn connect_incoming(&mut self, stream: TcpStream) -> ActiveSession { + async fn connect_incoming(&mut self, stream: TcpStream) -> ActiveSession { let remote_addr = stream.local_addr().unwrap(); let session_id = self.next_id(); let (_disconnect_tx, disconnect_rx) = oneshot::channel(); diff --git a/crates/net/network/src/session/conn.rs b/crates/net/network/src/session/conn.rs index 628c880c8eac..45b83d1c487b 100644 --- a/crates/net/network/src/session/conn.rs +++ b/crates/net/network/src/session/conn.rs @@ -1,26 +1,25 @@ //! 
Connection types for a session -use std::{ - pin::Pin, - task::{Context, Poll}, -}; - use futures::{Sink, Stream}; use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ errors::EthStreamError, message::EthBroadcastMessage, multiplex::{ProtocolProxy, RlpxSatelliteStream}, - EthMessage, EthStream, EthVersion, P2PStream, + EthMessage, EthNetworkPrimitives, EthStream, EthVersion, NetworkPrimitives, P2PStream, +}; +use std::{ + pin::Pin, + task::{Context, Poll}, }; use tokio::net::TcpStream; /// The type of the underlying peer network connection. -pub type EthPeerConnection = EthStream>>; +pub type EthPeerConnection = EthStream>, N>; /// Various connection types that at least support the ETH protocol. -pub type EthSatelliteConnection = - RlpxSatelliteStream, EthStream>; +pub type EthSatelliteConnection = + RlpxSatelliteStream, EthStream>; /// Connection types that support the ETH protocol. /// @@ -30,14 +29,14 @@ pub type EthSatelliteConnection = // This type is boxed because the underlying stream is ~6KB, // mostly coming from `P2PStream`'s `snap::Encoder` (2072), and `ECIESStream` (3600). #[derive(Debug)] -pub enum EthRlpxConnection { +pub enum EthRlpxConnection { /// A connection that only supports the ETH protocol. - EthOnly(Box), + EthOnly(Box>), /// A connection that supports the ETH protocol and __at least one other__ `RLPx` protocol. - Satellite(Box), + Satellite(Box>), } -impl EthRlpxConnection { +impl EthRlpxConnection { /// Returns the negotiated ETH version. #[inline] pub(crate) const fn version(&self) -> EthVersion { @@ -78,7 +77,7 @@ impl EthRlpxConnection { #[inline] pub fn start_send_broadcast( &mut self, - item: EthBroadcastMessage, + item: EthBroadcastMessage, ) -> Result<(), EthStreamError> { match self { Self::EthOnly(conn) => conn.start_send_broadcast(item), @@ -87,16 +86,16 @@ impl EthRlpxConnection { } } -impl From for EthRlpxConnection { +impl From> for EthRlpxConnection { #[inline] - fn from(conn: EthPeerConnection) -> Self { + fn from(conn: EthPeerConnection) -> Self { Self::EthOnly(Box::new(conn)) } } -impl From for EthRlpxConnection { +impl From> for EthRlpxConnection { #[inline] - fn from(conn: EthSatelliteConnection) -> Self { + fn from(conn: EthSatelliteConnection) -> Self { Self::Satellite(Box::new(conn)) } } @@ -112,22 +111,22 @@ macro_rules! delegate_call { } } -impl Stream for EthRlpxConnection { - type Item = Result; +impl Stream for EthRlpxConnection { + type Item = Result, EthStreamError>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { delegate_call!(self.poll_next(cx)) } } -impl Sink for EthRlpxConnection { +impl Sink> for EthRlpxConnection { type Error = EthStreamError; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { delegate_call!(self.poll_ready(cx)) } - fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { + fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { delegate_call!(self.start_send(item)) } diff --git a/crates/net/network/src/session/counter.rs b/crates/net/network/src/session/counter.rs index 0d8f764f206d..052cf1e25707 100644 --- a/crates/net/network/src/session/counter.rs +++ b/crates/net/network/src/session/counter.rs @@ -1,8 +1,7 @@ +use super::ExceedsSessionLimit; use reth_network_api::Direction; use reth_network_types::SessionLimits; -use super::ExceedsSessionLimit; - /// Keeps track of all sessions. 
#[derive(Debug, Clone)] pub struct SessionCounter { diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index a022e670419a..d167dc0e6ec4 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -1,26 +1,24 @@ //! Session handles. -use std::{io, net::SocketAddr, sync::Arc, time::Instant}; - +use crate::{ + message::PeerMessage, + session::{conn::EthRlpxConnection, Direction, SessionId}, + PendingSessionHandshakeError, +}; use reth_ecies::ECIESError; use reth_eth_wire::{ capability::CapabilityMessage, errors::EthStreamError, Capabilities, DisconnectReason, - EthVersion, Status, + EthVersion, NetworkPrimitives, Status, }; use reth_network_api::PeerInfo; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::PeerKind; +use std::{io, net::SocketAddr, sync::Arc, time::Instant}; use tokio::sync::{ mpsc::{self, error::SendError}, oneshot, }; -use crate::{ - message::PeerMessage, - session::{conn::EthRlpxConnection, Direction, SessionId}, - PendingSessionHandshakeError, -}; - /// A handler attached to a peer session that's not authenticated yet, pending Handshake and hello /// message which exchanges the `capabilities` of the peer. /// @@ -54,7 +52,7 @@ impl PendingSessionHandle { /// Within an active session that supports the `Ethereum Wire Protocol `, three high-level tasks can /// be performed: chain synchronization, block propagation and transaction exchange. #[derive(Debug)] -pub struct ActiveSessionHandle { +pub struct ActiveSessionHandle { /// The direction of the session pub(crate) direction: Direction, /// The assigned id for this session @@ -68,7 +66,7 @@ pub struct ActiveSessionHandle { /// Announced capabilities of the peer. pub(crate) capabilities: Arc, /// Sender half of the command channel used send commands _to_ the spawned session - pub(crate) commands_to_session: mpsc::Sender, + pub(crate) commands_to_session: mpsc::Sender>, /// The client's name and version pub(crate) client_version: Arc, /// The address we're connected to @@ -81,7 +79,7 @@ pub struct ActiveSessionHandle { // === impl ActiveSessionHandle === -impl ActiveSessionHandle { +impl ActiveSessionHandle { /// Sends a disconnect command to the session. pub fn disconnect(&self, reason: Option) { // Note: we clone the sender which ensures the channel has capacity to send the message @@ -93,7 +91,7 @@ impl ActiveSessionHandle { pub async fn try_disconnect( &self, reason: Option, - ) -> Result<(), SendError> { + ) -> Result<(), SendError>> { self.commands_to_session.clone().send(SessionCommand::Disconnect { reason }).await } @@ -162,7 +160,7 @@ impl ActiveSessionHandle { /// /// A session starts with a `Handshake`, followed by a `Hello` message which #[derive(Debug)] -pub enum PendingSessionEvent { +pub enum PendingSessionEvent { /// Represents a successful `Hello` and `Status` exchange: Established { /// An internal identifier for the established session @@ -179,7 +177,7 @@ pub enum PendingSessionEvent { status: Arc, /// The actual connection stream which can be used to send and receive `eth` protocol /// messages - conn: EthRlpxConnection, + conn: EthRlpxConnection, /// The direction of the session, either `Inbound` or `Outgoing` direction: Direction, /// The remote node's user agent, usually containing the client name and version @@ -222,20 +220,20 @@ pub enum PendingSessionEvent { /// Commands that can be sent to the spawned session. 
#[derive(Debug)] -pub enum SessionCommand { +pub enum SessionCommand { /// Disconnect the connection Disconnect { /// Why the disconnect was initiated reason: Option, }, /// Sends a message to the peer - Message(PeerMessage), + Message(PeerMessage), } /// Message variants an active session can produce and send back to the /// [`SessionManager`](crate::session::SessionManager) #[derive(Debug)] -pub enum ActiveSessionMessage { +pub enum ActiveSessionMessage { /// Session was gracefully disconnected. Disconnected { /// The remote node's public key @@ -257,7 +255,7 @@ pub enum ActiveSessionMessage { /// Identifier of the remote peer. peer_id: PeerId, /// Message received from the peer. - message: PeerMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. InvalidMessage { diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 30b1cda9da91..a020c540e385 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -23,19 +23,25 @@ use std::{ time::{Duration, Instant}, }; +use crate::{ + message::PeerMessage, + metrics::SessionManagerMetrics, + protocol::{IntoRlpxSubProtocol, RlpxSubProtocolHandlers, RlpxSubProtocols}, + session::active::ActiveSession, +}; use counter::SessionCounter; use futures::{future::Either, io, FutureExt, StreamExt}; use reth_ecies::{stream::ECIESStream, ECIESError}; use reth_eth_wire::{ capability::CapabilityMessage, errors::EthStreamError, multiplex::RlpxProtocolMultiplexer, - Capabilities, DisconnectReason, EthVersion, HelloMessageWithProtocols, Status, - UnauthedEthStream, UnauthedP2PStream, + Capabilities, DisconnectReason, EthVersion, HelloMessageWithProtocols, NetworkPrimitives, + Status, UnauthedEthStream, UnauthedP2PStream, }; +use reth_ethereum_forks::{ForkFilter, ForkId, ForkTransition, Head}; use reth_metrics::common::mpsc::MeteredPollSender; -use reth_network_api::PeerRequestSender; +use reth_network_api::{PeerRequest, PeerRequestSender}; use reth_network_peers::PeerId; use reth_network_types::SessionsConfig; -use reth_primitives::{ForkFilter, ForkId, ForkTransition, Head}; use reth_tasks::TaskSpawner; use rustc_hash::FxHashMap; use secp256k1::SecretKey; @@ -48,13 +54,6 @@ use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::PollSender; use tracing::{debug, instrument, trace}; -use crate::{ - message::PeerMessage, - metrics::SessionManagerMetrics, - protocol::{IntoRlpxSubProtocol, RlpxSubProtocolHandlers, RlpxSubProtocols}, - session::active::ActiveSession, -}; - /// Internal identifier for active sessions. #[derive(Debug, Clone, Copy, PartialOrd, PartialEq, Eq, Hash)] pub struct SessionId(usize); @@ -62,7 +61,7 @@ pub struct SessionId(usize); /// Manages a set of sessions. #[must_use = "Session Manager must be polled to process session events."] #[derive(Debug)] -pub struct SessionManager { +pub struct SessionManager { /// Tracks the identifier for the next session. next_id: usize, /// Keeps track of all sessions @@ -93,21 +92,21 @@ pub struct SessionManager { /// session is authenticated, it can be moved to the `active_session` set. pending_sessions: FxHashMap, /// All active sessions that are ready to exchange messages. - active_sessions: HashMap, + active_sessions: HashMap>, /// The original Sender half of the [`PendingSessionEvent`] channel. /// /// When a new (pending) session is created, the corresponding [`PendingSessionHandle`] will /// get a clone of this sender half. 
- pending_sessions_tx: mpsc::Sender, + pending_sessions_tx: mpsc::Sender>, /// Receiver half that listens for [`PendingSessionEvent`] produced by pending sessions. - pending_session_rx: ReceiverStream, + pending_session_rx: ReceiverStream>, /// The original Sender half of the [`ActiveSessionMessage`] channel. /// /// When active session state is reached, the corresponding [`ActiveSessionHandle`] will get a /// clone of this sender half. - active_session_tx: MeteredPollSender, + active_session_tx: MeteredPollSender>, /// Receiver half that listens for [`ActiveSessionMessage`] produced by pending sessions. - active_session_rx: ReceiverStream, + active_session_rx: ReceiverStream>, /// Additional `RLPx` sub-protocols to be used by the session manager. extra_protocols: RlpxSubProtocols, /// Tracks the ongoing graceful disconnections attempts for incoming connections. @@ -118,7 +117,7 @@ pub struct SessionManager { // === impl SessionManager === -impl SessionManager { +impl SessionManager { /// Creates a new empty [`SessionManager`]. #[allow(clippy::too_many_arguments)] pub fn new( @@ -182,7 +181,7 @@ impl SessionManager { } /// Returns a borrowed reference to the active sessions. - pub const fn active_sessions(&self) -> &HashMap { + pub const fn active_sessions(&self) -> &HashMap> { &self.active_sessions } @@ -348,7 +347,7 @@ impl SessionManager { } /// Sends a message to the peer's session - pub fn send_message(&mut self, peer_id: &PeerId, msg: PeerMessage) { + pub fn send_message(&mut self, peer_id: &PeerId, msg: PeerMessage) { if let Some(session) = self.active_sessions.get_mut(peer_id) { let _ = session.commands_to_session.try_send(SessionCommand::Message(msg)).inspect_err( |e| { @@ -373,7 +372,7 @@ impl SessionManager { } /// Removes the [`PendingSessionHandle`] if it exists. - fn remove_active_session(&mut self, id: &PeerId) -> Option { + fn remove_active_session(&mut self, id: &PeerId) -> Option> { let session = self.active_sessions.remove(id)?; self.counter.dec_active(&session.direction); Some(session) @@ -411,7 +410,7 @@ impl SessionManager { /// This polls all the session handles and returns [`SessionEvent`]. /// /// Active sessions are prioritized. - pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { // Poll events from active sessions match self.active_session_rx.poll_next_unpin(cx) { Poll::Pending => {} @@ -663,7 +662,7 @@ impl DisconnectionsCounter { /// Events produced by the [`SessionManager`] #[derive(Debug)] -pub enum SessionEvent { +pub enum SessionEvent { /// A new session was successfully authenticated. /// /// This session is now able to exchange data. @@ -681,7 +680,7 @@ pub enum SessionEvent { /// The Status message the peer sent during the `eth` handshake status: Arc, /// The channel for sending messages to the peer with the session - messages: PeerRequestSender, + messages: PeerRequestSender>, /// The direction of the session, either `Inbound` or `Outgoing` direction: Direction, /// The maximum time that the session waits for a response from the peer before timing out @@ -702,7 +701,7 @@ pub enum SessionEvent { /// The remote node's public key peer_id: PeerId, /// Message received from the peer. - message: PeerMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. 
InvalidMessage { @@ -797,18 +796,18 @@ impl PendingSessionHandshakeError { pub struct ExceedsSessionLimit(pub(crate) u32); /// Starts a pending session authentication with a timeout. -pub(crate) async fn pending_session_with_timeout( +pub(crate) async fn pending_session_with_timeout( timeout: Duration, session_id: SessionId, remote_addr: SocketAddr, direction: Direction, - events: mpsc::Sender, + events: mpsc::Sender>, f: F, ) where F: Future, { if tokio::time::timeout(timeout, f).await.is_err() { - debug!(target: "net::session", ?remote_addr, ?direction, "pending session timed out"); + trace!(target: "net::session", ?remote_addr, ?direction, "pending session timed out"); let event = PendingSessionEvent::Disconnected { remote_addr, session_id, @@ -823,11 +822,11 @@ pub(crate) async fn pending_session_with_timeout( /// /// This will wait for the _incoming_ handshake request and answer it. #[allow(clippy::too_many_arguments)] -pub(crate) async fn start_pending_incoming_session( +pub(crate) async fn start_pending_incoming_session( disconnect_rx: oneshot::Receiver<()>, session_id: SessionId, stream: TcpStream, - events: mpsc::Sender, + events: mpsc::Sender>, remote_addr: SocketAddr, secret_key: SecretKey, hello: HelloMessageWithProtocols, @@ -854,9 +853,9 @@ pub(crate) async fn start_pending_incoming_session( /// Starts the authentication process for a connection initiated by a remote peer. #[instrument(skip_all, fields(%remote_addr, peer_id), target = "net")] #[allow(clippy::too_many_arguments)] -async fn start_pending_outbound_session( +async fn start_pending_outbound_session( disconnect_rx: oneshot::Receiver<()>, - events: mpsc::Sender, + events: mpsc::Sender>, session_id: SessionId, remote_addr: SocketAddr, remote_peer_id: PeerId, @@ -903,9 +902,9 @@ async fn start_pending_outbound_session( /// Authenticates a session #[allow(clippy::too_many_arguments)] -async fn authenticate( +async fn authenticate( disconnect_rx: oneshot::Receiver<()>, - events: mpsc::Sender, + events: mpsc::Sender>, stream: TcpStream, session_id: SessionId, remote_addr: SocketAddr, @@ -986,7 +985,7 @@ async fn get_ecies_stream( /// If additional [`RlpxSubProtocolHandlers`] are provided, the hello message will be updated to /// also negotiate the additional protocols. #[allow(clippy::too_many_arguments)] -async fn authenticate_stream( +async fn authenticate_stream( stream: UnauthedP2PStream>, session_id: SessionId, remote_addr: SocketAddr, @@ -996,7 +995,7 @@ async fn authenticate_stream( mut status: Status, fork_filter: ForkFilter, mut extra_handlers: RlpxSubProtocolHandlers, -) -> PendingSessionEvent { +) -> PendingSessionEvent { // Add extra protocols to the hello message extra_handlers.retain(|handler| hello.try_add_protocol(handler.protocol()).is_ok()); diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 5caa656a98ea..5d7c0a9f6541 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -1,5 +1,25 @@ //! Keeps track of the state of the network. 
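// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the diff): `pending_session_with_timeout`
// in the session module above bounds the whole handshake future with
// `tokio::time::timeout`; on expiry the future is dropped (cancelling the
// handshake) and a disconnect event is reported so the manager can free the
// slot. A minimal sketch of that shape, with hypothetical event and id types:

use std::time::Duration;
use tokio::sync::mpsc;

#[derive(Debug)]
enum SessionEventSketch {
    TimedOut { session_id: usize },
}

async fn session_with_timeout<F>(
    timeout: Duration,
    session_id: usize,
    events: mpsc::Sender<SessionEventSketch>,
    handshake: F,
) where
    F: std::future::Future<Output = ()>,
{
    // if the handshake does not finish in time, dropping it cancels its I/O
    if tokio::time::timeout(timeout, handshake).await.is_err() {
        let _ = events.send(SessionEventSketch::TimedOut { session_id }).await;
    }
}
// ---------------------------------------------------------------------------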
+use crate::{ + cache::LruCache, + discovery::Discovery, + fetch::{BlockResponseOutcome, FetchAction, StateFetcher}, + message::{BlockRequest, NewBlockMessage, PeerResponse, PeerResponseResult}, + peers::{PeerAction, PeersManager}, + FetchClient, +}; +use alloy_consensus::BlockHeader; +use alloy_primitives::B256; +use rand::seq::SliceRandom; +use reth_eth_wire::{ + BlockHashNumber, Capabilities, DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, + NewBlockHashes, Status, +}; +use reth_ethereum_forks::ForkId; +use reth_network_api::{DiscoveredEvent, DiscoveryEvent, PeerRequest, PeerRequestSender}; +use reth_network_peers::PeerId; +use reth_network_types::{PeerAddr, PeerKind}; +use reth_primitives_traits::Block; use std::{ collections::{HashMap, VecDeque}, fmt, @@ -11,26 +31,9 @@ }, task::{Context, Poll}, }; - -use alloy_primitives::B256; -use rand::seq::SliceRandom; -use reth_eth_wire::{BlockHashNumber, Capabilities, DisconnectReason, NewBlockHashes, Status}; -use reth_network_api::{DiscoveredEvent, DiscoveryEvent, PeerRequest, PeerRequestSender}; -use reth_network_peers::PeerId; -use reth_network_types::{PeerAddr, PeerKind}; -use reth_primitives::ForkId; use tokio::sync::oneshot; use tracing::{debug, trace}; -use crate::{ - cache::LruCache, - discovery::Discovery, - fetch::{BlockResponseOutcome, FetchAction, StateFetcher}, - message::{BlockRequest, NewBlockMessage, PeerResponse, PeerResponseResult}, - peers::{PeerAction, PeersManager}, - FetchClient, -}; - /// Cache limit of blocks to keep track of for a single peer. const PEER_BLOCK_CACHE_LIMIT: u32 = 512; @@ -69,13 +72,13 @@ impl Deref for BlockNumReader { /// /// This type is also responsible for responding to received requests. #[derive(Debug)] -pub struct NetworkState { +pub struct NetworkState { /// All active peers and their state. - active_peers: HashMap, + active_peers: HashMap>, /// Manages connections to peers. peers_manager: PeersManager, /// Buffered messages until polled. - queued_messages: VecDeque, + queued_messages: VecDeque>, /// The client type that can interact with the chain. /// /// This type is used to fetch the block number after we established a session and received the @@ -88,10 +91,10 @@ /// The fetcher streams `RLPx` related requests on a per-peer basis to this type. This type /// will then queue the request and notify the fetcher once the result has been /// received. - state_fetcher: StateFetcher, + state_fetcher: StateFetcher, } -impl NetworkState { +impl NetworkState { /// Create a new state instance with the given params pub(crate) fn new( client: BlockNumReader, @@ -126,7 +129,7 @@ } /// Returns a new [`FetchClient`] - pub(crate) fn fetch_client(&self) -> FetchClient { + pub(crate) fn fetch_client(&self) -> FetchClient { self.state_fetcher.client() } @@ -144,7 +147,7 @@ peer: PeerId, capabilities: Arc, status: Arc, - request_tx: PeerRequestSender, + request_tx: PeerRequestSender>, timeout: Arc, ) { debug_assert!(!self.active_peers.contains_key(&peer), "Already connected; not possible"); @@ -182,12 +185,12 @@ /// > the total number of peers) using the `NewBlock` message.
/// /// See also - pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage) { + pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage) { // send a `NewBlock` message to a fraction of the connected peers (square root of the total // number of peers) let num_propagate = (self.active_peers.len() as f64).sqrt() as u64 + 1; - let number = msg.block.block.header.number; + let number = msg.block.block.header().number(); let mut count = 0; // Shuffle to propagate to a random sample of peers on every block announcement @@ -224,8 +227,8 @@ /// Completes the block propagation process started in [`NetworkState::announce_new_block()`] /// by sending `NewBlockHash` broadcast to all peers that haven't seen it yet. - pub(crate) fn announce_new_block_hash(&mut self, msg: NewBlockMessage) { - let number = msg.block.block.header.number; + pub(crate) fn announce_new_block_hash(&mut self, msg: NewBlockMessage) { + let number = msg.block.block.header().number(); let hashes = NewBlockHashes(vec![BlockHashNumber { hash: msg.hash, number }]); for (peer_id, peer) in &mut self.active_peers { if peer.blocks.contains(&msg.hash) { @@ -382,7 +385,7 @@ } /// Handle the outcome of processed response, for example directly queue another request. - fn on_block_response_outcome(&mut self, outcome: BlockResponseOutcome) -> Option { + fn on_block_response_outcome(&mut self, outcome: BlockResponseOutcome) { match outcome { BlockResponseOutcome::Request(peer, request) => { self.handle_block_request(peer, request); @@ -391,7 +394,7 @@ self.peers_manager.apply_reputation_change(&peer, reputation_change); } } - None } /// Invoked when received a response from a connected peer. /// /// Delegates the response result to the fetcher which may return an outcome specific /// instruction that needs to be handled in [`Self::on_block_response_outcome`]. This could be /// a follow-up request or an instruction to slash the peer's reputation.
- fn on_eth_response(&mut self, peer: PeerId, resp: PeerResponseResult) -> Option { - match resp { + fn on_eth_response(&mut self, peer: PeerId, resp: PeerResponseResult) { + let outcome = match resp { PeerResponseResult::BlockHeaders(res) => { - let outcome = self.state_fetcher.on_block_headers_response(peer, res)?; - self.on_block_response_outcome(outcome) + self.state_fetcher.on_block_headers_response(peer, res) } PeerResponseResult::BlockBodies(res) => { - let outcome = self.state_fetcher.on_block_bodies_response(peer, res)?; - self.on_block_response_outcome(outcome) + self.state_fetcher.on_block_bodies_response(peer, res) } _ => None, + }; + + if let Some(outcome) = outcome { + self.on_block_response_outcome(outcome); + } } /// Advances the state - pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { loop { // drain buffered messages if let Some(message) = self.queued_messages.pop_front() { @@ -433,13 +437,14 @@ } } - // need to buffer results here to make borrow checker happy - let mut closed_sessions = Vec::new(); - let mut received_responses = Vec::new(); + loop { + // need to buffer results here to make borrow checker happy + let mut closed_sessions = Vec::new(); + let mut received_responses = Vec::new(); - // poll all connected peers for responses - for (id, peer) in &mut self.active_peers { - if let Some(mut response) = peer.pending_response.take() { + // poll all connected peers for responses + for (id, peer) in &mut self.active_peers { + let Some(mut response) = peer.pending_response.take() else { continue }; match response.poll(cx) { Poll::Ready(res) => { // check if the error is due to a closed channel to the session @@ -450,7 +455,8 @@ "Request canceled, response channel from session closed." ); // if the channel is closed, this means the peer session is also - // closed, in which case we can invoke the [Self::on_closed_session] + // closed, in which case we can invoke the + // [Self::on_closed_session] // immediately, preventing followup requests and propagate the // connection dropped error closed_sessions.push(*id); @@ -464,15 +470,17 @@ } }; } - } - for peer in closed_sessions { - self.on_session_closed(peer) - } + for peer in closed_sessions { + self.on_session_closed(peer) + } + + if received_responses.is_empty() { + break; + } - for (peer_id, resp) in received_responses { - if let Some(action) = self.on_eth_response(peer_id, resp) { - self.queued_messages.push_back(action); + for (peer_id, resp) in received_responses { + self.on_eth_response(peer_id, resp); } } @@ -481,6 +489,8 @@ self.on_peer_action(action); } + // We need to poll again in case we have received any responses because they may have + // triggered follow-up requests. if self.queued_messages.is_empty() { return Poll::Pending } @@ -492,29 +502,29 @@ /// /// For example known blocks, so we can decide what to announce. #[derive(Debug)] -pub(crate) struct ActivePeer { /// Best block of the peer. pub(crate) best_hash: B256, /// The capabilities of the remote peer. #[allow(dead_code)] pub(crate) capabilities: Arc, /// A communication channel directly to the session task. - pub(crate) request_tx: PeerRequestSender, + pub(crate) request_tx: PeerRequestSender>, /// The response receiver for a currently active request to that peer.
- pub(crate) pending_response: Option, + pub(crate) pending_response: Option>, /// Blocks we know the peer has. pub(crate) blocks: LruCache, } /// Message variants triggered by the [`NetworkState`] #[derive(Debug)] -pub(crate) enum StateAction { +pub(crate) enum StateAction { /// Dispatch a `NewBlock` message to the peer NewBlock { /// Target of the message peer_id: PeerId, /// The `NewBlock` message - block: NewBlockMessage, + block: NewBlockMessage, }, NewBlockHashes { /// Target of the message @@ -551,12 +561,13 @@ mod tests { sync::{atomic::AtomicU64, Arc}, }; + use alloy_consensus::Header; use alloy_primitives::B256; use reth_eth_wire::{BlockBodies, Capabilities, Capability, EthVersion}; use reth_network_api::PeerRequestSender; use reth_network_p2p::{bodies::client::BodiesClient, error::RequestError}; use reth_network_peers::PeerId; - use reth_primitives::{BlockBody, Header}; + use reth_primitives::BlockBody; use reth_provider::test_utils::NoopProvider; use tokio::sync::mpsc; use tokio_stream::{wrappers::ReceiverStream, StreamExt}; diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index c1fe9f9e2315..47447783f428 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -1,20 +1,3 @@ -use std::{ - io, - net::SocketAddr, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; - -use futures::Stream; -use reth_eth_wire::{ - capability::CapabilityMessage, errors::EthStreamError, Capabilities, DisconnectReason, - EthVersion, Status, -}; -use reth_network_api::PeerRequestSender; -use reth_network_peers::PeerId; -use tracing::trace; - use crate::{ listener::{ConnectionListener, ListenerEvent}, message::PeerMessage, @@ -23,6 +6,21 @@ use crate::{ session::{Direction, PendingSessionHandshakeError, SessionEvent, SessionId, SessionManager}, state::{NetworkState, StateAction}, }; +use futures::Stream; +use reth_eth_wire::{ + capability::CapabilityMessage, errors::EthStreamError, Capabilities, DisconnectReason, + EthNetworkPrimitives, EthVersion, NetworkPrimitives, Status, +}; +use reth_network_api::{PeerRequest, PeerRequestSender}; +use reth_network_peers::PeerId; +use std::{ + io, + net::SocketAddr, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; +use tracing::trace; #[cfg_attr(doc, aquamarine::aquamarine)] /// Contains the connectivity related state of the network. @@ -50,23 +48,23 @@ use crate::{ /// `include_mmd!("docs/mermaid/swarm.mmd`") #[derive(Debug)] #[must_use = "Swarm does nothing unless polled"] -pub(crate) struct Swarm { +pub(crate) struct Swarm { /// Listens for new incoming connections. incoming: ConnectionListener, /// All sessions. - sessions: SessionManager, + sessions: SessionManager, /// Tracks the entire state of the network and handles events received from the sessions. - state: NetworkState, + state: NetworkState, } // === impl Swarm === -impl Swarm { +impl Swarm { /// Configures a new swarm instance. pub(crate) const fn new( incoming: ConnectionListener, - sessions: SessionManager, - state: NetworkState, + sessions: SessionManager, + state: NetworkState, ) -> Self { Self { incoming, sessions, state } } @@ -77,12 +75,12 @@ impl Swarm { } /// Access to the state. - pub(crate) const fn state(&self) -> &NetworkState { + pub(crate) const fn state(&self) -> &NetworkState { &self.state } /// Mutable access to the state. 
- pub(crate) fn state_mut(&mut self) -> &mut NetworkState { + pub(crate) fn state_mut(&mut self) -> &mut NetworkState { &mut self.state } @@ -92,17 +90,17 @@ impl Swarm { } /// Access to the [`SessionManager`]. - pub(crate) const fn sessions(&self) -> &SessionManager { + pub(crate) const fn sessions(&self) -> &SessionManager { &self.sessions } /// Mutable access to the [`SessionManager`]. - pub(crate) fn sessions_mut(&mut self) -> &mut SessionManager { + pub(crate) fn sessions_mut(&mut self) -> &mut SessionManager { &mut self.sessions } } -impl Swarm { +impl Swarm { /// Triggers a new outgoing connection to the given node pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_id: PeerId) { self.sessions.dial_outbound(remote_addr, remote_id) @@ -112,7 +110,7 @@ impl Swarm { /// /// This either updates the state or produces a new [`SwarmEvent`] that is bubbled up to the /// manager. - fn on_session_event(&mut self, event: SessionEvent) -> Option { + fn on_session_event(&mut self, event: SessionEvent) -> Option> { match event { SessionEvent::SessionEstablished { peer_id, @@ -181,7 +179,7 @@ impl Swarm { /// Callback for events produced by [`ConnectionListener`]. /// /// Depending on the event, this will produce a new [`SwarmEvent`]. - fn on_connection(&mut self, event: ListenerEvent) -> Option { + fn on_connection(&mut self, event: ListenerEvent) -> Option> { match event { ListenerEvent::Error(err) => return Some(SwarmEvent::TcpListenerError(err)), ListenerEvent::ListenerClosed { local_address: address } => { @@ -229,7 +227,7 @@ impl Swarm { } /// Hook for actions pulled from the state - fn on_state_action(&mut self, event: StateAction) -> Option { + fn on_state_action(&mut self, event: StateAction) -> Option> { match event { StateAction::Connect { remote_addr, peer_id } => { self.dial_outbound(remote_addr, peer_id); @@ -286,8 +284,8 @@ impl Swarm { } } -impl Stream for Swarm { - type Item = SwarmEvent; +impl Stream for Swarm { + type Item = SwarmEvent; /// This advances all components. /// @@ -338,13 +336,13 @@ impl Stream for Swarm { /// All events created or delegated by the [`Swarm`] that represents changes to the state of the /// network. -pub(crate) enum SwarmEvent { +pub(crate) enum SwarmEvent { /// Events related to the actual network protocol. ValidMessage { /// The peer that sent the message peer_id: PeerId, /// Message received from the peer - message: PeerMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. 
InvalidCapabilityMessage { @@ -394,7 +392,7 @@ pub(crate) enum SwarmEvent { capabilities: Arc, /// negotiated eth version version: EthVersion, - messages: PeerRequestSender, + messages: PeerRequestSender>, status: Arc, direction: Direction, }, diff --git a/crates/net/network/src/test_utils/init.rs b/crates/net/network/src/test_utils/init.rs index 767f6818091a..87ccbb5f9d79 100644 --- a/crates/net/network/src/test_utils/init.rs +++ b/crates/net/network/src/test_utils/init.rs @@ -1,7 +1,6 @@ -use std::{net::SocketAddr, time::Duration}; - use enr::{k256::ecdsa::SigningKey, Enr, EnrPublicKey}; use reth_network_peers::PeerId; +use std::{net::SocketAddr, time::Duration}; /// The timeout for tests that create a `GethInstance` pub const GETH_TIMEOUT: Duration = Duration::from_secs(60); diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index d92272a871e0..9801ecf9293a 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -1,13 +1,13 @@ //! A network implementation for testing purposes. -use std::{ - fmt, - future::Future, - net::{Ipv4Addr, SocketAddr, SocketAddrV4}, - pin::Pin, - task::{Context, Poll}, +use crate::{ + builder::ETH_REQUEST_CHANNEL_CAPACITY, + error::NetworkError, + eth_requests::EthRequestHandler, + protocol::IntoRlpxSubProtocol, + transactions::{TransactionsHandle, TransactionsManager, TransactionsManagerConfig}, + NetworkConfig, NetworkConfigBuilder, NetworkHandle, NetworkManager, }; - use futures::{FutureExt, StreamExt}; use pin_project::pin_project; use reth_chainspec::{Hardforks, MAINNET}; @@ -27,6 +27,13 @@ use reth_transaction_pool::{ EthTransactionPool, TransactionPool, TransactionValidationTaskExecutor, }; use secp256k1::SecretKey; +use std::{ + fmt, + future::Future, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + pin::Pin, + task::{Context, Poll}, +}; use tokio::{ sync::{ mpsc::{channel, unbounded_channel}, @@ -35,15 +42,6 @@ use tokio::{ task::JoinHandle, }; -use crate::{ - builder::ETH_REQUEST_CHANNEL_CAPACITY, - error::NetworkError, - eth_requests::EthRequestHandler, - protocol::IntoRlpxSubProtocol, - transactions::{TransactionsHandle, TransactionsManager, TransactionsManagerConfig}, - NetworkConfig, NetworkConfigBuilder, NetworkHandle, NetworkManager, -}; - /// A test network consisting of multiple peers. pub struct Testnet { /// All running peers in the network. 
@@ -196,7 +194,11 @@ where impl Testnet where - C: BlockReader + HeaderProvider + Clone + Unpin + 'static, + C: BlockReader + + HeaderProvider + + Clone + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, { /// Spawns the testnet to a separate task @@ -255,7 +257,10 @@ impl fmt::Debug for Testnet { impl Future for Testnet where - C: BlockReader + HeaderProvider + Unpin + 'static, + C: BlockReader + + HeaderProvider + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, { type Output = (); @@ -450,7 +455,10 @@ where impl Future for Peer where - C: BlockReader + HeaderProvider + Unpin + 'static, + C: BlockReader + + HeaderProvider + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, { type Output = (); diff --git a/crates/net/network/src/transactions/config.rs b/crates/net/network/src/transactions/config.rs index b838f7cfe71b..db59ffac5cc0 100644 --- a/crates/net/network/src/transactions/config.rs +++ b/crates/net/network/src/transactions/config.rs @@ -1,5 +1,3 @@ -use derive_more::Constructor; - use super::{ DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER, DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, @@ -9,6 +7,7 @@ use crate::transactions::constants::tx_fetcher::{ DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, }; +use derive_more::Constructor; /// Configuration for managing transactions within the network. #[derive(Debug, Clone)] diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 00a9158233b7..180a619fff9e 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -25,13 +25,18 @@ //! before it's re-tried. Nonetheless, the capacity of the buffered hashes cache must be large //! enough to buffer many hashes during network failure, to allow for recovery. 
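// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the diff): the fetcher module docs above
// call for a bounded buffer of hashes awaiting retry, sized so that eviction,
// not unbounded growth, is the failure mode during long network outages. A
// minimal sketch of such a bounded FIFO retry buffer (hypothetical; reth uses
// its own `LruCache`/`LruMap` types for this):

use std::collections::VecDeque;

struct RetryBuffer<T> {
    capacity: usize,
    items: VecDeque<T>,
}

impl<T> RetryBuffer<T> {
    fn new(capacity: usize) -> Self {
        Self { capacity, items: VecDeque::with_capacity(capacity) }
    }

    // buffer a hash for retry, evicting the oldest entry once full
    fn push(&mut self, item: T) {
        if self.items.len() == self.capacity {
            self.items.pop_front();
        }
        self.items.push_back(item);
    }

    // next hash to retry once an idle fallback peer becomes available
    fn pop(&mut self) -> Option<T> {
        self.items.pop_front()
    }
}
// ---------------------------------------------------------------------------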
-use std::{ - collections::HashMap, - pin::Pin, - task::{ready, Context, Poll}, - time::Duration, +use super::{ + config::TransactionFetcherConfig, + constants::{tx_fetcher::*, SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST}, + MessageFilter, PeerMetadata, PooledTransactions, + SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, +}; +use crate::{ + cache::{LruCache, LruMap}, + duration_metered_exec, + metrics::TransactionFetcherMetrics, + transactions::{validation, PartiallyFilterMessage}, }; - use alloy_primitives::TxHash; use derive_more::{Constructor, Deref}; use futures::{stream::FuturesUnordered, Future, FutureExt, Stream, StreamExt}; @@ -40,6 +45,7 @@ DedupPayload, EthVersion, GetPooledTransactions, HandleMempoolData, HandleVersionedMempoolData, PartiallyValidData, RequestTxHashes, ValidAnnouncementData, }; +use reth_eth_wire_types::{EthNetworkPrimitives, NetworkPrimitives}; use reth_network_api::PeerRequest; use reth_network_p2p::error::{RequestError, RequestResult}; use reth_network_peers::PeerId; @@ -47,30 +53,23 @@ use reth_primitives::PooledTransactionsElement; use schnellru::ByLength; #[cfg(debug_assertions)] use smallvec::{smallvec, SmallVec}; +use std::{ + collections::HashMap, + pin::Pin, + task::{ready, Context, Poll}, + time::Duration, +}; use tokio::sync::{mpsc::error::TrySendError, oneshot, oneshot::error::RecvError}; use tracing::{debug, trace}; use validation::FilterOutcome; -use super::{ - config::TransactionFetcherConfig, - constants::{tx_fetcher::*, SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST}, - MessageFilter, PeerMetadata, PooledTransactions, - SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, -}; -use crate::{ - cache::{LruCache, LruMap}, - duration_metered_exec, - metrics::TransactionFetcherMetrics, - transactions::{validation, PartiallyFilterMessage}, -}; - /// The type responsible for fetching missing transactions from peers. /// /// This will keep track of unique transaction hashes that are currently being fetched and submits /// new requests on announced hashes. #[derive(Debug)] #[pin_project] -pub struct TransactionFetcher { /// All peers to which a [`GetPooledTransactions`] request is inflight. pub active_peers: LruMap, /// All currently active [`GetPooledTransactions`] requests. @@ -79,7 +78,7 @@ /// It's disjoint from the set of hashes which are awaiting an idle fallback peer in order to /// be fetched. #[pin] - pub inflight_requests: FuturesUnordered, + pub inflight_requests: FuturesUnordered>, /// Hashes that are awaiting an idle fallback peer so they can be fetched. /// /// This is a subset of all hashes in the fetcher, and is disjoint from the set of hashes for @@ -95,9 +94,7 @@ metrics: TransactionFetcherMetrics, } -// === impl TransactionFetcher === - -impl TransactionFetcher { +impl TransactionFetcher { /// Removes the peer from the active set. pub(crate) fn remove_peer(&mut self, peer_id: &PeerId) { self.active_peers.remove(peer_id); @@ -289,7 +286,7 @@ // tx is really big, pack request with single tx if size >= self.info.soft_limit_byte_size_pooled_transactions_response_on_pack_request { - return hashes_from_announcement_iter.collect::() + return hashes_from_announcement_iter.collect() } acc_size_response = size; } @@ -431,7 +428,7 @@ /// the request by checking the transactions seen by the peer against the buffer.
pub fn on_fetch_pending_hashes( &mut self, - peers: &HashMap, + peers: &HashMap>, has_capacity_wrt_pending_pool_imports: impl Fn(usize) -> bool, ) { let init_capacity_req = approx_capacity_get_pooled_transactions_req_eth68(&self.info); @@ -634,7 +631,7 @@ impl TransactionFetcher { pub fn request_transactions_from_peer( &mut self, new_announced_hashes: RequestTxHashes, - peer: &PeerMetadata, + peer: &PeerMetadata, ) -> Option { let peer_id: PeerId = peer.request_tx.peer_id; let conn_eth_version = peer.version; @@ -688,10 +685,8 @@ impl TransactionFetcher { } let (response, rx) = oneshot::channel(); - let req: PeerRequest = PeerRequest::GetPooledTransactions { - request: GetPooledTransactions( - new_announced_hashes.iter().copied().collect::>(), - ), + let req = PeerRequest::GetPooledTransactions { + request: GetPooledTransactions(new_announced_hashes.iter().copied().collect()), response, }; @@ -900,7 +895,9 @@ impl TransactionFetcher { approx_capacity_get_pooled_transactions_req_eth66() } } +} +impl TransactionFetcher { /// Processes a resolved [`GetPooledTransactions`] request. Queues the outcome as a /// [`FetchEvent`], which will then be streamed by /// [`TransactionsManager`](super::TransactionsManager). @@ -1012,8 +1009,7 @@ impl TransactionFetcher { // self.try_buffer_hashes_for_retry(requested_hashes, &peer_id); - let transactions = - valid_payload.into_data().into_values().collect::(); + let transactions = valid_payload.into_data().into_values().collect(); FetchEvent::TransactionsFetched { peer_id, transactions } } @@ -1049,7 +1045,7 @@ impl Stream for TransactionFetcher { } } -impl Default for TransactionFetcher { +impl Default for TransactionFetcher { fn default() -> Self { Self { active_peers: LruMap::new(DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS), @@ -1096,13 +1092,13 @@ impl TxFetchMetadata { /// Represents possible events from fetching transactions. #[derive(Debug)] -pub enum FetchEvent { +pub enum FetchEvent { /// Triggered when transactions are successfully fetched. TransactionsFetched { /// The ID of the peer from which transactions were fetched. peer_id: PeerId, /// The transactions that were fetched, if available. - transactions: PooledTransactions, + transactions: PooledTransactions, }, /// Triggered when there is an error in fetching transactions. FetchError { @@ -1120,22 +1116,22 @@ pub enum FetchEvent { /// An inflight request for [`PooledTransactions`] from a peer. #[derive(Debug)] -pub struct GetPooledTxRequest { +pub struct GetPooledTxRequest { peer_id: PeerId, /// Transaction hashes that were requested, for cleanup purposes requested_hashes: RequestTxHashes, - response: oneshot::Receiver>, + response: oneshot::Receiver>>, } /// Upon reception of a response, a [`GetPooledTxRequest`] is deconstructed to form a /// [`GetPooledTxResponse`]. #[derive(Debug)] -pub struct GetPooledTxResponse { +pub struct GetPooledTxResponse { peer_id: PeerId, /// Transaction hashes that were requested, for cleanup purposes, since peer may only return a /// subset of requested hashes. 
requested_hashes: RequestTxHashes, - result: Result, RecvError>, + result: Result>, RecvError>, } /// Stores the response receiver made by sending a [`GetPooledTransactions`] request to a peer's @@ -1143,24 +1139,24 @@ pub struct GetPooledTxResponse { #[must_use = "futures do nothing unless polled"] #[pin_project::pin_project] #[derive(Debug)] -pub struct GetPooledTxRequestFut { +pub struct GetPooledTxRequestFut { #[pin] - inner: Option, + inner: Option>, } -impl GetPooledTxRequestFut { +impl GetPooledTxRequestFut { #[inline] const fn new( peer_id: PeerId, requested_hashes: RequestTxHashes, - response: oneshot::Receiver>, + response: oneshot::Receiver>>, ) -> Self { Self { inner: Some(GetPooledTxRequest { peer_id, requested_hashes, response }) } } } -impl Future for GetPooledTxRequestFut { - type Output = GetPooledTxResponse; +impl Future for GetPooledTxRequestFut { + type Output = GetPooledTxResponse; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut req = self.as_mut().project().inner.take().expect("polled after completion"); @@ -1202,13 +1198,10 @@ impl DedupPayload for VerifiedPooledTransactions { } fn dedup(self) -> PartiallyValidData { - let Self { txns } = self; - let unique_fetched = txns - .into_iter() - .map(|tx| (*tx.hash(), tx)) - .collect::>(); - - PartiallyValidData::from_raw_data(unique_fetched, None) + PartiallyValidData::from_raw_data( + self.txns.into_iter().map(|tx| (*tx.hash(), tx)).collect(), + None, + ) } } @@ -1380,7 +1373,7 @@ mod test { // RIG TEST - let tx_fetcher = &mut TransactionFetcher::default(); + let tx_fetcher = &mut TransactionFetcher::::default(); let eth68_hashes = [ B256::from_slice(&[1; 32]), diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 4e23c8527b47..d533aee102b3 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -18,25 +18,25 @@ pub use validation::*; pub(crate) use fetcher::{FetchEvent, TransactionFetcher}; use self::constants::{tx_manager::*, DEFAULT_SOFT_LIMIT_BYTE_SIZE_TRANSACTIONS_BROADCAST_MESSAGE}; -use constants::SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE; - -use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, - pin::Pin, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, +use crate::{ + budget::{ + DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS, + DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, + DEFAULT_BUDGET_TRY_DRAIN_STREAM, }, - task::{Context, Poll}, - time::{Duration, Instant}, + cache::LruCache, + duration_metered_exec, metered_poll_nested_stream_with_budget, + metrics::{TransactionsManagerMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, + NetworkHandle, }; - use alloy_primitives::{TxHash, B256}; +use constants::SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE; use futures::{stream::FuturesUnordered, Future, StreamExt}; use reth_eth_wire::{ - DedupPayload, EthVersion, GetPooledTransactions, HandleMempoolData, HandleVersionedMempoolData, - NewPooledTransactionHashes, NewPooledTransactionHashes66, NewPooledTransactionHashes68, - PooledTransactions, RequestTxHashes, Transactions, + DedupPayload, EthNetworkPrimitives, EthVersion, GetPooledTransactions, HandleMempoolData, + HandleVersionedMempoolData, NetworkPrimitives, NewPooledTransactionHashes, + NewPooledTransactionHashes66, NewPooledTransactionHashes68, PooledTransactions, + RequestTxHashes, Transactions, }; use 
reth_metrics::common::mpsc::UnboundedMeteredReceiver; use reth_network_api::{ @@ -48,29 +48,28 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{PooledTransactionsElement, TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives::{PooledTransactionsElement, TransactionSigned}; +use reth_primitives_traits::{SignedTransaction, TxType}; use reth_tokio_util::EventStream; use reth_transaction_pool::{ error::{PoolError, PoolResult}, GetPooledTransactionLimit, PoolTransaction, PropagateKind, PropagatedTransactions, TransactionPool, ValidPoolTransaction, }; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet}, + pin::Pin, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, + time::{Duration, Instant}, +}; use tokio::sync::{mpsc, oneshot, oneshot::error::RecvError}; use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream}; use tracing::{debug, trace}; -use crate::{ - budget::{ - DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS, - DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, - DEFAULT_BUDGET_TRY_DRAIN_STREAM, - }, - cache::LruCache, - duration_metered_exec, metered_poll_nested_stream_with_budget, - metrics::{TransactionsManagerMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, - NetworkHandle, -}; - /// The future for importing transactions into the pool. /// /// Resolves with the result of each transaction import. @@ -84,42 +83,26 @@ pub type PoolImportFuture = Pin>> /// For example [`TransactionsHandle::get_peer_transaction_hashes`] returns the transaction hashes /// known by a specific peer. #[derive(Debug, Clone)] -pub struct TransactionsHandle { +pub struct TransactionsHandle { /// Command channel to the [`TransactionsManager`] - manager_tx: mpsc::UnboundedSender, + manager_tx: mpsc::UnboundedSender>, } /// Implementation of the `TransactionsHandle` API for use in testnet via type /// [`PeerHandle`](crate::test_utils::PeerHandle). -impl TransactionsHandle { - fn send(&self, cmd: TransactionsCommand) { +impl TransactionsHandle { + fn send(&self, cmd: TransactionsCommand) { let _ = self.manager_tx.send(cmd); } /// Fetch the [`PeerRequestSender`] for the given peer. - async fn peer_handle(&self, peer_id: PeerId) -> Result, RecvError> { - let (tx, rx) = oneshot::channel(); - self.send(TransactionsCommand::GetPeerSender { peer_id, peer_request_sender: tx }); - rx.await - } - - /// Requests the transactions directly from the given peer. - /// - /// Returns `None` if the peer is not connected. - /// - /// **Note**: this returns the response from the peer as received. - pub async fn get_pooled_transactions_from( + async fn peer_handle( &self, peer_id: PeerId, - hashes: Vec, - ) -> Result>, RequestError> { - let Some(peer) = self.peer_handle(peer_id).await? else { return Ok(None) }; - + ) -> Result>>, RecvError> { let (tx, rx) = oneshot::channel(); - let request = PeerRequest::GetPooledTransactions { request: hashes.into(), response: tx }; - peer.try_send(request).ok(); - - rx.await?.map(|res| Some(res.0)) + self.send(TransactionsCommand::GetPeerSender { peer_id, peer_request_sender: tx }); + rx.await } /// Manually propagate the transaction that belongs to the hash. @@ -179,6 +162,25 @@ impl TransactionsHandle { let res = self.get_transaction_hashes(vec![peer]).await?; Ok(res.into_values().next().unwrap_or_default()) } + + /// Requests the transactions directly from the given peer. 
+ /// + /// Returns `None` if the peer is not connected. + /// + /// **Note**: this returns the response from the peer as received. + pub async fn get_pooled_transactions_from( + &self, + peer_id: PeerId, + hashes: Vec, + ) -> Result>, RequestError> { + let Some(peer) = self.peer_handle(peer_id).await? else { return Ok(None) }; + + let (tx, rx) = oneshot::channel(); + let request = PeerRequest::GetPooledTransactions { request: hashes.into(), response: tx }; + peer.try_send(request).ok(); + + rx.await?.map(|res| Some(res.0)) + } } /// Manages transactions on top of the p2p network. @@ -200,17 +202,17 @@ impl TransactionsHandle { /// propagate new transactions over the network. #[derive(Debug)] #[must_use = "Manager does nothing unless polled."] -pub struct TransactionsManager { +pub struct TransactionsManager { /// Access to the transaction pool. pool: Pool, /// Network access. - network: NetworkHandle, + network: NetworkHandle, /// Subscriptions to all network related events. /// /// From which we get all new incoming transaction related messages. - network_events: EventStream, + network_events: EventStream>>, /// Transaction fetcher to handle inflight and missing transaction requests. - transaction_fetcher: TransactionFetcher, + transaction_fetcher: TransactionFetcher, /// All currently pending transactions grouped by peers. /// /// This way we can track incoming transactions and prevent multiple pool imports for the same @@ -233,16 +235,16 @@ pub struct TransactionsManager { /// Bad imports. bad_imports: LruCache, /// All the connected peers. - peers: HashMap, + peers: HashMap>, /// Send half for the command channel. /// /// This is kept so that a new [`TransactionsHandle`] can be created at any time. - command_tx: mpsc::UnboundedSender, + command_tx: mpsc::UnboundedSender>, /// Incoming commands from [`TransactionsHandle`]. /// /// This will only receive commands if a user manually sends a command to the manager through /// the [`TransactionsHandle`] to interact with this type directly. - command_rx: UnboundedReceiverStream, + command_rx: UnboundedReceiverStream>, /// A stream that yields new __pending__ transactions. /// /// A transaction is considered __pending__ if it is executable on the current state of the @@ -312,22 +314,106 @@ impl TransactionsManager { } } -// === impl TransactionsManager === - -impl TransactionsManager -where - Pool: TransactionPool, -{ +impl TransactionsManager { /// Returns a new handle that can send commands to this type. - pub fn handle(&self) -> TransactionsHandle { + pub fn handle(&self) -> TransactionsHandle { TransactionsHandle { manager_tx: self.command_tx.clone() } } -} -impl TransactionsManager -where - Pool: TransactionPool + 'static, -{ + /// Returns `true` if [`TransactionsManager`] has capacity to request pending hashes. Returns + /// `false` if [`TransactionsManager`] is operating close to full capacity. 
+ fn has_capacity_for_fetching_pending_hashes(&self) -> bool { + self.pending_pool_imports_info + .has_capacity(self.pending_pool_imports_info.max_pending_pool_imports) && + self.transaction_fetcher.has_capacity_for_fetching_pending_hashes() + } + + fn report_peer_bad_transactions(&self, peer_id: PeerId) { + self.report_peer(peer_id, ReputationChangeKind::BadTransactions); + self.metrics.reported_bad_transactions.increment(1); + } + + fn report_peer(&self, peer_id: PeerId, kind: ReputationChangeKind) { + trace!(target: "net::tx", ?peer_id, ?kind, "reporting reputation change"); + self.network.reputation_change(peer_id, kind); + } + + fn report_already_seen(&self, peer_id: PeerId) { + trace!(target: "net::tx", ?peer_id, "Penalizing peer for already seen transaction"); + self.network.reputation_change(peer_id, ReputationChangeKind::AlreadySeenTransaction); + } + + /// Clear the transaction + fn on_good_import(&mut self, hash: TxHash) { + self.transactions_by_peers.remove(&hash); + } + + /// Penalize the peers that intentionally sent the bad transaction, and cache it to avoid + /// fetching or importing it again. + /// + /// Errors that count as bad transactions are: + /// + /// - intrinsic gas too low + /// - exceeds gas limit + /// - gas uint overflow + /// - exceeds max init code size + /// - oversized data + /// - signer account has bytecode + /// - chain id mismatch + /// - old legacy chain id + /// - tx type not supported + /// + /// (and additionally for blobs txns...) + /// + /// - no blobs + /// - too many blobs + /// - invalid kzg proof + /// - kzg error + /// - not blob transaction (tx type mismatch) + /// - wrong versioned kzg commitment hash + fn on_bad_import(&mut self, err: PoolError) { + let peers = self.transactions_by_peers.remove(&err.hash); + + // if we're _currently_ syncing, we ignore a bad transaction + if !err.is_bad_transaction() || self.network.is_syncing() { + return + } + // otherwise we penalize the peer that sent the bad transaction, with the assumption that + // the peer should have known that this transaction is bad (e.g. violating consensus rules) + if let Some(peers) = peers { + for peer_id in peers { + self.report_peer_bad_transactions(peer_id); + } + } + self.metrics.bad_imports.increment(1); + self.bad_imports.insert(err.hash); + } + + /// Runs an operation to fetch hashes that are cached in [`TransactionFetcher`]. 
+ fn on_fetch_hashes_pending_fetch(&mut self) { + // try drain transaction hashes pending fetch + let info = &self.pending_pool_imports_info; + let max_pending_pool_imports = info.max_pending_pool_imports; + let has_capacity_wrt_pending_pool_imports = + |divisor| info.has_capacity(max_pending_pool_imports / divisor); + + self.transaction_fetcher + .on_fetch_pending_hashes(&self.peers, has_capacity_wrt_pending_pool_imports); + } + + fn on_request_error(&self, peer_id: PeerId, req_err: RequestError) { + let kind = match req_err { + RequestError::UnsupportedCapability => ReputationChangeKind::BadProtocol, + RequestError::Timeout => ReputationChangeKind::Timeout, + RequestError::ChannelClosed | RequestError::ConnectionDropped => { + // peer is already disconnected + return + } + RequestError::BadResponse => return self.report_peer_bad_transactions(peer_id), + }; + self.report_peer(peer_id, kind); + } + #[inline] fn update_poll_metrics(&self, start: Instant, poll_durations: TxManagerPollDurations) { let metrics = &self.metrics; @@ -353,50 +439,34 @@ where metrics.acc_duration_fetch_pending_hashes.set(acc_pending_fetch.as_secs_f64()); metrics.acc_duration_poll_commands.set(acc_cmds.as_secs_f64()); } +} - /// Request handler for an incoming request for transactions - fn on_get_pooled_transactions( - &mut self, - peer_id: PeerId, - request: GetPooledTransactions, - response: oneshot::Sender>, - ) { - if let Some(peer) = self.peers.get_mut(&peer_id) { - if self.network.tx_gossip_disabled() { - let _ = response.send(Ok(PooledTransactions::default())); - return +impl TransactionsManager +where + Pool: TransactionPool, + N: NetworkPrimitives, +{ + /// Processes a batch import results. + fn on_batch_import_result(&mut self, batch_results: Vec>) { + for res in batch_results { + match res { + Ok(hash) => { + self.on_good_import(hash); + } + Err(err) => { + self.on_bad_import(err); + } } - let transactions = self.pool.get_pooled_transaction_elements( - request.0, - GetPooledTransactionLimit::ResponseSizeSoftLimit( - self.transaction_fetcher.info.soft_limit_byte_size_pooled_transactions_response, - ), - ); - - trace!(target: "net::tx::propagation", sent_txs=?transactions.iter().map(|tx| *tx.hash()), "Sending requested transactions to peer"); - - // we sent a response at which point we assume that the peer is aware of the - // transactions - peer.seen_transactions.extend(transactions.iter().map(|tx| *tx.hash())); - - let resp = PooledTransactions(transactions); - let _ = response.send(Ok(resp)); } } - /// Invoked when transactions in the local mempool are considered __pending__. - /// - /// When a transaction in the local mempool is moved to the pending pool, we propagate them to - /// connected peers over network using the `Transactions` and `NewPooledTransactionHashes` - /// messages. The Transactions message relays complete transaction objects and is typically - /// sent to a small, random fraction of connected peers. - /// - /// All other peers receive a notification of the transaction hash and can request the - /// complete transaction object if it is unknown to them. The dissemination of complete - /// transactions to a fraction of peers usually ensures that all nodes receive the transaction - /// and won't need to request it. 
- fn on_new_pending_transactions(&mut self, hashes: Vec) { - // Nothing to propagate while initially syncing + /// Request handler for an incoming `NewPooledTransactionHashes` + fn on_new_pooled_transaction_hashes( + &mut self, + peer_id: PeerId, + msg: NewPooledTransactionHashes, + ) { + // If the node is initially syncing, ignore transactions if self.network.is_initially_syncing() { return } @@ -404,113 +474,244 @@ where return } - trace!(target: "net::tx", num_hashes=?hashes.len(), "Start propagating transactions"); + // get handle to peer's session, if the session is still active + let Some(peer) = self.peers.get_mut(&peer_id) else { + trace!( + peer_id = format!("{peer_id:#}"), + ?msg, + "discarding announcement from inactive peer" + ); - self.propagate_all(hashes); - } + return + }; + let client = peer.client_version.clone(); - /// Propagates the given transactions to the peers - /// - /// This fetches all transaction from the pool, including the 4844 blob transactions but - /// __without__ their sidecar, because 4844 transactions are only ever announced as hashes. - fn propagate_all(&mut self, hashes: Vec) { - let propagated = self.propagate_transactions( - self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(), - PropagationMode::Basic, - ); + // keep track of the transactions the peer knows + let mut count_txns_already_seen_by_peer = 0; + for tx in msg.iter_hashes().copied() { + if !peer.seen_transactions.insert(tx) { + count_txns_already_seen_by_peer += 1; + } + } + if count_txns_already_seen_by_peer > 0 { + // this may occur if transactions are sent or announced to a peer, at the same time as + // the peer sends/announces those hashes to us. this is because, marking + // txns as seen by a peer is done optimistically upon sending them to the + // peer. + self.metrics.messages_with_hashes_already_seen_by_peer.increment(1); + self.metrics + .occurrences_hash_already_seen_by_peer + .increment(count_txns_already_seen_by_peer); - // notify pool so events get fired - self.pool.on_propagated(propagated); - } + trace!(target: "net::tx", + %count_txns_already_seen_by_peer, + peer_id=format!("{peer_id:#}"), + ?client, + "Peer sent hashes that have already been marked as seen by peer" + ); - /// Propagate the transactions to all connected peers either as full objects or hashes. - /// - /// The message for new pooled hashes depends on the negotiated version of the stream. - /// See [`NewPooledTransactionHashes`] - /// - /// Note: EIP-4844 are disallowed from being broadcast in full and are only ever sent as hashes, see also . - fn propagate_transactions( - &mut self, - to_propagate: Vec, - propagation_mode: PropagationMode, - ) -> PropagatedTransactions { - let mut propagated = PropagatedTransactions::default(); - if self.network.tx_gossip_disabled() { - return propagated + self.report_already_seen(peer_id); } - // send full transactions to a set of the connected peers based on the configured mode - let max_num_full = self.config.propagation_mode.full_peer_count(self.peers.len()); + // 1. filter out spam + let (validation_outcome, mut partially_valid_msg) = + self.transaction_fetcher.filter_valid_message.partially_filter_valid_entries(msg); - // Note: Assuming ~random~ order due to random state of the peers map hasher - for (peer_idx, (peer_id, peer)) in self.peers.iter_mut().enumerate() { - // determine whether to send full tx objects or hashes. 
- let mut builder = if peer_idx > max_num_full { - PropagateTransactionsBuilder::pooled(peer.version) - } else { - PropagateTransactionsBuilder::full(peer.version) - }; + if validation_outcome == FilterOutcome::ReportPeer { + self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); + } - if propagation_mode.is_forced() { - builder.extend(to_propagate.iter()); - } else { - // Iterate through the transactions to propagate and fill the hashes and full - // transaction lists, before deciding whether or not to send full transactions to - // the peer. - for tx in &to_propagate { - // Only proceed if the transaction is not in the peer's list of seen - // transactions - if !peer.seen_transactions.contains(&tx.hash()) { - builder.push(tx); - } - } - } + // 2. filter out transactions pending import to pool + partially_valid_msg.retain_by_hash(|hash| !self.transactions_by_peers.contains_key(hash)); - if builder.is_empty() { - trace!(target: "net::tx", ?peer_id, "Nothing to propagate to peer; has seen all transactions"); - continue - } + // 3. filter out known hashes + // + // known txns have already been successfully fetched or received over gossip. + // + // most hashes will be filtered out here since this the mempool protocol is a gossip + // protocol, healthy peers will send many of the same hashes. + // + let hashes_count_pre_pool_filter = partially_valid_msg.len(); + self.pool.retain_unknown(&mut partially_valid_msg); + if hashes_count_pre_pool_filter > partially_valid_msg.len() { + let already_known_hashes_count = + hashes_count_pre_pool_filter - partially_valid_msg.len(); + self.metrics + .occurrences_hashes_already_in_pool + .increment(already_known_hashes_count as u64); + } - let PropagateTransactions { pooled, full } = builder.build(); + if partially_valid_msg.is_empty() { + // nothing to request + return + } - // send hashes if any - if let Some(mut new_pooled_hashes) = pooled { - // enforce tx soft limit per message for the (unlikely) event the number of - // hashes exceeds it - new_pooled_hashes - .truncate(SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE); + // 4. filter out invalid entries (spam) + // + // validates messages with respect to the given network, e.g. allowed tx types + // + let (validation_outcome, mut valid_announcement_data) = if partially_valid_msg + .msg_version() + .expect("partially valid announcement should have version") + .is_eth68() + { + // validate eth68 announcement data + self.transaction_fetcher + .filter_valid_message + .filter_valid_entries_68(partially_valid_msg) + } else { + // validate eth66 announcement data + self.transaction_fetcher + .filter_valid_message + .filter_valid_entries_66(partially_valid_msg) + }; - for hash in new_pooled_hashes.iter_hashes().copied() { - propagated.0.entry(hash).or_default().push(PropagateKind::Hash(*peer_id)); - // mark transaction as seen by peer - peer.seen_transactions.insert(hash); - } + if validation_outcome == FilterOutcome::ReportPeer { + self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); + } - trace!(target: "net::tx", ?peer_id, num_txs=?new_pooled_hashes.len(), "Propagating tx hashes to peer"); + if valid_announcement_data.is_empty() { + // no valid announcement data + return + } - // send hashes of transactions - self.network.send_transactions_hashes(*peer_id, new_pooled_hashes); - } + // 5. filter out already seen unknown hashes + // + // seen hashes are already in the tx fetcher, pending fetch. + // + // for any seen hashes add the peer as fallback. 
unseen hashes are loaded into the tx + // fetcher, hence they should be valid at this point. + let bad_imports = &self.bad_imports; + self.transaction_fetcher.filter_unseen_and_pending_hashes( + &mut valid_announcement_data, + |hash| bad_imports.contains(hash), + &peer_id, + |peer_id| self.peers.contains_key(&peer_id), + &client, + ); - // send full transactions, if any - if let Some(new_full_transactions) = full { - for tx in &new_full_transactions { - propagated.0.entry(tx.hash()).or_default().push(PropagateKind::Full(*peer_id)); - // mark transaction as seen by peer - peer.seen_transactions.insert(tx.hash()); - } + if valid_announcement_data.is_empty() { + // nothing to request + return + } - trace!(target: "net::tx", ?peer_id, num_txs=?new_full_transactions.len(), "Propagating full transactions to peer"); + trace!(target: "net::tx::propagation", + peer_id=format!("{peer_id:#}"), + hashes_len=valid_announcement_data.iter().count(), + hashes=?valid_announcement_data.keys().collect::>(), + msg_version=%valid_announcement_data.msg_version(), + client_version=%client, + "received previously unseen and pending hashes in announcement from peer" + ); - // send full transactions - self.network.send_transactions(*peer_id, new_full_transactions); - } + // only send request for hashes to idle peer, otherwise buffer hashes storing peer as + // fallback + if !self.transaction_fetcher.is_idle(&peer_id) { + // load message version before announcement data is destructed in packing + let msg_version = valid_announcement_data.msg_version(); + let (hashes, _version) = valid_announcement_data.into_request_hashes(); + + trace!(target: "net::tx", + peer_id=format!("{peer_id:#}"), + hashes=?*hashes, + %msg_version, + %client, + "buffering hashes announced by busy peer" + ); + + self.transaction_fetcher.buffer_hashes(hashes, Some(peer_id)); + + return } - // Update propagated transactions metrics - self.metrics.propagated_transactions.increment(propagated.0.len() as u64); + // load message version before announcement data type is destructed in packing + let msg_version = valid_announcement_data.msg_version(); + // + // demand recommended soft limit on response, however the peer may enforce an arbitrary + // limit on the response (2MB) + // + // request buffer is shrunk via call to pack request! 
+ let init_capacity_req = + self.transaction_fetcher.approx_capacity_get_pooled_transactions_req(msg_version); + let mut hashes_to_request = RequestTxHashes::with_capacity(init_capacity_req); + let surplus_hashes = + self.transaction_fetcher.pack_request(&mut hashes_to_request, valid_announcement_data); - propagated + if !surplus_hashes.is_empty() { + trace!(target: "net::tx", + peer_id=format!("{peer_id:#}"), + surplus_hashes=?*surplus_hashes, + %msg_version, + %client, + "some hashes in announcement from peer didn't fit in `GetPooledTransactions` request, buffering surplus hashes" + ); + + self.transaction_fetcher.buffer_hashes(surplus_hashes, Some(peer_id)); + } + + trace!(target: "net::tx", + peer_id=format!("{peer_id:#}"), + hashes=?*hashes_to_request, + %msg_version, + %client, + "sending hashes in `GetPooledTransactions` request to peer's session" + ); + + // request the missing transactions + // + // get handle to peer's session again, at this point we know it exists + let Some(peer) = self.peers.get_mut(&peer_id) else { return }; + if let Some(failed_to_request_hashes) = + self.transaction_fetcher.request_transactions_from_peer(hashes_to_request, peer) + { + let conn_eth_version = peer.version; + + trace!(target: "net::tx", + peer_id=format!("{peer_id:#}"), + failed_to_request_hashes=?*failed_to_request_hashes, + %conn_eth_version, + %client, + "sending `GetPooledTransactions` request to peer's session failed, buffering hashes" + ); + self.transaction_fetcher.buffer_hashes(failed_to_request_hashes, Some(peer_id)); + } + } +} + +impl TransactionsManager +where + Pool: TransactionPool, + N: NetworkPrimitives< + BroadcastedTransaction: SignedTransaction, + PooledTransaction: SignedTransaction, + >, + <::Transaction as PoolTransaction>::Consensus: + Into, + <::Transaction as PoolTransaction>::Pooled: Into, +{ + /// Invoked when transactions in the local mempool are considered __pending__. + /// + /// When a transaction in the local mempool is moved to the pending pool, we propagate them to + /// connected peers over network using the `Transactions` and `NewPooledTransactionHashes` + /// messages. The Transactions message relays complete transaction objects and is typically + /// sent to a small, random fraction of connected peers. + /// + /// All other peers receive a notification of the transaction hash and can request the + /// complete transaction object if it is unknown to them. The dissemination of complete + /// transactions to a fraction of peers usually ensures that all nodes receive the transaction + /// and won't need to request it. + fn on_new_pending_transactions(&mut self, hashes: Vec) { + // Nothing to propagate while initially syncing + if self.network.is_initially_syncing() { + return + } + if self.network.tx_gossip_disabled() { + return + } + + trace!(target: "net::tx", num_hashes=?hashes.len(), "Start propagating transactions"); + + self.propagate_all(hashes); } /// Propagate the full transactions to a specific peer. 
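The doc comment above spells out the propagation split: full transaction objects go to a small subset of peers (sized via `full_peer_count` on the configured propagation mode), while every remaining peer only receives a hash announcement and may fetch the bodies on demand via `GetPooledTransactions`. A stand-alone sketch of that split, with an illustrative helper name:

// First `max_full` peers get full bodies, the rest get hash announcements only.
fn split_propagation<P: Clone>(peers: &[P], max_full: usize) -> (Vec<P>, Vec<P>) {
    let full = peers.iter().take(max_full).cloned().collect();
    let hash_only = peers.iter().skip(max_full).cloned().collect();
    (full, hash_only)
}

fn main() {
    let peers = ["a", "b", "c", "d"];
    let (full, hash_only) = split_propagation(&peers, 2);
    assert_eq!((full.len(), hash_only.len()), (2, 2));
}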
@@ -539,7 +740,7 @@ where // Iterate through the transactions to propagate and fill the hashes and full // transaction for tx in to_propagate { - if !peer.seen_transactions.contains(&tx.hash()) { + if !peer.seen_transactions.contains(tx.tx_hash()) { // Only include if the peer hasn't seen the transaction full_transactions.push(&tx); } @@ -568,9 +769,9 @@ where // send full transactions, if any if let Some(new_full_transactions) = full { for tx in &new_full_transactions { - propagated.0.entry(tx.hash()).or_default().push(PropagateKind::Full(peer_id)); + propagated.0.entry(*tx.tx_hash()).or_default().push(PropagateKind::Full(peer_id)); // mark transaction as seen by peer - peer.seen_transactions.insert(tx.hash()); + peer.seen_transactions.insert(*tx.tx_hash()); } // send full transactions @@ -602,8 +803,12 @@ where return }; - let to_propagate: Vec = - self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(); + let to_propagate = self + .pool + .get_all(hashes) + .into_iter() + .map(PropagateTransaction::new) + .collect::>(); let mut propagated = PropagatedTransactions::default(); @@ -614,7 +819,7 @@ where hashes.extend(to_propagate) } else { for tx in to_propagate { - if !peer.seen_transactions.contains(&tx.hash()) { + if !peer.seen_transactions.contains(tx.tx_hash()) { // Include if the peer hasn't seen it hashes.push(&tx); } @@ -647,223 +852,152 @@ where self.pool.on_propagated(propagated); } - /// Request handler for an incoming `NewPooledTransactionHashes` - fn on_new_pooled_transaction_hashes( + /// Propagate the transactions to all connected peers either as full objects or hashes. + /// + /// The message for new pooled hashes depends on the negotiated version of the stream. + /// See [`NewPooledTransactionHashes`] + /// + /// Note: EIP-4844 are disallowed from being broadcast in full and are only ever sent as hashes, see also . + fn propagate_transactions( &mut self, - peer_id: PeerId, - msg: NewPooledTransactionHashes, - ) { - // If the node is initially syncing, ignore transactions - if self.network.is_initially_syncing() { - return - } + to_propagate: Vec>, + propagation_mode: PropagationMode, + ) -> PropagatedTransactions { + let mut propagated = PropagatedTransactions::default(); if self.network.tx_gossip_disabled() { - return + return propagated } - // get handle to peer's session, if the session is still active - let Some(peer) = self.peers.get_mut(&peer_id) else { - trace!( - peer_id = format!("{peer_id:#}"), - ?msg, - "discarding announcement from inactive peer" - ); + // send full transactions to a set of the connected peers based on the configured mode + let max_num_full = self.config.propagation_mode.full_peer_count(self.peers.len()); - return - }; - let client = peer.client_version.clone(); + // Note: Assuming ~random~ order due to random state of the peers map hasher + for (peer_idx, (peer_id, peer)) in self.peers.iter_mut().enumerate() { + // determine whether to send full tx objects or hashes. 
+ let mut builder = if peer_idx > max_num_full { + PropagateTransactionsBuilder::pooled(peer.version) + } else { + PropagateTransactionsBuilder::full(peer.version) + }; - // keep track of the transactions the peer knows - let mut count_txns_already_seen_by_peer = 0; - for tx in msg.iter_hashes().copied() { - if !peer.seen_transactions.insert(tx) { - count_txns_already_seen_by_peer += 1; + if propagation_mode.is_forced() { + builder.extend(to_propagate.iter()); + } else { + // Iterate through the transactions to propagate and fill the hashes and full + // transaction lists, before deciding whether or not to send full transactions to + // the peer. + for tx in &to_propagate { + // Only proceed if the transaction is not in the peer's list of seen + // transactions + if !peer.seen_transactions.contains(tx.tx_hash()) { + builder.push(tx); + } + } } - } - if count_txns_already_seen_by_peer > 0 { - // this may occur if transactions are sent or announced to a peer, at the same time as - // the peer sends/announces those hashes to us. this is because, marking - // txns as seen by a peer is done optimistically upon sending them to the - // peer. - self.metrics.messages_with_hashes_already_seen_by_peer.increment(1); - self.metrics - .occurrences_hash_already_seen_by_peer - .increment(count_txns_already_seen_by_peer); - - trace!(target: "net::tx", - %count_txns_already_seen_by_peer, - peer_id=format!("{peer_id:#}"), - ?client, - "Peer sent hashes that have already been marked as seen by peer" - ); - self.report_already_seen(peer_id); - } - - // 1. filter out spam - let (validation_outcome, mut partially_valid_msg) = - self.transaction_fetcher.filter_valid_message.partially_filter_valid_entries(msg); - - if validation_outcome == FilterOutcome::ReportPeer { - self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); - } - - // 2. filter out transactions pending import to pool - partially_valid_msg.retain_by_hash(|hash| !self.transactions_by_peers.contains_key(hash)); - - // 3. filter out known hashes - // - // known txns have already been successfully fetched or received over gossip. - // - // most hashes will be filtered out here since this the mempool protocol is a gossip - // protocol, healthy peers will send many of the same hashes. - // - let hashes_count_pre_pool_filter = partially_valid_msg.len(); - self.pool.retain_unknown(&mut partially_valid_msg); - if hashes_count_pre_pool_filter > partially_valid_msg.len() { - let already_known_hashes_count = - hashes_count_pre_pool_filter - partially_valid_msg.len(); - self.metrics - .occurrences_hashes_already_in_pool - .increment(already_known_hashes_count as u64); - } - - if partially_valid_msg.is_empty() { - // nothing to request - return - } - - // 4. filter out invalid entries (spam) - // - // validates messages with respect to the given network, e.g. 
allowed tx types - // - let (validation_outcome, mut valid_announcement_data) = if partially_valid_msg - .msg_version() - .expect("partially valid announcement should have version") - .is_eth68() - { - // validate eth68 announcement data - self.transaction_fetcher - .filter_valid_message - .filter_valid_entries_68(partially_valid_msg) - } else { - // validate eth66 announcement data - self.transaction_fetcher - .filter_valid_message - .filter_valid_entries_66(partially_valid_msg) - }; - - if validation_outcome == FilterOutcome::ReportPeer { - self.report_peer(peer_id, ReputationChangeKind::BadAnnouncement); - } + if builder.is_empty() { + trace!(target: "net::tx", ?peer_id, "Nothing to propagate to peer; has seen all transactions"); + continue + } - if valid_announcement_data.is_empty() { - // no valid announcement data - return - } + let PropagateTransactions { pooled, full } = builder.build(); - // 5. filter out already seen unknown hashes - // - // seen hashes are already in the tx fetcher, pending fetch. - // - // for any seen hashes add the peer as fallback. unseen hashes are loaded into the tx - // fetcher, hence they should be valid at this point. - let bad_imports = &self.bad_imports; - self.transaction_fetcher.filter_unseen_and_pending_hashes( - &mut valid_announcement_data, - |hash| bad_imports.contains(hash), - &peer_id, - |peer_id| self.peers.contains_key(&peer_id), - &client, - ); + // send hashes if any + if let Some(mut new_pooled_hashes) = pooled { + // enforce tx soft limit per message for the (unlikely) event the number of + // hashes exceeds it + new_pooled_hashes + .truncate(SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE); - if valid_announcement_data.is_empty() { - // nothing to request - return - } + for hash in new_pooled_hashes.iter_hashes().copied() { + propagated.0.entry(hash).or_default().push(PropagateKind::Hash(*peer_id)); + // mark transaction as seen by peer + peer.seen_transactions.insert(hash); + } - trace!(target: "net::tx::propagation", - peer_id=format!("{peer_id:#}"), - hashes_len=valid_announcement_data.iter().count(), - hashes=?valid_announcement_data.keys().collect::>(), - msg_version=%valid_announcement_data.msg_version(), - client_version=%client, - "received previously unseen and pending hashes in announcement from peer" - ); + trace!(target: "net::tx", ?peer_id, num_txs=?new_pooled_hashes.len(), "Propagating tx hashes to peer"); - // only send request for hashes to idle peer, otherwise buffer hashes storing peer as - // fallback - if !self.transaction_fetcher.is_idle(&peer_id) { - // load message version before announcement data is destructed in packing - let msg_version = valid_announcement_data.msg_version(); - let (hashes, _version) = valid_announcement_data.into_request_hashes(); + // send hashes of transactions + self.network.send_transactions_hashes(*peer_id, new_pooled_hashes); + } - trace!(target: "net::tx", - peer_id=format!("{peer_id:#}"), - hashes=?*hashes, - %msg_version, - %client, - "buffering hashes announced by busy peer" - ); + // send full transactions, if any + if let Some(new_full_transactions) = full { + for tx in &new_full_transactions { + propagated + .0 + .entry(*tx.tx_hash()) + .or_default() + .push(PropagateKind::Full(*peer_id)); + // mark transaction as seen by peer + peer.seen_transactions.insert(*tx.tx_hash()); + } - self.transaction_fetcher.buffer_hashes(hashes, Some(peer_id)); + trace!(target: "net::tx", ?peer_id, num_txs=?new_full_transactions.len(), "Propagating full transactions to peer"); - 
return + // send full transactions + self.network.send_transactions(*peer_id, new_full_transactions); + } } - // load message version before announcement data type is destructed in packing - let msg_version = valid_announcement_data.msg_version(); - // - // demand recommended soft limit on response, however the peer may enforce an arbitrary - // limit on the response (2MB) - // - // request buffer is shrunk via call to pack request! - let init_capacity_req = - self.transaction_fetcher.approx_capacity_get_pooled_transactions_req(msg_version); - let mut hashes_to_request = RequestTxHashes::with_capacity(init_capacity_req); - let surplus_hashes = - self.transaction_fetcher.pack_request(&mut hashes_to_request, valid_announcement_data); - - if !surplus_hashes.is_empty() { - trace!(target: "net::tx", - peer_id=format!("{peer_id:#}"), - surplus_hashes=?*surplus_hashes, - %msg_version, - %client, - "some hashes in announcement from peer didn't fit in `GetPooledTransactions` request, buffering surplus hashes" - ); + // Update propagated transactions metrics + self.metrics.propagated_transactions.increment(propagated.0.len() as u64); - self.transaction_fetcher.buffer_hashes(surplus_hashes, Some(peer_id)); - } + propagated + } - trace!(target: "net::tx", - peer_id=format!("{peer_id:#}"), - hashes=?*hashes_to_request, - %msg_version, - %client, - "sending hashes in `GetPooledTransactions` request to peer's session" + /// Propagates the given transactions to the peers + /// + /// This fetches all transaction from the pool, including the 4844 blob transactions but + /// __without__ their sidecar, because 4844 transactions are only ever announced as hashes. + fn propagate_all(&mut self, hashes: Vec) { + let propagated = self.propagate_transactions( + self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(), + PropagationMode::Basic, ); - // request the missing transactions - // - // get handle to peer's session again, at this point we know it exists - let Some(peer) = self.peers.get_mut(&peer_id) else { return }; - if let Some(failed_to_request_hashes) = - self.transaction_fetcher.request_transactions_from_peer(hashes_to_request, peer) - { - let conn_eth_version = peer.version; + // notify pool so events get fired + self.pool.on_propagated(propagated); + } - trace!(target: "net::tx", - peer_id=format!("{peer_id:#}"), - failed_to_request_hashes=?*failed_to_request_hashes, - %conn_eth_version, - %client, - "sending `GetPooledTransactions` request to peer's session failed, buffering hashes" + /// Request handler for an incoming request for transactions + fn on_get_pooled_transactions( + &mut self, + peer_id: PeerId, + request: GetPooledTransactions, + response: oneshot::Sender>>, + ) { + if let Some(peer) = self.peers.get_mut(&peer_id) { + if self.network.tx_gossip_disabled() { + let _ = response.send(Ok(PooledTransactions::default())); + return + } + let transactions = self.pool.get_pooled_transactions_as::( + request.0, + GetPooledTransactionLimit::ResponseSizeSoftLimit( + self.transaction_fetcher.info.soft_limit_byte_size_pooled_transactions_response, + ), ); - self.transaction_fetcher.buffer_hashes(failed_to_request_hashes, Some(peer_id)); + + trace!(target: "net::tx::propagation", sent_txs=?transactions.iter().map(|tx| tx.tx_hash()), "Sending requested transactions to peer"); + + // we sent a response at which point we assume that the peer is aware of the + // transactions + peer.seen_transactions.extend(transactions.iter().map(|tx| *tx.tx_hash())); + + let resp = 
PooledTransactions(transactions); + let _ = response.send(Ok(resp)); } } +} +impl TransactionsManager +where + Pool: TransactionPool + 'static, + <::Transaction as PoolTransaction>::Consensus: Into, + <::Transaction as PoolTransaction>::Pooled: + Into, +{ /// Handles dedicated transaction events related to the `eth` protocol. fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { match event { @@ -878,7 +1012,7 @@ where .into_iter() .map(PooledTransactionsElement::try_from_broadcast) .filter_map(Result::ok) - .collect::(); + .collect(); self.import_transactions(peer_id, non_blob_txs, TransactionSource::Broadcast); @@ -1135,20 +1269,6 @@ where } } - /// Processes a batch import results. - fn on_batch_import_result(&mut self, batch_results: Vec>) { - for res in batch_results { - match res { - Ok(hash) => { - self.on_good_import(hash); - } - Err(err) => { - self.on_bad_import(err); - } - } - } - } - /// Processes a [`FetchEvent`]. fn on_fetch_event(&mut self, fetch_event: FetchEvent) { match fetch_event { @@ -1164,100 +1284,6 @@ where } } } - - /// Runs an operation to fetch hashes that are cached in [`TransactionFetcher`]. - fn on_fetch_hashes_pending_fetch(&mut self) { - // try drain transaction hashes pending fetch - let info = &self.pending_pool_imports_info; - let max_pending_pool_imports = info.max_pending_pool_imports; - let has_capacity_wrt_pending_pool_imports = - |divisor| info.has_capacity(max_pending_pool_imports / divisor); - - self.transaction_fetcher - .on_fetch_pending_hashes(&self.peers, has_capacity_wrt_pending_pool_imports); - } - - fn report_peer_bad_transactions(&self, peer_id: PeerId) { - self.report_peer(peer_id, ReputationChangeKind::BadTransactions); - self.metrics.reported_bad_transactions.increment(1); - } - - fn report_peer(&self, peer_id: PeerId, kind: ReputationChangeKind) { - trace!(target: "net::tx", ?peer_id, ?kind, "reporting reputation change"); - self.network.reputation_change(peer_id, kind); - } - - fn on_request_error(&self, peer_id: PeerId, req_err: RequestError) { - let kind = match req_err { - RequestError::UnsupportedCapability => ReputationChangeKind::BadProtocol, - RequestError::Timeout => ReputationChangeKind::Timeout, - RequestError::ChannelClosed | RequestError::ConnectionDropped => { - // peer is already disconnected - return - } - RequestError::BadResponse => return self.report_peer_bad_transactions(peer_id), - }; - self.report_peer(peer_id, kind); - } - - fn report_already_seen(&self, peer_id: PeerId) { - trace!(target: "net::tx", ?peer_id, "Penalizing peer for already seen transaction"); - self.network.reputation_change(peer_id, ReputationChangeKind::AlreadySeenTransaction); - } - - /// Clear the transaction - fn on_good_import(&mut self, hash: TxHash) { - self.transactions_by_peers.remove(&hash); - } - - /// Penalize the peers that intentionally sent the bad transaction, and cache it to avoid - /// fetching or importing it again. - /// - /// Errors that count as bad transactions are: - /// - /// - intrinsic gas too low - /// - exceeds gas limit - /// - gas uint overflow - /// - exceeds max init code size - /// - oversized data - /// - signer account has bytecode - /// - chain id mismatch - /// - old legacy chain id - /// - tx type not supported - /// - /// (and additionally for blobs txns...) 
- /// - /// - no blobs - /// - too many blobs - /// - invalid kzg proof - /// - kzg error - /// - not blob transaction (tx type mismatch) - /// - wrong versioned kzg commitment hash - fn on_bad_import(&mut self, err: PoolError) { - let peers = self.transactions_by_peers.remove(&err.hash); - - // if we're _currently_ syncing, we ignore a bad transaction - if !err.is_bad_transaction() || self.network.is_syncing() { - return - } - // otherwise we penalize the peer that sent the bad transaction, with the assumption that - // the peer should have known that this transaction is bad (e.g. violating consensus rules) - if let Some(peers) = peers { - for peer_id in peers { - self.report_peer_bad_transactions(peer_id); - } - } - self.metrics.bad_imports.increment(1); - self.bad_imports.insert(err.hash); - } - - /// Returns `true` if [`TransactionsManager`] has capacity to request pending hashes. Returns - /// `false` if [`TransactionsManager`] is operating close to full capacity. - fn has_capacity_for_fetching_pending_hashes(&self) -> bool { - self.pending_pool_imports_info - .has_capacity(self.pending_pool_imports_info.max_pending_pool_imports) && - self.transaction_fetcher.has_capacity_for_fetching_pending_hashes() - } } /// An endless future. Preemption ensure that future is non-blocking, nonetheless. See @@ -1270,6 +1296,9 @@ where impl Future for TransactionsManager where Pool: TransactionPool + Unpin + 'static, + <::Transaction as PoolTransaction>::Consensus: Into, + <::Transaction as PoolTransaction>::Pooled: + Into, { type Output = (); @@ -1448,40 +1477,37 @@ impl PropagationMode { /// A transaction that's about to be propagated to multiple peers. #[derive(Debug, Clone)] -struct PropagateTransaction { +struct PropagateTransaction { size: usize, - transaction: Arc, + transaction: Arc, } -// === impl PropagateTransaction === - -impl PropagateTransaction { - fn hash(&self) -> TxHash { - self.transaction.hash() - } - +impl PropagateTransaction { /// Create a new instance from a pooled transaction - fn new(tx: Arc>) -> Self + fn new
(tx: Arc>) -> Self where - T: PoolTransaction>, + P: PoolTransaction>, { let size = tx.encoded_length(); - let recovered: TransactionSignedEcRecovered = - tx.transaction.clone().into_consensus().into(); - let transaction = Arc::new(recovered.into_signed()); + let transaction = tx.transaction.clone().into_consensus().into(); + let transaction = Arc::new(transaction); Self { size, transaction } } + + fn tx_hash(&self) -> &TxHash { + self.transaction.tx_hash() + } } /// Helper type to construct the appropriate message to send to the peer based on whether the peer /// should receive them in full or as pooled #[derive(Debug, Clone)] -enum PropagateTransactionsBuilder { +enum PropagateTransactionsBuilder { Pooled(PooledTransactionsHashesBuilder), - Full(FullTransactionsBuilder), + Full(FullTransactionsBuilder), } -impl PropagateTransactionsBuilder { +impl PropagateTransactionsBuilder { /// Create a builder for pooled transactions fn pooled(version: EthVersion) -> Self { Self::Pooled(PooledTransactionsHashesBuilder::new(version)) @@ -1492,21 +1518,6 @@ impl PropagateTransactionsBuilder { Self::Full(FullTransactionsBuilder::new(version)) } - /// Appends all transactions - fn extend<'a>(&mut self, txs: impl IntoIterator) { - for tx in txs { - self.push(tx); - } - } - - /// Appends a transaction to the list. - fn push(&mut self, transaction: &PropagateTransaction) { - match self { - Self::Pooled(builder) => builder.push(transaction), - Self::Full(builder) => builder.push(transaction), - } - } - /// Returns true if no transactions are recorded. fn is_empty(&self) -> bool { match self { @@ -1516,7 +1527,7 @@ impl PropagateTransactionsBuilder { } /// Consumes the type and returns the built messages that should be sent to the peer. - fn build(self) -> PropagateTransactions { + fn build(self) -> PropagateTransactions { match self { Self::Pooled(pooled) => { PropagateTransactions { pooled: Some(pooled.build()), full: None } @@ -1526,12 +1537,29 @@ impl PropagateTransactionsBuilder { } } +impl PropagateTransactionsBuilder { + /// Appends all transactions + fn extend<'a>(&mut self, txs: impl IntoIterator>) { + for tx in txs { + self.push(tx); + } + } + + /// Appends a transaction to the list. + fn push(&mut self, transaction: &PropagateTransaction) { + match self { + Self::Pooled(builder) => builder.push(transaction), + Self::Full(builder) => builder.push(transaction), + } + } +} + /// Represents how the transactions should be sent to a peer if any. -struct PropagateTransactions { +struct PropagateTransactions { /// The pooled transaction hashes to send. pooled: Option, /// The transactions to send in full. - full: Option>>, + full: Option>>, } /// Helper type for constructing the full transaction message that enforces the @@ -1539,18 +1567,16 @@ struct PropagateTransactions { /// and enforces other propagation rules for EIP-4844 and tracks those transactions that can't be /// broadcasted in full. #[derive(Debug, Clone)] -struct FullTransactionsBuilder { +struct FullTransactionsBuilder { /// The soft limit to enforce for a single broadcast message of full transactions. total_size: usize, /// All transactions to be broadcasted. 
- transactions: Vec>, + transactions: Vec>, /// Transactions that didn't fit into the broadcast message pooled: PooledTransactionsHashesBuilder, } -// === impl FullTransactionsBuilder === - -impl FullTransactionsBuilder { +impl FullTransactionsBuilder { /// Create a builder for the negotiated version of the peer's session fn new(version: EthVersion) -> Self { Self { @@ -1560,8 +1586,22 @@ impl FullTransactionsBuilder { } } + /// Returns whether or not any transactions are in the [`FullTransactionsBuilder`]. + fn is_empty(&self) -> bool { + self.transactions.is_empty() && self.pooled.is_empty() + } + + /// Returns the messages that should be propagated to the peer. + fn build(self) -> PropagateTransactions { + let pooled = Some(self.pooled.build()).filter(|pooled| !pooled.is_empty()); + let full = Some(self.transactions).filter(|full| !full.is_empty()); + PropagateTransactions { pooled, full } + } +} + +impl FullTransactionsBuilder { /// Appends all transactions. - fn extend(&mut self, txs: impl IntoIterator) { + fn extend(&mut self, txs: impl IntoIterator>) { for tx in txs { self.push(&tx) } @@ -1575,7 +1615,8 @@ impl FullTransactionsBuilder { /// /// If the transaction is unsuitable for broadcast or would exceed the softlimit, it is appended /// to list of pooled transactions, (e.g. 4844 transactions). - fn push(&mut self, transaction: &PropagateTransaction) { + /// See also [`TxType::is_broadcastable_in_full`]. + fn push(&mut self, transaction: &PropagateTransaction) { // Do not send full 4844 transaction hashes to peers. // // Nodes MUST NOT automatically broadcast blob transactions to their peers. @@ -1584,7 +1625,7 @@ impl FullTransactionsBuilder { // via `GetPooledTransactions`. // // From: - if transaction.transaction.is_eip4844() { + if !transaction.transaction.tx_type().is_broadcastable_in_full() { self.pooled.push(transaction); return } @@ -1601,18 +1642,6 @@ impl FullTransactionsBuilder { self.total_size = new_size; self.transactions.push(Arc::clone(&transaction.transaction)); } - - /// Returns whether or not any transactions are in the [`FullTransactionsBuilder`]. - fn is_empty(&self) -> bool { - self.transactions.is_empty() && self.pooled.is_empty() - } - - /// Returns the messages that should be propagated to the peer. - fn build(self) -> PropagateTransactions { - let pooled = Some(self.pooled.build()).filter(|pooled| !pooled.is_empty()); - let full = Some(self.transactions).filter(|full| !full.is_empty()); - PropagateTransactions { pooled, full } - } } /// A helper type to create the pooled transactions message based on the negotiated version of the @@ -1647,17 +1676,20 @@ impl PooledTransactionsHashesBuilder { } /// Appends all hashes - fn extend(&mut self, txs: impl IntoIterator) { + fn extend( + &mut self, + txs: impl IntoIterator>, + ) { for tx in txs { self.push(&tx); } } - fn push(&mut self, tx: &PropagateTransaction) { + fn push(&mut self, tx: &PropagateTransaction) { match self { - Self::Eth66(msg) => msg.0.push(tx.hash()), + Self::Eth66(msg) => msg.0.push(*tx.tx_hash()), Self::Eth68(msg) => { - msg.hashes.push(tx.hash()); + msg.hashes.push(*tx.tx_hash()); msg.sizes.push(tx.size); msg.types.push(tx.transaction.tx_type().into()); } @@ -1699,23 +1731,23 @@ impl TransactionSource { /// Tracks a single peer in the context of [`TransactionsManager`]. #[derive(Debug)] -pub struct PeerMetadata { +pub struct PeerMetadata { /// Optimistically keeps track of transactions that we know the peer has seen. 
Optimistic, in /// the sense that transactions are preemptively marked as seen by peer when they are sent to /// the peer. seen_transactions: LruCache, /// A communication channel directly to the peer's session task. - request_tx: PeerRequestSender, + request_tx: PeerRequestSender>, /// negotiated version of the session. version: EthVersion, /// The peer's client version. client_version: Arc, } -impl PeerMetadata { +impl PeerMetadata { /// Returns a new instance of [`PeerMetadata`]. fn new( - request_tx: PeerRequestSender, + request_tx: PeerRequestSender>, version: EthVersion, client_version: Arc, max_transactions_seen_by_peer: u32, @@ -1731,7 +1763,7 @@ impl PeerMetadata { /// Commands to send to the [`TransactionsManager`] #[derive(Debug)] -enum TransactionsCommand { +enum TransactionsCommand { /// Propagate a transaction hash to the network. PropagateHash(B256), /// Propagate transaction hashes to a specific peer. @@ -1750,13 +1782,13 @@ enum TransactionsCommand { /// Requests a clone of the sender sender channel to the peer. GetPeerSender { peer_id: PeerId, - peer_request_sender: oneshot::Sender>, + peer_request_sender: oneshot::Sender>>>, }, } /// All events related to transactions emitted by the network. #[derive(Debug)] -pub enum NetworkTransactionEvent { +pub enum NetworkTransactionEvent { /// Represents the event of receiving a list of transactions from a peer. /// /// This indicates transactions that were broadcasted to us from the peer. @@ -1764,7 +1796,7 @@ pub enum NetworkTransactionEvent { /// The ID of the peer from which the transactions were received. peer_id: PeerId, /// The received transactions. - msg: Transactions, + msg: Transactions, }, /// Represents the event of receiving a list of transaction hashes from a peer. IncomingPooledTransactionHashes { @@ -1780,10 +1812,10 @@ pub enum NetworkTransactionEvent { /// The received `GetPooledTransactions` request. request: GetPooledTransactions, /// The sender for responding to the request with a result of `PooledTransactions`. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Represents the event of receiving a `GetTransactionsHandle` request. - GetTransactionsHandle(oneshot::Sender>), + GetTransactionsHandle(oneshot::Sender>>), } /// Tracks stats about the [`TransactionsManager`]. 
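`TransactionsCommand::GetPeerSender` above, like `peer_handle` earlier in this diff, ships a oneshot sender inside the command and awaits the reply. A self-contained sketch of that request/response pattern with toy types (tokio is the only assumption):

use tokio::sync::{mpsc, oneshot};

enum Command {
    GetValue { key: u64, reply: oneshot::Sender<Option<String>> },
}

async fn ask(cmds: &mpsc::UnboundedSender<Command>, key: u64) -> Option<String> {
    let (reply, rx) = oneshot::channel();
    // a failed send means the manager task has already shut down
    cmds.send(Command::GetValue { key, reply }).ok()?;
    rx.await.ok().flatten()
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel::<Command>();
    // toy "manager": answers every command with a canned value
    tokio::spawn(async move {
        while let Some(Command::GetValue { key, reply }) = rx.recv().await {
            let _ = reply.send(Some(format!("value-{key}")));
        }
    });
    assert_eq!(ask(&tx, 1).await.as_deref(), Some("value-1"));
}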
@@ -2154,7 +2186,7 @@ mod tests { .await; assert!(!pool.is_empty()); - assert!(pool.get(&signed_tx.hash).is_some()); + assert!(pool.get(signed_tx.tx_hash()).is_some()); handle.terminate().await; } @@ -2229,7 +2261,7 @@ mod tests { .add_transaction(reth_transaction_pool::TransactionOrigin::External, tx.clone()) .await; - let request = GetPooledTransactions(vec![tx.get_hash()]); + let request = GetPooledTransactions(vec![*tx.get_hash()]); let (send, receive) = oneshot::channel::>(); @@ -2364,7 +2396,8 @@ mod tests { #[test] fn test_transaction_builder_empty() { - let mut builder = PropagateTransactionsBuilder::pooled(EthVersion::Eth68); + let mut builder = + PropagateTransactionsBuilder::::pooled(EthVersion::Eth68); assert!(builder.is_empty()); let mut factory = MockTransactionFactory::default(); @@ -2380,7 +2413,8 @@ mod tests { #[test] fn test_transaction_builder_large() { - let mut builder = PropagateTransactionsBuilder::full(EthVersion::Eth68); + let mut builder = + PropagateTransactionsBuilder::::full(EthVersion::Eth68); assert!(builder.is_empty()); let mut factory = MockTransactionFactory::default(); @@ -2408,7 +2442,8 @@ mod tests { #[test] fn test_transaction_builder_eip4844() { - let mut builder = PropagateTransactionsBuilder::full(EthVersion::Eth68); + let mut builder = + PropagateTransactionsBuilder::::full(EthVersion::Eth68); assert!(builder.is_empty()); let mut factory = MockTransactionFactory::default(); diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs index 7bfe07761a21..1575d9f3374a 100644 --- a/crates/net/network/src/transactions/validation.rs +++ b/crates/net/network/src/transactions/validation.rs @@ -2,8 +2,6 @@ //! and [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68) //! announcements. Validation and filtering of announcements is network dependent. -use std::{fmt, fmt::Display, mem}; - use crate::metrics::{AnnouncedTxTypesMetrics, TxTypesCounter}; use alloy_primitives::{Signature, TxHash}; use derive_more::{Deref, DerefMut}; @@ -12,6 +10,7 @@ use reth_eth_wire::{ MAX_MESSAGE_SIZE, }; use reth_primitives::TxType; +use std::{fmt, fmt::Display, mem}; use tracing::trace; /// The size of a decoded signature in bytes. 
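The test churn below from field access (`signed_tx.hash`, `tx.get_hash()`) to accessor calls returning references (`signed_tx.tx_hash()`, `*tx.get_hash()`) points at a signed-transaction type that now caches a lazily computed hash and hands out borrows of it. A minimal sketch of that pattern, with hypothetical types and a placeholder in place of the real keccak256 of the RLP encoding:

```rust
use std::sync::OnceLock;

type TxHash = [u8; 32];

struct SignedTx {
    payload: Vec<u8>,
    /// Computed on first access, then cached for all later calls.
    hash: OnceLock<TxHash>,
}

impl SignedTx {
    /// Construct without hashing up front, mirroring the `new_unhashed` idea.
    fn new_unhashed(payload: Vec<u8>) -> Self {
        Self { payload, hash: OnceLock::new() }
    }

    /// Borrow the cached hash, computing it exactly once if needed. Callers
    /// that want an owned value copy it explicitly, which is why the updated
    /// tests dereference (`*tx.get_hash()`).
    fn tx_hash(&self) -> &TxHash {
        self.hash.get_or_init(|| {
            // placeholder: the real implementation hashes the RLP encoding
            let mut out = [0u8; 32];
            for (i, b) in self.payload.iter().enumerate() {
                out[i % 32] ^= b;
            }
            out
        })
    }
}
```

Because the hash is no longer a plain struct field, struct-literal construction gives way to `new_unhashed` plus an explicit `recalculate_hash()` where a test needs a concrete value, as in the `big_pooled_txs_req` change below.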
diff --git a/crates/net/network/tests/it/big_pooled_txs_req.rs b/crates/net/network/tests/it/big_pooled_txs_req.rs index 4d65e3f63baa..328229e87e14 100644 --- a/crates/net/network/tests/it/big_pooled_txs_req.rs +++ b/crates/net/network/tests/it/big_pooled_txs_req.rs @@ -26,16 +26,13 @@ async fn test_large_tx_req() { // replace rng txhash with real txhash let mut tx = MockTransaction::eip1559(); - let ts = TransactionSigned { - hash: Default::default(), - signature: Signature::test_signature(), - transaction: tx.clone().into(), - }; + let ts = + TransactionSigned::new_unhashed(tx.clone().into(), Signature::test_signature()); tx.set_hash(ts.recalculate_hash()); tx }) .collect(); - let txs_hashes: Vec = txs.iter().map(|tx| tx.get_hash()).collect(); + let txs_hashes: Vec = txs.iter().map(|tx| *tx.get_hash()).collect(); // setup testnet let mut net = Testnet::create_with(2, MockEthProvider::default()).await; diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index ec891e5b39a9..0a17cbd563ed 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -8,7 +8,7 @@ use alloy_provider::{ext::AdminApi, ProviderBuilder}; use futures::StreamExt; use reth_chainspec::MAINNET; use reth_discv4::Discv4Config; -use reth_eth_wire::{DisconnectReason, HeadersDirection}; +use reth_eth_wire::{DisconnectReason, EthNetworkPrimitives, HeadersDirection}; use reth_net_banlist::BanList; use reth_network::{ test_utils::{enr_to_peer_id, NetworkEventStream, PeerConfig, Testnet, GETH_TIMEOUT}, @@ -204,8 +204,9 @@ async fn test_connect_with_boot_nodes() { let mut discv4 = Discv4Config::builder(); discv4.add_boot_nodes(mainnet_nodes()); - let config = - NetworkConfigBuilder::new(secret_key).discovery(discv4).build(NoopProvider::default()); + let config = NetworkConfigBuilder::::new(secret_key) + .discovery(discv4) + .build(NoopProvider::default()); let network = NetworkManager::new(config).await.unwrap(); let handle = network.handle().clone(); @@ -572,7 +573,7 @@ async fn test_disconnect_incoming_when_exceeded_incoming_connections() { let secret_key = SecretKey::new(&mut rand::thread_rng()); let peers_config = PeersConfig::default().with_max_inbound(0); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .listener_port(0) .disable_discovery() .peer_config(peers_config) diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index 58e46e3fb095..0dd38c959de9 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -3,7 +3,7 @@ use std::sync::Arc; -use alloy_consensus::TxEip2930; +use alloy_consensus::{Header, TxEip2930}; use alloy_primitives::{Bytes, PrimitiveSignature as Signature, TxKind, U256}; use rand::Rng; use reth_eth_wire::HeadersDirection; @@ -16,7 +16,7 @@ use reth_network_p2p::{ bodies::client::BodiesClient, headers::client::{HeadersClient, HeadersRequest}, }; -use reth_primitives::{Block, Header, Transaction, TransactionSigned}; +use reth_primitives::{Block, Transaction, TransactionSigned}; use reth_provider::test_utils::MockEthProvider; /// Returns a new [`TransactionSigned`] with some random parameters @@ -33,7 +33,7 @@ pub fn rng_transaction(rng: &mut impl rand::RngCore) -> TransactionSigned { }); let signature = Signature::new(U256::default(), U256::default(), true); - TransactionSigned::from_transaction_and_signature(request, signature) + TransactionSigned::new_unhashed(request, signature) } 
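The test diffs here and below all add a turbofish because the network config builder is now generic over the wire primitive types and no longer defaults them at these call sites. Condensed from the surrounding tests into one sketch (import paths as they appear in those tests; `NoopProvider` path assumed from the test utilities):

```rust
use reth_eth_wire::EthNetworkPrimitives;
use reth_network::{NetworkConfigBuilder, NetworkManager};
use reth_provider::test_utils::NoopProvider;
use secp256k1::SecretKey;

async fn spawn_test_network() -> eyre::Result<()> {
    let secret_key = SecretKey::new(&mut rand::thread_rng());
    // Pin the builder to the Ethereum wire primitives explicitly.
    let config = NetworkConfigBuilder::<EthNetworkPrimitives>::new(secret_key)
        .disable_discovery()
        .listener_port(0)
        .build(NoopProvider::default());
    let network = NetworkManager::new(config).await?;
    let _handle = network.handle().clone();
    Ok(())
}
```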
#[tokio::test(flavor = "multi_thread")] diff --git a/crates/net/network/tests/it/startup.rs b/crates/net/network/tests/it/startup.rs index d84ff492e5e7..862281ab1ffd 100644 --- a/crates/net/network/tests/it/startup.rs +++ b/crates/net/network/tests/it/startup.rs @@ -5,6 +5,7 @@ use std::{ use reth_chainspec::MAINNET; use reth_discv4::{Discv4Config, NatResolver}; +use reth_eth_wire::EthNetworkPrimitives; use reth_network::{ error::{NetworkError, ServiceKind}, Discovery, NetworkConfigBuilder, NetworkManager, @@ -26,7 +27,7 @@ fn is_addr_in_use_kind(err: &NetworkError, kind: ServiceKind) -> bool { #[tokio::test(flavor = "multi_thread")] async fn test_is_default_syncing() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .disable_discovery() .listener_port(0) .build(NoopProvider::default()); @@ -37,13 +38,13 @@ async fn test_is_default_syncing() { #[tokio::test(flavor = "multi_thread")] async fn test_listener_addr_in_use() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .disable_discovery() .listener_port(0) .build(NoopProvider::default()); let network = NetworkManager::new(config).await.unwrap(); let listener_port = network.local_addr().port(); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .listener_port(listener_port) .disable_discovery() .build(NoopProvider::default()); @@ -72,7 +73,7 @@ async fn test_discovery_addr_in_use() { #[tokio::test(flavor = "multi_thread")] async fn test_tcp_port_node_record_no_discovery() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .listener_port(0) .disable_discovery() .build_with_noop_provider(MAINNET.clone()); @@ -90,7 +91,7 @@ async fn test_tcp_port_node_record_no_discovery() { #[tokio::test(flavor = "multi_thread")] async fn test_tcp_port_node_record_discovery() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .listener_port(0) .discovery_port(0) .disable_dns_discovery() @@ -109,7 +110,7 @@ async fn test_tcp_port_node_record_discovery() { #[tokio::test(flavor = "multi_thread")] async fn test_node_record_address_with_nat() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .add_nat(Some(NatResolver::ExternalIp("10.1.1.1".parse().unwrap()))) .disable_discv4_discovery() .disable_dns_discovery() @@ -125,7 +126,7 @@ async fn test_node_record_address_with_nat() { #[tokio::test(flavor = "multi_thread")] async fn test_node_record_address_with_nat_disable_discovery() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .add_nat(Some(NatResolver::ExternalIp("10.1.1.1".parse().unwrap()))) .disable_discovery() .listener_port(0) diff --git a/crates/net/network/tests/it/txgossip.rs b/crates/net/network/tests/it/txgossip.rs index 2e2ee4a031a0..ebde61ef8ea1 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -95,7 +95,7 @@ async fn test_4844_tx_gossip_penalization() 
{ let peer0_reputation_after = peer1.peer_handle().peer_by_id(*peer0.peer_id()).await.unwrap().reputation(); assert_ne!(peer0_reputation_before, peer0_reputation_after); - assert_eq!(received, txs[1].transaction().hash); + assert_eq!(received, txs[1].transaction().hash()); // this will return an [`Empty`] error because blob txs are disallowed to be broadcasted assert!(peer1_tx_listener.try_recv().is_err()); @@ -132,10 +132,7 @@ async fn test_sending_invalid_transactions() { value: Default::default(), input: Default::default(), }; - let tx = TransactionSigned::from_transaction_and_signature( - tx.into(), - Signature::test_signature(), - ); + let tx = TransactionSigned::new_unhashed(tx.into(), Signature::test_signature()); peer0.network().send_transactions(*peer1.peer_id(), vec![Arc::new(tx)]); } diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml index 3b6d74c9dbeb..9348bf2d0413 100644 --- a/crates/net/p2p/Cargo.toml +++ b/crates/net/p2p/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-eth-wire-types.workspace = true reth-consensus.workspace = true reth-network-peers.workspace = true @@ -21,6 +22,7 @@ reth-network-types.workspace = true reth-storage-errors.workspace = true # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true @@ -32,7 +34,6 @@ tokio = { workspace = true, features = ["sync"] } auto_impl.workspace = true tracing.workspace = true derive_more.workspace = true - parking_lot = { workspace = true, optional = true } [dev-dependencies] @@ -47,11 +48,14 @@ test-utils = [ "reth-consensus/test-utils", "parking_lot", "reth-network-types/test-utils", - "reth-primitives/test-utils" + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils" ] std = [ "reth-consensus/std", "reth-primitives/std", "alloy-eips/std", - "alloy-primitives/std" + "alloy-primitives/std", + "reth-primitives-traits/std", + "alloy-consensus/std", ] diff --git a/crates/net/p2p/src/bodies/client.rs b/crates/net/p2p/src/bodies/client.rs index 2a4b57c23459..d48fccc6d000 100644 --- a/crates/net/p2p/src/bodies/client.rs +++ b/crates/net/p2p/src/bodies/client.rs @@ -9,13 +9,16 @@ use futures::{Future, FutureExt}; use reth_primitives::BlockBody; /// The bodies future type -pub type BodiesFut = Pin>> + Send + Sync>>; +pub type BodiesFut = + Pin>> + Send + Sync>>; /// A client capable of downloading block bodies. #[auto_impl::auto_impl(&, Arc, Box)] pub trait BodiesClient: DownloadClient { + /// The body type this client fetches. + type Body: Send + Sync + Unpin + 'static; /// The output of the request future for querying block bodies. - type Output: Future>> + Sync + Send + Unpin; + type Output: Future>> + Sync + Send + Unpin; /// Fetches the block body for the requested block. 
fn get_block_bodies(&self, hashes: Vec) -> Self::Output { @@ -49,11 +52,11 @@ pub struct SingleBodyRequest { fut: Fut, } -impl Future for SingleBodyRequest +impl Future for SingleBodyRequest where - Fut: Future>> + Sync + Send + Unpin, + Fut: Future>> + Sync + Send + Unpin, { - type Output = PeerRequestResult>; + type Output = PeerRequestResult>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let resp = ready!(self.get_mut().fut.poll_unpin(cx)); diff --git a/crates/net/p2p/src/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs index b55229fa2426..7008c08e522e 100644 --- a/crates/net/p2p/src/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -2,17 +2,22 @@ use super::response::BlockResponse; use crate::error::DownloadResult; use alloy_primitives::BlockNumber; use futures::Stream; -use std::ops::RangeInclusive; +use std::{fmt::Debug, ops::RangeInclusive}; /// Body downloader return type. -pub type BodyDownloaderResult = DownloadResult>; +pub type BodyDownloaderResult = DownloadResult>>; /// A downloader capable of fetching and yielding block bodies from block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block bodies, /// while a [`BodiesClient`][crate::bodies::client::BodiesClient] represents a client capable of /// fulfilling these requests. -pub trait BodyDownloader: Send + Sync + Stream + Unpin { +pub trait BodyDownloader: + Send + Sync + Stream> + Unpin +{ + /// The type of the body that is being downloaded. + type Body: Debug + Send + Sync + Unpin + 'static; + /// Method for setting the download range. fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()>; } diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 8ae840fbf669..11aaab17a300 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -1,16 +1,17 @@ use alloy_primitives::{BlockNumber, U256}; -use reth_primitives::{SealedBlock, SealedHeader}; +use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; +use reth_primitives_traits::InMemorySize; /// The block response #[derive(PartialEq, Eq, Debug, Clone)] -pub enum BlockResponse { +pub enum BlockResponse { /// Full block response (with transactions or ommers) - Full(SealedBlock), + Full(SealedBlock), /// The empty block response Empty(SealedHeader), } -impl BlockResponse { +impl BlockResponse { /// Return the reference to the response header pub const fn header(&self) -> &SealedHeader { match self { @@ -19,15 +20,6 @@ impl BlockResponse { } } - /// Calculates a heuristic for the in-memory size of the [`BlockResponse`]. 
- #[inline] - pub fn size(&self) -> usize { - match self { - Self::Full(block) => SealedBlock::size(block), - Self::Empty(header) => SealedHeader::size(header), - } - } - /// Return the block number pub fn block_number(&self) -> BlockNumber { self.header().number @@ -40,4 +32,22 @@ impl BlockResponse { Self::Empty(header) => header.difficulty, } } + + /// Return the reference to the response body + pub fn into_body(self) -> Option { + match self { + Self::Full(block) => Some(block.body), + Self::Empty(_) => None, + } + } +} + +impl InMemorySize for BlockResponse { + #[inline] + fn size(&self) -> usize { + match self { + Self::Full(block) => SealedBlock::size(block), + Self::Empty(header) => SealedHeader::size(header), + } + } } diff --git a/crates/net/p2p/src/either.rs b/crates/net/p2p/src/either.rs index 30650069b913..3f1182bd4826 100644 --- a/crates/net/p2p/src/either.rs +++ b/crates/net/p2p/src/either.rs @@ -32,8 +32,9 @@ where impl BodiesClient for Either where A: BodiesClient, - B: BodiesClient, + B: BodiesClient, { + type Body = A::Body; type Output = Either; fn get_block_bodies_with_priority( @@ -51,8 +52,9 @@ where impl HeadersClient for Either where A: HeadersClient, - B: HeadersClient, + B: HeadersClient
<Header = A::Header>
, { + type Header = A::Header; type Output = Either; fn get_headers_with_priority( diff --git a/crates/net/p2p/src/error.rs b/crates/net/p2p/src/error.rs index 9394a9fdf6c5..45d34fc04ece 100644 --- a/crates/net/p2p/src/error.rs +++ b/crates/net/p2p/src/error.rs @@ -1,13 +1,14 @@ use std::ops::RangeInclusive; use super::headers::client::HeadersRequest; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use derive_more::{Display, Error}; use reth_consensus::ConsensusError; use reth_network_peers::WithPeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{GotExpected, GotExpectedBoxed, Header}; +use reth_primitives::{GotExpected, GotExpectedBoxed}; use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; use tokio::sync::{mpsc, oneshot}; @@ -26,7 +27,7 @@ pub trait EthResponseValidator { fn reputation_change_err(&self) -> Option; } -impl EthResponseValidator for RequestResult> { +impl EthResponseValidator for RequestResult> { fn is_likely_bad_headers_response(&self, request: &HeadersRequest) -> bool { match self { Ok(headers) => { @@ -38,7 +39,7 @@ impl EthResponseValidator for RequestResult> { match request.start { BlockHashOrNumber::Number(block_number) => { - headers.first().is_some_and(|header| block_number != header.number) + headers.first().is_some_and(|header| block_number != header.number()) } BlockHashOrNumber::Hash(_) => { // we don't want to hash the header @@ -79,24 +80,24 @@ impl EthResponseValidator for RequestResult> { #[derive(Clone, Debug, Eq, PartialEq, Display, Error)] pub enum RequestError { /// Closed channel to the peer. - #[display("closed channel to the peer")] /// Indicates the channel to the peer is closed. + #[display("closed channel to the peer")] ChannelClosed, /// Connection to a peer dropped while handling the request. - #[display("connection to a peer dropped while handling the request")] /// Represents a dropped connection while handling the request. + #[display("connection to a peer dropped while handling the request")] ConnectionDropped, /// Capability message is not supported by the remote peer. - #[display("capability message is not supported by remote peer")] /// Indicates an unsupported capability message from the remote peer. + #[display("capability message is not supported by remote peer")] UnsupportedCapability, /// Request timed out while awaiting response. - #[display("request timed out while awaiting response")] /// Represents a timeout while waiting for a response. + #[display("request timed out while awaiting response")] Timeout, /// Received bad response. - #[display("received bad response")] /// Indicates a bad response was received. 
+ #[display("received bad response")] BadResponse, } @@ -216,6 +217,8 @@ impl From for DownloadError { #[cfg(test)] mod tests { + use alloy_consensus::Header; + use super::*; #[test] diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index e5129b686741..a966c01c933d 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -5,16 +5,18 @@ use crate::{ headers::client::{HeadersClient, SingleHeaderRequest}, BlockClient, }; +use alloy_consensus::BlockHeader; use alloy_primitives::{Sealable, B256}; -use reth_consensus::{Consensus, ConsensusError}; +use reth_consensus::Consensus; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::WithPeerId; -use reth_primitives::{BlockBody, GotExpected, Header, SealedBlock, SealedHeader}; +use reth_primitives::{SealedBlock, SealedHeader}; use std::{ cmp::Reverse, collections::{HashMap, VecDeque}, fmt::Debug, future::Future, + hash::Hash, pin::Pin, sync::Arc, task::{ready, Context, Poll}, @@ -23,14 +25,23 @@ use tracing::debug; /// A Client that can fetch full blocks from the network. #[derive(Debug, Clone)] -pub struct FullBlockClient { +pub struct FullBlockClient +where + Client: BlockClient, +{ client: Client, - consensus: Arc, + consensus: Arc>, } -impl FullBlockClient { +impl FullBlockClient +where + Client: BlockClient, +{ /// Creates a new instance of `FullBlockClient`. - pub fn new(client: Client, consensus: Arc) -> Self { + pub fn new( + client: Client, + consensus: Arc>, + ) -> Self { Self { client, consensus } } @@ -55,6 +66,7 @@ where let client = self.client.clone(); FetchFullBlockFuture { hash, + consensus: self.consensus.clone(), request: FullBlockRequest { header: Some(client.get_header(hash.into())), body: Some(client.get_block_body(hash)), @@ -84,11 +96,7 @@ where start_hash: hash, count, request: FullBlockRangeRequest { - headers: Some(client.get_headers(HeadersRequest { - start: hash.into(), - limit: count, - direction: HeadersDirection::Falling, - })), + headers: Some(client.get_headers(HeadersRequest::falling(hash.into(), count))), bodies: None, }, client, @@ -110,15 +118,16 @@ where Client: BlockClient, { client: Client, + consensus: Arc>, hash: B256, request: FullBlockRequest, - header: Option, - body: Option, + header: Option>, + body: Option>, } impl FetchFullBlockFuture where - Client: BlockClient, + Client: BlockClient, { /// Returns the hash of the block being requested. pub const fn hash(&self) -> &B256 { @@ -127,11 +136,11 @@ where /// If the header request is already complete, this returns the block number pub fn block_number(&self) -> Option { - self.header.as_ref().map(|h| h.number) + self.header.as_ref().map(|h| h.number()) } /// Returns the [`SealedBlock`] if the request is complete and valid. 
- fn take_block(&mut self) -> Option { + fn take_block(&mut self) -> Option> { if self.header.is_none() || self.body.is_none() { return None } @@ -142,7 +151,8 @@ where BodyResponse::Validated(body) => Some(SealedBlock::new(header, body)), BodyResponse::PendingValidation(resp) => { // ensure the block is valid, else retry - if let Err(err) = ensure_valid_body_response(&header, resp.data()) { + if let Err(err) = self.consensus.validate_body_against_header(resp.data(), &header) + { debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body"); self.client.report_bad_message(resp.peer_id()); self.header = Some(header); @@ -154,9 +164,9 @@ where } } - fn on_block_response(&mut self, resp: WithPeerId) { + fn on_block_response(&mut self, resp: WithPeerId) { if let Some(ref header) = self.header { - if let Err(err) = ensure_valid_body_response(header, resp.data()) { + if let Err(err) = self.consensus.validate_body_against_header(resp.data(), header) { debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body"); self.client.report_bad_message(resp.peer_id()); return @@ -170,9 +180,9 @@ where impl Future for FetchFullBlockFuture where - Client: BlockClient + 'static, + Client: BlockClient + 'static, { - type Output = SealedBlock; + type Output = SealedBlock; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -185,15 +195,8 @@ where ResponseResult::Header(res) => { match res { Ok(maybe_header) => { - let (peer, maybe_header) = maybe_header - .map(|h| { - h.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) - }) - .split(); + let (peer, maybe_header) = + maybe_header.map(|h| h.map(SealedHeader::seal)).split(); if let Some(header) = maybe_header { if header.hash() == this.hash { this.header = Some(header); @@ -249,7 +252,7 @@ where impl Debug for FetchFullBlockFuture where - Client: BlockClient, + Client: BlockClient, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("FetchFullBlockFuture") @@ -272,7 +275,7 @@ impl FullBlockRequest where Client: BlockClient, { - fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { if let Some(fut) = Pin::new(&mut self.header).as_pin_mut() { if let Poll::Ready(res) = fut.poll(cx) { self.header = None; @@ -293,63 +296,19 @@ where /// The result of a request for a single header or body. This is yielded by the `FullBlockRequest` /// future. -enum ResponseResult { - Header(PeerRequestResult>), - Body(PeerRequestResult>), +enum ResponseResult { + Header(PeerRequestResult>), + Body(PeerRequestResult>), } /// The response of a body request. #[derive(Debug)] -enum BodyResponse { +enum BodyResponse { /// Already validated against transaction root of header - Validated(BlockBody), + Validated(B), /// Still needs to be validated against header - PendingValidation(WithPeerId), -} - -/// Ensures the block response data matches the header. 
-/// -/// This ensures the body response items match the header's hashes: -/// - ommer hash -/// - transaction root -/// - withdrawals root -fn ensure_valid_body_response( - header: &SealedHeader, - block: &BlockBody, -) -> Result<(), ConsensusError> { - let ommers_hash = block.calculate_ommers_root(); - if header.ommers_hash != ommers_hash { - return Err(ConsensusError::BodyOmmersHashDiff( - GotExpected { got: ommers_hash, expected: header.ommers_hash }.into(), - )) - } - - let tx_root = block.calculate_tx_root(); - if header.transactions_root != tx_root { - return Err(ConsensusError::BodyTransactionRootDiff( - GotExpected { got: tx_root, expected: header.transactions_root }.into(), - )) - } - - match (header.withdrawals_root, &block.withdrawals) { - (Some(header_withdrawals_root), Some(withdrawals)) => { - let withdrawals = withdrawals.as_slice(); - let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); - if withdrawals_root != header_withdrawals_root { - return Err(ConsensusError::BodyWithdrawalsRootDiff( - GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), - )) - } - } - (None, None) => { - // this is ok because we assume the fork is not active in this case - } - _ => return Err(ConsensusError::WithdrawalsRootUnexpected), - } - - Ok(()) + PendingValidation(WithPeerId), } - /// A future that downloads a range of full blocks from the network. /// /// This first fetches the headers for the given range using the inner `Client`. Once the request @@ -371,7 +330,7 @@ where /// The client used to fetch headers and bodies. client: Client, /// The consensus instance used to validate the blocks. - consensus: Arc, + consensus: Arc>, /// The block hash to start fetching from (inclusive). start_hash: B256, /// How many blocks to fetch: `len([start_hash, ..]) == count` @@ -379,20 +338,20 @@ where /// Requests for headers and bodies that are in progress. request: FullBlockRangeRequest, /// Fetched headers. - headers: Option>, + headers: Option>>, /// The next headers to request bodies for. This is drained as responses are received. - pending_headers: VecDeque, + pending_headers: VecDeque>, /// The bodies that have been received so far. - bodies: HashMap, + bodies: HashMap, BodyResponse>, } impl FetchFullBlockRangeFuture where - Client: BlockClient, + Client: BlockClient, { /// Returns the block hashes for the given range, if they are available. pub fn range_block_hashes(&self) -> Option> { - self.headers.as_ref().map(|h| h.iter().map(|h| h.hash()).collect::>()) + self.headers.as_ref().map(|h| h.iter().map(|h| h.hash()).collect()) } /// Returns whether or not the bodies map is fully populated with requested headers and bodies. @@ -403,14 +362,14 @@ where /// Inserts a block body, matching it with the `next_header`. /// /// Note: this assumes the response matches the next header in the queue. - fn insert_body(&mut self, body_response: BodyResponse) { + fn insert_body(&mut self, body_response: BodyResponse) { if let Some(header) = self.pending_headers.pop_front() { self.bodies.insert(header, body_response); } } /// Inserts multiple block bodies. - fn insert_bodies(&mut self, bodies: impl IntoIterator) { + fn insert_bodies(&mut self, bodies: impl IntoIterator>) { for body in bodies { self.insert_body(body); } @@ -429,7 +388,7 @@ where /// /// These are returned in falling order starting with the requested `hash`, i.e. with /// descending block numbers. 
- fn take_blocks(&mut self) -> Option> { + fn take_blocks(&mut self) -> Option>> { if !self.is_bodies_complete() { // not done with bodies yet return None @@ -446,7 +405,9 @@ where BodyResponse::Validated(body) => body, BodyResponse::PendingValidation(resp) => { // ensure the block is valid, else retry - if let Err(err) = ensure_valid_body_response(header, resp.data()) { + if let Err(err) = + self.consensus.validate_body_against_header(resp.data(), header) + { debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body in range response"); self.client.report_bad_message(resp.peer_id()); @@ -484,23 +445,14 @@ where Some(valid_responses) } - fn on_headers_response(&mut self, headers: WithPeerId>) { - let (peer, mut headers_falling) = headers - .map(|h| { - h.into_iter() - .map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) - .collect::>() - }) - .split(); + fn on_headers_response(&mut self, headers: WithPeerId>) { + let (peer, mut headers_falling) = + headers.map(|h| h.into_iter().map(SealedHeader::seal).collect::>()).split(); // fill in the response if it's the correct length if headers_falling.len() == self.count as usize { // sort headers from highest to lowest block number - headers_falling.sort_unstable_by_key(|h| Reverse(h.number)); + headers_falling.sort_unstable_by_key(|h| Reverse(h.number())); // check the starting hash if headers_falling[0].hash() == self.start_hash { @@ -551,9 +503,9 @@ where impl Future for FetchFullBlockRangeFuture where - Client: BlockClient + 'static, + Client: BlockClient + 'static, { - type Output = Vec; + type Output = Vec>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -660,7 +612,10 @@ impl FullBlockRangeRequest where Client: BlockClient, { - fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll> { if let Some(fut) = Pin::new(&mut self.headers).as_pin_mut() { if let Poll::Ready(res) = fut.poll(cx) { self.headers = None; @@ -681,13 +636,15 @@ where // The result of a request for headers or block bodies. This is yielded by the // `FullBlockRangeRequest` future. 
-enum RangeResponseResult { - Header(PeerRequestResult>), - Body(PeerRequestResult>), +enum RangeResponseResult { + Header(PeerRequestResult>), + Body(PeerRequestResult>), } #[cfg(test)] mod tests { + use reth_primitives::BlockBody; + use super::*; use crate::test_utils::TestFullBlockClient; use std::ops::Range; @@ -695,7 +652,7 @@ mod tests { #[tokio::test] async fn download_single_full_block() { let client = TestFullBlockClient::default(); - let header = SealedHeader::default(); + let header: SealedHeader = SealedHeader::default(); let body = BlockBody::default(); client.insert(header.clone(), body.clone()); let client = FullBlockClient::test_client(client); @@ -707,7 +664,7 @@ mod tests { #[tokio::test] async fn download_single_full_block_range() { let client = TestFullBlockClient::default(); - let header = SealedHeader::default(); + let header: SealedHeader = SealedHeader::default(); let body = BlockBody::default(); client.insert(header.clone(), body.clone()); let client = FullBlockClient::test_client(client); @@ -722,7 +679,7 @@ mod tests { client: &TestFullBlockClient, range: Range, ) -> (SealedHeader, BlockBody) { - let mut sealed_header = SealedHeader::default(); + let mut sealed_header: SealedHeader = SealedHeader::default(); let body = BlockBody::default(); for _ in range { let (mut header, hash) = sealed_header.split(); @@ -730,9 +687,7 @@ mod tests { header.parent_hash = hash; header.number += 1; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - sealed_header = SealedHeader::new(header, seal); + sealed_header = SealedHeader::seal(header); client.insert(sealed_header.clone(), body.clone()); } @@ -785,6 +740,7 @@ mod tests { let test_consensus = reth_consensus::test_utils::TestConsensus::default(); test_consensus.set_fail_validation(true); + test_consensus.set_fail_body_against_header(false); let client = FullBlockClient::new(client, Arc::new(test_consensus)); let received = client.get_full_block_range(header.hash(), range_length as u64).await; diff --git a/crates/net/p2p/src/headers/client.rs b/crates/net/p2p/src/headers/client.rs index b73ea4e925f9..4be6208c4a2c 100644 --- a/crates/net/p2p/src/headers/client.rs +++ b/crates/net/p2p/src/headers/client.rs @@ -1,8 +1,8 @@ use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use futures::{Future, FutureExt}; pub use reth_eth_wire_types::{BlockHeaders, HeadersDirection}; -use reth_primitives::Header; use std::{ fmt::Debug, pin::Pin, @@ -21,14 +21,45 @@ pub struct HeadersRequest { pub direction: HeadersDirection, } +impl HeadersRequest { + /// Creates a request for a single header (direction doesn't matter). + /// + /// # Arguments + /// * `start` - The block hash or number to start from + pub const fn one(start: BlockHashOrNumber) -> Self { + Self { direction: HeadersDirection::Rising, limit: 1, start } + } + + /// Creates a request for headers in rising direction (ascending block numbers). + /// + /// # Arguments + /// * `start` - The block hash or number to start from + /// * `limit` - Maximum number of headers to retrieve + pub const fn rising(start: BlockHashOrNumber, limit: u64) -> Self { + Self { direction: HeadersDirection::Rising, limit, start } + } + + /// Creates a request for headers in falling direction (descending block numbers). 
+ /// + /// # Arguments + /// * `start` - The block hash or number to start from + /// * `limit` - Maximum number of headers to retrieve + pub const fn falling(start: BlockHashOrNumber, limit: u64) -> Self { + Self { direction: HeadersDirection::Falling, limit, start } + } +} + /// The headers future type -pub type HeadersFut = Pin>> + Send + Sync>>; +pub type HeadersFut = + Pin>> + Send + Sync>>; /// The block headers downloader client #[auto_impl::auto_impl(&, Arc, Box)] pub trait HeadersClient: DownloadClient { + /// The header type this client fetches. + type Header: Send + Sync + Unpin; /// The headers future type - type Output: Future>> + Sync + Send + Unpin; + type Output: Future>> + Sync + Send + Unpin; /// Sends the header request to the p2p network and returns the header response received from a /// peer. @@ -55,12 +86,7 @@ pub trait HeadersClient: DownloadClient { start: BlockHashOrNumber, priority: Priority, ) -> SingleHeaderRequest { - let req = HeadersRequest { - start, - limit: 1, - // doesn't matter for a single header - direction: HeadersDirection::Rising, - }; + let req = HeadersRequest::one(start); let fut = self.get_headers_with_priority(req, priority); SingleHeaderRequest { fut } } @@ -73,11 +99,11 @@ pub struct SingleHeaderRequest { fut: Fut, } -impl Future for SingleHeaderRequest +impl Future for SingleHeaderRequest where - Fut: Future>> + Sync + Send + Unpin, + Fut: Future>> + Sync + Send + Unpin, { - type Output = PeerRequestResult>; + type Output = PeerRequestResult>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let resp = ready!(self.get_mut().fut.poll_unpin(cx)); diff --git a/crates/net/p2p/src/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs index 5565880ed399..eca03bdb4e79 100644 --- a/crates/net/p2p/src/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -1,10 +1,14 @@ use super::error::HeadersDownloaderResult; use crate::error::{DownloadError, DownloadResult}; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::Stream; -use reth_consensus::Consensus; +use reth_consensus::HeaderValidator; use reth_primitives::SealedHeader; +use reth_primitives_traits::BlockWithParent; +use std::fmt::Debug; + /// A downloader capable of fetching and yielding block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block headers, @@ -13,19 +17,25 @@ use reth_primitives::SealedHeader; /// /// A [`HeaderDownloader`] is a [Stream] that returns batches of headers. pub trait HeaderDownloader: - Send + Sync + Stream>> + Unpin + Send + + Sync + + Stream>, Self::Header>> + + Unpin { + /// The header type being downloaded. 
+ type Header: Debug + Send + Sync + Unpin + 'static; + /// Updates the gap to sync which ranges from local head to the sync target /// /// See also [`HeaderDownloader::update_sync_target`] and /// [`HeaderDownloader::update_local_head`] - fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { + fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { self.update_local_head(head); self.update_sync_target(target); } /// Updates the block number of the local database - fn update_local_head(&mut self, head: SealedHeader); + fn update_local_head(&mut self, head: SealedHeader); /// Updates the target we want to sync to fn update_sync_target(&mut self, target: SyncTarget); @@ -50,7 +60,7 @@ pub enum SyncTarget { /// /// The benefit of this variant is, that this already provides the block number of the highest /// missing block. - Gap(SealedHeader), + Gap(BlockWithParent), /// This represents a tip by block number TipNum(u64), } @@ -65,7 +75,7 @@ impl SyncTarget { pub fn tip(&self) -> BlockHashOrNumber { match self { Self::Tip(tip) => (*tip).into(), - Self::Gap(gap) => gap.parent_hash.into(), + Self::Gap(gap) => gap.parent.into(), Self::TipNum(num) => (*num).into(), } } @@ -74,23 +84,23 @@ impl SyncTarget { /// Validate whether the header is valid in relation to it's parent /// /// Returns Ok(false) if the -pub fn validate_header_download( - consensus: &dyn Consensus, - header: &SealedHeader, - parent: &SealedHeader, +pub fn validate_header_download( + consensus: &dyn HeaderValidator, + header: &SealedHeader, + parent: &SealedHeader, ) -> DownloadResult<()> { // validate header against parent consensus.validate_header_against_parent(header, parent).map_err(|error| { DownloadError::HeaderValidation { hash: header.hash(), - number: header.number, + number: header.number(), error: Box::new(error), } })?; // validate header standalone consensus.validate_header(header).map_err(|error| DownloadError::HeaderValidation { hash: header.hash(), - number: header.number, + number: header.number(), error: Box::new(error), })?; Ok(()) diff --git a/crates/net/p2p/src/headers/error.rs b/crates/net/p2p/src/headers/error.rs index b22aae9248ec..8757bb215f5f 100644 --- a/crates/net/p2p/src/headers/error.rs +++ b/crates/net/p2p/src/headers/error.rs @@ -3,19 +3,19 @@ use reth_consensus::ConsensusError; use reth_primitives::SealedHeader; /// Header downloader result -pub type HeadersDownloaderResult = Result; +pub type HeadersDownloaderResult = Result>; /// Error variants that can happen when sending requests to a session. #[derive(Debug, Clone, Eq, PartialEq, Display, Error)] -pub enum HeadersDownloaderError { +pub enum HeadersDownloaderError { /// The downloaded header cannot be attached to the local head, /// but is valid otherwise. #[display("valid downloaded header cannot be attached to the local head: {error}")] DetachedHead { /// The local head we attempted to attach to. - local_head: Box, + local_head: Box>, /// The header we attempted to attach. - header: Box, + header: Box>, /// The error that occurred when attempting to attach the header. 
#[error(source)] error: Box, diff --git a/crates/net/p2p/src/lib.rs b/crates/net/p2p/src/lib.rs index 2ba8012f0ae3..7dcb77671d46 100644 --- a/crates/net/p2p/src/lib.rs +++ b/crates/net/p2p/src/lib.rs @@ -52,3 +52,14 @@ pub use headers::client::HeadersClient; pub trait BlockClient: HeadersClient + BodiesClient + Unpin + Clone {} impl BlockClient for T where T: HeadersClient + BodiesClient + Unpin + Clone {} + +/// The [`BlockClient`] providing Ethereum block parts. +pub trait EthBlockClient: + BlockClient
<Header = Header, Body = BlockBody> +{ +} + +impl<T> EthBlockClient for T where + T: BlockClient<Header = Header, Body = BlockBody>
+{ +} diff --git a/crates/net/p2p/src/test_utils/bodies.rs b/crates/net/p2p/src/test_utils/bodies.rs index cfd292129162..0689d403f2ce 100644 --- a/crates/net/p2p/src/test_utils/bodies.rs +++ b/crates/net/p2p/src/test_utils/bodies.rs @@ -36,6 +36,7 @@ impl BodiesClient for TestBodiesClient where F: Fn(Vec) -> PeerRequestResult> + Send + Sync, { + type Body = BlockBody; type Output = BodiesFut; fn get_block_bodies_with_priority( diff --git a/crates/net/p2p/src/test_utils/full_block.rs b/crates/net/p2p/src/test_utils/full_block.rs index 8a13f69325dc..ee65bcb3f072 100644 --- a/crates/net/p2p/src/test_utils/full_block.rs +++ b/crates/net/p2p/src/test_utils/full_block.rs @@ -5,12 +5,13 @@ use crate::{ headers::client::{HeadersClient, HeadersRequest}, priority::Priority, }; +use alloy_consensus::Header; use alloy_eips::{BlockHashOrNumber, BlockNumHash}; use alloy_primitives::B256; use parking_lot::Mutex; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, Header, SealedBlock, SealedHeader}; +use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; use std::{collections::HashMap, sync::Arc}; /// A headers+bodies client implementation that does nothing. @@ -40,6 +41,7 @@ impl DownloadClient for NoopFullBlockClient { /// Implements the `BodiesClient` trait for the `NoopFullBlockClient` struct. impl BodiesClient for NoopFullBlockClient { + type Body = BlockBody; /// Defines the output type of the function. type Output = futures::future::Ready>>; @@ -65,6 +67,7 @@ impl BodiesClient for NoopFullBlockClient { } impl HeadersClient for NoopFullBlockClient { + type Header = Header; /// The output type representing a future containing a peer request result with a vector of /// headers. type Output = futures::future::Ready>>; @@ -152,6 +155,7 @@ impl DownloadClient for TestFullBlockClient { /// Implements the `HeadersClient` trait for the `TestFullBlockClient` struct. impl HeadersClient for TestFullBlockClient { + type Header = Header; /// Specifies the associated output type. type Output = futures::future::Ready>>; @@ -205,6 +209,7 @@ impl HeadersClient for TestFullBlockClient { /// Implements the `BodiesClient` trait for the `TestFullBlockClient` struct. impl BodiesClient for TestFullBlockClient { + type Body = BlockBody; /// Defines the output type of the function. 
type Output = futures::future::Ready>>; diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index e61183d22e4b..5809ad6bdd40 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -10,12 +10,12 @@ use crate::{ }, priority::Priority, }; -use alloy_primitives::Sealable; +use alloy_consensus::Header; use futures::{Future, FutureExt, Stream, StreamExt}; use reth_consensus::{test_utils::TestConsensus, Consensus}; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{Header, SealedHeader}; +use reth_primitives::SealedHeader; use std::{ fmt, pin::Pin, @@ -62,6 +62,8 @@ impl TestHeaderDownloader { } impl HeaderDownloader for TestHeaderDownloader { + type Header = Header; + fn update_local_head(&mut self, _head: SealedHeader) {} fn update_sync_target(&mut self, _target: SyncTarget) {} @@ -72,7 +74,7 @@ impl HeaderDownloader for TestHeaderDownloader { } impl Stream for TestHeaderDownloader { - type Item = HeadersDownloaderResult>; + type Item = HeadersDownloaderResult, Header>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -143,8 +145,10 @@ impl Stream for TestDownload { return Poll::Ready(None) } - let empty = SealedHeader::default(); - if let Err(error) = this.consensus.validate_header_against_parent(&empty, &empty) { + let empty: SealedHeader = SealedHeader::default(); + if let Err(error) = + >::validate_header_against_parent(&this.consensus, &empty, &empty) + { this.done = true; return Poll::Ready(Some(Err(DownloadError::HeaderValidation { hash: empty.hash(), @@ -156,16 +160,8 @@ impl Stream for TestDownload { match ready!(this.get_or_init_fut().poll_unpin(cx)) { Ok(resp) => { // Skip head and seal headers - let mut headers = resp - .1 - .into_iter() - .skip(1) - .map(|header| { - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) - .collect::>(); + let mut headers = + resp.1.into_iter().skip(1).map(SealedHeader::seal).collect::>(); headers.sort_unstable_by_key(|h| h.number); headers.into_iter().for_each(|h| this.buffer.push(h)); this.done = true; @@ -227,6 +223,7 @@ impl DownloadClient for TestHeadersClient { } impl HeadersClient for TestHeadersClient { + type Header = Header; type Output = TestHeadersFut; fn get_headers_with_priority( diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index b2bf001862ee..ab4595d33624 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -18,13 +18,14 @@ reth-evm.workspace = true reth-provider.workspace = true reth-engine-primitives.workspace = true reth-transaction-pool.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-network-api.workspace = true reth-node-types.workspace = true -reth-primitives.workspace = true reth-node-core.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true -eyre.workspace = true \ No newline at end of file +eyre.workspace = true diff --git a/crates/node/api/src/lib.rs b/crates/node/api/src/lib.rs index 099cf82b5fe0..105cac47d942 100644 --- a/crates/node/api/src/lib.rs +++ b/crates/node/api/src/lib.rs @@ -16,6 +16,10 @@ pub use reth_engine_primitives::*; pub use reth_payload_primitives as payload; pub use reth_payload_primitives::*; +/// Traits and helper types used to abstract over payload builder types. 
+pub use reth_payload_builder_primitives as payload_builder; +pub use reth_payload_builder_primitives::*; + /// Traits and helper types used to abstract over EVM methods and types. pub use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 253145ea9eb6..5d25d8d592c7 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,6 +1,7 @@ //! Traits for configuring a node. use crate::ConfigureEvm; +use alloy_consensus::Header; use alloy_rpc_types_engine::JwtSecret; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_consensus::Consensus; @@ -8,8 +9,7 @@ use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; use reth_node_types::{NodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; -use reth_payload_primitives::PayloadBuilder; -use reth_primitives::Header; +use reth_payload_builder_primitives::PayloadBuilder; use reth_provider::FullProvider; use reth_tasks::TaskExecutor; use reth_transaction_pool::TransactionPool; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 4ef2b0728e07..781112d93c8a 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -41,7 +41,6 @@ reth-node-core.workspace = true reth-node-events.workspace = true reth-node-metrics.workspace = true reth-payload-builder.workspace = true -reth-payload-primitives.workspace = true reth-payload-validator.workspace = true reth-primitives.workspace = true reth-provider.workspace = true @@ -62,6 +61,8 @@ reth-transaction-pool.workspace = true ## ethereum alloy-primitives.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } +alloy-consensus.workspace = true +revm-primitives.workspace = true ## async futures.workspace = true @@ -96,20 +97,20 @@ tempfile.workspace = true [features] default = [] test-utils = [ - "reth-db/test-utils", - "reth-blockchain-tree/test-utils", - "reth-chain-state/test-utils", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-engine-tree/test-utils", - "reth-evm/test-utils", - "reth-downloaders/test-utils", - "reth-network/test-utils", - "reth-network-p2p/test-utils", - "reth-payload-builder/test-utils", - "reth-primitives/test-utils", - "reth-stages/test-utils", - "reth-db-api/test-utils", - "reth-provider/test-utils", - "reth-transaction-pool/test-utils" + "reth-db/test-utils", + "reth-blockchain-tree/test-utils", + "reth-chain-state/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-engine-tree/test-utils", + "reth-evm/test-utils", + "reth-downloaders/test-utils", + "reth-network/test-utils", + "reth-network-p2p/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-stages/test-utils", + "reth-db-api/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", ] diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 2e00b08f8a56..06d5294d800a 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -2,13 +2,6 @@ #![allow(clippy::type_complexity, missing_debug_implementations)] -pub mod add_ons; -mod states; - -pub use states::*; - -use std::sync::Arc; - use crate::{ common::WithConfigs, components::NodeComponentsBuilder, @@ -17,6 +10,7 @@ use crate::{ DefaultNodeLauncher, LaunchNode, Node, NodeHandle, }; use futures::Future; +use 
reth_blockchain_tree::externals::NodeTypesForTree; use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli_util::get_secret_key; use reth_db_api::{ @@ -29,8 +23,8 @@ use reth_network::{ NetworkHandle, NetworkManager, }; use reth_node_api::{ - FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, NodeTypesWithDBAdapter, - NodeTypesWithEngine, + FullNodePrimitives, FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, + NodeTypesWithDBAdapter, NodeTypesWithEngine, }; use reth_node_core::{ cli::config::{PayloadBuilderConfig, RethTransactionPoolConfig}, @@ -38,13 +32,22 @@ use reth_node_core::{ node_config::NodeConfig, primitives::Head, }; -use reth_primitives::revm_primitives::EnvKzgSettings; -use reth_provider::{providers::BlockchainProvider, ChainSpecProvider, FullProvider}; +use reth_provider::{ + providers::{BlockchainProvider, NodeTypesForProvider}, + BlockReader, ChainSpecProvider, FullProvider, +}; use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, TransactionPool}; +use revm_primitives::EnvKzgSettings; use secp256k1::SecretKey; +use std::sync::Arc; use tracing::{info, trace, warn}; +pub mod add_ons; + +mod states; +pub use states::*; + /// The adapter type for a reth node with the builtin provider type // Note: we need to hardcode this because custom components might depend on it in associated types. pub type RethFullAdapter = FullNodeTypesAdapter< @@ -241,7 +244,7 @@ where /// Configures the types of the node. pub fn with_types(self) -> NodeBuilderWithTypes> where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForTree, { self.with_types_and_provider() } @@ -251,7 +254,7 @@ where self, ) -> NodeBuilderWithTypes, P>> where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForProvider, P: FullProvider>, { NodeBuilderWithTypes::new(self.config, self.database) @@ -265,7 +268,7 @@ where node: N, ) -> NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns> where - N: Node, ChainSpec = ChainSpec>, + N: Node, ChainSpec = ChainSpec> + NodeTypesForTree, { self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons()) } @@ -302,7 +305,7 @@ where /// Configures the types of the node. 
pub fn with_types(self) -> WithLaunchContext>> where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForTree, { WithLaunchContext { builder: self.builder.with_types(), task_executor: self.task_executor } } @@ -314,7 +317,7 @@ where NodeBuilderWithTypes, P>>, > where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForProvider, P: FullProvider>, { WithLaunchContext { @@ -333,7 +336,7 @@ where NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns>, > where - N: Node, ChainSpec = ChainSpec>, + N: Node, ChainSpec = ChainSpec> + NodeTypesForTree, { self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons()) } @@ -356,13 +359,14 @@ where >, > where - N: Node, ChainSpec = ChainSpec>, + N: Node, ChainSpec = ChainSpec> + NodeTypesForTree, N::AddOns: RethRpcAddOns< NodeAdapter< RethFullAdapter, >>::Components, >, >, + N::Primitives: FullNodePrimitives, { self.node(node).launch().await } @@ -550,7 +554,7 @@ where impl WithLaunchContext, CB, AO>> where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForTree, CB: NodeComponentsBuilder>, AO: RethRpcAddOns, CB::Components>>, { @@ -647,6 +651,8 @@ impl BuilderContext { pub fn start_network(&self, builder: NetworkBuilder<(), ()>, pool: Pool) -> NetworkHandle where Pool: TransactionPool + Unpin + 'static, + Node::Provider: + BlockReader, { self.start_network_with(builder, pool, Default::default()) } @@ -665,6 +671,8 @@ impl BuilderContext { ) -> NetworkHandle where Pool: TransactionPool + Unpin + 'static, + Node::Provider: + BlockReader, { let (handle, network, txpool, eth) = builder .transactions(pool, tx_config) diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 41ce36858d8b..95c0c764b5c3 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -7,11 +7,11 @@ use crate::{ }, BuilderContext, ConfigureEvm, FullNodeTypes, }; +use alloy_consensus::Header; use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; use reth_node_api::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; -use reth_primitives::Header; use reth_transaction_pool::TransactionPool; use std::{future::Future, marker::PhantomData}; diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs index 90cff588f7cc..4e8f63f412bc 100644 --- a/crates/node/builder/src/components/execute.rs +++ b/crates/node/builder/src/components/execute.rs @@ -1,8 +1,8 @@ //! EVM component for the node builder. use crate::{BuilderContext, FullNodeTypes}; +use alloy_consensus::Header; use reth_evm::execute::BlockExecutorProvider; use reth_node_api::ConfigureEvm; -use reth_primitives::Header; use std::future::Future; /// A type that knows how to build the executor types. 
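The executor and consensus builders touched above share one shape: a builder is resolved asynchronously against the builder context, and closures are accepted as builders for convenience. A minimal sketch of that pattern under assumed names (`ComponentBuilder` is illustrative, not reth's actual trait; `eyre` as the error crate, as elsewhere in the workspace):

```rust
use std::future::Future;

/// Sketch of the component-builder pattern: each builder resolves one node
/// component asynchronously from a context value.
trait ComponentBuilder<Ctx> {
    type Component;

    fn build(self, ctx: Ctx) -> impl Future<Output = eyre::Result<Self::Component>> + Send;
}

/// Blanket impl so plain closures returning futures can act as builders,
/// mirroring the convenience impls in the components module.
impl<Ctx, F, Fut, C> ComponentBuilder<Ctx> for F
where
    F: FnOnce(Ctx) -> Fut,
    Fut: Future<Output = eyre::Result<C>> + Send,
{
    type Component = C;

    fn build(self, ctx: Ctx) -> impl Future<Output = eyre::Result<C>> + Send {
        self(ctx)
    }
}
```

Passing the context by value keeps the sketch self-contained; the real builders take a reference to the builder context.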
diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 29b667d5409c..1fe35e554d51 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -22,13 +22,13 @@ pub use payload::*; pub use pool::*; use crate::{ConfigureEvm, FullNodeTypes}; +use alloy_consensus::Header; use reth_consensus::Consensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::FullNetwork; use reth_node_api::NodeTypesWithEngine; use reth_payload_builder::PayloadBuilderHandle; -use reth_primitives::Header; use reth_transaction_pool::TransactionPool; /// An abstraction over the components of a node, consisting of: diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 856f86c6fe02..830909c8cc4c 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -2,17 +2,19 @@ use std::{sync::Arc, thread::available_parallelism}; +use crate::{ + components::{NodeComponents, NodeComponentsBuilder}, + hooks::OnComponentInitializedHook, + BuilderContext, NodeAdapter, +}; use alloy_primitives::{BlockNumber, B256}; use eyre::{Context, OptionExt}; use rayon::ThreadPoolBuilder; use reth_beacon_consensus::EthBeaconConsensus; -use reth_blockchain_tree::{ - BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, -}; use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; use reth_consensus::Consensus; -use reth_db_api::database::Database; +use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_db_common::init::{init_genesis, InitDatabaseError}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_engine_local::MiningMode; @@ -21,7 +23,7 @@ use reth_evm::noop::NoopBlockExecutorProvider; use reth_fs_util as fs; use reth_invalid_block_hooks::InvalidBlockWitnessHook; use reth_network_p2p::headers::client::HeadersClient; -use reth_node_api::{FullNodeTypes, NodeTypes, NodeTypesWithDB}; +use reth_node_api::{FullNodePrimitives, FullNodeTypes, NodeTypes, NodeTypesWithDB}; use reth_node_core::{ args::InvalidBlockHookType, dirs::{ChainPath, DataDirPath}, @@ -34,15 +36,15 @@ use reth_node_core::{ use reth_node_metrics::{ chain::ChainSpecInfo, hooks::Hooks, + recorder::install_prometheus_recorder, server::{MetricServer, MetricServerConfig}, version::VersionInfo, }; use reth_primitives::Head; use reth_provider::{ - providers::{BlockchainProvider, BlockchainProvider2, ProviderNodeTypes, StaticFileProvider}, - BlockHashReader, CanonStateNotificationSender, ChainSpecProvider, ProviderFactory, + providers::{ProviderNodeTypes, StaticFileProvider}, + BlockHashReader, BlockNumReader, ChainSpecProvider, ProviderError, ProviderFactory, ProviderResult, StageCheckpointReader, StateProviderFactory, StaticFileProviderFactory, - TreeViewer, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_api::clients::EthApiClient; @@ -58,33 +60,6 @@ use tokio::sync::{ oneshot, watch, }; -use crate::{ - components::{NodeComponents, NodeComponentsBuilder}, - hooks::OnComponentInitializedHook, - BuilderContext, NodeAdapter, -}; - -/// Allows to set a tree viewer for a configured blockchain provider. -// TODO: remove this helper trait once the engine revamp is done, the new -// blockchain provider won't require a TreeViewer. 
-// https://github.com/paradigmxyz/reth/issues/8742 -pub trait WithTree { - /// Setter for tree viewer. - fn set_tree(self, tree: Arc) -> Self; -} - -impl WithTree for BlockchainProvider { - fn set_tree(self, tree: Arc) -> Self { - self.with_tree(tree) - } -} - -impl WithTree for BlockchainProvider2 { - fn set_tree(self, _tree: Arc) -> Self { - self - } -} - /// Reusable setup for launching a node. /// /// This provides commonly used boilerplate for launching a node. @@ -404,9 +379,15 @@ where /// Returns the [`ProviderFactory`] for the attached storage after executing a consistent check /// between the database and static files. **It may execute a pipeline unwind if it fails this /// check.** - pub async fn create_provider_factory>( - &self, - ) -> eyre::Result> { + pub async fn create_provider_factory(&self) -> eyre::Result> + where + N: ProviderNodeTypes, + N::Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, + { let factory = ProviderFactory::new( self.right().clone(), self.chain_spec(), @@ -416,7 +397,7 @@ where .with_static_files_metrics(); let has_receipt_pruning = - self.toml_config().prune.as_ref().map_or(false, |a| a.has_receipts_pruning()); + self.toml_config().prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); // Check for consistency between database and static files. If it fails, it unwinds to // the first block that's consistent between database and static files. @@ -467,9 +448,17 @@ where } /// Creates a new [`ProviderFactory`] and attaches it to the launch context. - pub async fn with_provider_factory>( + pub async fn with_provider_factory( self, - ) -> eyre::Result, ProviderFactory>>> { + ) -> eyre::Result, ProviderFactory>>> + where + N: ProviderNodeTypes, + N::Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, + { let factory = self.create_provider_factory().await?; let ctx = LaunchContextWith { inner: self.inner, @@ -482,7 +471,7 @@ where impl LaunchContextWith, ProviderFactory>> where - T: NodeTypesWithDB, + T: ProviderNodeTypes, { /// Returns access to the underlying database. pub const fn database(&self) -> &T::DB { @@ -495,7 +484,7 @@ where } /// Returns the static file provider to interact with the static files. - pub fn static_file_provider(&self) -> StaticFileProvider { + pub fn static_file_provider(&self) -> StaticFileProvider { self.right().static_file_provider() } @@ -509,6 +498,9 @@ where /// Starts the prometheus endpoint. 
pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { + // ensure recorder runs upkeep periodically + install_prometheus_recorder().spawn_upkeep(); + let listen_addr = self.node_config().metrics; if let Some(addr) = listen_addr { info!(target: "reth::cli", "Starting metrics endpoint at {}", addr); @@ -524,7 +516,20 @@ where }, ChainSpecInfo { name: self.left().config.chain.chain().to_string() }, self.task_executor().clone(), - Hooks::new(self.database().clone(), self.static_file_provider()), + Hooks::builder() + .with_hook({ + let db = self.database().clone(); + move || db.report_metrics() + }) + .with_hook({ + let sfp = self.static_file_provider(); + move || { + if let Err(error) = sfp.report_metrics() { + error!(%error, "Failed to report metrics for the static file provider"); + } + } + }) + .build(), ); MetricServer::new(config).serve().await?; @@ -587,8 +592,6 @@ where pub fn with_blockchain_db( self, create_blockchain_provider: F, - tree_config: BlockchainTreeConfig, - canon_state_notification_sender: CanonStateNotificationSender, ) -> eyre::Result, WithMeteredProviders>>> where T: FullNodeTypes, @@ -602,8 +605,6 @@ where metrics_sender: self.sync_metrics_tx(), }, blockchain_db, - tree_config, - canon_state_notification_sender, }; let ctx = LaunchContextWith { @@ -620,7 +621,7 @@ impl Attached::ChainSpec>, WithMeteredProviders>, > where - T: FullNodeTypes, + T: FullNodeTypes, { /// Returns access to the underlying database. pub const fn database(&self) -> &::DB { @@ -651,16 +652,6 @@ where &self.right().blockchain_db } - /// Returns a reference to the `BlockchainTreeConfig`. - pub const fn tree_config(&self) -> &BlockchainTreeConfig { - &self.right().tree_config - } - - /// Returns the `CanonStateNotificationSender`. - pub fn canon_state_notification_sender(&self) -> CanonStateNotificationSender { - self.right().canon_state_notification_sender.clone() - } - /// Creates a `NodeAdapter` and attaches it to the launch context. pub async fn with_components( self, @@ -689,31 +680,13 @@ where debug!(target: "reth::cli", "creating components"); let components = components_builder.build_components(&builder_ctx).await?; - let consensus: Arc = Arc::new(components.consensus().clone()); - - let tree_externals = TreeExternals::new( - self.provider_factory().clone().with_prune_modes(self.prune_modes()), - consensus.clone(), - components.block_executor().clone(), - ); - let tree = BlockchainTree::new(tree_externals, *self.tree_config())? - .with_sync_metrics_tx(self.sync_metrics_tx()) - // Note: This is required because we need to ensure that both the components and the - // tree are using the same channel for canon state notifications. 
This will be removed - // once the Blockchain provider no longer depends on an instance of the tree - .with_canon_state_notification_sender(self.canon_state_notification_sender()); - - let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); - - // Replace the tree component with the actual tree - let blockchain_db = self.blockchain_db().clone().set_tree(blockchain_tree); - - debug!(target: "reth::cli", "configured blockchain tree"); + let blockchain_db = self.blockchain_db().clone(); + let consensus = Arc::new(components.consensus().clone()); let node_adapter = NodeAdapter { components, task_executor: self.task_executor().clone(), - provider: blockchain_db.clone(), + provider: blockchain_db, }; debug!(target: "reth::cli", "calling on_component_initialized hook"); @@ -724,8 +697,6 @@ where provider_factory: self.provider_factory().clone(), metrics_sender: self.sync_metrics_tx(), }, - blockchain_db, - tree_config: self.right().tree_config, node_adapter, head, consensus, @@ -745,10 +716,7 @@ impl Attached::ChainSpec>, WithComponents>, > where - T: FullNodeTypes< - Provider: WithTree, - Types: NodeTypes, - >, + T: FullNodeTypes, CB: NodeComponentsBuilder, { /// Returns the configured `ProviderFactory`. @@ -760,13 +728,13 @@ where /// necessary pub async fn max_block(&self, client: C) -> eyre::Result> where - C: HeadersClient, + C: HeadersClient
, { self.node_config().max_block(client, self.provider_factory().clone()).await } /// Returns the static file provider to interact with the static files. - pub fn static_file_provider(&self) -> StaticFileProvider { + pub fn static_file_provider(&self) -> StaticFileProvider<::Primitives> { self.provider_factory().static_file_provider() } @@ -785,9 +753,14 @@ where &self.right().node_adapter } + /// Returns mutable reference to the configured `NodeAdapter`. + pub fn node_adapter_mut(&mut self) -> &mut NodeAdapter { + &mut self.right_mut().node_adapter + } + /// Returns a reference to the blockchain provider. pub const fn blockchain_db(&self) -> &T::Provider { - &self.right().blockchain_db + &self.node_adapter().provider } /// Returns the initial backfill to sync to at launch. @@ -814,6 +787,26 @@ where self.node_config().debug.terminate || self.node_config().debug.max_block.is_some() } + /// Ensures that the database matches chain-specific requirements. + /// + /// This checks for OP-Mainnet and ensures we have all the necessary data to progress (past + /// bedrock height) + fn ensure_chain_specific_db_checks(&self) -> ProviderResult<()> { + if self.chain_spec().is_optimism() && + !self.is_dev() && + self.chain_id() == Chain::optimism_mainnet() + { + let latest = self.blockchain_db().last_block_number()?; + // bedrock height + if latest < 105235063 { + error!("Op-mainnet has been launched without importing the pre-Bedrock state. The chain can't progress without this. See also https://reth.rs/run/sync-op-mainnet.html?minimal-bootstrap-recommended"); + return Err(ProviderError::BestBlockNotFound) + } + } + + Ok(()) + } + /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less /// than the checkpoint of the first stage). /// @@ -857,6 +850,8 @@ where } } + self.ensure_chain_specific_db_checks()?; + Ok(None) } @@ -870,11 +865,6 @@ where self.right().db_provider_container.metrics_sender.clone() } - /// Returns a reference to the `BlockchainTreeConfig`. - pub const fn tree_config(&self) -> &BlockchainTreeConfig { - &self.right().tree_config - } - /// Returns the node adapter components. pub const fn components(&self) -> &CB::Components { &self.node_adapter().components @@ -886,10 +876,7 @@ impl Attached::ChainSpec>, WithComponents>, > where - T: FullNodeTypes< - Provider: WithTree + StateProviderFactory + ChainSpecProvider, - Types: NodeTypes, - >, + T: FullNodeTypes, CB: NodeComponentsBuilder, { /// Returns the [`InvalidBlockHook`] to use for the node. @@ -1021,7 +1008,7 @@ pub struct WithMeteredProvider { metrics_sender: UnboundedSender, } -/// Helper container to bundle the [`ProviderFactory`], [`BlockchainProvider`] +/// Helper container to bundle the [`ProviderFactory`], [`FullNodeTypes::Provider`] /// and a metrics sender. #[allow(missing_debug_implementations)] pub struct WithMeteredProviders @@ -1030,8 +1017,6 @@ where { db_provider_container: WithMeteredProvider, blockchain_db: T::Provider, - canon_state_notification_sender: CanonStateNotificationSender, - tree_config: BlockchainTreeConfig, } /// Helper container to bundle the metered providers container and [`NodeAdapter`]. 
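The new `ensure_chain_specific_db_checks` above aborts launch on OP mainnet when the pre-Bedrock state has not been imported. Its decision logic reduces to a small pure function; a sketch under the same constants (the real check reads `last_block_number()` from the blockchain provider, as shown in the hunk):

```rust
use reth_storage_errors::provider::{ProviderError, ProviderResult};

/// Bedrock height from the hunk above: OP mainnet cannot progress below it
/// without the imported pre-Bedrock state.
const BEDROCK_HEIGHT: u64 = 105_235_063;

fn check_op_mainnet_progress(is_op_mainnet: bool, is_dev: bool, latest: u64) -> ProviderResult<()> {
    if is_op_mainnet && !is_dev && latest < BEDROCK_HEIGHT {
        // Mirrors the error the launch context returns before aborting startup.
        return Err(ProviderError::BestBlockNotFound)
    }
    Ok(())
}
```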
@@ -1042,8 +1027,6 @@ where CB: NodeComponentsBuilder, { db_provider_container: WithMeteredProvider, - tree_config: BlockchainTreeConfig, - blockchain_db: T::Provider, node_adapter: NodeAdapter, head: Head, consensus: Arc, diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 65433176ba99..b1141314d106 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -5,21 +5,22 @@ use reth_beacon_consensus::{ hooks::{EngineHooks, StaticFileHook}, BeaconConsensusEngineHandle, }; -use reth_blockchain_tree::BlockchainTreeConfig; use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider}; use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ engine::{EngineApiRequest, EngineRequestHandler}, + persistence::PersistenceNodeTypes, tree::TreeConfig, }; use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; use reth_network::{NetworkSyncUpdater, SyncState}; -use reth_network_api::{BlockDownloaderProvider, NetworkEventListenerProvider}; +use reth_network_api::BlockDownloaderProvider; use reth_node_api::{ - BuiltPayload, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadTypes, + BlockTy, BuiltPayload, EngineValidator, FullNodeTypes, NodeTypesWithEngine, + PayloadAttributesBuilder, PayloadBuilder, PayloadTypes, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, @@ -27,8 +28,7 @@ use reth_node_core::{ primitives::Head, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_payload_primitives::PayloadBuilder; -use reth_primitives::EthereumHardforks; +use reth_primitives::{EthPrimitives, EthereumHardforks}; use reth_provider::providers::{BlockchainProvider2, ProviderNodeTypes}; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; @@ -40,7 +40,7 @@ use tokio_stream::wrappers::UnboundedReceiverStream; use crate::{ common::{Attached, LaunchContextWith, WithConfigs}, hooks::NodeHooks, - rpc::{RethRpcAddOns, RpcHandle}, + rpc::{EngineValidatorAddOn, RethRpcAddOns, RpcHandle}, setup::build_networked_pipeline, AddOns, AddOnsContext, ExExLauncher, FullNode, LaunchContext, LaunchNode, NodeAdapter, NodeBuilderWithComponents, NodeComponents, NodeComponentsBuilder, NodeHandle, NodeTypesAdapter, @@ -70,10 +70,19 @@ impl EngineNodeLauncher { impl LaunchNode> for EngineNodeLauncher where - Types: ProviderNodeTypes + NodeTypesWithEngine, + Types: + ProviderNodeTypes + NodeTypesWithEngine + PersistenceNodeTypes, T: FullNodeTypes>, CB: NodeComponentsBuilder, - AO: RethRpcAddOns>, + AO: RethRpcAddOns> + + EngineValidatorAddOn< + NodeAdapter, + Validator: EngineValidator< + ::Engine, + Block = BlockTy, + >, + >, + LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, @@ -93,15 +102,6 @@ where } = target; let NodeHooks { on_component_initialized, on_node_started, .. } = hooks; - // TODO: move tree_config and canon_state_notification_sender - // initialization to with_blockchain_db once the engine revamp is done - // https://github.com/paradigmxyz/reth/issues/8742 - let tree_config = BlockchainTreeConfig::default(); - - // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. 
This will be removed once the Blockchain provider no longer depends on an instance of the tree: - let (canon_state_notification_sender, _receiver) = - tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); - // setup the launch context let ctx = ctx .with_configured_globals() @@ -131,7 +131,7 @@ where // later the components. .with_blockchain_db::(move |provider_factory| { Ok(BlockchainProvider2::new(provider_factory)?) - }, tree_config, canon_state_notification_sender)? + })? .with_components(components_builder, on_component_initialized).await?; // spawn exexs @@ -203,10 +203,24 @@ where pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); } let pruner = pruner_builder.build_with_provider_factory(ctx.provider_factory().clone()); - let pruner_events = pruner.events(); info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); + let event_sender = EventSender::default(); + let beacon_engine_handle = + BeaconConsensusEngineHandle::new(consensus_engine_tx.clone(), event_sender.clone()); + + // extract the jwt secret from the args if possible + let jwt_secret = ctx.auth_jwt_secret()?; + + let add_ons_ctx = AddOnsContext { + node: ctx.node_adapter().clone(), + config: ctx.node_config(), + beacon_engine_handle: beacon_engine_handle.clone(), + jwt_secret, + }; + let engine_payload_validator = add_ons.engine_validator(&add_ons_ctx).await?; + let mut engine_service = if ctx.is_dev() { let eth_service = LocalEngineService::new( ctx.consensus(), @@ -215,6 +229,7 @@ where ctx.blockchain_db().clone(), pruner, ctx.components().payload_builder().clone(), + engine_payload_validator, engine_tree_config, ctx.invalid_block_hook()?, ctx.sync_metrics_tx(), @@ -238,6 +253,7 @@ where ctx.blockchain_db().clone(), pruner, ctx.components().payload_builder().clone(), + engine_payload_validator, engine_tree_config, ctx.invalid_block_hook()?, ctx.sync_metrics_tx(), @@ -246,15 +262,9 @@ where Either::Right(eth_service) }; - let event_sender = EventSender::default(); - - let beacon_engine_handle = - BeaconConsensusEngineHandle::new(consensus_engine_tx, event_sender.clone()); - info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( - ctx.components().network().event_listener().map(Into::into), beacon_engine_handle.event_listener().map(Into::into), pipeline_events.map(Into::into), if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { @@ -277,16 +287,6 @@ where ), ); - // extract the jwt secret from the args if possible - let jwt_secret = ctx.auth_jwt_secret()?; - - let add_ons_ctx = AddOnsContext { - node: ctx.node_adapter().clone(), - config: ctx.node_config(), - beacon_engine_handle, - jwt_secret, - }; - let RpcHandle { rpc_server_handles, rpc_registry } = add_ons.launch_add_ons(add_ons_ctx).await?; diff --git a/crates/node/builder/src/launch/exex.rs b/crates/node/builder/src/launch/exex.rs index a3640690c1dc..0eef0d005763 100644 --- a/crates/node/builder/src/launch/exex.rs +++ b/crates/node/builder/src/launch/exex.rs @@ -10,7 +10,7 @@ use reth_exex::{ DEFAULT_EXEX_MANAGER_CAPACITY, }; use reth_node_api::{FullNodeComponents, NodeTypes}; -use reth_primitives::Head; +use reth_primitives::{EthPrimitives, Head}; use reth_provider::CanonStateSubscriptions; use reth_tracing::tracing::{debug, info}; use tracing::Instrument; @@ -25,7 +25,9 @@ pub struct ExExLauncher { config_container: WithConfigs<::ChainSpec>, } -impl ExExLauncher { +impl> + Clone> + ExExLauncher +{ /// Create a new `ExExLauncher` 
with the given extensions. pub const fn new( head: Head, diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 4f9e850c97f1..9f2c027f76b5 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -16,19 +16,22 @@ use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, BeaconConsensusEngine, }; -use reth_blockchain_tree::{noop::NoopBlockchainTree, BlockchainTreeConfig}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_blockchain_tree::{ + externals::TreeNodeTypes, noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, + ShareableBlockchainTree, TreeExternals, +}; +use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; -use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider}; -use reth_node_api::{AddOnsContext, FullNodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; +use reth_network::BlockDownloaderProvider; +use reth_node_api::{AddOnsContext, FullNodeTypes, NodeTypesWithEngine}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_provider::providers::BlockchainProvider; +use reth_provider::providers::{BlockchainProvider, ProviderNodeTypes}; use reth_rpc::eth::RpcNodeCore; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; @@ -67,7 +70,7 @@ pub trait LaunchNode { type Node; /// Create and return a new node asynchronously. - fn launch_node(self, target: Target) -> impl Future> + Send; + fn launch_node(self, target: Target) -> impl Future>; } impl LaunchNode for F @@ -77,7 +80,7 @@ where { type Node = Node; - fn launch_node(self, target: Target) -> impl Future> + Send { + fn launch_node(self, target: Target) -> impl Future> { self(target) } } @@ -98,7 +101,7 @@ impl DefaultNodeLauncher { impl LaunchNode> for DefaultNodeLauncher where - Types: NodeTypesWithDB + NodeTypesWithEngine, + Types: ProviderNodeTypes + NodeTypesWithEngine + TreeNodeTypes, T: FullNodeTypes, Types = Types>, CB: NodeComponentsBuilder, AO: RethRpcAddOns>, @@ -132,7 +135,7 @@ where )); // setup the launch context - let ctx = ctx + let mut ctx = ctx .with_configured_globals() // load the toml config .with_loaded_toml_config(config)? @@ -160,9 +163,29 @@ where // later the components. .with_blockchain_db::(move |provider_factory| { Ok(BlockchainProvider::new(provider_factory, tree)?) - }, tree_config, canon_state_notification_sender)? + })? .with_components(components_builder, on_component_initialized).await?; + let consensus = Arc::new(ctx.components().consensus().clone()); + + let tree_externals = TreeExternals::new( + ctx.provider_factory().clone(), + consensus.clone(), + ctx.components().block_executor().clone(), + ); + let tree = BlockchainTree::new(tree_externals, tree_config)? + .with_sync_metrics_tx(ctx.sync_metrics_tx()) + // Note: This is required because we need to ensure that both the components and the + // tree are using the same channel for canon state notifications. 
This will be removed + // once the Blockchain provider no longer depends on an instance of the tree + .with_canon_state_notification_sender(canon_state_notification_sender); + + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); + + ctx.node_adapter_mut().provider = ctx.blockchain_db().clone().with_tree(blockchain_tree); + + debug!(target: "reth::cli", "configured blockchain tree"); + // spawn exexs let exex_manager_handle = ExExLauncher::new( ctx.head(), @@ -260,8 +283,6 @@ where info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( - ctx.components().network().event_listener().map(Into::into), - beacon_engine_handle.event_listener().map(Into::into), pipeline_events.map(Into::into), if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { Either::Left( diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 62c710ea8022..ce7d12fee3d3 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -71,6 +71,8 @@ where type ChainSpec = ::ChainSpec; type StateCommitment = ::StateCommitment; + + type Storage = ::Storage; } impl NodeTypesWithEngine for AnyNode diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 4530bbe70144..55313f3e9898 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -11,12 +11,14 @@ use alloy_rpc_types::engine::ClientVersionV1; use futures::TryFutureExt; use reth_node_api::{ AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, NodeTypes, NodeTypesWithEngine, + PayloadBuilder, }; use reth_node_core::{ node_config::NodeConfig, version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_payload_builder::PayloadStore; +use reth_primitives::EthPrimitives; use reth_provider::providers::ProviderNodeTypes; use reth_rpc::{ eth::{EthApiTypes, FullEthApiServer}, @@ -398,18 +400,25 @@ where } } -impl NodeAddOns for RpcAddOns +impl RpcAddOns where N: FullNodeComponents< - Types: ProviderNodeTypes, - PayloadBuilder: Into::Engine>>, + Types: ProviderNodeTypes, + PayloadBuilder: PayloadBuilder::Engine>, >, EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, EV: EngineValidatorBuilder, { - type Handle = RpcHandle; - - async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { + /// Launches the RPC servers with the given context and an additional hook for extending + /// modules. 
+ pub async fn launch_add_ons_with( + self, + ctx: AddOnsContext<'_, N>, + ext: F, + ) -> eyre::Result> + where + F: FnOnce(&mut TransportRpcModules) -> eyre::Result<()>, + { let Self { eth_api_builder, engine_validator_builder, hooks, _pd: _ } = self; let engine_validator = engine_validator_builder.build(&ctx).await?; @@ -426,7 +435,7 @@ where node.provider().clone(), config.chain.clone(), beacon_engine_handle, - node.payload_builder().clone().into(), + PayloadStore::new(node.payload_builder().clone()), node.pool().clone(), Box::new(node.task_executor().clone()), client, @@ -466,6 +475,7 @@ where let RpcHooks { on_rpc_started, extend_rpc_modules } = hooks; + ext(ctx.modules)?; extend_rpc_modules.extend_rpc_modules(ctx)?; let server_config = config.rpc.rpc_server_config(); @@ -512,6 +522,22 @@ where } } +impl NodeAddOns for RpcAddOns +where + N: FullNodeComponents< + Types: ProviderNodeTypes, + PayloadBuilder: PayloadBuilder::Engine>, + >, + EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, + EV: EngineValidatorBuilder, +{ + type Handle = RpcHandle; + + async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { + self.launch_add_ons_with(ctx, |_| Ok(())).await + } +} + /// Helper trait implemented for add-ons producing [`RpcHandle`]. Used by common node launcher /// implementations. pub trait RethRpcAddOns: @@ -541,19 +567,43 @@ pub trait EthApiBuilder: 'static { fn build(ctx: &EthApiBuilderCtx) -> Self; } -impl EthApiBuilder for EthApi { +impl>> EthApiBuilder + for EthApi +{ fn build(ctx: &EthApiBuilderCtx) -> Self { Self::with_spawner(ctx) } } +/// Helper trait that provides the validator for the engine API +pub trait EngineValidatorAddOn: Send { + /// The Validator type to use for the engine API. + type Validator: EngineValidator<::Engine>; + + /// Creates the engine validator for an engine API based node. + fn engine_validator( + &self, + ctx: &AddOnsContext<'_, Node>, + ) -> impl Future>; +} + +impl EngineValidatorAddOn for RpcAddOns +where + N: FullNodeComponents, + EthApi: EthApiTypes, + EV: EngineValidatorBuilder, +{ + type Validator = EV::Validator; + + async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { + self.engine_validator_builder.clone().build(ctx).await + } +} + /// A type that knows how to build the engine validator. -pub trait EngineValidatorBuilder: Send { +pub trait EngineValidatorBuilder: Send + Sync + Clone { /// The consensus implementation to build. - type Validator: EngineValidator<::Engine> - + Clone - + Unpin - + 'static; + type Validator: EngineValidator<::Engine>; /// Creates the engine validator. 
fn build( @@ -567,7 +617,7 @@ where Node: FullNodeComponents, Validator: EngineValidator<::Engine> + Clone + Unpin + 'static, - F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send, + F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send + Sync + Clone, Fut: Future> + Send, { type Validator = Validator; diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 3591868ddad9..092c1fdf6518 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -12,8 +12,9 @@ use reth_downloaders::{ use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_network_p2p::{ - bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, BlockClient, + bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, EthBlockClient, }; +use reth_node_api::{BodyTy, FullNodePrimitives}; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; @@ -26,7 +27,7 @@ use tokio::sync::watch; pub fn build_networked_pipeline( config: &StageConfig, client: Client, - consensus: Arc, + consensus: Arc>, provider_factory: ProviderFactory, task_executor: &TaskExecutor, metrics_tx: reth_stages::MetricEventsSender, @@ -38,12 +39,17 @@ pub fn build_networked_pipeline( ) -> eyre::Result> where N: ProviderNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, Executor: BlockExecutorProvider, + N::Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) - .build(client.clone(), Arc::clone(&consensus)) + .build(client.clone(), consensus.clone().as_header_validator()) .into_task_with(task_executor); let body_downloader = BodiesDownloaderBuilder::new(config.bodies) @@ -84,9 +90,14 @@ pub fn build_pipeline( ) -> eyre::Result> where N: ProviderNodeTypes, - H: HeaderDownloader + 'static, - B: BodyDownloader + 'static, + H: HeaderDownloader
+ 'static, + B: BodyDownloader> + 'static, Executor: BlockExecutorProvider, + N::Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, { let mut builder = Pipeline::::builder(); diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 1c6c9d98c80b..0ede9fe80c4d 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -13,7 +13,9 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-consensus.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-cli-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-storage-errors.workspace = true @@ -30,9 +32,9 @@ reth-discv4.workspace = true reth-discv5.workspace = true reth-net-nat.workspace = true reth-network-peers.workspace = true -reth-consensus-common.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true +reth-ethereum-forks.workspace = true # ethereum alloy-primitives.workspace = true diff --git a/crates/node/core/src/lib.rs b/crates/node/core/src/lib.rs index a69a255a3c67..aa4f72bd6a4d 100644 --- a/crates/node/core/src/lib.rs +++ b/crates/node/core/src/lib.rs @@ -15,9 +15,11 @@ pub mod exit; pub mod node_config; pub mod utils; pub mod version; -/// Re-exported from `reth_primitives`. + +/// Re-exported primitive types pub mod primitives { - pub use reth_primitives::*; + pub use reth_ethereum_forks::*; + pub use reth_primitives_traits::*; } /// Re-export of `reth_rpc_*` crates. diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 3848772c4158..2fd39bde82f2 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -8,22 +8,27 @@ use crate::{ dirs::{ChainPath, DataDirPath}, utils::get_single_header, }; +use alloy_consensus::BlockHeader; +use alloy_eips::BlockHashOrNumber; +use alloy_primitives::{BlockNumber, B256}; use eyre::eyre; use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET}; use reth_config::config::PruneConfig; +use reth_ethereum_forks::Head; use reth_network_p2p::headers::client::HeadersClient; -use serde::{de::DeserializeOwned, Serialize}; -use std::{fs, path::Path}; - -use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, B256}; -use reth_primitives::{Head, SealedHeader}; +use reth_primitives_traits::SealedHeader; use reth_stages_types::StageId; use reth_storage_api::{ BlockHashReader, DatabaseProviderFactory, HeaderProvider, StageCheckpointReader, }; use reth_storage_errors::provider::ProviderResult; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use serde::{de::DeserializeOwned, Serialize}; +use std::{ + fs, + net::SocketAddr, + path::{Path, PathBuf}, + sync::Arc, +}; use tracing::*; /// This includes all necessary configuration to launch the node. 
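Since `reth_node_core::primitives` now re-exports `reth_ethereum_forks` and `reth_primitives_traits` instead of all of `reth_primitives`, common downstream imports keep resolving through the same path. A small compatibility sketch (field and method names per the re-exported types):

```rust
use reth_node_core::primitives::{Head, SealedHeader};

// `Head` now comes from reth_ethereum_forks; its fields are unchanged.
fn describe(head: &Head) -> String {
    format!("tip #{} ({})", head.number, head.hash)
}

// `SealedHeader` now comes from reth_primitives_traits; the sealed hash
// remains available without re-hashing.
fn tip_hash(header: &SealedHeader) -> alloy_primitives::B256 {
    header.hash()
}
```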
@@ -273,7 +278,7 @@ impl NodeConfig { ) -> eyre::Result> where Provider: HeaderProvider, - Client: HeadersClient, + Client: HeadersClient, { let max_block = if let Some(block) = self.debug.max_block { Some(block) @@ -332,7 +337,7 @@ impl NodeConfig { ) -> ProviderResult where Provider: HeaderProvider, - Client: HeadersClient, + Client: HeadersClient, { let header = provider.header_by_hash_or_number(tip.into())?; @@ -342,7 +347,7 @@ impl NodeConfig { return Ok(header.number) } - Ok(self.fetch_tip_from_network(client, tip.into()).await.number) + Ok(self.fetch_tip_from_network(client, tip.into()).await.number()) } /// Attempt to look up the block with the given number and return the header. @@ -352,9 +357,9 @@ impl NodeConfig { &self, client: Client, tip: BlockHashOrNumber, - ) -> SealedHeader + ) -> SealedHeader where - Client: HeadersClient, + Client: HeadersClient, { info!(target: "reth::cli", ?tip, "Fetching tip block from the network."); let mut fetch_failures = 0; diff --git a/crates/node/core/src/utils.rs b/crates/node/core/src/utils.rs index a04d4e324e1a..65f90f27eb72 100644 --- a/crates/node/core/src/utils.rs +++ b/crates/node/core/src/utils.rs @@ -1,22 +1,19 @@ //! Utility functions for node startup and shutdown, for example path parsing and retrieving single //! blocks from the network. +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::Sealable; use alloy_rpc_types_engine::{JwtError, JwtSecret}; use eyre::Result; -use reth_chainspec::ChainSpec; -use reth_consensus_common::validation::validate_block_pre_execution; +use reth_consensus::Consensus; use reth_network_p2p::{ - bodies::client::BodiesClient, - headers::client::{HeadersClient, HeadersDirection, HeadersRequest}, - priority::Priority, + bodies::client::BodiesClient, headers::client::HeadersClient, priority::Priority, }; -use reth_primitives::{SealedBlock, SealedHeader}; +use reth_primitives::SealedBlock; +use reth_primitives_traits::SealedHeader; use std::{ env::VarError, path::{Path, PathBuf}, - sync::Arc, }; use tracing::{debug, info}; @@ -41,27 +38,22 @@ pub fn get_or_create_jwt_secret_from_path(path: &Path) -> Result( client: Client, id: BlockHashOrNumber, -) -> Result +) -> Result> where - Client: HeadersClient, + Client: HeadersClient, { - let request = HeadersRequest { direction: HeadersDirection::Rising, limit: 1, start: id }; + let (peer_id, response) = client.get_header_with_priority(id, Priority::High).await?.split(); - let (peer_id, response) = - client.get_headers_with_priority(request, Priority::High).await?.split(); - - if response.len() != 1 { + let Some(header) = response else { client.report_bad_message(peer_id); - eyre::bail!("Invalid number of headers received. Expected: 1. Received: {}", response.len()) - } + eyre::bail!("Invalid number of headers received. Expected: 1. 
Received: 0") + }; - let sealed_header = response.into_iter().next().unwrap().seal_slow(); - let (header, seal) = sealed_header.into_parts(); - let header = SealedHeader::new(header, seal); + let header = SealedHeader::seal(header); let valid = match id { BlockHashOrNumber::Hash(hash) => header.hash() == hash, - BlockHashOrNumber::Number(number) => header.number == number, + BlockHashOrNumber::Number(number) => header.number() == number, }; if !valid { @@ -77,25 +69,23 @@ where } /// Get a body from network based on header -pub async fn get_single_body( +pub async fn get_single_body( client: Client, - chain_spec: Arc, - header: SealedHeader, -) -> Result + header: SealedHeader, + consensus: impl Consensus, +) -> Result> where Client: BodiesClient, { let (peer_id, response) = client.get_block_body(header.hash()).await?.split(); - if response.is_none() { + let Some(body) = response else { client.report_bad_message(peer_id); eyre::bail!("Invalid number of bodies received. Expected: 1. Received: 0") - } + }; - let body = response.unwrap(); let block = SealedBlock { header, body }; - - validate_block_pre_execution(&block, &chain_spec)?; + consensus.validate_block_pre_execution(&block)?; Ok(block) } diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 6af3d8cbeb40..03f3ab172883 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -12,14 +12,14 @@ workspace = true [dependencies] # reth -reth-provider.workspace = true +reth-storage-api.workspace = true reth-beacon-consensus.workspace = true -reth-network = { workspace = true, features = ["serde"] } reth-network-api.workspace = true reth-stages.workspace = true -reth-prune.workspace = true -reth-static-file.workspace = true +reth-prune-types.workspace = true +reth-static-file-types.workspace = true reth-primitives-traits.workspace = true +reth-engine-primitives.workspace = true # ethereum alloy-primitives.workspace = true diff --git a/crates/node/events/src/cl.rs b/crates/node/events/src/cl.rs index 6d29c9bbfa29..bf0d4a59b213 100644 --- a/crates/node/events/src/cl.rs +++ b/crates/node/events/src/cl.rs @@ -1,7 +1,7 @@ //! Events related to Consensus Layer health. 
 use futures::Stream;
-use reth_provider::CanonChainTracker;
+use reth_storage_api::CanonChainTracker;
 use std::{
     fmt,
     pin::Pin,
diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs
index fb0f4d48d77f..edd85501ec0a 100644
--- a/crates/node/events/src/node.rs
+++ b/crates/node/events/src/node.rs
@@ -5,15 +5,13 @@ use alloy_consensus::constants::GWEI_TO_WEI;
 use alloy_primitives::{BlockNumber, B256};
 use alloy_rpc_types_engine::ForkchoiceState;
 use futures::Stream;
-use reth_beacon_consensus::{
-    BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus,
-};
-use reth_network::NetworkEvent;
+use reth_beacon_consensus::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress};
+use reth_engine_primitives::ForkchoiceStatus;
 use reth_network_api::PeersInfo;
 use reth_primitives_traits::{format_gas, format_gas_throughput};
-use reth_prune::PrunerEvent;
+use reth_prune_types::PrunerEvent;
 use reth_stages::{EntitiesCheckpoint, ExecOutput, PipelineEvent, StageCheckpoint, StageId};
-use reth_static_file::StaticFileProducerEvent;
+use reth_static_file_types::StaticFileProducerEvent;
 use std::{
     fmt::{Display, Formatter},
     future::Future,
@@ -213,12 +211,6 @@ impl NodeState {
         }
     }
 
-    fn handle_network_event(&self, _: NetworkEvent) {
-        // NOTE(onbjerg): This used to log established/disconnecting sessions, but this is already
-        // logged in the networking component. I kept this stub in case we want to catch other
-        // networking events later on.
-    }
-
     fn handle_consensus_engine_event(&mut self, event: BeaconConsensusEngineEvent) {
         match event {
             BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status) => {
@@ -309,7 +301,11 @@ impl NodeState {
                 info!(tip_block_number, "Pruner started");
             }
             PrunerEvent::Finished { tip_block_number, elapsed, stats } => {
-                info!(tip_block_number, ?elapsed, ?stats, "Pruner finished");
+                let stats = format!(
+                    "[{}]",
+                    stats.iter().map(|item| item.to_string()).collect::<Vec<_>>().join(", ")
+                );
+                info!(tip_block_number, ?elapsed, %stats, "Pruner finished");
             }
         }
     }
@@ -356,8 +352,6 @@ struct CurrentStage {
 /// A node event.
 #[derive(Debug)]
 pub enum NodeEvent {
-    /// A network event.
-    Network(NetworkEvent),
     /// A sync pipeline event.
     Pipeline(PipelineEvent),
     /// A consensus engine event.
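The `Pruner finished` log above switches from `Debug`-printing the stats to one bracketed, comma-separated string. A sketch of the resulting shape, with placeholder entries standing in for the real per-segment stats items:

```rust
// Placeholder stats; the real items come from `PrunerEvent::Finished`.
let stats = vec!["Transactions: 1000".to_string(), "Receipts: 250".to_string()];
let rendered = format!("[{}]", stats.join(", "));
assert_eq!(rendered, "[Transactions: 1000, Receipts: 250]");
```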
@@ -373,12 +367,6 @@ pub enum NodeEvent { Other(String), } -impl From for NodeEvent { - fn from(event: NetworkEvent) -> Self { - Self::Network(event) - } -} - impl From for NodeEvent { fn from(event: PipelineEvent) -> Self { Self::Pipeline(event) @@ -525,9 +513,6 @@ where while let Poll::Ready(Some(event)) = this.events.as_mut().poll_next(cx) { match event { - NodeEvent::Network(event) => { - this.state.handle_network_event(event); - } NodeEvent::Pipeline(event) => { this.state.handle_pipeline_event(event); } diff --git a/crates/node/metrics/Cargo.toml b/crates/node/metrics/Cargo.toml index 9efdbd4959db..3d79d11db7d7 100644 --- a/crates/node/metrics/Cargo.toml +++ b/crates/node/metrics/Cargo.toml @@ -8,8 +8,6 @@ homepage.workspace = true repository.workspace = true [dependencies] -reth-db-api.workspace = true -reth-provider.workspace = true reth-metrics.workspace = true reth-tasks.workspace = true @@ -20,7 +18,7 @@ metrics-util.workspace = true tokio.workspace = true -jsonrpsee = { workspace = true, features = ["server"] } +jsonrpsee-server.workspace = true http.workspace = true tower.workspace = true @@ -36,7 +34,6 @@ procfs = "0.16.0" [dev-dependencies] reqwest.workspace = true socket2 = { version = "0.5", default-features = false } -reth-provider = { workspace = true, features = ["test-utils"] } [lints] workspace = true diff --git a/crates/node/metrics/src/hooks.rs b/crates/node/metrics/src/hooks.rs index 18755717667c..3b6d23a39007 100644 --- a/crates/node/metrics/src/hooks.rs +++ b/crates/node/metrics/src/hooks.rs @@ -1,15 +1,59 @@ use metrics_process::Collector; -use reth_db_api::database_metrics::DatabaseMetrics; -use reth_provider::providers::StaticFileProvider; use std::{fmt, sync::Arc}; -pub(crate) trait Hook: Fn() + Send + Sync {} -impl Hook for T {} -impl fmt::Debug for Hooks { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let hooks_len = self.inner.len(); - f.debug_struct("Hooks") - .field("inner", &format!("Arc>>, len: {}", hooks_len)) +/// The simple alias for function types that are `'static`, `Send`, and `Sync`. +pub trait Hook: Fn() + Send + Sync + 'static {} +impl Hook for T {} + +/// A builder-like type to create a new [`Hooks`] instance. +pub struct HooksBuilder { + hooks: Vec>>, +} + +impl HooksBuilder { + /// Registers a [`Hook`]. + pub fn with_hook(self, hook: impl Hook) -> Self { + self.with_boxed_hook(Box::new(hook)) + } + + /// Registers a [`Hook`] by calling the provided closure. + pub fn install_hook(self, f: F) -> Self + where + F: FnOnce() -> H, + H: Hook, + { + self.with_hook(f()) + } + + /// Registers a [`Hook`]. + #[inline] + pub fn with_boxed_hook(mut self, hook: Box>) -> Self { + self.hooks.push(hook); + self + } + + /// Builds the [`Hooks`] collection from the registered hooks. 
+ pub fn build(self) -> Hooks { + Hooks { inner: Arc::new(self.hooks) } + } +} + +impl Default for HooksBuilder { + fn default() -> Self { + Self { + hooks: vec![ + Box::new(|| Collector::default().collect()), + Box::new(collect_memory_stats), + Box::new(collect_io_stats), + ], + } + } +} + +impl std::fmt::Debug for HooksBuilder { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("HooksBuilder") + .field("hooks", &format_args!("Vec>, len: {}", self.hooks.len())) .finish() } } @@ -21,23 +65,10 @@ pub struct Hooks { } impl Hooks { - /// Create a new set of hooks - pub fn new( - db: Metrics, - static_file_provider: StaticFileProvider, - ) -> Self { - let hooks: Vec>> = vec![ - Box::new(move || db.report_metrics()), - Box::new(move || { - let _ = static_file_provider.report_metrics().map_err( - |error| tracing::error!(%error, "Failed to report static file provider metrics"), - ); - }), - Box::new(move || Collector::default().collect()), - Box::new(collect_memory_stats), - Box::new(collect_io_stats), - ]; - Self { inner: Arc::new(hooks) } + /// Creates a new [`HooksBuilder`] instance. + #[inline] + pub fn builder() -> HooksBuilder { + HooksBuilder::default() } pub(crate) fn iter(&self) -> impl Iterator>> { @@ -45,6 +76,15 @@ impl Hooks { } } +impl fmt::Debug for Hooks { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let hooks_len = self.inner.len(); + f.debug_struct("Hooks") + .field("inner", &format_args!("Arc>>, len: {}", hooks_len)) + .finish() + } +} + #[cfg(all(feature = "jemalloc", unix))] fn collect_memory_stats() { use metrics::gauge; diff --git a/crates/node/metrics/src/recorder.rs b/crates/node/metrics/src/recorder.rs index a7421ab355c0..e62b98c81cd4 100644 --- a/crates/node/metrics/src/recorder.rs +++ b/crates/node/metrics/src/recorder.rs @@ -3,25 +3,78 @@ use eyre::WrapErr; use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle}; use metrics_util::layers::{PrefixLayer, Stack}; -use std::sync::LazyLock; +use std::sync::{atomic::AtomicBool, LazyLock}; /// Installs the Prometheus recorder as the global recorder. -pub fn install_prometheus_recorder() -> &'static PrometheusHandle { +/// +/// Note: This must be installed before any metrics are `described`. +/// +/// Caution: This only configures the global recorder and does not spawn the exporter. +/// Callers must run [`PrometheusRecorder::spawn_upkeep`] manually. +pub fn install_prometheus_recorder() -> &'static PrometheusRecorder { &PROMETHEUS_RECORDER_HANDLE } /// The default Prometheus recorder handle. We use a global static to ensure that it is only /// installed once. -static PROMETHEUS_RECORDER_HANDLE: LazyLock = +static PROMETHEUS_RECORDER_HANDLE: LazyLock = LazyLock::new(|| PrometheusRecorder::install().unwrap()); -/// Prometheus recorder installer +/// A handle to the Prometheus recorder. +/// +/// This is intended to be used as the global recorder. +/// Callers must ensure that [`PrometheusRecorder::spawn_upkeep`] is called once. #[derive(Debug)] -pub struct PrometheusRecorder; +pub struct PrometheusRecorder { + handle: PrometheusHandle, + upkeep: AtomicBool, +} impl PrometheusRecorder { + const fn new(handle: PrometheusHandle) -> Self { + Self { handle, upkeep: AtomicBool::new(false) } + } + + /// Returns a reference to the [`PrometheusHandle`]. + pub const fn handle(&self) -> &PrometheusHandle { + &self.handle + } + + /// Spawns the upkeep task if there hasn't been one spawned already. 
+ /// + /// ## Panics + /// + /// This method must be called from within an existing Tokio runtime or it will panic. + /// + /// See also [`PrometheusHandle::run_upkeep`] + pub fn spawn_upkeep(&self) { + if self + .upkeep + .compare_exchange( + false, + true, + std::sync::atomic::Ordering::SeqCst, + std::sync::atomic::Ordering::Acquire, + ) + .is_err() + { + return; + } + + let handle = self.handle.clone(); + tokio::spawn(async move { + loop { + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + handle.run_upkeep(); + } + }); + } + /// Installs Prometheus as the metrics recorder. - pub fn install() -> eyre::Result { + /// + /// Caution: This only configures the global recorder and does not spawn the exporter. + /// Callers must run [`Self::spawn_upkeep`] manually. + pub fn install() -> eyre::Result { let recorder = PrometheusBuilder::new().build_recorder(); let handle = recorder.handle(); @@ -31,7 +84,7 @@ impl PrometheusRecorder { .install() .wrap_err("Couldn't set metrics recorder.")?; - Ok(handle) + Ok(Self::new(handle)) } } @@ -52,7 +105,7 @@ mod tests { process.describe(); process.collect(); - let metrics = PROMETHEUS_RECORDER_HANDLE.render(); + let metrics = PROMETHEUS_RECORDER_HANDLE.handle.render(); assert!(metrics.contains("process_cpu_seconds_total"), "{metrics:?}"); } } diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index 87521349d4de..313b578f800d 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -103,7 +103,7 @@ impl MetricServer { let hook = hook.clone(); let service = tower::service_fn(move |_| { (hook)(); - let metrics = handle.render(); + let metrics = handle.handle().render(); let mut response = Response::new(metrics); response .headers_mut() @@ -113,12 +113,12 @@ impl MetricServer { let mut shutdown = signal.clone().ignore_guard(); tokio::task::spawn(async move { - if let Err(error) = - jsonrpsee::server::serve_with_graceful_shutdown(io, service, &mut shutdown) + let _ = + jsonrpsee_server::serve_with_graceful_shutdown(io, service, &mut shutdown) .await - { - tracing::debug!(%error, "failed to serve request") - } + .inspect_err( + |error| tracing::debug!(%error, "failed to serve request"), + ); }); } }); @@ -206,7 +206,6 @@ const fn describe_io_stats() {} mod tests { use super::*; use reqwest::Client; - use reth_provider::{test_utils::create_test_provider_factory, StaticFileProviderFactory}; use reth_tasks::TaskManager; use socket2::{Domain, Socket, Type}; use std::net::{SocketAddr, TcpListener}; @@ -236,8 +235,7 @@ mod tests { let tasks = TaskManager::current(); let executor = tasks.executor(); - let factory = create_test_provider_factory(); - let hooks = Hooks::new(factory.db_ref().clone(), factory.static_file_provider()); + let hooks = Hooks::builder().build(); let listen_addr = get_random_available_addr(); let config = @@ -252,7 +250,7 @@ mod tests { // Check the response body let body = response.text().await.unwrap(); - assert!(body.contains("reth_db_table_size")); - assert!(body.contains("reth_jemalloc_metadata")); + assert!(body.contains("reth_process_cpu_seconds_total")); + assert!(body.contains("reth_process_start_time_seconds")); } } diff --git a/crates/node/types/Cargo.toml b/crates/node/types/Cargo.toml index cc33aac30ff4..588fe7c4062f 100644 --- a/crates/node/types/Cargo.toml +++ b/crates/node/types/Cargo.toml @@ -17,3 +17,10 @@ reth-db-api.workspace = true reth-engine-primitives.workspace = true reth-primitives-traits.workspace = true reth-trie-db.workspace = true + 
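Before the `reth-node-types` changes that follow, it is worth collecting the new recorder API from the `recorder.rs` hunks above into one usage sketch: installation stays lazy and global, while upkeep must now be spawned explicitly from inside a Tokio runtime (the module path is an assumption based on the crate layout):

```rust
use reth_node_metrics::recorder::install_prometheus_recorder;

#[tokio::main]
async fn main() {
    // The LazyLock installs the global recorder exactly once.
    let recorder = install_prometheus_recorder();
    // Guarded by the internal AtomicBool, so repeated calls spawn at most one
    // upkeep task; panics if called outside a Tokio runtime.
    recorder.spawn_upkeep();
    // Render the current scrape output via the wrapped PrometheusHandle.
    println!("{}", recorder.handle().render());
}
```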
+[features]
+default = ["std"]
+std = [
+    "reth-primitives-traits/std",
+    "reth-chainspec/std",
+]
\ No newline at end of file
diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs
index afb650ada2b0..c0d266e57755 100644
--- a/crates/node/types/src/lib.rs
+++ b/crates/node/types/src/lib.rs
@@ -7,6 +7,12 @@
 )]
 #![cfg_attr(not(test), warn(unused_crate_dependencies))]
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use core::{fmt::Debug, marker::PhantomData};
+pub use reth_primitives_traits::{
+    Block, BlockBody, FullBlock, FullNodePrimitives, FullReceipt, FullSignedTx, NodePrimitives,
+};
 
 use reth_chainspec::EthChainSpec;
 use reth_db_api::{
@@ -14,25 +20,7 @@ use reth_db_api::{
     Database,
 };
 use reth_engine_primitives::EngineTypes;
-pub use reth_primitives_traits::{Block, BlockBody};
 use reth_trie_db::StateCommitment;
-use std::marker::PhantomData;
-
-/// Configures all the primitive types of the node.
-pub trait NodePrimitives {
-    /// Block primitive.
-    type Block;
-    /// Signed version of the transaction type.
-    type SignedTx;
-    /// A receipt.
-    type Receipt;
-}
-
-impl NodePrimitives for () {
-    type Block = ();
-    type SignedTx = ();
-    type Receipt = ();
-}
 
 /// The type that configures the essential types of an Ethereum-like node.
 ///
@@ -46,6 +34,8 @@ pub trait NodeTypes: Send + Sync + Unpin + 'static {
     type ChainSpec: EthChainSpec;
     /// The type used to perform state commitment operations.
     type StateCommitment: StateCommitment;
+    /// The type responsible for writing chain primitives to storage.
+    type Storage: Default + Send + Sync + Unpin + Debug + 'static;
 }
 
 /// The type that configures an Ethereum-like node with an engine for consensus.
 ///
@@ -97,6 +87,7 @@ where
     type Primitives = Types::Primitives;
     type ChainSpec = Types::ChainSpec;
     type StateCommitment = Types::StateCommitment;
+    type Storage = Types::Storage;
 }
 
 impl<Types, DB> NodeTypesWithEngine for NodeTypesWithDBAdapter<Types, DB>
 where
@@ -116,86 +107,143 @@ where
 }
 
 /// A [`NodeTypes`] type builder.
-#[derive(Default, Debug)]
-pub struct AnyNodeTypes<P = (), C = (), S = ()>(PhantomData<P>, PhantomData<C>, PhantomData<S>);
+#[derive(Debug)]
+pub struct AnyNodeTypes<P = (), C = (), SC = (), S = ()>(
+    PhantomData<P>,
+    PhantomData<C>,
+    PhantomData<SC>,
+    PhantomData<S>,
+);
+
+impl<P, C, SC, S> Default for AnyNodeTypes<P, C, SC, S> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<P, C, SC, S> AnyNodeTypes<P, C, SC, S> {
+    /// Creates a new instance of [`AnyNodeTypes`].
+    pub const fn new() -> Self {
+        Self(PhantomData, PhantomData, PhantomData, PhantomData)
+    }
 
-impl<P, C, S> AnyNodeTypes<P, C, S> {
     /// Sets the `Primitives` associated type.
-    pub const fn primitives<T>(self) -> AnyNodeTypes<T, C, S> {
-        AnyNodeTypes::<T, C, S>(PhantomData::<T>, PhantomData::<C>, PhantomData::<S>)
+    pub const fn primitives<T>(self) -> AnyNodeTypes<T, C, SC, S> {
+        AnyNodeTypes::new()
     }
 
     /// Sets the `ChainSpec` associated type.
-    pub const fn chain_spec<T>(self) -> AnyNodeTypes<P, T, S> {
-        AnyNodeTypes::<P, T, S>(PhantomData::<P>, PhantomData::<T>, PhantomData::<S>)
+    pub const fn chain_spec<T>(self) -> AnyNodeTypes<P, T, SC, S> {
+        AnyNodeTypes::new()
     }
 
     /// Sets the `StateCommitment` associated type.
-    pub const fn state_commitment<T>(self) -> AnyNodeTypes<P, C, T> {
-        AnyNodeTypes::<P, C, T>(PhantomData::<P>, PhantomData::<C>, PhantomData::<T>)
+    pub const fn state_commitment<T>(self) -> AnyNodeTypes<P, C, T, S> {
+        AnyNodeTypes::new()
+    }
+
+    /// Sets the `Storage` associated type.
+    pub const fn storage<T>(self) -> AnyNodeTypes<P, C, SC, T> {
+        AnyNodeTypes::new()
     }
 }
 
-impl<P, C, S> NodeTypes for AnyNodeTypes<P, C, S>
+impl<P, C, SC, S> NodeTypes for AnyNodeTypes<P, C, SC, S>
 where
     P: NodePrimitives + Send + Sync + Unpin + 'static,
     C: EthChainSpec + 'static,
-    S: StateCommitment,
+    SC: StateCommitment,
+    S: Default + Send + Sync + Unpin + Debug + 'static,
 {
     type Primitives = P;
     type ChainSpec = C;
-    type StateCommitment = S;
+    type StateCommitment = SC;
+    type Storage = S;
 }
 
 /// A [`NodeTypesWithEngine`] type builder.
-#[derive(Default, Debug)]
-pub struct AnyNodeTypesWithEngine<P = (), E = (), C = (), S = ()> {
+#[derive(Debug)]
+pub struct AnyNodeTypesWithEngine<P = (), E = (), C = (), SC = (), S = ()> {
     /// Embedding the basic node types.
-    base: AnyNodeTypes<P, C, S>,
+    _base: AnyNodeTypes<P, C, SC, S>,
     /// Phantom data for the engine.
     _engine: PhantomData<E>,
 }
 
-impl<P, E, C, S> AnyNodeTypesWithEngine<P, E, C, S> {
+impl<P, E, C, SC, S> Default for AnyNodeTypesWithEngine<P, E, C, SC, S> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<P, E, C, SC, S> AnyNodeTypesWithEngine<P, E, C, SC, S> {
+    /// Creates a new instance of [`AnyNodeTypesWithEngine`].
+    pub const fn new() -> Self {
+        Self { _base: AnyNodeTypes::new(), _engine: PhantomData }
+    }
+
     /// Sets the `Primitives` associated type.
-    pub const fn primitives<T>(self) -> AnyNodeTypesWithEngine<T, E, C, S> {
-        AnyNodeTypesWithEngine { base: self.base.primitives::<T>(), _engine: PhantomData }
+    pub const fn primitives<T>(self) -> AnyNodeTypesWithEngine<T, E, C, SC, S> {
+        AnyNodeTypesWithEngine::new()
     }
 
     /// Sets the `Engine` associated type.
-    pub const fn engine<T>(self) -> AnyNodeTypesWithEngine<P, T, C, S> {
-        AnyNodeTypesWithEngine { base: self.base, _engine: PhantomData::<T> }
+    pub const fn engine<T>(self) -> AnyNodeTypesWithEngine<P, T, C, SC, S> {
+        AnyNodeTypesWithEngine::new()
     }
 
     /// Sets the `ChainSpec` associated type.
-    pub const fn chain_spec<T>(self) -> AnyNodeTypesWithEngine<P, E, T, S> {
-        AnyNodeTypesWithEngine { base: self.base.chain_spec::<T>(), _engine: PhantomData }
+    pub const fn chain_spec<T>(self) -> AnyNodeTypesWithEngine<P, E, T, SC, S> {
+        AnyNodeTypesWithEngine::new()
    }
 
     /// Sets the `StateCommitment` associated type.
-    pub const fn state_commitment<T>(self) -> AnyNodeTypesWithEngine<P, E, C, T> {
-        AnyNodeTypesWithEngine { base: self.base.state_commitment::<T>(), _engine: PhantomData }
+    pub const fn state_commitment<T>(self) -> AnyNodeTypesWithEngine<P, E, C, T, S> {
+        AnyNodeTypesWithEngine::new()
+    }
+
+    /// Sets the `Storage` associated type.
+    pub const fn storage<T>(self) -> AnyNodeTypesWithEngine<P, E, C, SC, T> {
+        AnyNodeTypesWithEngine::new()
     }
 }
 
-impl<P, E, C, S> NodeTypes for AnyNodeTypesWithEngine<P, E, C, S>
+impl<P, E, C, SC, S> NodeTypes for AnyNodeTypesWithEngine<P, E, C, SC, S>
 where
     P: NodePrimitives + Send + Sync + Unpin + 'static,
     E: EngineTypes + Send + Sync + Unpin,
     C: EthChainSpec + 'static,
-    S: StateCommitment,
+    SC: StateCommitment,
+    S: Default + Send + Sync + Unpin + Debug + 'static,
 {
     type Primitives = P;
     type ChainSpec = C;
-    type StateCommitment = S;
+    type StateCommitment = SC;
+    type Storage = S;
 }
 
-impl<P, E, C, S> NodeTypesWithEngine for AnyNodeTypesWithEngine<P, E, C, S>
+impl<P, E, C, SC, S> NodeTypesWithEngine for AnyNodeTypesWithEngine<P, E, C, SC, S>
 where
     P: NodePrimitives + Send + Sync + Unpin + 'static,
     E: EngineTypes + Send + Sync + Unpin,
     C: EthChainSpec + 'static,
-    S: StateCommitment,
+    SC: StateCommitment,
+    S: Default + Send + Sync + Unpin + Debug + 'static,
 {
     type Engine = E;
 }
+
+/// Helper adapter type for accessing [`NodePrimitives::Block`] on [`NodeTypes`].
+pub type BlockTy<N> = <<N as NodeTypes>::Primitives as NodePrimitives>::Block;
+
+/// Helper adapter type for accessing [`NodePrimitives::BlockHeader`] on [`NodeTypes`].
+pub type HeaderTy<N> = <<N as NodeTypes>::Primitives as NodePrimitives>::BlockHeader;
+
+/// Helper adapter type for accessing [`NodePrimitives::BlockBody`] on [`NodeTypes`].
+pub type BodyTy<N> = <<N as NodeTypes>::Primitives as NodePrimitives>::BlockBody;
+
+/// Helper adapter type for accessing [`NodePrimitives::SignedTx`] on [`NodeTypes`].
+pub type TxTy<N> = <<N as NodeTypes>::Primitives as NodePrimitives>::SignedTx;
+
+/// Helper adapter type for accessing [`NodePrimitives::Receipt`] on [`NodeTypes`].
+pub type ReceiptTy<N> = <<N as NodeTypes>::Primitives as NodePrimitives>::Receipt;
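A hypothetical configuration assembled with the rebuilt builders above (here `EthPrimitives`, `EthEngineTypes`, `ChainSpec`, and `MerklePatriciaTrie` stand in for real implementations of the four trait bounds, and `()` serves as a placeholder satisfying the new `Storage: Default + Send + Sync + Unpin + Debug + 'static` bound): every setter swaps exactly one type parameter and re-instantiates the marker type via `new()`, so the whole chain stays usable in `const` context.

```rust
use reth_chainspec::ChainSpec;
use reth_ethereum_engine_primitives::EthEngineTypes;
use reth_node_types::AnyNodeTypesWithEngine;
use reth_primitives::EthPrimitives;
use reth_trie_db::MerklePatriciaTrie;

// Start from the unit defaults and swap one parameter per setter; `()` is a
// placeholder Storage type that satisfies the Default + Debug + ... bound.
const TYPES: AnyNodeTypesWithEngine<EthPrimitives, EthEngineTypes, ChainSpec, MerklePatriciaTrie, ()> =
    AnyNodeTypesWithEngine::<(), (), (), (), ()>::new()
        .primitives::<EthPrimitives>()
        .engine::<EthEngineTypes>()
        .chain_spec::<ChainSpec>()
        .state_commitment::<MerklePatriciaTrie>()
        .storage::<()>();
```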
diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml
index 77166763100a..45f4492e82b6 100644
--- a/crates/optimism/bin/Cargo.toml
+++ b/crates/optimism/bin/Cargo.toml
@@ -48,7 +48,8 @@ optimism = [
 ]
 
 dev = [
-    "reth-optimism-cli/dev"
+    "reth-optimism-cli/dev",
+    "reth-optimism-primitives/arbitrary",
 ]
 
 min-error-logs = ["tracing/release_max_level_error"]
diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs
index 7bd0e433a2d8..d552d08f18ca 100644
--- a/crates/optimism/chainspec/src/lib.rs
+++ b/crates/optimism/chainspec/src/lib.rs
@@ -19,6 +19,7 @@ mod op_sepolia;
 
 use alloc::{boxed::Box, vec, vec::Vec};
 use alloy_chains::Chain;
+use alloy_consensus::Header;
 use alloy_genesis::Genesis;
 use alloy_primitives::{Bytes, B256, U256};
 pub use base::BASE_MAINNET;
@@ -35,8 +36,7 @@ use reth_chainspec::{
 };
 use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork};
 use reth_network_peers::NodeRecord;
-use reth_optimism_forks::OptimismHardforks;
-use reth_primitives_traits::Header;
+use reth_optimism_forks::OpHardforks;
 
 #[cfg(feature = "std")]
 pub(crate) use std::sync::LazyLock;
@@ -336,7 +336,7 @@ impl EthereumHardforks for OpChainSpec {
     }
 }
 
-impl OptimismHardforks for OpChainSpec {}
+impl OpHardforks for OpChainSpec {}
 
 impl From<Genesis> for OpChainSpec {
     fn from(genesis: Genesis) -> Self {
@@ -392,6 +392,7 @@ impl From<Genesis> for OpChainSpec {
             (OpHardfork::Ecotone.boxed(), genesis_info.ecotone_time),
             (OpHardfork::Fjord.boxed(), genesis_info.fjord_time),
             (OpHardfork::Granite.boxed(), genesis_info.granite_time),
+            (OpHardfork::Holocene.boxed(), genesis_info.holocene_time),
         ];
 
         let mut time_hardforks = time_hardfork_opts
@@ -486,7 +487,7 @@ mod tests {
     use alloy_primitives::b256;
     use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind};
     use reth_ethereum_forks::{EthereumHardfork, ForkCondition, ForkHash, ForkId, Head};
-    use reth_optimism_forks::{OpHardfork, OptimismHardforks};
+    use reth_optimism_forks::{OpHardfork, OpHardforks};
 
     use crate::*;
 
@@ -572,7 +573,11 @@
             ),
             (
                 Head { number: 0, timestamp: 1723478400, ..Default::default() },
-                ForkId { hash: ForkHash([0x75, 0xde, 0xa4, 0x1e]), next: 0 },
+                ForkId { hash: ForkHash([0x75, 0xde, 0xa4, 0x1e]), next: 1732633200 },
+            ),
+            (
+                Head { number: 0, timestamp: 1732633200, ..Default::default() },
+                ForkId { hash: ForkHash([0x4a, 0x1c, 0x79, 0x2e]), next: 0 },
             ),
         ],
     );
@@ -639,7 +644,11 @@
             ),
             (
                 Head { number: 0, timestamp: 1723478400, ..Default::default() },
-                ForkId { hash: ForkHash([0x5e, 0xdf, 0xa3, 0xb6]), next: 0 },
+                ForkId { hash: ForkHash([0x5e, 0xdf, 0xa3, 0xb6]), next: 1732633200 },
+            ),
+            (
+                Head { number: 0, timestamp: 1732633200, ..Default::default() },
+                ForkId { hash: ForkHash([0x8b, 0x5e, 0x76, 0x29]), next: 0 },
             ),
         ],
     );
@@ -721,6 +730,7 @@
             "ecotoneTime": 40,
             "fjordTime": 50,
             "graniteTime": 51,
+            "holoceneTime": 52,
             "optimism": {
                 "eip1559Elasticity": 60,
                 "eip1559Denominator": 70
@@ -742,6 +752,8 @@
         assert_eq!(actual_fjord_timestamp, Some(serde_json::Value::from(50)).as_ref());
         let actual_granite_timestamp = genesis.config.extra_fields.get("graniteTime");
         assert_eq!(actual_granite_timestamp, Some(serde_json::Value::from(51)).as_ref());
+        let actual_holocene_timestamp = genesis.config.extra_fields.get("holoceneTime");
+        assert_eq!(actual_holocene_timestamp, Some(serde_json::Value::from(52)).as_ref());
 
         let optimism_object =
genesis.config.extra_fields.get("optimism").unwrap(); assert_eq!( @@ -765,6 +777,7 @@ mod tests { assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 0)); assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 0)); assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 0)); assert!(chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 10)); assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 20)); @@ -772,6 +785,7 @@ mod tests { assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 40)); assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 50)); assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 51)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 52)); } #[test] @@ -785,6 +799,7 @@ mod tests { "ecotoneTime": 40, "fjordTime": 50, "graniteTime": 51, + "holoceneTime": 52, "optimism": { "eip1559Elasticity": 60, "eip1559Denominator": 70, @@ -807,6 +822,8 @@ mod tests { assert_eq!(actual_fjord_timestamp, Some(serde_json::Value::from(50)).as_ref()); let actual_granite_timestamp = genesis.config.extra_fields.get("graniteTime"); assert_eq!(actual_granite_timestamp, Some(serde_json::Value::from(51)).as_ref()); + let actual_holocene_timestamp = genesis.config.extra_fields.get("holoceneTime"); + assert_eq!(actual_holocene_timestamp, Some(serde_json::Value::from(52)).as_ref()); let optimism_object = genesis.config.extra_fields.get("optimism").unwrap(); assert_eq!( @@ -837,6 +854,7 @@ mod tests { assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 0)); assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 0)); assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 0)); assert!(chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 10)); assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 20)); @@ -844,6 +862,7 @@ mod tests { assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 40)); assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 50)); assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 51)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 52)); } #[test] @@ -955,6 +974,7 @@ mod tests { (String::from("ecotoneTime"), 0.into()), (String::from("fjordTime"), 0.into()), (String::from("graniteTime"), 0.into()), + (String::from("holoceneTime"), 0.into()), ] .into_iter() .collect(), @@ -988,6 +1008,7 @@ mod tests { OpHardfork::Ecotone.boxed(), OpHardfork::Fjord.boxed(), OpHardfork::Granite.boxed(), + OpHardfork::Holocene.boxed(), ]; assert!(expected_hardforks @@ -1036,7 +1057,7 @@ mod tests { } #[test] - fn test_get_base_fee_holocene_nonce_not_set() { + fn test_get_base_fee_holocene_extra_data_not_set() { let op_chain_spec = holocene_chainspec(); let parent = Header { base_fee_per_gas: Some(1), @@ -1058,7 +1079,7 @@ mod tests { } #[test] - fn test_get_base_fee_holocene_nonce_set() { + fn test_get_base_fee_holocene_extra_data_set() { let op_chain_spec = holocene_chainspec(); let parent = Header { base_fee_per_gas: Some(1), diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index a2ba71214f5c..b61a4628f4d2 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -26,6 +26,7 @@ reth-execution-types.workspace = true reth-node-core.workspace = true 
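Aside: every Holocene assertion in the chainspec tests above reduces to a timestamp-gated fork condition. A compact, hypothetical restatement of that check with a minimal stand-in for `ForkCondition`:

#[derive(Clone, Copy)]
enum ForkCondition {
    Timestamp(u64),
    Never,
}

impl ForkCondition {
    fn active_at_timestamp(&self, timestamp: u64) -> bool {
        matches!(self, Self::Timestamp(at) if timestamp >= *at)
    }
}

fn main() {
    // Mirrors `"holoceneTime": 52` from the genesis JSON in the tests above.
    let holocene = ForkCondition::Timestamp(52);
    assert!(!holocene.active_at_timestamp(0));
    assert!(holocene.active_at_timestamp(52));
    assert!(!ForkCondition::Never.active_at_timestamp(u64::MAX));
}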
reth-optimism-node.workspace = true reth-primitives.workspace = true +reth-fs-util.workspace = true # so jemalloc metrics can be included reth-node-metrics.workspace = true @@ -47,11 +48,15 @@ reth-node-builder.workspace = true reth-tracing.workspace = true # eth +alloy-eips.workspace = true +alloy-consensus = { workspace = true, optional = true } alloy-primitives.workspace = true alloy-rlp.workspace = true # misc futures-util.workspace = true +derive_more = { workspace = true, optional = true } +serde = { workspace = true, optional = true } clap = { workspace = true, features = ["derive", "env"] } @@ -67,9 +72,7 @@ eyre.workspace = true # reth test-vectors proptest = { workspace = true, optional = true } -op-alloy-consensus = { workspace = true, features = [ - "arbitrary", -], optional = true } +op-alloy-consensus = { workspace = true, optional = true } [dev-dependencies] @@ -80,6 +83,10 @@ reth-cli-commands.workspace = true [features] optimism = [ + "op-alloy-consensus", + "alloy-consensus", + "dep:derive_more", + "dep:serde", "reth-primitives/optimism", "reth-optimism-evm/optimism", "reth-provider/optimism", @@ -87,7 +94,8 @@ optimism = [ "reth-optimism-node/optimism", "reth-execution-types/optimism", "reth-db/optimism", - "reth-db-api/optimism" + "reth-db-api/optimism", + "reth-downloaders/optimism" ] asm-keccak = [ "alloy-primitives/asm-keccak", @@ -104,6 +112,14 @@ jemalloc = [ dev = [ "dep:proptest", - "reth-cli-commands/arbitrary", - "op-alloy-consensus" + "reth-cli-commands/arbitrary" +] +serde = [ + "alloy-consensus?/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "op-alloy-consensus?/serde", + "reth-execution-types/serde", + "reth-provider/serde", + "reth-optimism-primitives/serde", ] diff --git a/crates/optimism/cli/src/commands/build_pipeline.rs b/crates/optimism/cli/src/commands/build_pipeline.rs index a197f93a8b42..8ebefdcc0b40 100644 --- a/crates/optimism/cli/src/commands/build_pipeline.rs +++ b/crates/optimism/cli/src/commands/build_pipeline.rs @@ -1,5 +1,6 @@ use alloy_primitives::B256; use futures_util::{Stream, StreamExt}; +use reth_cli_commands::common::CliNodeTypes; use reth_config::Config; use reth_consensus::Consensus; use reth_downloaders::{ @@ -11,11 +12,13 @@ use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; -use reth_node_builder::NodeTypesWithDB; use reth_node_events::node::NodeEvent; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_evm::OpExecutorProvider; -use reth_provider::{BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderFactory}; +use reth_provider::{ + providers::ProviderNodeTypes, BlockNumReader, ChainSpecProvider, HeaderProvider, + ProviderFactory, +}; use reth_prune::PruneModes; use reth_stages::{sets::DefaultStages, Pipeline, StageSet}; use reth_stages_types::StageId; @@ -36,7 +39,7 @@ pub(crate) async fn build_import_pipeline( disable_exec: bool, ) -> eyre::Result<(Pipeline, impl Stream)> where - N: NodeTypesWithDB, + N: CliNodeTypes + ProviderNodeTypes, C: Consensus + 'static, { if !file_client.has_canonical_blocks() { diff --git a/crates/optimism/cli/src/commands/import.rs b/crates/optimism/cli/src/commands/import.rs index e5f037c3d5cc..5e3de5a8671a 100644 --- a/crates/optimism/cli/src/commands/import.rs +++ b/crates/optimism/cli/src/commands/import.rs @@ -2,14 +2,13 @@ //! file. 
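Aside: `build_import_pipeline` now asks for the intersection of two capability traits rather than a single concrete `NodeTypesWithDB` bound. A toy sketch of that shape, with trait names taken from this diff and invented bodies:

trait CliNodeTypes {}
trait ProviderNodeTypes {}

struct OpNodeTypes;
impl CliNodeTypes for OpNodeTypes {}
impl ProviderNodeTypes for OpNodeTypes {}

// Callers must supply a node type that satisfies both traits;
// either one alone is not enough.
fn build_import_pipeline<N: CliNodeTypes + ProviderNodeTypes>(_types: N) {}

fn main() {
    build_import_pipeline(OpNodeTypes);
}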
use clap::Parser; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_consensus::noop::NoopConsensus; use reth_db::tables; use reth_db_api::transaction::DbTx; use reth_downloaders::file_client::{ ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE, }; -use reth_node_builder::NodeTypesWithEngine; use reth_node_core::version::SHORT_VERSION; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::bedrock::is_dup_tx; @@ -42,9 +41,7 @@ pub struct ImportOpCommand { impl> ImportOpCommand { /// Execute `import` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); info!(target: "reth::cli", diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index 838a99818e96..a5c12a48cfbd 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -5,22 +5,21 @@ use std::path::{Path, PathBuf}; use clap::Parser; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_db::tables; use reth_downloaders::{ file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, receipt_file_client::ReceiptFileClient, }; use reth_execution_types::ExecutionOutcome; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::version::SHORT_VERSION; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::bedrock::is_dup_tx; -use reth_primitives::Receipts; +use reth_primitives::{NodePrimitives, Receipts}; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, DatabaseProviderFactory, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StageCheckpointWriter, - StateWriter, StaticFileProviderFactory, StaticFileWriter, StatsReader, + StateWriter, StaticFileProviderFactory, StatsReader, StorageLocation, }; use reth_stages::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -48,9 +47,7 @@ pub struct ImportReceiptsOpCommand { impl> ImportReceiptsOpCommand { /// Execute `import` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); debug!(target: "reth::cli", @@ -88,7 +85,10 @@ pub async fn import_receipts_from_file( filter: F, ) -> eyre::Result<()> where - N: NodeTypesWithDB, + N: ProviderNodeTypes< + ChainSpec = OpChainSpec, + Primitives: NodePrimitives, + >, P: AsRef, F: FnMut(u64, &mut Receipts) -> usize, { @@ -126,7 +126,7 @@ pub async fn import_receipts_from_reader( mut filter: F, ) -> eyre::Result where - N: ProviderNodeTypes, + N: ProviderNodeTypes>, F: FnMut(u64, &mut Receipts) -> usize, { let static_file_provider = provider_factory.static_file_provider(); @@ -150,7 +150,7 @@ where } } - let provider = provider_factory.provider_rw()?; + let provider = provider_factory.database_provider_rw()?; let mut total_decoded_receipts = 0; let mut total_receipts = 0; let mut total_filtered_out_dup_txns = 0; @@ -222,11 +222,11 @@ where ExecutionOutcome::new(Default::default(), receipts, first_block, 
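Aside: the `Primitives: NodePrimitives<Receipt = reth_primitives::Receipt>` bound in the hunk above uses associated-type bounds to pin the receipt type. A self-contained sketch of why such a bound accepts one node type and rejects others; all types here are toys:

trait NodePrimitives {
    type Receipt;
}

trait ProviderNodeTypes {
    type Primitives: NodePrimitives;
}

struct OpReceipt;

struct OpPrimitives;
impl NodePrimitives for OpPrimitives {
    type Receipt = OpReceipt;
}

struct OpNode;
impl ProviderNodeTypes for OpNode {
    type Primitives = OpPrimitives;
}

// Only node types whose primitives fix `Receipt = OpReceipt` satisfy this,
// which is what pins the import command to OP receipts.
fn import_receipts<N>()
where
    N: ProviderNodeTypes<Primitives: NodePrimitives<Receipt = OpReceipt>>,
{
}

fn main() {
    import_receipts::<OpNode>();
}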
Default::default()); // finally, write the receipts - let mut storage_writer = UnifiedStorageWriter::from( - &provider, - static_file_provider.latest_writer(StaticFileSegment::Receipts)?, - ); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + provider.write_state( + execution_outcome, + OriginalValuesKnown::Yes, + StorageLocation::StaticFiles, + )?; } // Only commit if we have imported as many receipts as the number of transactions. @@ -247,7 +247,7 @@ where provider .save_stage_checkpoint(StageId::Execution, StageCheckpoint::new(highest_block_receipts))?; - UnifiedStorageWriter::commit(provider, static_file_provider)?; + UnifiedStorageWriter::commit(provider)?; Ok(ImportReceiptsResult { total_decoded_receipts, total_filtered_out_dup_txns }) } diff --git a/crates/optimism/cli/src/commands/init_state.rs b/crates/optimism/cli/src/commands/init_state.rs index 68f5d9a585f6..7bbfc3bb820f 100644 --- a/crates/optimism/cli/src/commands/init_state.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -2,9 +2,8 @@ use clap::Parser; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment}; use reth_db_common::init::init_from_state_dump; -use reth_node_builder::NodeTypesWithEngine; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD}; use reth_primitives::SealedHeader; @@ -12,7 +11,7 @@ use reth_provider::{ BlockNumReader, ChainSpecProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; -use std::{fs::File, io::BufReader}; +use std::io::BufReader; use tracing::info; /// Initializes the database with the genesis block. @@ -36,9 +35,7 @@ pub struct InitStateCommandOp { impl> InitStateCommandOp { /// Execute the `init` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { info!(target: "reth::cli", "Reth init-state starting"); let Environment { config, provider_factory, .. } = @@ -54,7 +51,6 @@ impl> InitStateCommandOp { if last_block_number == 0 { reth_cli_commands::init_state::without_evm::setup_without_evm( &provider_rw, - &static_file_provider, SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), BEDROCK_HEADER_TTD, )?; @@ -74,7 +70,7 @@ impl> InitStateCommandOp { info!(target: "reth::cli", "Initiating state dump"); - let reader = BufReader::new(File::open(self.init_state.state)?); + let reader = BufReader::new(reth_fs_util::open(self.init_state.state)?); let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?; provider_rw.commit()?; diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index b3c7c86d1d18..23eaa99b5213 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -27,6 +27,11 @@ pub mod commands; /// made for op-erigon's import needs). pub mod receipt_file_codec; +/// OVM block, same as EVM block at bedrock, except for signature of deposit transaction +/// not having a signature back then. +/// Enables decoding and encoding `Block` types within file contexts. 
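Aside: the receipt-import path above replaces the manual `UnifiedStorageWriter` pairing with `write_state(..., StorageLocation::StaticFiles)` plus a single commit. A toy sketch of that flow, with invented bodies standing in for reth's provider types:

enum StorageLocation {
    Database,
    StaticFiles,
}

struct ExecutionOutcome;

struct ProviderRw;

impl ProviderRw {
    // The provider routes the write itself; callers no longer pair a DB
    // writer with a static-file writer by hand.
    fn write_state(&self, _outcome: ExecutionOutcome, location: StorageLocation) {
        match location {
            StorageLocation::Database => { /* receipts into DB tables */ }
            StorageLocation::StaticFiles => { /* receipts into static files */ }
        }
    }

    // One commit point covers both backends.
    fn commit(self) {}
}

fn main() {
    let provider = ProviderRw;
    // Receipts go straight to static files in the import path above.
    provider.write_state(ExecutionOutcome, StorageLocation::StaticFiles);
    provider.write_state(ExecutionOutcome, StorageLocation::Database);
    provider.commit();
}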
+pub mod ovm_file_codec; + pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand}; use reth_optimism_chainspec::OpChainSpec; diff --git a/crates/optimism/cli/src/ovm_file_codec.rs b/crates/optimism/cli/src/ovm_file_codec.rs new file mode 100644 index 000000000000..3d746d6d1e0d --- /dev/null +++ b/crates/optimism/cli/src/ovm_file_codec.rs @@ -0,0 +1,383 @@ +use alloy_consensus::{ + transaction::{from_eip155_value, RlpEcdsaTx}, + Header, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, +}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, + eip4895::Withdrawals, +}; +use alloy_primitives::{ + bytes::{Buf, BytesMut}, + keccak256, PrimitiveSignature as Signature, TxHash, B256, U256, +}; +use alloy_rlp::{Decodable, Error as RlpError, RlpDecodable}; +use derive_more::{AsRef, Deref}; +use op_alloy_consensus::TxDeposit; +use reth_downloaders::file_client::FileClientError; +use reth_primitives::transaction::{Transaction, TxType}; +use serde::{Deserialize, Serialize}; +use tokio_util::codec::Decoder; + +#[allow(dead_code)] +/// Specific codec for reading raw block bodies from a file +/// with optimism-specific signature handling +pub(crate) struct OvmBlockFileCodec; + +impl Decoder for OvmBlockFileCodec { + type Item = Block; + type Error = FileClientError; + + fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + if src.is_empty() { + return Ok(None); + } + + let buf_slice = &mut src.as_ref(); + let body = + Block::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?; + src.advance(src.len() - buf_slice.len()); + + Ok(Some(body)) + } +} + +/// OVM block, same as EVM block but with different transaction signature handling +/// Pre-bedrock system transactions on Optimism were sent from the zero address +/// with an empty signature, +#[derive(Debug, Clone, PartialEq, Eq, RlpDecodable)] +pub struct Block { + /// Block header + pub header: Header, + /// Block body + pub body: BlockBody, +} + +impl Block { + /// Decodes a `Block` from the given byte slice. + pub fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let header = Header::decode(buf)?; + let body = BlockBody::decode(buf)?; + Ok(Self { header, body }) + } +} + +/// The body of a block for OVM +#[derive(Debug, Clone, PartialEq, Eq, Default, RlpDecodable)] +#[rlp(trailing)] +pub struct BlockBody { + /// Transactions in the block + pub transactions: Vec, + /// Uncle headers for the given block + pub ommers: Vec
<Header>
, + /// Withdrawals in the block. + pub withdrawals: Option, +} + +/// Signed transaction. +#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] +pub struct TransactionSigned { + /// Transaction hash + pub hash: TxHash, + /// The transaction signature values + pub signature: Signature, + /// Raw transaction info + #[deref] + #[as_ref] + pub transaction: Transaction, +} + +impl Default for TransactionSigned { + fn default() -> Self { + Self { + hash: Default::default(), + signature: Signature::test_signature(), + transaction: Default::default(), + } + } +} + +impl AsRef for TransactionSigned { + fn as_ref(&self) -> &Self { + self + } +} + +// === impl TransactionSigned === +impl TransactionSigned { + /// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with + /// tx type. + pub fn recalculate_hash(&self) -> B256 { + keccak256(self.encoded_2718()) + } + + /// Create a new signed transaction from a transaction and its signature. + /// + /// This will also calculate the transaction hash using its encoding. + pub fn from_transaction_and_signature(transaction: Transaction, signature: Signature) -> Self { + let mut initial_tx = Self { transaction, hash: Default::default(), signature }; + initial_tx.hash = initial_tx.recalculate_hash(); + initial_tx + } + + /// Decodes legacy transaction from the data buffer into a tuple. + /// + /// This expects `rlp(legacy_tx)` + /// + /// Refer to the docs for [`Self::decode_rlp_legacy_transaction`] for details on the exact + /// format expected. + pub(crate) fn decode_rlp_legacy_transaction_tuple( + data: &mut &[u8], + ) -> alloy_rlp::Result<(TxLegacy, TxHash, Signature)> { + let original_encoding = *data; + + let header = alloy_rlp::Header::decode(data)?; + let remaining_len = data.len(); + + let transaction_payload_len = header.payload_length; + + if transaction_payload_len > remaining_len { + return Err(RlpError::InputTooShort); + } + + let mut transaction = TxLegacy { + nonce: Decodable::decode(data)?, + gas_price: Decodable::decode(data)?, + gas_limit: Decodable::decode(data)?, + to: Decodable::decode(data)?, + value: Decodable::decode(data)?, + input: Decodable::decode(data)?, + chain_id: None, + }; + + let v = Decodable::decode(data)?; + let r: U256 = Decodable::decode(data)?; + let s: U256 = Decodable::decode(data)?; + + let tx_length = header.payload_length + header.length(); + let hash = keccak256(&original_encoding[..tx_length]); + + // Handle both pre-bedrock and regular cases + let (signature, chain_id) = if v == 0 && r.is_zero() && s.is_zero() { + // Pre-bedrock system transactions case + (Signature::new(r, s, false), None) + } else { + // Regular transaction case + let (parity, chain_id) = from_eip155_value(v) + .ok_or(alloy_rlp::Error::Custom("invalid parity for legacy transaction"))?; + (Signature::new(r, s, parity), chain_id) + }; + + // Set chain ID and verify length + transaction.chain_id = chain_id; + let decoded = remaining_len - data.len(); + if decoded != transaction_payload_len { + return Err(RlpError::UnexpectedLength); + } + + Ok((transaction, hash, signature)) + } + + /// Decodes legacy transaction from the data buffer. + /// + /// This should be used _only_ be used in general transaction decoding methods, which have + /// already ensured that the input is a legacy transaction with the following format: + /// `rlp(legacy_tx)` + /// + /// Legacy transactions are encoded as lists, so the input should start with a RLP list header. 
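Aside: the key OVM quirk handled in `decode_rlp_legacy_transaction_tuple` is the all-zero `(v, r, s)` triple marking a pre-bedrock system transaction. A toy reconstruction of just that branch, with a simplified stand-in for alloy's `from_eip155_value`:

fn from_eip155_value(v: u64) -> Option<(bool, Option<u64>)> {
    match v {
        27 => Some((false, None)),
        28 => Some((true, None)),
        v if v >= 35 => Some(((v - 35) % 2 == 1, Some((v - 35) / 2))),
        _ => None,
    }
}

fn classify(v: u64, r: u128, s: u128) -> Result<&'static str, &'static str> {
    if v == 0 && r == 0 && s == 0 {
        // Pre-bedrock OVM system tx: empty signature, no chain id.
        Ok("system transaction")
    } else if let Some((_parity, chain_id)) = from_eip155_value(v) {
        let _ = chain_id; // becomes `transaction.chain_id`
        Ok("regular legacy transaction")
    } else {
        Err("invalid parity for legacy transaction")
    }
}

fn main() {
    assert_eq!(classify(0, 0, 0), Ok("system transaction"));
    // Chain id 10 (OP mainnet) encodes as v = 55 or 56 under EIP-155.
    assert_eq!(classify(55, 1, 1), Ok("regular legacy transaction"));
    assert_eq!(classify(1, 1, 1), Err("invalid parity for legacy transaction"));
}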
+ /// + /// This expects `rlp(legacy_tx)` + // TODO: make buf advancement semantics consistent with `decode_enveloped_typed_transaction`, + // so decoding methods do not need to manually advance the buffer + pub fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> alloy_rlp::Result { + let (transaction, hash, signature) = Self::decode_rlp_legacy_transaction_tuple(data)?; + let signed = Self { transaction: Transaction::Legacy(transaction), hash, signature }; + Ok(signed) + } +} + +impl Decodable for TransactionSigned { + /// This `Decodable` implementation only supports decoding rlp encoded transactions as it's used + /// by p2p. + /// + /// The p2p encoding format always includes an RLP header, although the type RLP header depends + /// on whether or not the transaction is a legacy transaction. + /// + /// If the transaction is a legacy transaction, it is just encoded as a RLP list: + /// `rlp(tx-data)`. + /// + /// If the transaction is a typed transaction, it is encoded as a RLP string: + /// `rlp(tx-type || rlp(tx-data))` + /// + /// This can be used for decoding all signed transactions in p2p `BlockBodies` responses. + /// + /// This cannot be used for decoding EIP-4844 transactions in p2p `PooledTransactions`, since + /// the EIP-4844 variant of [`TransactionSigned`] does not include the blob sidecar. + /// + /// For a method suitable for decoding pooled transactions, see \[`PooledTransactionsElement`\]. + /// + /// CAUTION: Due to a quirk in [`Header::decode`], this method will succeed even if a typed + /// transaction is encoded in this format, and does not start with a RLP header: + /// `tx-type || rlp(tx-data)`. + /// + /// This is because [`Header::decode`] does not advance the buffer, and returns a length-1 + /// string header if the first byte is less than `0xf7`. + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + Self::network_decode(buf).map_err(Into::into) + } +} + +impl Encodable2718 for TransactionSigned { + fn type_flag(&self) -> Option { + match self.transaction.tx_type() { + TxType::Legacy => None, + tx_type => Some(tx_type as u8), + } + } + + fn encode_2718_len(&self) -> usize { + match &self.transaction { + Transaction::Legacy(legacy_tx) => legacy_tx.eip2718_encoded_length(&self.signature), + Transaction::Eip2930(access_list_tx) => { + access_list_tx.eip2718_encoded_length(&self.signature) + } + Transaction::Eip1559(dynamic_fee_tx) => { + dynamic_fee_tx.eip2718_encoded_length(&self.signature) + } + Transaction::Eip4844(blob_tx) => blob_tx.eip2718_encoded_length(&self.signature), + Transaction::Eip7702(set_code_tx) => { + set_code_tx.eip2718_encoded_length(&self.signature) + } + Transaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(), + } + } + + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + self.transaction.eip2718_encode(&self.signature, out) + } +} + +impl Decodable2718 for TransactionSigned { + fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result { + match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? 
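Aside: the `Encodable2718` impl above follows standard EIP-2718 framing: legacy transactions carry no type byte, typed transactions are `tx-type || payload`. A minimal sketch with stand-in payloads in place of real RLP encoding:

fn encode_2718(type_flag: Option<u8>, payload: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(payload.len() + 1);
    if let Some(ty) = type_flag {
        // 0x01 EIP-2930, 0x02 EIP-1559, 0x03 EIP-4844, 0x04 EIP-7702, 0x7e deposit
        out.push(ty);
    }
    out.extend_from_slice(payload); // legacy txs are emitted with no type byte
    out
}

fn main() {
    assert_eq!(encode_2718(None, &[0xc0]), vec![0xc0]); // legacy: bare RLP list
    assert_eq!(encode_2718(Some(0x7e), &[0xc0]), vec![0x7e, 0xc0]); // deposit
}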
{ + TxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), + TxType::Eip2930 => { + let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash }) + } + TxType::Eip1559 => { + let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash }) + } + TxType::Eip7702 => { + let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash }) + } + TxType::Eip4844 => { + let (tx, signature, hash) = TxEip4844::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash }) + } + TxType::Deposit => Ok(Self::from_transaction_and_signature( + Transaction::Deposit(TxDeposit::rlp_decode(buf)?), + TxDeposit::signature(), + )), + } + } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { + Ok(Self::decode_rlp_legacy_transaction(buf)?) + } +} + +#[cfg(test)] +mod tests { + use crate::ovm_file_codec::TransactionSigned; + use alloy_primitives::{address, hex, TxKind, B256, U256}; + use reth_primitives::transaction::Transaction; + const DEPOSIT_FUNCTION_SELECTOR: [u8; 4] = [0xb6, 0xb5, 0x5f, 0x25]; + use alloy_rlp::Decodable; + + #[test] + fn test_decode_legacy_transactions() { + // Test Case 1: contract deposit - regular L2 transaction calling deposit() function + // tx: https://optimistic.etherscan.io/getRawTx?tx=0x7860252963a2df21113344f323035ef59648638a571eef742e33d789602c7a1c + let deposit_tx_bytes = hex!("f88881f0830f481c830c6e4594a75127121d28a9bf848f3b70e7eea26570aa770080a4b6b55f2500000000000000000000000000000000000000000000000000000000000710b238a0d5c622d92ddf37f9c18a3465a572f74d8b1aeaf50c1cfb10b3833242781fd45fa02c4f1d5819bf8b70bf651e7a063b7db63c55bd336799c6ae3e5bc72ad6ef3def"); + let deposit_decoded = TransactionSigned::decode(&mut &deposit_tx_bytes[..]).unwrap(); + + // Verify deposit transaction + let deposit_tx = match &deposit_decoded.transaction { + Transaction::Legacy(ref tx) => tx, + _ => panic!("Expected legacy transaction for NFT deposit"), + }; + + assert_eq!( + deposit_tx.to, + TxKind::Call(address!("a75127121d28a9bf848f3b70e7eea26570aa7700")) + ); + assert_eq!(deposit_tx.nonce, 240); + assert_eq!(deposit_tx.gas_price, 1001500); + assert_eq!(deposit_tx.gas_limit, 814661); + assert_eq!(deposit_tx.value, U256::ZERO); + assert_eq!(&deposit_tx.input.as_ref()[0..4], DEPOSIT_FUNCTION_SELECTOR); + assert_eq!(deposit_tx.chain_id, Some(10)); + assert_eq!( + deposit_decoded.signature.r(), + U256::from_str_radix( + "d5c622d92ddf37f9c18a3465a572f74d8b1aeaf50c1cfb10b3833242781fd45f", + 16 + ) + .unwrap() + ); + assert_eq!( + deposit_decoded.signature.s(), + U256::from_str_radix( + "2c4f1d5819bf8b70bf651e7a063b7db63c55bd336799c6ae3e5bc72ad6ef3def", + 16 + ) + .unwrap() + ); + + // Test Case 2: pre-bedrock system transaction from block 105235052 + // tx: https://optimistic.etherscan.io/getRawTx?tx=0xe20b11349681dd049f8df32f5cdbb4c68d46b537685defcd86c7fa42cfe75b9e + let system_tx_bytes = 
hex!("f9026c830d899383124f808302a77e94a0cc33dd6f4819d473226257792afe230ec3c67f80b902046c459a280000000000000000000000004d73adb72bc3dd368966edd0f0b2148401a178e2000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000647fac7f00000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000084704316e5000000000000000000000000000000000000000000000000000000000000006e10975631049de3c008989b0d8c19fc720dc556ca01abfbd794c6eb5075dd000d000000000000000000000000000000000000000000000000000000000000001410975631049de3c008989b0d8c19fc720dc556ca01abfbd794c6eb5075dd000d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082a39325251d44e11f3b6d92f9382438eb6c8b5068d4a488d4f177b26f2ca20db34ae53467322852afcc779f25eafd124c5586f54b9026497ba934403d4c578e3c1b5aa754c918ee2ecd25402df656c2419717e4017a7aecb84af3914fd3c7bf6930369c4e6ff76950246b98e354821775f02d33cdbee5ef6aed06c15b75691692d31c00000000000000000000000000000000000000000000000000000000000038a0e8991e95e66d809f4b6fb0af27c31368ca0f30e657165c428aa681ec5ea25bbea013ed325bd97365087ec713e9817d252b59113ea18430b71a5890c4eeb6b9efc4"); + let system_decoded = TransactionSigned::decode(&mut &system_tx_bytes[..]).unwrap(); + + // Verify system transaction + assert!(system_decoded.is_legacy()); + + let system_tx = match &system_decoded.transaction { + Transaction::Legacy(ref tx) => tx, + _ => panic!("Expected Legacy transaction"), + }; + + assert_eq!(system_tx.nonce, 887187); + assert_eq!(system_tx.gas_price, 1200000); + assert_eq!(system_tx.gas_limit, 173950); + assert_eq!( + system_tx.to, + TxKind::Call(address!("a0cc33dd6f4819d473226257792afe230ec3c67f")) + ); + assert_eq!(system_tx.value, U256::ZERO); + assert_eq!(system_tx.chain_id, Some(10)); + + assert_eq!( + system_decoded.signature.r(), + U256::from_str_radix( + "e8991e95e66d809f4b6fb0af27c31368ca0f30e657165c428aa681ec5ea25bbe", + 16 + ) + .unwrap() + ); + assert_eq!( + system_decoded.signature.s(), + U256::from_str_radix( + "13ed325bd97365087ec713e9817d252b59113ea18430b71a5890c4eeb6b9efc4", + 16 + ) + .unwrap() + ); + assert_eq!( + system_decoded.hash, + B256::from(hex!("e20b11349681dd049f8df32f5cdbb4c68d46b537685defcd86c7fa42cfe75b9e")) + ); + } +} diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index e2520c89340d..0dffceaddca9 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -26,6 +26,7 @@ reth-optimism-chainspec.workspace = true # ethereum alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-trie.workspace = true tracing.workspace = true diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index bf1428815d09..cb357db924ad 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -9,19 +9,19 @@ // The `optimism` feature must be enabled to use this crate. 
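Aside: on the decode side, the `typed_decode`/`fallback_decode` split above keys off the first byte: an RLP list header (`>= 0xc0`) means a legacy transaction, anything smaller is taken as a type id. A toy classifier over the same byte ranges; note both test vectors above begin with an RLP list byte:

fn classify_2718(buf: &[u8]) -> Result<&'static str, &'static str> {
    match buf.first() {
        None => Err("empty input"),
        // An RLP list header (>= 0xc0) means no type byte: legacy fallback.
        Some(b) if *b >= 0xc0 => Ok("legacy via fallback_decode"),
        Some(0x01) => Ok("eip-2930"),
        Some(0x02) => Ok("eip-1559"),
        Some(0x03) => Ok("eip-4844"),
        Some(0x04) => Ok("eip-7702"),
        Some(0x7e) => Ok("deposit"),
        Some(_) => Err("unexpected type"),
    }
}

fn main() {
    // First byte of the legacy test vectors above is 0xf8/0xf9: an RLP list.
    assert_eq!(classify_2718(&[0xf8, 0x88]), Ok("legacy via fallback_decode"));
    assert_eq!(classify_2718(&[0x7e, 0x01]), Ok("deposit"));
}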
#![cfg(feature = "optimism")] -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{B64, U256}; use reth_chainspec::EthereumHardforks; -use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; +use reth_consensus::{Consensus, ConsensusError, HeaderValidator, PostExecutionInput}; use reth_consensus_common::validation::{ validate_against_parent_4844, validate_against_parent_eip1559_base_fee, - validate_against_parent_hash_number, validate_against_parent_timestamp, validate_cancun_gas, - validate_header_base_fee, validate_header_extradata, validate_header_gas, - validate_shanghai_withdrawals, + validate_against_parent_hash_number, validate_against_parent_timestamp, + validate_body_against_header, validate_cancun_gas, validate_header_base_fee, + validate_header_extradata, validate_header_gas, validate_shanghai_withdrawals, }; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::OptimismHardforks; -use reth_primitives::{BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader}; +use reth_optimism_forks::OpHardforks; +use reth_primitives::{BlockBody, BlockWithSenders, GotExpected, SealedBlock, SealedHeader}; use std::{sync::Arc, time::SystemTime}; mod proof; @@ -47,9 +47,53 @@ impl OpBeaconConsensus { } impl Consensus for OpBeaconConsensus { + fn validate_body_against_header( + &self, + body: &BlockBody, + header: &SealedHeader, + ) -> Result<(), ConsensusError> { + validate_body_against_header(body, header) + } + + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { + // Check ommers hash + let ommers_hash = reth_primitives::proofs::calculate_ommers_root(&block.body.ommers); + if block.header.ommers_hash != ommers_hash { + return Err(ConsensusError::BodyOmmersHashDiff( + GotExpected { got: ommers_hash, expected: block.header.ommers_hash }.into(), + )) + } + + // Check transaction root + if let Err(error) = block.ensure_transaction_root_valid() { + return Err(ConsensusError::BodyTransactionRootDiff(error.into())) + } + + // EIP-4895: Beacon chain push withdrawals as operations + if self.chain_spec.is_shanghai_active_at_timestamp(block.timestamp) { + validate_shanghai_withdrawals(block)?; + } + + if self.chain_spec.is_cancun_active_at_timestamp(block.timestamp) { + validate_cancun_gas(block)?; + } + + Ok(()) + } + + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + input: PostExecutionInput<'_>, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec, input.receipts) + } +} + +impl HeaderValidator for OpBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { - validate_header_gas(header)?; - validate_header_base_fee(header, &self.chain_spec) + validate_header_gas(header.header())?; + validate_header_base_fee(header.header(), &self.chain_spec) } fn validate_header_against_parent( @@ -57,17 +101,21 @@ impl Consensus for OpBeaconConsensus { header: &SealedHeader, parent: &SealedHeader, ) -> Result<(), ConsensusError> { - validate_against_parent_hash_number(header, parent)?; + validate_against_parent_hash_number(header.header(), parent)?; if self.chain_spec.is_bedrock_active_at_block(header.number) { - validate_against_parent_timestamp(header, parent)?; + validate_against_parent_timestamp(header.header(), parent.header())?; } - validate_against_parent_eip1559_base_fee(header, parent, &self.chain_spec)?; + validate_against_parent_eip1559_base_fee( + 
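Aside: this hunk moves header checks out of `Consensus` into the new `HeaderValidator` trait, while body and block validation stay on `Consensus`. A toy sketch of the resulting layering, assuming the supertrait relationship suggested by the two impl blocks:

struct Header;
struct Block;

trait HeaderValidator {
    fn validate_header(&self, header: &Header) -> Result<(), &'static str>;
}

trait Consensus: HeaderValidator {
    fn validate_block_pre_execution(&self, block: &Block) -> Result<(), &'static str>;
}

struct OpBeaconConsensus;

impl HeaderValidator for OpBeaconConsensus {
    fn validate_header(&self, _header: &Header) -> Result<(), &'static str> {
        Ok(()) // gas, base fee, extradata checks live here
    }
}

impl Consensus for OpBeaconConsensus {
    fn validate_block_pre_execution(&self, _block: &Block) -> Result<(), &'static str> {
        Ok(()) // ommers hash, tx root, withdrawals, blob gas checks
    }
}

fn main() {
    let consensus = OpBeaconConsensus;
    consensus.validate_header(&Header).unwrap();
    consensus.validate_block_pre_execution(&Block).unwrap();
}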
header.header(), + parent.header(), + &self.chain_spec, + )?; // ensure that the blob gas fields for this block if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - validate_against_parent_4844(header, parent)?; + validate_against_parent_4844(header.header(), parent.header())?; } Ok(()) @@ -118,38 +166,4 @@ impl Consensus for OpBeaconConsensus { Ok(()) } - - fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { - // Check ommers hash - let ommers_hash = reth_primitives::proofs::calculate_ommers_root(&block.body.ommers); - if block.header.ommers_hash != ommers_hash { - return Err(ConsensusError::BodyOmmersHashDiff( - GotExpected { got: ommers_hash, expected: block.header.ommers_hash }.into(), - )) - } - - // Check transaction root - if let Err(error) = block.ensure_transaction_root_valid() { - return Err(ConsensusError::BodyTransactionRootDiff(error.into())) - } - - // EIP-4895: Beacon chain push withdrawals as operations - if self.chain_spec.is_shanghai_active_at_timestamp(block.timestamp) { - validate_shanghai_withdrawals(block)?; - } - - if self.chain_spec.is_cancun_active_at_timestamp(block.timestamp) { - validate_cancun_gas(block)?; - } - - Ok(()) - } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec, input.receipts) - } } diff --git a/crates/optimism/consensus/src/proof.rs b/crates/optimism/consensus/src/proof.rs index 813e451da25f..18e64a467ff1 100644 --- a/crates/optimism/consensus/src/proof.rs +++ b/crates/optimism/consensus/src/proof.rs @@ -1,10 +1,10 @@ //! Helper function for Receipt root calculation for Optimism hardforks. use alloy_primitives::B256; +use alloy_trie::root::ordered_trie_root_with_encoder; use reth_chainspec::ChainSpec; use reth_optimism_forks::OpHardfork; use reth_primitives::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; -use reth_trie_common::root::ordered_trie_root_with_encoder; /// Calculates the receipt root for a header. 
pub(crate) fn calculate_receipt_root_optimism( diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index f6b22ad14c8d..807f224ca4b8 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -48,6 +48,7 @@ reth-primitives = { workspace = true, features = ["test-utils"] } reth-optimism-chainspec.workspace = true alloy-genesis.workspace = true alloy-consensus.workspace = true +reth-optimism-primitives = { workspace = true, features = ["arbitrary"] } [features] default = ["std"] diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 2b004e6eb9dc..1c93d2b71d03 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -2,7 +2,7 @@ use crate::{l1::ensure_create2_deployer, OpBlockExecutionError, OpEvmConfig}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; -use alloy_consensus::Transaction as _; +use alloy_consensus::{Header, Transaction as _}; use alloy_eips::eip7685::Requests; use core::fmt::Display; use op_alloy_consensus::DepositTransaction; @@ -15,16 +15,14 @@ use reth_evm::{ }, state_change::post_block_balance_increments, system_calls::{OnStateHook, SystemCaller}, - ConfigureEvm, + ConfigureEvm, TxEnvOverrides, }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; use reth_optimism_forks::OpHardfork; -use reth_primitives::{BlockWithSenders, Header, Receipt, TxType}; +use reth_primitives::{BlockWithSenders, Receipt, TxType}; use reth_revm::{Database, State}; -use revm_primitives::{ - db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, -}; +use revm_primitives::{db::DatabaseCommit, EnvWithHandlerCfg, ResultAndState, U256}; use tracing::trace; /// Factory for [`OpExecutionStrategy`]. @@ -78,6 +76,8 @@ where chain_spec: Arc, /// How to create an EVM. evm_config: EvmConfig, + /// Optional overrides for the transactions environment. + tx_env_overrides: Option>, /// Current state for block execution. state: State, /// Utility to call system smart contracts. @@ -91,7 +91,7 @@ where /// Creates a new [`OpExecutionStrategy`] pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); - Self { state, chain_spec, evm_config, system_caller } + Self { state, chain_spec, evm_config, system_caller, tx_env_overrides: None } } } @@ -104,10 +104,7 @@ where /// /// Caution: this does not initialize the tx environment. fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); - + let (cfg, block_env) = self.evm_config.cfg_and_block_env(header, total_difficulty); EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) } } @@ -119,6 +116,10 @@ where { type Error = BlockExecutionError; + fn init(&mut self, tx_env_overrides: Box) { + self.tx_env_overrides = Some(tx_env_overrides); + } + fn apply_pre_execution_changes( &mut self, block: &BlockWithSenders, @@ -197,6 +198,10 @@ where self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); + if let Some(tx_env_overrides) = &mut self.tx_env_overrides { + tx_env_overrides.apply(evm.tx_mut()); + } + // Execute transaction. 
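Aside: the `tx_env_overrides` field threaded through `OpExecutionStrategy` below is an optional boxed hook applied to each transaction environment just before execution. A self-contained toy version of the hook and its call site; the override type is invented for illustration:

struct TxEnv {
    gas_price: u64,
}

trait TxEnvOverrides {
    fn apply(&mut self, env: &mut TxEnv);
}

// Example override: zero out gas price, e.g. for call tracing.
struct ZeroGasPrice;

impl TxEnvOverrides for ZeroGasPrice {
    fn apply(&mut self, env: &mut TxEnv) {
        env.gas_price = 0;
    }
}

struct ExecutionStrategy {
    tx_env_overrides: Option<Box<dyn TxEnvOverrides>>,
}

impl ExecutionStrategy {
    fn execute_transaction(&mut self, mut env: TxEnv) -> TxEnv {
        // Mirrors the new hook: apply overrides after filling the tx env,
        // right before handing it to the EVM.
        if let Some(overrides) = &mut self.tx_env_overrides {
            overrides.apply(&mut env);
        }
        env
    }
}

fn main() {
    let mut strategy = ExecutionStrategy { tx_env_overrides: Some(Box::new(ZeroGasPrice)) };
    let env = strategy.execute_transaction(TxEnv { gas_price: 100 });
    assert_eq!(env.gas_price, 0);
}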
let result_and_state = evm.transact().map_err(move |err| { let new_err = err.map_db_err(|e| e.into()); @@ -367,7 +372,7 @@ mod tests { let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().regolith_activated().build()); - let tx = TransactionSigned::from_transaction_and_signature( + let tx = TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), nonce: 0, @@ -378,7 +383,7 @@ mod tests { Signature::test_signature(), ); - let tx_deposit = TransactionSigned::from_transaction_and_signature( + let tx_deposit = TransactionSigned::new_unhashed( Transaction::Deposit(op_alloy_consensus::TxDeposit { from: addr, to: addr.into(), @@ -451,7 +456,7 @@ mod tests { let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().canyon_activated().build()); - let tx = TransactionSigned::from_transaction_and_signature( + let tx = TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), nonce: 0, @@ -462,7 +467,7 @@ mod tests { Signature::test_signature(), ); - let tx_deposit = TransactionSigned::from_transaction_and_signature( + let tx_deposit = TransactionSigned::new_unhashed( Transaction::Deposit(op_alloy_consensus::TxDeposit { from: addr, to: addr.into(), diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 6ff841b9ddc1..ef8c3f3b3dbe 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -2,6 +2,7 @@ use crate::OpBlockExecutionError; use alloc::{string::ToString, sync::Arc}; +use alloy_consensus::Transaction; use alloy_primitives::{address, b256, hex, Address, Bytes, B256, U256}; use reth_chainspec::ChainSpec; use reth_execution_errors::BlockExecutionError; @@ -297,15 +298,16 @@ where mod tests { use alloy_eips::eip2718::Decodable2718; use reth_optimism_chainspec::OP_MAINNET; - use reth_optimism_forks::OptimismHardforks; + use reth_optimism_forks::OpHardforks; use reth_primitives::{Block, BlockBody, TransactionSigned}; use super::*; #[test] fn sanity_l1_block() { + use alloy_consensus::Header; use alloy_primitives::{hex_literal::hex, Bytes}; - use reth_primitives::{Header, TransactionSigned}; + use reth_primitives::TransactionSigned; let bytes = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); let l1_info_tx = TransactionSigned::decode_2718(&mut bytes.as_ref()).unwrap(); diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 8f0f75782f42..176864de6dc5 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -13,15 +13,16 @@ extern crate alloc; use alloc::{sync::Arc, vec::Vec}; +use alloy_consensus::Header; use alloy_primitives::{Address, U256}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; use reth_optimism_chainspec::{DecodeError, OpChainSpec}; -use reth_primitives::{ - revm_primitives::{AnalysisKind, 
CfgEnvWithHandlerCfg, TxEnv}, - transaction::FillTxEnv, - Head, Header, TransactionSigned, +use reth_primitives::{transaction::FillTxEnv, Head, TransactionSigned}; +use reth_revm::{ + inspector_handle_register, + primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, + Database, Evm, EvmBuilder, GetInspector, }; -use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; mod config; pub use config::{revm_spec, revm_spec_by_timestamp_after_bedrock}; @@ -49,7 +50,7 @@ impl OpEvmConfig { } /// Returns the chain spec associated with this configuration. - pub fn chain_spec(&self) -> &OpChainSpec { + pub const fn chain_spec(&self) -> &Arc { &self.chain_spec } } @@ -200,7 +201,7 @@ impl ConfigureEvm for OpEvmConfig { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::constants::KECCAK_EMPTY; + use alloy_consensus::{constants::KECCAK_EMPTY, Header}; use alloy_eips::eip7685::Requests; use alloy_genesis::Genesis; use alloy_primitives::{bytes, Address, LogData, B256, U256}; @@ -210,17 +211,15 @@ mod tests { AccountRevertInit, BundleStateInit, Chain, ExecutionOutcome, RevertsInit, }; use reth_optimism_chainspec::BASE_MAINNET; - use reth_primitives::{ - revm_primitives::{AccountInfo, BlockEnv, CfgEnv, SpecId}, - Account, Header, Log, Receipt, Receipts, SealedBlockWithSenders, TxType, - }; - + use reth_optimism_primitives::OpPrimitives; + use reth_primitives::{Account, Log, Receipt, Receipts, SealedBlockWithSenders, TxType}; use reth_revm::{ db::{BundleState, CacheDB, EmptyDBTyped}, inspectors::NoOpInspector, + primitives::{AccountInfo, BlockEnv, CfgEnv, SpecId}, JournaledState, }; - use revm_primitives::{CfgEnvWithHandlerCfg, EnvWithHandlerCfg, HandlerCfg}; + use revm_primitives::{EnvWithHandlerCfg, HandlerCfg}; use std::{ collections::{HashMap, HashSet}, sync::Arc, @@ -232,12 +231,6 @@ mod tests { #[test] fn test_fill_cfg_and_block_env() { - // Create a new configuration environment - let mut cfg_env = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); - - // Create a default block environment - let mut block_env = BlockEnv::default(); - // Create a default header let header = Header::default(); @@ -254,10 +247,10 @@ mod tests { // Define the total difficulty as zero (default) let total_difficulty = U256::ZERO; - // Use the `OpEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec, + // Use the `OpEvmConfig` to create the `cfg_env` and `block_env` based on the ChainSpec, // Header, and total difficulty - OpEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() })) - .fill_cfg_and_block_env(&mut cfg_env, &mut block_env, &header, total_difficulty); + let (cfg_env, _) = OpEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() })) + .cfg_and_block_env(&header, total_difficulty); // Assert that the chain ID in the `cfg_env` is correctly set to the chain ID of the // ChainSpec @@ -551,7 +544,7 @@ mod tests { #[test] fn receipts_by_block_hash() { // Create a default SealedBlockWithSenders object - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); // Define block hashes for block1 and block2 let block1_hash = B256::new([0x01; 32]); @@ -603,7 +596,8 @@ mod tests { // Create a Chain object with a BTreeMap of blocks mapped to their block numbers, // including block1_hash and block2_hash, and the execution_outcome - let chain = Chain::new([block1, block2], execution_outcome.clone(), None); + let chain: Chain = + Chain::new([block1, block2], 
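Aside: `chain_spec` switching from `&OpChainSpec` to `&Arc<OpChainSpec>` lets callers either borrow for a read or take a cheap refcounted handle. A small sketch of the difference, with a toy spec type:

use std::sync::Arc;

struct ChainSpec {
    chain_id: u64,
}

struct EvmConfig {
    chain_spec: Arc<ChainSpec>,
}

impl EvmConfig {
    const fn chain_spec(&self) -> &Arc<ChainSpec> {
        &self.chain_spec
    }
}

fn main() {
    let config = EvmConfig { chain_spec: Arc::new(ChainSpec { chain_id: 10 }) };
    // Borrow for a read...
    assert_eq!(config.chain_spec().chain_id, 10);
    // ...or clone the Arc without cloning the spec itself.
    let handle: Arc<ChainSpec> = config.chain_spec().clone();
    assert_eq!(Arc::strong_count(&handle), 2);
}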
execution_outcome.clone(), None); // Assert that the proper receipt vector is returned for block1_hash assert_eq!(chain.receipts_by_block_hash(block1_hash), Some(vec![&receipt1])); @@ -820,7 +814,7 @@ mod tests { }; // Create an empty Receipts object - let receipts_empty = Receipts { receipt_vec: vec![] }; + let receipts_empty = Receipts:: { receipt_vec: vec![] }; // Define the first block number let first_block = 123; diff --git a/crates/optimism/hardforks/src/hardfork.rs b/crates/optimism/hardforks/src/hardfork.rs index 9a9786a8fe09..661816ae5fe0 100644 --- a/crates/optimism/hardforks/src/hardfork.rs +++ b/crates/optimism/hardforks/src/hardfork.rs @@ -158,7 +158,7 @@ impl OpHardfork { Self::Ecotone => Some(1708534800), Self::Fjord => Some(1716998400), Self::Granite => Some(1723478400), - Self::Holocene => None, + Self::Holocene => Some(1732633200), }, ) } @@ -257,6 +257,7 @@ impl OpHardfork { (Self::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)), (Self::Fjord.boxed(), ForkCondition::Timestamp(1716998400)), (Self::Granite.boxed(), ForkCondition::Timestamp(1723478400)), + (Self::Holocene.boxed(), ForkCondition::Timestamp(1732633200)), ]) } @@ -288,6 +289,7 @@ impl OpHardfork { (Self::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)), (Self::Fjord.boxed(), ForkCondition::Timestamp(1716998400)), (Self::Granite.boxed(), ForkCondition::Timestamp(1723478400)), + (Self::Holocene.boxed(), ForkCondition::Timestamp(1732633200)), ]) } @@ -354,7 +356,8 @@ mod tests { #[test] fn check_op_hardfork_from_str() { - let hardfork_str = ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD", "GRaNiTe"]; + let hardfork_str = + ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD", "GRaNiTe", "hOlOcEnE"]; let expected_hardforks = [ OpHardfork::Bedrock, OpHardfork::Regolith, @@ -362,6 +365,7 @@ mod tests { OpHardfork::Ecotone, OpHardfork::Fjord, OpHardfork::Granite, + OpHardfork::Holocene, ]; let hardforks: Vec = diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index df159161e0e1..3915bcf6cbda 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -19,7 +19,7 @@ pub use hardfork::OpHardfork; use reth_ethereum_forks::EthereumHardforks; /// Extends [`EthereumHardforks`] with optimism helper methods. -pub trait OptimismHardforks: EthereumHardforks { +pub trait OpHardforks: EthereumHardforks { /// Convenience method to check if [`OpHardfork::Bedrock`] is active at a given block /// number. 
fn is_bedrock_active_at_block(&self, block_number: u64) -> bool { diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index c1e23e3d5719..5f100f0a28d1 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -13,9 +13,12 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-db.workspace = true reth-engine-local.workspace = true reth-primitives.workspace = true reth-payload-builder.workspace = true +reth-payload-util.workspace = true +reth-payload-validator.workspace = true reth-basic-payload-builder.workspace = true reth-consensus.workspace = true reth-node-api.workspace = true @@ -28,6 +31,8 @@ reth-evm.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-beacon-consensus.workspace = true reth-trie-db.workspace = true +reth-rpc-server-types.workspace = true +reth-tasks = { workspace = true, optional = true } # op-reth reth-optimism-payload-builder.workspace = true @@ -36,6 +41,7 @@ reth-optimism-rpc.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-forks.workspace = true +reth-optimism-primitives.workspace = true # revm with required optimism features revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } @@ -45,6 +51,7 @@ alloy-eips.workspace = true alloy-primitives.workspace = true op-alloy-rpc-types-engine.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true # misc clap.workspace = true @@ -56,7 +63,6 @@ parking_lot.workspace = true serde_json.workspace = true # test-utils dependencies -reth = { workspace = true, optional = true } reth-e2e-test-utils = { workspace = true, optional = true } alloy-genesis = { workspace = true, optional = true } tokio = { workspace = true, optional = true } @@ -64,9 +70,12 @@ tokio = { workspace = true, optional = true } [dev-dependencies] reth-optimism-node = { workspace = true, features = ["test-utils"] } reth-db.workspace = true +reth-node-core.workspace = true reth-node-builder = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } +reth-tasks.workspace = true + alloy-primitives.workspace = true op-alloy-consensus.workspace = true alloy-signer-local.workspace = true @@ -86,32 +95,38 @@ optimism = [ "reth-engine-local/optimism", "reth-optimism-consensus/optimism", "reth-db/optimism", - "reth-optimism-node/optimism" + "reth-optimism-node/optimism", + "reth-node-core/optimism" ] asm-keccak = [ "reth-primitives/asm-keccak", - "reth/asm-keccak", "alloy-primitives/asm-keccak", "revm/asm-keccak", - "reth-optimism-node/asm-keccak" + "reth-optimism-node/asm-keccak", + "reth-node-core/asm-keccak" ] test-utils = [ - "reth", - "reth-e2e-test-utils", - "alloy-genesis", - "tokio", - "reth-node-builder/test-utils", - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-evm/test-utils", - "reth-network/test-utils", - "reth-payload-builder/test-utils", - "reth-primitives/test-utils", - "reth-revm/test-utils", - "reth-db/test-utils", - "reth-provider/test-utils", - "reth-transaction-pool/test-utils", - "reth-trie-db/test-utils", - "revm/test-utils", - "reth-optimism-node/test-utils" + "reth-tasks", + "reth-e2e-test-utils", + "alloy-genesis", + "tokio", + "reth-node-builder/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-network/test-utils", + 
"reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils", + "reth-optimism-node/test-utils", + "reth-optimism-primitives/arbitrary", +] +reth-codec = [ + "reth-primitives/reth-codec", + "reth-optimism-primitives/reth-codec", ] diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 7400f149a966..57b76b904bd3 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -1,6 +1,7 @@ -use std::sync::Arc; - -use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1}; +use alloy_rpc_types_engine::{ + ExecutionPayload, ExecutionPayloadEnvelopeV2, ExecutionPayloadSidecar, ExecutionPayloadV1, + PayloadError, +}; use op_alloy_rpc_types_engine::{ OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpPayloadAttributes, }; @@ -14,8 +15,11 @@ use reth_node_api::{ validate_version_specific_fields, EngineTypes, EngineValidator, }; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::{OpHardfork, OptimismHardforks}; +use reth_optimism_forks::{OpHardfork, OpHardforks}; use reth_optimism_payload_builder::{OpBuiltPayload, OpPayloadBuilderAttributes}; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::{Block, SealedBlockFor}; +use std::sync::Arc; /// The types used in the optimism beacon consensus engine. #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] @@ -57,76 +61,42 @@ impl PayloadTypes for OpPayloadTypes { /// Validator for Optimism engine API. #[derive(Debug, Clone)] pub struct OpEngineValidator { - chain_spec: Arc, + inner: ExecutionPayloadValidator, } impl OpEngineValidator { /// Instantiates a new validator. pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } + Self { inner: ExecutionPayloadValidator::new(chain_spec) } } -} - -/// Validates the presence of the `withdrawals` field according to the payload timestamp. -/// -/// After Canyon, withdrawals field must be [Some]. -/// Before Canyon, withdrawals field must be [None]; -/// -/// Canyon activates the Shanghai EIPs, see the Canyon specs for more details: -/// -pub fn validate_withdrawals_presence( - chain_spec: &ChainSpec, - version: EngineApiMessageVersion, - message_validation_kind: MessageValidationKind, - timestamp: u64, - has_withdrawals: bool, -) -> Result<(), EngineObjectValidationError> { - let is_shanghai = chain_spec.fork(OpHardfork::Canyon).active_at_timestamp(timestamp); - match version { - EngineApiMessageVersion::V1 => { - if has_withdrawals { - return Err(message_validation_kind - .to_error(VersionSpecificValidationError::WithdrawalsNotSupportedInV1)) - } - if is_shanghai { - return Err(message_validation_kind - .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) - } - } - EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { - if is_shanghai && !has_withdrawals { - return Err(message_validation_kind - .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) - } - if !is_shanghai && has_withdrawals { - return Err(message_validation_kind - .to_error(VersionSpecificValidationError::HasWithdrawalsPreShanghai)) - } - } - }; - - Ok(()) + /// Returns the chain spec used by the validator. 
+ #[inline] + fn chain_spec(&self) -> &OpChainSpec { + self.inner.chain_spec() + } } impl EngineValidator for OpEngineValidator where Types: EngineTypes, { + type Block = Block; + fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, payload_or_attrs: PayloadOrAttributes<'_, OpPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { validate_withdrawals_presence( - &self.chain_spec, + self.chain_spec(), version, payload_or_attrs.message_validation_kind(), payload_or_attrs.timestamp(), payload_or_attrs.withdrawals().is_some(), )?; validate_parent_beacon_block_root_presence( - &self.chain_spec, + self.chain_spec(), version, payload_or_attrs.message_validation_kind(), payload_or_attrs.timestamp(), @@ -139,7 +109,7 @@ where version: EngineApiMessageVersion, attributes: &OpPayloadAttributes, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, attributes.into())?; + validate_version_specific_fields(self.chain_spec(), version, attributes.into())?; if attributes.gas_limit.is_none() { return Err(EngineObjectValidationError::InvalidParams( @@ -147,7 +117,9 @@ where )) } - if self.chain_spec.is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp) + if self + .chain_spec() + .is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp) { let (elasticity, denominator) = attributes.decode_eip_1559_params().ok_or_else(|| { @@ -164,6 +136,56 @@ where Ok(()) } + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError> { + self.inner.ensure_well_formed_payload(payload, sidecar) + } +} + +/// Validates the presence of the `withdrawals` field according to the payload timestamp. +/// +/// After Canyon, withdrawals field must be [Some]. 
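Aside: `validate_withdrawals_presence`, reproduced in full just below, is a four-way decision table over the engine API version and Canyon (Shanghai) activation. A compact toy restatement of that table:

enum Version {
    V1,
    V2Plus, // V2, V3 and V4 share the same rule
}

fn validate_withdrawals(
    version: Version,
    is_shanghai: bool,
    has_withdrawals: bool,
) -> Result<(), &'static str> {
    match version {
        Version::V1 => {
            if has_withdrawals {
                return Err("withdrawals not supported in V1");
            }
            if is_shanghai {
                return Err("no withdrawals post-shanghai");
            }
        }
        Version::V2Plus => {
            if is_shanghai && !has_withdrawals {
                return Err("no withdrawals post-shanghai");
            }
            if !is_shanghai && has_withdrawals {
                return Err("has withdrawals pre-shanghai");
            }
        }
    }
    Ok(())
}

fn main() {
    assert!(validate_withdrawals(Version::V2Plus, true, true).is_ok());
    assert!(validate_withdrawals(Version::V2Plus, false, true).is_err());
    assert!(validate_withdrawals(Version::V1, false, false).is_ok());
}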
+/// Before Canyon, withdrawals field must be [None]; +/// +/// Canyon activates the Shanghai EIPs, see the Canyon specs for more details: +/// +pub fn validate_withdrawals_presence( + chain_spec: &ChainSpec, + version: EngineApiMessageVersion, + message_validation_kind: MessageValidationKind, + timestamp: u64, + has_withdrawals: bool, +) -> Result<(), EngineObjectValidationError> { + let is_shanghai = chain_spec.fork(OpHardfork::Canyon).active_at_timestamp(timestamp); + + match version { + EngineApiMessageVersion::V1 => { + if has_withdrawals { + return Err(message_validation_kind + .to_error(VersionSpecificValidationError::WithdrawalsNotSupportedInV1)) + } + if is_shanghai { + return Err(message_validation_kind + .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) + } + } + EngineApiMessageVersion::V2 | EngineApiMessageVersion::V3 | EngineApiMessageVersion::V4 => { + if is_shanghai && !has_withdrawals { + return Err(message_validation_kind + .to_error(VersionSpecificValidationError::NoWithdrawalsPostShanghai)) + } + if !is_shanghai && has_withdrawals { + return Err(message_validation_kind + .to_error(VersionSpecificValidationError::HasWithdrawalsPreShanghai)) + } + } + }; + + Ok(()) } #[cfg(test)] @@ -172,16 +194,12 @@ mod test { use crate::engine; use alloy_primitives::{b64, Address, B256, B64}; use alloy_rpc_types_engine::PayloadAttributes; - use reth_chainspec::ForkCondition; use reth_optimism_chainspec::BASE_SEPOLIA; use super::*; - fn get_chainspec(is_holocene: bool) -> Arc { - let mut hardforks = OpHardfork::base_sepolia(); - if is_holocene { - hardforks.insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); - } + fn get_chainspec() -> Arc { + let hardforks = OpHardfork::base_sepolia(); Arc::new(OpChainSpec { inner: ChainSpec { chain: BASE_SEPOLIA.inner.chain, @@ -217,8 +235,8 @@ mod test { #[test] fn test_well_formed_attributes_pre_holocene() { - let validator = OpEngineValidator::new(get_chainspec(false)); - let attributes = get_attributes(None, 1799999999); + let validator = OpEngineValidator::new(get_chainspec()); + let attributes = get_attributes(None, 1732633199); let result = > BlockBodyWriter for OpStorage { + fn write_block_bodies( + &self, + provider: &Provider, + bodies: Vec<(u64, Option)>, + ) -> ProviderResult<()> { + self.0.write_block_bodies(provider, bodies) + } -impl NodePrimitives for OpPrimitives { - type Block = Block; - type SignedTx = TransactionSigned; - type Receipt = Receipt; + fn remove_block_bodies_above( + &self, + provider: &Provider, + block: alloy_primitives::BlockNumber, + ) -> ProviderResult<()> { + self.0.remove_block_bodies_above(provider, block) + } +} + +impl> + BlockBodyReader for OpStorage +{ + type Block = reth_primitives::Block; + + fn read_block_bodies( + &self, + provider: &Provider, + inputs: Vec>, + ) -> ProviderResult> { + self.0.read_block_bodies(provider, inputs) + } +} + +impl ChainStorage for OpStorage { + fn reader( + &self, + ) -> impl reth_provider::ChainStorageReader, OpPrimitives> + where + TX: DbTx + 'static, + Types: reth_provider::providers::NodeTypesForProvider, + { + self + } + + fn writer( + &self, + ) -> impl reth_provider::ChainStorageWriter, OpPrimitives> + where + TX: DbTxMut + DbTx + 'static, + Types: NodeTypes, + { + self + } } /// Type configuration for a regular Optimism node. 
@@ -77,7 +133,11 @@ impl OpNode { > where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, >, { let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. } = args; @@ -96,7 +156,14 @@ impl OpNode { impl Node for OpNode where - N: FullNodeTypes>, + N: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Storage = OpStorage, + >, + >, { type ComponentsBuilder = ComponentsBuilder< N, @@ -124,6 +191,7 @@ impl NodeTypes for OpNode { type Primitives = OpPrimitives; type ChainSpec = OpChainSpec; type StateCommitment = MerklePatriciaTrie; + type Storage = OpStorage; } impl NodeTypesWithEngine for OpNode { @@ -134,13 +202,13 @@ impl NodeTypesWithEngine for OpNode { #[derive(Debug)] pub struct OpAddOns(pub RpcAddOns, OpEngineValidatorBuilder>); -impl Default for OpAddOns { +impl>> Default for OpAddOns { fn default() -> Self { Self::new(None) } } -impl OpAddOns { +impl>> OpAddOns { /// Create a new instance with the given `sequencer_http` URL. pub fn new(sequencer_http: Option) -> Self { Self(RpcAddOns::new(move |ctx| OpEthApi::new(ctx, sequencer_http), Default::default())) @@ -150,8 +218,8 @@ impl OpAddOns { impl NodeAddOns for OpAddOns where N: FullNodeComponents< - Types: NodeTypes, - PayloadBuilder: Into::Engine>>, + Types: NodeTypes, + PayloadBuilder: PayloadBuilder::Engine>, >, OpEngineValidator: EngineValidator<::Engine>, { @@ -161,15 +229,25 @@ where self, ctx: reth_node_api::AddOnsContext<'_, N>, ) -> eyre::Result { - self.0.launch_add_ons(ctx).await + // install additional OP specific rpc methods + let debug_ext = + OpDebugWitnessApi::new(ctx.node.provider().clone(), ctx.node.evm_config().clone()); + + self.0 + .launch_add_ons_with(ctx, move |modules| { + debug!(target: "reth::cli", "Installing debug payload witness rpc endpoint"); + modules.merge_if_module_configured(RethRpcModule::Debug, debug_ext.into_rpc())?; + Ok(()) + }) + .await } } impl RethRpcAddOns for OpAddOns where N: FullNodeComponents< - Types: NodeTypes, - PayloadBuilder: Into::Engine>>, + Types: NodeTypes, + PayloadBuilder: PayloadBuilder::Engine>, >, OpEngineValidator: EngineValidator<::Engine>, { @@ -180,6 +258,18 @@ where } } +impl EngineValidatorAddOn for OpAddOns +where + N: FullNodeComponents>, + OpEngineValidator: EngineValidator<::Engine>, +{ + type Validator = OpEngineValidator; + + async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { + OpEngineValidatorBuilder::default().build(ctx).await + } +} + /// A regular optimism evm and executor builder. #[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] @@ -217,7 +307,7 @@ pub struct OpPoolBuilder { impl PoolBuilder for OpPoolBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type Pool = OpTransactionPool; @@ -229,6 +319,7 @@ where let validator = TransactionValidationTaskExecutor::eth_builder(Arc::new( ctx.chain_spec().inner.clone(), )) + .no_eip4844() .with_head_timestamp(ctx.head().timestamp) .kzg_settings(ctx.kzg_settings()?) .with_additional_tasks( @@ -337,7 +428,11 @@ where ) -> eyre::Result> where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, >, Pool: TransactionPool + Unpin + 'static, Evm: ConfigureEvm
, @@ -372,8 +467,13 @@ where impl PayloadServiceBuilder for OpPayloadBuilder where - Node: - FullNodeTypes>, + Node: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, + >, Pool: TransactionPool + Unpin + 'static, Txs: OpPayloadTransactions, { @@ -445,7 +545,7 @@ impl OpNetworkBuilder { impl NetworkBuilder for OpNetworkBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, Pool: TransactionPool + Unpin + 'static, { async fn build_network( diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 0edfeec73227..6db5d69568b6 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -3,7 +3,9 @@ use alloy_eips::eip2718::Encodable2718; use parking_lot::RwLock; use reth_chainspec::ChainSpec; use reth_optimism_evm::RethL1BlockInfo; -use reth_primitives::{Block, GotExpected, InvalidTransactionError, SealedBlock}; +use reth_primitives::{ + Block, GotExpected, InvalidTransactionError, SealedBlock, TransactionSigned, +}; use reth_provider::{BlockReaderIdExt, StateProviderFactory}; use reth_revm::L1BlockInfo; use reth_transaction_pool::{ @@ -67,7 +69,7 @@ impl OpTransactionValidator { impl OpTransactionValidator where - Client: StateProviderFactory + BlockReaderIdExt, + Client: StateProviderFactory + BlockReaderIdExt, Tx: EthPoolTransaction, { /// Create a new [`OpTransactionValidator`]. @@ -140,7 +142,8 @@ where let l1_block_info = self.block_info.l1_block_info.read().clone(); let mut encoded = Vec::with_capacity(valid_tx.transaction().encoded_length()); - valid_tx.transaction().clone().into_consensus().into().encode_2718(&mut encoded); + let tx: TransactionSigned = valid_tx.transaction().clone().into_consensus().into(); + tx.encode_2718(&mut encoded); let cost_addition = match l1_block_info.l1_tx_data_fee( &self.chain_spec(), @@ -192,7 +195,7 @@ where impl TransactionValidator for OpTransactionValidator where - Client: StateProviderFactory + BlockReaderIdExt, + Client: StateProviderFactory + BlockReaderIdExt, Tx: EthPoolTransaction, { type Transaction = Tx; @@ -262,7 +265,7 @@ mod tests { input: Default::default(), }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(deposit_tx, signature); + let signed_tx = TransactionSigned::new_unhashed(deposit_tx, signature); let signed_recovered = TransactionSignedEcRecovered::from_signed_transaction(signed_tx, signer); let len = signed_recovered.encode_2718_len(); diff --git a/crates/optimism/node/src/utils.rs b/crates/optimism/node/src/utils.rs index b54015fef0cc..e70e35031982 100644 --- a/crates/optimism/node/src/utils.rs +++ b/crates/optimism/node/src/utils.rs @@ -1,12 +1,13 @@ use crate::{node::OpAddOns, OpBuiltPayload, OpNode as OtherOpNode, OpPayloadBuilderAttributes}; use alloy_genesis::Genesis; use alloy_primitives::{Address, B256}; -use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; +use alloy_rpc_types_engine::PayloadAttributes; use reth_e2e_test_utils::{ transaction::TransactionTestContext, wallet::Wallet, Adapter, NodeHelperType, }; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_payload_builder::EthPayloadBuilderAttributes; +use reth_tasks::TaskManager; use std::sync::Arc; use tokio::sync::Mutex; diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index 3db4cfab8698..90623d9e65d3 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ 
b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,6 +1,5 @@ use alloy_rpc_types_engine::PayloadStatusEnum; use futures::StreamExt; -use reth::blockchain_tree::error::BlockchainTreeError; use reth_optimism_node::utils::{advance_chain, setup}; use std::sync::Arc; use tokio::sync::Mutex; @@ -90,10 +89,10 @@ async fn can_sync() -> eyre::Result<()> { canonical_payload_chain[tip_index - reorg_depth + 1].0.clone(), canonical_payload_chain[tip_index - reorg_depth + 1].1.clone(), PayloadStatusEnum::Invalid { - validation_error: BlockchainTreeError::PendingBlockIsFinalized { - last_finalized: (tip - reorg_depth) as u64 + 1, - } - .to_string(), + validation_error: format!( + "block number is lower than the last finalized block number {}", + (tip - reorg_depth) as u64 + 1 + ), }, ) .await; diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs index 52e3bef3d918..35be3dfd3ee1 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -4,7 +4,6 @@ use alloy_consensus::TxEip1559; use alloy_genesis::Genesis; use alloy_network::TxSignerSync; use alloy_primitives::{Address, ChainId, TxKind}; -use reth::{args::DatadirArgs, tasks::TaskManager}; use reth_chainspec::EthChainSpec; use reth_db::test_utils::create_test_rw_db_with_path; use reth_e2e_test_utils::{ @@ -14,6 +13,7 @@ use reth_node_api::{FullNodeTypes, NodeTypesWithEngine}; use reth_node_builder::{ components::ComponentsBuilder, EngineNodeLauncher, NodeBuilder, NodeConfig, }; +use reth_node_core::args::DatadirArgs; use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder}; use reth_optimism_node::{ args::RollupArgs, @@ -25,12 +25,12 @@ use reth_optimism_node::{ OpEngineTypes, OpNode, }; use reth_optimism_payload_builder::builder::OpPayloadTransactions; +use reth_optimism_primitives::OpPrimitives; +use reth_payload_util::{PayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}; use reth_primitives::{SealedBlock, Transaction, TransactionSigned, TransactionSignedEcRecovered}; use reth_provider::providers::BlockchainProvider2; -use reth_transaction_pool::{ - pool::{BestPayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}, - PayloadTransactions, -}; +use reth_tasks::TaskManager; +use reth_transaction_pool::pool::BestPayloadTransactions; use std::sync::Arc; use tokio::sync::Mutex; @@ -65,10 +65,7 @@ impl OpPayloadTransactions for CustomTxPriority { }; let signature = sender.sign_transaction_sync(&mut end_of_block_tx).unwrap(); let end_of_block_tx = TransactionSignedEcRecovered::from_signed_transaction( - TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(end_of_block_tx), - signature, - ), + TransactionSigned::new_unhashed(Transaction::Eip1559(end_of_block_tx), signature), sender.address(), ); @@ -95,8 +92,13 @@ fn build_components( OpConsensusBuilder, > where - Node: - FullNodeTypes>, + Node: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, + >, { let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. 
} = RollupArgs::default(); diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 839355b2158e..7f47da7e2360 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -15,13 +15,15 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives.workspace = true -reth-revm.workspace = true +reth-revm = { workspace = true, features = ["witness"] } reth-transaction-pool.workspace = true reth-provider.workspace = true reth-rpc-types-compat.workspace = true reth-evm.workspace = true reth-execution-types.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true +reth-payload-util.workspace = true reth-payload-primitives = { workspace = true, features = ["op"] } reth-basic-payload-builder.workspace = true reth-trie.workspace = true @@ -41,6 +43,7 @@ alloy-rlp.workspace = true op-alloy-rpc-types-engine.workspace = true op-alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true +alloy-rpc-types-debug.workspace = true alloy-consensus.workspace = true # misc diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 42326de6ea49..fbf99c78d9e7 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -2,33 +2,37 @@ use std::{fmt::Display, sync::Arc}; -use alloy_consensus::{Transaction, EMPTY_OMMER_ROOT_HASH}; -use alloy_eips::merge::BEACON_NONCE; -use alloy_primitives::{Address, Bytes, U256}; +use alloy_consensus::{Header, Transaction, EMPTY_OMMER_ROOT_HASH}; +use alloy_eips::{eip4895::Withdrawals, merge::BEACON_NONCE}; +use alloy_primitives::{Address, Bytes, B256, U256}; +use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; -use reth_chainspec::ChainSpecProvider; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, NextBlockEnvAttributes}; use reth_execution_types::ExecutionOutcome; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_optimism_forks::OptimismHardforks; -use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; +use reth_optimism_forks::OpHardforks; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::PayloadBuilderAttributes; +use reth_payload_util::PayloadTransactions; use reth_primitives::{ - proofs, - revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, Header, Receipt, SealedHeader, TransactionSigned, TxType, + proofs, Block, BlockBody, BlockExt, Receipt, SealedHeader, TransactionSigned, TxType, }; -use reth_provider::{ProviderError, StateProviderFactory, StateRootProvider}; +use reth_provider::{ProviderError, StateProofProvider, StateProviderFactory, StateRootProvider}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ - noop::NoopTransactionPool, BestTransactionsAttributes, PayloadTransactions, TransactionPool, + noop::NoopTransactionPool, BestTransactionsAttributes, TransactionPool, }; use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, State}, - primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState, TxEnv}, + primitives::{ + BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, InvalidTransaction, + ResultAndState, TxEnv, + }, Database, DatabaseCommit, }; 
use tracing::{debug, trace, warn}; @@ -38,6 +42,8 @@ use crate::{ payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, }; use op_alloy_consensus::DepositTransaction; +use op_alloy_rpc_types_engine::OpPayloadAttributes; +use reth_revm::witness::ExecutionWitnessRecord; use reth_transaction_pool::pool::BestPayloadTransactions; /// Optimism's payload builder @@ -92,21 +98,6 @@ where EvmConfig: ConfigureEvm
, Txs: OpPayloadTransactions, { - /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload - /// (that has the `parent` as its parent). - pub fn cfg_and_block_env( - &self, - config: &PayloadConfig, - parent: &Header, - ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), EvmConfig::Error> { - let next_attributes = NextBlockEnvAttributes { - timestamp: config.attributes.timestamp(), - suggested_fee_recipient: config.attributes.suggested_fee_recipient(), - prev_randao: config.attributes.prev_randao(), - }; - self.evm_config.next_cfg_and_block_env(parent, next_attributes) - } - /// Constructs an Optimism payload from the transactions sent via the /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in /// the payload attributes, the transaction pool will be ignored and the only transactions @@ -124,7 +115,7 @@ where Pool: TransactionPool, { let (initialized_cfg, initialized_block_env) = self - .cfg_and_block_env(&args.config, &args.config.parent_header) + .cfg_and_block_env(&args.config.attributes, &args.config.parent_header) .map_err(PayloadBuilderError::other)?; let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; @@ -159,6 +150,65 @@ where } } +impl OpPayloadBuilder +where + EvmConfig: ConfigureEvm
, +{ + /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload + /// (that has the `parent` as its parent). + pub fn cfg_and_block_env( + &self, + attributes: &OpPayloadBuilderAttributes, + parent: &Header, + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), EvmConfig::Error> { + let next_attributes = NextBlockEnvAttributes { + timestamp: attributes.timestamp(), + suggested_fee_recipient: attributes.suggested_fee_recipient(), + prev_randao: attributes.prev_randao(), + }; + self.evm_config.next_cfg_and_block_env(parent, next_attributes) + } + + /// Computes the witness for the payload. + pub fn payload_witness( + &self, + client: &Client, + parent: SealedHeader, + attributes: OpPayloadAttributes, + ) -> Result + where + Client: StateProviderFactory + ChainSpecProvider, + { + let attributes = OpPayloadBuilderAttributes::try_new(parent.hash(), attributes, 3) + .map_err(PayloadBuilderError::other)?; + + let (initialized_cfg, initialized_block_env) = + self.cfg_and_block_env(&attributes, &parent).map_err(PayloadBuilderError::other)?; + + let config = PayloadConfig { + parent_header: Arc::new(parent), + attributes, + extra_data: Default::default(), + }; + let ctx = OpPayloadBuilderCtx { + evm_config: self.evm_config.clone(), + chain_spec: client.chain_spec(), + config, + initialized_cfg, + initialized_block_env, + cancel: Default::default(), + best_payload: Default::default(), + }; + + let state_provider = client.state_by_block_hash(ctx.parent().hash())?; + let state = StateProviderDatabase::new(state_provider); + let mut state = State::builder().with_database(state).with_bundle_update().build(); + + let builder = OpBuilder { pool: NoopTransactionPool::default(), best: () }; + builder.witness(&mut state, &ctx) + } +} + /// Implementation of the [`PayloadBuilder`] trait for [`OpPayloadBuilder`]. impl PayloadBuilder for OpPayloadBuilder where @@ -234,36 +284,33 @@ where Pool: TransactionPool, Txs: OpPayloadTransactions, { - /// Builds the payload on top of the state. - pub fn build( + /// Executes the payload and returns the outcome. + pub fn execute( self, - mut db: State, - ctx: OpPayloadBuilderCtx, - ) -> Result, PayloadBuilderError> + state: &mut State, + ctx: &OpPayloadBuilderCtx, + ) -> Result, PayloadBuilderError> where EvmConfig: ConfigureEvm
<Header = Header>, - DB: Database<Error = ProviderError> + AsRef<P>
, - P: StateRootProvider, + DB: Database, { let Self { pool, best } = self; debug!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number, "building new payload"); // 1. apply eip-4788 pre block contract call - ctx.apply_pre_beacon_root_contract_call(&mut db)?; + ctx.apply_pre_beacon_root_contract_call(state)?; // 2. ensure create2deployer is force deployed - ctx.ensure_create2_deployer(&mut db)?; + ctx.ensure_create2_deployer(state)?; // 3. execute sequencer transactions - let mut info = ctx.execute_sequencer_transactions(&mut db)?; + let mut info = ctx.execute_sequencer_transactions(state)?; // 4. if mem pool transactions are requested we execute them if !ctx.attributes().no_tx_pool { let best_txs = best.best_transactions(pool, ctx.best_transaction_attributes()); - if let Some(cancelled) = - ctx.execute_best_transactions::<_, Pool>(&mut info, &mut db, best_txs)? - { - return Ok(cancelled) + if ctx.execute_best_transactions::<_, Pool>(&mut info, state, best_txs)?.is_some() { + return Ok(BuildOutcomeKind::Cancelled) } // check if the new payload is even more valuable @@ -273,17 +320,36 @@ where } } - let WithdrawalsOutcome { withdrawals_root, withdrawals } = - ctx.commit_withdrawals(&mut db)?; + let withdrawals_root = ctx.commit_withdrawals(state)?; // merge all transitions into bundle state, this would apply the withdrawal balance changes // and 4788 contract call - db.merge_transitions(BundleRetention::Reverts); + state.merge_transitions(BundleRetention::Reverts); + + Ok(BuildOutcomeKind::Better { payload: ExecutedPayload { info, withdrawals_root } }) + } + + /// Builds the payload on top of the state. + pub fn build( + self, + mut state: State, + ctx: OpPayloadBuilderCtx, + ) -> Result, PayloadBuilderError> + where + EvmConfig: ConfigureEvm
<Header = Header>, + DB: Database<Error = ProviderError> + AsRef<P>
, + P: StateRootProvider, + { + let ExecutedPayload { info, withdrawals_root } = match self.execute(&mut state, &ctx)? { + BuildOutcomeKind::Better { payload } | BuildOutcomeKind::Freeze(payload) => payload, + BuildOutcomeKind::Cancelled => return Ok(BuildOutcomeKind::Cancelled), + BuildOutcomeKind::Aborted { fees } => return Ok(BuildOutcomeKind::Aborted { fees }), + }; let block_number = ctx.block_number(); let execution_outcome = ExecutionOutcome::new( - db.take_bundle(), - vec![info.receipts.clone()].into(), + state.take_bundle(), + vec![info.receipts].into(), block_number, Vec::new(), ); @@ -302,7 +368,7 @@ where // // calculate the state root let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); let (state_root, trie_output) = { - db.database.as_ref().state_root_with_updates(hashed_state.clone()).inspect_err( + state.database.as_ref().state_root_with_updates(hashed_state.clone()).inspect_err( |err| { warn!(target: "payload_builder", parent_header=%ctx.parent().hash(), @@ -352,12 +418,12 @@ where body: BlockBody { transactions: info.executed_transactions, ommers: vec![], - withdrawals, + withdrawals: ctx.withdrawals().cloned(), }, }; let sealed_block = Arc::new(block.seal_slow()); - debug!(target: "payload_builder", ?sealed_block, "sealed built block"); + debug!(target: "payload_builder", id=%ctx.attributes().payload_id(), sealed_block_header = ?sealed_block.header, "sealed built block"); // create the executed block data let executed = ExecutedBlock { @@ -388,6 +454,24 @@ where Ok(BuildOutcomeKind::Better { payload }) } } + + /// Builds the payload and returns its [`ExecutionWitness`] based on the state after execution. + pub fn witness( + self, + state: &mut State, + ctx: &OpPayloadBuilderCtx, + ) -> Result + where + EvmConfig: ConfigureEvm
<Header = Header>, + DB: Database<Error = ProviderError> + AsRef<P>
, + P: StateProofProvider, + { + let _ = self.execute(state, ctx)?; + let ExecutionWitnessRecord { hashed_state, codes, keys } = + ExecutionWitnessRecord::from_executed_state(state); + let state = state.database.as_ref().witness(Default::default(), hashed_state)?; + Ok(ExecutionWitness { state: state.into_iter().collect(), codes, keys }) + } } /// A type that returns a the [`PayloadTransactions`] that should be included in the pool. @@ -411,6 +495,15 @@ impl OpPayloadTransactions for () { } } +/// Holds the state after execution +#[derive(Debug)] +pub struct ExecutedPayload { + /// Tracked execution info + pub info: ExecutionInfo, + /// Withdrawal hash. + pub withdrawals_root: Option, +} + /// This acts as the container for executed transactions and its byproducts (receipts, gas used) #[derive(Default, Debug)] pub struct ExecutionInfo { @@ -469,6 +562,13 @@ impl OpPayloadBuilderCtx { &self.config.attributes } + /// Returns the withdrawals if shanghai is active. + pub fn withdrawals(&self) -> Option<&Withdrawals> { + self.chain_spec + .is_shanghai_active_at_timestamp(self.attributes().timestamp()) + .then(|| &self.attributes().payload_attributes.withdrawals) + } + /// Returns the block gas limit to target. pub fn block_gas_limit(&self) -> u64 { self.attributes() @@ -558,10 +658,7 @@ impl OpPayloadBuilderCtx { } /// Commits the withdrawals from the payload attributes to the state. - pub fn commit_withdrawals( - &self, - db: &mut State, - ) -> Result + pub fn commit_withdrawals(&self, db: &mut State) -> Result, ProviderError> where DB: Database, { @@ -569,7 +666,7 @@ impl OpPayloadBuilderCtx { db, &self.chain_spec, self.attributes().payload_attributes.timestamp, - self.attributes().payload_attributes.withdrawals.clone(), + &self.attributes().payload_attributes.withdrawals, ) } @@ -725,13 +822,15 @@ where Ok(info) } - /// Executes the given best transactions and updates the execution info + /// Executes the given best transactions and updates the execution info. + /// + /// Returns `Ok(Some(())` if the job was cancelled. pub fn execute_best_transactions( &self, info: &mut ExecutionInfo, db: &mut State, mut best_txs: impl PayloadTransactions, - ) -> Result>, PayloadBuilderError> + ) -> Result, PayloadBuilderError> where DB: Database, Pool: TransactionPool, @@ -764,7 +863,7 @@ where // check if the job was cancelled, if so we can exit early if self.cancel.is_cancelled() { - return Ok(Some(BuildOutcomeKind::Cancelled)) + return Ok(Some(())) } // Configure the environment for the tx. diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 36f11ee628b3..1a951abadcae 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -7,7 +7,7 @@ use alloy_eips::{ use alloy_primitives::{keccak256, Address, Bytes, B256, B64, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId}; -use op_alloy_consensus::eip1559::{decode_holocene_extra_data, EIP1559ParamError}; +use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError}; /// Re-export for use in downstream arguments. 
pub use op_alloy_rpc_types_engine::OpPayloadAttributes; use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index a2d4c20a8b72..abd27300fa59 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -12,6 +12,68 @@ description = "OP primitive types" workspace = true [dependencies] +# reth reth-primitives.workspace = true +reth-primitives-traits.workspace = true +reth-codecs = { workspace = true, optional = true, features = ["optimism"] } + +# ethereum alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-eips.workspace = true +alloy-rlp.workspace = true + +# op +op-alloy-consensus.workspace = true + +# codec +bytes.workspace = true +serde = { workspace = true, optional = true } + +# misc +derive_more.workspace = true + +# test +arbitrary = { workspace = true, features = ["derive"], optional = true } + +[dev-dependencies] +reth-codecs = { workspace = true, features = ["test-utils"] } +rstest.workspace = true +arbitrary.workspace = true + +[features] +default = ["std", "reth-codec"] +std = [ + "reth-primitives-traits/std", + "reth-primitives/std", + "reth-codecs/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-primitives/std", + "serde/std", +] +reth-codec = [ + "dep:reth-codecs", + "reth-primitives/reth-codec", + "reth-primitives-traits/reth-codec", +] +serde = [ + "dep:serde", + "reth-primitives-traits/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "bytes/serde", + "reth-codecs?/serde", + "op-alloy-consensus/serde", +] +arbitrary = [ + "dep:arbitrary", + "reth-primitives-traits/arbitrary", + "reth-primitives/arbitrary", + "reth-codecs?/arbitrary", + "op-alloy-consensus/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", +] diff --git a/crates/optimism/primitives/src/bedrock.rs b/crates/optimism/primitives/src/bedrock.rs index 7153ae3155c7..204b34d33782 100644 --- a/crates/optimism/primitives/src/bedrock.rs +++ b/crates/optimism/primitives/src/bedrock.rs @@ -1,8 +1,7 @@ //! OP mainnet bedrock related data. -use alloy_consensus::{EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{address, b256, bloom, bytes, B256, B64, U256}; -use reth_primitives::Header; /// Transaction 0x9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9 in block 985, /// replayed in blocks: diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index 659900b9adbb..796f5cb06138 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -6,5 +6,29 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(not(feature = "std"), no_std)] pub mod bedrock; +pub mod transaction; + +use reth_primitives::EthPrimitives; +pub use transaction::{tx_type::OpTxType, OpTransaction}; + +/// Optimism primitive types. +pub type OpPrimitives = EthPrimitives; + +// TODO: once we are ready for separating primitive types, introduce a separate `NodePrimitives` +// implementation used exclusively by legacy engine. 
+// +// #[derive(Debug, Default, Clone, PartialEq, Eq)] +// pub struct OpPrimitives; +// +// impl NodePrimitives for OpPrimitives { +// type Block = Block; +// type BlockHeader = Header; +// type BlockBody = BlockBody; +// type SignedTx = TransactionSigned; +// type TxType = OpTxType; +// type Receipt = Receipt; +// } diff --git a/crates/optimism/primitives/src/transaction/mod.rs b/crates/optimism/primitives/src/transaction/mod.rs new file mode 100644 index 000000000000..070b3d984e0b --- /dev/null +++ b/crates/optimism/primitives/src/transaction/mod.rs @@ -0,0 +1,173 @@ +//! Wrapper of [`OpTypedTransaction`], that implements reth database encoding [`Compact`]. + +pub mod tx_type; + +use alloy_primitives::{bytes, Bytes, TxKind, Uint, B256}; + +use alloy_consensus::{constants::EIP7702_TX_TYPE_ID, TxLegacy}; +use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization}; +use derive_more::{Deref, From}; +use op_alloy_consensus::{OpTypedTransaction, DEPOSIT_TX_TYPE_ID}; +use reth_codecs::Compact; +use reth_primitives::transaction::{ + COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, + COMPACT_IDENTIFIER_LEGACY, +}; +use reth_primitives_traits::InMemorySize; + +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq, Deref, Hash, From)] +/// Optimistic transaction. +pub struct OpTransaction(OpTypedTransaction); + +impl Default for OpTransaction { + fn default() -> Self { + Self(OpTypedTransaction::Legacy(TxLegacy::default())) + } +} + +impl Compact for OpTransaction { + fn to_compact(&self, out: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + match &self.0 { + OpTypedTransaction::Legacy(tx) => tx.to_compact(out), + OpTypedTransaction::Eip2930(tx) => tx.to_compact(out), + OpTypedTransaction::Eip1559(tx) => tx.to_compact(out), + OpTypedTransaction::Eip7702(tx) => tx.to_compact(out), + OpTypedTransaction::Deposit(tx) => tx.to_compact(out), + } + } + + fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { + use bytes::Buf; + + match identifier { + COMPACT_IDENTIFIER_LEGACY => { + let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Legacy(tx)), buf) + } + COMPACT_IDENTIFIER_EIP2930 => { + let (tx, buf) = + alloy_consensus::transaction::TxEip2930::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Eip2930(tx)), buf) + } + COMPACT_IDENTIFIER_EIP1559 => { + let (tx, buf) = + alloy_consensus::transaction::TxEip1559::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Eip1559(tx)), buf) + } + COMPACT_EXTENDED_IDENTIFIER_FLAG => { + // An identifier of 3 indicates that the transaction type did not fit into + // the backwards compatible 2 bit identifier, their transaction types are + // larger than 2 bits (eg. 4844 and Deposit Transactions). In this case, + // we need to read the concrete transaction type from the buffer by + // reading the full 8 bits (single byte) and match on this transaction type. 
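+                // Worked example (illustrative): a deposit transaction is
+                // stored as the out-of-band identifier
+                // `COMPACT_EXTENDED_IDENTIFIER_FLAG` (3) plus a leading
+                // `DEPOSIT_TX_TYPE_ID` byte (0x7E) in the buffer, so decoding
+                // first consumes that byte and then reads the `TxDeposit`
+                // fields; EIP-7702 uses `EIP7702_TX_TYPE_ID` (0x04) the same way.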
+ let identifier = buf.get_u8(); + match identifier { + EIP7702_TX_TYPE_ID => { + let (tx, buf) = + alloy_consensus::transaction::TxEip7702::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Eip7702(tx)), buf) + } + DEPOSIT_TX_TYPE_ID => { + let (tx, buf) = op_alloy_consensus::TxDeposit::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Deposit(tx)), buf) + } + _ => unreachable!( + "Junk data in database: unknown Transaction variant: {identifier}" + ), + } + } + _ => unreachable!("Junk data in database: unknown Transaction variant: {identifier}"), + } + } +} + +impl alloy_consensus::Transaction for OpTransaction { + fn chain_id(&self) -> Option { + self.0.chain_id() + } + + fn nonce(&self) -> u64 { + self.0.nonce() + } + + fn gas_limit(&self) -> u64 { + self.0.gas_limit() + } + + fn gas_price(&self) -> Option { + self.0.gas_price() + } + + fn max_fee_per_gas(&self) -> u128 { + self.0.max_fee_per_gas() + } + + fn max_priority_fee_per_gas(&self) -> Option { + self.0.max_priority_fee_per_gas() + } + + fn max_fee_per_blob_gas(&self) -> Option { + self.0.max_fee_per_blob_gas() + } + + fn priority_fee_or_price(&self) -> u128 { + self.0.priority_fee_or_price() + } + + fn kind(&self) -> TxKind { + self.0.kind() + } + + fn value(&self) -> Uint<256, 4> { + self.0.value() + } + + fn input(&self) -> &Bytes { + self.0.input() + } + + fn ty(&self) -> u8 { + self.0.ty() + } + + fn access_list(&self) -> Option<&AccessList> { + self.0.access_list() + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + self.0.blob_versioned_hashes() + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.0.authorization_list() + } + + fn is_dynamic_fee(&self) -> bool { + self.0.is_dynamic_fee() + } + + fn effective_gas_price(&self, base_fee: Option) -> u128 { + self.0.effective_gas_price(base_fee) + } + + fn effective_tip_per_gas(&self, base_fee: u64) -> Option { + self.0.effective_tip_per_gas(base_fee) + } +} + +impl InMemorySize for OpTransaction { + fn size(&self) -> usize { + match &self.0 { + OpTypedTransaction::Legacy(tx) => tx.size(), + OpTypedTransaction::Eip2930(tx) => tx.size(), + OpTypedTransaction::Eip1559(tx) => tx.size(), + OpTypedTransaction::Eip7702(tx) => tx.size(), + OpTypedTransaction::Deposit(tx) => tx.size(), + } + } +} diff --git a/crates/optimism/primitives/src/transaction/tx_type.rs b/crates/optimism/primitives/src/transaction/tx_type.rs new file mode 100644 index 000000000000..9976221b4240 --- /dev/null +++ b/crates/optimism/primitives/src/transaction/tx_type.rs @@ -0,0 +1,315 @@ +//! newtype pattern on `op_alloy_consensus::OpTxType`. +//! `OpTxType` implements `reth_primitives_traits::TxType`. +//! This type is required because a `Compact` impl is needed on the deposit tx type. + +use core::fmt::Debug; + +use alloy_primitives::{U64, U8}; +use alloy_rlp::{Decodable, Encodable, Error}; +use bytes::BufMut; +use derive_more::{ + derive::{From, Into}, + Display, +}; +use op_alloy_consensus::OpTxType as AlloyOpTxType; +use reth_primitives_traits::{InMemorySize, TxType}; + +/// Wrapper type for [`op_alloy_consensus::OpTxType`] to implement +/// [`TxType`] trait. 
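+///
+/// Illustrative usage, mirroring the tests below: values convert in via
+/// `From` (`let ty: OpTxType = AlloyOpTxType::Deposit.into();`), compare
+/// directly against raw type bytes through `PartialEq<u8>`, and round-trip
+/// through RLP and `Compact` like the plain type byte they wrap.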
+#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Display, Ord, Hash, From, Into)] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +#[into(u8)] +pub struct OpTxType(AlloyOpTxType); + +impl TxType for OpTxType { + #[inline] + fn is_legacy(&self) -> bool { + matches!(self.0, AlloyOpTxType::Legacy) + } + + #[inline] + fn is_eip2930(&self) -> bool { + matches!(self.0, AlloyOpTxType::Eip2930) + } + + #[inline] + fn is_eip1559(&self) -> bool { + matches!(self.0, AlloyOpTxType::Eip1559) + } + + #[inline] + fn is_eip4844(&self) -> bool { + false + } + + #[inline] + fn is_eip7702(&self) -> bool { + matches!(self.0, AlloyOpTxType::Eip7702) + } +} + +impl InMemorySize for OpTxType { + /// Calculates a heuristic for the in-memory size of the [`OpTxType`]. + #[inline] + fn size(&self) -> usize { + core::mem::size_of::() + } +} + +impl From for U8 { + fn from(tx_type: OpTxType) -> Self { + Self::from(u8::from(tx_type)) + } +} + +impl TryFrom for OpTxType { + type Error = Error; + + fn try_from(value: u8) -> Result { + AlloyOpTxType::try_from(value) + .map(OpTxType) + .map_err(|_| Error::Custom("Invalid transaction type")) + } +} + +impl Default for OpTxType { + fn default() -> Self { + Self(AlloyOpTxType::Legacy) + } +} + +impl PartialEq for OpTxType { + fn eq(&self, other: &u8) -> bool { + let self_as_u8: u8 = (*self).into(); + &self_as_u8 == other + } +} + +impl TryFrom for OpTxType { + type Error = Error; + + fn try_from(value: u64) -> Result { + if value > u8::MAX as u64 { + return Err(Error::Custom("value out of range")); + } + Self::try_from(value as u8) + } +} + +impl TryFrom for OpTxType { + type Error = Error; + + fn try_from(value: U64) -> Result { + let u64_value: u64 = value.try_into().map_err(|_| Error::Custom("value out of range"))?; + Self::try_from(u64_value) + } +} + +impl Encodable for OpTxType { + fn length(&self) -> usize { + let value: u8 = (*self).into(); + value.length() + } + + fn encode(&self, out: &mut dyn BufMut) { + let value: u8 = (*self).into(); + value.encode(out); + } +} + +impl Decodable for OpTxType { + fn decode(buf: &mut &[u8]) -> Result { + // Decode the u8 value from RLP + let value = if buf.is_empty() { + return Err(alloy_rlp::Error::InputTooShort); + } else if buf[0] == 0x80 { + 0 // Special case: RLP encoding for integer 0 is `b"\x80"` + } else { + u8::decode(buf)? 
+ }; + + Self::try_from(value).map_err(|_| alloy_rlp::Error::Custom("Invalid transaction type")) + } +} + +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for OpTxType { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + use reth_codecs::txtype::*; + match self.0 { + AlloyOpTxType::Legacy => COMPACT_IDENTIFIER_LEGACY, + AlloyOpTxType::Eip2930 => COMPACT_IDENTIFIER_EIP2930, + AlloyOpTxType::Eip1559 => COMPACT_IDENTIFIER_EIP1559, + AlloyOpTxType::Eip7702 => { + buf.put_u8(alloy_consensus::constants::EIP7702_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + AlloyOpTxType::Deposit => { + buf.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + } + } + + fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { + use bytes::Buf; + ( + match identifier { + reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => Self(AlloyOpTxType::Legacy), + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => Self(AlloyOpTxType::Eip2930), + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => Self(AlloyOpTxType::Eip1559), + reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => { + let extended_identifier = buf.get_u8(); + match extended_identifier { + alloy_consensus::constants::EIP7702_TX_TYPE_ID => { + Self(AlloyOpTxType::Eip7702) + } + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => Self(AlloyOpTxType::Deposit), + _ => panic!("Unsupported OpTxType identifier: {extended_identifier}"), + } + } + _ => panic!("Unknown identifier for OpTxType: {identifier}"), + }, + buf, + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::constants::EIP7702_TX_TYPE_ID; + use bytes::BytesMut; + use op_alloy_consensus::DEPOSIT_TX_TYPE_ID; + use reth_codecs::{txtype::*, Compact}; + use rstest::rstest; + + #[test] + fn test_from_alloy_op_tx_type() { + let alloy_tx = AlloyOpTxType::Legacy; + let op_tx: OpTxType = OpTxType::from(alloy_tx); + assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_from_op_tx_type_to_u8() { + let op_tx = OpTxType(AlloyOpTxType::Legacy); + let tx_type_u8: u8 = op_tx.into(); + assert_eq!(tx_type_u8, AlloyOpTxType::Legacy as u8); + } + + #[test] + fn test_from_op_tx_type_to_u8_u8() { + let op_tx = OpTxType(AlloyOpTxType::Legacy); + let tx_type_u8: U8 = op_tx.into(); + assert_eq!(tx_type_u8, U8::from(AlloyOpTxType::Legacy as u8)); + } + + #[test] + fn test_try_from_u8() { + let op_tx = OpTxType::try_from(AlloyOpTxType::Legacy as u8).unwrap(); + assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_try_from_invalid_u8() { + let invalid_value: u8 = 255; + let result = OpTxType::try_from(invalid_value); + assert_eq!(result, Err(Error::Custom("Invalid transaction type"))); + } + + #[test] + fn test_try_from_u64() { + let op_tx = OpTxType::try_from(AlloyOpTxType::Legacy as u64).unwrap(); + assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_try_from_u64_out_of_range() { + let result = OpTxType::try_from(u64::MAX); + assert_eq!(result, Err(Error::Custom("value out of range"))); + } + + #[test] + fn test_try_from_u64_within_range() { + let valid_value: U64 = U64::from(AlloyOpTxType::Legacy as u64); + let op_tx = OpTxType::try_from(valid_value).unwrap(); + assert_eq!(op_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_default() { + let default_tx = OpTxType::default(); + assert_eq!(default_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_partial_eq_u8() { + let op_tx = 
OpTxType(AlloyOpTxType::Legacy); + assert_eq!(op_tx, AlloyOpTxType::Legacy as u8); + } + + #[test] + fn test_encodable() { + let op_tx = OpTxType(AlloyOpTxType::Legacy); + let mut buf = BytesMut::new(); + op_tx.encode(&mut buf); + assert_eq!(buf, BytesMut::from(&[0x80][..])); + } + + #[test] + fn test_decodable_success() { + // Using the RLP-encoded form of 0, which is `b"\x80"` + let mut buf: &[u8] = &[0x80]; + let decoded_tx = OpTxType::decode(&mut buf).unwrap(); + assert_eq!(decoded_tx, OpTxType(AlloyOpTxType::Legacy)); + } + + #[test] + fn test_decodable_invalid() { + let mut buf: &[u8] = &[255]; + let result = OpTxType::decode(&mut buf); + assert!(result.is_err()); + } + + #[rstest] + #[case(OpTxType(AlloyOpTxType::Legacy), COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(OpTxType(AlloyOpTxType::Eip2930), COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(OpTxType(AlloyOpTxType::Eip1559), COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(OpTxType(AlloyOpTxType::Eip7702), COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[case(OpTxType(AlloyOpTxType::Deposit), COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])] + fn test_txtype_to_compact( + #[case] tx_type: OpTxType, + #[case] expected_identifier: usize, + #[case] expected_buf: Vec, + ) { + let mut buf = vec![]; + let identifier = tx_type.to_compact(&mut buf); + + assert_eq!( + identifier, expected_identifier, + "Unexpected identifier for OpTxType {tx_type:?}", + ); + assert_eq!(buf, expected_buf, "Unexpected buffer for OpTxType {tx_type:?}",); + } + + #[rstest] + #[case(OpTxType(AlloyOpTxType::Legacy), COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(OpTxType(AlloyOpTxType::Eip2930), COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(OpTxType(AlloyOpTxType::Eip1559), COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(OpTxType(AlloyOpTxType::Eip7702), COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[case(OpTxType(AlloyOpTxType::Deposit), COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])] + fn test_txtype_from_compact( + #[case] expected_type: OpTxType, + #[case] identifier: usize, + #[case] buf: Vec, + ) { + let (actual_type, remaining_buf) = OpTxType::from_compact(&buf, identifier); + + assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}"); + assert!(remaining_buf.is_empty(), "Buffer not fully consumed for identifier {identifier}"); + } +} diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 37b64b774a13..50194f39aa3c 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -22,6 +22,7 @@ reth-rpc-server-types.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true reth-rpc.workspace = true +reth-rpc-api.workspace = true reth-node-api.workspace = true reth-network-api.workspace = true reth-node-builder.workspace = true @@ -31,15 +32,19 @@ reth-chainspec.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-evm.workspace = true +reth-optimism-payload-builder.workspace = true +reth-optimism-primitives.workspace = true reth-optimism-forks.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true +alloy-rpc-types-debug.workspace = true alloy-consensus.workspace = true op-alloy-network.workspace = true op-alloy-rpc-types.workspace = true +op-alloy-rpc-types-engine.workspace = true op-alloy-consensus.workspace = true revm.workspace = true @@ -49,6 
+54,7 @@ tokio.workspace = true reqwest = { workspace = true, features = ["rustls-tls-native-roots"] } # rpc +jsonrpsee-core.workspace = true jsonrpsee-types.workspace = true serde_json.workspace = true @@ -66,5 +72,6 @@ optimism = [ "reth-primitives/optimism", "reth-provider/optimism", "revm/optimism", - "reth-optimism-consensus/optimism" + "reth-optimism-consensus/optimism", + "reth-optimism-payload-builder/optimism" ] diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index ffc698b6e980..caafe798c81c 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -1,12 +1,12 @@ //! RPC errors specific to OP. -use alloy_rpc_types_eth::error::EthRpcErrorCode; +use alloy_rpc_types_eth::{error::EthRpcErrorCode, BlockError}; use jsonrpsee_types::error::INTERNAL_ERROR_CODE; use reth_optimism_evm::OpBlockExecutionError; -use reth_primitives::revm_primitives::{InvalidTransaction, OptimismInvalidTransaction}; use reth_rpc_eth_api::AsEthApiError; use reth_rpc_eth_types::EthApiError; use reth_rpc_server_types::result::{internal_rpc_err, rpc_err}; +use revm::primitives::{InvalidTransaction, OptimismInvalidTransaction}; /// Optimism specific errors, that extend [`EthApiError`]. #[derive(Debug, thiserror::Error)] @@ -113,3 +113,9 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> ) } } + +impl From for OpEthApiError { + fn from(error: BlockError) -> Self { + Self::Eth(error.into()) + } +} diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 6678fbe5df4f..64a55496993d 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -35,7 +35,6 @@ where let block_hash = block.hash(); let excess_blob_gas = block.excess_blob_gas; let timestamp = block.timestamp; - let block = block.unseal(); let l1_block_info = reth_optimism_evm::extract_l1_info(&block.body).map_err(OpEthApiError::from)?; @@ -48,7 +47,7 @@ where .enumerate() .map(|(idx, (ref tx, receipt))| -> Result<_, _> { let meta = TransactionMeta { - tx_hash: tx.hash, + tx_hash: tx.hash(), index: idx as u64, block_hash, block_number, diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index 9ddf7b3855b6..9495a359e329 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,21 +1,26 @@ +use crate::{OpEthApi, OpEthApiError}; +use alloy_consensus::Header; use alloy_primitives::{Bytes, TxKind, U256}; use alloy_rpc_types_eth::transaction::TransactionRequest; use reth_evm::ConfigureEvm; -use reth_primitives::{ - revm_primitives::{BlockEnv, OptimismFields, TxEnv}, - Header, -}; use reth_rpc_eth_api::{ - helpers::{Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, + helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, FromEthApiError, IntoEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{revm_utils::CallFees, RpcInvalidTransactionError}; - -use crate::{OpEthApi, OpEthApiError}; +use revm::primitives::{BlockEnv, OptimismFields, TxEnv}; impl EthCall for OpEthApi where - Self: Call + LoadPendingBlock, + Self: EstimateCall + LoadPendingBlock, + N: RpcNodeCore, +{ +} + +impl EstimateCall for OpEthApi +where + Self: Call, + Self::Error: From, N: RpcNodeCore, { } @@ -42,7 +47,7 @@ where request: TransactionRequest, ) -> Result { // Ensure that if versioned hashes are set, they're not empty - if request.blob_versioned_hashes.as_ref().map_or(false, |hashes| hashes.is_empty()) { + if 
request.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) { return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into_eth_err()) } diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index dc6e8e59fa6b..6b909f012c55 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -8,9 +8,11 @@ mod call; mod pending_block; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; +use reth_optimism_primitives::OpPrimitives; use std::{fmt, sync::Arc}; +use alloy_consensus::Header; use alloy_primitives::U256; use derive_more::Deref; use op_alloy_network::Optimism; @@ -18,10 +20,9 @@ use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; use reth_node_builder::EthApiBuilderCtx; -use reth_primitives::Header; use reth_provider::{ - BlockNumReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, - StageCheckpointReader, StateProviderFactory, + BlockNumReader, BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, + EvmEnvProvider, StageCheckpointReader, StateProviderFactory, }; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ @@ -71,7 +72,11 @@ pub struct OpEthApi { impl OpEthApi where N: RpcNodeCore< - Provider: BlockReaderIdExt + ChainSpecProvider + CanonStateSubscriptions + Clone + 'static, + Provider: BlockReaderIdExt + + ChainSpecProvider + + CanonStateSubscriptions + + Clone + + 'static, >, { /// Creates a new instance for given context. @@ -121,6 +126,7 @@ where type Pool = N::Pool; type Evm = ::Evm; type Network = ::Network; + type PayloadBuilder = (); #[inline] fn pool(&self) -> &Self::Pool { @@ -137,6 +143,11 @@ where self.inner.network() } + #[inline] + fn payload_builder(&self) -> &Self::PayloadBuilder { + &() + } + #[inline] fn provider(&self) -> &Self::Provider { self.inner.provider() @@ -243,7 +254,7 @@ where impl Trace for OpEthApi where - Self: LoadState>, + Self: RpcNodeCore + LoadState>, N: RpcNodeCore, { } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index c90b3f7b794d..98ea65778d8d 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -1,11 +1,13 @@ //! Loads OP pending block for a RPC response. 
+use crate::OpEthApi; +use alloy_consensus::Header; use alloy_eips::BlockNumberOrTag; use alloy_primitives::{BlockNumber, B256}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_primitives::{revm_primitives::BlockEnv, Header, Receipt, SealedBlockWithSenders}; +use reth_primitives::{Receipt, SealedBlockWithSenders}; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, ReceiptProvider, StateProviderFactory, @@ -16,15 +18,16 @@ use reth_rpc_eth_api::{ }; use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_transaction_pool::TransactionPool; - -use crate::OpEthApi; +use revm::primitives::BlockEnv; impl LoadPendingBlock for OpEthApi where Self: SpawnBlocking, N: RpcNodeCore< - Provider: BlockReaderIdExt - + EvmEnvProvider + Provider: BlockReaderIdExt< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool, diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 40ee5d9fd863..e803ea210197 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -9,9 +9,9 @@ use op_alloy_rpc_types::{L1BlockInfo, OpTransactionReceipt, OpTransactionReceipt use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_evm::RethL1BlockInfo; -use reth_optimism_forks::OptimismHardforks; +use reth_optimism_forks::OpHardforks; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; -use reth_provider::ChainSpecProvider; +use reth_provider::{ChainSpecProvider, ReceiptProvider, TransactionsProvider}; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; @@ -21,6 +21,8 @@ impl LoadReceipt for OpEthApi where Self: Send + Sync, N: FullNodeComponents>, + Self::Provider: + TransactionsProvider + ReceiptProvider, { async fn build_transaction_receipt( &self, @@ -54,10 +56,10 @@ where /// L1 fee and data gas for a non-deposit transaction, or deposit nonce and receipt version for a /// deposit transaction. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Clone)] pub struct OpReceiptFieldsBuilder { /// Block timestamp. - pub l1_block_timestamp: u64, + pub block_timestamp: u64, /// The L1 fee for transaction. pub l1_fee: Option, /// L1 gas used by transaction. @@ -84,8 +86,19 @@ pub struct OpReceiptFieldsBuilder { impl OpReceiptFieldsBuilder { /// Returns a new builder. - pub fn new(block_timestamp: u64) -> Self { - Self { l1_block_timestamp: block_timestamp, ..Default::default() } + pub const fn new(block_timestamp: u64) -> Self { + Self { + block_timestamp, + l1_fee: None, + l1_data_gas: None, + l1_fee_scalar: None, + l1_base_fee: None, + deposit_nonce: None, + deposit_receipt_version: None, + l1_base_fee_scalar: None, + l1_blob_base_fee: None, + l1_blob_base_fee_scalar: None, + } } /// Applies [`L1BlockInfo`](revm::L1BlockInfo). @@ -96,7 +109,7 @@ impl OpReceiptFieldsBuilder { l1_block_info: revm::L1BlockInfo, ) -> Result { let raw_tx = tx.encoded_2718(); - let timestamp = self.l1_block_timestamp; + let timestamp = self.block_timestamp; self.l1_fee = Some( l1_block_info @@ -140,7 +153,7 @@ impl OpReceiptFieldsBuilder { /// Builds the [`OpTransactionReceiptFields`] object. 
pub const fn build(self) -> OpTransactionReceiptFields { let Self { - l1_block_timestamp: _, // used to compute other fields + block_timestamp: _, // used to compute other fields l1_fee, l1_data_gas: l1_gas_used, l1_fee_scalar, @@ -187,6 +200,7 @@ impl OpReceiptBuilder { all_receipts: &[Receipt], l1_block_info: revm::L1BlockInfo, ) -> Result { + let timestamp = meta.timestamp; let core_receipt = build_receipt(transaction, meta, receipt, all_receipts, |receipt_with_bloom| { match receipt.tx_type { @@ -211,7 +225,7 @@ impl OpReceiptBuilder { } })?; - let op_receipt_fields = OpReceiptFieldsBuilder::default() + let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp) .l1_block_info(chain_spec, transaction, l1_block_info)? .deposit_nonce(receipt.deposit_nonce) .deposit_version(receipt.deposit_receipt_version) @@ -233,13 +247,12 @@ impl OpReceiptBuilder { #[cfg(test)] mod test { + use super::*; use alloy_primitives::hex; use op_alloy_network::eip2718::Decodable2718; - use reth_optimism_chainspec::OP_MAINNET; + use reth_optimism_chainspec::{BASE_MAINNET, OP_MAINNET}; use reth_primitives::{Block, BlockBody}; - use super::*; - /// OP Mainnet transaction at index 0 in block 124665056. /// /// @@ -342,4 +355,46 @@ mod test { "incorrect l1 blob base fee scalar" ); } + + // + #[test] + fn base_receipt_gas_fields() { + // https://basescan.org/tx/0x510fd4c47d78ba9f97c91b0f2ace954d5384c169c9545a77a373cf3ef8254e6e + let system = hex!("7ef8f8a0389e292420bcbf9330741f72074e39562a09ff5a00fd22e4e9eee7e34b81bca494deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000008dd00101c120000000000000004000000006721035b00000000014189960000000000000000000000000000000000000000000000000000000349b4dcdc000000000000000000000000000000000000000000000000000000004ef9325cc5991ce750960f636ca2ffbb6e209bb3ba91412f21dd78c14ff154d1930f1f9a0000000000000000000000005050f69a9786f081509234f1a7f4684b5e5b76c9"); + let tx_0 = TransactionSigned::decode_2718(&mut &system[..]).unwrap(); + + let block = Block { + body: BlockBody { transactions: vec![tx_0], ..Default::default() }, + ..Default::default() + }; + let l1_block_info = + reth_optimism_evm::extract_l1_info(&block.body).expect("should extract l1 info"); + + // https://basescan.org/tx/0xf9420cbaf66a2dda75a015488d37262cbfd4abd0aad7bb2be8a63e14b1fa7a94 + let tx = hex!("02f86c8221058034839a4ae283021528942f16386bb37709016023232523ff6d9daf444be380841249c58bc080a001b927eda2af9b00b52a57be0885e0303c39dd2831732e14051c2336470fd468a0681bf120baf562915841a48601c2b54a6742511e535cf8f71c95115af7ff63bd"); + let tx_1 = TransactionSigned::decode_2718(&mut &tx[..]).unwrap(); + + let receipt_meta = OpReceiptFieldsBuilder::new(1730216981) + .l1_block_info(&BASE_MAINNET, &tx_1, l1_block_info) + .expect("should parse revm l1 info") + .build(); + + let L1BlockInfo { + l1_gas_price, + l1_gas_used, + l1_fee, + l1_fee_scalar, + l1_base_fee_scalar, + l1_blob_base_fee, + l1_blob_base_fee_scalar, + } = receipt_meta.l1_block_info; + + assert_eq!(l1_gas_price, Some(14121491676), "incorrect l1 base fee (former gas price)"); + assert_eq!(l1_gas_used, Some(1600), "incorrect l1 gas used"); + assert_eq!(l1_fee, Some(191150293412), "incorrect l1 fee"); + assert!(l1_fee_scalar.is_none(), "incorrect l1 fee scalar"); + assert_eq!(l1_base_fee_scalar, Some(2269), "incorrect l1 base fee scalar"); + assert_eq!(l1_blob_base_fee, Some(1324954204), "incorrect l1 blob base fee"); + assert_eq!(l1_blob_base_fee_scalar, Some(1055762), "incorrect l1 blob base fee scalar"); + } } 
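For orientation, a minimal sketch of driving the reworked builder, in the same shape as the tests above; `chain_spec`, `tx`, and `l1_block_info` are assumed to be in scope and the block value is a placeholder:

// The timestamp is now required up front in `new` because the derived L1 fee
// fields depend on which fork is active at that time (e.g. which scalars apply).
let fields = OpReceiptFieldsBuilder::new(block.timestamp)
    .l1_block_info(&chain_spec, &tx, l1_block_info)? // L1 fee, data gas, scalars
    .deposit_nonce(None)   // deposit-only metadata; `None` for user transactions
    .deposit_version(None)
    .build();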
diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 90e5e33feb76..3202dc46ad1b 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,7 +1,7 @@ //! Loads and formats OP transaction RPC response. use alloy_consensus::{Signed, Transaction as _}; -use alloy_primitives::{Bytes, B256}; +use alloy_primitives::{Bytes, Sealable, Sealed, B256}; use alloy_rpc_types_eth::TransactionInfo; use op_alloy_consensus::OpTxEnvelope; use op_alloy_rpc_types::Transaction; @@ -15,7 +15,7 @@ use reth_rpc_eth_api::{ use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; -use crate::{OpEthApi, SequencerClient}; +use crate::{OpEthApi, OpEthApiError, SequencerClient}; impl EthTransactions for OpEthApi where @@ -58,6 +58,7 @@ impl LoadTransaction for OpEthApi where Self: SpawnBlocking + FullEthApiTypes, N: RpcNodeCore, + Self::Pool: TransactionPool, { } @@ -73,17 +74,21 @@ where impl TransactionCompat for OpEthApi where - N: FullNodeComponents, + N: FullNodeComponents>, { type Transaction = Transaction; + type Error = OpEthApiError; fn fill( &self, tx: TransactionSignedEcRecovered, tx_info: TransactionInfo, - ) -> Self::Transaction { + ) -> Result { let from = tx.signer(); - let TransactionSigned { transaction, signature, hash } = tx.into_signed(); + let hash = tx.hash(); + let TransactionSigned { transaction, signature, .. } = tx.into_signed(); + let mut deposit_receipt_version = None; + let mut deposit_nonce = None; let inner = match transaction { reth_primitives::Transaction::Legacy(tx) => { @@ -99,28 +104,38 @@ where reth_primitives::Transaction::Eip7702(tx) => { Signed::new_unchecked(tx, signature, hash).into() } - reth_primitives::Transaction::Deposit(tx) => OpTxEnvelope::Deposit(tx), + reth_primitives::Transaction::Deposit(tx) => { + self.inner + .provider() + .receipt_by_hash(hash) + .map_err(Self::Error::from_eth_err)? + .inspect(|receipt| { + deposit_receipt_version = receipt.deposit_receipt_version; + deposit_nonce = receipt.deposit_nonce; + }); + + OpTxEnvelope::Deposit(tx.seal_unchecked(hash)) + } }; - let deposit_receipt_version = self - .inner - .provider() - .receipt_by_hash(hash) - .ok() // todo: change sig to return result - .flatten() - .and_then(|receipt| receipt.deposit_receipt_version); - let TransactionInfo { block_hash, block_number, index: transaction_index, base_fee, .. 
} = tx_info; - let effective_gas_price = base_fee - .map(|base_fee| { - inner.effective_tip_per_gas(base_fee as u64).unwrap_or_default() + base_fee - }) - .unwrap_or_else(|| inner.max_fee_per_gas()); + let effective_gas_price = if inner.is_deposit() { + // For deposits, we must always set the `gasPrice` field to 0 in rpc + // deposit txs don't have a gas price field, but serde of `Transaction` will take care of + // it + 0 + } else { + base_fee + .map(|base_fee| { + inner.effective_tip_per_gas(base_fee as u64).unwrap_or_default() + base_fee + }) + .unwrap_or_else(|| inner.max_fee_per_gas()) + }; - Transaction { + Ok(Transaction { inner: alloy_rpc_types_eth::Transaction { inner, block_hash, @@ -129,8 +144,9 @@ where from, effective_gas_price: Some(effective_gas_price), }, + deposit_nonce, deposit_receipt_version, - } + }) } fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { @@ -139,7 +155,17 @@ where OpTxEnvelope::Eip2930(tx) => &mut tx.tx_mut().input, OpTxEnvelope::Legacy(tx) => &mut tx.tx_mut().input, OpTxEnvelope::Eip7702(tx) => &mut tx.tx_mut().input, - OpTxEnvelope::Deposit(tx) => &mut tx.input, + OpTxEnvelope::Deposit(tx) => { + let (mut deposit, hash) = std::mem::replace( + tx, + Sealed::new_unchecked(Default::default(), Default::default()), + ) + .split(); + deposit.input = deposit.input.slice(..4); + let mut deposit = deposit.seal_unchecked(hash); + std::mem::swap(tx, &mut deposit); + return + } _ => return, }; *input = input.slice(..4); diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index 44d0fa353890..0fa0debdf33d 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -13,6 +13,7 @@ pub mod error; pub mod eth; pub mod sequencer; +pub mod witness; pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError}; pub use eth::{OpEthApi, OpReceiptBuilder}; diff --git a/crates/optimism/rpc/src/witness.rs b/crates/optimism/rpc/src/witness.rs new file mode 100644 index 000000000000..ed9d77e73e84 --- /dev/null +++ b/crates/optimism/rpc/src/witness.rs @@ -0,0 +1,81 @@ +//! Support for optimism-specific witness RPCs. + +use alloy_consensus::Header; +use alloy_primitives::B256; +use alloy_rpc_types_debug::ExecutionWitness; +use jsonrpsee_core::RpcResult; +use op_alloy_rpc_types_engine::OpPayloadAttributes; +use reth_chainspec::ChainSpecProvider; +use reth_evm::ConfigureEvm; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_payload_builder::OpPayloadBuilder; +use reth_primitives::SealedHeader; +use reth_provider::{BlockReaderIdExt, ProviderError, ProviderResult, StateProviderFactory}; +pub use reth_rpc_api::DebugExecutionWitnessApiServer; +use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; +use std::{fmt::Debug, sync::Arc}; + +/// An extension to the `debug_` namespace of the RPC API. +pub struct OpDebugWitnessApi { + inner: Arc>, +} + +impl OpDebugWitnessApi { + /// Creates a new instance of the `OpDebugWitnessApi`. + pub fn new(provider: Provider, evm_config: EvmConfig) -> Self { + let builder = OpPayloadBuilder::new(evm_config); + let inner = OpDebugWitnessApiInner { provider, builder }; + Self { inner: Arc::new(inner) } + } +} + +impl OpDebugWitnessApi +where + Provider: BlockReaderIdExt, +{ + /// Fetches the parent header by hash. + fn parent_header(&self, parent_block_hash: B256) -> ProviderResult { + self.inner + .provider + .sealed_header_by_hash(parent_block_hash)? + .ok_or_else(|| ProviderError::HeaderNotFound(parent_block_hash.into())) + } +} + +impl DebugExecutionWitnessApiServer + for OpDebugWitnessApi +where + Provider: BlockReaderIdExt + + StateProviderFactory + + ChainSpecProvider + + 'static, + EvmConfig: ConfigureEvm<Header = Header> + 'static, +{ + fn execute_payload( + &self, + parent_block_hash: B256, + attributes: OpPayloadAttributes, + ) -> RpcResult { + let parent_header = self.parent_header(parent_block_hash).to_rpc_result()?; + self.inner + .builder + .payload_witness(&self.inner.provider, parent_header, attributes) + .map_err(|err| internal_rpc_err(err.to_string())) + } +} + +impl Clone for OpDebugWitnessApi { + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} +impl Debug for OpDebugWitnessApi { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("OpDebugWitnessApi").finish_non_exhaustive() + } +} + +struct OpDebugWitnessApiInner { + provider: Provider, + builder: OpPayloadBuilder, +} diff --git a/crates/optimism/storage/src/lib.rs b/crates/optimism/storage/src/lib.rs index c3b8a71feea1..391f26093ba6 100644 --- a/crates/optimism/storage/src/lib.rs +++ b/crates/optimism/storage/src/lib.rs @@ -16,7 +16,7 @@ mod tests { CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, }; - use reth_primitives::{Account, Receipt, ReceiptWithBloom}; + use reth_primitives::{Account, Receipt}; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, @@ -40,7 +40,6 @@ mod tests { assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); assert_eq!(Receipt::bitflag_encoded_bytes(), 2); - assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); @@ -65,7 +64,6 @@ mod tests { validate_bitflag_backwards_compat!(PruneMode, UnusedBits::Zero); validate_bitflag_backwards_compat!(PruneSegment, UnusedBits::Zero); validate_bitflag_backwards_compat!(Receipt, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(ReceiptWithBloom, UnusedBits::Zero); validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index 74dea45d10d9..0315f73cae4e 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -15,9 +15,11 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-transaction-pool.workspace = true reth-provider.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-evm.workspace = true diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 6f2038ba4b4b..0ab411d3e600 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -17,10 +17,10 @@ use futures_util::FutureExt; use reth_chainspec::EthereumHardforks; use reth_evm::state_change::post_block_withdrawals_balance_increments; use reth_payload_builder::{KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator}; -use reth_payload_primitives::{ - BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, -}; -use reth_primitives::{constants::RETH_CLIENT_VERSION, proofs,
SealedHeader}; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadKind}; +use reth_primitives::{proofs, SealedHeader}; +use reth_primitives_traits::constants::RETH_CLIENT_VERSION; use reth_provider::{BlockReaderIdExt, CanonStateNotification, StateProviderFactory}; use reth_revm::cached::CachedReads; use reth_tasks::TaskSpawner; @@ -616,7 +616,9 @@ where if let Some(fut) = Pin::new(&mut this.maybe_better).as_pin_mut() { if let Poll::Ready(res) = fut.poll(cx) { this.maybe_better = None; - if let Ok(BuildOutcome::Better { payload, .. }) = res { + if let Ok(Some(payload)) = res.map(|out| out.into_payload()) + .inspect_err(|err| warn!(target: "payload_builder", %err, "failed to resolve pending payload")) + { debug!(target: "payload_builder", "resolving better payload"); return Poll::Ready(Ok(payload)) } @@ -767,7 +769,7 @@ impl BuildOutcome { /// Consumes the type and returns the payload if the outcome is `Better`. pub fn into_payload(self) -> Option { match self { - Self::Better { payload, .. } => Some(payload), + Self::Better { payload, .. } | Self::Freeze(payload) => Some(payload), _ => None, } } @@ -977,31 +979,6 @@ impl Default for MissingPayloadBehaviour { } } -/// Represents the outcome of committing withdrawals to the runtime database and post state. -/// Pre-shanghai these are `None` values. -#[derive(Default, Debug)] -pub struct WithdrawalsOutcome { - /// committed withdrawals, if any. - pub withdrawals: Option, - /// withdrawals root if any. - pub withdrawals_root: Option, -} - -impl WithdrawalsOutcome { - /// No withdrawals pre shanghai - pub const fn pre_shanghai() -> Self { - Self { withdrawals: None, withdrawals_root: None } - } - - /// No withdrawals - pub fn empty() -> Self { - Self { - withdrawals: Some(Withdrawals::default()), - withdrawals_root: Some(EMPTY_WITHDRAWALS), - } - } -} - /// Executes the withdrawals and commits them to the _runtime_ Database and `BundleState`. /// /// Returns the withdrawals root. @@ -1011,32 +988,26 @@ pub fn commit_withdrawals( db: &mut State, chain_spec: &ChainSpec, timestamp: u64, - withdrawals: Withdrawals, -) -> Result + withdrawals: &Withdrawals, +) -> Result, DB::Error> where DB: Database, ChainSpec: EthereumHardforks, { if !chain_spec.is_shanghai_active_at_timestamp(timestamp) { - return Ok(WithdrawalsOutcome::pre_shanghai()) + return Ok(None) } if withdrawals.is_empty() { - return Ok(WithdrawalsOutcome::empty()) + return Ok(Some(EMPTY_WITHDRAWALS)) } let balance_increments = - post_block_withdrawals_balance_increments(chain_spec, timestamp, &withdrawals); + post_block_withdrawals_balance_increments(chain_spec, timestamp, withdrawals); db.increment_balances(balance_increments)?; - let withdrawals_root = proofs::calculate_withdrawals_root(&withdrawals); - - // calculate withdrawals root - Ok(WithdrawalsOutcome { - withdrawals: Some(withdrawals), - withdrawals_root: Some(withdrawals_root), - }) + Ok(Some(proofs::calculate_withdrawals_root(withdrawals))) } /// Checks if the new payload is better than the current best. 
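With `WithdrawalsOutcome` removed, `commit_withdrawals` now borrows the withdrawal list and returns `Result<Option<B256>, DB::Error>`: `None` before Shanghai, `Some(EMPTY_WITHDRAWALS)` for an empty post-Shanghai list, and the computed root otherwise. A sketch of the adjusted call site (hedged: `db`, `chain_spec`, `attributes`, and `header` are assumed to be in scope in a payload builder):

// The withdrawal list is now passed by reference, so the caller keeps
// ownership of it for inclusion in the block body.
let withdrawals_root =
    commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, &attributes.withdrawals)?;

// `None` means pre-Shanghai: the header omits the withdrawals root entirely.
if let Some(root) = withdrawals_root {
    header.withdrawals_root = Some(root);
}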
diff --git a/crates/payload/builder-primitives/Cargo.toml b/crates/payload/builder-primitives/Cargo.toml new file mode 100644 index 000000000000..6d89ea89d03a --- /dev/null +++ b/crates/payload/builder-primitives/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "reth-payload-builder-primitives" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-payload-primitives.workspace = true + +# alloy +alloy-rpc-types-engine = { workspace = true, features = ["serde"] } + +# async +async-trait.workspace = true +pin-project.workspace = true +tokio = { workspace = true, features = ["sync"] } +tokio-stream.workspace = true + +# misc
tracing.workspace = true diff --git a/crates/payload/primitives/src/events.rs b/crates/payload/builder-primitives/src/events.rs similarity index 98% rename from crates/payload/primitives/src/events.rs rename to crates/payload/builder-primitives/src/events.rs index 3fb3813adb1e..d51f13f7c4cb 100644 --- a/crates/payload/primitives/src/events.rs +++ b/crates/payload/builder-primitives/src/events.rs @@ -1,4 +1,4 @@ -use crate::PayloadTypes; +use reth_payload_primitives::PayloadTypes; use std::{ pin::Pin, task::{ready, Context, Poll}, diff --git a/crates/payload/builder-primitives/src/lib.rs b/crates/payload/builder-primitives/src/lib.rs new file mode 100644 index 000000000000..af7ad736d44e --- /dev/null +++ b/crates/payload/builder-primitives/src/lib.rs @@ -0,0 +1,18 @@ +//! This crate defines abstractions to create and update payloads (blocks) + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +mod events; +pub use crate::events::{Events, PayloadEvents}; + +/// Contains the payload builder trait to abstract over payload attributes. +mod traits; +pub use traits::{PayloadBuilder, PayloadStoreExt}; + +pub use reth_payload_primitives::PayloadBuilderError; diff --git a/crates/payload/builder-primitives/src/traits.rs b/crates/payload/builder-primitives/src/traits.rs new file mode 100644 index 000000000000..b5e8910b6c26 --- /dev/null +++ b/crates/payload/builder-primitives/src/traits.rs @@ -0,0 +1,111 @@ +use crate::{PayloadBuilderError, PayloadEvents}; +use alloy_rpc_types_engine::PayloadId; +use reth_payload_primitives::{PayloadKind, PayloadTypes}; +use std::fmt::Debug; +use tokio::sync::oneshot; + +/// A helper trait for internal usage to retrieve and resolve payloads. +#[async_trait::async_trait] +pub trait PayloadStoreExt: Debug + Send + Sync + Unpin { + /// Resolves the payload job and returns the best payload that has been built so far. + async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option>; + + /// Resolves the payload job as fast as possible and returns the best payload that has been + /// built so far. + async fn resolve(&self, id: PayloadId) -> Option> { + self.resolve_kind(id, PayloadKind::Earliest).await + } + + /// Returns the best payload for the given identifier. + async fn best_payload( + &self, + id: PayloadId, + ) -> Option>; + + /// Returns the payload attributes associated with the given identifier.
+ async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option>; +} + +#[async_trait::async_trait] +impl PayloadStoreExt for P +where + P: PayloadBuilder, +{ + async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option> { + Some(PayloadBuilder::resolve_kind(self, id, kind).await?.map_err(Into::into)) + } + + async fn best_payload( + &self, + id: PayloadId, + ) -> Option> { + Some(PayloadBuilder::best_payload(self, id).await?.map_err(Into::into)) + } + + async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option> { + Some(PayloadBuilder::payload_attributes(self, id).await?.map_err(Into::into)) + } +} + +/// A type that can request, subscribe to and resolve payloads. +#[async_trait::async_trait] +pub trait PayloadBuilder: Debug + Send + Sync + Unpin { + /// The Payload type for the builder. + type PayloadType: PayloadTypes; + /// The error type returned by the builder. + type Error: Into; + + /// Sends a message to the service to start building a new payload for the given payload attributes. + /// + /// Returns a receiver that will receive the payload id. + fn send_new_payload( + &self, + attr: ::PayloadBuilderAttributes, + ) -> oneshot::Receiver>; + + /// Returns the best payload for the given identifier. + async fn best_payload( + &self, + id: PayloadId, + ) -> Option::BuiltPayload, Self::Error>>; + + /// Resolves the payload job and returns the best payload that has been built so far. + async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option::BuiltPayload, Self::Error>>; + + /// Resolves the payload job as fast as possible and returns the best payload that has been + /// built so far. + async fn resolve( + &self, + id: PayloadId, + ) -> Option::BuiltPayload, Self::Error>> { + self.resolve_kind(id, PayloadKind::Earliest).await + } + + /// Sends a message to the service to subscribe to payload events. + /// Returns a receiver that will receive them. + async fn subscribe(&self) -> Result, Self::Error>; + + /// Returns the payload attributes associated with the given identifier. + async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option::PayloadBuilderAttributes, Self::Error>>; +} diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 7a536cdbcfac..78814da50664 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-primitives = { workspace = true, optional = true } reth-chain-state.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-ethereum-engine-primitives.workspace = true @@ -39,11 +40,12 @@ tracing.workspace = true reth-primitives.workspace = true alloy-primitives.workspace = true revm.workspace = true +alloy-consensus.workspace = true [features] test-utils = [ - "alloy-primitives", - "reth-chain-state/test-utils", - "reth-primitives/test-utils", - "revm/test-utils" + "alloy-primitives", + "reth-chain-state/test-utils", + "reth-primitives/test-utils", + "revm/test-utils", ] diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 57a040a4bb48..b6191ea7fd11 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -28,9 +28,10 @@ //! use std::pin::Pin; //! use std::sync::Arc; //! use std::task::{Context, Poll}; +//! use alloy_consensus::Header; //! use alloy_primitives::U256; //!
use reth_payload_builder::{EthBuiltPayload, PayloadBuilderError, KeepPayloadJobAlive, EthPayloadBuilderAttributes, PayloadJob, PayloadJobGenerator, PayloadKind}; -//! use reth_primitives::{Block, Header}; +//! use reth_primitives::{Block, BlockExt}; //! //! /// The generator type that creates new jobs that builds empty blocks. //! pub struct EmptyBlockPayloadJobGenerator; @@ -112,7 +113,8 @@ pub mod noop; pub mod test_utils; pub use alloy_rpc_types::engine::PayloadId; -pub use reth_payload_primitives::{PayloadBuilderError, PayloadKind}; +pub use reth_payload_builder_primitives::PayloadBuilderError; +pub use reth_payload_primitives::PayloadKind; pub use service::{ PayloadBuilderHandle, PayloadBuilderService, PayloadServiceCommand, PayloadStore, }; diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs index 43beaf82c38f..af11ba75ce6a 100644 --- a/crates/payload/builder/src/service.rs +++ b/crates/payload/builder/src/service.rs @@ -10,14 +10,15 @@ use crate::{ use alloy_rpc_types::engine::PayloadId; use futures_util::{future::FutureExt, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; -use reth_payload_primitives::{ - BuiltPayload, Events, PayloadBuilder, PayloadBuilderAttributes, PayloadBuilderError, - PayloadEvents, PayloadKind, PayloadTypes, +use reth_payload_builder_primitives::{ + Events, PayloadBuilder, PayloadBuilderError, PayloadEvents, PayloadStoreExt, }; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadKind, PayloadTypes}; use std::{ fmt, future::Future, pin::Pin, + sync::Arc, task::{Context, Poll}, }; use tokio::sync::{ @@ -30,13 +31,14 @@ use tracing::{debug, info, trace, warn}; type PayloadFuture<P> = Pin<Box<dyn Future<Output = Result<P, PayloadBuilderError>> + Send + Sync>>; /// A communication channel to the [`PayloadBuilderService`] that can retrieve payloads. +/// +/// This type is intended to be used to retrieve payloads from the service (e.g. from the engine +/// API). #[derive(Debug)] pub struct PayloadStore { - inner: PayloadBuilderHandle, + inner: Arc>, } -// === impl PayloadStore === - impl PayloadStore where T: PayloadTypes, { @@ -82,12 +84,16 @@ where } } -impl Clone for PayloadStore +impl PayloadStore where T: PayloadTypes, { - fn clone(&self) -> Self { - Self { inner: self.inner.clone() } + /// Create a new instance + pub fn new<P>(inner: P) -> Self + where + P: PayloadStoreExt + 'static, + { + Self { inner: Arc::new(inner) } + } } @@ -96,7 +102,7 @@ where T: PayloadTypes, { fn from(inner: PayloadBuilderHandle) -> Self { - Self { inner } + Self::new(inner) } } @@ -156,6 +162,18 @@ where let _ = self.to_service.send(PayloadServiceCommand::Subscribe(tx)); Ok(PayloadEvents { receiver: rx.await? }) } + + /// Returns the payload attributes associated with the given identifier. + /// + /// Note: this returns the attributes of the payload and does not resolve the job. + async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option> { + let (tx, rx) = oneshot::channel(); + self.to_service.send(PayloadServiceCommand::PayloadAttributes(id, tx)).ok()?; + rx.await.ok()? + } } impl PayloadBuilderHandle @@ -169,18 +187,6 @@ where pub const fn new(to_service: mpsc::UnboundedSender>) -> Self { Self { to_service } } - - /// Returns the payload attributes associated with the given identifier. - /// - /// Note: this returns the attributes of the payload and does not resolve the job. - async fn payload_attributes( - &self, - id: PayloadId, - ) -> Option> { - let (tx, rx) = oneshot::channel(); - self.to_service.send(PayloadServiceCommand::PayloadAttributes(id, tx)).ok()?; - rx.await.ok()? - } } impl Clone for PayloadBuilderHandle diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 780df5c84636..4690ca14f0d8 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -7,8 +7,9 @@ use crate::{ use alloy_primitives::U256; use reth_chain_state::{CanonStateNotification, ExecutedBlock}; -use reth_payload_primitives::{PayloadBuilderError, PayloadKind, PayloadTypes}; -use reth_primitives::Block; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::{PayloadKind, PayloadTypes}; +use reth_primitives::{Block, BlockExt}; use std::{ future::Future, pin::Pin, diff --git a/crates/payload/builder/src/traits.rs b/crates/payload/builder/src/traits.rs index ba8486b69079..d9d54ccd0e45 100644 --- a/crates/payload/builder/src/traits.rs +++ b/crates/payload/builder/src/traits.rs @@ -1,9 +1,8 @@ //! Trait abstractions used by the payload crate. use reth_chain_state::CanonStateNotification; -use reth_payload_primitives::{ - BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError, PayloadKind, -}; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadKind}; use std::future::Future; /// A type that can build a payload.
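The service change above inverts the old coupling: `PayloadStore` no longer stores a concrete `PayloadBuilderHandle` but a type-erased `Arc` of the new `PayloadStoreExt` trait, which every `PayloadBuilder` gets for free through the blanket impl in `reth-payload-builder-primitives`. A hedged sketch of the two construction paths this opens up (`handle`, `my_builder`, and `payload_id` are assumed to be in scope; `resolve` stands for the store's existing retrieval methods, which this diff elides):

// Path 1: from the service handle, via the `From` impl shown above.
let store = PayloadStore::from(handle);

// Path 2: from any custom `PayloadBuilder`, via the new constructor; the
// blanket impl supplies `PayloadStoreExt` for it.
let store = PayloadStore::new(my_builder);

// Either store serves payload consumers (e.g. the engine API) identically.
let best = store.resolve(payload_id).await;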
diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml index 951108e7da3c..d4070b4688e0 100644 --- a/crates/payload/primitives/Cargo.toml +++ b/crates/payload/primitives/Cargo.toml @@ -16,25 +16,20 @@ workspace = true reth-chainspec.workspace = true reth-errors.workspace = true reth-primitives.workspace = true -reth-transaction-pool.workspace = true reth-chain-state.workspace = true +revm-primitives.workspace = true + # alloy alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["serde"] } op-alloy-rpc-types-engine = { workspace = true, optional = true } -# async -async-trait.workspace = true -tokio = { workspace = true, features = ["sync"] } -tokio-stream.workspace = true -pin-project.workspace = true - # misc serde.workspace = true thiserror.workspace = true -tracing.workspace = true +tokio = { workspace = true, default-features = false, features = ["sync"] } [features] op = ["dep:op-alloy-rpc-types-engine"] \ No newline at end of file diff --git a/crates/payload/primitives/src/error.rs b/crates/payload/primitives/src/error.rs index 16446255c359..ffe4e027e966 100644 --- a/crates/payload/primitives/src/error.rs +++ b/crates/payload/primitives/src/error.rs @@ -1,9 +1,9 @@ //! Error types emitted by types or implementations of this crate. use alloy_primitives::B256; +use alloy_rpc_types_engine::ForkchoiceUpdateError; use reth_errors::{ProviderError, RethError}; -use reth_primitives::revm_primitives::EVMError; -use reth_transaction_pool::BlobStoreError; +use revm_primitives::EVMError; use tokio::sync::oneshot; /// Possible error variants during payload building. @@ -21,21 +21,12 @@ pub enum PayloadBuilderError { /// If there's no payload to resolve. #[error("missing payload")] MissingPayload, - /// Build cancelled - #[error("build outcome cancelled")] - BuildOutcomeCancelled, - /// Error occurring in the blob store. - #[error(transparent)] - BlobStore(#[from] BlobStoreError), /// Other internal error #[error(transparent)] Internal(#[from] RethError), /// Unrecoverable error during evm execution. #[error("evm execution error: {0}")] EvmExecutionError(EVMError), - /// Thrown if the payload requests withdrawals before Shanghai activation. - #[error("withdrawals set before Shanghai activation")] - WithdrawalsBeforeShanghai, /// Any other payload building errors. #[error(transparent)] Other(Box), @@ -63,7 +54,7 @@ impl From for PayloadBuilderError { } } -/// Thrown when the payload or attributes are known to be invalid before processing. +/// Thrown when the payload or attributes are known to be invalid __before__ processing. /// /// This is used mainly for /// [`validate_version_specific_fields`](crate::validate_version_specific_fields), which validates @@ -125,3 +116,20 @@ impl EngineObjectValidationError { Self::InvalidParams(Box::new(error)) } } + +/// Thrown when validating the correctness of a payload attributes object. +#[derive(thiserror::Error, Debug)] +pub enum InvalidPayloadAttributesError { + /// Thrown if the timestamp of the payload attributes is invalid according to the engine specs. + #[error("parent beacon block root not supported before V3")] + InvalidTimestamp, + /// Another type of error that is not covered by the above variants.
+ #[error("Invalid params: {0}")] + InvalidParams(#[from] Box), +} + +impl From for ForkchoiceUpdateError { + fn from(_: InvalidPayloadAttributesError) -> Self { + Self::UpdatedInvalidPayloadAttributes + } +} diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 7013d9fd913c..523e6fb057a6 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -9,18 +9,16 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod error; - -pub use error::{EngineObjectValidationError, PayloadBuilderError, VersionSpecificValidationError}; - -mod events; -pub use crate::events::{Events, PayloadEvents}; +pub use error::{ + EngineObjectValidationError, InvalidPayloadAttributesError, PayloadBuilderError, + VersionSpecificValidationError, +}; /// Contains traits to abstract over payload attributes types and default implementations of the /// [`PayloadAttributes`] trait for ethereum mainnet and optimism types. mod traits; pub use traits::{ - BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, PayloadBuilder, - PayloadBuilderAttributes, + BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, PayloadBuilderAttributes, }; mod payload; diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index 160808854a9c..8d5c429e6c64 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -1,4 +1,3 @@ -use crate::{PayloadBuilderError, PayloadEvents, PayloadKind, PayloadTypes}; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, eip7685::Requests, @@ -7,50 +6,6 @@ use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}; use reth_chain_state::ExecutedBlock; use reth_primitives::SealedBlock; -use tokio::sync::oneshot; - -/// A type that can request, subscribe to and resolve payloads. -#[async_trait::async_trait] -pub trait PayloadBuilder: Send + Unpin { - /// The Payload type for the builder. - type PayloadType: PayloadTypes; - /// The error type returned by the builder. - type Error: Into; - - /// Sends a message to the service to start building a new payload for the given payload. - /// - /// Returns a receiver that will receive the payload id. - fn send_new_payload( - &self, - attr: ::PayloadBuilderAttributes, - ) -> oneshot::Receiver>; - - /// Returns the best payload for the given identifier. - async fn best_payload( - &self, - id: PayloadId, - ) -> Option::BuiltPayload, Self::Error>>; - - /// Resolves the payload job and returns the best payload that has been built so far. - async fn resolve_kind( - &self, - id: PayloadId, - kind: PayloadKind, - ) -> Option::BuiltPayload, Self::Error>>; - - /// Resolves the payload job as fast and possible and returns the best payload that has been - /// built so far. - async fn resolve( - &self, - id: PayloadId, - ) -> Option::BuiltPayload, Self::Error>> { - self.resolve_kind(id, PayloadKind::Earliest).await - } - - /// Sends a message to the service to subscribe to payload events. - /// Returns a receiver that will receive them. - async fn subscribe(&self) -> Result, Self::Error>; -} /// Represents a built payload type that contains a built [`SealedBlock`] and can be converted into /// engine API execution payloads. 
diff --git a/crates/payload/util/Cargo.toml b/crates/payload/util/Cargo.toml new file mode 100644 index 000000000000..2da8dc660280 --- /dev/null +++ b/crates/payload/util/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "reth-payload-util" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "reth payload utilities" + +[lints] +workspace = true + +[dependencies] +# reth +reth-primitives.workspace = true + +# alloy +alloy-primitives.workspace = true +alloy-consensus.workspace = true \ No newline at end of file diff --git a/crates/payload/util/src/lib.rs b/crates/payload/util/src/lib.rs new file mode 100644 index 000000000000..5ad0e83507b2 --- /dev/null +++ b/crates/payload/util/src/lib.rs @@ -0,0 +1,15 @@ +//! payload utils. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +mod traits; +mod transaction; + +pub use traits::PayloadTransactions; +pub use transaction::{PayloadTransactionsChain, PayloadTransactionsFixed}; diff --git a/crates/payload/util/src/traits.rs b/crates/payload/util/src/traits.rs new file mode 100644 index 000000000000..52dad5111698 --- /dev/null +++ b/crates/payload/util/src/traits.rs @@ -0,0 +1,20 @@ +use alloy_primitives::Address; +use reth_primitives::TransactionSignedEcRecovered; + +/// Iterator that returns transactions for the block building process in the order they should be +/// included in the block. +/// +/// Can include transactions from the pool and other sources (alternative pools, +/// sequencer-originated transactions, etc.). +pub trait PayloadTransactions { + /// Returns the next transaction to include in the block. + fn next( + &mut self, + // In the future, `ctx` can include access to state for block building purposes. + ctx: (), + ) -> Option; + + /// Exclude descendants of the transaction with given sender and nonce from the iterator, + /// because this transaction won't be included in the block. + fn mark_invalid(&mut self, sender: Address, nonce: u64); +} diff --git a/crates/payload/util/src/transaction.rs b/crates/payload/util/src/transaction.rs new file mode 100644 index 000000000000..a45e177d4d34 --- /dev/null +++ b/crates/payload/util/src/transaction.rs @@ -0,0 +1,128 @@ +use crate::PayloadTransactions; +use alloy_consensus::Transaction; +use alloy_primitives::Address; +use reth_primitives::TransactionSignedEcRecovered; + +/// An implementation of [`crate::traits::PayloadTransactions`] that yields +/// a pre-defined set of transactions. +/// +/// This is useful to put a sequencer-specified set of transactions into the block +/// and compose it with the rest of the transactions. +#[derive(Debug)] +pub struct PayloadTransactionsFixed { + transactions: Vec, + index: usize, +} + +impl PayloadTransactionsFixed { + /// Constructs a new [`PayloadTransactionsFixed`]. + pub fn new(transactions: Vec) -> Self { + Self { transactions, index: Default::default() } + } + + /// Constructs a new [`PayloadTransactionsFixed`] with a single transaction. 
+ pub fn single(transaction: T) -> Self { + Self { transactions: vec![transaction], index: Default::default() } + } +} + +impl PayloadTransactions for PayloadTransactionsFixed { + fn next(&mut self, _ctx: ()) -> Option { + (self.index < self.transactions.len()).then(|| { + let tx = self.transactions[self.index].clone(); + self.index += 1; + tx + }) + } + + fn mark_invalid(&mut self, _sender: Address, _nonce: u64) {} +} + +/// Wrapper over [`crate::traits::PayloadTransactions`] that combines transactions from multiple +/// `PayloadTransactions` iterators and keeps track of the gas used by both iterators. +/// +/// We can't use [`Iterator::chain`], because: +/// (a) we need to propagate the `mark_invalid` and `no_updates` +/// (b) we need to keep track of the gas +/// +/// Note that [`PayloadTransactionsChain`] fully drains the first iterator +/// before moving to the second one. +/// +/// If the `before` iterator has transactions that do not fit into the block, +/// the `after` iterator will receive a `mark_invalid` call for each of them. +#[derive(Debug)] +pub struct PayloadTransactionsChain { + /// Iterator that will be used first + before: B, + /// Allowed gas for the transactions from `before` iterator. If `None`, no gas limit is + /// enforced. + before_max_gas: Option, + /// Gas used by the transactions from `before` iterator + before_gas: u64, + /// Iterator that will be used after `before` iterator + after: A, + /// Allowed gas for the transactions from `after` iterator. If `None`, no gas limit is + /// enforced. + after_max_gas: Option, + /// Gas used by the transactions from `after` iterator + after_gas: u64, +} + +impl PayloadTransactionsChain { + /// Constructs a new [`PayloadTransactionsChain`]. + pub fn new( + before: B, + before_max_gas: Option, + after: A, + after_max_gas: Option, + ) -> Self { + Self { + before, + before_max_gas, + before_gas: Default::default(), + after, + after_max_gas, + after_gas: Default::default(), + } + } +} + +impl PayloadTransactions for PayloadTransactionsChain +where + B: PayloadTransactions, + A: PayloadTransactions, +{ + fn next(&mut self, ctx: ()) -> Option { + while let Some(tx) = self.before.next(ctx) { + if let Some(before_max_gas) = self.before_max_gas { + if self.before_gas + tx.transaction.gas_limit() <= before_max_gas { + self.before_gas += tx.transaction.gas_limit(); + return Some(tx); + } + self.before.mark_invalid(tx.signer(), tx.transaction.nonce()); + self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); + } else { + return Some(tx); + } + } + + while let Some(tx) = self.after.next(ctx) { + if let Some(after_max_gas) = self.after_max_gas { + if self.after_gas + tx.transaction.gas_limit() <= after_max_gas { + self.after_gas += tx.transaction.gas_limit(); + return Some(tx); + } + self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); + } else { + return Some(tx); + } + } + + None + } + + fn mark_invalid(&mut self, sender: Address, nonce: u64) { + self.before.mark_invalid(sender, nonce); + self.after.mark_invalid(sender, nonce); + } +} diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index e74b5f48d40f..0a872a68ddfa 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -12,7 +12,7 @@ use alloy_rpc_types::engine::{ ExecutionPayload, ExecutionPayloadSidecar, MaybeCancunPayloadFields, PayloadError, }; use reth_chainspec::EthereumHardforks; -use reth_primitives::SealedBlock; +use reth_primitives::{BlockExt, SealedBlock}; use
reth_rpc_types_compat::engine::payload::try_into_block; use std::sync::Arc; diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index 6cafe8b8b1e5..df4491b2d126 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -12,26 +12,28 @@ description = "Common types in reth." workspace = true [dependencies] -reth-codecs.workspace = true +# reth +reth-codecs = { workspace = true, optional = true } -alloy-consensus = { workspace = true, features = ["serde"] } +# ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true - -revm-primitives = { workspace = true, features = ["serde"] } +revm-primitives.workspace = true # misc -byteorder = "1" +byteorder = { workspace = true, optional = true } +bytes.workspace = true derive_more.workspace = true roaring = "0.10.2" serde_with = { workspace = true, optional = true } +auto_impl.workspace = true # required by reth-codecs -bytes.workspace = true -modular-bitfield.workspace = true -serde.workspace = true +modular-bitfield = { workspace = true, optional = true } +serde = { workspace = true, optional = true} # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } @@ -39,8 +41,6 @@ proptest = { workspace = true, optional = true } proptest-arbitrary-interop = { workspace = true, optional = true } [dev-dependencies] -reth-testing-utils.workspace = true - alloy-primitives = { workspace = true, features = ["arbitrary"] } alloy-consensus = { workspace = true, features = ["arbitrary"] } @@ -50,6 +50,8 @@ proptest.workspace = true rand.workspace = true serde_json.workspace = true test-fuzz.workspace = true +modular-bitfield.workspace = true +serde.workspace = true [features] default = ["std"] @@ -59,11 +61,11 @@ std = [ "alloy-genesis/std", "alloy-primitives/std", "revm-primitives/std", - "serde/std" + "serde?/std" ] test-utils = [ "arbitrary", - "reth-codecs/test-utils" + "reth-codecs?/test-utils" ] arbitrary = [ "std", @@ -74,10 +76,28 @@ arbitrary = [ "dep:proptest-arbitrary-interop", "alloy-eips/arbitrary", "revm-primitives/arbitrary", - "reth-codecs/arbitrary" + "reth-codecs?/arbitrary" ] serde-bincode-compat = [ + "serde", "serde_with", "alloy-consensus/serde-bincode-compat", "alloy-eips/serde-bincode-compat" ] +serde = [ + "dep:serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde", + "reth-codecs?/serde", + "revm-primitives/serde", + "roaring/serde", + "revm-primitives/serde", +] +reth-codec = [ + "dep:reth-codecs", + "dep:modular-bitfield", + "dep:byteorder", +] diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index ae58973edd71..c8504f3b63cb 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -1,32 +1,34 @@ use alloy_consensus::constants::KECCAK_EMPTY; use alloy_genesis::GenesisAccount; use alloy_primitives::{keccak256, Bytes, B256, U256}; -use byteorder::{BigEndian, ReadBytesExt}; -use bytes::Buf; use derive_more::Deref; -use reth_codecs::{add_arbitrary_tests, Compact}; -use revm_primitives::{AccountInfo, Bytecode as RevmBytecode, BytecodeDecodeError, JumpTable}; -use serde::{Deserialize, Serialize}; +use revm_primitives::{AccountInfo, Bytecode as RevmBytecode, BytecodeDecodeError}; -/// Identifier for [`LegacyRaw`](RevmBytecode::LegacyRaw). 
-const LEGACY_RAW_BYTECODE_ID: u8 = 0; +#[cfg(any(test, feature = "reth-codec"))] +/// Identifiers used in [`Compact`](reth_codecs::Compact) encoding of [`Bytecode`]. +pub mod compact_ids { + /// Identifier for [`LegacyRaw`](revm_primitives::Bytecode::LegacyRaw). + pub const LEGACY_RAW_BYTECODE_ID: u8 = 0; -/// Identifier for removed bytecode variant. -const REMOVED_BYTECODE_ID: u8 = 1; + /// Identifier for removed bytecode variant. + pub const REMOVED_BYTECODE_ID: u8 = 1; -/// Identifier for [`LegacyAnalyzed`](RevmBytecode::LegacyAnalyzed). -const LEGACY_ANALYZED_BYTECODE_ID: u8 = 2; + /// Identifier for [`LegacyAnalyzed`](revm_primitives::Bytecode::LegacyAnalyzed). + pub const LEGACY_ANALYZED_BYTECODE_ID: u8 = 2; -/// Identifier for [`Eof`](RevmBytecode::Eof). -const EOF_BYTECODE_ID: u8 = 3; + /// Identifier for [`Eof`](revm_primitives::Bytecode::Eof). + pub const EOF_BYTECODE_ID: u8 = 3; -/// Identifier for [`Eip7702`](RevmBytecode::Eip7702). -const EIP7702_BYTECODE_ID: u8 = 4; + /// Identifier for [`Eip7702`](revm_primitives::Bytecode::Eip7702). + pub const EIP7702_BYTECODE_ID: u8 = 4; +} /// An Ethereum account. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Default, Serialize, Deserialize, Compact)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct Account { /// Account nonce. pub nonce: u64, @@ -60,7 +62,8 @@ impl Account { /// Bytecode for an account. /// /// A wrapper around [`revm::primitives::Bytecode`][RevmBytecode] with encoding/decoding support. -#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, Deref)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, Default, PartialEq, Eq, Deref)] pub struct Bytecode(pub RevmBytecode); impl Bytecode { @@ -84,11 +87,17 @@ impl Bytecode { } } -impl Compact for Bytecode { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for Bytecode { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, { + use compact_ids::{ + EIP7702_BYTECODE_ID, EOF_BYTECODE_ID, LEGACY_ANALYZED_BYTECODE_ID, + LEGACY_RAW_BYTECODE_ID, + }; + let bytecode = match &self.0 { RevmBytecode::LegacyRaw(bytes) => bytes, RevmBytecode::LegacyAnalyzed(analyzed) => analyzed.bytecode(), @@ -127,7 +136,12 @@ impl Compact for Bytecode { // A panic will be triggered if a bytecode variant of 1 or greater than 2 is passed from the // database. 
fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { - let len = buf.read_u32::().expect("could not read bytecode length"); + use byteorder::ReadBytesExt; + use bytes::Buf; + + use compact_ids::*; + + let len = buf.read_u32::().expect("could not read bytecode length"); let bytes = Bytes::from(buf.copy_to_bytes(len as usize)); let variant = buf.read_u8().expect("could not read bytecode variant"); let decoded = match variant { @@ -138,8 +152,8 @@ impl Compact for Bytecode { LEGACY_ANALYZED_BYTECODE_ID => Self(unsafe { RevmBytecode::new_analyzed( bytes, - buf.read_u64::().unwrap() as usize, - JumpTable::from_slice(buf), + buf.read_u64::().unwrap() as usize, + revm_primitives::JumpTable::from_slice(buf), ) }), EOF_BYTECODE_ID | EIP7702_BYTECODE_ID => { @@ -186,9 +200,11 @@ impl From for AccountInfo { #[cfg(test)] mod tests { - use super::*; use alloy_primitives::{hex_literal::hex, B256, U256}; - use revm_primitives::LegacyAnalyzedBytecode; + use reth_codecs::Compact; + use revm_primitives::{JumpTable, LegacyAnalyzedBytecode}; + + use super::*; #[test] fn test_account() { diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index c9b673ec7241..76bf916add9b 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -2,95 +2,46 @@ use alloc::{fmt, vec::Vec}; -use alloy_consensus::{BlockHeader, Transaction, TxType}; -use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; -use alloy_primitives::{Address, B256}; +use alloy_eips::eip4895::Withdrawals; -use crate::Block; +use crate::{FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde, SignedTransaction}; + +/// Helper trait that unifies all behaviour required by transaction to support full node operations. +pub trait FullBlockBody: BlockBody {} + +impl FullBlockBody for T where T: BlockBody {} /// Abstraction for block's body. pub trait BlockBody: - Clone + Send + + Sync + + Unpin + + Clone + + Default + fmt::Debug + PartialEq + Eq - + Default - + serde::Serialize - + for<'de> serde::Deserialize<'de> + alloy_rlp::Encodable + alloy_rlp::Decodable + + InMemorySize + + MaybeSerde + + MaybeArbitrary { /// Ordered list of signed transactions as committed in block. - // todo: requires trait for signed transaction - type SignedTransaction: Transaction; - - /// Header type (uncle blocks). - type Header: BlockHeader; + type Transaction: SignedTransaction; - /// Withdrawals in block. - type Withdrawals: Iterator; + /// Ommer header type. + type OmmerHeader; /// Returns reference to transactions in block. - fn transactions(&self) -> &[Self::SignedTransaction]; - - /// Returns `Withdrawals` in the block, if any. - // todo: branch out into extension trait - fn withdrawals(&self) -> Option<&Self::Withdrawals>; - - /// Returns reference to uncle block headers. - fn ommers(&self) -> &[Self::Header]; - - /// Returns [`Requests`] in block, if any. - fn requests(&self) -> Option<&Requests>; - - /// Create a [`Block`] from the body and its header. - fn into_block>(self, header: Self::Header) -> T { - T::from((header, self)) - } - - /// Calculate the transaction root for the block body. - fn calculate_tx_root(&self) -> B256; - - /// Calculate the ommers root for the block body. - fn calculate_ommers_root(&self) -> B256; - - /// Calculate the withdrawals root for the block body, if withdrawals exist. If there are no - /// withdrawals, this will return `None`. 
- // todo: can be default impl if `calculate_withdrawals_root` made into a method on - // `Withdrawals` and `Withdrawals` moved to alloy - fn calculate_withdrawals_root(&self) -> Option; - - /// Recover signer addresses for all transactions in the block body. - fn recover_signers(&self) -> Option>; - - /// Returns whether or not the block body contains any blob transactions. - fn has_blob_transactions(&self) -> bool { - self.transactions().iter().any(|tx| tx.ty() == TxType::Eip4844 as u8) - } - - /// Returns whether or not the block body contains any EIP-7702 transactions. - fn has_eip7702_transactions(&self) -> bool { - self.transactions().iter().any(|tx| tx.ty() == TxType::Eip7702 as u8) - } - - /// Returns an iterator over all blob transactions of the block - fn blob_transactions_iter(&self) -> impl Iterator + '_ { - self.transactions().iter().filter(|tx| tx.ty() == TxType::Eip4844 as u8) - } - - /// Returns only the blob transactions, if any, from the block body. - fn blob_transactions(&self) -> Vec<&Self::SignedTransaction> { - self.blob_transactions_iter().collect() - } + fn transactions(&self) -> &[Self::Transaction]; - /// Returns an iterator over all blob versioned hashes from the block body. - fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_; + /// Consume the block body and return a [`Vec`] of transactions. + fn into_transactions(self) -> Vec; - /// Returns all blob versioned hashes from the block body. - fn blob_versioned_hashes(&self) -> Vec<&B256> { - self.blob_versioned_hashes_iter().collect() - } + /// Returns block withdrawals if any. + fn withdrawals(&self) -> Option<&Withdrawals>; - /// Calculates a heuristic for the in-memory size of the [`BlockBody`]. - fn size(&self) -> usize; + /// Returns block ommers if any. + fn ommers(&self) -> Option<&[Self::OmmerHeader]>; } diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs new file mode 100644 index 000000000000..26806808532b --- /dev/null +++ b/crates/primitives-traits/src/block/header.rs @@ -0,0 +1,52 @@ +//! Block header data primitive. + +use core::fmt; + +use alloy_primitives::Sealable; + +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; + +/// Helper trait that unifies all behaviour required by block header to support full node +/// operations. +pub trait FullBlockHeader: BlockHeader + MaybeCompact {} + +impl FullBlockHeader for T where T: BlockHeader + MaybeCompact {} + +/// Abstraction of a block header. +pub trait BlockHeader: + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + alloy_rlp::Encodable + + alloy_rlp::Decodable + + alloy_consensus::BlockHeader + + Sealable + + InMemorySize + + MaybeSerde + + MaybeArbitrary +{ +} + +impl BlockHeader for T where + T: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + alloy_rlp::Encodable + + alloy_rlp::Decodable + + alloy_consensus::BlockHeader + + Sealable + + InMemorySize + + MaybeSerde + + MaybeArbitrary +{ +} diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 519987606eee..5b22ff590be5 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -1,107 +1,60 @@ //! Block abstraction. 
pub mod body; +pub mod header; -use alloc::{fmt, vec::Vec}; +use alloc::fmt; -use alloy_consensus::BlockHeader; -use alloy_primitives::{Address, Sealable, B256}; -use reth_codecs::Compact; - -use crate::BlockBody; +use crate::{ + BlockBody, BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeArbitrary, + MaybeSerde, +}; /// Helper trait that unifies all behaviour required by block to support full node operations. -pub trait FullBlock: Block + Compact {} +pub trait FullBlock: + Block + alloy_rlp::Encodable + alloy_rlp::Decodable +{ +} -impl FullBlock for T where T: Block + Compact {} +impl FullBlock for T where + T: Block + + alloy_rlp::Encodable + + alloy_rlp::Decodable +{ +} /// Abstraction of block data type. // todo: make sealable super-trait, depends on // todo: make with senders extension trait, so block can be impl by block type already containing // senders pub trait Block: - fmt::Debug + Send + + Sync + + Unpin + Clone + + Default + + fmt::Debug + PartialEq + Eq - + Default - + serde::Serialize - + for<'a> serde::Deserialize<'a> - + From<(Self::Header, Self::Body)> - + Into<(Self::Header, Self::Body)> + + InMemorySize + + MaybeSerde + + MaybeArbitrary { /// Header part of the block. - type Header: BlockHeader + Sealable; + type Header: BlockHeader + 'static; /// The block's body contains the transactions in the block. - type Body: BlockBody; + type Body: BlockBody + Send + Sync + Unpin + 'static; - /// A block and block hash. - type SealedBlock; + /// Create new block instance. + fn new(header: Self::Header, body: Self::Body) -> Self; - /// A block and addresses of senders of transactions in it. - type BlockWithSenders; - - /// Returns reference to [`BlockHeader`] type. + /// Returns reference to block header. fn header(&self) -> &Self::Header; - /// Returns reference to [`BlockBody`] type. + /// Returns reference to block body. fn body(&self) -> &Self::Body; - /// Calculate the header hash and seal the block so that it can't be changed. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn seal_slow(self) -> Self::SealedBlock; - - /// Seal the block with a known hash. - /// - /// WARNING: This method does not perform validation whether the hash is correct. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn seal(self, hash: B256) -> Self::SealedBlock; - - /// Expensive operation that recovers transaction signer. See - /// `SealedBlockWithSenders`. - fn senders(&self) -> Option> { - self.body().recover_signers() - } - - /// Transform into a `BlockWithSenders`. - /// - /// # Panics - /// - /// If the number of senders does not match the number of transactions in the block - /// and the signer recovery for one of the transactions fails. - /// - /// Note: this is expected to be called with blocks read from disk. - #[track_caller] - fn with_senders_unchecked(self, senders: Vec<Address>) -> Self::BlockWithSenders { - self.try_with_senders_unchecked(senders).expect("stored block is valid") - } - - /// Transform into a `BlockWithSenders` using the given senders. - /// - /// If the number of senders does not match the number of transactions in the block, this falls - /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. - /// See also `SignedTransaction::recover_signer_unchecked`. - /// - /// Returns an error if a signature is invalid. - // todo: can be default impl if block with senders type is made generic over block and migrated - // to alloy - #[track_caller] - fn try_with_senders_unchecked( - self, - senders: Vec<Address>
, - ) -> Result, Self>; - - /// **Expensive**. Transform into a `BlockWithSenders` by recovering senders in the contained - /// transactions. - /// - /// Returns `None` if a transaction is invalid. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn with_recovered_senders(self) -> Option>; - - /// Calculates a heuristic for the in-memory size of the [`Block`]. - fn size(&self) -> usize; + /// Splits the block into its header and body. + fn split(self) -> (Self::Header, Self::Body); } diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index 94eaf95c269f..e927ed3a7dfb 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -1,7 +1,5 @@ //! Ethereum protocol-related constants -use alloy_primitives::{b256, B256}; - /// Gas units, for example [`GIGAGAS`]. pub mod gas_units; pub use gas_units::{GIGAGAS, KILOGAS, MEGAGAS}; @@ -12,10 +10,6 @@ pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION" /// Minimum gas limit allowed for transactions. pub const MINIMUM_GAS_LIMIT: u64 = 5000; -/// Holesky genesis hash: `0xb5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4` -pub const HOLESKY_GENESIS_HASH: B256 = - b256!("b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4"); - /// The number of blocks to unwind during a reorg that already became a part of canonical chain. /// /// In reality, the node can end up in this particular situation very rarely. It would happen only diff --git a/crates/primitives-traits/src/encoded.rs b/crates/primitives-traits/src/encoded.rs new file mode 100644 index 000000000000..885031af1b63 --- /dev/null +++ b/crates/primitives-traits/src/encoded.rs @@ -0,0 +1,63 @@ +use alloy_eips::eip2718::Encodable2718; +use alloy_primitives::Bytes; + +/// Generic wrapper with encoded Bytes, such as transaction data. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct WithEncoded(Bytes, pub T); + +impl From<(Bytes, T)> for WithEncoded { + fn from(value: (Bytes, T)) -> Self { + Self(value.0, value.1) + } +} + +impl WithEncoded { + /// Wraps the value with the bytes. + pub const fn new(bytes: Bytes, value: T) -> Self { + Self(bytes, value) + } + + /// Get the encoded bytes + pub const fn encoded_bytes(&self) -> &Bytes { + &self.0 + } + + /// Get the underlying value + pub const fn value(&self) -> &T { + &self.1 + } + + /// Returns ownership of the underlying value. + pub fn into_value(self) -> T { + self.1 + } + + /// Transform the value + pub fn transform>(self) -> WithEncoded { + WithEncoded(self.0, self.1.into()) + } + + /// Split the wrapper into [`Bytes`] and value tuple + pub fn split(self) -> (Bytes, T) { + (self.0, self.1) + } + + /// Maps the inner value to a new value using the given function. + pub fn map U>(self, op: F) -> WithEncoded { + WithEncoded(self.0, op(self.1)) + } +} + +impl WithEncoded { + /// Wraps the value with the [`Encodable2718::encoded_2718`] bytes. + pub fn from_2718_encodable(value: T) -> Self { + Self(value.encoded_2718().into(), value) + } +} + +impl WithEncoded> { + /// returns `None` if the inner value is `None`, otherwise returns `Some(WithEncoded)`. 
+ pub fn transpose(self) -> Option> { + self.1.map(|v| WithEncoded(self.0, v)) + } +} diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index fa9c33245359..ea5f7eafb518 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ b/crates/primitives-traits/src/header/mod.rs @@ -1,5 +1,5 @@ mod sealed; -pub use sealed::SealedHeader; +pub use sealed::{BlockWithParent, Header, SealedHeader}; mod error; pub use error::HeaderError; @@ -7,73 +7,8 @@ pub use error::HeaderError; #[cfg(any(test, feature = "test-utils", feature = "arbitrary"))] pub mod test_utils; -pub use alloy_consensus::Header; - -use alloy_primitives::{Address, BlockNumber, B256, U256}; - /// Bincode-compatible header type serde implementations. #[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { pub use super::sealed::serde_bincode_compat::SealedHeader; } - -/// Trait for extracting specific Ethereum block data from a header -pub trait BlockHeader { - /// Retrieves the beneficiary (miner) of the block - fn beneficiary(&self) -> Address; - - /// Retrieves the difficulty of the block - fn difficulty(&self) -> U256; - - /// Retrieves the block number - fn number(&self) -> BlockNumber; - - /// Retrieves the gas limit of the block - fn gas_limit(&self) -> u64; - - /// Retrieves the timestamp of the block - fn timestamp(&self) -> u64; - - /// Retrieves the mix hash of the block - fn mix_hash(&self) -> B256; - - /// Retrieves the base fee per gas of the block, if available - fn base_fee_per_gas(&self) -> Option; - - /// Retrieves the excess blob gas of the block, if available - fn excess_blob_gas(&self) -> Option; -} - -impl BlockHeader for Header { - fn beneficiary(&self) -> Address { - self.beneficiary - } - - fn difficulty(&self) -> U256 { - self.difficulty - } - - fn number(&self) -> BlockNumber { - self.number - } - - fn gas_limit(&self) -> u64 { - self.gas_limit - } - - fn timestamp(&self) -> u64 { - self.timestamp - } - - fn mix_hash(&self) -> B256 { - self.mix_hash - } - - fn base_fee_per_gas(&self) -> Option { - self.base_fee_per_gas - } - - fn excess_blob_gas(&self) -> Option { - self.excess_blob_gas - } -} diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 7552ece31f10..08add0ac3c15 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,20 +1,31 @@ -use super::Header; +pub use alloy_consensus::Header; + +use core::mem; + use alloy_consensus::Sealed; use alloy_eips::BlockNumHash; -use alloy_primitives::{keccak256, BlockHash, Sealable}; -#[cfg(any(test, feature = "test-utils"))] -use alloy_primitives::{BlockNumber, B256, U256}; +use alloy_primitives::{keccak256, BlockHash, Sealable, B256}; use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; -use core::mem; use derive_more::{AsRef, Deref}; -use reth_codecs::add_arbitrary_tests; -use serde::{Deserialize, Serialize}; + +use crate::InMemorySize; + +/// A helper struct to store the block number/hash and its parent hash. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct BlockWithParent { + /// Parent hash. + pub parent: B256, + /// Block number/hash. + pub block: BlockNumHash, +} /// A [`Header`] that is sealed at a precalculated hash, use [`SealedHeader::unseal()`] if you want /// to modify header. 
-#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] -#[add_arbitrary_tests(rlp)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] pub struct SealedHeader { /// Locked Header hash. hash: BlockHash, @@ -30,12 +41,10 @@ impl SealedHeader { pub const fn new(header: H, hash: BlockHash) -> Self { Self { header, hash } } -} -impl SealedHeader { /// Returns the sealed Header fields. #[inline] - pub const fn header(&self) -> &Header { + pub const fn header(&self) -> &H { &self.header } @@ -46,32 +55,42 @@ impl SealedHeader { } /// Extract raw header that can be modified. - pub fn unseal(self) -> Header { + pub fn unseal(self) -> H { self.header } /// This is the inverse of [`Header::seal_slow`] which returns the raw header and hash. - pub fn split(self) -> (Header, BlockHash) { + pub fn split(self) -> (H, BlockHash) { (self.header, self.hash) } +} + +impl SealedHeader { + /// Hashes the header and creates a sealed header. + pub fn seal(header: H) -> Self { + let hash = header.hash_slow(); + Self::new(header, hash) + } +} +impl SealedHeader { /// Return the number hash tuple. pub fn num_hash(&self) -> BlockNumHash { - BlockNumHash::new(self.number, self.hash) + BlockNumHash::new(self.number(), self.hash) } +} +impl InMemorySize for SealedHeader { /// Calculates a heuristic for the in-memory size of the [`SealedHeader`]. #[inline] - pub fn size(&self) -> usize { + fn size(&self) -> usize { self.header.size() + mem::size_of::() } } -impl Default for SealedHeader { +impl Default for SealedHeader { fn default() -> Self { - let sealed = Header::default().seal_slow(); - let (header, hash) = sealed.into_parts(); - Self { header, hash } + Self::seal(H::default()) } } @@ -118,17 +137,17 @@ impl SealedHeader { } /// Updates the block number. - pub fn set_block_number(&mut self, number: BlockNumber) { + pub fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) { self.header.number = number; } /// Updates the block state root. - pub fn set_state_root(&mut self, state_root: B256) { + pub fn set_state_root(&mut self, state_root: alloy_primitives::B256) { self.header.state_root = state_root; } /// Updates the block difficulty. 
- pub fn set_difficulty(&mut self, difficulty: U256) { + pub fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) { self.header.difficulty = difficulty; } } @@ -140,13 +159,14 @@ impl From> for Sealed { } #[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for SealedHeader { +impl<'a, H> arbitrary::Arbitrary<'a> for SealedHeader +where + H: for<'b> arbitrary::Arbitrary<'b> + Sealable, +{ fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let header = Header::arbitrary(u)?; + let header = H::arbitrary(u)?; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Self::new(header, seal)) + Ok(Self::seal(header)) } } @@ -212,10 +232,8 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { use super::super::{serde_bincode_compat, SealedHeader}; - use arbitrary::Arbitrary; use rand::Rng; - use reth_testing_utils::generators; use serde::{Deserialize, Serialize}; use serde_with::serde_as; @@ -229,7 +247,7 @@ pub(super) mod serde_bincode_compat { } let mut bytes = [0u8; 1024]; - generators::rng().fill(bytes.as_mut_slice()); + rand::thread_rng().fill(&mut bytes[..]); let data = Data { transaction: SealedHeader::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) .unwrap(), diff --git a/crates/primitives-traits/src/header/test_utils.rs b/crates/primitives-traits/src/header/test_utils.rs index c5f6e86b9db1..0e79f6cb462f 100644 --- a/crates/primitives-traits/src/header/test_utils.rs +++ b/crates/primitives-traits/src/header/test_utils.rs @@ -1,6 +1,6 @@ //! Test utilities to generate random valid headers. -use crate::Header; +use alloy_consensus::Header; use alloy_primitives::B256; use proptest::{arbitrary::any, prop_compose}; use proptest_arbitrary_interop::arb; diff --git a/crates/primitives-traits/src/integer_list.rs b/crates/primitives-traits/src/integer_list.rs index 682fa0cf822f..6fc6d75899ce 100644 --- a/crates/primitives-traits/src/integer_list.rs +++ b/crates/primitives-traits/src/integer_list.rs @@ -1,13 +1,9 @@ use alloc::vec::Vec; -use bytes::BufMut; use core::fmt; + +use bytes::BufMut; use derive_more::Deref; use roaring::RoaringTreemap; -use serde::{ - de::{SeqAccess, Visitor}, - ser::SerializeSeq, - Deserialize, Deserializer, Serialize, Serializer, -}; /// A data structure that uses Roaring Bitmaps to efficiently store a list of integers. /// @@ -90,11 +86,14 @@ impl IntegerList { } } -impl Serialize for IntegerList { +#[cfg(feature = "serde")] +impl serde::Serialize for IntegerList { fn serialize(&self, serializer: S) -> Result where - S: Serializer, + S: serde::Serializer, { + use serde::ser::SerializeSeq; + let mut seq = serializer.serialize_seq(Some(self.len() as usize))?; for e in &self.0 { seq.serialize_element(&e)?; @@ -103,8 +102,11 @@ impl Serialize for IntegerList { } } +#[cfg(feature = "serde")] struct IntegerListVisitor; -impl<'de> Visitor<'de> for IntegerListVisitor { + +#[cfg(feature = "serde")] +impl<'de> serde::de::Visitor<'de> for IntegerListVisitor { type Value = IntegerList; fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -113,7 +115,7 @@ impl<'de> Visitor<'de> for IntegerListVisitor { fn visit_seq(self, mut seq: E) -> Result where - E: SeqAccess<'de>, + E: serde::de::SeqAccess<'de>, { let mut list = IntegerList::empty(); while let Some(item) = seq.next_element()? 
{ @@ -123,10 +125,11 @@ impl<'de> Visitor<'de> for IntegerListVisitor { } } -impl<'de> Deserialize<'de> for IntegerList { +#[cfg(feature = "serde")] +impl<'de> serde::Deserialize<'de> for IntegerList { fn deserialize(deserializer: D) -> Result where - D: Deserializer<'de>, + D: serde::Deserializer<'de>, { deserializer.deserialize_byte_buf(IntegerListVisitor) } diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index ec93f2a21632..338f8f621e1a 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -14,7 +14,6 @@ extern crate alloc; /// Common constants. pub mod constants; - pub use constants::gas_units::{format_gas, format_gas_throughput}; /// Minimal account @@ -22,11 +21,13 @@ pub mod account; pub use account::{Account, Bytecode}; pub mod receipt; -pub use receipt::Receipt; +pub use receipt::{FullReceipt, Receipt}; pub mod transaction; pub use transaction::{ + execute::FillTxEnv, signed::{FullSignedTx, SignedTransaction}, + tx_type::{FullTxType, TxType}, FullTransaction, Transaction, }; @@ -34,10 +35,15 @@ mod integer_list; pub use integer_list::{IntegerList, IntegerListError}; pub mod block; -pub use block::{body::BlockBody, Block, FullBlock}; +pub use block::{ + body::{BlockBody, FullBlockBody}, + header::{BlockHeader, FullBlockHeader}, + Block, FullBlock, +}; +mod encoded; mod withdrawal; -pub use withdrawal::Withdrawal; +pub use encoded::WithEncoded; mod error; pub use error::{GotExpected, GotExpectedBoxed}; @@ -48,15 +54,11 @@ pub use alloy_primitives::{logs_bloom, Log, LogData}; mod storage; pub use storage::StorageEntry; -/// Transaction types -pub mod tx_type; -pub use tx_type::TxType; - /// Common header types pub mod header; #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] pub use header::test_utils; -pub use header::{BlockHeader, Header, HeaderError, SealedHeader}; +pub use header::{BlockWithParent, Header, HeaderError, SealedHeader}; /// Bincode-compatible serde implementations for common abstracted types in Reth. /// @@ -69,3 +71,50 @@ pub use header::{BlockHeader, Header, HeaderError, SealedHeader}; pub mod serde_bincode_compat { pub use super::header::{serde_bincode_compat as header, serde_bincode_compat::*}; } + +/// Heuristic size trait +pub mod size; +pub use size::InMemorySize; + +/// Node traits +pub mod node; +pub use node::{BodyTy, FullNodePrimitives, HeaderTy, NodePrimitives, ReceiptTy}; + +/// Helper trait that requires arbitrary implementation if the feature is enabled. +#[cfg(any(feature = "test-utils", feature = "arbitrary"))] +pub trait MaybeArbitrary: for<'a> arbitrary::Arbitrary<'a> {} +/// Helper trait that requires arbitrary implementation if the feature is enabled. +#[cfg(not(any(feature = "test-utils", feature = "arbitrary")))] +pub trait MaybeArbitrary {} + +#[cfg(any(feature = "test-utils", feature = "arbitrary"))] +impl MaybeArbitrary for T where T: for<'a> arbitrary::Arbitrary<'a> {} +#[cfg(not(any(feature = "test-utils", feature = "arbitrary")))] +impl MaybeArbitrary for T {} + +/// Helper trait that requires de-/serialize implementation since `serde` feature is enabled. +#[cfg(feature = "serde")] +pub trait MaybeSerde: serde::Serialize + for<'de> serde::Deserialize<'de> {} +/// Noop. Helper trait that would require de-/serialize implementation if `serde` feature were +/// enabled. 
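These cfg-gated marker traits are the pattern the rest of this diff leans on: downstream traits can state a single `MaybeSerde` or `MaybeCompact` bound instead of duplicating every definition under `cfg`. An illustrative consumer, with a hypothetical `Payload` type and `process` function, that compiles both with and without the `serde` feature:

```rust
use reth_primitives_traits::MaybeSerde;

/// Hypothetical primitive: serde impls are derived only when the feature is
/// on, yet the type satisfies `MaybeSerde` either way.
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
struct Payload {
    data: Vec<u8>,
}

/// One bound, two meanings: with `serde` enabled this demands real
/// (de)serialization support; without it, the bound is satisfied trivially.
fn process<T: MaybeSerde + Clone>(value: &T) -> T {
    value.clone()
}

fn main() {
    let payload = Payload { data: vec![1, 2, 3] };
    assert_eq!(process(&payload), payload);
}
```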
+#[cfg(not(feature = "serde"))] +pub trait MaybeSerde {} + +#[cfg(feature = "serde")] +impl MaybeSerde for T where T: serde::Serialize + for<'de> serde::Deserialize<'de> {} +#[cfg(not(feature = "serde"))] +impl MaybeSerde for T {} + +/// Helper trait that requires database encoding implementation since `reth-codec` feature is +/// enabled. +#[cfg(feature = "reth-codec")] +pub trait MaybeCompact: reth_codecs::Compact {} +/// Noop. Helper trait that would require database encoding implementation if `reth-codec` feature +/// were enabled. +#[cfg(not(feature = "reth-codec"))] +pub trait MaybeCompact {} + +#[cfg(feature = "reth-codec")] +impl MaybeCompact for T where T: reth_codecs::Compact {} +#[cfg(not(feature = "reth-codec"))] +impl MaybeCompact for T {} diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs new file mode 100644 index 000000000000..e610c094ba2d --- /dev/null +++ b/crates/primitives-traits/src/node.rs @@ -0,0 +1,92 @@ +use core::fmt; + +use crate::{ + Block, BlockBody, BlockHeader, FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, + FullSignedTx, FullTxType, MaybeArbitrary, MaybeSerde, Receipt, +}; + +/// Configures all the primitive types of the node. +pub trait NodePrimitives: + Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static +{ + /// Block primitive. + type Block: Block
; + /// Block header primitive. + type BlockHeader: BlockHeader; + /// Block body primitive. + type BlockBody: BlockBody; + /// Signed version of the transaction type. + type SignedTx: Send + + Sync + + Unpin + + Clone + + fmt::Debug + + PartialEq + + Eq + + MaybeSerde + + MaybeArbitrary + + 'static; + /// Transaction envelope type ID. + type TxType: Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + MaybeArbitrary + + 'static; + /// A receipt. + type Receipt: Receipt; +} +/// Helper trait that sets trait bounds on [`NodePrimitives`]. +pub trait FullNodePrimitives +where + Self: NodePrimitives< + Block: FullBlock
, + BlockHeader: FullBlockHeader, + BlockBody: FullBlockBody, + SignedTx: FullSignedTx, + TxType: FullTxType, + Receipt: FullReceipt, + > + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + 'static, +{ +} + +impl FullNodePrimitives for T where + T: NodePrimitives< + Block: FullBlock
, + BlockHeader: FullBlockHeader, + BlockBody: FullBlockBody, + SignedTx: FullSignedTx, + TxType: FullTxType, + Receipt: FullReceipt, + > + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + 'static +{ +} + +/// Helper adapter type for accessing [`NodePrimitives`] block header types. +pub type HeaderTy = ::BlockHeader; + +/// Helper adapter type for accessing [`NodePrimitives`] block body types. +pub type BodyTy = ::BlockBody; + +/// Helper adapter type for accessing [`NodePrimitives`] receipt types. +pub type ReceiptTy = ::Receipt; diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index 5c317dc49a23..e2af40c447ed 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -1,23 +1,53 @@ //! Receipt abstraction +use alloc::vec::Vec; +use core::fmt; + use alloy_consensus::TxReceipt; -use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; +use alloy_primitives::B256; + +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; /// Helper trait that unifies all behaviour required by receipt to support full node operations. -pub trait FullReceipt: Receipt + Compact {} +pub trait FullReceipt: Receipt + MaybeCompact {} -impl FullReceipt for T where T: Receipt + Compact {} +impl FullReceipt for T where T: ReceiptExt + MaybeCompact {} /// Abstraction of a receipt. +#[auto_impl::auto_impl(&, Arc)] pub trait Receipt: - TxReceipt + Send + + Sync + + Unpin + + Clone + Default + + fmt::Debug + + TxReceipt + alloy_rlp::Encodable + alloy_rlp::Decodable - + Serialize - + for<'de> Deserialize<'de> + + MaybeSerde + + InMemorySize + + MaybeArbitrary { /// Returns transaction type. fn tx_type(&self) -> u8; } + +/// Extension of [`Receipt`] used in block execution. +pub trait ReceiptExt: Receipt { + /// Calculates the receipts root of the given receipts. + fn receipts_root(receipts: &[&Self]) -> B256; +} + +/// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). +pub fn gas_spent_by_transactions(receipts: I) -> Vec<(u64, u64)> +where + I: IntoIterator, + T: TxReceipt, +{ + receipts + .into_iter() + .enumerate() + .map(|(id, receipt)| (id as u64, receipt.cumulative_gas_used() as u64)) + .collect() +} diff --git a/crates/primitives-traits/src/size.rs b/crates/primitives-traits/src/size.rs new file mode 100644 index 000000000000..4d721dd00b30 --- /dev/null +++ b/crates/primitives-traits/src/size.rs @@ -0,0 +1,66 @@ +use alloy_consensus::{Header, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; +use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; + +/// Trait for calculating a heuristic for the in-memory size of a struct. +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait InMemorySize { + /// Returns a heuristic for the in-memory size of a struct. + fn size(&self) -> usize; +} + +impl InMemorySize for alloy_consensus::Signed { + fn size(&self) -> usize { + T::size(self.tx()) + self.signature().size() + self.hash().size() + } +} + +/// Implement `InMemorySize` for a type with `size_of`. +macro_rules! impl_in_mem_size_size_of { + ($($ty:ty),*) => { + $( + impl InMemorySize for $ty { + #[inline] + fn size(&self) -> usize { + core::mem::size_of::() + } + } + )* + }; +} + +impl_in_mem_size_size_of!(Signature, TxHash); + +/// Implement `InMemorySize` for a type with a native `size` method. +macro_rules!
impl_in_mem_size { + ($($ty:ty),*) => { + $( + impl InMemorySize for $ty { + #[inline] + fn size(&self) -> usize { + Self::size(self) + } + } + )* + }; +} + +impl_in_mem_size!(Header, TxLegacy, TxEip2930, TxEip1559, TxEip7702, TxEip4844); + +#[cfg(test)] +mod tests { + use super::*; + + // ensures we don't have any recursion in the `InMemorySize` impls + #[test] + fn no_in_memory_no_recursion() { + fn assert_no_recursion() { + let _ = T::default().size(); + } + assert_no_recursion::
(); + assert_no_recursion::(); + assert_no_recursion::(); + assert_no_recursion::(); + assert_no_recursion::(); + assert_no_recursion::(); + } +} diff --git a/crates/primitives-traits/src/storage.rs b/crates/primitives-traits/src/storage.rs index 39b6155ee284..c6b9b1e11c75 100644 --- a/crates/primitives-traits/src/storage.rs +++ b/crates/primitives-traits/src/storage.rs @@ -1,13 +1,12 @@ use alloy_primitives::{B256, U256}; -use reth_codecs::{add_arbitrary_tests, Compact}; -use serde::{Deserialize, Serialize}; /// Account storage entry. /// /// `key` is the subkey when used as a value in the `StorageChangeSets` table. -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)] +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct StorageEntry { /// Storage key. pub key: B256, @@ -31,7 +30,8 @@ impl From<(B256, U256)> for StorageEntry { // NOTE: Removing reth_codec and manually encode subkey // and compress second part of the value. If we have compression // over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey -impl Compact for StorageEntry { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StorageEntry { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, diff --git a/crates/primitives-traits/src/transaction/execute.rs b/crates/primitives-traits/src/transaction/execute.rs new file mode 100644 index 000000000000..c7350f1941be --- /dev/null +++ b/crates/primitives-traits/src/transaction/execute.rs @@ -0,0 +1,10 @@ +//! Abstraction of an executable transaction. + +use alloy_primitives::Address; +use revm_primitives::TxEnv; + +/// Loads transaction into execution environment. +pub trait FillTxEnv { + /// Fills [`TxEnv`] with an [`Address`] and transaction. + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address); +} diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index a1ad81ab3270..3a0871c99a43 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -1,61 +1,80 @@ //! Transaction abstraction -use core::{fmt::Debug, hash::Hash}; +pub mod execute; +pub mod signed; +pub mod tx_type; -use alloy_primitives::{TxKind, B256}; +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; +use core::{fmt, hash::Hash}; -use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; +use alloy_consensus::constants::{ + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, +}; -pub mod signed; +/// Helper trait that unifies all behaviour required by transaction to support full node operations. +pub trait FullTransaction: Transaction + MaybeCompact {} + +impl FullTransaction for T where T: Transaction + MaybeCompact {} -#[allow(dead_code)] /// Abstraction of a transaction. 
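Outside the macro-covered alloy types, implementing `InMemorySize` by hand follows the same heuristic used above: the fixed-size footprint via `size_of` plus whatever heap capacity the value owns. A minimal sketch with a hypothetical `CachedCode` type:

```rust
use reth_primitives_traits::InMemorySize;

/// Hypothetical type: a fixed-size key next to an owned code blob.
struct CachedCode {
    key: [u8; 32],
    code: Vec<u8>,
}

impl InMemorySize for CachedCode {
    #[inline]
    fn size(&self) -> usize {
        // Inline footprint plus the heap allocation reserved for `code`.
        core::mem::size_of::<Self>() + self.code.capacity()
    }
}

fn main() {
    let cached = CachedCode { key: [0u8; 32], code: Vec::with_capacity(128) };
    assert!(cached.size() >= 128 + 32);
}
```

The `Transaction` abstraction that follows lists this same `InMemorySize` among its bounds.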
pub trait Transaction: - Debug - + Default + Send + + Sync + + Unpin + Clone + + fmt::Debug + Eq + PartialEq + Hash - + Serialize - + alloy_rlp::Encodable - + alloy_rlp::Decodable - + for<'de> Deserialize<'de> + alloy_consensus::Transaction + + InMemorySize + + MaybeSerde + MaybeArbitrary { - /// Heavy operation that return signature hash over rlp encoded transaction. - /// It is only for signature signing or signer recovery. - fn signature_hash(&self) -> B256; - - /// Gets the transaction's [`TxKind`], which is the address of the recipient or - /// [`TxKind::Create`] if the transaction is a contract creation. - fn kind(&self) -> TxKind; + /// Returns true if the transaction is a legacy transaction. + #[inline] + fn is_legacy(&self) -> bool { + self.ty() == LEGACY_TX_TYPE_ID + } - /// Returns true if the tx supports dynamic fees - fn is_dynamic_fee(&self) -> bool; + /// Returns true if the transaction is an EIP-2930 transaction. + #[inline] + fn is_eip2930(&self) -> bool { + self.ty() == EIP2930_TX_TYPE_ID + } - /// Returns the effective gas price for the given base fee. - fn effective_gas_price(&self, base_fee: Option) -> u128; + /// Returns true if the transaction is an EIP-1559 transaction. + #[inline] + fn is_eip1559(&self) -> bool { + self.ty() == EIP1559_TX_TYPE_ID + } - /// This encodes the transaction _without_ the signature, and is only suitable for creating a - /// hash intended for signing. - fn encode_without_signature(&self, out: &mut dyn bytes::BufMut); + /// Returns true if the transaction is an EIP-4844 transaction. + #[inline] + fn is_eip4844(&self) -> bool { + self.ty() == EIP4844_TX_TYPE_ID + } - /// Calculates a heuristic for the in-memory size of the [Transaction]. - fn size(&self) -> usize; + /// Returns true if the transaction is an EIP-7702 transaction. + #[inline] + fn is_eip7702(&self) -> bool { + self.ty() == EIP7702_TX_TYPE_ID + } } -#[cfg(not(feature = "arbitrary"))] -/// Helper trait that requires arbitrary implementation if the feature is enabled. -pub trait MaybeArbitrary {} - -#[cfg(feature = "arbitrary")] -/// Helper trait that requires arbitrary implementation if the feature is enabled. -pub trait MaybeArbitrary: for<'a> arbitrary::Arbitrary<'a> {} - -/// Helper trait that unifies all behaviour required by transaction to support full node operations. -pub trait FullTransaction: Transaction + Compact {} - -impl FullTransaction for T where T: Transaction + Compact {} +impl Transaction for T where + T: Send + + Sync + + Unpin + + Clone + + fmt::Debug + + Eq + + PartialEq + + Hash + + alloy_consensus::Transaction + + InMemorySize + + MaybeSerde + + MaybeArbitrary +{ +} diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index c40403865dff..5e0a91b4da2b 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -1,46 +1,49 @@ //! API of a signed transaction. 
-use alloc::fmt; -use core::hash::Hash; -use reth_codecs::Compact; - -use alloy_consensus::Transaction; +use crate::{FillTxEnv, InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde, TxType}; +use alloc::{fmt, vec::Vec}; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; -use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, TxHash, B256}; -use revm_primitives::TxEnv; +use alloy_primitives::{keccak256, Address, PrimitiveSignature, TxHash, B256}; +use core::hash::Hash; /// Helper trait that unifies all behaviour required by block to support full node operations. -pub trait FullSignedTx: SignedTransaction + Compact {} +pub trait FullSignedTx: SignedTransaction + FillTxEnv + MaybeCompact {} -impl FullSignedTx for T where T: SignedTransaction + Compact {} +impl FullSignedTx for T where T: SignedTransaction + FillTxEnv + MaybeCompact {} /// A signed transaction. +#[auto_impl::auto_impl(&, Arc)] pub trait SignedTransaction: - fmt::Debug + Send + + Sync + + Unpin + Clone + + fmt::Debug + PartialEq + Eq + Hash - + Send - + Sync - + serde::Serialize - + for<'a> serde::Deserialize<'a> + alloy_rlp::Encodable + alloy_rlp::Decodable + Encodable2718 + Decodable2718 + + alloy_consensus::Transaction + + MaybeSerde + + MaybeArbitrary + + InMemorySize { - /// Transaction type that is signed. - type Transaction: Transaction; + /// Transaction envelope type ID. + type Type: TxType; + + /// Returns the transaction type. + fn tx_type(&self) -> Self::Type { + Self::Type::try_from(self.ty()).expect("should decode tx type id") + } /// Returns reference to transaction hash. fn tx_hash(&self) -> &TxHash; - /// Returns reference to transaction. - fn transaction(&self) -> &Self::Transaction; - /// Returns reference to signature. - fn signature(&self) -> &Signature; + fn signature(&self) -> &PrimitiveSignature; /// Recover signer from signature and hash. /// @@ -58,20 +61,17 @@ pub trait SignedTransaction: /// /// Returns `None` if the transaction's signature is invalid, see also /// `reth_primitives::transaction::recover_signer_unchecked`. - fn recover_signer_unchecked(&self) -> Option
; + fn recover_signer_unchecked(&self) -> Option
{ + self.recover_signer_unchecked_with_buf(&mut Vec::new()) + } - /// Create a new signed transaction from a transaction and its signature. - /// - /// This will also calculate the transaction hash using its encoding. - fn from_transaction_and_signature(transaction: Self::Transaction, signature: Signature) - -> Self; + /// Same as [`Self::recover_signer_unchecked`] but receives a buffer to operate on. This is used + /// during batch recovery to avoid allocating a new buffer for each transaction. + fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec) -> Option
; /// Calculate transaction hash, eip2718 transaction does not contain rlp header and starts with /// tx type. fn recalculate_hash(&self) -> B256 { keccak256(self.encoded_2718()) } - - /// Fills [`TxEnv`] with an [`Address`] and transaction. - fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address); } diff --git a/crates/primitives-traits/src/transaction/tx_type.rs b/crates/primitives-traits/src/transaction/tx_type.rs new file mode 100644 index 000000000000..d2caebe4c9f1 --- /dev/null +++ b/crates/primitives-traits/src/transaction/tx_type.rs @@ -0,0 +1,62 @@ +//! Abstraction of transaction envelope type ID. + +use core::fmt; + +use alloy_primitives::{U64, U8}; + +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact}; + +/// Helper trait that unifies all behaviour required by transaction type ID to support full node +/// operations. +pub trait FullTxType: TxType + MaybeCompact {} + +impl FullTxType for T where T: TxType + MaybeCompact {} + +/// Trait representing the behavior of a transaction type. +pub trait TxType: + Send + + Sync + + Unpin + + Clone + + Copy + + Default + + fmt::Debug + + fmt::Display + + PartialEq + + Eq + + PartialEq + + Into + + Into + + TryFrom + + TryFrom + + TryFrom + + alloy_rlp::Encodable + + alloy_rlp::Decodable + + InMemorySize + + MaybeArbitrary +{ + /// Returns `true` if this is a legacy transaction. + fn is_legacy(&self) -> bool; + + /// Returns `true` if this is an eip-2930 transaction. + fn is_eip2930(&self) -> bool; + + /// Returns `true` if this is an eip-1559 transaction. + fn is_eip1559(&self) -> bool; + + /// Returns `true` if this is an eip-4844 transaction. + fn is_eip4844(&self) -> bool; + + /// Returns `true` if this is an eip-7702 transaction. + fn is_eip7702(&self) -> bool; + + /// Returns whether this transaction type can be __broadcasted__ as a full transaction over the + /// network. + /// + /// Some transactions are not broadcastable as objects and are only allowed to be broadcasted + /// as hashes, e.g. because they are missing context (e.g. the blob sidecar). + fn is_broadcastable_in_full(&self) -> bool { + // EIP-4844 transactions are not broadcastable in full, only hashes are allowed. + !self.is_eip4844() + } +} diff --git a/crates/primitives-traits/src/tx_type.rs b/crates/primitives-traits/src/tx_type.rs deleted file mode 100644 index aebf7584fe9c..000000000000 --- a/crates/primitives-traits/src/tx_type.rs +++ /dev/null @@ -1,28 +0,0 @@ -use alloy_eips::eip2718::Eip2718Error; -use alloy_primitives::{U64, U8}; -use alloy_rlp::{Decodable, Encodable}; -use core::fmt::{Debug, Display}; - -/// Trait representing the behavior of a transaction type. -pub trait TxType: - Into - + Into - + PartialEq - + Eq - + PartialEq - + TryFrom - + TryFrom - + TryFrom - + From - + Debug - + Display - + Clone - + Copy - + Default - + Encodable - + Decodable - + Send - + Sync - + 'static -{ -} diff --git a/crates/primitives-traits/src/withdrawal.rs b/crates/primitives-traits/src/withdrawal.rs index 699229684eca..0849ab6202e6 100644 --- a/crates/primitives-traits/src/withdrawal.rs +++ b/crates/primitives-traits/src/withdrawal.rs @@ -1,12 +1,8 @@ //! [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895) Withdrawal types. -/// Re-export from `alloy_eips`.
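The one piece of behaviour the new `TxType` trait ships is the `is_broadcastable_in_full` default. A toy envelope-ID enum makes the rule concrete (hypothetical; a real implementor must also satisfy the `Into`/`TryFrom`/RLP/`InMemorySize` bounds listed above):

```rust
/// Hypothetical envelope-ID enum, standing in for a real `TxType` implementor.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ToyTxType {
    Legacy,
    Eip1559,
    Eip4844,
}

impl ToyTxType {
    fn is_eip4844(&self) -> bool {
        matches!(self, Self::Eip4844)
    }

    /// Mirrors the trait's default: blob transactions travel by hash only,
    /// because the blob sidecar is not carried in the block body.
    fn is_broadcastable_in_full(&self) -> bool {
        !self.is_eip4844()
    }
}

fn main() {
    assert!(ToyTxType::Legacy.is_broadcastable_in_full());
    assert!(ToyTxType::Eip1559.is_broadcastable_in_full());
    assert!(!ToyTxType::Eip4844.is_broadcastable_in_full());
}
```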
-#[doc(inline)] -pub use alloy_eips::eip4895::Withdrawal; - #[cfg(test)] mod tests { - use super::*; + use alloy_eips::eip4895::Withdrawal; use alloy_primitives::Address; use alloy_rlp::{RlpDecodable, RlpEncodable}; use proptest::proptest; diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 04d96aa369a2..9787c9f3a6a9 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -13,10 +13,9 @@ workspace = true [dependencies] # reth -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde"] } reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true -reth-trie-common.workspace = true revm-primitives = { workspace = true, features = ["serde"] } reth-codecs = { workspace = true, optional = true } @@ -28,6 +27,7 @@ alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-rpc-types = { workspace = true, optional = true } alloy-serde = { workspace = true, optional = true } alloy-eips = { workspace = true, features = ["serde"] } +alloy-trie = { workspace = true, features = ["serde"] } # optimism op-alloy-rpc-types = { workspace = true, optional = true } @@ -62,10 +62,11 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] # eth -reth-chainspec.workspace = true +reth-chainspec = { workspace = true, features = ["arbitrary"] } reth-codecs = { workspace = true, features = ["test-utils"] } reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-testing-utils.workspace = true +reth-trie-common = { workspace = true, features = ["arbitrary"] } revm-primitives = { workspace = true, features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } @@ -79,6 +80,7 @@ proptest.workspace = true rand.workspace = true serde_json.workspace = true test-fuzz.workspace = true +rstest.workspace = true criterion.workspace = true pprof = { workspace = true, features = [ @@ -101,8 +103,14 @@ std = [ "revm-primitives/std", "secp256k1?/std", "serde/std", + "alloy-trie/std" +] +reth-codec = [ + "dep:reth-codecs", + "dep:zstd", + "dep:modular-bitfield", "std", + "reth-primitives-traits/reth-codec", ] -reth-codec = ["dep:reth-codecs", "dep:zstd", "dep:modular-bitfield", "std"] asm-keccak = ["alloy-primitives/asm-keccak", "revm-primitives/asm-keccak"] arbitrary = [ "dep:arbitrary", @@ -114,14 +122,15 @@ arbitrary = [ "revm-primitives/arbitrary", "secp256k1", "reth-chainspec/arbitrary", - "reth-trie-common/arbitrary", "alloy-consensus/arbitrary", "alloy-primitives/arbitrary", "alloy-rpc-types?/arbitrary", "alloy-serde?/arbitrary", "op-alloy-consensus?/arbitrary", "op-alloy-rpc-types?/arbitrary", - "reth-codecs?/arbitrary" + "reth-codecs?/arbitrary", + "alloy-trie/arbitrary", + "reth-trie-common/arbitrary" ] secp256k1 = ["dep:secp256k1"] c-kzg = [ @@ -146,13 +155,15 @@ test-utils = [ "reth-chainspec/test-utils", "reth-codecs?/test-utils", "reth-trie-common/test-utils", + "arbitrary", ] serde-bincode-compat = [ + "serde_with", + "alloy-eips/serde-bincode-compat", "alloy-consensus/serde-bincode-compat", "op-alloy-consensus?/serde-bincode-compat", "reth-primitives-traits/serde-bincode-compat", - "serde_with", - "alloy-eips/serde-bincode-compat", + "reth-trie-common/serde-bincode-compat", ] [[bench]] diff --git a/crates/primitives/benches/recover_ecdsa_crit.rs b/crates/primitives/benches/recover_ecdsa_crit.rs index 8e8e279b2a4a..9273d71f6f56 100644 --- a/crates/primitives/benches/recover_ecdsa_crit.rs +++ 
b/crates/primitives/benches/recover_ecdsa_crit.rs @@ -4,6 +4,7 @@ use alloy_rlp::Decodable; use criterion::{criterion_group, criterion_main, Criterion}; use pprof::criterion::{Output, PProfProfiler}; use reth_primitives::TransactionSigned; +use reth_primitives_traits::SignedTransaction; /// Benchmarks the recovery of the public key from the ECDSA message using criterion. pub fn criterion_benchmark(c: &mut Criterion) { diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 462b27f9c73c..a72c83996c01 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -156,7 +156,7 @@ impl TryFrom for TransactionSigned { _ => return Err(ConversionError::Custom("unknown transaction type".to_string())), }; - Ok(Self { transaction, signature, hash }) + Ok(Self { transaction, signature, hash: hash.into() }) } } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 54bcb27293cb..5618d81bd8fc 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,11 +1,16 @@ -use crate::{GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered}; +use crate::{ + traits::BlockExt, transaction::SignedTransactionIntoRecoveredExt, BlockBodyTxExt, GotExpected, + SealedHeader, TransactionSigned, TransactionSignedEcRecovered, +}; use alloc::vec::Vec; +use alloy_consensus::Header; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; -use alloy_primitives::{Address, Bytes, Sealable, B256}; +use alloy_primitives::{Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy}; +use reth_primitives_traits::{BlockBody as _, InMemorySize, SignedTransaction}; use serde::{Deserialize, Serialize}; /// Ethereum full block. @@ -21,73 +26,31 @@ pub struct Block { pub body: BlockBody, } -impl Block { - /// Calculate the header hash and seal the block so that it can't be changed. - pub fn seal_slow(self) -> SealedBlock { - let sealed = self.header.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedBlock { header: SealedHeader::new(header, seal), body: self.body } - } +impl reth_primitives_traits::Block for Block { + type Header = Header; + type Body = BlockBody; - /// Seal the block with a known hash. - /// - /// WARNING: This method does not perform validation whether the hash is correct. - pub fn seal(self, hash: B256) -> SealedBlock { - SealedBlock { header: SealedHeader::new(self.header, hash), body: self.body } - } - - /// Expensive operation that recovers transaction signer. See [`SealedBlockWithSenders`]. - pub fn senders(&self) -> Option> { - self.body.recover_signers() + fn new(header: Self::Header, body: Self::Body) -> Self { + Self { header, body } } - /// Transform into a [`BlockWithSenders`]. - /// - /// # Panics - /// - /// If the number of senders does not match the number of transactions in the block - /// and the signer recovery for one of the transactions fails. - /// - /// Note: this is expected to be called with blocks read from disk. - #[track_caller] - pub fn with_senders_unchecked(self, senders: Vec
) -> BlockWithSenders { - self.try_with_senders_unchecked(senders).expect("stored block is valid") + fn header(&self) -> &Self::Header { + &self.header } - /// Transform into a [`BlockWithSenders`] using the given senders. - /// - /// If the number of senders does not match the number of transactions in the block, this falls - /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. - /// See also [`TransactionSigned::recover_signer_unchecked`] - /// - /// Returns an error if a signature is invalid. - #[track_caller] - pub fn try_with_senders_unchecked( - self, - senders: Vec
, - ) -> Result { - let senders = if self.body.transactions.len() == senders.len() { - senders - } else { - let Some(senders) = self.body.recover_signers() else { return Err(self) }; - senders - }; - - Ok(BlockWithSenders { block: self, senders }) + fn body(&self) -> &Self::Body { + &self.body } - /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained - /// transactions. - /// - /// Returns `None` if a transaction is invalid. - pub fn with_recovered_senders(self) -> Option { - let senders = self.senders()?; - Some(BlockWithSenders { block: self, senders }) + fn split(self) -> (Self::Header, Self::Body) { + (self.header, self.body) } +} +impl InMemorySize for Block { /// Calculates a heuristic for the in-memory size of the [`Block`]. #[inline] - pub fn size(&self) -> usize { + fn size(&self) -> usize { self.header.size() + self.body.size() } } @@ -189,39 +152,44 @@ impl<'a> arbitrary::Arbitrary<'a> for Block { /// Sealed block with senders recovered from transactions. #[derive(Debug, Clone, PartialEq, Eq, Default, Deref, DerefMut)] -pub struct BlockWithSenders { +pub struct BlockWithSenders { /// Block #[deref] #[deref_mut] - pub block: Block, + pub block: B, /// List of senders that match the transactions in the block pub senders: Vec
, } -impl BlockWithSenders { +impl BlockWithSenders { + /// New block with senders + pub const fn new_unchecked(block: B, senders: Vec
) -> Self { + Self { block, senders } + } + /// New block with senders. Returns `None` if the number of transactions and senders does not match - pub fn new(block: Block, senders: Vec
) -> Option { - (block.body.transactions.len() == senders.len()).then_some(Self { block, senders }) + pub fn new(block: B, senders: Vec
) -> Option { + (block.body().transactions().len() == senders.len()).then_some(Self { block, senders }) } /// Seal the block with a known hash. /// /// WARNING: This method does not perform validation whether the hash is correct. #[inline] - pub fn seal(self, hash: B256) -> SealedBlockWithSenders { + pub fn seal(self, hash: B256) -> SealedBlockWithSenders { let Self { block, senders } = self; - SealedBlockWithSenders { block: block.seal(hash), senders } + SealedBlockWithSenders:: { block: block.seal(hash), senders } } /// Calculate the header hash and seal the block with senders so that it can't be changed. #[inline] - pub fn seal_slow(self) -> SealedBlockWithSenders { + pub fn seal_slow(self) -> SealedBlockWithSenders { SealedBlockWithSenders { block: self.block.seal_slow(), senders: self.senders } } /// Split Structure to its components #[inline] - pub fn into_components(self) -> (Block, Vec
) { + pub fn into_components(self) -> (B, Vec
) { (self.block, self.senders) } @@ -229,18 +197,27 @@ impl BlockWithSenders { #[inline] pub fn transactions_with_sender( &self, - ) -> impl Iterator + '_ { - self.senders.iter().zip(self.block.body.transactions()) + ) -> impl Iterator::Transaction)> + + '_ { + self.senders.iter().zip(self.block.body().transactions()) } /// Returns an iterator over all transactions in the chain. #[inline] pub fn into_transactions_ecrecovered( self, - ) -> impl Iterator { + ) -> impl Iterator< + Item = TransactionSignedEcRecovered< + ::Transaction, + >, + > + where + ::Transaction: SignedTransaction, + { self.block - .body - .transactions + .split() + .1 + .into_transactions() .into_iter() .zip(self.senders) .map(|(tx, sender)| tx.with_signer(sender)) @@ -248,30 +225,31 @@ impl BlockWithSenders { /// Consumes the block and returns the transactions of the block. #[inline] - pub fn into_transactions(self) -> Vec { - self.block.body.transactions + pub fn into_transactions( + self, + ) -> Vec<::Transaction> { + self.block.split().1.into_transactions() } } /// Sealed Ethereum full block. /// /// Withdrawals can be optionally included at the end of the RLP encoded message. -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp, 32))] -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Deref, DerefMut)] -pub struct SealedBlock { +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Deref, DerefMut)] +pub struct SealedBlock { /// Locked block header. #[deref] #[deref_mut] - pub header: SealedHeader, + pub header: SealedHeader, /// Block body. - pub body: BlockBody, + pub body: B, } -impl SealedBlock { +impl SealedBlock { /// Create a new sealed block instance using the sealed header and block body. #[inline] - pub const fn new(header: SealedHeader, body: BlockBody) -> Self { + pub const fn new(header: SealedHeader, body: B) -> Self { Self { header, body } } @@ -281,24 +259,37 @@ impl SealedBlock { self.header.hash() } - /// Splits the sealed block into underlying components - #[inline] - pub fn split(self) -> (SealedHeader, Vec, Vec
) { - (self.header, self.body.transactions, self.body.ommers) - } - /// Splits the [`BlockBody`] and [`SealedHeader`] into separate components #[inline] - pub fn split_header_body(self) -> (SealedHeader, BlockBody) { + pub fn split_header_body(self) -> (SealedHeader, B) { (self.header, self.body) } +} +impl SealedBlock { /// Returns an iterator over all blob transactions of the block #[inline] pub fn blob_transactions_iter(&self) -> impl Iterator + '_ { self.body.blob_transactions_iter() } + /// Calculates the total gas used by blob transactions in the sealed block. + pub fn blob_gas_used(&self) -> u64 { + self.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() + } + + /// Returns whether or not the block contains any blob transactions. + #[inline] + pub fn has_blob_transactions(&self) -> bool { + self.body.has_blob_transactions() + } + + /// Returns whether or not the block contains any eip-7702 transactions. + #[inline] + pub fn has_eip7702_transactions(&self) -> bool { + self.body.has_eip7702_transactions() + } + /// Returns only the blob transactions, if any, from the block body. #[inline] pub fn blob_transactions(&self) -> Vec<&TransactionSigned> { @@ -312,25 +303,42 @@ impl SealedBlock { .filter_map(|tx| tx.as_eip4844().map(|blob_tx| &blob_tx.blob_versioned_hashes)) .flatten() } +} - /// Returns all blob versioned hashes from the block body. +impl SealedBlock +where + H: reth_primitives_traits::BlockHeader, + B: reth_primitives_traits::BlockBody, +{ + /// Splits the sealed block into underlying components #[inline] - pub fn blob_versioned_hashes(&self) -> Vec<&B256> { - self.blob_versioned_hashes_iter().collect() + pub fn split(self) -> (SealedHeader, B) { + (self.header, self.body) } /// Expensive operation that recovers transaction signer. See [`SealedBlockWithSenders`]. - pub fn senders(&self) -> Option> { + pub fn senders(&self) -> Option> + where + B::Transaction: SignedTransaction, + { self.body.recover_signers() } /// Seal sealed block with recovered transaction senders. - pub fn seal_with_senders(self) -> Option { + pub fn seal_with_senders(self) -> Option> + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { self.try_seal_with_senders().ok() } /// Seal sealed block with recovered transaction senders. - pub fn try_seal_with_senders(self) -> Result { + pub fn try_seal_with_senders(self) -> Result, Self> + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { match self.senders() { Some(senders) => Ok(SealedBlockWithSenders { block: self, senders }), None => Err(self), @@ -344,7 +352,11 @@ impl SealedBlock { /// If the number of senders does not match the number of transactions in the block /// and the signer recovery for one of the transactions fails. #[track_caller] - pub fn with_senders_unchecked(self, senders: Vec
) -> SealedBlockWithSenders { + pub fn with_senders_unchecked(self, senders: Vec
) -> SealedBlockWithSenders + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { self.try_with_senders_unchecked(senders).expect("stored block is valid") } @@ -356,14 +368,18 @@ impl SealedBlock { /// /// Returns an error if a signature is invalid. #[track_caller] - pub fn try_with_senders_unchecked( + pub fn try_with_senders_unchecked( self, senders: Vec
, - ) -> Result { - let senders = if self.body.transactions.len() == senders.len() { + ) -> Result, Self> + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { + let senders = if self.body.transactions().len() == senders.len() { senders } else { - let Some(senders) = self.body.recover_signers() else { return Err(self) }; + let Some(senders) = self.body.recover_signers_unchecked() else { return Err(self) }; senders }; @@ -371,31 +387,11 @@ impl SealedBlock { } /// Unseal the block - pub fn unseal(self) -> Block { - Block { header: self.header.unseal(), body: self.body } - } - - /// Calculates a heuristic for the in-memory size of the [`SealedBlock`]. - #[inline] - pub fn size(&self) -> usize { - self.header.size() + self.body.size() - } - - /// Calculates the total gas used by blob transactions in the sealed block. - pub fn blob_gas_used(&self) -> u64 { - self.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() - } - - /// Returns whether or not the block contains any blob transactions. - #[inline] - pub fn has_blob_transactions(&self) -> bool { - self.body.has_blob_transactions() - } - - /// Returns whether or not the block contains any eip-7702 transactions. - #[inline] - pub fn has_eip7702_transactions(&self) -> bool { - self.body.has_eip7702_transactions() + pub fn unseal(self) -> Block + where + Block: reth_primitives_traits::Block
, + { + Block::new(self.header.unseal(), self.body) } /// Ensures that the transaction root in the block header is valid. @@ -410,13 +406,16 @@ impl SealedBlock { /// /// Returns `Err(error)` if the transaction root validation fails, providing a `GotExpected` /// error containing the calculated and expected roots. - pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected> { + pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected> + where + B::Transaction: Encodable2718, + { let calculated_root = self.body.calculate_tx_root(); - if self.header.transactions_root != calculated_root { + if self.header.transactions_root() != calculated_root { return Err(GotExpected { got: calculated_root, - expected: self.header.transactions_root, + expected: self.header.transactions_root(), }) } @@ -425,8 +424,18 @@ impl SealedBlock { /// Returns a vector of transactions RLP encoded with /// [`alloy_eips::eip2718::Encodable2718::encoded_2718`]. - pub fn raw_transactions(&self) -> Vec { - self.body.transactions().map(|tx| tx.encoded_2718().into()).collect() + pub fn raw_transactions(&self) -> Vec + where + B::Transaction: Encodable2718, + { + self.body.transactions().iter().map(|tx| tx.encoded_2718().into()).collect() + } +} + +impl InMemorySize for SealedBlock { + #[inline] + fn size(&self) -> usize { + self.header.size() + self.body.size() } } @@ -436,39 +445,102 @@ impl From for Block { } } +impl Default for SealedBlock +where + SealedHeader: Default, + B: Default, +{ + fn default() -> Self { + Self { header: Default::default(), body: Default::default() } + } +} + +impl reth_primitives_traits::Block for SealedBlock +where + H: reth_primitives_traits::BlockHeader + 'static, + B: reth_primitives_traits::BlockBody + 'static, + Self: Serialize + for<'a> Deserialize<'a>, +{ + type Header = H; + type Body = B; + + fn new(header: Self::Header, body: Self::Body) -> Self { + Self { header: SealedHeader::seal(header), body } + } + + fn header(&self) -> &Self::Header { + self.header.header() + } + + fn body(&self) -> &Self::Body { + &self.body + } + + fn split(self) -> (Self::Header, Self::Body) { + (self.header.unseal(), self.body) + } +} + +#[cfg(any(test, feature = "arbitrary"))] +impl<'a, H, B> arbitrary::Arbitrary<'a> for SealedBlock +where + SealedHeader: arbitrary::Arbitrary<'a>, + B: arbitrary::Arbitrary<'a>, +{ + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + Ok(Self { header: u.arbitrary()?, body: u.arbitrary()? }) + } +} + +/// A helper type alias to construct [`SealedBlock`] from a [`reth_primitives_traits::Block`]. +pub type SealedBlockFor = SealedBlock< + ::Header, + ::Body, +>; /// Sealed block with senders recovered from transactions. -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Deref, DerefMut)] -pub struct SealedBlockWithSenders { +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Deref, DerefMut)] +pub struct SealedBlockWithSenders { /// Sealed block #[deref] #[deref_mut] - pub block: SealedBlock, + #[serde(bound = "SealedBlock: Serialize + serde::de::DeserializeOwned")] + pub block: SealedBlock, /// List of senders that match transactions from block. pub senders: Vec
, } -impl SealedBlockWithSenders { +impl Default for SealedBlockWithSenders { + fn default() -> Self { + Self { block: SealedBlock::default(), senders: Default::default() } + } +} + +impl SealedBlockWithSenders { /// New sealed block with senders. Returns `None` if the number of transactions and senders does not match - pub fn new(block: SealedBlock, senders: Vec
) -> Option { - (block.body.transactions.len() == senders.len()).then_some(Self { block, senders }) + pub fn new(block: SealedBlock, senders: Vec
) -> Option { + (block.body.transactions().len() == senders.len()).then_some(Self { block, senders }) } +} +impl SealedBlockWithSenders { /// Split Structure to its components #[inline] - pub fn into_components(self) -> (SealedBlock, Vec
) { + pub fn into_components(self) -> (SealedBlock, Vec
) { (self.block, self.senders) } /// Returns the unsealed [`BlockWithSenders`] #[inline] - pub fn unseal(self) -> BlockWithSenders { - let Self { block, senders } = self; - BlockWithSenders { block: block.unseal(), senders } + pub fn unseal(self) -> BlockWithSenders { + let (block, senders) = self.into_components(); + let (header, body) = block.split(); + let header = header.unseal(); + BlockWithSenders::new_unchecked(B::new(header, body), senders) } /// Returns an iterator over all transactions in the block. #[inline] - pub fn transactions(&self) -> impl Iterator + '_ { + pub fn transactions(&self) -> &[::Transaction] { self.block.body.transactions() } @@ -476,24 +548,34 @@ impl SealedBlockWithSenders { #[inline] pub fn transactions_with_sender( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator::Transaction)> + + '_ { self.senders.iter().zip(self.block.body.transactions()) } /// Consumes the block and returns the transactions of the block. #[inline] - pub fn into_transactions(self) -> Vec { - self.block.body.transactions + pub fn into_transactions( + self, + ) -> Vec<::Transaction> { + self.block.body.into_transactions() } /// Returns an iterator over all transactions in the chain. #[inline] pub fn into_transactions_ecrecovered( self, - ) -> impl Iterator { + ) -> impl Iterator< + Item = TransactionSignedEcRecovered< + ::Transaction, + >, + > + where + ::Transaction: SignedTransaction, + { self.block .body - .transactions + .into_transactions() .into_iter() .zip(self.senders) .map(|(tx, sender)| tx.with_signer(sender)) @@ -503,7 +585,7 @@ impl SealedBlockWithSenders { #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for SealedBlockWithSenders { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let block = SealedBlock::arbitrary(u)?; + let block: SealedBlock = SealedBlock::arbitrary(u)?; let senders = block .body @@ -539,11 +621,6 @@ impl BlockBody { Block { header, body: self } } - /// Calculate the transaction root for the block body. - pub fn calculate_tx_root(&self) -> B256 { - crate::proofs::calculate_transaction_root(&self.transactions) - } - /// Calculate the ommers root for the block body. pub fn calculate_ommers_root(&self) -> B256 { crate::proofs::calculate_ommers_root(&self.ommers) @@ -555,11 +632,6 @@ impl BlockBody { self.withdrawals.as_ref().map(|w| crate::proofs::calculate_withdrawals_root(w)) } - /// Recover signer addresses for all transactions in the block body. - pub fn recover_signers(&self) -> Option> { - TransactionSigned::recover_signers(&self.transactions, self.transactions.len()) - } - /// Returns whether or not the block body contains any blob transactions. #[inline] pub fn has_blob_transactions(&self) -> bool { @@ -597,16 +669,12 @@ impl BlockBody { pub fn blob_versioned_hashes(&self) -> Vec<&B256> { self.blob_versioned_hashes_iter().collect() } +} - /// Returns an iterator over all transactions. - #[inline] - pub fn transactions(&self) -> impl Iterator + '_ { - self.transactions.iter() - } - +impl InMemorySize for BlockBody { /// Calculates a heuristic for the in-memory size of the [`BlockBody`]. 
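With senders recovered once (or loaded from storage), the containers above let callers iterate transactions without repeating signature recovery. A sketch against the default Ethereum types, assuming `TransactionSigned` implements the alloy `Transaction` trait as required by this diff (the `total_gas_limit` helper is hypothetical):

```rust
use alloy_consensus::Transaction as _;
use reth_primitives::SealedBlockWithSenders;

/// Hypothetical helper: fold over (sender, transaction) pairs; no secp256k1
/// recovery happens here, the senders were recovered exactly once upfront.
fn total_gas_limit(block: &SealedBlockWithSenders) -> u64 {
    block.transactions_with_sender().map(|(_sender, tx)| tx.gas_limit()).sum()
}
```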
#[inline] - pub fn size(&self) -> usize { + fn size(&self) -> usize { self.transactions.iter().map(TransactionSigned::size).sum::() + self.transactions.capacity() * core::mem::size_of::() + self.ommers.iter().map(Header::size).sum::() + @@ -617,6 +685,27 @@ impl BlockBody { } } +impl reth_primitives_traits::BlockBody for BlockBody { + type Transaction = TransactionSigned; + type OmmerHeader = Header; + + fn transactions(&self) -> &[Self::Transaction] { + &self.transactions + } + + fn into_transactions(self) -> Vec { + self.transactions + } + + fn withdrawals(&self) -> Option<&Withdrawals> { + self.withdrawals.as_ref() + } + + fn ommers(&self) -> Option<&[Self::OmmerHeader]> { + Some(&self.ommers) + } +} + impl From for BlockBody { fn from(block: Block) -> Self { Self { @@ -911,6 +1000,12 @@ mod tests { use alloy_rlp::{Decodable, Encodable}; use std::str::FromStr; + const fn _traits() { + const fn assert_block() {} + assert_block::(); + assert_block::(); + } + /// Check parsing according to EIP-1898. #[test] fn can_parse_blockid_u64() { @@ -1074,18 +1169,18 @@ mod tests { Some(BlockWithSenders { block: block.clone(), senders: vec![sender] }) ); let sealed = block.seal_slow(); - assert_eq!(SealedBlockWithSenders::new(sealed.clone(), vec![]), None); + assert_eq!(SealedBlockWithSenders::::new(sealed.clone(), vec![]), None); assert_eq!( - SealedBlockWithSenders::new(sealed.clone(), vec![sender]), + SealedBlockWithSenders::::new(sealed.clone(), vec![sender]), Some(SealedBlockWithSenders { block: sealed, senders: vec![sender] }) ); } #[test] fn test_default_seal() { - let block = SealedBlock::default(); + let block: SealedBlock = SealedBlock::default(); let sealed = block.hash(); - let block = block.unseal(); + let block: Block = block.unseal(); let block = block.seal_slow(); assert_eq!(sealed, block.hash()); } diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs deleted file mode 100644 index 09c488cc25ad..000000000000 --- a/crates/primitives/src/constants/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! 
Ethereum protocol-related constants - -pub use reth_primitives_traits::constants::*; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index f44e1ee6a09e..224e025f39d5 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -21,44 +21,43 @@ extern crate alloc; +mod traits; +pub use traits::*; + #[cfg(feature = "alloy-compat")] mod alloy_compat; mod block; #[cfg(feature = "reth-codec")] mod compression; -pub mod constants; pub mod proofs; mod receipt; pub use reth_static_file_types as static_file; pub mod transaction; #[cfg(any(test, feature = "arbitrary"))] pub use block::{generate_valid_header, valid_header_strategy}; -pub use block::{Block, BlockBody, BlockWithSenders, SealedBlock, SealedBlockWithSenders}; +pub use block::{ + Block, BlockBody, BlockWithSenders, SealedBlock, SealedBlockFor, SealedBlockWithSenders, +}; #[cfg(feature = "reth-codec")] pub use compression::*; -pub use constants::HOLESKY_GENESIS_HASH; pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, }; pub use reth_primitives_traits::{ logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, - LogData, SealedHeader, StorageEntry, + LogData, NodePrimitives, SealedHeader, StorageEntry, }; pub use static_file::StaticFileSegment; -pub use transaction::{ - BlobTransaction, PooledTransactionsElement, PooledTransactionsElementEcRecovered, -}; - pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, - InvalidTransactionError, Transaction, TransactionMeta, TransactionSigned, - TransactionSignedEcRecovered, TransactionSignedNoHash, TxHashOrNumber, TxType, + BlobTransaction, InvalidTransactionError, PooledTransactionsElement, + PooledTransactionsElementEcRecovered, Transaction, TransactionMeta, TransactionSigned, + TransactionSignedEcRecovered, TransactionSignedNoHash, TxType, }; // Re-exports pub use reth_ethereum_forks::*; -pub use revm_primitives::{self, JumpTable}; #[cfg(any(test, feature = "arbitrary"))] pub use arbitrary; @@ -80,3 +79,17 @@ pub mod serde_bincode_compat { transaction::{serde_bincode_compat as transaction, serde_bincode_compat::*}, }; } + +/// Temp helper struct for integrating [`NodePrimitives`]. +#[derive(Debug, Clone, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +#[non_exhaustive] +pub struct EthPrimitives; + +impl reth_primitives_traits::NodePrimitives for EthPrimitives { + type Block = crate::Block; + type BlockHeader = alloy_consensus::Header; + type BlockBody = crate::BlockBody; + type SignedTx = crate::TransactionSigned; + type TxType = crate::TxType; + type Receipt = crate::Receipt; +} diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 000244d2c549..81c26d7180e0 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,20 +1,20 @@ //! Helper function for calculating Merkle proofs and hashes. 
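`EthPrimitives` is the first concrete `NodePrimitives` bundle; generic node code is expected to name block components only through the adapter aliases. A sketch of that shape (the `describe` function is hypothetical, and it assumes the reth header/body traits bring the alloy supertrait methods used here):

```rust
use alloy_consensus::BlockHeader as _;
use reth_primitives::EthPrimitives;
use reth_primitives_traits::{BlockBody as _, BodyTy, HeaderTy, NodePrimitives};

/// Hypothetical generic helper: no concrete header/body types are named,
/// only the associated types of the `NodePrimitives` bundle `N`.
fn describe<N: NodePrimitives>(header: &HeaderTy<N>, body: &BodyTy<N>) -> String {
    format!("block #{} with {} transactions", header.number(), body.transactions().len())
}

fn main() {
    let header = alloy_consensus::Header::default();
    let body = reth_primitives::BlockBody::default();
    println!("{}", describe::<EthPrimitives>(&header, &body));
}
```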
-use crate::{Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, TransactionSigned}; -use alloc::vec::Vec; -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use crate::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; +use alloc::{borrow::Borrow, vec::Vec}; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawal}; use alloy_primitives::{keccak256, B256}; -use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder}; +use alloy_trie::root::{ordered_trie_root, ordered_trie_root_with_encoder}; /// Calculate a transaction root. /// /// `(rlp(index), encoded(tx))` pairs. pub fn calculate_transaction_root(transactions: &[T]) -> B256 where - T: AsRef, + T: Encodable2718, { - ordered_trie_root_with_encoder(transactions, |tx: &T, buf| tx.as_ref().encode_2718(buf)) + ordered_trie_root_with_encoder(transactions, |tx, buf| tx.borrow().encode_2718(buf)) } /// Calculates the root hash of the withdrawals. diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index e60bddb9d799..95d707d1b2d5 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,18 +1,26 @@ -#[cfg(feature = "reth-codec")] -use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; -use crate::TxType; use alloc::{vec, vec::Vec}; -use alloy_consensus::constants::{ - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, +use core::cmp::Ordering; +use reth_primitives_traits::InMemorySize; + +use alloy_consensus::{ + constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, + Eip658Value, TxReceipt, }; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{Bloom, Log, B256}; use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable}; use bytes::{Buf, BufMut}; -use core::{cmp::Ordering, ops::Deref}; use derive_more::{DerefMut, From, IntoIterator}; +use reth_primitives_traits::receipt::ReceiptExt; use serde::{Deserialize, Serialize}; +#[cfg(feature = "reth-codec")] +use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; +use crate::TxType; + +/// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). +pub use reth_primitives_traits::receipt::gas_spent_by_transactions; + /// Receipt containing result of transaction execution. #[derive( Clone, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable, Serialize, Deserialize, @@ -64,13 +72,65 @@ impl Receipt { } } +impl TxReceipt for Receipt { + fn status_or_post_state(&self) -> Eip658Value { + self.success.into() + } + + fn status(&self) -> bool { + self.success + } + + fn bloom(&self) -> Bloom { + alloy_primitives::logs_bloom(self.logs.iter()) + } + + fn cumulative_gas_used(&self) -> u128 { + self.cumulative_gas_used as u128 + } + + fn logs(&self) -> &[Log] { + &self.logs + } +} + +impl reth_primitives_traits::Receipt for Receipt { + fn tx_type(&self) -> u8 { + self.tx_type as u8 + } +} + +impl ReceiptExt for Receipt { + fn receipts_root(_receipts: &[&Self]) -> B256 { + #[cfg(feature = "optimism")] + panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); + #[cfg(not(feature = "optimism"))] + crate::proofs::calculate_receipt_root_no_memo(_receipts) + } +} + +impl InMemorySize for Receipt { + /// Calculates a heuristic for the in-memory size of the [Receipt]. 
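Relaxing the bound to `Encodable2718` means the root helper now accepts anything that can be 2718-encoded, not only types convertible to `TransactionSigned`. A sketch (assuming `alloy-trie` is available, as added in the Cargo.toml hunk above; an empty input yields the canonical empty-trie root):

```rust
use alloy_trie::EMPTY_ROOT_HASH;
use reth_primitives::{proofs::calculate_transaction_root, TransactionSigned};

fn main() {
    // Each transaction is keyed by its RLP-encoded index and hashed into an
    // ordered trie; with no transactions the root is the empty-trie root.
    let txs: Vec<TransactionSigned> = Vec::new();
    assert_eq!(calculate_transaction_root(&txs), EMPTY_ROOT_HASH);
}
```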
+    #[inline]
+    fn size(&self) -> usize {
+        let total_size = self.tx_type.size() +
+            core::mem::size_of::<bool>() +
+            core::mem::size_of::<u64>() +
+            self.logs.capacity() * core::mem::size_of::<Log>();
+
+        #[cfg(feature = "optimism")]
+        return total_size + 2 * core::mem::size_of::<Option<u64>>();
+        #[cfg(not(feature = "optimism"))]
+        total_size
+    }
+}
+
 /// A collection of receipts organized as a two-dimensional vector.
 #[derive(
     Clone,
     Debug,
     PartialEq,
     Eq,
-    Default,
     Serialize,
     Deserialize,
     From,
@@ -78,12 +138,12 @@ impl Receipt {
     DerefMut,
     IntoIterator,
 )]
-pub struct Receipts {
+pub struct Receipts<T = Receipt> {
     /// A two-dimensional vector of optional `Receipt` instances.
-    pub receipt_vec: Vec<Vec<Option<Receipt>>>,
+    pub receipt_vec: Vec<Vec<Option<T>>>,
 }

-impl Receipts {
+impl<T> Receipts<T> {
     /// Returns the length of the `Receipts` vector.
     pub fn len(&self) -> usize {
         self.receipt_vec.len()
@@ -95,26 +155,26 @@ impl Receipts {
     }

     /// Push a new vector of receipts into the `Receipts` collection.
-    pub fn push(&mut self, receipts: Vec<Option<Receipt>>) {
+    pub fn push(&mut self, receipts: Vec<Option<T>>) {
         self.receipt_vec.push(receipts);
     }

     /// Retrieves all recorded receipts from index and calculates the root using the given closure.
-    pub fn root_slow(&self, index: usize, f: impl FnOnce(&[&Receipt]) -> B256) -> Option<B256> {
+    pub fn root_slow(&self, index: usize, f: impl FnOnce(&[&T]) -> B256) -> Option<B256> {
         let receipts =
             self.receipt_vec[index].iter().map(Option::as_ref).collect::<Option<Vec<_>>>()?;
         Some(f(receipts.as_slice()))
     }
 }

-impl From<Vec<Receipt>> for Receipts {
-    fn from(block_receipts: Vec<Receipt>) -> Self {
+impl<T> From<Vec<T>> for Receipts<T> {
+    fn from(block_receipts: Vec<T>) -> Self {
         Self { receipt_vec: vec![block_receipts.into_iter().map(Option::Some).collect()] }
     }
 }

-impl FromIterator<Vec<Option<Receipt>>> for Receipts {
-    fn from_iter<I: IntoIterator<Item = Vec<Option<Receipt>>>>(iter: I) -> Self {
+impl<T> FromIterator<Vec<Option<T>>> for Receipts<T> {
+    fn from_iter<I: IntoIterator<Item = Vec<Option<T>>>>(iter: I) -> Self {
         iter.into_iter().collect::<Vec<_>>().into()
     }
 }
@@ -126,11 +186,15 @@ impl From<Receipt> for ReceiptWithBloom {
     }
 }

+impl<T> Default for Receipts<T> {
+    fn default() -> Self {
+        Self { receipt_vec: Vec::new() }
+    }
+}
+
 /// [`Receipt`] with calculated bloom filter.
 #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
 #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
-#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
-#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
 pub struct ReceiptWithBloom {
     /// Bloom filter build from logs.
     pub bloom: Bloom,
@@ -160,17 +224,6 @@ impl ReceiptWithBloom {
     }
 }

-/// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used).
-pub fn gas_spent_by_transactions>( - receipts: impl IntoIterator, -) -> Vec<(u64, u64)> { - receipts - .into_iter() - .enumerate() - .map(|(id, receipt)| (id as u64, receipt.deref().cumulative_gas_used)) - .collect() -} - #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for Receipt { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { @@ -345,7 +398,7 @@ impl Decodable for ReceiptWithBloom { Self::decode_receipt(buf, TxType::Eip7702) } #[cfg(feature = "optimism")] - crate::transaction::DEPOSIT_TX_TYPE_ID => { + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => { buf.advance(1); Self::decode_receipt(buf, TxType::Deposit) } @@ -481,7 +534,7 @@ impl ReceiptWithBloomEncoder<'_> { } #[cfg(feature = "optimism")] TxType::Deposit => { - out.put_u8(crate::transaction::DEPOSIT_TX_TYPE_ID); + out.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); } } out.put_slice(payload.as_ref()); @@ -514,8 +567,7 @@ impl Encodable for ReceiptWithBloomEncoder<'_> { #[cfg(test)] mod tests { use super::*; - use crate::revm_primitives::Bytes; - use alloy_primitives::{address, b256, bytes, hex_literal::hex}; + use alloy_primitives::{address, b256, bytes, hex_literal::hex, Bytes}; use reth_codecs::Compact; #[test] diff --git a/crates/primitives/src/traits.rs b/crates/primitives/src/traits.rs new file mode 100644 index 000000000000..ec4e75c8c6d0 --- /dev/null +++ b/crates/primitives/src/traits.rs @@ -0,0 +1,137 @@ +use crate::{ + transaction::{recover_signers, recover_signers_unchecked}, + BlockWithSenders, SealedBlock, +}; +use alloc::vec::Vec; +use alloy_eips::{eip2718::Encodable2718, BlockNumHash}; +use reth_primitives_traits::{Block, BlockBody, BlockHeader, SealedHeader, SignedTransaction}; +use revm_primitives::{Address, B256}; + +/// Extension trait for [`reth_primitives_traits::Block`] implementations +/// allowing for conversions into common block parts containers such as [`SealedBlock`], +/// [`BlockWithSenders`], etc. +pub trait BlockExt: Block { + /// Calculate the header hash and seal the block so that it can't be changed. + fn seal_slow(self) -> SealedBlock { + let (header, body) = self.split(); + SealedBlock { header: SealedHeader::seal(header), body } + } + + /// Seal the block with a known hash. + /// + /// WARNING: This method does not perform validation whether the hash is correct. + fn seal(self, hash: B256) -> SealedBlock { + let (header, body) = self.split(); + SealedBlock { header: SealedHeader::new(header, hash), body } + } + + /// Expensive operation that recovers transaction signer. + fn senders(&self) -> Option> + where + ::Transaction: SignedTransaction, + { + self.body().recover_signers() + } + + /// Transform into a [`BlockWithSenders`]. + /// + /// # Panics + /// + /// If the number of senders does not match the number of transactions in the block + /// and the signer recovery for one of the transactions fails. + /// + /// Note: this is expected to be called with blocks read from disk. + #[track_caller] + fn with_senders_unchecked(self, senders: Vec
<Address>) -> BlockWithSenders<Self>
+    where
+        <Self::Body as BlockBody>::Transaction: SignedTransaction,
+    {
+        self.try_with_senders_unchecked(senders).expect("stored block is valid")
+    }
+
+    /// Transform into a [`BlockWithSenders`] using the given senders.
+    ///
+    /// If the number of senders does not match the number of transactions in the block, this falls
+    /// back to manual recovery, but _without ensuring that the signature has a low `s` value_.
+    /// See also [`recover_signers_unchecked`].
+    ///
+    /// Returns an error if a signature is invalid.
+    #[track_caller]
+    fn try_with_senders_unchecked(
+        self,
+        senders: Vec<Address>,
+    ) -> Result<BlockWithSenders<Self>, Self>
+    where
+        <Self::Body as BlockBody>::Transaction: SignedTransaction,
+    {
+        let senders = if self.body().transactions().len() == senders.len() {
+            senders
+        } else {
+            let Some(senders) = self.body().recover_signers_unchecked() else { return Err(self) };
+            senders
+        };
+
+        Ok(BlockWithSenders::new_unchecked(self, senders))
+    }
+
+    /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained
+    /// transactions.
+    ///
+    /// Returns `None` if a transaction is invalid.
+    fn with_recovered_senders(self) -> Option<BlockWithSenders<Self>>
+    where
+        <Self::Body as BlockBody>::Transaction: SignedTransaction,
+    {
+        let senders = self.senders()?;
+        Some(BlockWithSenders::new_unchecked(self, senders))
+    }
+}
+
+impl<T: Block> BlockExt for T {}
+
+/// Extension trait for [`BlockBody`] adding helper methods operating with transactions.
+pub trait BlockBodyTxExt: BlockBody {
+    /// Calculate the transaction root for the block body.
+    fn calculate_tx_root(&self) -> B256
+    where
+        Self::Transaction: Encodable2718,
+    {
+        crate::proofs::calculate_transaction_root(self.transactions())
+    }
+
+    /// Recover signer addresses for all transactions in the block body.
+    fn recover_signers(&self) -> Option<Vec<Address>>
+    where
+        Self::Transaction: SignedTransaction,
+    {
+        recover_signers(self.transactions(), self.transactions().len())
+    }
+
+    /// Recover signer addresses for all transactions in the block body _without ensuring that the
+    /// signature has a low `s` value_.
+    ///
+    /// Returns `None`, if some transaction's signature is invalid, see also
+    /// [`recover_signers_unchecked`].
+    fn recover_signers_unchecked(&self) -> Option<Vec<Address>>
+    where
+        Self::Transaction: SignedTransaction,
+    {
+        recover_signers_unchecked(self.transactions(), self.transactions().len())
+    }
+}
+
+impl<T: BlockBody> BlockBodyTxExt for T {}
+
+/// Extension trait for [`BlockHeader`] adding useful helper methods.
+pub trait HeaderExt: BlockHeader {
+    /// TODO: remove once is released
+    ///
+    /// Returns the parent block's number and hash
+    ///
+    /// Note: for the genesis block the parent number is 0 and the parent hash is the zero hash.
+    fn parent_num_hash(&self) -> BlockNumHash {
+        BlockNumHash::new(self.number().saturating_sub(1), self.parent_hash())
+    }
+}
+
+impl<T: BlockHeader> HeaderExt for T {}
diff --git a/crates/primitives/src/traits/mod.rs b/crates/primitives/src/traits/mod.rs
deleted file mode 100644
index 49fb73ea5555..000000000000
--- a/crates/primitives/src/traits/mod.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-//! Abstractions of primitive data types
-
-pub mod block;
-pub mod transaction;
-
-pub use block::{body::BlockBody, Block};
-pub use transaction::signed::SignedTransaction;
-
-pub use alloy_consensus::BlockHeader;
diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs
index ed1a7daf1e84..e1524aa1dc87 100644
--- a/crates/primitives/src/transaction/mod.rs
+++ b/crates/primitives/src/transaction/mod.rs
@@ -1,13 +1,11 @@
 //! Transaction types.
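Before moving on to the transaction changes, note the rule that `BlockExt::try_with_senders_unchecked` above encodes: trust caller-supplied senders only when their count matches the transaction count, otherwise fall back to recovering signers from the signatures. A runnable sketch of that control flow with stand-in types (the `signer` field is a hypothetical placeholder for real signature recovery):

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Address(u64);

// Stand-in for a signed transaction; a real one would recover the signer
// from its signature instead of storing it.
struct Tx {
    signer: Address,
}

impl Tx {
    // Mirrors `recover_signer_unchecked`: may fail (None) on a bad signature.
    fn recover_signer_unchecked(&self) -> Option<Address> {
        Some(self.signer)
    }
}

/// Returns the senders to attach to the block, or `Err(())` if recovery fails.
fn try_with_senders_unchecked(txs: &[Tx], senders: Vec<Address>) -> Result<Vec<Address>, ()> {
    let senders = if txs.len() == senders.len() {
        // Counts match: trust the provided senders without re-deriving them.
        senders
    } else {
        // Mismatch: recover every signer; a single failure aborts.
        txs.iter().map(Tx::recover_signer_unchecked).collect::<Option<Vec<_>>>().ok_or(())?
    };
    Ok(senders)
}

fn main() {
    let txs = vec![Tx { signer: Address(1) }, Tx { signer: Address(2) }];
    // An empty sender list has the wrong length, so the recovery fallback runs.
    assert_eq!(try_with_senders_unchecked(&txs, vec![]), Ok(vec![Address(1), Address(2)]));
}
```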
-#[cfg(any(test, feature = "reth-codec"))] -use alloy_consensus::constants::{EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}; +use alloc::vec::Vec; use alloy_consensus::{ - transaction::RlpEcdsaTx, SignableTransaction, Transaction as _, TxEip1559, TxEip2930, - TxEip4844, TxEip7702, TxLegacy, + transaction::RlpEcdsaTx, SignableTransaction, Signed, Transaction as _, TxEip1559, TxEip2930, + TxEip4844, TxEip4844Variant, TxEip7702, TxLegacy, TypedTransaction, }; use alloy_eips::{ - eip1898::BlockHashOrNumber, eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, eip2930::AccessList, eip7702::SignedAuthorization, @@ -16,30 +14,33 @@ use alloy_primitives::{ keccak256, Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256, }; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; -use core::mem; +use core::hash::{Hash, Hasher}; use derive_more::{AsRef, Deref}; use once_cell as _; #[cfg(not(feature = "std"))] -use once_cell::sync::Lazy as LazyLock; +use once_cell::sync::{Lazy as LazyLock, OnceCell as OnceLock}; #[cfg(feature = "optimism")] use op_alloy_consensus::DepositTransaction; +#[cfg(feature = "optimism")] +use op_alloy_consensus::TxDeposit; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; +use reth_primitives_traits::{InMemorySize, SignedTransaction}; +use revm_primitives::{AuthorizationList, TxEnv}; use serde::{Deserialize, Serialize}; use signature::decode_with_eip155_chain_id; #[cfg(feature = "std")] -use std::sync::LazyLock; +use std::sync::{LazyLock, OnceLock}; +pub use compat::FillTxEnv; pub use error::{ InvalidTransactionError, TransactionConversionError, TryFromRecoveredTransactionError, }; pub use meta::TransactionMeta; pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; +pub use reth_primitives_traits::WithEncoded; pub use sidecar::BlobTransaction; - -pub use compat::FillTxEnv; pub use signature::{recover_signer, recover_signer_unchecked}; pub use tx_type::TxType; -pub use variant::TransactionSignedVariant; pub(crate) mod access_list; mod compat; @@ -47,31 +48,23 @@ mod error; mod meta; mod pooled; mod sidecar; -mod signature; mod tx_type; + +/// Handling transaction signature operations, including signature recovery, +/// applying chain IDs, and EIP-2 validation. +pub mod signature; + pub(crate) mod util; -mod variant; -#[cfg(feature = "optimism")] -use op_alloy_consensus::TxDeposit; -#[cfg(feature = "optimism")] -pub use tx_type::DEPOSIT_TX_TYPE_ID; #[cfg(any(test, feature = "reth-codec"))] -use tx_type::{ +pub use tx_type::{ COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, COMPACT_IDENTIFIER_LEGACY, }; -use alloc::vec::Vec; -use reth_primitives_traits::SignedTransaction; -use revm_primitives::{AuthorizationList, TxEnv}; - -/// Either a transaction hash or number. -pub type TxHashOrNumber = BlockHashOrNumber; - -// Expected number of transactions where we can expect a speed-up by recovering the senders in -// parallel. -pub(crate) static PARALLEL_SENDER_RECOVERY_THRESHOLD: LazyLock = +/// Expected number of transactions where we can expect a speed-up by recovering the senders in +/// parallel. +pub static PARALLEL_SENDER_RECOVERY_THRESHOLD: LazyLock = LazyLock::new(|| match rayon::current_num_threads() { 0..=1 => usize::MAX, 2..=8 => 10, @@ -230,29 +223,6 @@ impl Transaction { } } - /// Gets the transaction's [`TxKind`], which is the address of the recipient or - /// [`TxKind::Create`] if the transaction is a contract creation. 
- pub const fn kind(&self) -> TxKind { - match self { - Self::Legacy(TxLegacy { to, .. }) | - Self::Eip2930(TxEip2930 { to, .. }) | - Self::Eip1559(TxEip1559 { to, .. }) => *to, - Self::Eip4844(TxEip4844 { to, .. }) | Self::Eip7702(TxEip7702 { to, .. }) => { - TxKind::Call(*to) - } - #[cfg(feature = "optimism")] - Self::Deposit(TxDeposit { to, .. }) => *to, - } - } - - /// Get the transaction's address of the contract that will be called, or the address that will - /// receive the transfer. - /// - /// Returns `None` if this is a `CREATE` transaction. - pub fn to(&self) -> Option
{ - self.kind().to().copied() - } - /// Get the transaction's type pub const fn tx_type(&self) -> TxType { match self { @@ -266,56 +236,6 @@ impl Transaction { } } - /// Returns the [`AccessList`] of the transaction. - /// - /// Returns `None` for legacy transactions. - pub const fn access_list(&self) -> Option<&AccessList> { - match self { - Self::Legacy(_) => None, - Self::Eip2930(tx) => Some(&tx.access_list), - Self::Eip1559(tx) => Some(&tx.access_list), - Self::Eip4844(tx) => Some(&tx.access_list), - Self::Eip7702(tx) => Some(&tx.access_list), - #[cfg(feature = "optimism")] - Self::Deposit(_) => None, - } - } - - /// Returns the [`SignedAuthorization`] list of the transaction. - /// - /// Returns `None` if this transaction is not EIP-7702. - pub fn authorization_list(&self) -> Option<&[SignedAuthorization]> { - match self { - Self::Eip7702(tx) => Some(&tx.authorization_list), - _ => None, - } - } - - /// Returns true if the tx supports dynamic fees - pub const fn is_dynamic_fee(&self) -> bool { - match self { - Self::Legacy(_) | Self::Eip2930(_) => false, - Self::Eip1559(_) | Self::Eip4844(_) | Self::Eip7702(_) => true, - #[cfg(feature = "optimism")] - Self::Deposit(_) => false, - } - } - - /// Blob versioned hashes for eip4844 transaction, for legacy, eip1559, eip2930 and eip7702 - /// transactions this is `None` - /// - /// This is also commonly referred to as the "blob versioned hashes" (`BlobVersionedHashes`). - pub fn blob_versioned_hashes(&self) -> Option> { - match self { - Self::Legacy(_) | Self::Eip2930(_) | Self::Eip1559(_) | Self::Eip7702(_) => None, - Self::Eip4844(TxEip4844 { blob_versioned_hashes, .. }) => { - Some(blob_versioned_hashes.clone()) - } - #[cfg(feature = "optimism")] - Self::Deposit(_) => None, - } - } - /// Returns the blob gas used for all blobs of the EIP-4844 transaction if it is an EIP-4844 /// transaction. /// @@ -325,21 +245,6 @@ impl Transaction { self.as_eip4844().map(TxEip4844::blob_gas) } - /// Returns the effective gas price for the given base fee. - /// - /// If the transaction is a legacy or EIP2930 transaction, the gas price is returned. - pub const fn effective_gas_price(&self, base_fee: Option) -> u128 { - match self { - Self::Legacy(tx) => tx.gas_price, - Self::Eip2930(tx) => tx.gas_price, - Self::Eip1559(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), - Self::Eip4844(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), - Self::Eip7702(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), - #[cfg(feature = "optimism")] - Self::Deposit(_) => 0, - } - } - /// Returns the effective miner gas tip cap (`gasTipCap`) for the given base fee: /// `min(maxFeePerGas - baseFee, maxPriorityFeePerGas)` /// @@ -371,19 +276,6 @@ impl Transaction { } } - /// Get the transaction's input field. - pub const fn input(&self) -> &Bytes { - match self { - Self::Legacy(TxLegacy { input, .. }) | - Self::Eip2930(TxEip2930 { input, .. }) | - Self::Eip1559(TxEip1559 { input, .. }) | - Self::Eip4844(TxEip4844 { input, .. }) | - Self::Eip7702(TxEip7702 { input, .. }) => input, - #[cfg(feature = "optimism")] - Self::Deposit(TxDeposit { input, .. }) => input, - } - } - /// This encodes the transaction _without_ the signature, and is only suitable for creating a /// hash intended for signing. pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { @@ -472,20 +364,6 @@ impl Transaction { } } - /// Calculates a heuristic for the in-memory size of the [Transaction]. 
- #[inline] - pub fn size(&self) -> usize { - match self { - Self::Legacy(tx) => tx.size(), - Self::Eip2930(tx) => tx.size(), - Self::Eip1559(tx) => tx.size(), - Self::Eip4844(tx) => tx.size(), - Self::Eip7702(tx) => tx.size(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.size(), - } - } - /// Returns true if the transaction is a legacy transaction. #[inline] pub const fn is_legacy(&self) -> bool { @@ -557,6 +435,22 @@ impl Transaction { } } +impl InMemorySize for Transaction { + /// Calculates a heuristic for the in-memory size of the [Transaction]. + #[inline] + fn size(&self) -> usize { + match self { + Self::Legacy(tx) => tx.size(), + Self::Eip2930(tx) => tx.size(), + Self::Eip1559(tx) => tx.size(), + Self::Eip4844(tx) => tx.size(), + Self::Eip7702(tx) => tx.size(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.size(), + } + } +} + #[cfg(any(test, feature = "reth-codec"))] impl reth_codecs::Compact for Transaction { // Serializes the TxType to the buffer if necessary, returning 2 bits of the type as an @@ -602,19 +496,19 @@ impl reth_codecs::Compact for Transaction { use bytes::Buf; match identifier { - COMPACT_IDENTIFIER_LEGACY => { + reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => { let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); (Self::Legacy(tx), buf) } - COMPACT_IDENTIFIER_EIP2930 => { + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => { let (tx, buf) = TxEip2930::from_compact(buf, buf.len()); (Self::Eip2930(tx), buf) } - COMPACT_IDENTIFIER_EIP1559 => { + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => { let (tx, buf) = TxEip1559::from_compact(buf, buf.len()); (Self::Eip1559(tx), buf) } - COMPACT_EXTENDED_IDENTIFIER_FLAG => { + reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => { // An identifier of 3 indicates that the transaction type did not fit into // the backwards compatible 2 bit identifier, their transaction types are // larger than 2 bits (eg. 4844 and Deposit Transactions). In this case, @@ -622,16 +516,16 @@ impl reth_codecs::Compact for Transaction { // reading the full 8 bits (single byte) and match on this transaction type. 
let identifier = buf.get_u8(); match identifier { - EIP4844_TX_TYPE_ID => { + alloy_consensus::constants::EIP4844_TX_TYPE_ID => { let (tx, buf) = TxEip4844::from_compact(buf, buf.len()); (Self::Eip4844(tx), buf) } - EIP7702_TX_TYPE_ID => { + alloy_consensus::constants::EIP7702_TX_TYPE_ID => { let (tx, buf) = TxEip7702::from_compact(buf, buf.len()); (Self::Eip7702(tx), buf) } #[cfg(feature = "optimism")] - DEPOSIT_TX_TYPE_ID => { + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => { let (tx, buf) = TxDeposit::from_compact(buf, buf.len()); (Self::Deposit(tx), buf) } @@ -748,6 +642,39 @@ impl alloy_consensus::Transaction for Transaction { } } + fn effective_gas_price(&self, base_fee: Option) -> u128 { + match self { + Self::Legacy(tx) => tx.effective_gas_price(base_fee), + Self::Eip2930(tx) => tx.effective_gas_price(base_fee), + Self::Eip1559(tx) => tx.effective_gas_price(base_fee), + Self::Eip4844(tx) => tx.effective_gas_price(base_fee), + Self::Eip7702(tx) => tx.effective_gas_price(base_fee), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.effective_gas_price(base_fee), + } + } + + fn is_dynamic_fee(&self) -> bool { + match self { + Self::Legacy(_) | Self::Eip2930(_) => false, + Self::Eip1559(_) | Self::Eip4844(_) | Self::Eip7702(_) => true, + #[cfg(feature = "optimism")] + Self::Deposit(_) => false, + } + } + + fn kind(&self) -> TxKind { + match self { + Self::Legacy(tx) => tx.kind(), + Self::Eip2930(tx) => tx.kind(), + Self::Eip1559(tx) => tx.kind(), + Self::Eip4844(tx) => tx.kind(), + Self::Eip7702(tx) => tx.kind(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.kind(), + } + } + fn value(&self) -> U256 { match self { Self::Legacy(tx) => tx.value(), @@ -819,16 +746,25 @@ impl alloy_consensus::Transaction for Transaction { Self::Deposit(tx) => tx.authorization_list(), } } +} - fn kind(&self) -> TxKind { - match self { - Self::Legacy(tx) => tx.kind(), - Self::Eip2930(tx) => tx.kind(), - Self::Eip1559(tx) => tx.kind(), - Self::Eip4844(tx) => tx.kind(), - Self::Eip7702(tx) => tx.kind(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.kind(), +impl From for Transaction { + fn from(value: TxEip4844Variant) -> Self { + match value { + TxEip4844Variant::TxEip4844(tx) => tx.into(), + TxEip4844Variant::TxEip4844WithSidecar(tx) => tx.tx.into(), + } + } +} + +impl From for Transaction { + fn from(value: TypedTransaction) -> Self { + match value { + TypedTransaction::Legacy(tx) => tx.into(), + TypedTransaction::Eip2930(tx) => tx.into(), + TypedTransaction::Eip1559(tx) => tx.into(), + TypedTransaction::Eip4844(tx) => tx.into(), + TypedTransaction::Eip7702(tx) => tx.into(), } } } @@ -915,7 +851,7 @@ impl TransactionSignedNoHash { #[inline] pub fn with_hash(self) -> TransactionSigned { let Self { signature, transaction } = self; - TransactionSigned::from_transaction_and_signature(transaction, signature) + TransactionSigned::new_unhashed(transaction, signature) } /// Recovers a list of signers from a transaction list iterator @@ -1029,6 +965,22 @@ impl reth_codecs::Compact for TransactionSignedNoHash { } } +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for TransactionSigned { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let tx: TransactionSignedNoHash = self.clone().into(); + tx.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (tx, buf) = TransactionSignedNoHash::from_compact(buf, len); + (tx.into(), buf) + } +} + impl From for TransactionSigned { fn from(tx: 
TransactionSignedNoHash) -> Self {
         tx.with_hash()
@@ -1043,10 +995,11 @@ impl From<TransactionSigned> for TransactionSignedNoHash {
 /// Signed transaction.
 #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))]
-#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)]
+#[derive(Debug, Clone, Eq, AsRef, Deref, Serialize, Deserialize)]
 pub struct TransactionSigned {
     /// Transaction hash
-    pub hash: TxHash,
+    #[serde(skip)]
+    pub hash: OnceLock<TxHash>,
     /// The transaction signature values
     pub signature: Signature,
     /// Raw transaction info
@@ -1071,12 +1024,34 @@ impl AsRef<Self> for TransactionSigned {
     }
 }

+impl Hash for TransactionSigned {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.signature.hash(state);
+        self.transaction.hash(state);
+    }
+}
+
+impl PartialEq for TransactionSigned {
+    fn eq(&self, other: &Self) -> bool {
+        self.signature == other.signature &&
+            self.transaction == other.transaction &&
+            self.tx_hash() == other.tx_hash()
+    }
+}
+
 // === impl TransactionSigned ===

 impl TransactionSigned {
-    /// Transaction signature.
-    pub const fn signature(&self) -> &Signature {
-        &self.signature
+    /// Creates a new signed transaction from the given parts.
+    pub fn new(transaction: Transaction, signature: Signature, hash: B256) -> Self {
+        Self { hash: hash.into(), signature, transaction }
+    }
+
+    /// Creates a new signed transaction from the given transaction and signature without the hash.
+    ///
+    /// Note: this only calculates the hash on the first [`TransactionSigned::hash`] call.
+    pub fn new_unhashed(transaction: Transaction, signature: Signature) -> Self {
+        Self { hash: Default::default(), signature, transaction }
     }

     /// Transaction
@@ -1085,49 +1060,8 @@ impl TransactionSigned {
     }

     /// Transaction hash. Used to identify transaction.
-    pub const fn hash(&self) -> TxHash {
-        self.hash
-    }
-
-    /// Reference to transaction hash. Used to identify transaction.
-    pub const fn hash_ref(&self) -> &TxHash {
-        &self.hash
-    }
-
-    /// Recover signer from signature and hash.
-    ///
-    /// Returns `None` if the transaction's signature is invalid following [EIP-2](https://eips.ethereum.org/EIPS/eip-2), see also [`recover_signer`].
-    ///
-    /// Note:
-    ///
-    /// This can fail for some early ethereum mainnet transactions pre EIP-2, use
-    /// [`Self::recover_signer_unchecked`] if you want to recover the signer without ensuring that
-    /// the signature has a low `s` value.
-    pub fn recover_signer(&self) -> Option<Address> {
-        // Optimism's Deposit transaction does not have a signature. Directly return the
-        // `from` address.
-        #[cfg(feature = "optimism")]
-        if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction {
-            return Some(from)
-        }
-        let signature_hash = self.signature_hash();
-        recover_signer(&self.signature, signature_hash)
-    }
-
-    /// Recover signer from signature and hash _without ensuring that the signature has a low `s`
-    /// value_.
-    ///
-    /// Returns `None` if the transaction's signature is invalid, see also
-    /// [`recover_signer_unchecked`].
-    pub fn recover_signer_unchecked(&self) -> Option<Address>
{ - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. - #[cfg(feature = "optimism")] - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) - } - let signature_hash = self.signature_hash(); - recover_signer_unchecked(&self.signature, signature_hash) + pub fn hash(&self) -> TxHash { + *self.tx_hash() } /// Recovers a list of signers from a transaction list iterator. @@ -1220,19 +1154,10 @@ impl TransactionSigned { keccak256(self.encoded_2718()) } - /// Create a new signed transaction from a transaction and its signature. - /// - /// This will also calculate the transaction hash using its encoding. - pub fn from_transaction_and_signature(transaction: Transaction, signature: Signature) -> Self { - let mut initial_tx = Self { transaction, hash: Default::default(), signature }; - initial_tx.hash = initial_tx.recalculate_hash(); - initial_tx - } - - /// Calculate a heuristic for the in-memory size of the [`TransactionSigned`]. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::() + self.transaction.size() + mem::size_of::() + /// Splits the transaction into parts. + pub fn into_parts(self) -> (Transaction, Signature, B256) { + let hash = self.hash(); + (self.transaction, self.signature, hash) } /// Decodes legacy transaction from the data buffer into a tuple. @@ -1292,103 +1217,48 @@ impl TransactionSigned { // so decoding methods do not need to manually advance the buffer pub fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> alloy_rlp::Result { let (transaction, hash, signature) = Self::decode_rlp_legacy_transaction_tuple(data)?; - let signed = Self { transaction: Transaction::Legacy(transaction), hash, signature }; + let signed = + Self { transaction: Transaction::Legacy(transaction), hash: hash.into(), signature }; Ok(signed) } } -impl alloy_consensus::Transaction for TransactionSigned { - fn chain_id(&self) -> Option { - self.deref().chain_id() - } - - fn nonce(&self) -> u64 { - self.deref().nonce() - } - - fn gas_limit(&self) -> u64 { - self.deref().gas_limit() - } - - fn gas_price(&self) -> Option { - self.deref().gas_price() - } - - fn max_fee_per_gas(&self) -> u128 { - self.deref().max_fee_per_gas() - } - - fn max_priority_fee_per_gas(&self) -> Option { - self.deref().max_priority_fee_per_gas() - } - - fn max_fee_per_blob_gas(&self) -> Option { - self.deref().max_fee_per_blob_gas() - } - - fn priority_fee_or_price(&self) -> u128 { - self.deref().priority_fee_or_price() - } - - fn value(&self) -> U256 { - self.deref().value() - } - - fn input(&self) -> &Bytes { - self.deref().input() - } - - fn ty(&self) -> u8 { - self.deref().ty() - } - - fn access_list(&self) -> Option<&AccessList> { - self.deref().access_list() - } - - fn blob_versioned_hashes(&self) -> Option<&[B256]> { - alloy_consensus::Transaction::blob_versioned_hashes(self.deref()) - } - - fn authorization_list(&self) -> Option<&[SignedAuthorization]> { - self.deref().authorization_list() - } - - fn kind(&self) -> TxKind { - self.deref().kind() - } -} - impl SignedTransaction for TransactionSigned { - type Transaction = Transaction; + type Type = TxType; fn tx_hash(&self) -> &TxHash { - Self::hash_ref(self) - } - - fn transaction(&self) -> &Self::Transaction { - Self::transaction(self) + self.hash.get_or_init(|| self.recalculate_hash()) } fn signature(&self) -> &Signature { - Self::signature(self) + &self.signature } fn recover_signer(&self) -> Option
<Address> {
-        Self::recover_signer(self)
-    }
-
-    fn recover_signer_unchecked(&self) -> Option<Address> {
-        Self::recover_signer_unchecked(self)
+        // Optimism's Deposit transaction does not have a signature. Directly return the
+        // `from` address.
+        #[cfg(feature = "optimism")]
+        if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction {
+            return Some(from)
+        }
+        let signature_hash = self.signature_hash();
+        recover_signer(&self.signature, signature_hash)
     }

-    fn from_transaction_and_signature(
-        transaction: Self::Transaction,
-        signature: Signature,
-    ) -> Self {
-        Self::from_transaction_and_signature(transaction, signature)
+    fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec<u8>) -> Option<Address>
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + #[cfg(feature = "optimism")] + if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { + return Some(from) + } + self.encode_for_signing(buf); + let signature_hash = keccak256(buf); + recover_signer_unchecked(&self.signature, signature_hash) } +} +impl reth_primitives_traits::FillTxEnv for TransactionSigned { fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { tx_env.caller = sender; match self.as_ref() { @@ -1469,6 +1339,84 @@ impl SignedTransaction for TransactionSigned { } } +impl InMemorySize for TransactionSigned { + /// Calculate a heuristic for the in-memory size of the [`TransactionSigned`]. + #[inline] + fn size(&self) -> usize { + self.hash().size() + self.transaction.size() + self.signature().size() + } +} + +impl alloy_consensus::Transaction for TransactionSigned { + fn chain_id(&self) -> Option { + self.deref().chain_id() + } + + fn nonce(&self) -> u64 { + self.deref().nonce() + } + + fn gas_limit(&self) -> u64 { + self.deref().gas_limit() + } + + fn gas_price(&self) -> Option { + self.deref().gas_price() + } + + fn max_fee_per_gas(&self) -> u128 { + self.deref().max_fee_per_gas() + } + + fn max_priority_fee_per_gas(&self) -> Option { + self.deref().max_priority_fee_per_gas() + } + + fn max_fee_per_blob_gas(&self) -> Option { + self.deref().max_fee_per_blob_gas() + } + + fn priority_fee_or_price(&self) -> u128 { + self.deref().priority_fee_or_price() + } + + fn effective_gas_price(&self, base_fee: Option) -> u128 { + self.deref().effective_gas_price(base_fee) + } + + fn is_dynamic_fee(&self) -> bool { + self.deref().is_dynamic_fee() + } + + fn kind(&self) -> TxKind { + self.deref().kind() + } + + fn value(&self) -> U256 { + self.deref().value() + } + + fn input(&self) -> &Bytes { + self.deref().input() + } + + fn ty(&self) -> u8 { + self.deref().ty() + } + + fn access_list(&self) -> Option<&AccessList> { + self.deref().access_list() + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + alloy_consensus::Transaction::blob_versioned_hashes(self.deref()) + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.deref().authorization_list() + } +} + impl From for TransactionSigned { fn from(recovered: TransactionSignedEcRecovered) -> Self { recovered.signed_transaction @@ -1558,6 +1506,10 @@ impl Encodable2718 for TransactionSigned { fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { self.transaction.eip2718_encode(&self.signature, out) } + + fn trie_hash(&self) -> B256 { + self.hash() + } } impl Decodable2718 for TransactionSigned { @@ -1566,22 +1518,22 @@ impl Decodable2718 for TransactionSigned { TxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), TxType::Eip2930 => { let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash }) + Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash: hash.into() }) } TxType::Eip1559 => { let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash }) + Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash: hash.into() }) } TxType::Eip7702 => { let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash }) + Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash: hash.into() }) } TxType::Eip4844 
=> { let (tx, signature, hash) = TxEip4844::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash }) + Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash: hash.into() }) } #[cfg(feature = "optimism")] - TxType::Deposit => Ok(Self::from_transaction_and_signature( + TxType::Deposit => Ok(Self::new_unhashed( Transaction::Deposit(TxDeposit::rlp_decode(buf)?), TxDeposit::signature(), )), @@ -1593,6 +1545,35 @@ impl Decodable2718 for TransactionSigned { } } +macro_rules! impl_from_signed { + ($($tx:ident),*) => { + $( + impl From> for TransactionSigned { + fn from(value: Signed<$tx>) -> Self { + let(tx,sig,hash) = value.into_parts(); + Self::new(tx.into(), sig, hash) + } + } + )* + }; +} + +impl_from_signed!(TxLegacy, TxEip2930, TxEip1559, TxEip7702, TxEip4844, TypedTransaction); + +impl From> for TransactionSigned { + fn from(value: Signed) -> Self { + let (tx, sig, hash) = value.into_parts(); + Self::new(tx, sig, hash) + } +} + +impl From for Signed { + fn from(value: TransactionSigned) -> Self { + let (tx, sig, hash) = value.into_parts(); + Self::new_unchecked(tx, sig, hash) + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { @@ -1619,57 +1600,53 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { #[cfg(feature = "optimism")] let signature = if transaction.is_deposit() { TxDeposit::signature() } else { signature }; - - Ok(Self::from_transaction_and_signature(transaction, signature)) + Ok(Self::new_unhashed(transaction, signature)) } } /// Signed transaction with recovered signer. #[derive(Debug, Clone, PartialEq, Hash, Eq, AsRef, Deref)] -pub struct TransactionSignedEcRecovered { +pub struct TransactionSignedEcRecovered { /// Signer of the transaction signer: Address, /// Signed transaction #[deref] #[as_ref] - signed_transaction: TransactionSigned, + signed_transaction: T, } // === impl TransactionSignedEcRecovered === -impl TransactionSignedEcRecovered { +impl TransactionSignedEcRecovered { /// Signer of transaction recovered from signature pub const fn signer(&self) -> Address { self.signer } /// Returns a reference to [`TransactionSigned`] - pub const fn as_signed(&self) -> &TransactionSigned { + pub const fn as_signed(&self) -> &T { &self.signed_transaction } /// Transform back to [`TransactionSigned`] - pub fn into_signed(self) -> TransactionSigned { + pub fn into_signed(self) -> T { self.signed_transaction } /// Dissolve Self to its component - pub fn to_components(self) -> (TransactionSigned, Address) { + pub fn to_components(self) -> (T, Address) { (self.signed_transaction, self.signer) } /// Create [`TransactionSignedEcRecovered`] from [`TransactionSigned`] and [`Address`] of the /// signer. #[inline] - pub const fn from_signed_transaction( - signed_transaction: TransactionSigned, - signer: Address, - ) -> Self { + pub const fn from_signed_transaction(signed_transaction: T, signer: Address) -> Self { Self { signed_transaction, signer } } } -impl Encodable for TransactionSignedEcRecovered { +impl Encodable for TransactionSignedEcRecovered { /// This encodes the transaction _with_ the signature, and an rlp header. /// /// Refer to docs for [`TransactionSigned::encode`] for details on the exact format. 
@@ -1682,9 +1659,9 @@ impl Encodable for TransactionSignedEcRecovered { } } -impl Decodable for TransactionSignedEcRecovered { +impl Decodable for TransactionSignedEcRecovered { fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - let signed_transaction = TransactionSigned::decode(buf)?; + let signed_transaction = T::decode(buf)?; let signer = signed_transaction .recover_signer() .ok_or(RlpError::Custom("Unable to recover decoded transaction signer."))?; @@ -1692,59 +1669,24 @@ impl Decodable for TransactionSignedEcRecovered { } } -/// Generic wrapper with encoded Bytes, such as transaction data. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct WithEncoded(Bytes, pub T); - -impl From<(Bytes, T)> for WithEncoded { - fn from(value: (Bytes, T)) -> Self { - Self(value.0, value.1) - } -} - -impl WithEncoded { - /// Wraps the value with the bytes. - pub const fn new(bytes: Bytes, value: T) -> Self { - Self(bytes, value) - } - - /// Get the encoded bytes - pub fn encoded_bytes(&self) -> Bytes { - self.0.clone() - } - - /// Get the underlying value - pub const fn value(&self) -> &T { - &self.1 - } - - /// Returns ownership of the underlying value. - pub fn into_value(self) -> T { - self.1 - } - - /// Transform the value - pub fn transform>(self) -> WithEncoded { - WithEncoded(self.0, self.1.into()) - } - - /// Split the wrapper into [`Bytes`] and value tuple - pub fn split(self) -> (Bytes, T) { - (self.0, self.1) +/// Extension trait for [`SignedTransaction`] to convert it into [`TransactionSignedEcRecovered`]. +pub trait SignedTransactionIntoRecoveredExt: SignedTransaction { + /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] _without + /// ensuring that the signature has a low `s` value_ (EIP-2). + /// + /// Returns `None` if the transaction's signature is invalid. + fn into_ecrecovered_unchecked(self) -> Option> { + let signer = self.recover_signer_unchecked()?; + Some(TransactionSignedEcRecovered::from_signed_transaction(self, signer)) } - /// Maps the inner value to a new value using the given function. - pub fn map U>(self, op: F) -> WithEncoded { - WithEncoded(self.0, op(self.1)) + /// Returns the [`TransactionSignedEcRecovered`] transaction with the given sender. + fn with_signer(self, signer: Address) -> TransactionSignedEcRecovered { + TransactionSignedEcRecovered::from_signed_transaction(self, signer) } } -impl WithEncoded> { - /// returns `None` if the inner value is `None`, otherwise returns `Some(WithEncoded)`. - pub fn transpose(self) -> Option> { - self.1.map(|v| WithEncoded(self.0, v)) - } -} +impl SignedTransactionIntoRecoveredExt for T where T: SignedTransaction {} /// Bincode-compatible transaction type serde implementations. #[cfg(feature = "serde-bincode-compat")] @@ -1858,7 +1800,7 @@ pub mod serde_bincode_compat { impl<'a> From<&'a super::TransactionSigned> for TransactionSigned<'a> { fn from(value: &'a super::TransactionSigned) -> Self { Self { - hash: value.hash, + hash: value.hash(), signature: value.signature, transaction: Transaction::from(&value.transaction), } @@ -1868,7 +1810,7 @@ pub mod serde_bincode_compat { impl<'a> From> for super::TransactionSigned { fn from(value: TransactionSigned<'a>) -> Self { Self { - hash: value.hash, + hash: value.hash.into(), signature: value.signature, transaction: value.transaction.into(), } @@ -1951,11 +1893,42 @@ pub mod serde_bincode_compat { } } +/// Recovers a list of signers from a transaction list iterator. 
+/// +/// Returns `None`, if some transaction's signature is invalid +pub fn recover_signers<'a, I, T>(txes: I, num_txes: usize) -> Option> +where + T: SignedTransaction, + I: IntoParallelIterator + IntoIterator + Send, +{ + if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { + txes.into_iter().map(|tx| tx.recover_signer()).collect() + } else { + txes.into_par_iter().map(|tx| tx.recover_signer()).collect() + } +} + +/// Recovers a list of signers from a transaction list iterator _without ensuring that the +/// signature has a low `s` value_. +/// +/// Returns `None`, if some transaction's signature is invalid. +pub fn recover_signers_unchecked<'a, I, T>(txes: I, num_txes: usize) -> Option> +where + T: SignedTransaction, + I: IntoParallelIterator + IntoIterator + Send, +{ + if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { + txes.into_iter().map(|tx| tx.recover_signer_unchecked()).collect() + } else { + txes.into_par_iter().map(|tx| tx.recover_signer_unchecked()).collect() + } +} + #[cfg(test)] mod tests { use crate::{ transaction::{TxEip1559, TxKind, TxLegacy}, - Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, + Transaction, TransactionSigned, TransactionSignedEcRecovered, }; use alloy_consensus::Transaction as _; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; @@ -1965,6 +1938,7 @@ mod tests { use alloy_rlp::{Decodable, Encodable, Error as RlpError}; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_codecs::Compact; + use reth_primitives_traits::SignedTransaction; use std::str::FromStr; #[test] @@ -2030,13 +2004,15 @@ mod tests { assert_eq!( tx.blob_versioned_hashes(), - Some(vec![ - b256!("012ec3d6f66766bedb002a190126b3549fce0047de0d4c25cffce0dc1c57921a"), - b256!("0152d8e24762ff22b1cfd9f8c0683786a7ca63ba49973818b3d1e9512cd2cec4"), - b256!("013b98c6c83e066d5b14af2b85199e3d4fc7d1e778dd53130d180f5077e2d1c7"), - b256!("01148b495d6e859114e670ca54fb6e2657f0cbae5b08063605093a4b3dc9f8f1"), - b256!("011ac212f13c5dff2b2c6b600a79635103d6f580a4221079951181b25c7e6549"), - ]) + Some( + &[ + b256!("012ec3d6f66766bedb002a190126b3549fce0047de0d4c25cffce0dc1c57921a"), + b256!("0152d8e24762ff22b1cfd9f8c0683786a7ca63ba49973818b3d1e9512cd2cec4"), + b256!("013b98c6c83e066d5b14af2b85199e3d4fc7d1e778dd53130d180f5077e2d1c7"), + b256!("01148b495d6e859114e670ca54fb6e2657f0cbae5b08063605093a4b3dc9f8f1"), + b256!("011ac212f13c5dff2b2c6b600a79635103d6f580a4221079951181b25c7e6549"), + ][..] 
+ ) ); } @@ -2159,9 +2135,9 @@ mod tests { signature: Signature, hash: Option, ) { - let expected = TransactionSigned::from_transaction_and_signature(transaction, signature); + let expected = TransactionSigned::new_unhashed(transaction, signature); if let Some(hash) = hash { - assert_eq!(hash, expected.hash); + assert_eq!(hash, expected.hash()); } assert_eq!(bytes.len(), expected.length()); @@ -2256,7 +2232,7 @@ mod tests { let signature = crate::sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); - TransactionSigned::from_transaction_and_signature(tx, signature) + TransactionSigned::new_unhashed(tx, signature) }).collect(); let parallel_senders = TransactionSigned::recover_signers(&txes, txes.len()).unwrap(); @@ -2338,17 +2314,17 @@ mod tests { input: Bytes::from(input), }); - let tx_signed_no_hash = TransactionSignedNoHash { signature, transaction }; - test_transaction_signed_to_from_compact(tx_signed_no_hash); + let tx = TransactionSigned::new_unhashed(transaction, signature); + test_transaction_signed_to_from_compact(tx); } } - fn test_transaction_signed_to_from_compact(tx_signed_no_hash: TransactionSignedNoHash) { + fn test_transaction_signed_to_from_compact(tx: TransactionSigned) { // zstd aware `to_compact` let mut buff: Vec = Vec::new(); - let written_bytes = tx_signed_no_hash.to_compact(&mut buff); - let (decoded, _) = TransactionSignedNoHash::from_compact(&buff, written_bytes); - assert_eq!(tx_signed_no_hash, decoded); + let written_bytes = tx.to_compact(&mut buff); + let (decoded, _) = TransactionSigned::from_compact(&buff, written_bytes); + assert_eq!(tx, decoded); } #[test] diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 0d48dd5a443b..979a55f2739e 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -1,20 +1,34 @@ //! Defines the types for blob transactions, legacy, and other EIP-2718 transactions included in a //! response to `GetPooledTransactions`. 
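One behavioral change worth calling out before the pooled-transaction diff: `TransactionSigned.hash` is now a `#[serde(skip)]` `OnceLock`, so constructors like `new_unhashed` defer hashing until the first `hash()`/`tx_hash()` call instead of computing it eagerly. A minimal sketch of the caching pattern, with stand-in types and a toy checksum in place of keccak256:

```rust
use std::sync::OnceLock;

struct SignedTx {
    // Stand-in for (transaction, signature); reth hashes the EIP-2718 encoding.
    encoded: Vec<u8>,
    // reth stores OnceLock<TxHash>; u64 keeps the sketch dependency-free.
    hash: OnceLock<u64>,
}

impl SignedTx {
    // Mirrors `TransactionSigned::new_unhashed`: no hashing work up front.
    fn new_unhashed(encoded: Vec<u8>) -> Self {
        Self { encoded, hash: OnceLock::new() }
    }

    // Mirrors `tx_hash`: computed at most once, then served from the cache.
    // The toy byte sum stands in for keccak256(encoded_2718).
    fn tx_hash(&self) -> u64 {
        *self.hash.get_or_init(|| self.encoded.iter().map(|b| *b as u64).sum())
    }
}

fn main() {
    let tx = SignedTx::new_unhashed(vec![1, 2, 3]);
    assert_eq!(tx.tx_hash(), 6); // first call computes and caches
    assert_eq!(tx.tx_hash(), 6); // later calls are a cheap read
}
```

This is also why the diff hand-implements `Hash` and `PartialEq`: hashing uses only the signature and payload, keeping the lazily initialized hash cache out of the identity computation.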
-use super::{error::TransactionConversionError, signature::recover_signer, TxEip7702}; -use crate::{BlobTransaction, Transaction, TransactionSigned, TransactionSignedEcRecovered}; -use alloy_eips::eip4844::BlobTransactionSidecar; - +use super::{ + error::TransactionConversionError, recover_signer_unchecked, signature::recover_signer, + TxEip7702, +}; +use crate::{ + BlobTransaction, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, +}; +use alloc::vec::Vec; use alloy_consensus::{ constants::EIP4844_TX_TYPE_ID, - transaction::{RlpEcdsaTx, TxEip1559, TxEip2930, TxEip4844, TxLegacy}, - SignableTransaction, TxEip4844WithSidecar, + transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, + SignableTransaction, Signed, TxEip4844WithSidecar, +}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Result, Encodable2718}, + eip2930::AccessList, + eip4844::BlobTransactionSidecar, + eip7702::SignedAuthorization, +}; +use alloy_primitives::{ + Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256, }; -use alloy_eips::eip2718::{Decodable2718, Eip2718Result, Encodable2718}; -use alloy_primitives::{Address, PrimitiveSignature as Signature, TxHash, B256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use bytes::Buf; +use core::hash::{Hash, Hasher}; use derive_more::{AsRef, Deref}; +use reth_primitives_traits::{InMemorySize, SignedTransaction}; +use revm_primitives::keccak256; use serde::{Deserialize, Serialize}; /// A response to `GetPooledTransactions`. This can include either a blob transaction, or a @@ -22,42 +36,14 @@ use serde::{Deserialize, Serialize}; #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests)] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub enum PooledTransactionsElement { - /// A legacy transaction - Legacy { - /// The inner transaction - transaction: TxLegacy, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, - /// An EIP-2930 typed transaction - Eip2930 { - /// The inner transaction - transaction: TxEip2930, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, - /// An EIP-1559 typed transaction - Eip1559 { - /// The inner transaction - transaction: TxEip1559, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, - /// An EIP-7702 typed transaction - Eip7702 { - /// The inner transaction - transaction: TxEip7702, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, + /// An untagged [`TxLegacy`]. + Legacy(Signed), + /// A [`TxEip2930`] tagged with type 1. + Eip2930(Signed), + /// A [`TxEip1559`] tagged with type 2. + Eip1559(Signed), + /// A [`TxEip7702`] tagged with type 4. + Eip7702(Signed), /// A blob transaction, which includes the transaction, blob data, commitments, and proofs. BlobTransaction(BlobTransaction), } @@ -69,18 +55,19 @@ impl PooledTransactionsElement { /// [`PooledTransactionsElement`]. Since [`BlobTransaction`] is disallowed to be broadcasted on /// p2p, return an err if `tx` is [`Transaction::Eip4844`]. pub fn try_from_broadcast(tx: TransactionSigned) -> Result { + let hash = tx.hash(); match tx { - TransactionSigned { transaction: Transaction::Legacy(tx), signature, hash } => { - Ok(Self::Legacy { transaction: tx, signature, hash }) + TransactionSigned { transaction: Transaction::Legacy(tx), signature, .. 
} => { + Ok(Self::Legacy(Signed::new_unchecked(tx, signature, hash))) } - TransactionSigned { transaction: Transaction::Eip2930(tx), signature, hash } => { - Ok(Self::Eip2930 { transaction: tx, signature, hash }) + TransactionSigned { transaction: Transaction::Eip2930(tx), signature, .. } => { + Ok(Self::Eip2930(Signed::new_unchecked(tx, signature, hash))) } - TransactionSigned { transaction: Transaction::Eip1559(tx), signature, hash } => { - Ok(Self::Eip1559 { transaction: tx, signature, hash }) + TransactionSigned { transaction: Transaction::Eip1559(tx), signature, .. } => { + Ok(Self::Eip1559(Signed::new_unchecked(tx, signature, hash))) } - TransactionSigned { transaction: Transaction::Eip7702(tx), signature, hash } => { - Ok(Self::Eip7702 { transaction: tx, signature, hash }) + TransactionSigned { transaction: Transaction::Eip7702(tx), signature, .. } => { + Ok(Self::Eip7702(Signed::new_unchecked(tx, signature, hash))) } // Not supported because missing blob sidecar tx @ TransactionSigned { transaction: Transaction::Eip4844(_), .. } => Err(tx), @@ -99,15 +86,16 @@ impl PooledTransactionsElement { tx: TransactionSigned, sidecar: BlobTransactionSidecar, ) -> Result { + let hash = tx.hash(); Ok(match tx { // If the transaction is an EIP-4844 transaction... - TransactionSigned { transaction: Transaction::Eip4844(tx), signature, hash } => { + TransactionSigned { transaction: Transaction::Eip4844(tx), signature, .. } => { // Construct a `PooledTransactionsElement::BlobTransaction` with provided sidecar. - Self::BlobTransaction(BlobTransaction { + Self::BlobTransaction(BlobTransaction(Signed::new_unchecked( + TxEip4844WithSidecar { tx, sidecar }, signature, hash, - transaction: TxEip4844WithSidecar { tx, sidecar }, - }) + ))) } // If the transaction is not EIP-4844, return an error with the original // transaction. @@ -119,44 +107,33 @@ impl PooledTransactionsElement { /// It is only for signature signing or signer recovery. pub fn signature_hash(&self) -> B256 { match self { - Self::Legacy { transaction, .. } => transaction.signature_hash(), - Self::Eip2930 { transaction, .. } => transaction.signature_hash(), - Self::Eip1559 { transaction, .. } => transaction.signature_hash(), - Self::Eip7702 { transaction, .. } => transaction.signature_hash(), - Self::BlobTransaction(blob_tx) => blob_tx.transaction.signature_hash(), + Self::Legacy(tx) => tx.signature_hash(), + Self::Eip2930(tx) => tx.signature_hash(), + Self::Eip1559(tx) => tx.signature_hash(), + Self::Eip7702(tx) => tx.signature_hash(), + Self::BlobTransaction(tx) => tx.signature_hash(), } } /// Reference to transaction hash. Used to identify transaction. pub const fn hash(&self) -> &TxHash { match self { - Self::Legacy { hash, .. } | - Self::Eip2930 { hash, .. } | - Self::Eip1559 { hash, .. } | - Self::Eip7702 { hash, .. } => hash, - Self::BlobTransaction(tx) => &tx.hash, + Self::Legacy(tx) => tx.hash(), + Self::Eip2930(tx) => tx.hash(), + Self::Eip1559(tx) => tx.hash(), + Self::Eip7702(tx) => tx.hash(), + Self::BlobTransaction(tx) => tx.0.hash(), } } /// Returns the signature of the transaction. pub const fn signature(&self) -> &Signature { match self { - Self::Legacy { signature, .. } | - Self::Eip2930 { signature, .. } | - Self::Eip1559 { signature, .. } | - Self::Eip7702 { signature, .. } => signature, - Self::BlobTransaction(blob_tx) => &blob_tx.signature, - } - } - - /// Returns the transaction nonce. - pub const fn nonce(&self) -> u64 { - match self { - Self::Legacy { transaction, .. 
} => transaction.nonce, - Self::Eip2930 { transaction, .. } => transaction.nonce, - Self::Eip1559 { transaction, .. } => transaction.nonce, - Self::Eip7702 { transaction, .. } => transaction.nonce, - Self::BlobTransaction(blob_tx) => blob_tx.transaction.tx.nonce, + Self::Legacy(tx) => tx.signature(), + Self::Eip2930(tx) => tx.signature(), + Self::Eip1559(tx) => tx.signature(), + Self::Eip7702(tx) => tx.signature(), + Self::BlobTransaction(tx) => tx.0.signature(), } } @@ -178,6 +155,18 @@ impl PooledTransactionsElement { } } + /// This encodes the transaction _without_ the signature, and is only suitable for creating a + /// hash intended for signing. + pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { + match self { + Self::Legacy(tx) => tx.tx().encode_for_signing(out), + Self::Eip2930(tx) => tx.tx().encode_for_signing(out), + Self::Eip1559(tx) => tx.tx().encode_for_signing(out), + Self::BlobTransaction(tx) => tx.tx().encode_for_signing(out), + Self::Eip7702(tx) => tx.tx().encode_for_signing(out), + } + } + /// Create [`TransactionSignedEcRecovered`] by converting this transaction into /// [`TransactionSigned`] and [`Address`] of the signer. pub fn into_ecrecovered_transaction(self, signer: Address) -> TransactionSignedEcRecovered { @@ -187,24 +176,10 @@ impl PooledTransactionsElement { /// Returns the inner [`TransactionSigned`]. pub fn into_transaction(self) -> TransactionSigned { match self { - Self::Legacy { transaction, signature, hash } => { - TransactionSigned { transaction: Transaction::Legacy(transaction), signature, hash } - } - Self::Eip2930 { transaction, signature, hash } => TransactionSigned { - transaction: Transaction::Eip2930(transaction), - signature, - hash, - }, - Self::Eip1559 { transaction, signature, hash } => TransactionSigned { - transaction: Transaction::Eip1559(transaction), - signature, - hash, - }, - Self::Eip7702 { transaction, signature, hash } => TransactionSigned { - transaction: Transaction::Eip7702(transaction), - signature, - hash, - }, + Self::Legacy(tx) => tx.into(), + Self::Eip2930(tx) => tx.into(), + Self::Eip1559(tx) => tx.into(), + Self::Eip7702(tx) => tx.into(), Self::BlobTransaction(blob_tx) => blob_tx.into_parts().0, } } @@ -218,7 +193,7 @@ impl PooledTransactionsElement { /// Returns the [`TxLegacy`] variant if the transaction is a legacy transaction. pub const fn as_legacy(&self) -> Option<&TxLegacy> { match self { - Self::Legacy { transaction, .. } => Some(transaction), + Self::Legacy(tx) => Some(tx.tx()), _ => None, } } @@ -226,7 +201,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip2930`] variant if the transaction is an EIP-2930 transaction. pub const fn as_eip2930(&self) -> Option<&TxEip2930> { match self { - Self::Eip2930 { transaction, .. } => Some(transaction), + Self::Eip2930(tx) => Some(tx.tx()), _ => None, } } @@ -234,7 +209,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip1559`] variant if the transaction is an EIP-1559 transaction. pub const fn as_eip1559(&self) -> Option<&TxEip1559> { match self { - Self::Eip1559 { transaction, .. } => Some(transaction), + Self::Eip1559(tx) => Some(tx.tx()), _ => None, } } @@ -242,7 +217,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip4844`] variant if the transaction is an EIP-4844 transaction. 
pub const fn as_eip4844(&self) -> Option<&TxEip4844> { match self { - Self::BlobTransaction(tx) => Some(&tx.transaction.tx), + Self::BlobTransaction(tx) => Some(tx.0.tx().tx()), _ => None, } } @@ -250,7 +225,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip7702`] variant if the transaction is an EIP-7702 transaction. pub const fn as_eip7702(&self) -> Option<&TxEip7702> { match self { - Self::Eip7702 { transaction, .. } => Some(transaction), + Self::Eip7702(tx) => Some(tx.tx()), _ => None, } } @@ -263,43 +238,11 @@ impl PooledTransactionsElement { pub fn blob_gas_used(&self) -> Option<u64> { self.as_eip4844().map(TxEip4844::blob_gas) } +} - /// Max fee per blob gas for eip4844 transaction [`TxEip4844`]. - /// - /// Returns `None` for non-eip4844 transactions. - /// - /// This is also commonly referred to as the "Blob Gas Fee Cap" (`BlobGasFeeCap`). - pub const fn max_fee_per_blob_gas(&self) -> Option<u128> { - match self { - Self::BlobTransaction(tx) => Some(tx.transaction.tx.max_fee_per_blob_gas), - _ => None, - } - } - - /// Max priority fee per gas for eip1559 transaction, for legacy and eip2930 transactions this - /// is `None` - /// - /// This is also commonly referred to as the "Gas Tip Cap" (`GasTipCap`). - pub const fn max_priority_fee_per_gas(&self) -> Option<u128> { - match self { - Self::Legacy { .. } | Self::Eip2930 { .. } => None, - Self::Eip1559 { transaction, .. } => Some(transaction.max_priority_fee_per_gas), - Self::Eip7702 { transaction, .. } => Some(transaction.max_priority_fee_per_gas), - Self::BlobTransaction(tx) => Some(tx.transaction.tx.max_priority_fee_per_gas), - } - } - - /// Max fee per gas for eip1559 transaction, for legacy transactions this is `gas_price`. - /// - /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). - pub const fn max_fee_per_gas(&self) -> u128 { - match self { - Self::Legacy { transaction, .. } => transaction.gas_price, - Self::Eip2930 { transaction, .. } => transaction.gas_price, - Self::Eip1559 { transaction, .. } => transaction.max_fee_per_gas, - Self::Eip7702 { transaction, .. } => transaction.max_fee_per_gas, - Self::BlobTransaction(tx) => tx.transaction.tx.max_fee_per_gas, - } +impl Hash for PooledTransactionsElement { + fn hash<H: core::hash::Hasher>(&self, state: &mut H) { + self.trie_hash().hash(state); } } @@ -387,53 +330,37 @@ impl Decodable for PooledTransactionsElement { impl Encodable2718 for PooledTransactionsElement { fn type_flag(&self) -> Option<u8> { match self { - Self::Legacy { .. } => None, - Self::Eip2930 { .. } => Some(0x01), - Self::Eip1559 { .. } => Some(0x02), - Self::BlobTransaction { .. } => Some(0x03), - Self::Eip7702 { .. } => Some(0x04), + Self::Legacy(_) => None, + Self::Eip2930(_) => Some(0x01), + Self::Eip1559(_) => Some(0x02), + Self::BlobTransaction(_) => Some(0x03), + Self::Eip7702(_) => Some(0x04), } } fn encode_2718_len(&self) -> usize { match self { - Self::Legacy { transaction, signature, .. } => { - transaction.eip2718_encoded_length(signature) - } - Self::Eip2930 { transaction, signature, .. } => { - transaction.eip2718_encoded_length(signature) - } - Self::Eip1559 { transaction, signature, .. } => { - transaction.eip2718_encoded_length(signature) - } - Self::Eip7702 { transaction, signature, .. } => { - transaction.eip2718_encoded_length(signature) - } - Self::BlobTransaction(BlobTransaction { transaction, signature, ..
}) => { - transaction.eip2718_encoded_length(signature) - } + Self::Legacy(tx) => tx.eip2718_encoded_length(), + Self::Eip2930(tx) => tx.eip2718_encoded_length(), + Self::Eip1559(tx) => tx.eip2718_encoded_length(), + Self::Eip7702(tx) => tx.eip2718_encoded_length(), + Self::BlobTransaction(tx) => tx.eip2718_encoded_length(), } } fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { match self { - Self::Legacy { transaction, signature, .. } => { - transaction.eip2718_encode(signature, out) - } - Self::Eip2930 { transaction, signature, .. } => { - transaction.eip2718_encode(signature, out) - } - Self::Eip1559 { transaction, signature, .. } => { - transaction.eip2718_encode(signature, out) - } - Self::Eip7702 { transaction, signature, .. } => { - transaction.eip2718_encode(signature, out) - } - Self::BlobTransaction(BlobTransaction { transaction, signature, .. }) => { - transaction.eip2718_encode(signature, out) - } + Self::Legacy(tx) => tx.eip2718_encode(out), + Self::Eip2930(tx) => tx.eip2718_encode(out), + Self::Eip1559(tx) => tx.eip2718_encode(out), + Self::Eip7702(tx) => tx.eip2718_encode(out), + Self::BlobTransaction(tx) => tx.eip2718_encode(out), } } + + fn trie_hash(&self) -> B256 { + *self.hash() + } } impl Decodable2718 for PooledTransactionsElement { @@ -456,7 +383,7 @@ impl Decodable2718 for PooledTransactionsElement { } tx_type => { let typed_tx = TransactionSigned::typed_decode(tx_type, buf)?; - + let hash = typed_tx.hash(); match typed_tx.transaction { Transaction::Legacy(_) => Err(RlpError::Custom( "legacy transactions should not be a result of typed decoding", ).into()), Transaction::Eip4844(_) => Err(RlpError::Custom( "EIP-4844 transactions can only be decoded with transaction type 0x03", ).into()), - Transaction::Eip2930(tx) => Ok(Self::Eip2930 { - transaction: tx, - signature: typed_tx.signature, - hash: typed_tx.hash, - }), - Transaction::Eip1559(tx) => Ok(Self::Eip1559 { - transaction: tx, - signature: typed_tx.signature, - hash: typed_tx.hash, - }), - Transaction::Eip7702(tx) => Ok(Self::Eip7702 { - transaction: tx, - signature: typed_tx.signature, - hash: typed_tx.hash, - }), + Transaction::Eip2930(tx) => Ok(Self::Eip2930 ( + Signed::new_unchecked(tx, typed_tx.signature, hash) + )), + Transaction::Eip1559(tx) => Ok(Self::Eip1559( Signed::new_unchecked(tx, typed_tx.signature, hash))), + Transaction::Eip7702(tx) => Ok(Self::Eip7702( Signed::new_unchecked(tx, typed_tx.signature, hash))), #[cfg(feature = "optimism")] Transaction::Deposit(_) => Err(RlpError::Custom("Optimism deposit transaction cannot be decoded to PooledTransactionsElement").into()) } @@ -493,7 +410,232 @@ impl Decodable2718 for PooledTransactionsElement { let (transaction, hash, signature) = TransactionSigned::decode_rlp_legacy_transaction_tuple(buf)?; - Ok(Self::Legacy { transaction, signature, hash }) + Ok(Self::Legacy(Signed::new_unchecked(transaction, signature, hash))) + } +} + +impl alloy_consensus::Transaction for PooledTransactionsElement { + fn chain_id(&self) -> Option<ChainId> { + match self { + Self::Legacy(tx) => tx.tx().chain_id(), + Self::Eip2930(tx) => tx.tx().chain_id(), + Self::Eip1559(tx) => tx.tx().chain_id(), + Self::Eip7702(tx) => tx.tx().chain_id(), + Self::BlobTransaction(tx) => tx.tx().chain_id(), + } + } + + fn nonce(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.tx().nonce(), + Self::Eip2930(tx) => tx.tx().nonce(), + Self::Eip1559(tx) => tx.tx().nonce(), + Self::Eip7702(tx) => tx.tx().nonce(), +
Self::BlobTransaction(tx) => tx.tx().nonce(), + } + } + + fn gas_limit(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.tx().gas_limit(), + Self::Eip2930(tx) => tx.tx().gas_limit(), + Self::Eip1559(tx) => tx.tx().gas_limit(), + Self::Eip7702(tx) => tx.tx().gas_limit(), + Self::BlobTransaction(tx) => tx.tx().gas_limit(), + } + } + + fn gas_price(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.tx().gas_price(), + Self::Eip2930(tx) => tx.tx().gas_price(), + Self::Eip1559(tx) => tx.tx().gas_price(), + Self::Eip7702(tx) => tx.tx().gas_price(), + Self::BlobTransaction(tx) => tx.tx().gas_price(), + } + } + + fn max_fee_per_gas(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().max_fee_per_gas(), + Self::Eip2930(tx) => tx.tx().max_fee_per_gas(), + Self::Eip1559(tx) => tx.tx().max_fee_per_gas(), + Self::Eip7702(tx) => tx.tx().max_fee_per_gas(), + Self::BlobTransaction(tx) => tx.tx().max_fee_per_gas(), + } + } + + fn max_priority_fee_per_gas(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip2930(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip1559(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip7702(tx) => tx.tx().max_priority_fee_per_gas(), + Self::BlobTransaction(tx) => tx.tx().max_priority_fee_per_gas(), + } + } + + fn max_fee_per_blob_gas(&self) -> Option<u128> { + match self { + Self::Legacy(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip2930(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip1559(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip7702(tx) => tx.tx().max_fee_per_blob_gas(), + Self::BlobTransaction(tx) => tx.tx().max_fee_per_blob_gas(), + } + } + + fn priority_fee_or_price(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().priority_fee_or_price(), + Self::Eip2930(tx) => tx.tx().priority_fee_or_price(), + Self::Eip1559(tx) => tx.tx().priority_fee_or_price(), + Self::Eip7702(tx) => tx.tx().priority_fee_or_price(), + Self::BlobTransaction(tx) => tx.tx().priority_fee_or_price(), + } + } + + fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip2930(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip1559(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip7702(tx) => tx.tx().effective_gas_price(base_fee), + Self::BlobTransaction(tx) => tx.tx().effective_gas_price(base_fee), + } + } + + fn is_dynamic_fee(&self) -> bool { + match self { + Self::Legacy(tx) => tx.tx().is_dynamic_fee(), + Self::Eip2930(tx) => tx.tx().is_dynamic_fee(), + Self::Eip1559(tx) => tx.tx().is_dynamic_fee(), + Self::Eip7702(tx) => tx.tx().is_dynamic_fee(), + Self::BlobTransaction(tx) => tx.tx().is_dynamic_fee(), + } + } + + fn kind(&self) -> TxKind { + match self { + Self::Legacy(tx) => tx.tx().kind(), + Self::Eip2930(tx) => tx.tx().kind(), + Self::Eip1559(tx) => tx.tx().kind(), + Self::Eip7702(tx) => tx.tx().kind(), + Self::BlobTransaction(tx) => tx.tx().kind(), + } + } + + fn value(&self) -> U256 { + match self { + Self::Legacy(tx) => tx.tx().value(), + Self::Eip2930(tx) => tx.tx().value(), + Self::Eip1559(tx) => tx.tx().value(), + Self::Eip7702(tx) => tx.tx().value(), + Self::BlobTransaction(tx) => tx.tx().value(), + } + } + + fn input(&self) -> &Bytes { + match self { + Self::Legacy(tx) => tx.tx().input(), + Self::Eip2930(tx) => tx.tx().input(), + Self::Eip1559(tx) => tx.tx().input(), + Self::Eip7702(tx) => tx.tx().input(), + Self::BlobTransaction(tx) => tx.tx().input(), + } + } + + fn ty(&self) -> u8 { + match
self { + Self::Legacy(tx) => tx.tx().ty(), + Self::Eip2930(tx) => tx.tx().ty(), + Self::Eip1559(tx) => tx.tx().ty(), + Self::Eip7702(tx) => tx.tx().ty(), + Self::BlobTransaction(tx) => tx.tx().ty(), + } + } + + fn access_list(&self) -> Option<&AccessList> { + match self { + Self::Legacy(tx) => tx.tx().access_list(), + Self::Eip2930(tx) => tx.tx().access_list(), + Self::Eip1559(tx) => tx.tx().access_list(), + Self::Eip7702(tx) => tx.tx().access_list(), + Self::BlobTransaction(tx) => tx.tx().access_list(), + } + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + match self { + Self::Legacy(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip2930(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip1559(tx) => tx.tx().blob_versioned_hashes(), + Self::Eip7702(tx) => tx.tx().blob_versioned_hashes(), + Self::BlobTransaction(tx) => tx.tx().blob_versioned_hashes(), + } + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + match self { + Self::Legacy(tx) => tx.tx().authorization_list(), + Self::Eip2930(tx) => tx.tx().authorization_list(), + Self::Eip1559(tx) => tx.tx().authorization_list(), + Self::Eip7702(tx) => tx.tx().authorization_list(), + Self::BlobTransaction(tx) => tx.tx().authorization_list(), + } + } +}
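With every variant now wrapping a Signed payload, the `alloy_consensus::Transaction` impl above is pure per-variant delegation, so callers can treat any pooled element through the trait alone. A minimal sketch of that usage (the `effective_tip` helper is illustrative, not part of this diff):

use alloy_consensus::Transaction;

// Works for PooledTransactionsElement or any other Transaction impl.
fn effective_tip<T: Transaction>(tx: &T, base_fee: u64) -> u128 {
    // effective_gas_price handles both legacy gas-price and dynamic-fee variants.
    tx.effective_gas_price(Some(base_fee)).saturating_sub(base_fee as u128)
}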
+ +impl SignedTransaction for PooledTransactionsElement { + type Type = TxType; + + fn tx_hash(&self) -> &TxHash { + match self { + Self::Legacy(tx) => tx.hash(), + Self::Eip2930(tx) => tx.hash(), + Self::Eip1559(tx) => tx.hash(), + Self::Eip7702(tx) => tx.hash(), + Self::BlobTransaction(tx) => tx.hash(), + } + } + + fn signature(&self) -> &Signature { + match self { + Self::Legacy(tx) => tx.signature(), + Self::Eip2930(tx) => tx.signature(), + Self::Eip1559(tx) => tx.signature(), + Self::Eip7702(tx) => tx.signature(), + Self::BlobTransaction(tx) => tx.signature(), + } + } + + fn recover_signer(&self) -> Option<Address>
{ + let signature_hash = self.signature_hash(); + recover_signer(self.signature(), signature_hash) + } + + fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec<u8>) -> Option<Address>
{ + self.encode_for_signing(buf); + let signature_hash = keccak256(buf); + recover_signer_unchecked(self.signature(), signature_hash) + } +} + +impl InMemorySize for PooledTransactionsElement { + fn size(&self) -> usize { + match self { + Self::Legacy(tx) => tx.size(), + Self::Eip2930(tx) => tx.size(), + Self::Eip1559(tx) => tx.size(), + Self::Eip7702(tx) => tx.size(), + Self::BlobTransaction(tx) => tx.size(), + } + } +} + +impl From<PooledTransactionsElementEcRecovered> for PooledTransactionsElement { + fn from(recovered: PooledTransactionsElementEcRecovered) -> Self { + recovered.into_transaction() } } @@ -518,63 +660,65 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement { // Attempt to create a `TransactionSigned` with arbitrary data. let tx_signed = TransactionSigned::arbitrary(u)?; // Attempt to create a `PooledTransactionsElement` with arbitrary data, handling the Result. - match Self::try_from(tx_signed) { - Ok(Self::BlobTransaction(mut tx)) => { - // Successfully converted to a BlobTransaction, now generate a sidecar. - tx.transaction.sidecar = alloy_eips::eip4844::BlobTransactionSidecar::arbitrary(u)?; - Ok(Self::BlobTransaction(tx)) + match Self::try_from_broadcast(tx_signed) { + Ok(tx) => Ok(tx), + Err(tx) => { + let (tx, sig, hash) = tx.into_parts(); + match tx { + Transaction::Eip4844(tx) => { + let sidecar = BlobTransactionSidecar::arbitrary(u)?; + Ok(Self::BlobTransaction(BlobTransaction(Signed::new_unchecked( + TxEip4844WithSidecar { tx, sidecar }, + sig, + hash, + )))) + } + _ => Err(arbitrary::Error::IncorrectFormat), + } } - Ok(tx) => Ok(tx), // Successfully converted, but not a BlobTransaction. - Err(_) => Err(arbitrary::Error::IncorrectFormat), /* Conversion failed, return an - * arbitrary error. */ } } } /// A signed pooled transaction with recovered signer. #[derive(Debug, Clone, PartialEq, Eq, AsRef, Deref)] -pub struct PooledTransactionsElementEcRecovered { +pub struct PooledTransactionsElementEcRecovered<T = PooledTransactionsElement> { /// Signer of the transaction signer: Address, /// Signed transaction #[deref] #[as_ref] - transaction: PooledTransactionsElement, + transaction: T, } -// === impl PooledTransactionsElementEcRecovered === +impl<T> PooledTransactionsElementEcRecovered<T> { + /// Create an instance from the given transaction and the [`Address`] of the signer. + pub const fn from_signed_transaction(transaction: T, signer: Address) -> Self { + Self { transaction, signer } + } -impl PooledTransactionsElementEcRecovered { /// Signer of transaction recovered from signature pub const fn signer(&self) -> Address { self.signer } - /// Transform back to [`PooledTransactionsElement`] - pub fn into_transaction(self) -> PooledTransactionsElement { + /// Consume the type and return the transaction + pub fn into_transaction(self) -> T { self.transaction } + /// Dissolve Self to its component + pub fn into_components(self) -> (T, Address) { + (self.transaction, self.signer) + } +} +impl PooledTransactionsElementEcRecovered { /// Transform back to [`TransactionSignedEcRecovered`] pub fn into_ecrecovered_transaction(self) -> TransactionSignedEcRecovered { let (tx, signer) = self.into_components(); tx.into_ecrecovered_transaction(signer) } - /// Dissolve Self to its component - pub fn into_components(self) -> (PooledTransactionsElement, Address) { - (self.transaction, self.signer) - } - - /// Create [`TransactionSignedEcRecovered`] from [`PooledTransactionsElement`] and [`Address`] - /// of the signer.
- pub const fn from_signed_transaction( - transaction: PooledTransactionsElement, - signer: Address, - ) -> Self { - Self { transaction, signer } - } - /// Converts from an EIP-4844 [`TransactionSignedEcRecovered`] to a /// [`PooledTransactionsElementEcRecovered`] with the given sidecar. /// @@ -605,9 +749,28 @@ impl TryFrom<TransactionSignedEcRecovered> for PooledTransactionsElementEcRecovered { } } +impl Encodable2718 for PooledTransactionsElementEcRecovered { + fn type_flag(&self) -> Option<u8> { + self.transaction.type_flag() + } + + fn encode_2718_len(&self) -> usize { + self.transaction.encode_2718_len() + } + + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + self.transaction.encode_2718(out) + } + + fn trie_hash(&self) -> B256 { + self.transaction.trie_hash() + } +} + #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Transaction as _; use alloy_primitives::{address, hex}; use assert_matches::assert_matches; use bytes::Bytes;
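PooledTransactionsElementEcRecovered is generic over the transaction type above (defaulting to PooledTransactionsElement), so pairing a signer with a transaction and taking it apart again needs no concrete enum. A short sketch against the constructors shown in this hunk:

use alloy_primitives::Address;

// T is any transaction type; the wrapper only carries the recovered signer.
fn roundtrip<T>(tx: T, signer: Address) -> (T, Address) {
    let recovered = PooledTransactionsElementEcRecovered::from_signed_transaction(tx, signer);
    debug_assert_eq!(recovered.signer(), signer);
    recovered.into_components()
}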
diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 48a02f4e7405..2cf04bc8e741 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -1,9 +1,10 @@ #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))] use crate::{Transaction, TransactionSigned}; -use alloy_consensus::{transaction::RlpEcdsaTx, TxEip4844WithSidecar}; +use alloy_consensus::{transaction::RlpEcdsaTx, Signed, TxEip4844WithSidecar}; use alloy_eips::eip4844::BlobTransactionSidecar; -use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; +use derive_more::Deref; +use reth_primitives_traits::InMemorySize; use serde::{Deserialize, Serialize}; /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their /// @@ -11,16 +12,8 @@ use serde::{Deserialize, Serialize}; /// /// This is defined in [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#networking) as an element /// of a `PooledTransactions` response. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct BlobTransaction { - /// The transaction hash. - pub hash: TxHash, - /// The transaction signature. - pub signature: Signature, - /// The transaction payload with the sidecar. - #[serde(flatten)] - pub transaction: TxEip4844WithSidecar, -} +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Deref)] +pub struct BlobTransaction(pub Signed<TxEip4844WithSidecar>); impl BlobTransaction { /// Constructs a new [`BlobTransaction`] from a [`TransactionSigned`] and a @@ -31,15 +24,16 @@ impl BlobTransaction { tx: TransactionSigned, sidecar: BlobTransactionSidecar, ) -> Result<Self, (TransactionSigned, BlobTransactionSidecar)> { - let TransactionSigned { transaction, signature, hash } = tx; + let hash = tx.hash(); + let TransactionSigned { transaction, signature, ..
} = tx; match transaction { - Transaction::Eip4844(transaction) => Ok(Self { - hash, - transaction: TxEip4844WithSidecar { tx: transaction, sidecar }, + Transaction::Eip4844(transaction) => Ok(Self(Signed::new_unchecked( + TxEip4844WithSidecar { tx: transaction, sidecar }, signature, - }), + hash, + ))), transaction => { - let tx = TransactionSigned { transaction, signature, hash }; + let tx = TransactionSigned::new(transaction, signature, hash); Err((tx, sidecar)) } } @@ -53,19 +47,16 @@ impl BlobTransaction { &self, proof_settings: &c_kzg::KzgSettings, ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { - self.transaction.validate_blob(proof_settings) + self.tx().validate_blob(proof_settings) } /// Splits the [`BlobTransaction`] into its [`TransactionSigned`] and [`BlobTransactionSidecar`] /// components. pub fn into_parts(self) -> (TransactionSigned, BlobTransactionSidecar) { - let transaction = TransactionSigned { - transaction: Transaction::Eip4844(self.transaction.tx), - hash: self.hash, - signature: self.signature, - }; - - (transaction, self.transaction.sidecar) + let (transaction, signature, hash) = self.0.into_parts(); + let (transaction, sidecar) = transaction.into_parts(); + let transaction = TransactionSigned::new(transaction.into(), signature, hash); + (transaction, sidecar) } /// Decodes a [`BlobTransaction`] from RLP. This expects the encoding to be: @@ -79,8 +70,17 @@ impl BlobTransaction { pub(crate) fn decode_inner(data: &mut &[u8]) -> alloy_rlp::Result<Self> { let (transaction, signature, hash) = TxEip4844WithSidecar::rlp_decode_signed(data)?.into_parts(); + Ok(Self(Signed::new_unchecked(transaction, signature, hash))) + } +} - Ok(Self { transaction, hash, signature }) +impl InMemorySize for BlobTransaction { + fn size(&self) -> usize { + // TODO(mattsse): replace with next alloy bump + self.0.hash().size() + + self.0.signature().size() + + self.0.tx().tx().size() + + self.0.tx().sidecar.size() } } diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index ef4fab0fccb0..6056266ae0fe 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -15,7 +15,7 @@ const SECP256K1N_HALF: U256 = U256::from_be_bytes([ pub(crate) fn decode_with_eip155_chain_id( buf: &mut &[u8], ) -> alloy_rlp::Result<(Signature, Option<u64>)> { - let v: u64 = Decodable::decode(buf)?; + let v = Decodable::decode(buf)?; let r: U256 = Decodable::decode(buf)?; let s: U256 = Decodable::decode(buf)?; @@ -72,6 +72,7 @@ mod tests { }; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, Address, PrimitiveSignature as Signature, B256, U256}; + use reth_primitives_traits::SignedTransaction; use std::str::FromStr; #[test]
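BlobTransaction is now a thin newtype over Signed<TxEip4844WithSidecar>, so construction and destructuring go through the fallible constructor and into_parts rather than public fields. A sketch, assuming the constructor shown in sidecar.rs above is named try_from_signed (the name is not visible in this hunk):

use alloy_eips::eip4844::BlobTransactionSidecar;

fn attach_sidecar(
    tx: TransactionSigned,
    sidecar: BlobTransactionSidecar,
) -> Option<BlobTransaction> {
    match BlobTransaction::try_from_signed(tx, sidecar) {
        // EIP-4844 payload: hash, signature and tx now live behind Signed.
        Ok(blob_tx) => Some(blob_tx),
        // Any other variant: the unchanged tx and sidecar are handed back.
        Err((_tx, _sidecar)) => None,
    }
}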
diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index eff1c17a71a7..784a976ab792 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -4,29 +4,27 @@ use alloy_consensus::constants::{ }; use alloy_primitives::{U64, U8}; use alloy_rlp::{Decodable, Encodable}; +use derive_more::Display; +use reth_primitives_traits::InMemorySize; use serde::{Deserialize, Serialize}; /// Identifier parameter for legacy transaction #[cfg(any(test, feature = "reth-codec"))] -pub(crate) const COMPACT_IDENTIFIER_LEGACY: usize = 0; +pub const COMPACT_IDENTIFIER_LEGACY: usize = 0; /// Identifier parameter for EIP-2930 transaction #[cfg(any(test, feature = "reth-codec"))]
-pub(crate) const COMPACT_IDENTIFIER_EIP2930: usize = 1; +pub const COMPACT_IDENTIFIER_EIP2930: usize = 1; /// Identifier parameter for EIP-1559 transaction #[cfg(any(test, feature = "reth-codec"))] -pub(crate) const COMPACT_IDENTIFIER_EIP1559: usize = 2; +pub const COMPACT_IDENTIFIER_EIP1559: usize = 2; /// For backwards compatibility purposes only 2 bits of the type are encoded in the identifier /// parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type is /// read from the buffer as a single byte. #[cfg(any(test, feature = "reth-codec"))] -pub(crate) const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; - -/// Identifier for [`TxDeposit`](op_alloy_consensus::TxDeposit) transaction. -#[cfg(feature = "optimism")] -pub const DEPOSIT_TX_TYPE_ID: u8 = 126; +pub const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; /// Transaction Type /// @@ -36,24 +34,42 @@ pub const DEPOSIT_TX_TYPE_ID: u8 = 126; /// /// Other required changes when adding a new type can be seen on [PR#3953](https://github.com/paradigmxyz/reth/pull/3953/files). #[derive( - Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Serialize, Deserialize, Hash, + Clone, + Copy, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Default, + Serialize, + Deserialize, + Hash, + Display, )] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] +#[display("tx type: {_variant}")] pub enum TxType { /// Legacy transaction pre EIP-2929 #[default] + #[display("legacy (0)")] Legacy = 0_isize, /// AccessList transaction + #[display("eip2930 (1)")] Eip2930 = 1_isize, /// Transaction with Priority fee + #[display("eip1559 (2)")] Eip1559 = 2_isize, /// Shard Blob Transactions - EIP-4844 + #[display("eip4844 (3)")] Eip4844 = 3_isize, /// EOA Contract Code Transactions - EIP-7702 + #[display("eip7702 (4)")] Eip7702 = 4_isize, /// Optimism Deposit transaction. #[cfg(feature = "optimism")] + #[display("deposit (126)")] Deposit = 126_isize, } @@ -72,6 +88,41 @@ impl TxType { } } +impl reth_primitives_traits::TxType for TxType { + #[inline] + fn is_legacy(&self) -> bool { + matches!(self, Self::Legacy) + } + + #[inline] + fn is_eip2930(&self) -> bool { + matches!(self, Self::Eip2930) + } + + #[inline] + fn is_eip1559(&self) -> bool { + matches!(self, Self::Eip1559) + } + + #[inline] + fn is_eip4844(&self) -> bool { + matches!(self, Self::Eip4844) + } + + #[inline] + fn is_eip7702(&self) -> bool { + matches!(self, Self::Eip7702) + } +} + +impl InMemorySize for TxType { + /// Calculates a heuristic for the in-memory size of the [`TxType`].
+ #[inline] + fn size(&self) -> usize { + core::mem::size_of::<Self>() + } +} + impl From<TxType> for u8 { fn from(value: TxType) -> Self { match value { @@ -81,7 +132,7 @@ impl From<TxType> for u8 { TxType::Eip4844 => EIP4844_TX_TYPE_ID, TxType::Eip7702 => EIP7702_TX_TYPE_ID, #[cfg(feature = "optimism")] - TxType::Deposit => DEPOSIT_TX_TYPE_ID, + TxType::Deposit => op_alloy_consensus::DEPOSIT_TX_TYPE_ID, } } } @@ -140,6 +191,8 @@ impl reth_codecs::Compact for TxType { where B: bytes::BufMut + AsMut<[u8]>, { + use reth_codecs::txtype::*; + match self { Self::Legacy => COMPACT_IDENTIFIER_LEGACY, Self::Eip2930 => COMPACT_IDENTIFIER_EIP2930, @@ -154,7 +207,7 @@ impl reth_codecs::Compact for TxType { } #[cfg(feature = "optimism")] Self::Deposit => { - buf.put_u8(DEPOSIT_TX_TYPE_ID); + buf.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); COMPACT_EXTENDED_IDENTIFIER_FLAG } } @@ -167,16 +220,16 @@ impl reth_codecs::Compact for TxType { use bytes::Buf; ( match identifier { - COMPACT_IDENTIFIER_LEGACY => Self::Legacy, - COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930, - COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559, - COMPACT_EXTENDED_IDENTIFIER_FLAG => { + reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => Self::Legacy, + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930, + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559, + reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => { let extended_identifier = buf.get_u8(); match extended_identifier { EIP4844_TX_TYPE_ID => Self::Eip4844, EIP7702_TX_TYPE_ID => Self::Eip7702, #[cfg(feature = "optimism")] - DEPOSIT_TX_TYPE_ID => Self::Deposit, + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => Self::Deposit, _ => panic!("Unsupported TxType identifier: {extended_identifier}"), } } @@ -217,128 +270,84 @@ impl Decodable for TxType { } } -impl From<alloy_consensus::TxType> for TxType { - fn from(value: alloy_consensus::TxType) -> Self { - match value { - alloy_consensus::TxType::Legacy => Self::Legacy, - alloy_consensus::TxType::Eip2930 => Self::Eip2930, - alloy_consensus::TxType::Eip1559 => Self::Eip1559, - alloy_consensus::TxType::Eip4844 => Self::Eip4844, - alloy_consensus::TxType::Eip7702 => Self::Eip7702, - } - } -} - #[cfg(test)] mod tests { + use super::*; use alloy_primitives::hex; - use rand::Rng; use reth_codecs::Compact; - - use super::*; + use reth_primitives_traits::TxType as _; + use rstest::rstest; #[test] - fn test_u64_to_tx_type() { - // Test for Legacy transaction - assert_eq!(TxType::try_from(U64::from(LEGACY_TX_TYPE_ID)).unwrap(), TxType::Legacy); - - // Test for EIP2930 transaction - assert_eq!(TxType::try_from(U64::from(EIP2930_TX_TYPE_ID)).unwrap(), TxType::Eip2930); - - // Test for EIP1559 transaction - assert_eq!(TxType::try_from(U64::from(EIP1559_TX_TYPE_ID)).unwrap(), TxType::Eip1559); - - // Test for EIP4844 transaction - assert_eq!(TxType::try_from(U64::from(EIP4844_TX_TYPE_ID)).unwrap(), TxType::Eip4844); - - // Test for EIP7702 transaction - assert_eq!(TxType::try_from(U64::from(EIP7702_TX_TYPE_ID)).unwrap(), TxType::Eip7702); - - // Test for Deposit transaction - #[cfg(feature = "optimism")] - assert_eq!(TxType::try_from(U64::from(DEPOSIT_TX_TYPE_ID)).unwrap(), TxType::Deposit); - - // For transactions with unsupported values - assert!(TxType::try_from(U64::from(EIP7702_TX_TYPE_ID + 1)).is_err()); + fn is_broadcastable() { + assert!(TxType::Legacy.is_broadcastable_in_full()); + assert!(TxType::Eip1559.is_broadcastable_in_full()); + assert!(!TxType::Eip4844.is_broadcastable_in_full()); } - #[test] - fn test_txtype_to_compact() { - let cases = vec![
- (TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![]), - (TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![]), - (TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![]), - (TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID]), - (TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID]), - #[cfg(feature = "optimism")] - (TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]), - ]; - - for (tx_type, expected_identifier, expected_buf) in cases { - let mut buf = vec![]; - let identifier = tx_type.to_compact(&mut buf); - assert_eq!( - identifier, expected_identifier, - "Unexpected identifier for TxType {tx_type:?}", - ); - assert_eq!(buf, expected_buf, "Unexpected buffer for TxType {tx_type:?}"); - } + #[rstest] + #[case(U64::from(LEGACY_TX_TYPE_ID), Ok(TxType::Legacy))] + #[case(U64::from(EIP2930_TX_TYPE_ID), Ok(TxType::Eip2930))] + #[case(U64::from(EIP1559_TX_TYPE_ID), Ok(TxType::Eip1559))] + #[case(U64::from(EIP4844_TX_TYPE_ID), Ok(TxType::Eip4844))] + #[case(U64::from(EIP7702_TX_TYPE_ID), Ok(TxType::Eip7702))] + #[cfg_attr( + feature = "optimism", + case(U64::from(op_alloy_consensus::DEPOSIT_TX_TYPE_ID), Ok(TxType::Deposit)) + )] + #[case(U64::MAX, Err("invalid tx type"))] + fn test_u64_to_tx_type(#[case] input: U64, #[case] expected: Result<TxType, &'static str>) { + let tx_type_result = TxType::try_from(input); + assert_eq!(tx_type_result, expected); } - #[test] - fn test_txtype_from_compact() { - let cases = vec![ - (TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![]), - (TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![]), - (TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![]), - (TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID]), - (TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID]), - #[cfg(feature = "optimism")] - (TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]), - ]; - - for (expected_type, identifier, buf) in cases { - let (actual_type, remaining_buf) = TxType::from_compact(&buf, identifier); - assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}",); - assert!( - remaining_buf.is_empty(), - "Buffer not fully consumed for identifier {identifier}", - ); - } + #[rstest] + #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] + #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![op_alloy_consensus::DEPOSIT_TX_TYPE_ID]))] + fn test_txtype_to_compact( + #[case] tx_type: TxType, + #[case] expected_identifier: usize, + #[case] expected_buf: Vec<u8>, + ) { + let mut buf = vec![]; + let identifier = tx_type.to_compact(&mut buf); + + assert_eq!(identifier, expected_identifier, "Unexpected identifier for TxType {tx_type:?}",); + assert_eq!(buf, expected_buf, "Unexpected buffer for TxType {tx_type:?}",); } - #[test] - fn decode_tx_type() { - // Test for Legacy transaction - let tx_type = TxType::decode(&mut &hex!("80")[..]).unwrap(); - assert_eq!(tx_type, TxType::Legacy); - - // Test for EIP2930 transaction - let tx_type = TxType::decode(&mut &[EIP2930_TX_TYPE_ID][..]).unwrap(); - assert_eq!(tx_type, TxType::Eip2930); - - // Test for EIP1559 transaction - let tx_type = TxType::decode(&mut
&[EIP1559_TX_TYPE_ID][..]).unwrap(); - assert_eq!(tx_type, TxType::Eip1559); - - // Test for EIP4844 transaction - let tx_type = TxType::decode(&mut &[EIP4844_TX_TYPE_ID][..]).unwrap(); - assert_eq!(tx_type, TxType::Eip4844); - - // Test for EIP7702 transaction - let tx_type = TxType::decode(&mut &[EIP7702_TX_TYPE_ID][..]).unwrap(); - assert_eq!(tx_type, TxType::Eip7702); - - // Test random byte not in range - let buf = [rand::thread_rng().gen_range(EIP7702_TX_TYPE_ID + 1..=u8::MAX)]; - assert!(TxType::decode(&mut &buf[..]).is_err()); + #[rstest] + #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] + #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![op_alloy_consensus::DEPOSIT_TX_TYPE_ID]))] + fn test_txtype_from_compact( + #[case] expected_type: TxType, + #[case] identifier: usize, + #[case] buf: Vec<u8>, + ) { + let (actual_type, remaining_buf) = TxType::from_compact(&buf, identifier); + + assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}"); + assert!(remaining_buf.is_empty(), "Buffer not fully consumed for identifier {identifier}"); + } - // Test for Deposit transaction - #[cfg(feature = "optimism")] - { - let buf = [DEPOSIT_TX_TYPE_ID]; - let tx_type = TxType::decode(&mut &buf[..]).unwrap(); - assert_eq!(tx_type, TxType::Deposit); - } + #[rstest] + #[case(&hex!("80"), Ok(TxType::Legacy))] + #[case(&[EIP2930_TX_TYPE_ID], Ok(TxType::Eip2930))] + #[case(&[EIP1559_TX_TYPE_ID], Ok(TxType::Eip1559))] + #[case(&[EIP4844_TX_TYPE_ID], Ok(TxType::Eip4844))] + #[case(&[EIP7702_TX_TYPE_ID], Ok(TxType::Eip7702))] + #[case(&[u8::MAX], Err(alloy_rlp::Error::InputTooShort))] + #[cfg_attr(feature = "optimism", case(&[op_alloy_consensus::DEPOSIT_TX_TYPE_ID], Ok(TxType::Deposit)))] + fn decode_tx_type(#[case] input: &[u8], #[case] expected: Result<TxType, alloy_rlp::Error>) { + let tx_type_result = TxType::decode(&mut &input[..]); + assert_eq!(tx_type_result, expected) } }
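The rstest cases above pin the same invariant in both directions; outside the test harness the Compact round trip over the now-public identifier constants looks like this (sketch):

use reth_codecs::Compact;

fn compact_roundtrip(tx_type: TxType) -> TxType {
    let mut buf = vec![];
    // EIP-4844/7702 (and Deposit) return the extended flag as the identifier
    // and push the real type byte into `buf`.
    let identifier = tx_type.to_compact(&mut buf);
    let (decoded, rest) = TxType::from_compact(&buf, identifier);
    assert!(rest.is_empty());
    decoded
}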
diff --git a/crates/primitives/src/transaction/variant.rs b/crates/primitives/src/transaction/variant.rs deleted file mode 100644 index 888c83946cab..000000000000 --- a/crates/primitives/src/transaction/variant.rs +++ /dev/null @@ -1,145 +0,0 @@ -//! Helper enum functions for `Transaction`, `TransactionSigned` and -//! `TransactionSignedEcRecovered` - -use crate::{ - Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, -}; -use alloy_primitives::{Address, B256}; -use core::ops::Deref; - -/// Represents various different transaction formats used in reth. -/// -/// All variants are based on the raw [Transaction] data and can contain additional information -/// extracted (expensive) from that transaction, like the hash and the signer. -#[derive(Debug, Clone, PartialEq, Eq, Hash, derive_more::From)] -pub enum TransactionSignedVariant { - /// A signed transaction without a hash. - SignedNoHash(TransactionSignedNoHash), - /// Contains the plain transaction data its signature and hash. - Signed(TransactionSigned), - /// Contains the plain transaction data its signature and hash and the successfully recovered - /// signer.
- SignedEcRecovered(TransactionSignedEcRecovered), -} - -impl TransactionSignedVariant { - /// Returns the raw transaction object - pub const fn as_raw(&self) -> &Transaction { - match self { - Self::SignedNoHash(tx) => &tx.transaction, - Self::Signed(tx) => &tx.transaction, - Self::SignedEcRecovered(tx) => &tx.signed_transaction.transaction, - } - } - - /// Returns the hash of the transaction - pub fn hash(&self) -> B256 { - match self { - Self::SignedNoHash(tx) => tx.hash(), - Self::Signed(tx) => tx.hash, - Self::SignedEcRecovered(tx) => tx.hash, - } - } - - /// Returns the signer of the transaction. - /// - /// If the transaction is not of [`TransactionSignedEcRecovered`] it will be recovered. - pub fn signer(&self) -> Option<Address>
{ - match self { - Self::SignedNoHash(tx) => tx.recover_signer(), - Self::Signed(tx) => tx.recover_signer(), - Self::SignedEcRecovered(tx) => Some(tx.signer), - } - } - - /// Returns [`TransactionSigned`] type - /// else None - pub const fn as_signed(&self) -> Option<&TransactionSigned> { - match self { - Self::Signed(tx) => Some(tx), - _ => None, - } - } - - /// Returns `TransactionSignedEcRecovered` type - /// else None - pub const fn as_signed_ec_recovered(&self) -> Option<&TransactionSignedEcRecovered> { - match self { - Self::SignedEcRecovered(tx) => Some(tx), - _ => None, - } - } - - /// Returns true if the transaction is of [`TransactionSigned`] variant - pub const fn is_signed(&self) -> bool { - matches!(self, Self::Signed(_)) - } - - /// Returns true if the transaction is of [`TransactionSignedNoHash`] variant - pub const fn is_signed_no_hash(&self) -> bool { - matches!(self, Self::SignedNoHash(_)) - } - - /// Returns true if the transaction is of [`TransactionSignedEcRecovered`] variant - pub const fn is_signed_ec_recovered(&self) -> bool { - matches!(self, Self::SignedEcRecovered(_)) - } - - /// Consumes the [`TransactionSignedVariant`] and returns the consumed [Transaction] - pub fn into_raw(self) -> Transaction { - match self { - Self::SignedNoHash(tx) => tx.transaction, - Self::Signed(tx) => tx.transaction, - Self::SignedEcRecovered(tx) => tx.signed_transaction.transaction, - } - } - - /// Consumes the [`TransactionSignedVariant`] and returns the consumed [`TransactionSigned`] - pub fn into_signed(self) -> TransactionSigned { - match self { - Self::SignedNoHash(tx) => tx.with_hash(), - Self::Signed(tx) => tx, - Self::SignedEcRecovered(tx) => tx.signed_transaction, - } - } - - /// Consumes the [`TransactionSignedVariant`] and converts it into a - /// [`TransactionSignedEcRecovered`] - /// - /// If the variant is not a [`TransactionSignedEcRecovered`] it will recover the sender. - /// - /// Returns `None` if the transaction's signature is invalid - pub fn into_signed_ec_recovered(self) -> Option<TransactionSignedEcRecovered> { - self.try_into_signed_ec_recovered().ok() - } - - /// Consumes the [`TransactionSignedVariant`] and converts it into a - /// [`TransactionSignedEcRecovered`] - /// - /// If the variant is not a [`TransactionSignedEcRecovered`] it will recover the sender. - /// - /// Returns an error if the transaction's signature is invalid.
- pub fn try_into_signed_ec_recovered( - self, - ) -> Result<TransactionSignedEcRecovered, TransactionSigned> { - match self { - Self::SignedEcRecovered(tx) => Ok(tx), - Self::Signed(tx) => tx.try_into_ecrecovered(), - Self::SignedNoHash(tx) => tx.with_hash().try_into_ecrecovered(), - } - } -} - -impl AsRef<Transaction> for TransactionSignedVariant { - fn as_ref(&self) -> &Transaction { - self.as_raw() - } -} - -impl Deref for TransactionSignedVariant { - type Target = Transaction; - - fn deref(&self) -> &Self::Target { - self.as_raw() - } -}
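With variant.rs deleted, call sites that held a TransactionSignedVariant can carry the concrete types directly; signer recovery stays a one-liner through the existing try_into_ecrecovered. A sketch of the equivalent of the removed helper:

fn into_recovered(
    tx: TransactionSigned,
) -> Result<TransactionSignedEcRecovered, TransactionSigned> {
    // Like the deleted try_into_signed_ec_recovered: the error branch
    // returns the transaction unchanged when the signature is invalid.
    tx.try_into_ecrecovered()
}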
diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml index 2f2a37d5ba66..f772ff546691 100644 --- a/crates/prune/prune/Cargo.toml +++ b/crates/prune/prune/Cargo.toml @@ -22,8 +22,13 @@ reth-provider.workspace = true reth-tokio-util.workspace = true reth-config.workspace = true reth-prune-types.workspace = true +reth-primitives-traits.workspace = true reth-static-file-types.workspace = true +# ethereum +alloy-consensus.workspace = true +alloy-eips.workspace = true + # metrics reth-metrics.workspace = true metrics.workspace = true @@ -41,6 +46,7 @@ rustc-hash.workspace = true # reth reth-db = { workspace = true, features = ["test-utils"] } reth-stages = { workspace = true, features = ["test-utils"] } +reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-testing-utils.workspace = true reth-tracing.workspace = true diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index 71d73c416106..4fd56617121a 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -1,11 +1,13 @@ use crate::{segments::SegmentSet, Pruner}; +use alloy_eips::eip2718::Encodable2718; use reth_chainspec::MAINNET; use reth_config::PruneConfig; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; use reth_exex_types::FinishedExExHeight; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, DatabaseProviderFactory, - PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, + NodePrimitivesProvider, PruneCheckpointWriter, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; use std::time::Duration; @@ -76,8 +78,15 @@ impl PrunerBuilder { /// Builds a [Pruner] from the current configuration with the given provider factory. pub fn build_with_provider_factory<PF>(self, provider_factory: PF) -> Pruner<PF::ProviderRW, PF> where - PF: DatabaseProviderFactory - + StaticFileProviderFactory, + PF: DatabaseProviderFactory< + ProviderRW: PruneCheckpointWriter + + BlockReader + + StaticFileProviderFactory< + Primitives: NodePrimitives<SignedTx: Encodable2718, Receipt: Value>, + >, + > + StaticFileProviderFactory< + Primitives = <PF::ProviderRW as NodePrimitivesProvider>::Primitives, + >, { let segments = SegmentSet::from_components(provider_factory.static_file_provider(), self.segments); @@ -93,10 +102,15 @@ impl PrunerBuilder { } /// Builds a [Pruner] from the current configuration with the given static file provider.
- pub fn build<Provider>(self, static_file_provider: StaticFileProvider) -> Pruner<Provider, ()> + pub fn build<Provider>( + self, + static_file_provider: StaticFileProvider<Provider::Primitives>, + ) -> Pruner<Provider, ()> where - Provider: - DBProvider + BlockReader + PruneCheckpointWriter + TransactionsProvider, + Provider: StaticFileProviderFactory<Primitives: NodePrimitives<SignedTx: Encodable2718, Receipt: Value>> + + DBProvider<Tx: DbTxMut> + + BlockReader + + PruneCheckpointWriter, { let segments = SegmentSet::<Provider>::from_components(static_file_provider, self.segments); diff --git a/crates/prune/prune/src/event.rs b/crates/prune/prune/src/event.rs deleted file mode 100644 index 95a90d7628cc..000000000000 --- a/crates/prune/prune/src/event.rs +++ /dev/null @@ -1,16 +0,0 @@ -use alloy_primitives::BlockNumber; -use reth_prune_types::{PruneProgress, PruneSegment}; -use std::time::Duration; - -/// An event emitted by a [Pruner][crate::Pruner]. -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum PrunerEvent { - /// Emitted when pruner started running. - Started { tip_block_number: BlockNumber }, - /// Emitted when pruner finished running. - Finished { - tip_block_number: BlockNumber, - elapsed: Duration, - stats: Vec<(PruneSegment, usize, PruneProgress)>, - }, -} diff --git a/crates/prune/prune/src/lib.rs b/crates/prune/prune/src/lib.rs index 5a43afeb5026..e6bcbe5e8121 100644 --- a/crates/prune/prune/src/lib.rs +++ b/crates/prune/prune/src/lib.rs @@ -12,7 +12,6 @@ mod builder; mod db_ext; mod error; -mod event; mod metrics; mod pruner; pub mod segments; @@ -20,7 +19,6 @@ pub mod segments; use crate::metrics::Metrics; pub use builder::PrunerBuilder; pub use error::PrunerError; -pub use event::PrunerEvent; pub use pruner::{Pruner, PrunerResult, PrunerWithFactory, PrunerWithResult}; // Re-export prune types diff --git a/crates/prune/prune/src/pruner.rs b/crates/prune/prune/src/pruner.rs index d21560cae607..0ad149bb654d 100644 --- a/crates/prune/prune/src/pruner.rs +++ b/crates/prune/prune/src/pruner.rs @@ -9,7 +9,7 @@ use reth_exex_types::FinishedExExHeight; use reth_provider::{ DBProvider, DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter, }; -use reth_prune_types::{PruneLimiter, PruneProgress, PruneSegment, PrunerOutput}; +use reth_prune_types::{PruneLimiter, PruneProgress, PrunedSegmentInfo, PrunerOutput}; use reth_tokio_util::{EventSender, EventStream}; use std::time::{Duration, Instant}; use tokio::sync::watch; @@ -21,8 +21,6 @@ pub type PrunerResult = Result<PrunerOutput, PrunerError>; /// The pruner type itself with the result of [`Pruner::run`] pub type PrunerWithResult<S, PF> = (Pruner<S, PF>, PrunerResult); -type PrunerStats = Vec<(PruneSegment, usize, PruneProgress)>; /// Pruner with preset provider factory. pub type PrunerWithFactory<PF> = Pruner<<PF as DatabaseProviderFactory>::ProviderRW, PF>; @@ -174,14 +172,15 @@ where /// be pruned according to the highest `static_files`. Segments are parts of the database that /// represent one or more tables. /// - /// Returns [`PrunerStats`], total number of entries pruned, and [`PruneProgress`]. + /// Returns a list of stats per pruned segment, total number of entries pruned, and + /// [`PruneProgress`].
fn prune_segments( &mut self, provider: &Provider, tip_block_number: BlockNumber, limiter: &mut PruneLimiter, - ) -> Result<(PrunerStats, usize, PrunerOutput), PrunerError> { - let mut stats = PrunerStats::new(); + ) -> Result<(Vec<PrunedSegmentInfo>, usize, PrunerOutput), PrunerError> { + let mut stats = Vec::with_capacity(self.segments.len()); let mut pruned = 0; let mut output = PrunerOutput { progress: PruneProgress::Finished, @@ -249,7 +248,12 @@ where if segment_output.pruned > 0 { limiter.increment_deleted_entries_count_by(segment_output.pruned); pruned += segment_output.pruned; - stats.push((segment.segment(), segment_output.pruned, segment_output.progress)); + let info = PrunedSegmentInfo { + segment: segment.segment(), + pruned: segment_output.pruned, + progress: segment_output.progress, + }; + stats.push(info); } } else { debug!(target: "pruner", segment = ?segment.segment(), purpose = ?segment.purpose(), "Nothing to prune for the segment"); diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index b3b40aab5b3b..e828512fa824 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -148,6 +148,7 @@ impl PruneInput { mod tests { use super::*; use alloy_primitives::B256; + use reth_primitives_traits::BlockBody; use reth_provider::{ providers::BlockchainProvider2, test_utils::{create_test_provider_factory, MockEthProvider}, @@ -245,7 +246,7 @@ mod tests { // Calculate the total number of transactions let num_txs = - blocks.iter().map(|block| block.body.transactions().count() as u64).sum::<u64>(); + blocks.iter().map(|block| block.body.transactions().len() as u64).sum::<u64>(); assert_eq!(range, 0..=num_txs - 1); } @@ -292,7 +293,7 @@ mod tests { // Calculate the total number of transactions let num_txs = - blocks.iter().map(|block| block.body.transactions().count() as u64).sum::<u64>(); + blocks.iter().map(|block| block.body.transactions().len() as u64).sum::<u64>(); assert_eq!(range, 0..=num_txs - 1,); } @@ -327,7 +328,7 @@ mod tests { // Get the last tx number // Calculate the total number of transactions let num_txs = - blocks.iter().map(|block| block.body.transactions().count() as u64).sum::<u64>(); + blocks.iter().map(|block| block.body.transactions().len() as u64).sum::<u64>(); let max_range = num_txs - 1; // Create a prune input with a previous checkpoint that is the last tx number diff --git a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index c081bf88c7d2..a365738a777d 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -6,10 +6,11 @@ //!
node after static file producer has finished use crate::{db_ext::DbTxPruneExt, segments::PruneInput, PrunerError}; -use reth_db::{tables, transaction::DbTxMut}; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ - errors::provider::ProviderResult, BlockReader, DBProvider, PruneCheckpointWriter, - TransactionsProvider, + errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider, + PruneCheckpointWriter, TransactionsProvider, }; use reth_prune_types::{ PruneCheckpoint, PruneProgress, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, @@ -21,7 +22,10 @@ pub(crate) fn prune( input: PruneInput, ) -> Result<SegmentOutput, PrunerError> where - Provider: DBProvider<Tx: DbTxMut> + TransactionsProvider + BlockReader, + Provider: DBProvider<Tx: DbTxMut> + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider<Primitives: NodePrimitives<Receipt: Value>>, { let tx_range = match input.get_next_tx_num_range(provider)? { Some(range) => range, @@ -35,7 +39,9 @@ where let mut limiter = input.limiter; let mut last_pruned_transaction = tx_range_end; - let (pruned, done) = provider.tx_ref().prune_table_with_range::<tables::Receipts>( + let (pruned, done) = provider.tx_ref().prune_table_with_range::<tables::Receipts< + <Provider::Primitives as NodePrimitives>::Receipt, + >>( tx_range, &mut limiter, |_| false, diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 23d03345b096..d7bbee1042ba 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -2,10 +2,12 @@ use crate::segments::{ AccountHistory, ReceiptsByLogs, Segment, SenderRecovery, StorageHistory, TransactionLookup, UserReceipts, }; -use reth_db::transaction::DbTxMut; +use alloy_eips::eip2718::Encodable2718; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, PruneCheckpointWriter, - TransactionsProvider, + StaticFileProviderFactory, }; use reth_prune_types::PruneModes; @@ -45,12 +47,15 @@ impl<Provider> SegmentSet<Provider> { impl<Provider> SegmentSet<Provider> where - Provider: DBProvider<Tx: DbTxMut> + TransactionsProvider + PruneCheckpointWriter + BlockReader, + Provider: StaticFileProviderFactory<Primitives: NodePrimitives<SignedTx: Encodable2718, Receipt: Value>> + + DBProvider<Tx: DbTxMut> + + PruneCheckpointWriter + + BlockReader, { /// Creates a [`SegmentSet`] from an existing components, such as [`StaticFileProvider`] and /// [`PruneModes`].
pub fn from_components( - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider<Provider::Primitives>, prune_modes: PruneModes, ) -> Self { let PruneModes { diff --git a/crates/prune/prune/src/segments/static_file/headers.rs b/crates/prune/prune/src/segments/static_file/headers.rs index 8700a653b111..5cd6f62643a4 100644 --- a/crates/prune/prune/src/segments/static_file/headers.rs +++ b/crates/prune/prune/src/segments/static_file/headers.rs @@ -12,7 +12,7 @@ use reth_db::{ tables, transaction::DbTxMut, }; -use reth_provider::{providers::StaticFileProvider, DBProvider}; +use reth_provider::{providers::StaticFileProvider, DBProvider, StaticFileProviderFactory}; use reth_prune_types::{ PruneLimiter, PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, @@ -24,17 +24,19 @@ use tracing::trace; const HEADER_TABLES_TO_PRUNE: usize = 3; #[derive(Debug)] -pub struct Headers { - static_file_provider: StaticFileProvider, +pub struct Headers<N> { + static_file_provider: StaticFileProvider<N>, } -impl Headers { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { +impl<N> Headers<N> { + pub const fn new(static_file_provider: StaticFileProvider<N>) -> Self { Self { static_file_provider } } } -impl<Provider: DBProvider<Tx: DbTxMut>> Segment<Provider> for Headers { +impl<Provider: StaticFileProviderFactory + DBProvider<Tx: DbTxMut>> Segment<Provider> + for Headers<Provider::Primitives> +{ fn segment(&self) -> PruneSegment { PruneSegment::Headers } @@ -89,7 +91,7 @@ impl<Provider: DBProvider<Tx: DbTxMut>> Segment<Provider> for Headers { pruned += entries_pruned; } - let done = last_pruned_block.map_or(false, |block| block == block_range_end); + let done = last_pruned_block == Some(block_range_end); let progress = PruneProgress::new(done, &limiter); Ok(SegmentOutput { diff --git a/crates/prune/prune/src/segments/static_file/receipts.rs b/crates/prune/prune/src/segments/static_file/receipts.rs index f766f7ea1d35..6cdc53759904 100644 --- a/crates/prune/prune/src/segments/static_file/receipts.rs +++ b/crates/prune/prune/src/segments/static_file/receipts.rs @@ -2,28 +2,33 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileProvider, BlockReader, DBProvider, - PruneCheckpointWriter, TransactionsProvider, + PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; use reth_static_file_types::StaticFileSegment; #[derive(Debug)] -pub struct Receipts { - static_file_provider: StaticFileProvider, +pub struct Receipts<N> { + static_file_provider: StaticFileProvider<N>, } -impl Receipts { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { +impl<N> Receipts<N> { + pub const fn new(static_file_provider: StaticFileProvider<N>) -> Self { Self { static_file_provider } } } -impl<Provider> Segment<Provider> for Receipts +impl<Provider> Segment<Provider> for Receipts<Provider::Primitives> where - Provider: DBProvider<Tx: DbTxMut> + PruneCheckpointWriter + TransactionsProvider + BlockReader, + Provider: StaticFileProviderFactory<Primitives: NodePrimitives<Receipt: Value>> + + DBProvider<Tx: DbTxMut> + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader, { fn segment(&self) -> PruneSegment { PruneSegment::Receipts diff --git a/crates/prune/prune/src/segments/static_file/transactions.rs b/crates/prune/prune/src/segments/static_file/transactions.rs index 12772af5f880..20274e5dc706 100644 --- a/crates/prune/prune/src/segments/static_file/transactions.rs +++ b/crates/prune/prune/src/segments/static_file/transactions.rs @@ -3,8 +3,12 @@ use
crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::{tables, transaction::DbTxMut}; -use reth_provider::{providers::StaticFileProvider, BlockReader, DBProvider, TransactionsProvider}; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{ + providers::StaticFileProvider, BlockReader, DBProvider, StaticFileProviderFactory, + TransactionsProvider, +}; use reth_prune_types::{ PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, }; @@ -12,19 +16,22 @@ use reth_static_file_types::StaticFileSegment; use tracing::trace; #[derive(Debug)] -pub struct Transactions { - static_file_provider: StaticFileProvider, +pub struct Transactions<N> { + static_file_provider: StaticFileProvider<N>, } -impl Transactions { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { +impl<N> Transactions<N> { + pub const fn new(static_file_provider: StaticFileProvider<N>) -> Self { Self { static_file_provider } } } -impl<Provider> Segment<Provider> for Transactions +impl<Provider> Segment<Provider> for Transactions<Provider::Primitives> where - Provider: DBProvider<Tx: DbTxMut> + TransactionsProvider + BlockReader, + Provider: DBProvider<Tx: DbTxMut> + + TransactionsProvider + + BlockReader + + StaticFileProviderFactory<Primitives: NodePrimitives<SignedTx: Value>>, { fn segment(&self) -> PruneSegment { PruneSegment::Transactions @@ -52,7 +59,9 @@ where let mut limiter = input.limiter; let mut last_pruned_transaction = *tx_range.end(); - let (pruned, done) = provider.tx_ref().prune_table_with_range::<tables::Transactions>( + let (pruned, done) = provider.tx_ref().prune_table_with_range::<tables::Transactions< + <Provider::Primitives as NodePrimitives>::SignedTx, + >>( tx_range, &mut limiter, |_| false, diff --git a/crates/prune/prune/src/segments/user/receipts.rs b/crates/prune/prune/src/segments/user/receipts.rs index 5bc9feaf023d..97708ad6de18 100644 --- a/crates/prune/prune/src/segments/user/receipts.rs +++ b/crates/prune/prune/src/segments/user/receipts.rs @@ -2,10 +2,11 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ - errors::provider::ProviderResult, BlockReader, DBProvider, PruneCheckpointWriter, - TransactionsProvider, + errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider, + PruneCheckpointWriter, TransactionsProvider, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; use tracing::instrument; @@ -23,7 +24,11 @@ impl Receipts { impl<Provider> Segment<Provider> for Receipts where - Provider: DBProvider<Tx: DbTxMut> + PruneCheckpointWriter + TransactionsProvider + BlockReader, + Provider: DBProvider<Tx: DbTxMut> + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider<Primitives: NodePrimitives<Receipt: Value>>, { fn segment(&self) -> PruneSegment { PruneSegment::Receipts diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index ee2accee1b34..778aac1e7b9b 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -3,14 +3,17 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::{tables, transaction::DbTxMut}; -use reth_provider::{BlockReader, DBProvider, PruneCheckpointWriter, TransactionsProvider}; +use alloy_consensus::TxReceipt; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{ + BlockReader, DBProvider, NodePrimitivesProvider, PruneCheckpointWriter,
TransactionsProvider, +}; use reth_prune_types::{ PruneCheckpoint, PruneMode, PruneProgress, PrunePurpose, PruneSegment, ReceiptsLogPruneConfig, SegmentOutput, MINIMUM_PRUNING_DISTANCE, }; use tracing::{instrument, trace}; - #[derive(Debug)] pub struct ReceiptsByLogs { config: ReceiptsLogPruneConfig, @@ -24,7 +27,11 @@ impl ReceiptsByLogs { impl<Provider> Segment<Provider> for ReceiptsByLogs where - Provider: DBProvider<Tx: DbTxMut> + PruneCheckpointWriter + TransactionsProvider + BlockReader, + Provider: DBProvider<Tx: DbTxMut> + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider<Primitives: NodePrimitives<Receipt: Value>>, { fn segment(&self) -> PruneSegment { PruneSegment::ContractLogs @@ -142,12 +149,14 @@ where // Delete receipts, except the ones in the inclusion list let mut last_skipped_transaction = 0; let deleted; - (deleted, done) = provider.tx_ref().prune_table_with_range::<tables::Receipts>( + (deleted, done) = provider.tx_ref().prune_table_with_range::<tables::Receipts< + <Provider::Primitives as NodePrimitives>::Receipt, + >>( tx_range, &mut limiter, |(tx_num, receipt)| { let skip = num_addresses > 0 && - receipt.logs.iter().any(|log| { + receipt.logs().iter().any(|log| { filtered_addresses[..num_addresses].contains(&&log.address) }); @@ -223,6 +232,7 @@ mod tests { use assert_matches::assert_matches; use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; + use reth_primitives_traits::InMemorySize; use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader, TransactionsProvider}; use reth_prune_types::{PruneLimiter, PruneMode, PruneSegment, ReceiptsLogPruneConfig}; use reth_stages::test_utils::{StorageKind, TestStageDB}; diff --git a/crates/prune/prune/src/segments/user/sender_recovery.rs b/crates/prune/prune/src/segments/user/sender_recovery.rs index f189e6c36af4..77bb0a5e2d47 100644 --- a/crates/prune/prune/src/segments/user/sender_recovery.rs +++ b/crates/prune/prune/src/segments/user/sender_recovery.rs @@ -90,6 +90,7 @@ mod tests { Itertools, }; use reth_db::tables; + use reth_primitives_traits::SignedTransaction; use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; use reth_prune_types::{PruneCheckpoint, PruneLimiter, PruneMode, PruneProgress, PruneSegment}; use reth_stages::test_utils::{StorageKind, TestStageDB}; diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index 2df8cccf3056..27f4f5085d2b 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -3,9 +3,10 @@ use crate::{ segments::{PruneInput, Segment, SegmentOutput}, PrunerError, }; +use alloy_eips::eip2718::Encodable2718; use rayon::prelude::*; use reth_db::{tables, transaction::DbTxMut}; -use reth_provider::{BlockReader, DBProvider, TransactionsProvider}; +use reth_provider::{BlockReader, DBProvider}; use reth_prune_types::{ PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutputCheckpoint, }; @@ -24,7 +25,7 @@ impl TransactionLookup { impl<Provider> Segment<Provider> for TransactionLookup where - Provider: DBProvider<Tx: DbTxMut> + TransactionsProvider + BlockReader, + Provider: DBProvider<Tx: DbTxMut> + BlockReader, { fn segment(&self) -> PruneSegment { PruneSegment::TransactionLookup @@ -58,7 +59,7 @@ where let hashes = provider .transactions_by_tx_range(tx_range.clone())?
.into_par_iter() - .map(|transaction| transaction.hash()) + .map(|transaction| transaction.trie_hash()) .collect::<Vec<_>>(); // Number of transactions retrieved from the database should match the tx range count @@ -142,7 +143,7 @@ mod tests { for block in &blocks { tx_hash_numbers.reserve_exact(block.body.transactions.len()); for transaction in &block.body.transactions { - tx_hash_numbers.push((transaction.hash, tx_hash_numbers.len() as u64)); + tx_hash_numbers.push((transaction.hash(), tx_hash_numbers.len() as u64)); } } let tx_hash_numbers_len = tx_hash_numbers.len(); diff --git a/crates/prune/types/src/event.rs b/crates/prune/types/src/event.rs new file mode 100644 index 000000000000..bac5f0d512cc --- /dev/null +++ b/crates/prune/types/src/event.rs @@ -0,0 +1,22 @@ +use crate::PrunedSegmentInfo; +use alloy_primitives::BlockNumber; +use std::time::Duration; + +/// An event emitted by a pruner. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum PrunerEvent { + /// Emitted when pruner started running. + Started { + /// The tip block number before pruning. + tip_block_number: BlockNumber, + }, + /// Emitted when pruner finished running. + Finished { + /// The tip block number before pruning. + tip_block_number: BlockNumber, + /// The elapsed time for the pruning process. + elapsed: Duration, + /// Collected pruning stats. + stats: Vec<PrunedSegmentInfo>, + }, +} diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index 8483b7b73705..82a41f0c2b15 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -9,6 +9,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod checkpoint; +mod event; mod limiter; mod mode; mod pruner; @@ -16,10 +17,12 @@ mod segment; mod target; pub use checkpoint::PruneCheckpoint; +pub use event::PrunerEvent; pub use limiter::PruneLimiter; pub use mode::PruneMode; pub use pruner::{ - PruneInterruptReason, PruneProgress, PrunerOutput, SegmentOutput, SegmentOutputCheckpoint, + PruneInterruptReason, PruneProgress, PrunedSegmentInfo, PrunerOutput, SegmentOutput, + SegmentOutputCheckpoint, }; pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError}; use serde::{Deserialize, Serialize}; diff --git a/crates/prune/types/src/limiter.rs b/crates/prune/types/src/limiter.rs index 3a1059949300..d555db25733b 100644 --- a/crates/prune/types/src/limiter.rs +++ b/crates/prune/types/src/limiter.rs @@ -78,7 +78,7 @@ impl PruneLimiter { /// Returns `true` if the limit on the number of deleted entries (rows in the database) is /// reached. pub fn is_deleted_entries_limit_reached(&self) -> bool { - self.deleted_entries_limit.as_ref().map_or(false, |limit| limit.is_limit_reached()) + self.deleted_entries_limit.as_ref().is_some_and(|limit| limit.is_limit_reached()) } /// Increments the number of deleted entries by the given number. @@ -112,7 +112,7 @@ impl PruneLimiter { /// Returns `true` if time limit is reached. pub fn is_time_limit_reached(&self) -> bool { - self.time_limit.as_ref().map_or(false, |limit| limit.is_limit_reached()) + self.time_limit.as_ref().is_some_and(|limit| limit.is_limit_reached()) } /// Returns `true` if any limit is reached.
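PrunerEvent::Finished now carries structured PrunedSegmentInfo entries instead of bare tuples, and the Display derive added in pruner.rs below gives them a stable rendering. A sketch of consuming the event (the log line shape is illustrative):

fn log_finished(event: &PrunerEvent) {
    if let PrunerEvent::Finished { tip_block_number, elapsed, stats } = event {
        for info in stats {
            // Each entry renders as "(table=<segment>, pruned=<n>, status=<progress>)".
            println!("pruner finished at #{tip_block_number} in {elapsed:?}: {info}");
        }
    }
}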
diff --git a/crates/prune/types/src/pruner.rs b/crates/prune/types/src/pruner.rs index dbfafff639e8..fb9079257298 100644 --- a/crates/prune/types/src/pruner.rs +++ b/crates/prune/types/src/pruner.rs @@ -1,6 +1,6 @@ -use alloy_primitives::{BlockNumber, TxNumber}; - use crate::{PruneCheckpoint, PruneLimiter, PruneMode, PruneSegment}; +use alloy_primitives::{BlockNumber, TxNumber}; +use derive_more::Display; /// Pruner run output. #[derive(Debug)] @@ -17,6 +17,18 @@ impl From for PrunerOutput { } } +/// Represents information of a pruner run for a segment. +#[derive(Debug, Clone, PartialEq, Eq, Display)] +#[display("(table={segment}, pruned={pruned}, status={progress})")] +pub struct PrunedSegmentInfo { + /// The pruned segment + pub segment: PruneSegment, + /// Number of pruned entries + pub pruned: usize, + /// Prune progress + pub progress: PruneProgress, +} + /// Segment pruning output. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct SegmentOutput { @@ -67,16 +79,18 @@ impl SegmentOutputCheckpoint { } /// Progress of pruning. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, Display)] pub enum PruneProgress { /// There is more data to prune. + #[display("HasMoreData({_0})")] HasMoreData(PruneInterruptReason), /// Pruning has been finished. + #[display("Finished")] Finished, } /// Reason for interrupting a prune run. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, Display)] pub enum PruneInterruptReason { /// Prune run timed out. Timeout, diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 3ee680101084..95def23a4432 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -19,6 +19,7 @@ reth-execution-errors.workspace = true reth-prune-types.workspace = true reth-storage-api.workspace = true reth-trie = { workspace = true, optional = true } +reth-primitives-traits.workspace = true # alloy alloy-eips.workspace = true @@ -41,18 +42,22 @@ std = [ "revm/std", "alloy-eips/std", "alloy-consensus/std", + "reth-primitives-traits/std", ] +witness = ["dep:reth-trie"] test-utils = [ "dep:reth-trie", "reth-primitives/test-utils", "reth-trie?/test-utils", "revm/test-utils", - "reth-prune-types/test-utils" + "reth-prune-types/test-utils", + "reth-primitives-traits/test-utils", ] serde = [ "revm/serde", - "reth-trie?/serde", "alloy-eips/serde", "alloy-primitives/serde", "alloy-consensus/serde", + "reth-primitives-traits/serde", + "reth-trie?/serde", ] diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index be3ef0a37821..15ba049250f5 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -1,10 +1,12 @@ //! Helper for handling execution of multiple blocks. use alloc::vec::Vec; + use alloy_eips::eip7685::Requests; use alloy_primitives::{map::HashSet, Address, BlockNumber}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; -use reth_primitives::{Receipt, Receipts}; +use reth_primitives::Receipts; +use reth_primitives_traits::Receipt; use reth_prune_types::{PruneMode, PruneModes, PruneSegmentError, MINIMUM_PRUNING_DISTANCE}; use revm::db::states::bundle_state::BundleRetention; @@ -13,7 +15,7 @@ use revm::db::states::bundle_state::BundleRetention; /// - pruning receipts according to the pruning configuration. /// - batch range if known #[derive(Debug, Default)] -pub struct BlockBatchRecord { +pub struct BlockBatchRecord { /// Pruning configuration. prune_modes: PruneModes, /// The collection of receipts. 
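For reference, a self-contained sketch of how the `derive_more::Display` attributes introduced on `PrunedSegmentInfo` and `PruneProgress` render, using stand-in types and assuming the `derive_more` 1.x attribute syntax shown in the diff:

```rust
use derive_more::Display; // assumes derive_more = "1"

#[derive(Debug, Clone, Copy, Display)]
enum Progress {
    #[display("Finished")]
    Finished,
}

// Field names interpolate directly into the format string.
#[derive(Debug, Display)]
#[display("(table={segment}, pruned={pruned}, status={progress})")]
struct SegmentInfo {
    segment: &'static str,
    pruned: usize,
    progress: Progress,
}

fn main() {
    let info = SegmentInfo { segment: "ContractLogs", pruned: 1024, progress: Progress::Finished };
    assert_eq!(info.to_string(), "(table=ContractLogs, pruned=1024, status=Finished)");
}
```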
@@ -21,7 +23,7 @@ pub struct BlockBatchRecord { /// The inner vector stores receipts ordered by transaction number. /// /// If receipt is None it means it is pruned. - receipts: Receipts, + receipts: Receipts, /// The collection of EIP-7685 requests. /// Outer vector stores requests for each block sequentially. /// The inner vector stores requests ordered by transaction number. @@ -41,9 +43,12 @@ pub struct BlockBatchRecord { tip: Option, } -impl BlockBatchRecord { +impl BlockBatchRecord { /// Create a new receipts recorder with the given pruning configuration. - pub fn new(prune_modes: PruneModes) -> Self { + pub fn new(prune_modes: PruneModes) -> Self + where + T: Default, + { Self { prune_modes, ..Default::default() } } @@ -73,12 +78,15 @@ impl BlockBatchRecord { } /// Returns the recorded receipts. - pub const fn receipts(&self) -> &Receipts { + pub const fn receipts(&self) -> &Receipts { &self.receipts } /// Returns all recorded receipts. - pub fn take_receipts(&mut self) -> Receipts { + pub fn take_receipts(&mut self) -> Receipts + where + T: Default, + { core::mem::take(&mut self.receipts) } @@ -98,11 +106,11 @@ impl BlockBatchRecord { !self .prune_modes .account_history - .map_or(false, |mode| mode.should_prune(block_number, tip)) && + .is_some_and(|mode| mode.should_prune(block_number, tip)) && !self .prune_modes .storage_history - .map_or(false, |mode| mode.should_prune(block_number, tip)) + .is_some_and(|mode| mode.should_prune(block_number, tip)) }) { BundleRetention::Reverts } else { @@ -111,7 +119,10 @@ impl BlockBatchRecord { } /// Save receipts to the executor. - pub fn save_receipts(&mut self, receipts: Vec) -> Result<(), BlockExecutionError> { + pub fn save_receipts(&mut self, receipts: Vec) -> Result<(), BlockExecutionError> + where + T: Receipt, + { let mut receipts = receipts.into_iter().map(Some).collect(); // Prune receipts if necessary. self.prune_receipts(&mut receipts).map_err(InternalBlockExecutionError::from)?; @@ -121,10 +132,10 @@ impl BlockBatchRecord { } /// Prune receipts according to the pruning configuration. - fn prune_receipts( - &mut self, - receipts: &mut Vec>, - ) -> Result<(), PruneSegmentError> { + fn prune_receipts(&mut self, receipts: &mut Vec>) -> Result<(), PruneSegmentError> + where + T: Receipt, + { let (Some(first_block), Some(tip)) = (self.first_block, self.tip) else { return Ok(()) }; let block_number = first_block + self.receipts.len() as u64; @@ -132,7 +143,7 @@ impl BlockBatchRecord { // Block receipts should not be retained if self.prune_modes.receipts == Some(PruneMode::Full) || // [`PruneSegment::Receipts`] takes priority over [`PruneSegment::ContractLogs`] - self.prune_modes.receipts.map_or(false, |mode| mode.should_prune(block_number, tip)) + self.prune_modes.receipts.is_some_and(|mode| mode.should_prune(block_number, tip)) { receipts.clear(); return Ok(()) @@ -161,7 +172,7 @@ impl BlockBatchRecord { // If there is an address_filter, it does not contain any of the // contract addresses, then remove this receipt. 
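The generic `BlockBatchRecord<T>` shape in this hunk follows a common pattern: keep the struct `Default`-derivable for any `T`, and push the `T: Default` / `T: Receipt` bounds down to the individual methods that need them. A minimal sketch with stand-in types (not the reth API):

```rust
use std::mem;

#[derive(Debug, Default)]
struct BatchRecord<T> {
    // Outer vec: blocks; inner vec: receipts by tx number, `None` = pruned.
    receipts: Vec<Vec<Option<T>>>,
}

impl<T> BatchRecord<T> {
    /// Only construction via `Default` needs the `T: Default` bound here.
    fn new() -> Self
    where
        T: Default,
    {
        Self::default()
    }

    /// Takes the recorded receipts, leaving an empty collection behind.
    fn take_receipts(&mut self) -> Vec<Vec<Option<T>>> {
        mem::take(&mut self.receipts)
    }

    /// Records one block's receipts.
    fn save_receipts(&mut self, receipts: Vec<T>) {
        self.receipts.push(receipts.into_iter().map(Some).collect());
    }
}

fn main() {
    let mut record: BatchRecord<String> = BatchRecord::new();
    record.save_receipts(vec!["receipt".to_owned()]);
    assert_eq!(record.take_receipts().len(), 1);
}
```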
let inner_receipt = receipt.as_ref().expect("receipts have not been pruned"); - if !inner_receipt.logs.iter().any(|log| filter.contains(&log.address)) { + if !inner_receipt.logs().iter().any(|log| filter.contains(&log.address)) { receipt.take(); } } @@ -186,7 +197,7 @@ mod tests { #[test] fn test_save_receipts_empty() { - let mut recorder = BlockBatchRecord::default(); + let mut recorder: BlockBatchRecord = BlockBatchRecord::default(); // Create an empty vector of receipts let receipts = vec![]; diff --git a/crates/revm/src/cached.rs b/crates/revm/src/cached.rs index 88a41e1d8957..5d5262adc5b3 100644 --- a/crates/revm/src/cached.rs +++ b/crates/revm/src/cached.rs @@ -4,7 +4,7 @@ use alloy_primitives::{ Address, B256, U256, }; use core::cell::RefCell; -use reth_primitives::revm_primitives::{ +use revm::primitives::{ db::{Database, DatabaseRef}, AccountInfo, Bytecode, }; diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index b06ee816f8d0..5f18a0fe6166 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -29,3 +29,7 @@ pub use revm::{self, *}; /// Either type for flexible usage of different database types in the same context. pub mod either; + +/// Helper types for execution witness generation. +#[cfg(feature = "witness")] +pub mod witness; diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 813997c72d11..443d1d5ebcf2 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -11,8 +11,8 @@ use reth_storage_api::{ }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, - TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, StorageProof, TrieInput, }; /// Mock state for testing @@ -112,6 +112,15 @@ impl StorageRootProvider for StateProviderTest { ) -> ProviderResult { unimplemented!("proof generation is not supported") } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult<StorageMultiProof> { + unimplemented!("proof generation is not supported") + } } impl StateProofProvider for StateProviderTest { diff --git a/crates/revm/src/witness.rs b/crates/revm/src/witness.rs new file mode 100644 index 000000000000..c40c87d324b3 --- /dev/null +++ b/crates/revm/src/witness.rs @@ -0,0 +1,76 @@ +use alloy_primitives::{keccak256, map::B256HashMap, Bytes, B256}; +use reth_trie::{HashedPostState, HashedStorage}; +use revm::State; + +/// Tracks state changes during execution. +#[derive(Debug, Clone, Default)] +pub struct ExecutionWitnessRecord { + /// Records all state changes + pub hashed_state: HashedPostState, + /// Map of all contract codes (created / accessed) to their preimages that were required during + /// the execution of the block, including during state root recomputation. + /// + /// `keccak(bytecodes) => bytecodes` + pub codes: B256HashMap<Bytes>, + /// Map of all hashed account and storage keys (addresses and slots) to their preimages + /// (unhashed account addresses and storage slots, respectively) that were required during + /// the execution of the block. + /// + /// `keccak(address|slot) => address|slot` + pub keys: B256HashMap<Bytes>, +} + +impl ExecutionWitnessRecord { + /// Records the state after execution.
+ pub fn record_executed_state(&mut self, statedb: &State) { + self.codes = statedb + .cache + .contracts + .iter() + .map(|(hash, code)| (*hash, code.original_bytes())) + .chain( + // cache state does not have all the contracts, especially when + // a contract is created within the block; + // the contract only exists in bundle state, therefore we need + // to include them as well + statedb + .bundle_state + .contracts + .iter() + .map(|(hash, code)| (*hash, code.original_bytes())), + ) + .collect(); + + for (address, account) in &statedb.cache.accounts { + let hashed_address = keccak256(address); + self.hashed_state + .accounts + .insert(hashed_address, account.account.as_ref().map(|a| a.info.clone().into())); + + let storage = self + .hashed_state + .storages + .entry(hashed_address) + .or_insert_with(|| HashedStorage::new(account.status.was_destroyed())); + + if let Some(account) = &account.account { + self.keys.insert(hashed_address, address.to_vec().into()); + + for (slot, value) in &account.storage { + let slot = B256::from(*slot); + let hashed_slot = keccak256(slot); + storage.storage.insert(hashed_slot, *value); + + self.keys.insert(hashed_slot, slot.into()); + } + } + } + } + + /// Creates the record from the state after execution. + pub fn from_executed_state(state: &State) -> Self { + let mut record = Self::default(); + record.record_executed_state(state); + record + } +} diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 1b857d4a11f2..28ed9af5c134 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -136,8 +136,7 @@ pub trait DebugApi { /// to their preimages that were required during the execution of the block, including during /// state root recomputation. /// - /// The first argument is the block number or block hash. The second argument is a boolean - /// indicating whether to include the preimages of keys in the response. + /// The first argument is the block number or block hash. #[method(name = "executionWitness")] async fn debug_execution_witness(&self, block: BlockNumberOrTag) -> RpcResult; @@ -386,3 +385,26 @@ pub trait DebugApi { #[method(name = "writeMutexProfile")] async fn debug_write_mutex_profile(&self, file: String) -> RpcResult<()>; } + +/// An extension to the `debug_` namespace that provides additional methods for retrieving +/// witnesses. +/// +/// This is separate from the regular `debug_` api, because this depends on the network-specific +/// params. For optimism this will expect the optimism-specific payload attributes. +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "debug"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "debug"))] +pub trait DebugExecutionWitnessApi { + /// The `debug_executePayload` method allows for re-execution of a group of transactions with + /// the purpose of generating an execution witness. The witness comprises a map of all + /// hashed trie nodes to their preimages that were required during the execution of the block, + /// including during state root recomputation. + /// + /// The first argument is the parent block hash. The second argument is the payload + /// attributes for the new block.
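At its core, the `keys` bookkeeping recorded above is a hash-to-preimage map. A stand-alone sketch of that idea using `alloy_primitives` (not the reth type; a plain `HashMap` stands in for `B256HashMap`):

```rust
use alloy_primitives::{keccak256, Address, Bytes, B256};
use std::collections::HashMap;

fn main() {
    // `keccak(address|slot) => address|slot`, as in the `keys` field above.
    let mut keys: HashMap<B256, Bytes> = HashMap::new();

    let address = Address::ZERO;
    let hashed_address = keccak256(address);
    keys.insert(hashed_address, Bytes::copy_from_slice(address.as_slice()));

    let slot = B256::with_last_byte(1);
    let hashed_slot = keccak256(slot);
    keys.insert(hashed_slot, Bytes::copy_from_slice(slot.as_slice()));

    // A witness consumer can now recover the unhashed key from its hash.
    assert_eq!(keys[&hashed_address].as_ref(), address.as_slice());
}
```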
+ #[method(name = "executePayload", blocking)] + fn execute_payload( + &self, + parent_block_hash: B256, + attributes: Attributes, + ) -> RpcResult; +} diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 73775112dcf5..ac39b4802a87 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -37,7 +37,7 @@ pub use servers::*; pub mod servers { pub use crate::{ admin::AdminApiServer, - debug::DebugApiServer, + debug::{DebugApiServer, DebugExecutionWitnessApiServer}, engine::{EngineApiServer, EngineEthApiServer}, mev::{MevFullApiServer, MevSimApiServer}, net::NetApiServer, @@ -65,7 +65,7 @@ pub mod clients { pub use crate::{ admin::AdminApiClient, anvil::AnvilApiClient, - debug::DebugApiClient, + debug::{DebugApiClient, DebugExecutionWitnessApiClient}, engine::{EngineApiClient, EngineEthApiClient}, ganache::GanacheApiClient, hardhat::HardhatApiClient, diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index b6ae86c74083..a0712d617b66 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -19,6 +19,7 @@ reth-consensus.workspace = true reth-network-api.workspace = true reth-node-core.workspace = true reth-provider.workspace = true +reth-primitives.workspace = true reth-rpc.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true @@ -29,7 +30,8 @@ reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true -reth-primitives.workspace = true + +alloy-consensus.workspace = true # rpc/net jsonrpsee = { workspace = true, features = ["server"] } @@ -63,6 +65,7 @@ reth-rpc-engine-api.workspace = true reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-rpc-types-compat.workspace = true +reth-primitives.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 40acecfedf33..59b3ef870fe2 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,5 +1,6 @@ +use alloy_consensus::Header; use reth_evm::ConfigureEvm; -use reth_primitives::Header; +use reth_primitives::EthPrimitives; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider, StateProviderFactory}; use reth_rpc::{EthFilter, EthPubSub}; use reth_rpc_eth_api::EthApiTypes; @@ -27,10 +28,15 @@ pub struct EthHandlers { impl EthHandlers where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + + BlockReader + + EvmEnvProvider + + Clone + + Unpin + + 'static, Pool: Send + Sync + Clone + 'static, Network: Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EthApi: EthApiTypes + 'static, { /// Returns a new instance with handlers for `eth` namespace. diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 385b92af3d0b..8f5c84835aaa 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -16,9 +16,10 @@ //! Configure only an http server with a selection of [`RethRpcModule`]s //! //! ``` +//! use alloy_consensus::Header; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::Header; +//! 
use reth_primitives::TransactionSigned; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_builder::{ @@ -36,10 +37,16 @@ //! block_executor: BlockExecutor, //! consensus: Consensus, //! ) where -//! Provider: FullRpcProvider + AccountReader + ChangeSetReader, +//! Provider: FullRpcProvider< +//! Transaction = TransactionSigned, +//! Block = reth_primitives::Block, +//! Receipt = reth_primitives::Receipt, +//! > + AccountReader +//! + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, -//! Events: CanonStateSubscriptions + Clone + 'static, +//! Events: +//! CanonStateSubscriptions + Clone + 'static, //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, //! Consensus: reth_consensus::Consensus + Clone + 'static, @@ -73,10 +80,11 @@ //! //! //! ``` +//! use alloy_consensus::Header; //! use reth_engine_primitives::EngineTypes; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::Header; +//! use reth_primitives::TransactionSigned; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_api::EngineApiServer; @@ -109,10 +117,16 @@ //! block_executor: BlockExecutor, //! consensus: Consensus, //! ) where -//! Provider: FullRpcProvider + AccountReader + ChangeSetReader, +//! Provider: FullRpcProvider< +//! Transaction = TransactionSigned, +//! Block = reth_primitives::Block, +//! Receipt = reth_primitives::Receipt, +//! > + AccountReader +//! + ChangeSetReader, //! Pool: TransactionPool + Unpin + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, -//! Events: CanonStateSubscriptions + Clone + 'static, +//! Events: +//! CanonStateSubscriptions + Clone + 'static, //! EngineApi: EngineApiServer, //! EngineT: EngineTypes, //! EvmConfig: ConfigureEvm
, @@ -166,6 +180,8 @@ use std::{ time::{Duration, SystemTime, UNIX_EPOCH}, }; +use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; +use alloy_consensus::Header; use error::{ConflictingModules, RpcError, ServerKind}; use eth::DynEthApiBuilder; use http::{header::AUTHORIZATION, HeaderMap}; @@ -182,10 +198,10 @@ use reth_consensus::Consensus; use reth_engine_primitives::EngineTypes; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; -use reth_primitives::Header; +use reth_primitives::EthPrimitives; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - EvmEnvProvider, FullRpcProvider, StateProviderFactory, + EvmEnvProvider, FullRpcProvider, ReceiptProvider, StateProviderFactory, }; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthBundle, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, @@ -197,15 +213,13 @@ use reth_rpc_eth_api::{ EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcReceipt, RpcTransaction, }; use reth_rpc_eth_types::{EthConfig, EthStateCache, EthSubscriptionIdProvider}; -use reth_rpc_layer::{AuthLayer, Claims, JwtAuthValidator, JwtSecret}; +use reth_rpc_layer::{AuthLayer, Claims, CompressionLayer, JwtAuthValidator, JwtSecret}; use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner, TokioTaskExecutor}; use reth_transaction_pool::{noop::NoopTransactionPool, TransactionPool}; use serde::{Deserialize, Serialize}; use tower::Layer; use tower_http::cors::CorsLayer; -use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; - pub use cors::CorsDomainError; // re-export for convenience @@ -255,12 +269,14 @@ pub async fn launch, ) -> Result where - Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Provider: FullRpcProvider + + AccountReader + + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm
, + Events: CanonStateSubscriptions + Clone + 'static, + EvmConfig: ConfigureEvm
, EthApi: FullEthApiServer, BlockExecutor: BlockExecutorProvider, { @@ -612,7 +628,7 @@ where Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EvmConfig: ConfigureEvm
, BlockExecutor: BlockExecutorProvider, Consensus: reth_consensus::Consensus + Clone + 'static, @@ -638,6 +654,10 @@ where EngineT: EngineTypes, EngineApi: EngineApiServer, EthApi: FullEthApiServer, + Provider: BlockReader< + Block = ::Block, + Receipt = ::Receipt, + >, { let Self { provider, @@ -680,11 +700,11 @@ where /// # Example /// /// ```no_run + /// use alloy_consensus::Header; /// use reth_consensus::noop::NoopConsensus; /// use reth_evm::ConfigureEvm; /// use reth_evm_ethereum::execute::EthExecutorProvider; /// use reth_network_api::noop::NoopNetwork; - /// use reth_primitives::Header; /// use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; /// use reth_rpc::EthApi; /// use reth_rpc_builder::RpcModuleBuilder; @@ -713,6 +733,7 @@ where ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, + Provider: BlockReader, { let Self { provider, @@ -747,6 +768,10 @@ where ) -> TransportRpcModules<()> where EthApi: FullEthApiServer, + Provider: BlockReader< + Block = ::Block, + Receipt = ::Receipt, + >, { let mut modules = TransportRpcModules::default(); @@ -904,10 +929,15 @@ pub struct RpcRegistryInner< impl RpcRegistryInner where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + + BlockReader + + EvmEnvProvider + + Clone + + Unpin + + 'static, Pool: Send + Sync + Clone + 'static, Network: Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, EthApi: EthApiTypes + 'static, BlockExecutor: BlockExecutorProvider, @@ -1109,6 +1139,10 @@ where pub fn register_debug(&mut self) -> &mut Self where EthApi: EthApiSpec + EthTransactions + TraceExt, + Provider: BlockReader< + Block = ::Block, + Receipt = reth_primitives::Receipt, + >, { let debug_api = self.debug_api(); self.modules.insert(RethRpcModule::Debug, debug_api.into_rpc().into()); @@ -1123,6 +1157,7 @@ where pub fn register_trace(&mut self) -> &mut Self where EthApi: TraceExt, + Provider: BlockReader::Block>, { let trace_api = self.trace_api(); self.modules.insert(RethRpcModule::Trace, trace_api.into_rpc().into()); @@ -1253,6 +1288,7 @@ where Arc::new(self.consensus.clone()), self.block_executor.clone(), self.config.flashbots.clone(), + Box::new(self.executor.clone()), ) } } @@ -1260,11 +1296,15 @@ where impl RpcRegistryInner where - Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Provider: FullRpcProvider< + Block = ::Block, + Receipt = ::Receipt, + > + AccountReader + + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, EthApi: FullEthApiServer, BlockExecutor: BlockExecutorProvider, Consensus: reth_consensus::Consensus + Clone + 'static, @@ -1417,6 +1457,7 @@ where Arc::new(self.consensus.clone()), self.block_executor.clone(), self.config.flashbots.clone(), + Box::new(self.executor.clone()), ) .into_rpc() .into(), @@ -1647,6 +1688,12 @@ impl RpcServerConfig { jwt_secret.map(|secret| AuthLayer::new(JwtAuthValidator::new(secret))) } + /// Returns a [`CompressionLayer`] that adds compression support (gzip, deflate, brotli, zstd) + /// based on the client's `Accept-Encoding` header + fn maybe_compression_layer() -> Option { + Some(CompressionLayer::new()) + } + /// Builds and starts the configured server(s): http, ws, ipc. 
/// /// If both http and ws are on the same port, they are combined into one server. @@ -1711,7 +1758,8 @@ impl RpcServerConfig { .set_http_middleware( tower::ServiceBuilder::new() .option_layer(Self::maybe_cors_layer(cors)?) - .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), + .option_layer(Self::maybe_jwt_layer(self.jwt_secret)) + .option_layer(Self::maybe_compression_layer()), ) .set_rpc_middleware( self.rpc_middleware.clone().layer( @@ -1783,8 +1831,9 @@ impl RpcServerConfig { .http_only() .set_http_middleware( tower::ServiceBuilder::new() - .option_layer(Self::maybe_cors_layer(self.http_cors_domains.clone())?) - .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), + .option_layer(Self::maybe_cors_layer(self.ws_cors_domains.clone())?) + .option_layer(Self::maybe_jwt_layer(self.jwt_secret)) + .option_layer(Self::maybe_compression_layer()), ) .set_rpc_middleware( self.rpc_middleware.clone().layer( @@ -1927,6 +1976,26 @@ impl TransportRpcModuleConfig { self.config.as_ref() } + /// Returns true if the given module is configured for any transport. + pub fn contains_any(&self, module: &RethRpcModule) -> bool { + self.contains_http(module) || self.contains_ws(module) || self.contains_ipc(module) + } + + /// Returns true if the given module is configured for the http transport. + pub fn contains_http(&self, module: &RethRpcModule) -> bool { + self.http.as_ref().is_some_and(|http| http.contains(module)) + } + + /// Returns true if the given module is configured for the ws transport. + pub fn contains_ws(&self, module: &RethRpcModule) -> bool { + self.ws.as_ref().is_some_and(|ws| ws.contains(module)) + } + + /// Returns true if the given module is configured for the ipc transport. + pub fn contains_ipc(&self, module: &RethRpcModule) -> bool { + self.ipc.as_ref().is_some_and(|ipc| ipc.contains(module)) + } + /// Ensures that both http and ws are configured and that they are configured to use the same /// port. fn ensure_ws_http_identical(&self) -> Result<(), WsHttpSamePortError> { @@ -1972,6 +2041,29 @@ impl TransportRpcModules { &self.config } + /// Merge the given [`Methods`] in all configured transport modules if the given + /// [`RethRpcModule`] is configured for the transport. + /// + /// Fails if any of the methods in other is present already. + pub fn merge_if_module_configured( + &mut self, + module: RethRpcModule, + other: impl Into, + ) -> Result<(), RegisterMethodError> { + let other = other.into(); + if self.module_config().contains_http(&module) { + self.merge_http(other.clone())?; + } + if self.module_config().contains_ws(&module) { + self.merge_ws(other.clone())?; + } + if self.module_config().contains_ipc(&module) { + self.merge_ipc(other)?; + } + + Ok(()) + } + /// Merge the given [Methods] in the configured http methods. /// /// Fails if any of the methods in other is present already. 
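The new `contains_http`/`contains_ws`/`contains_ipc` helpers and `merge_if_module_configured` reduce to a simple per-transport gate. A stand-alone sketch of that control flow with stand-in types (not the reth API; `&'static str` stands in for `RethRpcModule`, and the merged method bundle is elided):

```rust
use std::collections::HashSet;

#[derive(Default)]
struct TransportConfig {
    http: Option<HashSet<&'static str>>,
    ws: Option<HashSet<&'static str>>,
    ipc: Option<HashSet<&'static str>>,
}

impl TransportConfig {
    fn contains_http(&self, module: &str) -> bool {
        self.http.as_ref().is_some_and(|m| m.contains(module))
    }
    fn contains_ws(&self, module: &str) -> bool {
        self.ws.as_ref().is_some_and(|m| m.contains(module))
    }
    fn contains_ipc(&self, module: &str) -> bool {
        self.ipc.as_ref().is_some_and(|m| m.contains(module))
    }
    fn contains_any(&self, module: &str) -> bool {
        self.contains_http(module) || self.contains_ws(module) || self.contains_ipc(module)
    }
}

fn main() {
    // A module enabled only for http is merged only into the http transport.
    let config = TransportConfig { http: Some(HashSet::from(["debug"])), ..Default::default() };
    assert!(config.contains_http("debug"));
    assert!(!config.contains_ws("debug"));
    assert!(config.contains_any("debug"));
}
```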
diff --git a/crates/rpc/rpc-builder/tests/it/auth.rs b/crates/rpc/rpc-builder/tests/it/auth.rs index 71e8bf39f9ea..390ea7d6ba40 100644 --- a/crates/rpc/rpc-builder/tests/it/auth.rs +++ b/crates/rpc/rpc-builder/tests/it/auth.rs @@ -5,7 +5,7 @@ use alloy_primitives::U64; use alloy_rpc_types_engine::{ForkchoiceState, PayloadId, TransitionConfiguration}; use jsonrpsee::core::client::{ClientT, SubscriptionClientT}; use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_primitives::Block; +use reth_primitives::{Block, BlockExt}; use reth_rpc_api::clients::EngineApiClient; use reth_rpc_layer::JwtSecret; use reth_rpc_types_compat::engine::payload::{ diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index b5faa71cc5e5..8393d9427a6b 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -278,7 +278,13 @@ where .await .unwrap(); EthApiClient::::syncing(client).await.unwrap(); - EthApiClient::::send_transaction(client, transaction_request) + EthApiClient::::send_transaction( + client, + transaction_request.clone(), + ) + .await + .unwrap_err(); + EthApiClient::::sign_transaction(client, transaction_request) .await .unwrap_err(); EthApiClient::::hashrate(client).await.unwrap(); @@ -318,12 +324,6 @@ where .err() .unwrap() )); - assert!(is_unimplemented( - EthApiClient::::sign_transaction(client, call_request.clone()) - .await - .err() - .unwrap() - )); } async fn test_basic_debug_calls(client: &C) diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 62d1eea32254..4854ac44dc59 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -19,6 +19,7 @@ reth-rpc-api.workspace = true reth-storage-api.workspace = true reth-beacon-consensus.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-rpc-types-compat.workspace = true diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index a017c50678f0..1062363eafb8 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -75,7 +75,11 @@ struct EngineApiInner EngineApi where - Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, + Provider: HeaderProvider + + BlockReader + + StateProviderFactory + + EvmEnvProvider + + 'static, EngineT: EngineTypes, Pool: TransactionPool + 'static, Validator: EngineValidator, @@ -153,6 +157,20 @@ where .inspect(|_| self.inner.on_new_payload_response())?) } + /// Metered version of `new_payload_v1`. + async fn new_payload_v1_metered( + &self, + payload: ExecutionPayloadV1, + ) -> EngineApiResult { + let start = Instant::now(); + let gas_used = payload.gas_used; + let res = Self::new_payload_v1(self, payload).await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v1.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + res + } + /// See also pub async fn new_payload_v2( &self, @@ -174,6 +192,20 @@ where .inspect(|_| self.inner.on_new_payload_response())?) } + /// Metered version of `new_payload_v2`. 
+ pub async fn new_payload_v2_metered( + &self, + payload: ExecutionPayloadInputV2, + ) -> EngineApiResult { + let start = Instant::now(); + let gas_used = payload.execution_payload.gas_used; + let res = Self::new_payload_v2(self, payload).await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v2.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + res + } + /// See also pub async fn new_payload_v3( &self, payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: B256, @@ -205,6 +237,23 @@ .inspect(|_| self.inner.on_new_payload_response())?) } + // Metered version of `new_payload_v3`. + async fn new_payload_v3_metered( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> RpcResult { + let start = Instant::now(); + let gas_used = payload.payload_inner.payload_inner.gas_used; + let res = + Self::new_payload_v3(self, payload, versioned_hashes, parent_beacon_block_root).await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v3.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + Ok(res?) + } + /// See also pub async fn new_payload_v4( &self, @@ -237,6 +286,30 @@ .inspect(|_| self.inner.on_new_payload_response())?) } + /// Metered version of `new_payload_v4`. + async fn new_payload_v4_metered( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + execution_requests: Requests, + ) -> RpcResult { + let start = Instant::now(); + let gas_used = payload.payload_inner.payload_inner.gas_used; + let res = Self::new_payload_v4( + self, + payload, + versioned_hashes, + parent_beacon_block_root, + execution_requests, + ) + .await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v4.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + Ok(res?) + } + /// Sends a message to the beacon consensus engine to update the fork choice _without_ /// withdrawals. /// @@ -418,7 +491,7 @@ f: F, ) -> EngineApiResult>> where - F: Fn(Block) -> R + Send + 'static, + F: Fn(Provider::Block) -> R + Send + 'static, R: Send + 'static, { let (tx, rx) = oneshot::channel(); @@ -666,7 +739,11 @@ impl EngineApiServer for EngineApi where - Provider: HeaderProvider + BlockReader + StateProviderFactory + EvmEnvProvider + 'static, + Provider: HeaderProvider + + BlockReader + + StateProviderFactory + + EvmEnvProvider + + 'static, EngineT: EngineTypes, Pool: TransactionPool + 'static, Validator: EngineValidator, { /// Handler for `engine_newPayloadV1` /// See also /// Caution: This should not accept the `withdrawals` field async fn new_payload_v1(&self, payload: ExecutionPayloadV1) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV1"); - let start = Instant::now(); - let gas_used = payload.gas_used; - let res = Self::new_payload_v1(self, payload).await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v1.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self.new_payload_v1_metered(payload).await?)
} /// Handler for `engine_newPayloadV2` /// See also async fn new_payload_v2(&self, payload: ExecutionPayloadInputV2) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV2"); - let start = Instant::now(); - let gas_used = payload.execution_payload.gas_used; - let res = Self::new_payload_v2(self, payload).await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v2.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self.new_payload_v2_metered(payload).await?) } /// Handler for `engine_newPayloadV3` @@ -708,14 +773,7 @@ where parent_beacon_block_root: B256, ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV3"); - let start = Instant::now(); - let gas_used = payload.payload_inner.payload_inner.gas_used; - let res = - Self::new_payload_v3(self, payload, versioned_hashes, parent_beacon_block_root).await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v3.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self.new_payload_v3_metered(payload, versioned_hashes, parent_beacon_block_root).await?) } /// Handler for `engine_newPayloadV4` @@ -728,20 +786,14 @@ where execution_requests: Requests, ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV4"); - let start = Instant::now(); - let gas_used = payload.payload_inner.payload_inner.gas_used; - let res = Self::new_payload_v4( - self, - payload, - versioned_hashes, - parent_beacon_block_root, - execution_requests, - ) - .await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v4.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self + .new_payload_v4_metered( + payload, + versioned_hashes, + parent_beacon_block_root, + execution_requests, + ) + .await?) 
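The refactor above moves each handler's timing boilerplate into a `*_metered` sibling so the `EngineApiServer` impl becomes a one-line delegation. A minimal stand-alone sketch of that wrapper pattern (assumed names; `record` stands in for the metrics sink):

```rust
use std::time::{Duration, Instant};

// Stand-in for `self.inner.metrics.latency...record(elapsed)`.
fn record(method: &'static str, elapsed: Duration) {
    println!("{method}: {elapsed:?}");
}

// The inner, unmetered handler.
fn new_payload(gas_used: u64) -> Result<u64, &'static str> {
    Ok(gas_used)
}

// The `*_metered` sibling wraps the inner call and records latency,
// regardless of whether the inner call succeeded.
fn new_payload_metered(gas_used: u64) -> Result<u64, &'static str> {
    let start = Instant::now();
    let res = new_payload(gas_used);
    record("engine_newPayloadV1", start.elapsed());
    res
}

fn main() {
    assert_eq!(new_payload_metered(21_000), Ok(21_000));
}
```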
} /// Handler for `engine_forkchoiceUpdatedV1` @@ -979,8 +1031,9 @@ mod tests { use super::*; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use assert_matches::assert_matches; - use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; + use reth_beacon_consensus::BeaconConsensusEngineEvent; use reth_chainspec::{ChainSpec, MAINNET}; + use reth_engine_primitives::BeaconEngineMessage; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::SealedBlock; diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 677bd2fb246d..4210d415bfed 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -2,8 +2,10 @@ use alloy_primitives::{B256, U256}; use jsonrpsee_types::error::{ INTERNAL_ERROR_CODE, INVALID_PARAMS_CODE, INVALID_PARAMS_MSG, SERVER_ERROR_MSG, }; -use reth_beacon_consensus::{BeaconForkChoiceUpdateError, BeaconOnNewPayloadError}; -use reth_payload_primitives::{EngineObjectValidationError, PayloadBuilderError}; +use reth_beacon_consensus::BeaconForkChoiceUpdateError; +use reth_engine_primitives::BeaconOnNewPayloadError; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::EngineObjectValidationError; use thiserror::Error; /// The Engine API result type diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index f341fd0474c4..78b0351d4a5c 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -1,7 +1,7 @@ //! Some payload tests use alloy_eips::eip4895::Withdrawals; -use alloy_primitives::{Bytes, Sealable, U256}; +use alloy_primitives::{Bytes, U256}; use alloy_rlp::{Decodable, Error as RlpError}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadSidecar, ExecutionPayloadV1, @@ -24,10 +24,8 @@ fn transform_block Block>(src: SealedBlock, f: F) -> Executi transformed.header.transactions_root = proofs::calculate_transaction_root(&transformed.body.transactions); transformed.header.ommers_hash = proofs::calculate_ommers_root(&transformed.body.ommers); - let sealed = transformed.header.seal_slow(); - let (header, seal) = sealed.into_parts(); block_to_payload(SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(transformed.header), body: transformed.body, }) } diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 421c10f8b41d..6500c3049781 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -501,7 +501,8 @@ where trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionByHash"); Ok(EthTransactions::transaction_by_hash(self, hash) .await? - .map(|tx| tx.into_transaction(self.tx_resp_builder()))) + .map(|tx| tx.into_transaction(self.tx_resp_builder())) + .transpose()?) } /// Handler for: `eth_getRawTransactionByBlockHashAndIndex` @@ -624,6 +625,7 @@ where block_number: Option, ) -> RpcResult>>> { trace!(target: "rpc::eth", ?block_number, "Serving eth_simulateV1"); + let _permit = self.tracing_task_guard().clone().acquire_owned().await; Ok(EthCall::simulate_v1(self, payload, block_number).await?) 
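The `_permit` guard acquired in `eth_simulateV1` above follows tokio's owned-permit pattern: a shared semaphore bounds concurrent expensive calls, and the permit is released when the guard drops. A minimal sketch, assuming a plain `tokio::sync::Semaphore` (the actual guard type behind `tracing_task_guard` may differ):

```rust
use std::sync::Arc;
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() {
    // Bound the number of concurrent expensive calls to 2.
    let guard = Arc::new(Semaphore::new(2));

    // Held for the scope of the handler; released when `_permit` drops.
    let _permit = guard.clone().acquire_owned().await.expect("semaphore closed");
    assert_eq!(guard.available_permits(), 1);
    // ... run the simulation while holding the permit ...
}
```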
} @@ -779,8 +781,9 @@ where } /// Handler for: `eth_signTransaction` - async fn sign_transaction(&self, _transaction: TransactionRequest) -> RpcResult { - Err(internal_rpc_err("unimplemented")) + async fn sign_transaction(&self, request: TransactionRequest) -> RpcResult { + trace!(target: "rpc::eth", ?request, "Serving eth_signTransaction"); + Ok(EthTransactions::sign_transaction(self, request).await?) } /// Handler for: `eth_signTypedData` diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index e25ea84d699c..cce0aa01b01a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -2,21 +2,34 @@ use std::sync::Arc; +use alloy_consensus::BlockHeader; use alloy_eips::BlockId; use alloy_rpc_types_eth::{Block, Header, Index}; use futures::Future; -use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders}; -use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; +use reth_node_api::BlockBody; +use reth_primitives::{SealedBlockFor, SealedBlockWithSenders}; +use reth_provider::{ + BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider, ProviderReceipt, +}; use reth_rpc_types_compat::block::from_block; -use crate::{node::RpcNodeCoreExt, FromEthApiError, FullEthApiTypes, RpcBlock, RpcReceipt}; +use crate::{ + node::RpcNodeCoreExt, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcBlock, RpcNodeCore, + RpcReceipt, +}; use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; /// Result type of the fetched block receipts. pub type BlockReceiptsResult = Result>>, E>; /// Result type of the fetched block and its receipts. -pub type BlockAndReceiptsResult = Result>)>, E>; +pub type BlockAndReceiptsResult = Result< + Option<( + SealedBlockFor<<::Provider as BlockReader>::Block>, + Arc::Provider>>>, + )>, + ::Error, +>; /// Block related functions for the [`EthApiServer`](crate::EthApiServer) trait in the /// `eth_` namespace. @@ -49,7 +62,7 @@ pub trait EthBlocks: LoadBlock { let block_hash = block.hash(); let mut total_difficulty = self .provider() - .header_td_by_number(block.number) + .header_td_by_number(block.number()) .map_err(Self::Error::from_eth_err)?; if total_difficulty.is_none() { // if we failed to find td after we successfully loaded the block, try again using @@ -64,8 +77,7 @@ pub trait EthBlocks: LoadBlock { full.into(), Some(block_hash), self.tx_resp_builder(), - ) - .map_err(Self::Error::from_eth_err)?; + )?; Ok(Some(block)) } } @@ -84,7 +96,7 @@ pub trait EthBlocks: LoadBlock { .provider() .pending_block() .map_err(Self::Error::from_eth_err)? - .map(|block| block.body.transactions.len())) + .map(|block| block.body.transactions().len())) } let block_hash = match self @@ -121,7 +133,7 @@ pub trait EthBlocks: LoadBlock { fn load_block_and_receipts( &self, block_id: BlockId, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadReceipt, { @@ -164,7 +176,7 @@ pub trait EthBlocks: LoadBlock { fn ommers( &self, block_id: BlockId, - ) -> Result>, Self::Error> { + ) -> Result>, Self::Error> { self.provider().ommers_by_id(block_id).map_err(Self::Error::from_eth_err) } @@ -199,10 +211,16 @@ pub trait EthBlocks: LoadBlock { /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. pub trait LoadBlock: LoadPendingBlock + SpawnBlocking + RpcNodeCoreExt { /// Returns the block object for the given block id. 
+ #[expect(clippy::type_complexity)] fn block_with_senders( &self, block_id: BlockId, - ) -> impl Future>, Self::Error>> + Send { + ) -> impl Future< + Output = Result< + Option::Block>>>, + Self::Error, + >, + > + Send { async move { if block_id.is_pending() { // Pending block can be fetched directly without need for caching diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index ef29f8070261..f9441f0630ab 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -1,11 +1,12 @@ //! Loads a pending block from database. Helper trait for `eth_` transaction, call and trace RPC //! methods. +use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace}; use crate::{ - AsEthApiError, FromEthApiError, FromEvmError, FullEthApiTypes, IntoEthApiError, RpcBlock, - RpcNodeCore, + helpers::estimate::EstimateCall, FromEthApiError, FromEvmError, FullEthApiTypes, + IntoEthApiError, RpcBlock, RpcNodeCore, }; -use alloy_consensus::BlockHeader; +use alloy_consensus::{BlockHeader, Header}; use alloy_eips::{eip1559::calc_next_block_base_fee, eip2930::AccessListResult}; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use alloy_rpc_types_eth::{ @@ -15,17 +16,19 @@ use alloy_rpc_types_eth::{ BlockId, Bundle, EthCallResponse, StateContext, TransactionInfo, }; use futures::Future; -use reth_chainspec::{EthChainSpec, MIN_TRANSACTION_GAS}; +use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{ - revm_primitives::{ - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, HaltReason, - ResultAndState, TransactTo, TxEnv, +use reth_node_api::BlockBody; +use reth_primitives::TransactionSigned; +use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; +use reth_revm::{ + database::StateProviderDatabase, + db::CacheDB, + primitives::{ + BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, ResultAndState, TxEnv, }, - Header, TransactionSigned, + DatabaseRef, }; -use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider, StateProvider}; -use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; use reth_rpc_eth_types::{ cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, error::ensure_success, @@ -36,19 +39,16 @@ use reth_rpc_eth_types::{ simulate::{self, EthSimulateError}, EthApiError, RevertError, RpcInvalidTransactionError, StateCacheDb, }; -use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO}; use revm::{Database, DatabaseCommit, GetInspector}; use revm_inspectors::{access_list::AccessListInspector, transfer::TransferInspector}; use tracing::trace; -use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace}; - /// Result type for `eth_simulateV1` RPC method. pub type SimulatedBlocksResult = Result>>, E>; /// Execution related functions for the [`EthApiServer`](crate::EthApiServer) trait in /// the `eth_` namespace. -pub trait EthCall: Call + LoadPendingBlock { +pub trait EthCall: EstimateCall + Call + LoadPendingBlock { /// Estimate gas needed for execution of the `request` at the [`BlockId`]. 
fn estimate_gas_at( &self, @@ -56,7 +56,7 @@ pub trait EthCall: Call + LoadPendingBlock { at: BlockId, state_override: Option, ) -> impl Future> + Send { - Call::estimate_gas_at(self, request, at, state_override) + EstimateCall::estimate_gas_at(self, request, at, state_override) } /// `eth_simulateV1` executes an arbitrary number of transactions on top of the requested state. @@ -201,7 +201,6 @@ pub trait EthCall: Call + LoadPendingBlock { parent_hash, total_difficulty, return_full_transactions, - &db, this.tx_resp_builder(), )?; @@ -279,14 +278,15 @@ pub trait EthCall: Call + LoadPendingBlock { // we're essentially replaying the transactions in the block here, hence we need the // state that points to the beginning of the block, which is the state at // the parent block - let mut at = block.parent_hash; + let mut at = block.parent_hash(); let mut replay_block_txs = true; - let num_txs = transaction_index.index().unwrap_or(block.body.transactions.len()); + let num_txs = + transaction_index.index().unwrap_or_else(|| block.body.transactions().len()); // but if all transactions are to be replayed, we can use the state at the block itself, // however only if we're not targeting the pending block, because for pending we can't // rely on the block's state being available - if !is_block_target_pending && num_txs == block.body.transactions.len() { + if !is_block_target_pending && num_txs == block.body.transactions().len() { at = block.hash(); replay_block_txs = false; } @@ -629,7 +629,7 @@ pub trait Call: LoadState> + SpawnBlocking { cfg.clone(), block_env.clone(), block_txs, - tx.hash, + tx.hash(), )?; let env = EnvWithHandlerCfg::new_with_cfg_env( @@ -683,284 +683,6 @@ pub trait Call: LoadState> + SpawnBlocking { Ok(index) } - /// Estimate gas needed for execution of the `request` at the [`BlockId`]. - fn estimate_gas_at( - &self, - request: TransactionRequest, - at: BlockId, - state_override: Option, - ) -> impl Future> + Send - where - Self: LoadPendingBlock, - { - async move { - let (cfg, block_env, at) = self.evm_env_at(at).await?; - - self.spawn_blocking_io(move |this| { - let state = this.state_at_block_id(at)?; - this.estimate_gas_with(cfg, block_env, request, state, state_override) - }) - .await - } - } - - /// Estimates the gas usage of the `request` with the state. - /// - /// This will execute the [`TransactionRequest`] and find the best gas limit via binary search. 
- /// - /// ## EVM settings - /// - /// This modifies certain EVM settings to mirror geth's `SkipAccountChecks` when transacting requests, see also: : - /// - /// - `disable_eip3607` is set to `true` - /// - `disable_base_fee` is set to `true` - /// - `nonce` is set to `None` - fn estimate_gas_with( - &self, - mut cfg: CfgEnvWithHandlerCfg, - block: BlockEnv, - mut request: TransactionRequest, - state: S, - state_override: Option, - ) -> Result - where - S: StateProvider, - { - // Disabled because eth_estimateGas is sometimes used with eoa senders - // See - cfg.disable_eip3607 = true; - - // The basefee should be ignored for eth_estimateGas and similar - // See: - // - cfg.disable_base_fee = true; - - // set nonce to None so that the correct nonce is chosen by the EVM - request.nonce = None; - - // Keep a copy of gas related request values - let tx_request_gas_limit = request.gas; - let tx_request_gas_price = request.gas_price; - // the gas limit of the corresponding block - let block_env_gas_limit = block.gas_limit; - - // Determine the highest possible gas limit, considering both the request's specified limit - // and the block's limit. - let mut highest_gas_limit = tx_request_gas_limit - .map(|tx_gas_limit| U256::from(tx_gas_limit).max(block_env_gas_limit)) - .unwrap_or(block_env_gas_limit); - - // Configure the evm env - let mut env = self.build_call_evm_env(cfg, block, request)?; - let mut db = CacheDB::new(StateProviderDatabase::new(state)); - - // Apply any state overrides if specified. - if let Some(state_override) = state_override { - apply_state_overrides(state_override, &mut db).map_err(Self::Error::from_eth_err)?; - } - - // Optimize for simple transfer transactions, potentially reducing the gas estimate. - if env.tx.data.is_empty() { - if let TransactTo::Call(to) = env.tx.transact_to { - if let Ok(code) = db.db.account_code(to) { - let no_code_callee = code.map(|code| code.is_empty()).unwrap_or(true); - if no_code_callee { - // If the tx is a simple transfer (call to an account with no code) we can - // shortcircuit. But simply returning - // `MIN_TRANSACTION_GAS` is dangerous because there might be additional - // field combos that bump the price up, so we try executing the function - // with the minimum gas limit to make sure. - let mut env = env.clone(); - env.tx.gas_limit = MIN_TRANSACTION_GAS; - if let Ok((res, _)) = self.transact(&mut db, env) { - if res.result.is_success() { - return Ok(U256::from(MIN_TRANSACTION_GAS)) - } - } - } - } - } - } - - // Check funds of the sender (only useful to check if transaction gas price is more than 0). - // - // The caller allowance is check by doing `(account.balance - tx.value) / tx.gas_price` - if env.tx.gas_price > U256::ZERO { - // cap the highest gas limit by max gas caller can afford with given gas price - highest_gas_limit = highest_gas_limit - .min(caller_gas_allowance(&mut db, &env.tx).map_err(Self::Error::from_eth_err)?); - } - - // We can now normalize the highest gas limit to a u64 - let mut highest_gas_limit: u64 = highest_gas_limit - .try_into() - .unwrap_or_else(|_| self.provider().chain_spec().max_gas_limit()); - - // If the provided gas limit is less than computed cap, use that - env.tx.gas_limit = env.tx.gas_limit.min(highest_gas_limit); - - trace!(target: "rpc::eth::estimate", ?env, "Starting gas estimation"); - - // Execute the transaction with the highest possible gas limit. 
- let (mut res, mut env) = match self.transact(&mut db, env.clone()) { - // Handle the exceptional case where the transaction initialization uses too much gas. - // If the gas price or gas limit was specified in the request, retry the transaction - // with the block's gas limit to determine if the failure was due to - // insufficient gas. - Err(err) - if err.is_gas_too_high() && - (tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) => - { - return Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) - } - // Propagate other results (successful or other errors). - ethres => ethres?, - }; - - let gas_refund = match res.result { - ExecutionResult::Success { gas_refunded, .. } => gas_refunded, - ExecutionResult::Halt { reason, gas_used } => { - // here we don't check for invalid opcode because already executed with highest gas - // limit - return Err(RpcInvalidTransactionError::halt(reason, gas_used).into_eth_err()) - } - ExecutionResult::Revert { output, .. } => { - // if price or limit was included in the request then we can execute the request - // again with the block's gas limit to check if revert is gas related or not - return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() { - Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) - } else { - // the transaction did revert - Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()) - } - } - }; - - // At this point we know the call succeeded but want to find the _best_ (lowest) gas the - // transaction succeeds with. We find this by doing a binary search over the possible range. - - // we know the tx succeeded with the configured gas limit, so we can use that as the - // highest, in case we applied a gas cap due to caller allowance above - highest_gas_limit = env.tx.gas_limit; - - // NOTE: this is the gas the transaction used, which is less than the - // transaction requires to succeed. - let mut gas_used = res.result.gas_used(); - // the lowest value is capped by the gas used by the unconstrained transaction - let mut lowest_gas_limit = gas_used.saturating_sub(1); - - // As stated in Geth, there is a good chance that the transaction will pass if we set the - // gas limit to the execution gas used plus the gas refund, so we check this first - // 1 { - // An estimation error is allowed once the current gas limit range used in the binary - // search is small enough (less than 1.5% of the highest gas limit) - // { - // Decrease the highest gas limit if gas is too high - highest_gas_limit = mid_gas_limit; - } - Err(err) if err.is_gas_too_low() => { - // Increase the lowest gas limit if gas is too low - lowest_gas_limit = mid_gas_limit; - } - // Handle other cases, including successful transactions. - ethres => { - // Unpack the result and environment if the transaction was successful. - (res, env) = ethres?; - // Update the estimated gas range based on the transaction result. 
- update_estimated_gas_range( - res.result, - mid_gas_limit, - &mut highest_gas_limit, - &mut lowest_gas_limit, - )?; - } - } - - // New midpoint - mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64; - } - - Ok(U256::from(highest_gas_limit)) - } - - /// Executes the requests again after an out of gas error to check if the error is gas related - /// or not - #[inline] - fn map_out_of_gas_err( - &self, - env_gas_limit: U256, - mut env: EnvWithHandlerCfg, - db: &mut DB, - ) -> Self::Error - where - DB: Database, - EthApiError: From, - { - let req_gas_limit = env.tx.gas_limit; - env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX); - let (res, _) = match self.transact(db, env) { - Ok(res) => res, - Err(err) => return err, - }; - match res.result { - ExecutionResult::Success { .. } => { - // transaction succeeded by manually increasing the gas limit to - // highest, which means the caller lacks funds to pay for the tx - RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into_eth_err() - } - ExecutionResult::Revert { output, .. } => { - // reverted again after bumping the limit - RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err() - } - ExecutionResult::Halt { reason, .. } => { - RpcInvalidTransactionError::EvmHalt(reason).into_eth_err() - } - } - } - /// Configures a new [`TxEnv`] for the [`TransactionRequest`] /// /// All [`TxEnv`] fields are derived from the given [`TransactionRequest`], if fields are @@ -971,7 +693,7 @@ pub trait Call: LoadState> + SpawnBlocking { request: TransactionRequest, ) -> Result { // Ensure that if versioned hashes are set, they're not empty - if request.blob_versioned_hashes.as_ref().map_or(false, |hashes| hashes.is_empty()) { + if request.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) { return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into_eth_err()) } @@ -1125,51 +847,3 @@ pub trait Call: LoadState> + SpawnBlocking { Ok(env) } } - -/// Updates the highest and lowest gas limits for binary search based on the execution result. -/// -/// This function refines the gas limit estimates used in a binary search to find the optimal -/// gas limit for a transaction. It adjusts the highest or lowest gas limits depending on -/// whether the execution succeeded, reverted, or halted due to specific reasons. -#[inline] -fn update_estimated_gas_range( - result: ExecutionResult, - tx_gas_limit: u64, - highest_gas_limit: &mut u64, - lowest_gas_limit: &mut u64, -) -> Result<(), EthApiError> { - match result { - ExecutionResult::Success { .. } => { - // Cap the highest gas limit with the succeeding gas limit. - *highest_gas_limit = tx_gas_limit; - } - ExecutionResult::Revert { .. } => { - // Increase the lowest gas limit. - *lowest_gas_limit = tx_gas_limit; - } - ExecutionResult::Halt { reason, .. } => { - match reason { - HaltReason::OutOfGas(_) | HaltReason::InvalidFEOpcode => { - // Both `OutOfGas` and `InvalidEFOpcode` can occur dynamically if the gas - // left is too low. Treat this as an out of gas - // condition, knowing that the call succeeds with a - // higher gas limit. - // - // Common usage of invalid opcode in OpenZeppelin: - // - - // Increase the lowest gas limit. - *lowest_gas_limit = tx_gas_limit; - } - err => { - // These cases should be unreachable because we know the transaction - // succeeds, but if they occur, treat them as an - // error. 
- return Err(RpcInvalidTransactionError::EvmHalt(err).into_eth_err()) - } - } - } - }; - - Ok(()) -} diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs new file mode 100644 index 000000000000..f9d62855be12 --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -0,0 +1,363 @@ +//! Estimate gas needed implementation + +use super::{Call, LoadPendingBlock}; +use crate::{AsEthApiError, FromEthApiError, IntoEthApiError}; +use alloy_primitives::U256; +use alloy_rpc_types_eth::{state::StateOverride, transaction::TransactionRequest, BlockId}; +use futures::Future; +use reth_chainspec::{EthChainSpec, MIN_TRANSACTION_GAS}; +use reth_provider::{ChainSpecProvider, StateProvider}; +use reth_revm::{ + database::StateProviderDatabase, + db::CacheDB, + primitives::{BlockEnv, CfgEnvWithHandlerCfg, ExecutionResult, HaltReason, TransactTo}, +}; +use reth_rpc_eth_types::{ + revm_utils::{apply_state_overrides, caller_gas_allowance}, + EthApiError, RevertError, RpcInvalidTransactionError, +}; +use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO}; +use revm_primitives::{db::Database, EnvWithHandlerCfg}; +use tracing::trace; + +/// Gas execution estimates +pub trait EstimateCall: Call { + /// Estimates the gas usage of the `request` with the state. + /// + /// This will execute the [`TransactionRequest`] and find the best gas limit via binary search. + /// + /// ## EVM settings + /// + /// This modifies certain EVM settings to mirror geth's `SkipAccountChecks` when transacting requests, see also: : + /// + /// - `disable_eip3607` is set to `true` + /// - `disable_base_fee` is set to `true` + /// - `nonce` is set to `None` + fn estimate_gas_with( + &self, + mut cfg: CfgEnvWithHandlerCfg, + block: BlockEnv, + mut request: TransactionRequest, + state: S, + state_override: Option, + ) -> Result + where + S: StateProvider, + { + // Disabled because eth_estimateGas is sometimes used with eoa senders + // See + cfg.disable_eip3607 = true; + + // The basefee should be ignored for eth_estimateGas and similar + // See: + // + cfg.disable_base_fee = true; + + // set nonce to None so that the correct nonce is chosen by the EVM + request.nonce = None; + + // Keep a copy of gas related request values + let tx_request_gas_limit = request.gas.map(U256::from); + let tx_request_gas_price = request.gas_price; + // the gas limit of the corresponding block + let block_env_gas_limit = block.gas_limit; + + // Determine the highest possible gas limit, considering both the request's specified limit + // and the block's limit. + let mut highest_gas_limit = tx_request_gas_limit + .map(|mut tx_gas_limit| { + if block_env_gas_limit < tx_gas_limit { + // requested gas limit is higher than the allowed gas limit, capping + tx_gas_limit = block_env_gas_limit; + } + tx_gas_limit + }) + .unwrap_or(block_env_gas_limit); + + // Configure the evm env + let mut env = self.build_call_evm_env(cfg, block, request)?; + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + // Apply any state overrides if specified. + if let Some(state_override) = state_override { + apply_state_overrides(state_override, &mut db).map_err(Self::Error::from_eth_err)?; + } + + // Optimize for simple transfer transactions, potentially reducing the gas estimate. 
+ if env.tx.data.is_empty() { + if let TransactTo::Call(to) = env.tx.transact_to { + if let Ok(code) = db.db.account_code(to) { + let no_code_callee = code.map(|code| code.is_empty()).unwrap_or(true); + if no_code_callee { + // If the tx is a simple transfer (call to an account with no code) we can + // shortcircuit. But simply returning + // `MIN_TRANSACTION_GAS` is dangerous because there might be additional + // field combos that bump the price up, so we try executing the function + // with the minimum gas limit to make sure. + let mut env = env.clone(); + env.tx.gas_limit = MIN_TRANSACTION_GAS; + if let Ok((res, _)) = self.transact(&mut db, env) { + if res.result.is_success() { + return Ok(U256::from(MIN_TRANSACTION_GAS)) + } + } + } + } + } + } + + // Check funds of the sender (only useful to check if transaction gas price is more than 0). + // + // The caller allowance is check by doing `(account.balance - tx.value) / tx.gas_price` + if env.tx.gas_price > U256::ZERO { + // cap the highest gas limit by max gas caller can afford with given gas price + highest_gas_limit = highest_gas_limit + .min(caller_gas_allowance(&mut db, &env.tx).map_err(Self::Error::from_eth_err)?); + } + + // We can now normalize the highest gas limit to a u64 + let mut highest_gas_limit: u64 = highest_gas_limit + .try_into() + .unwrap_or_else(|_| self.provider().chain_spec().max_gas_limit()); + + // If the provided gas limit is less than computed cap, use that + env.tx.gas_limit = env.tx.gas_limit.min(highest_gas_limit); + + trace!(target: "rpc::eth::estimate", ?env, "Starting gas estimation"); + + // Execute the transaction with the highest possible gas limit. + let (mut res, mut env) = match self.transact(&mut db, env.clone()) { + // Handle the exceptional case where the transaction initialization uses too much gas. + // If the gas price or gas limit was specified in the request, retry the transaction + // with the block's gas limit to determine if the failure was due to + // insufficient gas. + Err(err) + if err.is_gas_too_high() && + (tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) => + { + return Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) + } + // Propagate other results (successful or other errors). + ethres => ethres?, + }; + + let gas_refund = match res.result { + ExecutionResult::Success { gas_refunded, .. } => gas_refunded, + ExecutionResult::Halt { reason, gas_used } => { + // here we don't check for invalid opcode because already executed with highest gas + // limit + return Err(RpcInvalidTransactionError::halt(reason, gas_used).into_eth_err()) + } + ExecutionResult::Revert { output, .. } => { + // if price or limit was included in the request then we can execute the request + // again with the block's gas limit to check if revert is gas related or not + return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() { + Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) + } else { + // the transaction did revert + Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()) + } + } + }; + + // At this point we know the call succeeded but want to find the _best_ (lowest) gas the + // transaction succeeds with. We find this by doing a binary search over the possible range. 
+
+        // we know the tx succeeded with the configured gas limit, so we can use that as the
+        // highest, in case we applied a gas cap due to caller allowance above
+        highest_gas_limit = env.tx.gas_limit;
+
+        // NOTE: this is the gas the transaction used, which is less than the
+        // transaction requires to succeed.
+        let mut gas_used = res.result.gas_used();
+        // the lowest value is capped by the gas used by the unconstrained transaction
+        let mut lowest_gas_limit = gas_used.saturating_sub(1);
+
+        // As stated in Geth, there is a good chance that the transaction will pass if we set the
+        // gas limit to the execution gas used plus the gas refund, so we check this first
+        let optimistic_gas_limit = (gas_used + gas_refund + CALL_STIPEND_GAS) * 64 / 63;
+        if optimistic_gas_limit < highest_gas_limit {
+            // Set the transaction's gas limit to the calculated optimistic gas limit.
+            env.tx.gas_limit = optimistic_gas_limit;
+            // Re-execute the transaction with the new gas limit and update the result and
+            // environment.
+            (res, env) = self.transact(&mut db, env)?;
+            // Update the gas used based on the new result.
+            gas_used = res.result.gas_used();
+            // Update the gas limit estimates (highest and lowest) based on the execution result.
+            update_estimated_gas_range(
+                res.result,
+                optimistic_gas_limit,
+                &mut highest_gas_limit,
+                &mut lowest_gas_limit,
+            )?;
+        };
+
+        // Pick a point that's close to the estimated gas
+        let mut mid_gas_limit = std::cmp::min(
+            gas_used * 3,
+            ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64,
+        );
+
+        // Binary search narrows the range to find the minimum gas limit needed for the
+        // transaction to succeed.
+        while (highest_gas_limit - lowest_gas_limit) > 1 {
+            // An estimation error is allowed once the current gas limit range used in the binary
+            // search is small enough (less than 1.5% of the highest gas limit)
+            if (highest_gas_limit - lowest_gas_limit) as f64 / (highest_gas_limit as f64) <
+                ESTIMATE_GAS_ERROR_RATIO
+            {
+                break
+            };
+
+            env.tx.gas_limit = mid_gas_limit;
+
+            // Execute transaction and handle potential gas errors, adjusting limits accordingly.
+            match self.transact(&mut db, env.clone()) {
+                Err(err) if err.is_gas_too_high() => {
+                    // Decrease the highest gas limit if gas is too high
+                    highest_gas_limit = mid_gas_limit;
+                }
+                Err(err) if err.is_gas_too_low() => {
+                    // Increase the lowest gas limit if gas is too low
+                    lowest_gas_limit = mid_gas_limit;
+                }
+                // Handle other cases, including successful transactions.
+                ethres => {
+                    // Unpack the result and environment if the transaction was successful.
+                    (res, env) = ethres?;
+                    // Update the estimated gas range based on the transaction result.
+                    update_estimated_gas_range(
+                        res.result,
+                        mid_gas_limit,
+                        &mut highest_gas_limit,
+                        &mut lowest_gas_limit,
+                    )?;
+                }
+            }
+
+            // New midpoint
+            mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64;
+        }
+
+        Ok(U256::from(highest_gas_limit))
+    }
+
+    /// Estimate gas needed for execution of the `request` at the [`BlockId`].
+    fn estimate_gas_at(
+        &self,
+        request: TransactionRequest,
+        at: BlockId,
+        state_override: Option<StateOverride>,
+    ) -> impl Future<Output = Result<U256, Self::Error>> + Send
+    where
+        Self: LoadPendingBlock,
+    {
+        async move {
+            let (cfg, block_env, at) = self.evm_env_at(at).await?;
+
+            self.spawn_blocking_io(move |this| {
+                let state = this.state_at_block_id(at)?;
+                EstimateCall::estimate_gas_with(
+                    &this,
+                    cfg,
+                    block_env,
+                    request,
+                    state,
+                    state_override,
+                )
+            })
+            .await
+        }
+    }
+
+    /// Executes the requests again after an out of gas error to check if the error is gas related
+    /// or not
+    #[inline]
+    fn map_out_of_gas_err<DB>(
+        &self,
+        env_gas_limit: U256,
+        mut env: EnvWithHandlerCfg,
+        db: &mut DB,
+    ) -> Self::Error
+    where
+        DB: Database,
+        EthApiError: From<DB::Error>,
+    {
+        let req_gas_limit = env.tx.gas_limit;
+        env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX);
+        let (res, _) = match self.transact(db, env) {
+            Ok(res) => res,
+            Err(err) => return err,
+        };
+        match res.result {
+            ExecutionResult::Success { .. } => {
+                // transaction succeeded by manually increasing the gas limit to
+                // highest, which means the caller lacks funds to pay for the tx
+                RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into_eth_err()
+            }
+            ExecutionResult::Revert { output, .. } => {
+                // reverted again after bumping the limit
+                RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()
+            }
+            ExecutionResult::Halt { reason, .. } => {
+                RpcInvalidTransactionError::EvmHalt(reason).into_eth_err()
+            }
+        }
+    }
+}
+
+/// Updates the highest and lowest gas limits for binary search based on the execution result.
+///
+/// This function refines the gas limit estimates used in a binary search to find the optimal
+/// gas limit for a transaction.
It adjusts the highest or lowest gas limits depending on +/// whether the execution succeeded, reverted, or halted due to specific reasons. +#[inline] +pub fn update_estimated_gas_range( + result: ExecutionResult, + tx_gas_limit: u64, + highest_gas_limit: &mut u64, + lowest_gas_limit: &mut u64, +) -> Result<(), EthApiError> { + match result { + ExecutionResult::Success { .. } => { + // Cap the highest gas limit with the succeeding gas limit. + *highest_gas_limit = tx_gas_limit; + } + ExecutionResult::Revert { .. } => { + // Increase the lowest gas limit. + *lowest_gas_limit = tx_gas_limit; + } + ExecutionResult::Halt { reason, .. } => { + match reason { + HaltReason::OutOfGas(_) | HaltReason::InvalidFEOpcode => { + // Both `OutOfGas` and `InvalidEFOpcode` can occur dynamically if the gas + // left is too low. Treat this as an out of gas + // condition, knowing that the call succeeds with a + // higher gas limit. + // + // Common usage of invalid opcode in OpenZeppelin: + // + + // Increase the lowest gas limit. + *lowest_gas_limit = tx_gas_limit; + } + err => { + // These cases should be unreachable because we know the transaction + // succeeds, but if they occur, treat them as an + // error. + return Err(RpcInvalidTransactionError::EvmHalt(err).into_eth_err()) + } + } + } + }; + + Ok(()) +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 8ed45d2ac080..0099e0f6b160 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -1,5 +1,6 @@ //! Loads fee history from database. Helper trait for `eth_` fee and transaction RPC methods. +use alloy_consensus::BlockHeader; use alloy_primitives::U256; use alloy_rpc_types_eth::{BlockNumberOrTag, FeeHistory}; use futures::Future; @@ -287,7 +288,7 @@ pub trait LoadFee: LoadBlock { .block_with_senders(BlockNumberOrTag::Pending.into()) .await? .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Pending.into()))? - .base_fee_per_gas + .base_fee_per_gas() .ok_or(EthApiError::InvalidTransaction( RpcInvalidTransactionError::TxTypeNotSupported, ))?; @@ -324,7 +325,7 @@ pub trait LoadFee: LoadBlock { let suggested_tip = self.suggested_priority_fee(); async move { let (header, suggested_tip) = futures::try_join!(header, suggested_tip)?; - let base_fee = header.and_then(|h| h.base_fee_per_gas).unwrap_or_default(); + let base_fee = header.and_then(|h| h.base_fee_per_gas()).unwrap_or_default(); Ok(suggested_tip + U256::from(base_fee)) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/mod.rs b/crates/rpc/rpc-eth-api/src/helpers/mod.rs index 8adb0e281e71..174cb3bad046 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/mod.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/mod.rs @@ -17,7 +17,7 @@ pub mod block; pub mod blocking_task; pub mod call; -pub mod error; +pub mod estimate; pub mod fee; pub mod pending_block; pub mod receipt; diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 0173485aef5f..e1cd8f5c3c29 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -1,11 +1,9 @@ //! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace //! RPC methods. 
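Stripped of reth's error plumbing, the estimator introduced in `estimate.rs` above is a bounded binary search with an early-exit error ratio. A minimal, self-contained sketch of that core loop (a simplification, not the trait method itself: `execute` stands in for `Call::transact`, and the seeding via gas used, refund and the optimistic limit is omitted):

```rust
/// Outcome of a trial execution at a fixed gas limit.
enum Outcome {
    Success,
    OutOfGas,
    Revert,
}

/// Binary-search the lowest gas limit in `(lowest, highest]` that still
/// succeeds; `error_ratio` mirrors `ESTIMATE_GAS_ERROR_RATIO` (0.015 = 1.5%).
fn search_gas_limit(
    mut lowest: u64,
    mut highest: u64,
    error_ratio: f64,
    execute: impl Fn(u64) -> Outcome,
) -> u64 {
    while highest - lowest > 1 {
        // Allow an estimation error once the remaining range is a negligible
        // fraction of the highest limit.
        if (highest - lowest) as f64 / highest as f64 < error_ratio {
            break;
        }
        // Widening to u128 avoids overflow when both bounds are near u64::MAX.
        let mid = ((highest as u128 + lowest as u128) / 2) as u64;
        match execute(mid) {
            // `mid` was enough gas: it becomes the new upper bound.
            Outcome::Success => highest = mid,
            // A revert at `mid` may still be gas-related, so both failure modes
            // push the lower bound up, as in `update_estimated_gas_range`.
            Outcome::OutOfGas | Outcome::Revert => lowest = mid,
        }
    }
    highest
}
```

Seeded with `lowest = gas_used - 1` and the highest limit that already succeeded, the loop converges in logarithmically many EVM executions.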
-use std::time::{Duration, Instant}; - +use super::SpawnBlocking; use crate::{EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; - -use alloy_consensus::EMPTY_OMMER_ROOT_HASH; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{ eip4844::MAX_DATA_GAS_PER_BLOCK, eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, }; @@ -19,36 +17,40 @@ use reth_evm::{ }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - proofs::calculate_transaction_root, - revm_primitives::{ - BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, - ResultAndState, SpecId, - }, - Block, BlockBody, Header, Receipt, SealedBlockWithSenders, SealedHeader, - TransactionSignedEcRecovered, + proofs::calculate_transaction_root, Block, BlockBody, BlockExt, InvalidTransactionError, + Receipt, SealedBlockWithSenders, SealedHeader, TransactionSignedEcRecovered, }; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError, - ReceiptProvider, StateProviderFactory, + ProviderReceipt, ReceiptProvider, StateProviderFactory, +}; +use reth_revm::{ + database::StateProviderDatabase, + primitives::{ + BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, + ResultAndState, SpecId, + }, }; -use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_types::{EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; -use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; +use reth_transaction_pool::{ + error::InvalidPoolTransactionError, BestTransactionsAttributes, TransactionPool, +}; use reth_trie::HashedPostState; use revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State}; +use std::time::{Duration, Instant}; use tokio::sync::Mutex; use tracing::debug; -use super::SpawnBlocking; - /// Loads a pending block from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. 
pub trait LoadPendingBlock: EthApiTypes + RpcNodeCore< - Provider: BlockReaderIdExt - + EvmEnvProvider + Provider: BlockReaderIdExt< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool, @@ -116,9 +118,18 @@ pub trait LoadPendingBlock: } /// Returns the locally built pending block + #[expect(clippy::type_complexity)] fn local_pending_block( &self, - ) -> impl Future)>, Self::Error>> + Send + ) -> impl Future< + Output = Result< + Option<( + SealedBlockWithSenders<::Block>, + Vec>, + )>, + Self::Error, + >, + > + Send where Self: SpawnBlocking, { @@ -283,7 +294,13 @@ pub trait LoadPendingBlock: // we can't fit this transaction into the block, so we need to mark it as invalid // which also removes all dependent transaction from the iterator before we can // continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit( + pool_tx.gas_limit(), + block_gas_limit, + ), + ); continue } @@ -291,7 +308,12 @@ pub trait LoadPendingBlock: // we don't want to leak any state changes made by private transactions, so we mark // them as invalid here which removes all dependent transactions from the iterator // before we can continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported, + ), + ); continue } @@ -307,7 +329,13 @@ pub trait LoadPendingBlock: // invalid, which removes its dependent transactions from // the iterator. This is similar to the gas limit condition // for regular transactions above. - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit( + tx_blob_gas, + MAX_DATA_GAS_PER_BLOCK, + ), + ); continue } } @@ -331,7 +359,12 @@ pub trait LoadPendingBlock: } else { // if the transaction is invalid, we can skip it and all of its // descendants - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported, + ), + ); } continue } diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index 48394f1cd6bb..f663c5863b55 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -2,19 +2,22 @@ //! loads receipt data w.r.t. network. use futures::Future; -use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; +use reth_primitives::TransactionMeta; +use reth_provider::{ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider}; use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt}; /// Assembles transaction receipt data w.r.t to network. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. -pub trait LoadReceipt: EthApiTypes + RpcNodeCoreExt + Send + Sync { +pub trait LoadReceipt: + EthApiTypes + RpcNodeCoreExt + Send + Sync +{ /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`. 
fn build_transaction_receipt( &self, - tx: TransactionSigned, + tx: ProviderTx, meta: TransactionMeta, - receipt: Receipt, + receipt: ProviderReceipt, ) -> impl Future, Self::Error>> + Send; } diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 7bc365d91c45..7ff9fa4deff5 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -1,7 +1,7 @@ //! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace //! RPC methods. -use alloy_consensus::constants::KECCAK_EMPTY; +use alloy_consensus::{constants::KECCAK_EMPTY, Header}; use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rpc_types_eth::{Account, EIP1186AccountProofResponse}; @@ -10,7 +10,6 @@ use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_errors::RethError; use reth_evm::ConfigureEvmEnv; -use reth_primitives::Header; use reth_provider::{ BlockIdReader, BlockNumReader, ChainSpecProvider, StateProvider, StateProviderBox, StateProviderFactory, diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 36d901fda5f8..114b4c41d905 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -3,12 +3,14 @@ use std::{fmt::Display, sync::Arc}; use crate::{FromEvmError, RpcNodeCore}; +use alloy_consensus::Header; use alloy_primitives::B256; use alloy_rpc_types_eth::{BlockId, TransactionInfo}; use futures::Future; use reth_chainspec::ChainSpecProvider; use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{Header, SealedBlockWithSenders}; +use reth_primitives::SealedBlockWithSenders; +use reth_provider::BlockReader; use reth_revm::database::StateProviderDatabase; use reth_rpc_eth_types::{ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, @@ -23,7 +25,7 @@ use revm_primitives::{ use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; /// Executes CPU heavy tasks. -pub trait Trace: LoadState> { +pub trait Trace: LoadState> { /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state /// changes. fn inspect( @@ -203,7 +205,7 @@ pub trait Trace: LoadState> { cfg.clone(), block_env.clone(), block_txs, - tx.hash, + tx.hash(), )?; let env = EnvWithHandlerCfg::new_with_cfg_env( @@ -229,7 +231,7 @@ pub trait Trace: LoadState> { fn trace_block_until( &self, block_id: BlockId, - block: Option>, + block: Option::Block>>>, highest_index: Option, config: TracingInspectorConfig, f: F, diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 234008f21fe2..6ad8f8fd6ec0 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -1,15 +1,18 @@ //! Database access for `eth_` transaction RPC methods. Loads transaction and receipt data w.r.t. //! network. 
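The `ProviderTx` and `ProviderReceipt` aliases that now parameterize `LoadReceipt` above and the transaction helpers below are thin projections of the provider's associated types. A sketch of their likely shape (assumption: they live in `reth-storage-api` and are re-exported via `reth_provider`):

```rust
use reth_provider::{ReceiptProvider, TransactionsProvider};

/// The transaction type the given provider serves.
pub type ProviderTx<P> = <P as TransactionsProvider>::Transaction;
/// The receipt type the given provider serves.
pub type ProviderReceipt<P> = <P as ReceiptProvider>::Receipt;
```

This keeps signatures like `build_transaction_receipt` generic over any node whose transaction and receipt types differ from the Ethereum defaults.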
-use alloy_consensus::Transaction; +use alloy_consensus::{BlockHeader, Transaction}; use alloy_dyn_abi::TypedData; use alloy_eips::{eip2718::Encodable2718, BlockId}; use alloy_network::TransactionBuilder; use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo}; use futures::Future; -use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned}; -use reth_provider::{BlockNumReader, BlockReaderIdExt, ReceiptProvider, TransactionsProvider}; +use reth_primitives::{SealedBlockWithSenders, TransactionMeta, TransactionSigned}; +use reth_provider::{ + BlockNumReader, BlockReaderIdExt, ProviderReceipt, ProviderTx, ReceiptProvider, + TransactionsProvider, +}; use reth_rpc_eth_types::{ utils::{binary_search, recover_raw_transaction}, EthApiError, SignError, TransactionSource, @@ -18,13 +21,12 @@ use reth_rpc_types_compat::transaction::{from_recovered, from_recovered_with_blo use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; use std::sync::Arc; -use crate::{ - FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcNodeCoreExt, RpcReceipt, - RpcTransaction, -}; - use super::{ - Call, EthApiSpec, EthSigner, LoadBlock, LoadPendingBlock, LoadReceipt, LoadState, SpawnBlocking, + EthApiSpec, EthSigner, LoadBlock, LoadPendingBlock, LoadReceipt, LoadState, SpawnBlocking, +}; +use crate::{ + helpers::estimate::EstimateCall, FromEthApiError, FullEthApiTypes, IntoEthApiError, + RpcNodeCore, RpcNodeCoreExt, RpcReceipt, RpcTransaction, }; /// Transaction related functions for the [`EthApiServer`](crate::EthApiServer) trait in @@ -61,10 +63,13 @@ pub trait EthTransactions: LoadTransaction { /// Checks the pool and state. /// /// Returns `Ok(None)` if no matching transaction was found. + #[expect(clippy::complexity)] fn transaction_by_hash( &self, hash: B256, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future< + Output = Result>>, Self::Error>, + > + Send { LoadTransaction::transaction_by_hash(self, hash) } @@ -149,19 +154,22 @@ pub trait EthTransactions: LoadTransaction { } /// Helper method that loads a transaction and its receipt. + #[expect(clippy::complexity)] fn load_transaction_and_receipt( &self, hash: TxHash, ) -> impl Future< - Output = Result, Self::Error>, + Output = Result< + Option<(ProviderTx, TransactionMeta, ProviderReceipt)>, + Self::Error, + >, > + Send where Self: 'static, { - let this = self.clone(); + let provider = self.provider().clone(); self.spawn_blocking_io(move |_| { - let (tx, meta) = match this - .provider() + let (tx, meta) = match provider .transaction_by_hash_with_meta(hash) .map_err(Self::Error::from_eth_err)? { @@ -169,11 +177,10 @@ pub trait EthTransactions: LoadTransaction { None => return Ok(None), }; - let receipt = - match this.provider().receipt_by_hash(hash).map_err(Self::Error::from_eth_err)? { - Some(recpt) => recpt, - None => return Ok(None), - }; + let receipt = match provider.receipt_by_hash(hash).map_err(Self::Error::from_eth_err)? { + Some(recpt) => recpt, + None => return Ok(None), + }; Ok(Some((tx, meta, receipt))) }) @@ -193,8 +200,8 @@ pub trait EthTransactions: LoadTransaction { async move { if let Some(block) = self.block_with_senders(block_id).await? 
{ let block_hash = block.hash(); - let block_number = block.number; - let base_fee_per_gas = block.base_fee_per_gas; + let block_number = block.number(); + let base_fee_per_gas = block.base_fee_per_gas(); if let Some((signer, tx)) = block.transactions_with_sender().nth(index) { let tx_info = TransactionInfo { hash: Some(tx.hash()), @@ -208,7 +215,7 @@ pub trait EthTransactions: LoadTransaction { tx.clone().with_signer(*signer), tx_info, self.tx_resp_builder(), - ))) + )?)) } } @@ -233,7 +240,7 @@ pub trait EthTransactions: LoadTransaction { RpcNodeCore::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) { let transaction = tx.transaction.clone().into_consensus(); - return Ok(Some(from_recovered(transaction.into(), self.tx_resp_builder()))); + return Ok(Some(from_recovered(transaction.into(), self.tx_resp_builder())?)); } } @@ -269,8 +276,8 @@ pub trait EthTransactions: LoadTransaction { .await? .and_then(|block| { let block_hash = block.hash(); - let block_number = block.number; - let base_fee_per_gas = block.base_fee_per_gas; + let block_number = block.number(); + let base_fee_per_gas = block.base_fee_per_gas(); block .transactions_with_sender() @@ -291,7 +298,7 @@ pub trait EthTransactions: LoadTransaction { ) }) }) - .ok_or(EthApiError::HeaderNotFound(block_id).into()) + .ok_or(EthApiError::HeaderNotFound(block_id))? .map(Some) } } @@ -309,7 +316,7 @@ pub trait EthTransactions: LoadTransaction { { async move { if let Some(block) = self.block_with_senders(block_id).await? { - if let Some(tx) = block.transactions().nth(index) { + if let Some(tx) = block.transactions().get(index) { return Ok(Some(tx.encoded_2718().into())) } } @@ -326,7 +333,7 @@ pub trait EthTransactions: LoadTransaction { tx: Bytes, ) -> impl Future> + Send { async move { - let recovered = recover_raw_transaction(tx.clone())?; + let recovered = recover_raw_transaction(tx)?; let pool_transaction = ::Transaction::from_pooled(recovered.into()); @@ -348,7 +355,7 @@ pub trait EthTransactions: LoadTransaction { mut request: TransactionRequest, ) -> impl Future> + Send where - Self: EthApiSpec + LoadBlock + LoadPendingBlock + Call, + Self: EthApiSpec + LoadBlock + LoadPendingBlock + EstimateCall, { async move { let from = match request.from { @@ -400,16 +407,10 @@ pub trait EthTransactions: LoadTransaction { txn: TransactionRequest, ) -> impl Future> + Send { async move { - let signers: Vec<_> = self.signers().read().iter().cloned().collect(); - for signer in signers { - if signer.is_signer_for(from) { - return match signer.sign_transaction(txn, from).await { - Ok(tx) => Ok(tx), - Err(e) => Err(e.into_eth_err()), - } - } - } - Err(EthApiError::InvalidTransactionSignature.into()) + self.find_signer(from)? + .sign_transaction(txn, from) + .await + .map_err(Self::Error::from_eth_err) } } @@ -430,6 +431,22 @@ pub trait EthTransactions: LoadTransaction { } } + /// Signs a transaction request using the given account in request + /// Returns the EIP-2718 encoded signed transaction. + fn sign_transaction( + &self, + request: TransactionRequest, + ) -> impl Future> + Send { + async move { + let from = match request.from { + Some(from) => from, + None => return Err(SignError::NoAccount.into_eth_err()), + }; + + Ok(self.sign_request(&from, request).await?.encoded_2718().into()) + } + } + /// Encodes and signs the typed data according EIP-712. Payload must implement Eip712 trait. 
fn sign_typed_data(&self, data: &TypedData, account: Address) -> Result { Ok(self @@ -468,10 +485,13 @@ pub trait LoadTransaction: /// Checks the pool and state. /// /// Returns `Ok(None)` if no matching transaction was found. + #[expect(clippy::complexity)] fn transaction_by_hash( &self, hash: B256, - ) -> impl Future, Self::Error>> + Send { + ) -> impl Future< + Output = Result>>, Self::Error>, + > + Send { async move { // Try to find the transaction on disk let mut resp = self diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs index fa9737f84f0d..cb97a03e8b80 100644 --- a/crates/rpc/rpc-eth-api/src/lib.rs +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -20,12 +20,14 @@ pub mod node; pub mod pubsub; pub mod types; +pub use reth_rpc_eth_types::error::{ + AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError, +}; pub use reth_rpc_types_compat::TransactionCompat; pub use bundle::{EthBundleApiServer, EthCallBundleApiServer}; pub use core::{EthApiServer, FullEthApiServer}; pub use filter::EthFilterApiServer; -pub use helpers::error::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; pub use node::{RpcNodeCore, RpcNodeCoreExt}; pub use pubsub::EthPubSubApiServer; pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcReceipt, RpcTransaction}; diff --git a/crates/rpc/rpc-eth-api/src/node.rs b/crates/rpc/rpc-eth-api/src/node.rs index 4ae79c083411..12dbe8f66641 100644 --- a/crates/rpc/rpc-eth-api/src/node.rs +++ b/crates/rpc/rpc-eth-api/src/node.rs @@ -19,6 +19,9 @@ pub trait RpcNodeCore: Clone + Send + Sync { /// Network API. type Network: Send + Sync + Clone; + /// Builds new blocks. + type PayloadBuilder: Send + Sync + Clone; + /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; @@ -28,6 +31,9 @@ pub trait RpcNodeCore: Clone + Send + Sync { /// Returns the handle to the network fn network(&self) -> &Self::Network; + /// Returns the handle to the payload builder service. + fn payload_builder(&self) -> &Self::PayloadBuilder; + /// Returns the provider of the node. fn provider(&self) -> &Self::Provider; } @@ -40,6 +46,7 @@ where type Pool = T::Pool; type Evm = ::Evm; type Network = ::Network; + type PayloadBuilder = ::PayloadBuilder; #[inline] fn pool(&self) -> &Self::Pool { @@ -56,6 +63,11 @@ where FullNodeComponents::network(self) } + #[inline] + fn payload_builder(&self) -> &Self::PayloadBuilder { + FullNodeComponents::payload_builder(self) + } + #[inline] fn provider(&self) -> &Self::Provider { FullNodeComponents::provider(self) diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index b75bce026fb4..2bac068483c7 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -7,9 +7,11 @@ use std::{ use alloy_network::Network; use alloy_rpc_types_eth::Block; +use reth_primitives::TransactionSigned; +use reth_provider::{ReceiptProvider, TransactionsProvider}; use reth_rpc_types_compat::TransactionCompat; -use crate::{AsEthApiError, FromEthApiError, FromEvmError}; +use crate::{AsEthApiError, FromEthApiError, FromEvmError, RpcNodeCore}; /// Network specific `eth` API types. pub trait EthApiTypes: Send + Sync + Clone { @@ -39,15 +41,35 @@ pub type RpcBlock = Block, ::HeaderResponse>; /// Adapter for network specific receipt type. pub type RpcReceipt = ::ReceiptResponse; +/// Adapter for network specific error type. +pub type RpcError = ::Error; + /// Helper trait holds necessary trait bounds on [`EthApiTypes`] to implement `eth` API. 
-pub trait FullEthApiTypes: - EthApiTypes>> +pub trait FullEthApiTypes +where + Self: RpcNodeCore< + Provider: TransactionsProvider + + ReceiptProvider, + > + EthApiTypes< + TransactionCompat: TransactionCompat< + ::Transaction, + Transaction = RpcTransaction, + Error = RpcError, + >, + >, { } impl FullEthApiTypes for T where - T: EthApiTypes< - TransactionCompat: TransactionCompat>, - > + T: RpcNodeCore< + Provider: TransactionsProvider + + ReceiptProvider, + > + EthApiTypes< + TransactionCompat: TransactionCompat< + ::Transaction, + Transaction = RpcTransaction, + Error = RpcError, + >, + > { } diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 9b38ed89724a..11bf6c6231d2 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -19,6 +19,7 @@ reth-evm.workspace = true reth-execution-types.workspace = true reth-metrics.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives-traits.workspace = true reth-storage-api.workspace = true reth-revm.workspace = true reth-rpc-server-types.workspace = true @@ -28,6 +29,7 @@ reth-transaction-pool.workspace = true reth-trie.workspace = true # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-sol-types.workspace = true @@ -35,7 +37,6 @@ alloy-rpc-types-eth.workspace = true revm.workspace = true revm-inspectors.workspace = true revm-primitives = { workspace = true, features = ["dev"] } -alloy-eips.workspace = true # rpc jsonrpsee-core.workspace = true diff --git a/crates/rpc/rpc-eth-types/src/builder/ctx.rs b/crates/rpc/rpc-eth-types/src/builder/ctx.rs index 2132dd0e22c4..db2beb4a4549 100644 --- a/crates/rpc/rpc-eth-types/src/builder/ctx.rs +++ b/crates/rpc/rpc-eth-types/src/builder/ctx.rs @@ -2,6 +2,7 @@ use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::ChainSpecProvider; +use reth_primitives::NodePrimitives; use reth_storage_api::BlockReaderIdExt; use reth_tasks::TaskSpawner; @@ -41,7 +42,12 @@ where where Provider: ChainSpecProvider + 'static, Tasks: TaskSpawner, - Events: CanonStateSubscriptions, + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, { let fee_history_cache = FeeHistoryCache::new(self.cache.clone(), self.config.fee_history_cache); diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 50fd4b04625f..1fbe16a2ed9c 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -67,6 +67,15 @@ impl reth_storage_api::StorageRootProvider for StateProviderTraitObjWrapper<'_> ) -> ProviderResult { self.0.storage_proof(address, slot, hashed_storage) } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + self.0.storage_multiproof(address, slots, hashed_storage) + } } impl reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper<'_> { diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index b6b0364c477e..70c8b1a4f54f 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -1,5 +1,6 @@ //! 
Async caching support for eth RPC

+use alloy_consensus::Header;
 use alloy_eips::BlockHashOrNumber;
 use alloy_primitives::B256;
 use futures::{future::Either, Stream, StreamExt};
@@ -7,7 +8,7 @@ use reth_chain_state::CanonStateNotification;
 use reth_errors::{ProviderError, ProviderResult};
 use reth_evm::{provider::EvmEnvProvider, ConfigureEvm};
 use reth_execution_types::Chain;
-use reth_primitives::{Header, Receipt, SealedBlockWithSenders, TransactionSigned};
+use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSigned};
 use reth_storage_api::{BlockReader, StateProviderFactory, TransactionVariant};
 use reth_tasks::{TaskSpawner, TokioTaskExecutor};
 use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId};
@@ -104,7 +105,12 @@ impl EthStateCache {
         evm_config: EvmConfig,
     ) -> Self
     where
-        Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static,
+        Provider: StateProviderFactory
+            + BlockReader
+            + EvmEnvProvider
+            + Clone
+            + Unpin
+            + 'static,
         EvmConfig: ConfigureEvm<Header = Header>,
    {
        Self::spawn_with(provider, config, TokioTaskExecutor::default(), evm_config)
    }
@@ -121,7 +127,12 @@ impl EthStateCache {
         evm_config: EvmConfig,
     ) -> Self
     where
-        Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static,
+        Provider: StateProviderFactory
+            + BlockReader
+            + EvmEnvProvider
+            + Clone
+            + Unpin
+            + 'static,
         Tasks: TaskSpawner + Clone + 'static,
         EvmConfig: ConfigureEvm<Header = Header>,
    {
@@ -336,7 +347,12 @@ where
 impl<Provider, Tasks, EvmConfig> Future for EthStateCacheService<Provider, Tasks, EvmConfig>
 where
-    Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static,
+    Provider: StateProviderFactory
+        + BlockReader
+        + EvmEnvProvider
+        + Clone
+        + Unpin
+        + 'static,
     Tasks: TaskSpawner + Clone + 'static,
     EvmConfig: ConfigureEvm<Header = Header>
, { diff --git a/crates/rpc/rpc-eth-api/src/helpers/error.rs b/crates/rpc/rpc-eth-types/src/error/api.rs similarity index 87% rename from crates/rpc/rpc-eth-api/src/helpers/error.rs rename to crates/rpc/rpc-eth-types/src/error/api.rs index 1d991b8e65b6..419f530c4e21 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/error.rs +++ b/crates/rpc/rpc-eth-types/src/error/api.rs @@ -1,9 +1,10 @@ //! Helper traits to wrap generic l1 errors, in network specific error type configured in -//! [`EthApiTypes`](crate::EthApiTypes). +//! `reth_rpc_eth_api::EthApiTypes`. -use reth_rpc_eth_types::EthApiError; use revm_primitives::EVMError; +use crate::EthApiError; + /// Helper trait to wrap core [`EthApiError`]. pub trait FromEthApiError: From { /// Converts from error via [`EthApiError`]. @@ -51,7 +52,7 @@ pub trait AsEthApiError { fn as_err(&self) -> Option<&EthApiError>; /// Returns `true` if error is - /// [`RpcInvalidTransactionError::GasTooHigh`](reth_rpc_eth_types::RpcInvalidTransactionError::GasTooHigh). + /// [`RpcInvalidTransactionError::GasTooHigh`](crate::RpcInvalidTransactionError::GasTooHigh). fn is_gas_too_high(&self) -> bool { if let Some(err) = self.as_err() { return err.is_gas_too_high() @@ -61,7 +62,7 @@ pub trait AsEthApiError { } /// Returns `true` if error is - /// [`RpcInvalidTransactionError::GasTooLow`](reth_rpc_eth_types::RpcInvalidTransactionError::GasTooLow). + /// [`RpcInvalidTransactionError::GasTooLow`](crate::RpcInvalidTransactionError::GasTooLow). fn is_gas_too_low(&self) -> bool { if let Some(err) = self.as_err() { return err.is_gas_too_low() diff --git a/crates/rpc/rpc-eth-types/src/error.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs similarity index 99% rename from crates/rpc/rpc-eth-types/src/error.rs rename to crates/rpc/rpc-eth-types/src/error/mod.rs index 641cbc88291f..187e2d943f70 100644 --- a/crates/rpc/rpc-eth-types/src/error.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -1,13 +1,15 @@ //! Implementation specific Errors for the `eth_` namespace. -use std::time::Duration; +pub mod api; +pub use api::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; + +use core::time::Duration; use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, U256}; use alloy_rpc_types_eth::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; use alloy_sol_types::decode_revert_reason; use reth_errors::RethError; -use reth_primitives::revm_primitives::InvalidHeader; use reth_rpc_server_types::result::{ block_id_to_str, internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, }; @@ -17,6 +19,7 @@ use reth_transaction_pool::error::{ }; use revm::primitives::{EVMError, ExecutionResult, HaltReason, InvalidTransaction, OutOfGasError}; use revm_inspectors::tracing::MuxError; +use revm_primitives::InvalidHeader; use tracing::error; /// A trait to convert an error to an RPC error. @@ -359,7 +362,7 @@ pub enum RpcInvalidTransactionError { SenderNoEOA, /// Gas limit was exceeded during execution. /// Contains the gas limit. - #[error("out of gas: gas required exceeds allowance: {0}")] + #[error("out of gas: gas required exceeds: {0}")] BasicOutOfGas(u64), /// Gas limit was exceeded during memory expansion. /// Contains the gas limit. 
diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 6c8b66246f33..922c3f9d474a 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -16,7 +16,7 @@ use futures::{ use metrics::atomics::AtomicU64; use reth_chain_state::CanonStateNotification; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; -use reth_primitives::{Receipt, SealedBlock, TransactionSigned}; +use reth_primitives::{NodePrimitives, Receipt, SealedBlock, TransactionSigned}; use reth_storage_api::BlockReaderIdExt; use revm_primitives::{calc_blob_gasprice, calc_excess_blob_gas}; use serde::{Deserialize, Serialize}; @@ -205,13 +205,14 @@ struct FeeHistoryCacheInner { /// Awaits for new chain events and directly inserts them into the cache so they're available /// immediately before they need to be fetched from disk. -pub async fn fee_history_cache_new_blocks_task( +pub async fn fee_history_cache_new_blocks_task( fee_history_cache: FeeHistoryCache, mut events: St, provider: Provider, ) where - St: Stream + Unpin + 'static, + St: Stream> + Unpin + 'static, Provider: BlockReaderIdExt + ChainSpecProvider + 'static, + N: NodePrimitives, { // We're listening for new blocks emitted when the node is in live sync. // If the node transitions to stage sync, we need to fetch the missing blocks @@ -248,7 +249,7 @@ pub async fn fee_history_cache_new_blocks_task( break; }; - let committed = event .committed(); + let committed = event.committed(); let (blocks, receipts): (Vec<_>, Vec<_>) = committed .blocks_and_receipts() .map(|(block, receipts)| { diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index d73cd72b650c..3f8186ae1502 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -7,7 +7,14 @@ use alloy_primitives::{B256, U256}; use alloy_rpc_types_eth::BlockId; use derive_more::{Deref, DerefMut, From, Into}; use itertools::Itertools; -use reth_rpc_server_types::constants; +use reth_primitives_traits::SignedTransaction; +use reth_rpc_server_types::{ + constants, + constants::gas_oracle::{ + DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE, + DEFAULT_MAX_GAS_PRICE, MAX_HEADER_HISTORY, SAMPLE_NUMBER, + }, +}; use reth_storage_api::BlockReaderIdExt; use schnellru::{ByLength, LruMap}; use serde::{Deserialize, Serialize}; @@ -15,11 +22,6 @@ use std::fmt::{self, Debug, Formatter}; use tokio::sync::Mutex; use tracing::warn; -use reth_rpc_server_types::constants::gas_oracle::{ - DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE, - DEFAULT_MAX_GAS_PRICE, MAX_HEADER_HISTORY, SAMPLE_NUMBER, -}; - use super::{EthApiError, EthResult, EthStateCache, RpcInvalidTransactionError}; /// The default gas limit for `eth_call` and adjacent calls. See diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index 3e7c9db6d68e..2e41c7a1183d 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -2,12 +2,13 @@ //! //! Log parsing for building filter. 
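Spelled out with the type parameters that the flattened hunk above drops, the generalized fee-history task has roughly the following signature; this is a sketch reconstructed from the visible bounds, and `FeeHistoryCache` comes from the surrounding module:

```rust
use futures::Stream;
use reth_chain_state::CanonStateNotification;
use reth_chainspec::ChainSpecProvider;
use reth_primitives::NodePrimitives;
use reth_storage_api::BlockReaderIdExt;

// Signature sketch only; the body is the one shown in the hunk: drain
// `events`, insert committed blocks and receipts into the cache, and
// backfill from `provider` when live sync resumes after stage sync.
pub async fn fee_history_cache_new_blocks_task<St, Provider, N>(
    fee_history_cache: FeeHistoryCache,
    mut events: St,
    provider: Provider,
) where
    St: Stream<Item = CanonStateNotification<N>> + Unpin + 'static,
    Provider: BlockReaderIdExt + ChainSpecProvider + 'static,
    N: NodePrimitives,
{
    // ...
}
```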
-use alloy_eips::BlockNumHash; +use alloy_eips::{eip2718::Encodable2718, BlockNumHash}; use alloy_primitives::TxHash; use alloy_rpc_types_eth::{FilteredParams, Log}; use reth_chainspec::ChainInfo; use reth_errors::ProviderError; use reth_primitives::{Receipt, SealedBlockWithSenders}; +use reth_primitives_traits::SignedTransaction; use reth_storage_api::BlockReader; use std::sync::Arc; @@ -58,7 +59,7 @@ pub enum ProviderOrBlock<'a, P: BlockReader> { /// Appends all matching logs of a block's receipts. /// If the log matches, look up the corresponding transaction hash. -pub fn append_matching_block_logs( +pub fn append_matching_block_logs>( all_logs: &mut Vec, provider_or_block: ProviderOrBlock<'_, P>, filter: &FilteredParams, @@ -110,7 +111,7 @@ pub fn append_matching_block_logs( ProviderError::TransactionNotFound(transaction_id.into()) })?; - Some(transaction.hash()) + Some(transaction.trie_hash()) } }; } diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index d8f413650a30..116026c2ddde 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -4,10 +4,12 @@ use std::time::Instant; +use alloy_consensus::BlockHeader; use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::B256; use derive_more::Constructor; use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives_traits::Block; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}; /// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block. @@ -23,26 +25,26 @@ pub struct PendingBlockEnv { /// The origin for a configured [`PendingBlockEnv`] #[derive(Clone, Debug)] -pub enum PendingBlockEnvOrigin { +pub enum PendingBlockEnvOrigin { /// The pending block as received from the CL. - ActualPending(SealedBlockWithSenders), + ActualPending(SealedBlockWithSenders), /// The _modified_ header of the latest block. /// /// This derives the pending state based on the latest header by modifying: /// - the timestamp /// - the block number /// - fees - DerivedFromLatest(SealedHeader), + DerivedFromLatest(SealedHeader), } -impl PendingBlockEnvOrigin { +impl PendingBlockEnvOrigin { /// Returns true if the origin is the actual pending block as received from the CL. pub const fn is_actual_pending(&self) -> bool { matches!(self, Self::ActualPending(_)) } /// Consumes the type and returns the actual pending block. - pub fn into_actual_pending(self) -> Option { + pub fn into_actual_pending(self) -> Option> { match self { Self::ActualPending(block) => Some(block), _ => None, @@ -67,13 +69,13 @@ impl PendingBlockEnvOrigin { /// header. pub fn build_target_hash(&self) -> B256 { match self { - Self::ActualPending(block) => block.parent_hash, + Self::ActualPending(block) => block.header().parent_hash(), Self::DerivedFromLatest(header) => header.hash(), } } /// Returns the header this pending block is based on. - pub fn header(&self) -> &SealedHeader { + pub fn header(&self) -> &SealedHeader { match self { Self::ActualPending(block) => &block.header, Self::DerivedFromLatest(header) => header, diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 247b4449ef5d..3136d42e9580 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -1,13 +1,13 @@ //! RPC receipt response builder, extends a layer one receipt with layer two data. 
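The switch from `transaction.hash()` to `transaction.trie_hash()` in `append_matching_block_logs` above works for any generic `SignedTransaction` because the trie hash is defined purely in terms of the EIP-2718 encoding. A small sketch of that equivalence (assuming alloy's `Encodable2718`, which the hunk already imports):

```rust
use alloy_eips::eip2718::Encodable2718;
use alloy_primitives::{keccak256, B256};

/// For signed transactions the trie hash is the canonical transaction hash:
/// keccak256 over the type-prefixed EIP-2718 encoding.
fn tx_hash_via_2718<T: Encodable2718>(tx: &T) -> B256 {
    keccak256(tx.encoded_2718())
}
```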
+use super::{EthApiError, EthResult}; use alloy_consensus::{ReceiptEnvelope, Transaction}; use alloy_primitives::{Address, TxKind}; use alloy_rpc_types_eth::{Log, ReceiptWithBloom, TransactionReceipt}; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; +use reth_primitives_traits::SignedTransaction; use revm_primitives::calc_blob_gasprice; -use super::{EthApiError, EthResult}; - /// Builds an [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure. pub fn build_receipt( transaction: &TransactionSigned, diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index b2a9a5e62ed4..a10b4afff9d7 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -1,6 +1,6 @@ //! Utilities for serving `eth_simulateV1` -use alloy_consensus::{Transaction as _, TxEip4844Variant, TxType, TypedTransaction}; +use alloy_consensus::{Transaction as _, TxType}; use alloy_primitives::PrimitiveSignature as Signature; use alloy_rpc_types_eth::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, @@ -10,19 +10,16 @@ use alloy_rpc_types_eth::{ use jsonrpsee_types::ErrorObject; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root}, - BlockBody, BlockWithSenders, Receipt, Transaction, TransactionSigned, TransactionSignedNoHash, + BlockBody, BlockWithSenders, Receipt, TransactionSigned, }; -use reth_revm::database::StateProviderDatabase; use reth_rpc_server_types::result::rpc_err; use reth_rpc_types_compat::{block::from_block, TransactionCompat}; -use reth_storage_api::StateRootProvider; -use reth_trie::{HashedPostState, HashedStorage}; -use revm::{db::CacheDB, Database}; -use revm_primitives::{keccak256, Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256}; +use revm::Database; +use revm_primitives::{Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256}; use crate::{ - cache::db::StateProviderTraitObjWrapper, error::ToRpcError, EthApiError, RevertError, - RpcInvalidTransactionError, + error::{api::FromEthApiError, ToRpcError}, + EthApiError, RevertError, RpcInvalidTransactionError, }; /// Errors which may occur during `eth_simulateV1` execution. @@ -134,34 +131,7 @@ where // Create an empty signature for the transaction. let signature = Signature::new(Default::default(), Default::default(), false); - - let tx = match tx { - TypedTransaction::Legacy(tx) => { - TransactionSignedNoHash { transaction: Transaction::Legacy(tx), signature } - .with_hash() - } - TypedTransaction::Eip2930(tx) => { - TransactionSignedNoHash { transaction: Transaction::Eip2930(tx), signature } - .with_hash() - } - TypedTransaction::Eip1559(tx) => { - TransactionSignedNoHash { transaction: Transaction::Eip1559(tx), signature } - .with_hash() - } - TypedTransaction::Eip4844(tx) => { - let tx = match tx { - TxEip4844Variant::TxEip4844(tx) => tx, - TxEip4844Variant::TxEip4844WithSidecar(tx) => tx.tx, - }; - TransactionSignedNoHash { transaction: Transaction::Eip4844(tx), signature } - .with_hash() - } - TypedTransaction::Eip7702(tx) => { - TransactionSignedNoHash { transaction: Transaction::Eip7702(tx), signature } - .with_hash() - } - }; - + let tx = TransactionSigned::new_unhashed(tx.into(), signature); transactions.push(tx); } @@ -169,17 +139,15 @@ where } /// Handles outputs of the calls execution and builds a [`SimulatedBlock`]. 
-#[expect(clippy::complexity)] -pub fn build_block( +pub fn build_block>( results: Vec<(Address, ExecutionResult)>, transactions: Vec, block_env: &BlockEnv, parent_hash: B256, total_difficulty: U256, full_transactions: bool, - db: &CacheDB>>, tx_resp_builder: &T, -) -> Result>, EthApiError> { +) -> Result>, T::Error> { let mut calls: Vec = Vec::with_capacity(results.len()); let mut senders = Vec::with_capacity(results.len()); let mut receipts = Vec::with_capacity(results.len()); @@ -255,33 +223,36 @@ pub fn build_block( calls.push(call); } - let mut hashed_state = HashedPostState::default(); - for (address, account) in &db.accounts { - let hashed_address = keccak256(address); - hashed_state.accounts.insert(hashed_address, Some(account.info.clone().into())); - - let storage = hashed_state - .storages - .entry(hashed_address) - .or_insert_with(|| HashedStorage::new(account.account_state.is_storage_cleared())); - - for (slot, value) in &account.storage { - let slot = B256::from(*slot); - let hashed_slot = keccak256(slot); - storage.storage.insert(hashed_slot, *value); - } - } - - let state_root = db.db.state_root(hashed_state)?; - - let header = reth_primitives::Header { + // TODO: uncomment once performance cost is acceptable + // + // let mut hashed_state = HashedPostState::default(); + // for (address, account) in &db.accounts { + // let hashed_address = keccak256(address); + // hashed_state.accounts.insert(hashed_address, Some(account.info.clone().into())); + + // let storage = hashed_state + // .storages + // .entry(hashed_address) + // .or_insert_with(|| HashedStorage::new(account.account_state.is_storage_cleared())); + + // for (slot, value) in &account.storage { + // let slot = B256::from(*slot); + // let hashed_slot = keccak256(slot); + // storage.storage.insert(hashed_slot, *value); + // } + // } + + // let state_root = db.db.state_root(hashed_state).map_err(T::Error::from_eth_err)?; + let state_root = B256::ZERO; + + let header = alloy_consensus::Header { beneficiary: block_env.coinbase, difficulty: block_env.difficulty, number: block_env.number.to(), timestamp: block_env.timestamp.to(), base_fee_per_gas: Some(block_env.basefee.to()), gas_limit: block_env.gas_limit.to(), - gas_used: calls.iter().map(|c| c.gas_used).sum::(), + gas_used: calls.iter().map(|c| c.gas_used).sum(), blob_gas_used: Some(0), parent_hash, receipts_root: calculate_receipt_root(&receipts), @@ -305,6 +276,6 @@ pub fn build_block( let txs_kind = if full_transactions { BlockTransactionsKind::Full } else { BlockTransactionsKind::Hashes }; - let block = from_block::(block, total_difficulty, txs_kind, None, tx_resp_builder)?; + let block = from_block(block, total_difficulty, txs_kind, None, tx_resp_builder)?; Ok(SimulatedBlock { inner: block, calls }) } diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index bfff1cafead3..83ef97807de0 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -4,7 +4,8 @@ use alloy_primitives::B256; use alloy_rpc_types_eth::TransactionInfo; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives_traits::SignedTransaction; use reth_rpc_types_compat::{ transaction::{from_recovered, from_recovered_with_block_context}, TransactionCompat, @@ -12,15 +13,15 @@ use reth_rpc_types_compat::{ /// Represents from where a transaction was fetched. 
#[derive(Debug, Clone, Eq, PartialEq)] -pub enum TransactionSource { +pub enum TransactionSource { /// Transaction exists in the pool (Pending) - Pool(TransactionSignedEcRecovered), + Pool(TransactionSignedEcRecovered), /// Transaction already included in a block /// /// This can be a historical block or a pending block (received from the CL) Block { /// Transaction fetched via provider - transaction: TransactionSignedEcRecovered, + transaction: TransactionSignedEcRecovered, /// Index of the transaction in the block index: u64, /// Hash of the block. @@ -34,19 +35,22 @@ pub enum TransactionSource { // === impl TransactionSource === -impl TransactionSource { +impl TransactionSource { /// Consumes the type and returns the wrapped transaction. - pub fn into_recovered(self) -> TransactionSignedEcRecovered { + pub fn into_recovered(self) -> TransactionSignedEcRecovered { self.into() } /// Conversion into network specific transaction type. - pub fn into_transaction(self, resp_builder: &T) -> T::Transaction { + pub fn into_transaction>( + self, + resp_builder: &Builder, + ) -> Result { match self { Self::Pool(tx) => from_recovered(tx, resp_builder), Self::Block { transaction, index, block_hash, block_number, base_fee } => { let tx_info = TransactionInfo { - hash: Some(transaction.hash()), + hash: Some(transaction.trie_hash()), index: Some(index), block_hash: Some(block_hash), block_number: Some(block_number), @@ -59,14 +63,14 @@ impl TransactionSource { } /// Returns the transaction and block related info, if not pending - pub fn split(self) -> (TransactionSignedEcRecovered, TransactionInfo) { + pub fn split(self) -> (TransactionSignedEcRecovered, TransactionInfo) { match self { Self::Pool(tx) => { - let hash = tx.hash(); + let hash = tx.trie_hash(); (tx, TransactionInfo { hash: Some(hash), ..Default::default() }) } Self::Block { transaction, index, block_hash, block_number, base_fee } => { - let hash = transaction.hash(); + let hash = transaction.trie_hash(); ( transaction, TransactionInfo { @@ -82,8 +86,8 @@ impl TransactionSource { } } -impl From for TransactionSignedEcRecovered { - fn from(value: TransactionSource) -> Self { +impl From> for TransactionSignedEcRecovered { + fn from(value: TransactionSource) -> Self { match value { TransactionSource::Pool(tx) => tx, TransactionSource::Block { transaction, .. 
} => transaction, diff --git a/crates/rpc/rpc-layer/Cargo.toml b/crates/rpc/rpc-layer/Cargo.toml index ec8dcb8229ec..d44e5e89f013 100644 --- a/crates/rpc/rpc-layer/Cargo.toml +++ b/crates/rpc/rpc-layer/Cargo.toml @@ -17,10 +17,11 @@ http.workspace = true jsonrpsee-http-client.workspace = true pin-project.workspace = true tower.workspace = true - +tower-http = { workspace = true, features = ["full"] } tracing.workspace = true [dev-dependencies] reqwest.workspace = true tokio = { workspace = true, features = ["macros"] } jsonrpsee = { workspace = true, features = ["server"] } +http-body-util.workspace=true diff --git a/crates/rpc/rpc-layer/src/compression_layer.rs b/crates/rpc/rpc-layer/src/compression_layer.rs new file mode 100644 index 000000000000..cf15f04aa78a --- /dev/null +++ b/crates/rpc/rpc-layer/src/compression_layer.rs @@ -0,0 +1,169 @@ +use jsonrpsee_http_client::{HttpBody, HttpRequest, HttpResponse}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; +use tower::{Layer, Service}; +use tower_http::compression::{Compression, CompressionLayer as TowerCompressionLayer}; + +/// This layer is a wrapper around [`tower_http::compression::CompressionLayer`] that integrates +/// with jsonrpsee's HTTP types. It automatically compresses responses based on the client's +/// Accept-Encoding header. +#[allow(missing_debug_implementations)] +#[derive(Clone)] +pub struct CompressionLayer { + inner_layer: TowerCompressionLayer, +} + +impl CompressionLayer { + /// Creates a new compression layer with zstd, gzip, brotli and deflate enabled. + pub fn new() -> Self { + Self { + inner_layer: TowerCompressionLayer::new().gzip(true).br(true).deflate(true).zstd(true), + } + } +} + +impl Default for CompressionLayer { + /// Creates a new compression layer with default settings. + /// See [`CompressionLayer::new`] for details. + fn default() -> Self { + Self::new() + } +} + +impl Layer for CompressionLayer { + type Service = CompressionService; + + fn layer(&self, inner: S) -> Self::Service { + CompressionService { compression: self.inner_layer.layer(inner) } + } +} + +/// Service that performs response compression. +/// +/// Created by [`CompressionLayer`]. 
+#[allow(missing_debug_implementations)] +#[derive(Clone)] +pub struct CompressionService { + compression: Compression, +} + +impl Service for CompressionService +where + S: Service, + S::Future: Send + 'static, +{ + type Response = HttpResponse; + type Error = S::Error; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.compression.poll_ready(cx) + } + + fn call(&mut self, req: HttpRequest) -> Self::Future { + let fut = self.compression.call(req); + + Box::pin(async move { + let resp = fut.await?; + let (parts, compressed_body) = resp.into_parts(); + let http_body = HttpBody::new(compressed_body); + + Ok(Self::Response::from_parts(parts, http_body)) + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use http::header::{ACCEPT_ENCODING, CONTENT_ENCODING}; + use http_body_util::BodyExt; + use jsonrpsee_http_client::{HttpRequest, HttpResponse}; + use std::{convert::Infallible, future::ready}; + + const TEST_DATA: &str = "compress test data "; + const REPEAT_COUNT: usize = 1000; + + #[derive(Clone)] + struct MockRequestService; + + impl Service for MockRequestService { + type Response = HttpResponse; + type Error = Infallible; + type Future = std::future::Ready>; + + fn poll_ready( + &mut self, + _: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + std::task::Poll::Ready(Ok(())) + } + + fn call(&mut self, _: HttpRequest) -> Self::Future { + let body = HttpBody::from(TEST_DATA.repeat(REPEAT_COUNT)); + let response = HttpResponse::builder().body(body).unwrap(); + ready(Ok(response)) + } + } + + fn setup_compression_service( + ) -> impl Service { + CompressionLayer::new().layer(MockRequestService) + } + + async fn get_response_size(response: HttpResponse) -> usize { + // Get the total size of the response body + response.into_body().collect().await.unwrap().to_bytes().len() + } + + #[tokio::test] + async fn test_gzip_compression() { + let mut service = setup_compression_service(); + let request = + HttpRequest::builder().header(ACCEPT_ENCODING, "gzip").body(HttpBody::empty()).unwrap(); + + let uncompressed_len = TEST_DATA.repeat(REPEAT_COUNT).len(); + + // Make the request + let response = service.call(request).await.unwrap(); + + // Verify the response has gzip content-encoding + assert_eq!( + response.headers().get(CONTENT_ENCODING).unwrap(), + "gzip", + "Response should be gzip encoded" + ); + + // Verify the response body is actually compressed (should be smaller than original) + let compressed_size = get_response_size(response).await; + assert!( + compressed_size < uncompressed_len, + "Compressed size ({compressed_size}) should be smaller than original size ({uncompressed_len})" + ); + } + + #[tokio::test] + async fn test_no_compression_when_not_requested() { + // Create a service with compression + let mut service = setup_compression_service(); + let request = HttpRequest::builder().body(HttpBody::empty()).unwrap(); + + let response = service.call(request).await.unwrap(); + assert!( + response.headers().get(CONTENT_ENCODING).is_none(), + "Response should not be compressed when not requested" + ); + + let uncompressed_len = TEST_DATA.repeat(REPEAT_COUNT).len(); + + // Verify the response body matches the original size + let response_size = get_response_size(response).await; + assert!( + response_size == uncompressed_len, + "Response size ({response_size}) should equal original size ({uncompressed_len})" + ); + } +} diff --git a/crates/rpc/rpc-layer/src/lib.rs b/crates/rpc/rpc-layer/src/lib.rs index 8387bb160e8b..540daf5592b7 
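Reviewer note on usage (not part of the patch): the new layer above is plug-and-play for any tower-based HTTP stack. A minimal wiring sketch, assuming jsonrpsee's `set_http_middleware` hook and a placeholder listen address:

use jsonrpsee::server::ServerBuilder;
use reth_rpc_layer::CompressionLayer;
use tower::ServiceBuilder;

async fn serve() -> Result<(), Box<dyn std::error::Error>> {
    // Responses are compressed only when the client advertises support via
    // Accept-Encoding, matching the behavior exercised by the tests above.
    let http_middleware = ServiceBuilder::new().layer(CompressionLayer::new());
    let _server = ServerBuilder::default()
        .set_http_middleware(http_middleware)
        .build("127.0.0.1:8545")
        .await?;
    // ...register an RpcModule and start the server as usual
    Ok(())
}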
100644
--- a/crates/rpc/rpc-layer/src/lib.rs
+++ b/crates/rpc/rpc-layer/src/lib.rs
@@ -13,9 +13,11 @@ use jsonrpsee_http_client::HttpResponse;
 
 mod auth_client_layer;
 mod auth_layer;
+mod compression_layer;
 mod jwt_validator;
 
 pub use auth_layer::{AuthService, ResponseFuture};
+pub use compression_layer::CompressionLayer;
 
 // Export alloy JWT types
 pub use alloy_rpc_types_engine::{Claims, JwtError, JwtSecret};
diff --git a/crates/rpc/rpc-server-types/src/module.rs b/crates/rpc/rpc-server-types/src/module.rs
index 9f96ff0cef35..43e4a9374369 100644
--- a/crates/rpc/rpc-server-types/src/module.rs
+++ b/crates/rpc/rpc-server-types/src/module.rs
@@ -140,6 +140,15 @@ impl RpcModuleSelection {
             (None, None) => true,
         }
     }
+
+    /// Returns true if the selection contains the given module.
+    pub fn contains(&self, module: &RethRpcModule) -> bool {
+        match self {
+            Self::All => true,
+            Self::Standard => Self::STANDARD_MODULES.contains(module),
+            Self::Selection(s) => s.contains(module),
+        }
+    }
 }
 
 impl From<&HashSet<RethRpcModule>> for RpcModuleSelection {
diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs
index b963fa69d8b9..ee3fce68d3b5 100644
--- a/crates/rpc/rpc-testing-util/src/trace.rs
+++ b/crates/rpc/rpc-testing-util/src/trace.rs
@@ -5,6 +5,7 @@ use alloy_primitives::{map::HashSet, Bytes, TxHash, B256};
 use alloy_rpc_types_eth::{transaction::TransactionRequest, Index};
 use alloy_rpc_types_trace::{
     filter::TraceFilter,
+    opcode::BlockOpcodeGas,
     parity::{LocalizedTransactionTrace, TraceResults, TraceType},
     tracerequest::TraceCallRequest,
 };
@@ -23,6 +24,9 @@ type RawTransactionTraceResult<'a> =
 /// A result type for the `trace_block` method that also captures the requested block.
 pub type TraceBlockResult = Result<(Vec<LocalizedTransactionTrace>, BlockId), (RpcError, BlockId)>;
 
+/// A result type for the `trace_blockOpcodeGas` method that also captures the requested block.
+pub type TraceBlockOpCodeGasResult = Result<(BlockOpcodeGas, BlockId), (RpcError, BlockId)>;
+
 /// Type alias representing the result of replaying a transaction.
 pub type ReplayTransactionResult = Result<(TraceResults, TxHash), (RpcError, TxHash)>;
 
@@ -65,6 +69,18 @@ pub trait TraceApiExt {
         I: IntoIterator<Item = B>,
         B: Into<BlockId>;
 
+    /// Returns a new stream that yields the opcode gas usage for the given blocks.
+    ///
+    /// See also [`StreamExt::buffered`].
+    fn trace_block_opcode_gas_unordered<I, B>(
+        &self,
+        params: I,
+        n: usize,
+    ) -> TraceBlockOpcodeGasStream<'_>
+    where
+        I: IntoIterator<Item = B>,
+        B: Into<BlockId>;
+
     /// Returns a new stream that replays the transactions for the given transaction hashes.
     ///
     /// This returns all results in order.
@@ -269,6 +285,26 @@ impl<T: TraceApiClient + Sync> TraceApiExt for T {
         TraceBlockStream { stream: Box::pin(stream) }
     }
 
+    fn trace_block_opcode_gas_unordered<I, B>(
+        &self,
+        params: I,
+        n: usize,
+    ) -> TraceBlockOpcodeGasStream<'_>
+    where
+        I: IntoIterator<Item = B>,
+        B: Into<BlockId>,
+    {
+        let blocks = params.into_iter().map(|b| b.into()).collect::<Vec<_>>();
+        let stream = futures::stream::iter(blocks.into_iter().map(move |block| async move {
+            match self.trace_block_opcode_gas(block).await {
+                Ok(result) => Ok((result.unwrap(), block)),
+                Err(err) => Err((err, block)),
+            }
+        }))
+        .buffered(n);
+        TraceBlockOpcodeGasStream { stream: Box::pin(stream) }
+    }
+
     fn replay_transactions<I>(
         &self,
         tx_hashes: I,
@@ -406,6 +442,38 @@ impl std::fmt::Debug for TraceBlockStream<'_> {
     }
 }
 
+/// A stream that yields the opcodes for the requested blocks.
+#[must_use = "streams do nothing unless polled"] +pub struct TraceBlockOpcodeGasStream<'a> { + stream: Pin + 'a>>, +} + +impl TraceBlockOpcodeGasStream<'_> { + /// Returns the next error result of the stream. + pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> { + loop { + match self.next().await? { + Ok(_) => continue, + Err(err) => return Some(err), + } + } + } +} + +impl Stream for TraceBlockOpcodeGasStream<'_> { + type Item = TraceBlockOpCodeGasResult; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.stream.as_mut().poll_next(cx) + } +} + +impl std::fmt::Debug for TraceBlockOpcodeGasStream<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TraceBlockOpcodeGasStream").finish_non_exhaustive() + } +} + /// A utility to compare RPC responses from two different clients. /// /// The `RpcComparer` is designed to perform comparisons between two RPC clients. @@ -670,4 +738,14 @@ mod tests { println!("Total successes: {successes}"); println!("Total failures: {failures}"); } + + #[tokio::test] + #[ignore] + async fn block_opcode_gas_stream() { + let client = HttpClientBuilder::default().build("http://localhost:8545").unwrap(); + let block = vec![BlockNumberOrTag::Latest]; + let mut stream = client.trace_block_opcode_gas_unordered(block, 2); + assert_is_stream(&stream); + let _opcodes = stream.next().await.unwrap(); + } } diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index 2e45d210d170..887986ada122 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -27,6 +27,7 @@ alloy-consensus.workspace = true # io serde.workspace = true +jsonrpsee-types.workspace = true [dev-dependencies] serde_json.workspace = true diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 41bd057dfd6f..564f5a939fc1 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -1,14 +1,15 @@ //! Compatibility functions for rpc `Block` type. 
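The new `trace_block_opcode_gas_unordered` mirrors `trace_block_unordered`: each block becomes one `trace_blockOpcodeGas` call, with at most `n` requests in flight via `buffered`. A consumer sketch (assumes the crate's lib target is importable as `reth_rpc_api_testing_util` and a node at a placeholder URL):

use alloy_eips::BlockNumberOrTag;
use futures::StreamExt;
use jsonrpsee::http_client::HttpClientBuilder;
use reth_rpc_api_testing_util::trace::TraceApiExt;

async fn print_opcode_gas() -> Result<(), Box<dyn std::error::Error>> {
    let client = HttpClientBuilder::default().build("http://localhost:8545")?;
    // Fan out over the requested blocks, two RPC calls in flight at a time.
    let mut stream = client.trace_block_opcode_gas_unordered(vec![BlockNumberOrTag::Latest], 2);
    while let Some(next) = stream.next().await {
        match next {
            Ok((opcode_gas, block)) => println!("{block:?}: {opcode_gas:?}"),
            Err((err, block)) => eprintln!("trace failed for {block:?}: {err}"),
        }
    }
    Ok(())
}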
-use crate::{transaction::from_recovered_with_block_context, TransactionCompat};
 use alloy_consensus::Sealed;
 use alloy_eips::eip4895::Withdrawals;
 use alloy_primitives::{B256, U256};
 use alloy_rlp::Encodable;
 use alloy_rpc_types_eth::{
-    Block, BlockError, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo,
+    Block, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo,
 };
-use reth_primitives::{Block as PrimitiveBlock, BlockWithSenders};
+use reth_primitives::{Block as PrimitiveBlock, BlockWithSenders, TransactionSigned};
+
+use crate::{transaction::from_recovered_with_block_context, TransactionCompat};
 
 /// Converts the given primitive block into a [`Block`] response with the given
 /// [`BlockTransactionsKind`]
@@ -20,7 +21,7 @@ pub fn from_block<T: TransactionCompat>(
     kind: BlockTransactionsKind,
     block_hash: Option<B256>,
     tx_resp_builder: &T,
-) -> Result<Block<T::Transaction>, BlockError> {
+) -> Result<Block<T::Transaction>, T::Error> {
     match kind {
         BlockTransactionsKind::Hashes => {
             Ok(from_block_with_tx_hashes::<T::Transaction>(block, total_difficulty, block_hash))
@@ -42,7 +43,7 @@ pub fn from_block_with_tx_hashes<T>(
     block_hash: Option<B256>,
 ) -> Block<T> {
     let block_hash = block_hash.unwrap_or_else(|| block.header.hash_slow());
-    let transactions = block.body.transactions().map(|tx| tx.hash()).collect();
+    let transactions = block.body.transactions.iter().map(|tx| tx.hash()).collect();
 
     from_block_with_transactions(
         block.length(),
@@ -63,7 +64,7 @@ pub fn from_block_full<T: TransactionCompat>(
     total_difficulty: U256,
     block_hash: Option<B256>,
     tx_resp_builder: &T,
-) -> Result<Block<T::Transaction>, BlockError> {
+) -> Result<Block<T::Transaction>, T::Error> {
     let block_hash = block_hash.unwrap_or_else(|| block.block.header.hash_slow());
     let block_number = block.block.number;
     let base_fee_per_gas = block.block.base_fee_per_gas;
@@ -86,9 +87,13 @@ pub fn from_block_full<T: TransactionCompat>(
                 index: Some(idx as u64),
             };
 
-            from_recovered_with_block_context::<T>(signed_tx_ec_recovered, tx_info, tx_resp_builder)
+            from_recovered_with_block_context::<T>(
+                signed_tx_ec_recovered,
+                tx_info,
+                tx_resp_builder,
+            )
         })
-        .collect::<Vec<_>>();
+        .collect::<Result<Vec<_>, T::Error>>()?;
 
     Ok(from_block_with_transactions(
         block_length,
diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs
index 9050b0cced1c..46bc9502c579 100644
--- a/crates/rpc/rpc-types-compat/src/engine/payload.rs
+++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs
@@ -1,7 +1,7 @@
 //! Standalone Conversion Functions for Handling Different Versions of Execution Payloads in
 //! Ethereum's Engine
 
-use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, EMPTY_OMMER_ROOT_HASH};
+use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, Header, EMPTY_OMMER_ROOT_HASH};
 use alloy_eips::{
     eip2718::{Decodable2718, Encodable2718},
     eip4895::Withdrawals,
@@ -15,7 +15,7 @@ use alloy_rpc_types_engine::{
 };
 use reth_primitives::{
     proofs::{self},
-    Block, BlockBody, Header, SealedBlock, TransactionSigned,
+    Block, BlockBody, BlockExt, SealedBlock, TransactionSigned,
 };
 
 /// Converts [`ExecutionPayloadV1`] to [`Block`]
@@ -363,6 +363,7 @@ mod tests {
         CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ExecutionPayloadV1,
         ExecutionPayloadV2, ExecutionPayloadV3,
     };
+    use reth_primitives::BlockExt;
 
     #[test]
     fn roundtrip_payload_to_block() {
diff --git a/crates/rpc/rpc-types-compat/src/transaction.rs b/crates/rpc/rpc-types-compat/src/transaction.rs
index cfbaaa622fbd..b439b61d44e8 100644
--- a/crates/rpc/rpc-types-compat/src/transaction.rs
+++ b/crates/rpc/rpc-types-compat/src/transaction.rs
@@ -1,5 +1,6 @@
 //! Compatibility functions for rpc `Transaction` type.
 
+use core::error;
 use std::fmt;
 
 use alloy_consensus::Transaction as _;
@@ -7,7 +8,7 @@ use alloy_rpc_types_eth::{
     request::{TransactionInput, TransactionRequest},
     TransactionInfo,
 };
-use reth_primitives::TransactionSignedEcRecovered;
+use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered};
 use serde::{Deserialize, Serialize};
 
 /// Create a new rpc transaction result for a mined transaction, using the given block hash,
@@ -15,25 +16,27 @@ use serde::{Deserialize, Serialize};
 ///
 /// The block hash, number, and tx index fields should be from the original block where the
 /// transaction was mined.
-pub fn from_recovered_with_block_context<T: TransactionCompat>(
-    tx: TransactionSignedEcRecovered,
+pub fn from_recovered_with_block_context<T: TransactionCompat>(
+    tx: TransactionSignedEcRecovered,
     tx_info: TransactionInfo,
     resp_builder: &T,
-) -> T::Transaction {
+) -> Result<T::Transaction, T::Error> {
     resp_builder.fill(tx, tx_info)
 }
 
 /// Create a new rpc transaction result for a _pending_ signed transaction, setting block
 /// environment related fields to `None`.
-pub fn from_recovered<T: TransactionCompat>(
-    tx: TransactionSignedEcRecovered,
+pub fn from_recovered<T: TransactionCompat>(
+    tx: TransactionSignedEcRecovered,
     resp_builder: &T,
-) -> T::Transaction {
+) -> Result<T::Transaction, T::Error> {
    resp_builder.fill(tx, TransactionInfo::default())
 }
 
 /// Builds RPC transaction w.r.t. network.
-pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug {
+pub trait TransactionCompat:
+    Send + Sync + Unpin + Clone + fmt::Debug
+{
     /// RPC transaction response type.
     type Transaction: Serialize
         + for<'de> Deserialize<'de>
@@ -43,9 +46,16 @@ pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug {
         + Clone
         + fmt::Debug;
 
+    /// RPC transaction error type.
+    type Error: error::Error + Into<jsonrpsee_types::ErrorObject<'static>>;
+
     /// Create a new rpc transaction result for a _pending_ signed transaction, setting block
     /// environment related fields to `None`.
-    fn fill(&self, tx: TransactionSignedEcRecovered, tx_inf: TransactionInfo) -> Self::Transaction;
+    fn fill(
+        &self,
+        tx: TransactionSignedEcRecovered,
+        tx_inf: TransactionInfo,
+    ) -> Result<Self::Transaction, Self::Error>;
 
     /// Truncates the input of a transaction to only the first 4 bytes.
// todo: remove in favour of using constructor on `TransactionResponse` or similar @@ -65,7 +75,7 @@ pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> Transact let access_list = tx.transaction.access_list().cloned(); let max_fee_per_blob_gas = tx.transaction.max_fee_per_blob_gas(); let authorization_list = tx.transaction.authorization_list().map(|l| l.to_vec()); - let blob_versioned_hashes = tx.transaction.blob_versioned_hashes(); + let blob_versioned_hashes = tx.transaction.blob_versioned_hashes().map(Vec::from); let tx_type = tx.transaction.tx_type(); // fees depending on the transaction type diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index ac3a548f9b5a..834b1a963bfe 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives-traits.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true reth-errors.workspace = true @@ -23,7 +24,7 @@ reth-provider.workspace = true reth-transaction-pool.workspace = true reth-network-api.workspace = true reth-rpc-engine-api.workspace = true -reth-revm.workspace = true +reth-revm = { workspace = true, features = ["witness"] } reth-tasks = { workspace = true, features = ["rayon"] } reth-consensus-common.workspace = true reth-rpc-types-compat.workspace = true @@ -82,7 +83,7 @@ parking_lot.workspace = true # misc tracing.workspace = true -tracing-futures = "0.2" +tracing-futures.workspace = true futures.workspace = true rand.workspace = true serde.workspace = true diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index a74d1b5a1550..9fc1be93a2f2 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -18,12 +18,13 @@ use reth_evm::{ execute::{BlockExecutorProvider, Executor}, ConfigureEvmEnv, }; -use reth_primitives::{Block, SealedBlockWithSenders}; +use reth_primitives::{Block, BlockExt, SealedBlockWithSenders}; +use reth_primitives_traits::SignedTransaction; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProofProvider, StateProviderFactory, TransactionVariant, }; -use reth_revm::database::StateProviderDatabase; +use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord}; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ helpers::{EthApiSpec, EthTransactions, TraceExt}, @@ -32,7 +33,6 @@ use reth_rpc_eth_api::{ use reth_rpc_eth_types::{EthApiError, StateCacheDb}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_tasks::pool::BlockingTaskGuard; -use reth_trie::{HashedPostState, HashedStorage}; use revm::{ db::{CacheDB, State}, primitives::{db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg}, @@ -40,7 +40,6 @@ use revm::{ use revm_inspectors::tracing::{ FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, TransactionContext, }; -use revm_primitives::{keccak256, HashMap}; use std::sync::Arc; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; @@ -109,7 +108,7 @@ where let mut transactions = block.transactions_with_sender().enumerate().peekable(); let mut inspector = None; while let Some((index, (signer, tx))) = transactions.next() { - let tx_hash = tx.hash; + let tx_hash = tx.hash(); let env = EnvWithHandlerCfg { env: Env::boxed( @@ -257,7 +256,7 @@ where cfg.clone(), block_env.clone(), block_txs, - tx.hash, + tx.hash(), )?; let env = 
EnvWithHandlerCfg { @@ -276,7 +275,7 @@ where Some(TransactionContext { block_hash: Some(block_hash), tx_index: Some(index), - tx_hash: Some(tx.hash), + tx_hash: Some(tx.hash()), }), &mut None, ) @@ -613,60 +612,19 @@ where let db = StateProviderDatabase::new(&state_provider); let block_executor = this.inner.block_executor.executor(db); - let mut hashed_state = HashedPostState::default(); - let mut keys = HashMap::default(); - let mut codes = HashMap::default(); + let mut witness_record = ExecutionWitnessRecord::default(); let _ = block_executor .execute_with_state_closure( (&(*block).clone().unseal(), block.difficulty).into(), |statedb: &State<_>| { - codes = statedb - .cache - .contracts - .iter() - .map(|(hash, code)| (*hash, code.original_bytes())) - .chain( - // cache state does not have all the contracts, especially when - // a contract is created within the block - // the contract only exists in bundle state, therefore we need - // to include them as well - statedb - .bundle_state - .contracts - .iter() - .map(|(hash, code)| (*hash, code.original_bytes())), - ) - .collect(); - - for (address, account) in &statedb.cache.accounts { - let hashed_address = keccak256(address); - hashed_state.accounts.insert( - hashed_address, - account.account.as_ref().map(|a| a.info.clone().into()), - ); - - let storage = - hashed_state.storages.entry(hashed_address).or_insert_with( - || HashedStorage::new(account.status.was_destroyed()), - ); - - if let Some(account) = &account.account { - keys.insert(hashed_address, address.to_vec().into()); - - for (slot, value) in &account.storage { - let slot = B256::from(*slot); - let hashed_slot = keccak256(slot); - storage.storage.insert(hashed_slot, *value); - - keys.insert(hashed_slot, slot.into()); - } - } - } + witness_record.record_executed_state(statedb); }, ) .map_err(|err| EthApiError::Internal(err.into()))?; + let ExecutionWitnessRecord { hashed_state, codes, keys } = witness_record; + let state = state_provider.witness(Default::default(), hashed_state).map_err(Into::into)?; Ok(ExecutionWitness { state: state.into_iter().collect(), codes, keys }) @@ -836,7 +794,7 @@ where #[async_trait] impl DebugApiServer for DebugApi where - Provider: BlockReaderIdExt + Provider: BlockReaderIdExt + HeaderProvider + ChainSpecProvider + StateProviderFactory diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index a2e0be304374..10eec4dbf974 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -1,31 +1,27 @@ //! `Eth` bundle implementation and helpers. 
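The `debug_executionWitness` change above replaces roughly forty lines of hand-rolled account/storage hashing and contract-code collection with a single call into `ExecutionWitnessRecord`. Reduced to its core, the new capture step looks like the following sketch (executor setup elided; the `record_executed_state` signature is assumed from its use above):

use reth_revm::witness::ExecutionWitnessRecord;
use revm::db::State;

/// Forward the post-execution state to the record: hashing of touched
/// accounts and slots, contract bytecode, and preimage keys now live behind
/// `record_executed_state` instead of being duplicated at each call site.
fn capture_witness<DB: revm::Database>(
    statedb: &State<DB>,
    record: &mut ExecutionWitnessRecord,
) {
    record.record_executed_state(statedb);
}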
-use std::sync::Arc; - +use alloy_consensus::Transaction as _; use alloy_primitives::{Keccak256, U256}; use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use jsonrpsee::core::RpcResult; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{ - revm_primitives::db::{DatabaseCommit, DatabaseRef}, - PooledTransactionsElement, -}; +use reth_primitives::{PooledTransactionsElement, Transaction}; +use reth_provider::{ChainSpecProvider, HeaderProvider}; use reth_revm::database::StateProviderDatabase; -use reth_rpc_eth_api::{FromEthApiError, FromEvmError, RpcNodeCore}; +use reth_rpc_eth_api::{ + helpers::{Call, EthTransactions, LoadPendingBlock}, + EthCallBundleApiServer, FromEthApiError, FromEvmError, RpcNodeCore, +}; +use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError, RpcInvalidTransactionError}; use reth_tasks::pool::BlockingTaskGuard; use revm::{ - db::CacheDB, + db::{CacheDB, DatabaseCommit, DatabaseRef}, primitives::{ResultAndState, TxEnv}, }; use revm_primitives::{EnvKzgSettings, EnvWithHandlerCfg, SpecId, MAX_BLOB_GAS_PER_BLOCK}; +use std::{ops::Deref, sync::Arc}; -use reth_provider::{ChainSpecProvider, HeaderProvider}; -use reth_rpc_eth_api::{ - helpers::{Call, EthTransactions, LoadPendingBlock}, - EthCallBundleApiServer, -}; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError, RpcInvalidTransactionError}; /// `Eth` bundle implementation. pub struct EthBundle { /// All nested fields bundled together. @@ -92,7 +88,7 @@ where .iter() .filter_map(|(tx, _)| { if let PooledTransactionsElement::BlobTransaction(tx) = tx { - Some(tx.transaction.tx.blob_gas()) + Some(tx.tx().tx().blob_gas()) } else { None } @@ -184,8 +180,7 @@ where let tx = tx.into_transaction(); hasher.update(tx.hash()); - let gas_price = tx - .effective_tip_per_gas(basefee) + let gas_price = Transaction::effective_tip_per_gas(tx.deref(), basefee) .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow) .map_err(Eth::Error::from_eth_err)?; eth_api.evm_config().fill_tx_env(evm.tx_mut(), &tx, signer); diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index c491ca21dfb2..b6b37c9f393e 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -7,6 +7,7 @@ use alloy_eips::BlockNumberOrTag; use alloy_network::Ethereum; use alloy_primitives::U256; use derive_more::Deref; +use reth_primitives::NodePrimitives; use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, SpawnBlocking}, @@ -102,7 +103,12 @@ where ) -> Self where Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions, + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, { let blocking_task_pool = BlockingTaskPool::build().expect("failed to build blocking task pool"); @@ -151,6 +157,7 @@ where type Pool = Pool; type Evm = EvmConfig; type Network = Network; + type PayloadBuilder = (); fn pool(&self) -> &Self::Pool { self.inner.pool() @@ -164,6 +171,10 @@ where self.inner.network() } + fn payload_builder(&self) -> &Self::PayloadBuilder { + &() + } + fn provider(&self) -> &Self::Provider { self.inner.provider() } @@ -400,14 +411,15 @@ impl EthApiInner + BlockReader + ChainSpecProvider + EvmEnvProvider @@ -498,23 +510,21 @@ mod tests { let random_fee: u128 = rng.gen(); if let Some(base_fee_per_gas) = 
header.base_fee_per_gas { - let transaction = TransactionSigned { - transaction: reth_primitives::Transaction::Eip1559( - alloy_consensus::TxEip1559 { - max_priority_fee_per_gas: random_fee, - max_fee_per_gas: random_fee + base_fee_per_gas as u128, - ..Default::default() - }, - ), - ..Default::default() - }; + let transaction = TransactionSigned::new_unhashed( + reth_primitives::Transaction::Eip1559(alloy_consensus::TxEip1559 { + max_priority_fee_per_gas: random_fee, + max_fee_per_gas: random_fee + base_fee_per_gas as u128, + ..Default::default() + }), + Signature::test_signature(), + ); transactions.push(transaction); } else { - let transaction = TransactionSigned { - transaction: reth_primitives::Transaction::Legacy(Default::default()), - ..Default::default() - }; + let transaction = TransactionSigned::new_unhashed( + reth_primitives::Transaction::Legacy(Default::default()), + Signature::test_signature(), + ); transactions.push(transaction); } diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 589cb801e2c4..132d99a5c1a3 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -18,7 +18,7 @@ use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSignedEcRecovered}; -use reth_provider::{BlockIdReader, BlockReader, EvmEnvProvider, ProviderError}; +use reth_provider::{BlockIdReader, BlockReader, ProviderError}; use reth_rpc_eth_api::{ EthApiTypes, EthFilterApiServer, FullEthApiTypes, RpcTransaction, TransactionCompat, }; @@ -34,7 +34,7 @@ use tokio::{ sync::{mpsc::Receiver, Mutex}, time::MissedTickBehavior, }; -use tracing::trace; +use tracing::{error, trace}; /// The maximum number of headers we read at once when handling a range filter. const MAX_HEADERS_RANGE: u64 = 1_000; // with ~530bytes per header this is ~500kb @@ -144,7 +144,7 @@ where impl EthFilter where - Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, + Provider: BlockReader + BlockIdReader + 'static, Pool: TransactionPool + 'static, Eth: FullEthApiTypes, { @@ -244,7 +244,7 @@ where impl EthFilterApiServer> for EthFilter where - Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, + Provider: BlockReader + BlockIdReader + 'static, Pool: TransactionPool + 'static, Eth: FullEthApiTypes + 'static, { @@ -367,7 +367,7 @@ struct EthFilterInner { impl EthFilterInner where - Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, + Provider: BlockReader + BlockIdReader + 'static, Pool: TransactionPool + 'static, { /// Returns logs matching given filter object. 
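The fee-history test refactor above drops struct-literal construction of `TransactionSigned` (whose `hash` field is no longer part of the public pattern) in favor of `new_unhashed`. Condensed into a helper (illustrative only; the `Signature` import path is assumed to be the alloy re-export the tests use):

use alloy_consensus::TxEip1559;
use alloy_primitives::Signature;
use reth_primitives::{Transaction, TransactionSigned};

/// Build a throwaway EIP-1559 transaction whose fee cap sits above the given
/// base fee, as the fee-history tests do.
fn test_tx(base_fee_per_gas: u64, random_fee: u128) -> TransactionSigned {
    TransactionSigned::new_unhashed(
        Transaction::Eip1559(TxEip1559 {
            max_priority_fee_per_gas: random_fee,
            max_fee_per_gas: random_fee + base_fee_per_gas as u128,
            ..Default::default()
        }),
        Signature::test_signature(),
    )
}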
@@ -625,10 +625,15 @@ where let mut prepared_stream = self.txs_stream.lock().await; while let Ok(tx) = prepared_stream.try_recv() { - pending_txs.push(from_recovered( - tx.transaction.to_recovered_transaction(), - &self.tx_resp_builder, - )) + match from_recovered(tx.transaction.to_recovered_transaction(), &self.tx_resp_builder) { + Ok(tx) => pending_txs.push(tx), + Err(err) => { + error!(target: "rpc", + %err, + "Failed to fill txn with block context" + ); + } + } } FilterChanges::Transactions(pending_txs) } diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index fd3b9db9da28..f6aae34b961e 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -32,7 +32,6 @@ where let block_hash = block.hash(); let excess_blob_gas = block.excess_blob_gas; let timestamp = block.timestamp; - let block = block.unseal(); return block .body @@ -42,7 +41,7 @@ where .enumerate() .map(|(idx, (tx, receipt))| { let meta = TransactionMeta { - tx_hash: tx.hash, + tx_hash: tx.hash(), index: idx as u64, block_hash, block_number, diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index d1d33190a7c7..c0594c023fa1 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -1,13 +1,14 @@ //! Contains RPC handler implementations specific to endpoints that call/execute within evm. -use reth_evm::ConfigureEvm; -use reth_primitives::Header; -use reth_rpc_eth_api::helpers::{Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}; - use crate::EthApi; +use alloy_consensus::Header; +use reth_evm::ConfigureEvm; +use reth_rpc_eth_api::helpers::{ + estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking, +}; impl EthCall for EthApi where - Self: Call + LoadPendingBlock + Self: EstimateCall + LoadPendingBlock { } @@ -26,3 +27,8 @@ where self.inner.max_simulate_blocks() } } + +impl EstimateCall for EthApi where + Self: Call +{ +} diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 6b28947df358..a67522ce0326 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -1,8 +1,8 @@ //! Support for building a pending block with transactions from local view of mempool. +use alloy_consensus::Header; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; -use reth_primitives::Header; use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_rpc_eth_api::{ helpers::{LoadPendingBlock, SpawnBlocking}, @@ -18,8 +18,10 @@ impl LoadPendingBlock where Self: SpawnBlocking + RpcNodeCore< - Provider: BlockReaderIdExt - + EvmEnvProvider + Provider: BlockReaderIdExt< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + > + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool, diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 594cffd09f22..ae723fc5314f 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,6 +1,7 @@ //! Builds an RPC receipt response w.r.t. data layout of network. 
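Now that `TransactionCompat::fill` is fallible, every caller has to decide what a conversion failure means. The filter change above (and the pubsub change further down) choose log-and-skip rather than aborting the whole response. The pattern, extracted into a standalone sketch:

use reth_primitives::TransactionSignedEcRecovered;
use reth_rpc_types_compat::{transaction::from_recovered, TransactionCompat};
use tracing::error;

/// Convert recovered transactions into RPC responses, dropping (and logging)
/// any that fail to fill, as the filter/pubsub handlers above do.
fn fill_all<T: TransactionCompat>(
    txs: Vec<TransactionSignedEcRecovered>,
    resp_builder: &T,
) -> Vec<T::Transaction> {
    txs.into_iter()
        .filter_map(|tx| match from_recovered(tx, resp_builder) {
            Ok(tx) => Some(tx),
            Err(err) => {
                error!(target: "rpc", %err, "Failed to fill txn with block context");
                None
            }
        })
        .collect()
}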
 use reth_primitives::{Receipt, TransactionMeta, TransactionSigned};
+use reth_provider::{ReceiptProvider, TransactionsProvider};
 use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcNodeCoreExt, RpcReceipt};
 use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder};
 
@@ -8,7 +9,10 @@ use crate::EthApi;
 
 impl<Provider, Pool, Network, EvmConfig> LoadReceipt for EthApi<Provider, Pool, Network, EvmConfig>
 where
-    Self: RpcNodeCoreExt,
+    Self: RpcNodeCoreExt<
+        Provider: TransactionsProvider<Transaction = TransactionSigned>
+            + ReceiptProvider<Receipt = Receipt>,
+    >,
 {
     async fn build_transaction_receipt(
         &self,
diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs
index e7e9c64447b1..32645ba08d6c 100644
--- a/crates/rpc/rpc/src/eth/helpers/signer.rs
+++ b/crates/rpc/rpc/src/eth/helpers/signer.rs
@@ -109,6 +109,7 @@ impl EthSigner for DevSigner {
 
 #[cfg(test)]
 mod tests {
+    use alloy_consensus::Transaction;
     use alloy_primitives::{Bytes, U256};
     use alloy_rpc_types_eth::TransactionInput;
     use revm_primitives::TxKind;
diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs
index b270ed1b2ad1..9c60a4c105f9 100644
--- a/crates/rpc/rpc/src/eth/helpers/trace.rs
+++ b/crates/rpc/rpc/src/eth/helpers/trace.rs
@@ -1,12 +1,13 @@
 //! Contains RPC handler implementations specific to tracing.
 
+use alloy_consensus::Header;
 use reth_evm::ConfigureEvm;
-use reth_primitives::Header;
+use reth_provider::BlockReader;
 use reth_rpc_eth_api::helpers::{LoadState, Trace};
 
 use crate::EthApi;
 
 impl<Provider, Pool, Network, EvmConfig> Trace for EthApi<Provider, Pool, Network, EvmConfig>
 where
-    Self: LoadState<Evm: ConfigureEvm<Header = Header>>
+    Self: LoadState<Provider: BlockReader, Evm: ConfigureEvm<Header = Header>>
 {
 }
diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs
index 19ffc55b398b..157213b54e66 100644
--- a/crates/rpc/rpc/src/eth/helpers/types.rs
+++ b/crates/rpc/rpc/src/eth/helpers/types.rs
@@ -4,10 +4,27 @@
 use alloy_consensus::{Signed, Transaction as _, TxEip4844Variant, TxEnvelope};
 use alloy_network::{Ethereum, Network};
 use alloy_rpc_types_eth::{Transaction, TransactionInfo};
 use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered};
+use reth_rpc_eth_api::EthApiTypes;
+use reth_rpc_eth_types::EthApiError;
 use reth_rpc_types_compat::TransactionCompat;
 
+/// A standalone [`EthApiTypes`] implementation for Ethereum.
+#[derive(Debug, Clone, Copy, Default)]
+pub struct EthereumEthApiTypes(EthTxBuilder);
+
+impl EthApiTypes for EthereumEthApiTypes {
+    type Error = EthApiError;
+    type NetworkTypes = Ethereum;
+    type TransactionCompat = EthTxBuilder;
+
+    fn tx_resp_builder(&self) -> &Self::TransactionCompat {
+        &self.0
+    }
+}
+
 /// Builds RPC transaction response for l1.
-#[derive(Debug, Clone, Copy)]
+#[derive(Debug, Clone, Copy, Default)]
+#[non_exhaustive]
 pub struct EthTxBuilder;
 
 impl TransactionCompat for EthTxBuilder
 where
@@ -16,13 +33,16 @@
 {
     type Transaction = <Ethereum as Network>::TransactionResponse;
 
+    type Error = EthApiError;
+
     fn fill(
         &self,
         tx: TransactionSignedEcRecovered,
         tx_info: TransactionInfo,
-    ) -> Self::Transaction {
+    ) -> Result<Self::Transaction, Self::Error> {
         let from = tx.signer();
-        let TransactionSigned { transaction, signature, hash } = tx.into_signed();
+        let hash = tx.hash();
+        let TransactionSigned { transaction, signature, ..
} = tx.into_signed(); let inner: TxEnvelope = match transaction { reth_primitives::Transaction::Legacy(tx) => { @@ -54,14 +74,14 @@ where }) .unwrap_or_else(|| inner.max_fee_per_gas()); - Transaction { + Ok(Transaction { inner, block_hash, block_number, transaction_index, from, effective_gas_price: Some(effective_gas_price), - } + }) } fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index 4d1833add3e7..d8a5b95f55e7 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -13,6 +13,9 @@ pub use core::EthApi; pub use filter::EthFilter; pub use pubsub::EthPubSub; -pub use helpers::{signer::DevSigner, types::EthTxBuilder}; +pub use helpers::{ + signer::DevSigner, + types::{EthTxBuilder, EthereumEthApiTypes}, +}; pub use reth_rpc_eth_api::{EthApiServer, EthApiTypes, FullEthApiServer, RpcNodeCore}; diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 0702e3147ce6..8ad809b8b186 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -2,6 +2,7 @@ use std::sync::Arc; +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::TxHash; use alloy_rpc_types_eth::{ pubsub::{ @@ -15,6 +16,7 @@ use jsonrpsee::{ server::SubscriptionMessage, types::ErrorObject, PendingSubscriptionSink, SubscriptionSink, }; use reth_network_api::NetworkInfo; +use reth_primitives::NodePrimitives; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider}; use reth_rpc_eth_api::{pubsub::EthPubSubApiServer, TransactionCompat}; use reth_rpc_eth_types::logs_utils; @@ -27,6 +29,7 @@ use tokio_stream::{ wrappers::{BroadcastStream, ReceiverStream}, Stream, }; +use tracing::error; /// `Eth` pubsub RPC implementation. 
/// @@ -83,7 +86,14 @@ impl EthPubSubApiServer where Provider: BlockReader + EvmEnvProvider + Clone + 'static, Pool: TransactionPool + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + SignedTx: Encodable2718, + BlockHeader = reth_primitives::Header, + Receipt = reth_primitives::Receipt, + >, + > + Clone + + 'static, Network: NetworkInfo + Clone + 'static, Eth: TransactionCompat + 'static, { @@ -116,7 +126,14 @@ async fn handle_accepted( where Provider: BlockReader + EvmEnvProvider + Clone + 'static, Pool: TransactionPool + 'static, - Events: CanonStateSubscriptions + Clone + 'static, + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + SignedTx: Encodable2718, + BlockHeader = reth_primitives::Header, + Receipt = reth_primitives::Receipt, + >, + > + Clone + + 'static, Network: NetworkInfo + Clone + 'static, Eth: TransactionCompat, { @@ -146,11 +163,23 @@ where match params { Params::Bool(true) => { // full transaction objects requested - let stream = pubsub.full_pending_transaction_stream().map(|tx| { - EthSubscriptionResult::FullTransaction(Box::new(from_recovered( + let stream = pubsub.full_pending_transaction_stream().filter_map(|tx| { + let tx_value = match from_recovered( tx.transaction.to_recovered_transaction(), &tx_resp_builder, - ))) + ) { + Ok(tx) => { + Some(EthSubscriptionResult::FullTransaction(Box::new(tx))) + } + Err(err) => { + error!(target = "rpc", + %err, + "Failed to fill transaction with block context" + ); + None + } + }; + std::future::ready(tx_value) }); return pipe_from_stream(accepted_sink, stream).await } @@ -320,7 +349,13 @@ where impl EthPubSubInner where Provider: BlockReader + EvmEnvProvider + 'static, - Events: CanonStateSubscriptions + 'static, + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + SignedTx: Encodable2718, + BlockHeader = reth_primitives::Header, + Receipt = reth_primitives::Receipt, + >, + > + 'static, Network: NetworkInfo + 'static, Pool: 'static, { diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index 40d951f755fa..f77b7e79da0c 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -10,10 +10,7 @@ use alloy_rpc_types_mev::{ use jsonrpsee::core::RpcResult; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{ - revm_primitives::db::{DatabaseCommit, DatabaseRef}, - TransactionSigned, -}; +use reth_primitives::TransactionSigned; use reth_provider::{ChainSpecProvider, HeaderProvider}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::MevSimApiServer; @@ -26,6 +23,7 @@ use reth_tasks::pool::BlockingTaskGuard; use revm::{ db::CacheDB, primitives::{Address, EnvWithHandlerCfg, ResultAndState, SpecId, TxEnv}, + DatabaseCommit, DatabaseRef, }; use std::{sync::Arc, time::Duration}; use tracing::info; diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 45c5f1a3bc32..f81eefdc5ff1 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -1,3 +1,4 @@ +use alloy_consensus::Header; use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, B256, U256}; use alloy_rpc_types_eth::{ @@ -18,7 +19,6 @@ use reth_consensus_common::calc::{ base_block_reward, base_block_reward_pre_merge, block_reward, ommer_reward, }; use reth_evm::ConfigureEvmEnv; -use reth_primitives::Header; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, 
StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; @@ -74,7 +74,7 @@ impl TraceApi { impl TraceApi where - Provider: BlockReader + Provider: BlockReader::Block> + StateProviderFactory + EvmEnvProvider + ChainSpecProvider @@ -565,7 +565,7 @@ where #[async_trait] impl TraceApiServer for TraceApi where - Provider: BlockReader + Provider: BlockReader::Block> + StateProviderFactory + EvmEnvProvider + ChainSpecProvider diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index d03e10ca75a8..3e46183b4661 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -1,3 +1,4 @@ +use core::fmt; use std::collections::BTreeMap; use alloy_consensus::Transaction; @@ -6,7 +7,7 @@ use alloy_rpc_types_txpool::{ TxpoolContent, TxpoolContentFrom, TxpoolInspect, TxpoolInspectSummary, TxpoolStatus, }; use async_trait::async_trait; -use jsonrpsee::core::RpcResult as Result; +use jsonrpsee::core::RpcResult; use reth_primitives::TransactionSignedEcRecovered; use reth_rpc_api::TxPoolApiServer; use reth_rpc_types_compat::{transaction::from_recovered, TransactionCompat}; @@ -35,33 +36,36 @@ where Pool: TransactionPool + 'static, Eth: TransactionCompat, { - fn content(&self) -> TxpoolContent { + fn content(&self) -> Result, Eth::Error> { #[inline] fn insert( tx: &Tx, content: &mut BTreeMap>, resp_builder: &RpcTxB, - ) where + ) -> Result<(), RpcTxB::Error> + where Tx: PoolTransaction>, RpcTxB: TransactionCompat, { content.entry(tx.sender()).or_default().insert( tx.nonce().to_string(), - from_recovered(tx.clone().into_consensus().into(), resp_builder), + from_recovered(tx.clone().into_consensus().into(), resp_builder)?, ); + + Ok(()) } let AllPoolTransactions { pending, queued } = self.pool.all_transactions(); let mut content = TxpoolContent { pending: BTreeMap::new(), queued: BTreeMap::new() }; for pending in pending { - insert::<_, Eth>(&pending.transaction, &mut content.pending, &self.tx_resp_builder); + insert::<_, Eth>(&pending.transaction, &mut content.pending, &self.tx_resp_builder)?; } for queued in queued { - insert::<_, Eth>(&queued.transaction, &mut content.queued, &self.tx_resp_builder); + insert::<_, Eth>(&queued.transaction, &mut content.queued, &self.tx_resp_builder)?; } - content + Ok(content) } } @@ -76,7 +80,7 @@ where /// Ref: [Here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_status) /// /// Handler for `txpool_status` - async fn txpool_status(&self) -> Result { + async fn txpool_status(&self) -> RpcResult { trace!(target: "rpc::eth", "Serving txpool_status"); let all = self.pool.all_transactions(); Ok(TxpoolStatus { pending: all.pending.len() as u64, queued: all.queued.len() as u64 }) @@ -88,7 +92,7 @@ where /// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_inspect) for more details /// /// Handler for `txpool_inspect` - async fn txpool_inspect(&self) -> Result { + async fn txpool_inspect(&self) -> RpcResult { trace!(target: "rpc::eth", "Serving txpool_inspect"); #[inline] @@ -131,9 +135,9 @@ where async fn txpool_content_from( &self, from: Address, - ) -> Result> { + ) -> RpcResult> { trace!(target: "rpc::eth", ?from, "Serving txpool_contentFrom"); - Ok(self.content().remove_from(&from)) + Ok(self.content().map_err(Into::into)?.remove_from(&from)) } /// Returns the details of all transactions currently pending for inclusion in the next @@ -141,14 +145,14 @@ where /// /// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_content) for more details /// Handler for 
`txpool_content` - async fn txpool_content(&self) -> Result> { + async fn txpool_content(&self) -> RpcResult> { trace!(target: "rpc::eth", "Serving txpool_content"); - Ok(self.content()) + Ok(self.content().map_err(Into::into)?) } } -impl std::fmt::Debug for TxPoolApi { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Debug for TxPoolApi { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TxpoolApi").finish_non_exhaustive() } } diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index c3f2aab70bb9..a5e29bb739f9 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -5,13 +5,13 @@ use alloy_rpc_types_beacon::relay::{ BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, }; use alloy_rpc_types_engine::{ - BlobsBundleV1, CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, + BlobsBundleV1, CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, PayloadError, }; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_consensus::{Consensus, PostExecutionInput}; -use reth_errors::{BlockExecutionError, ConsensusError, ProviderError, RethError}; +use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; use reth_ethereum_consensus::GAS_LIMIT_BOUND_DIVISOR; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_payload_validator::ExecutionPayloadValidator; @@ -22,70 +22,16 @@ use reth_provider::{ }; use reth_revm::{cached::CachedReads, database::StateProviderDatabase}; use reth_rpc_api::BlockSubmissionValidationApiServer; -use reth_rpc_eth_types::EthApiError; -use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; +use reth_rpc_server_types::result::internal_rpc_err; +use reth_tasks::TaskSpawner; use reth_trie::HashedPostState; use revm_primitives::{Address, B256, U256}; use serde::{Deserialize, Serialize}; use std::{collections::HashSet, sync::Arc}; -use tokio::sync::RwLock; - -/// Configuration for validation API. -#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] -pub struct ValidationApiConfig { - /// Disallowed addresses. - pub disallow: HashSet
<Address>,
-}
-
-#[derive(Debug, thiserror::Error)]
-pub enum ValidationApiError {
-    #[error("block gas limit mismatch: {_0}")]
-    GasLimitMismatch(GotExpected<u64>),
-    #[error("block gas used mismatch: {_0}")]
-    GasUsedMismatch(GotExpected<u64>),
-    #[error("block parent hash mismatch: {_0}")]
-    ParentHashMismatch(GotExpected<B256>),
-    #[error("block hash mismatch: {_0}")]
-    BlockHashMismatch(GotExpected<B256>),
-    #[error("missing latest block in database")]
-    MissingLatestBlock,
-    #[error("could not verify proposer payment")]
-    ProposerPayment,
-    #[error("invalid blobs bundle")]
-    InvalidBlobsBundle,
-    #[error("block accesses blacklisted address: {_0}")]
-    Blacklist(Address),
-    #[error(transparent)]
-    Blob(#[from] BlobTransactionValidationError),
-    #[error(transparent)]
-    Consensus(#[from] ConsensusError),
-    #[error(transparent)]
-    Provider(#[from] ProviderError),
-    #[error(transparent)]
-    Execution(#[from] BlockExecutionError),
-}
-
-#[derive(Debug)]
-pub struct ValidationApiInner<Provider: ChainSpecProvider, E> {
-    /// The provider that can interact with the chain.
-    provider: Provider,
-    /// Consensus implementation.
-    consensus: Arc<dyn Consensus>,
-    /// Execution payload validator.
-    payload_validator: ExecutionPayloadValidator<Provider::ChainSpec>,
-    /// Block executor factory.
-    executor_provider: E,
-    /// Set of disallowed addresses
-    disallow: HashSet<Address>
, - /// Cached state reads to avoid redundant disk I/O across multiple validation attempts - /// targeting the same state. Stores a tuple of (`block_hash`, `cached_reads`) for the - /// latest head block state. Uses async `RwLock` to safely handle concurrent validation - /// requests. - cached_state: RwLock<(B256, CachedReads)>, -} +use tokio::sync::{oneshot, RwLock}; /// The type that implements the `validation` rpc namespace trait -#[derive(Debug, derive_more::Deref)] +#[derive(Clone, Debug, derive_more::Deref)] pub struct ValidationApi { #[deref] inner: Arc>, @@ -101,6 +47,7 @@ where consensus: Arc, executor_provider: E, config: ValidationApiConfig, + task_spawner: Box, ) -> Self { let ValidationApiConfig { disallow } = config; @@ -112,6 +59,7 @@ where executor_provider, disallow, cached_state: Default::default(), + task_spawner, }); Self { inner } @@ -392,55 +340,23 @@ where Ok(versioned_hashes) } -} - -#[async_trait] -impl BlockSubmissionValidationApiServer for ValidationApi -where - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory - + HeaderProvider - + AccountReader - + WithdrawalsProvider - + Clone - + 'static, - E: BlockExecutorProvider, -{ - async fn validate_builder_submission_v1( - &self, - _request: BuilderBlockValidationRequest, - ) -> RpcResult<()> { - Err(internal_rpc_err("unimplemented")) - } - - async fn validate_builder_submission_v2( - &self, - _request: BuilderBlockValidationRequestV2, - ) -> RpcResult<()> { - Err(internal_rpc_err("unimplemented")) - } - /// Validates a block submitted to the relay + /// Core logic for validating the builder submission v3 async fn validate_builder_submission_v3( &self, request: BuilderBlockValidationRequestV3, - ) -> RpcResult<()> { + ) -> Result<(), ValidationApiError> { let block = self .payload_validator .ensure_well_formed_payload( ExecutionPayload::V3(request.request.execution_payload), ExecutionPayloadSidecar::v3(CancunPayloadFields { parent_beacon_block_root: request.parent_beacon_block_root, - versioned_hashes: self - .validate_blobs_bundle(request.request.blobs_bundle) - .map_err(|e| RethError::Other(e.into())) - .to_rpc_result()?, + versioned_hashes: self.validate_blobs_bundle(request.request.blobs_bundle)?, }), - ) - .to_rpc_result()? + )? .try_seal_with_senders() - .map_err(|_| EthApiError::InvalidTransactionSignature)?; + .map_err(|_| ValidationApiError::InvalidTransactionSignature)?; self.validate_message_against_block( block, @@ -448,15 +364,13 @@ where request.registered_gas_limit, ) .await - .map_err(|e| RethError::Other(e.into())) - .to_rpc_result() } - /// Validates a block submitted to the relay + /// Core logic for validating the builder submission v4 async fn validate_builder_submission_v4( &self, request: BuilderBlockValidationRequestV4, - ) -> RpcResult<()> { + ) -> Result<(), ValidationApiError> { let block = self .payload_validator .ensure_well_formed_payload( @@ -465,16 +379,13 @@ where CancunPayloadFields { parent_beacon_block_root: request.parent_beacon_block_root, versioned_hashes: self - .validate_blobs_bundle(request.request.blobs_bundle) - .map_err(|e| RethError::Other(e.into())) - .to_rpc_result()?, + .validate_blobs_bundle(request.request.blobs_bundle)?, }, request.request.execution_requests.into(), ), - ) - .to_rpc_result()? + )? 
.try_seal_with_senders() - .map_err(|_| EthApiError::InvalidTransactionSignature)?; + .map_err(|_| ValidationApiError::InvalidTransactionSignature)?; self.validate_message_against_block( block, @@ -482,7 +393,131 @@ where request.registered_gas_limit, ) .await - .map_err(|e| RethError::Other(e.into())) - .to_rpc_result() } } + +#[async_trait] +impl BlockSubmissionValidationApiServer for ValidationApi +where + Provider: BlockReaderIdExt + + ChainSpecProvider + + StateProviderFactory + + HeaderProvider + + AccountReader + + WithdrawalsProvider + + Clone + + 'static, + E: BlockExecutorProvider, +{ + async fn validate_builder_submission_v1( + &self, + _request: BuilderBlockValidationRequest, + ) -> RpcResult<()> { + Err(internal_rpc_err("unimplemented")) + } + + async fn validate_builder_submission_v2( + &self, + _request: BuilderBlockValidationRequestV2, + ) -> RpcResult<()> { + Err(internal_rpc_err("unimplemented")) + } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v3( + &self, + request: BuilderBlockValidationRequestV3, + ) -> RpcResult<()> { + let this = self.clone(); + let (tx, rx) = oneshot::channel(); + + self.task_spawner.spawn_blocking(Box::pin(async move { + let result = Self::validate_builder_submission_v3(&this, request) + .await + .map_err(|err| internal_rpc_err(err.to_string())); + let _ = tx.send(result); + })); + + rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))? + } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v4( + &self, + request: BuilderBlockValidationRequestV4, + ) -> RpcResult<()> { + let this = self.clone(); + let (tx, rx) = oneshot::channel(); + + self.task_spawner.spawn_blocking(Box::pin(async move { + let result = Self::validate_builder_submission_v4(&this, request) + .await + .map_err(|err| internal_rpc_err(err.to_string())); + let _ = tx.send(result); + })); + + rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))? + } +} + +#[derive(Debug)] +pub struct ValidationApiInner { + /// The provider that can interact with the chain. + provider: Provider, + /// Consensus implementation. + consensus: Arc, + /// Execution payload validator. + payload_validator: ExecutionPayloadValidator, + /// Block executor factory. + executor_provider: E, + /// Set of disallowed addresses + disallow: HashSet
<Address>,
+    /// Cached state reads to avoid redundant disk I/O across multiple validation attempts
+    /// targeting the same state. Stores a tuple of (`block_hash`, `cached_reads`) for the
+    /// latest head block state. Uses async `RwLock` to safely handle concurrent validation
+    /// requests.
+    cached_state: RwLock<(B256, CachedReads)>,
+    /// Task spawner for blocking operations
+    task_spawner: Box<dyn TaskSpawner>,
+}
+
+/// Configuration for validation API.
+#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)]
+pub struct ValidationApiConfig {
+    /// Disallowed addresses.
+    pub disallow: HashSet<Address>,
+}
+
+/// Errors thrown by the validation API.
+#[derive(Debug, thiserror::Error)]
+pub enum ValidationApiError {
+    #[error("block gas limit mismatch: {_0}")]
+    GasLimitMismatch(GotExpected<u64>),
+    #[error("block gas used mismatch: {_0}")]
+    GasUsedMismatch(GotExpected<u64>),
+    #[error("block parent hash mismatch: {_0}")]
+    ParentHashMismatch(GotExpected<B256>),
+    #[error("block hash mismatch: {_0}")]
+    BlockHashMismatch(GotExpected<B256>),
+    #[error("missing latest block in database")]
+    MissingLatestBlock,
+    #[error("could not verify proposer payment")]
+    ProposerPayment,
+    #[error("invalid blobs bundle")]
+    InvalidBlobsBundle,
+    /// When the transaction signature is invalid
+    #[error("invalid transaction signature")]
+    InvalidTransactionSignature,
+    #[error("block accesses blacklisted address: {_0}")]
+    Blacklist(Address),
+    #[error(transparent)]
+    Blob(#[from] BlobTransactionValidationError),
+    #[error(transparent)]
+    Consensus(#[from] ConsensusError),
+    #[error(transparent)]
+    Provider(#[from] ProviderError),
+    #[error(transparent)]
+    Execution(#[from] BlockExecutionError),
+    #[error(transparent)]
+    Payload(#[from] PayloadError),
+}
diff --git a/crates/stages/api/src/error.rs b/crates/stages/api/src/error.rs
index 8562b10b6a55..9a4ef35aaf25 100644
--- a/crates/stages/api/src/error.rs
+++ b/crates/stages/api/src/error.rs
@@ -1,5 +1,4 @@
 use crate::PipelineEvent;
-use alloy_primitives::{BlockNumber, TxNumber};
 use reth_consensus::ConsensusError;
 use reth_errors::{BlockExecutionError, DatabaseError, RethError};
 use reth_network_p2p::error::DownloadError;
@@ -100,28 +99,6 @@ pub enum StageError {
         /// Static File segment
         segment: StaticFileSegment,
     },
-    /// Unrecoverable inconsistency error related to a transaction number in a static file segment.
-    #[error(
-        "inconsistent transaction number for {segment}. db: {database}, static_file: {static_file}"
-    )]
-    InconsistentTxNumber {
-        /// Static File segment where this error was encountered.
-        segment: StaticFileSegment,
-        /// Expected database transaction number.
-        database: TxNumber,
-        /// Expected static file transaction number.
-        static_file: TxNumber,
-    },
-    /// Unrecoverable inconsistency error related to a block number in a static file segment.
-    #[error("inconsistent block number for {segment}. db: {database}, static_file: {static_file}")]
-    InconsistentBlockNumber {
-        /// Static File segment where this error was encountered.
-        segment: StaticFileSegment,
-        /// Expected database block number.
-        database: BlockNumber,
-        /// Expected static file block number.
-        static_file: BlockNumber,
-    },
     /// The prune checkpoint for the given segment is missing.
     #[error("missing prune checkpoint for {0}")]
     MissingPruneCheckpoint(PruneSegment),
@@ -156,8 +133,6 @@ impl StageError {
                 Self::MissingDownloadBuffer |
                 Self::MissingSyncGap |
                 Self::ChannelClosed |
-                Self::InconsistentBlockNumber { .. } |
-                Self::InconsistentTxNumber { ..
} | Self::Internal(_) | Self::Fatal(_) ) diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 399a3ffb4b79..39d26cd88082 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -9,7 +9,7 @@ use reth_primitives_traits::constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, ChainStateBlockReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, StageCheckpointReader, - StageCheckpointWriter, StaticFileProviderFactory, + StageCheckpointWriter, }; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; @@ -177,7 +177,7 @@ impl Pipeline { self.progress .minimum_block_number .zip(self.max_block) - .map_or(false, |(progress, target)| progress >= target) + .is_some_and(|(progress, target)| progress >= target) { trace!( target: "sync::pipeline", @@ -358,10 +358,7 @@ impl Pipeline { ))?; } - UnifiedStorageWriter::commit_unwind( - provider_rw, - self.provider_factory.static_file_provider(), - )?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; stage.post_unwind_commit()?; @@ -396,7 +393,7 @@ impl Pipeline { let stage_reached_max_block = prev_checkpoint .zip(self.max_block) - .map_or(false, |(prev_progress, target)| prev_progress.block_number >= target); + .is_some_and(|(prev_progress, target)| prev_progress.block_number >= target); if stage_reached_max_block { warn!( target: "sync::pipeline", @@ -469,10 +466,7 @@ impl Pipeline { result: out.clone(), }); - UnifiedStorageWriter::commit( - provider_rw, - self.provider_factory.static_file_provider(), - )?; + UnifiedStorageWriter::commit(provider_rw)?; stage.post_execute_commit()?; @@ -533,7 +527,7 @@ fn on_stage_error( prev_checkpoint.unwrap_or_default(), )?; - UnifiedStorageWriter::commit(provider_rw, factory.static_file_provider())?; + UnifiedStorageWriter::commit(provider_rw)?; // We unwind because of a validation error. 
If the unwind itself // fails, we bail entirely, diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 65bb2637b620..f97214f46433 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -39,7 +39,9 @@ reth-trie-db = { workspace = true, features = ["metrics"] } reth-testing-utils = { workspace = true, optional = true } +alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true # async tokio = { workspace = true, features = ["sync"] } @@ -93,25 +95,25 @@ pprof = { workspace = true, features = [ [features] test-utils = [ - "dep:reth-chainspec", - "reth-network-p2p/test-utils", - "reth-db/test-utils", - "reth-provider/test-utils", - "reth-stages-api/test-utils", - "dep:reth-testing-utils", - "dep:tempfile", - "reth-chainspec?/test-utils", - "reth-consensus/test-utils", - "reth-evm/test-utils", - "reth-downloaders/test-utils", - "reth-primitives/test-utils", - "reth-primitives-traits/test-utils", - "reth-revm/test-utils", - "reth-codecs/test-utils", - "reth-db-api/test-utils", - "reth-trie-db/test-utils", - "reth-trie/test-utils", - "reth-prune-types/test-utils" + "dep:reth-chainspec", + "reth-network-p2p/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-stages-api/test-utils", + "dep:reth-testing-utils", + "dep:tempfile", + "reth-chainspec?/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-downloaders/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-revm/test-utils", + "reth-codecs/test-utils", + "reth-db-api/test-utils", + "reth-trie-db/test-utils", + "reth-trie/test-utils", + "reth-prune-types/test-utils", ] [[bench]] diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index e6ae33f9c299..c1c3ff89d727 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -1,5 +1,5 @@ #![allow(unreachable_pub)] -use alloy_primitives::{Address, Sealable, B256, U256}; +use alloy_primitives::{Address, B256, U256}; use itertools::concat; use reth_db::{tables, test_utils::TempDatabase, Database, DatabaseEnv}; use reth_db_api::{ @@ -147,9 +147,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let cloned_second = second_block.clone(); let mut updated_header = cloned_second.header.unseal(); updated_header.state_root = root; - let sealed = updated_header.seal_slow(); - let (header, seal) = sealed.into_parts(); - *second_block = SealedBlock { header: SealedHeader::new(header, seal), ..cloned_second }; + *second_block = SealedBlock { header: SealedHeader::seal(updated_header), ..cloned_second }; let offset = transitions.len() as u64; @@ -182,9 +180,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let cloned_last = last_block.clone(); let mut updated_header = cloned_last.header.unseal(); updated_header.state_root = root; - let sealed = updated_header.seal_slow(); - let (header, seal) = sealed.into_parts(); - *last_block = SealedBlock { header: SealedHeader::new(header, seal), ..cloned_last }; + *last_block = SealedBlock { header: SealedHeader::seal(updated_header), ..cloned_last }; db.insert_blocks(blocks.iter(), StorageKind::Static).unwrap(); diff --git a/crates/stages/stages/src/lib.rs b/crates/stages/stages/src/lib.rs index 38a0f209dbdd..ce6a96cf3496 100644 --- a/crates/stages/stages/src/lib.rs +++ b/crates/stages/stages/src/lib.rs @@ -37,7 +37,7 @@ //! 
# let consensus: Arc = Arc::new(TestConsensus::default()); //! # let headers_downloader = ReverseHeadersDownloaderBuilder::default().build( //! # Arc::new(TestHeadersClient::default()), -//! # consensus.clone() +//! # consensus.clone().as_header_validator() //! # ); //! # let provider_factory = create_test_provider_factory(); //! # let bodies_downloader = BodiesDownloaderBuilder::default().build( diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index a25fcd4e1e57..d04a96470a03 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -76,7 +76,11 @@ use tokio::sync::watch; /// - [`PruneStage`] (execute) /// - [`FinishStage`] #[derive(Debug)] -pub struct DefaultStages { +pub struct DefaultStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Configuration for the online stages online: OnlineStages, /// Executor factory needs for execution stage @@ -87,13 +91,17 @@ pub struct DefaultStages { prune_modes: PruneModes, } -impl DefaultStages { +impl DefaultStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Create a new set of default stages with default values. #[allow(clippy::too_many_arguments)] pub fn new( provider: Provider, tip: watch::Receiver, - consensus: Arc, + consensus: Arc>, header_downloader: H, body_downloader: B, executor_factory: E, @@ -122,6 +130,8 @@ impl DefaultStages { impl DefaultStages where E: BlockExecutorProvider, + H: HeaderDownloader, + B: BodyDownloader, { /// Appends the default offline stages and default finish stage to the given builder. pub fn add_offline_stages( @@ -164,13 +174,17 @@ where /// These stages *can* be run without network access if the specified downloaders are /// themselves offline. #[derive(Debug)] -pub struct OnlineStages { +pub struct OnlineStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Sync gap provider for the headers stage. provider: Provider, /// The tip for the headers stage. tip: watch::Receiver, /// The consensus engine used to validate incoming data. - consensus: Arc, + consensus: Arc>, /// The block header downloader header_downloader: H, /// The block body downloader @@ -179,12 +193,16 @@ pub struct OnlineStages { stages_config: StageConfig, } -impl OnlineStages { +impl OnlineStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Create a new set of online stages with default values. pub fn new( provider: Provider, tip: watch::Receiver, - consensus: Arc, + consensus: Arc>, header_downloader: H, body_downloader: B, stages_config: StageConfig, @@ -196,7 +214,7 @@ impl OnlineStages { impl OnlineStages where P: HeaderSyncGapProvider + 'static, - H: HeaderDownloader + 'static, + H: HeaderDownloader
+ 'static, B: BodyDownloader + 'static, { /// Create a new builder using the given headers stage. @@ -229,7 +247,7 @@ where provider, header_downloader, tip, - consensus.clone(), + consensus.clone().as_header_validator(), stages_config.etl, )) .add_stage(bodies) @@ -239,7 +257,7 @@ where impl StageSet for OnlineStages where P: HeaderSyncGapProvider + 'static, - H: HeaderDownloader + 'static, + H: HeaderDownloader
+ 'static, B: BodyDownloader + 'static, HeaderStage: Stage, BodyStage: Stage, @@ -250,7 +268,7 @@ where self.provider, self.header_downloader, self.tip, - self.consensus.clone(), + self.consensus.clone().as_header_validator(), self.stages_config.etl.clone(), )) .add_stage(BodyStage::new(self.body_downloader)) diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 06a5250913ed..c1fde11c2354 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -4,20 +4,17 @@ use std::{ }; use futures_util::TryStreamExt; +use reth_codecs::Compact; +use reth_primitives_traits::{Block, BlockBody}; use tracing::*; -use alloy_primitives::TxNumber; -use reth_db::tables; -use reth_db_api::{ - cursor::{DbCursorRO, DbCursorRW}, - models::{StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals}, - transaction::DbTxMut, -}; +use reth_db::{tables, transaction::DbTx}; +use reth_db_api::{cursor::DbCursorRO, transaction::DbTxMut}; use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}; use reth_primitives::StaticFileSegment; use reth_provider::{ - providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, DBProvider, ProviderError, StaticFileProviderFactory, StatsReader, + providers::StaticFileWriter, BlockReader, BlockWriter, DBProvider, ProviderError, + StaticFileProviderFactory, StatsReader, StorageLocation, }; use reth_stages_api::{ EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, @@ -25,6 +22,8 @@ use reth_stages_api::{ }; use reth_storage_errors::provider::ProviderResult; +use super::missing_static_data_error; + /// The body stage downloads block bodies. /// /// The body stage downloads block bodies for all block headers stored locally in storage. @@ -60,7 +59,7 @@ pub struct BodyStage { /// The body downloader. downloader: D, /// Block response buffer. - buffer: Option>, + buffer: Option>>, } impl BodyStage { @@ -68,11 +67,94 @@ impl BodyStage { pub const fn new(downloader: D) -> Self { Self { downloader, buffer: None } } + + /// Ensures that static files and the database are in sync. + fn ensure_consistency( + &self, + provider: &Provider, + unwind_block: Option, + ) -> Result<(), StageError> + where + Provider: DBProvider + BlockReader + StaticFileProviderFactory, + { + // Get the id for the next tx_num, or zero if there are no transactions. + let next_tx_num = provider + .tx_ref() + .cursor_read::()? + .last()? + .map(|(id, _)| id + 1) + .unwrap_or_default(); + + let static_file_provider = provider.static_file_provider(); + + // Make sure the Transactions static file is at the same height. If it's further ahead, this + // execution was interrupted previously and we need to unwind the static file. + let next_static_file_tx_num = static_file_provider + .get_highest_static_file_tx(StaticFileSegment::Transactions) + .map(|id| id + 1) + .unwrap_or_default(); + + match next_static_file_tx_num.cmp(&next_tx_num) { + // If static files are ahead, we are currently unwinding the stage or we didn't reach + // the database commit in a previous stage run. So, our only solution is to unwind the + // static files and proceed from the height the database expects. + Ordering::Greater => { + let highest_db_block = + provider.tx_ref().entries::()?
as u64; + let mut static_file_producer = + static_file_provider.latest_writer(StaticFileSegment::Transactions)?; + static_file_producer + .prune_transactions(next_static_file_tx_num - next_tx_num, highest_db_block)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. + static_file_producer.commit()?; + } + // If static files are behind, then there was some corruption or loss of files. This + // error will trigger an unwind that will bring the database to the same height as the + // static files. + Ordering::Less => { + // If we are already in the process of unwinding, this might be fine because we will + // fix the inconsistency right away. + if let Some(unwind_to) = unwind_block { + let next_tx_num_after_unwind = provider + .tx_ref() + .get::(unwind_to)? + .map(|b| b.next_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?; + + // This means we need a deeper unwind. + if next_tx_num_after_unwind > next_static_file_tx_num { + return Err(missing_static_data_error( + next_static_file_tx_num.saturating_sub(1), + &static_file_provider, + provider, + StaticFileSegment::Transactions, + )?) + } + } else { + return Err(missing_static_data_error( + next_static_file_tx_num.saturating_sub(1), + &static_file_provider, + provider, + StaticFileSegment::Transactions, + )?) + } + } + Ordering::Equal => {} + } + + Ok(()) + } } -impl Stage for BodyStage +impl Stage for BodyStage where - Provider: DBProvider + StaticFileProviderFactory + StatsReader + BlockReader, + Provider: DBProvider + + StaticFileProviderFactory + + StatsReader + + BlockReader + + BlockWriter>, + D: BodyDownloader>, { /// Return the id of the stage fn id(&self) -> StageId { @@ -115,133 +197,23 @@ where } let (from_block, to_block) = input.next_block_range().into_inner(); - // Cursors used to write bodies, ommers and transactions - let tx = provider.tx_ref(); - let mut block_indices_cursor = tx.cursor_write::()?; - let mut tx_block_cursor = tx.cursor_write::()?; - let mut ommers_cursor = tx.cursor_write::()?; - let mut withdrawals_cursor = tx.cursor_write::()?; + self.ensure_consistency(provider, None)?; - // Get id for the next tx_num of zero if there are no transactions. - let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); - - let static_file_provider = provider.static_file_provider(); - let mut static_file_producer = - static_file_provider.get_writer(from_block, StaticFileSegment::Transactions)?; - - // Make sure Transactions static file is at the same height. If it's further, this - // input execution was interrupted previously and we need to unwind the static file. - let next_static_file_tx_num = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Transactions) - .map(|id| id + 1) - .unwrap_or_default(); - - match next_static_file_tx_num.cmp(&next_tx_num) { - // If static files are ahead, then we didn't reach the database commit in a previous - // stage run. So, our only solution is to unwind the static files and proceed from the - // database expected height. - Ordering::Greater => { - static_file_producer - .prune_transactions(next_static_file_tx_num - next_tx_num, from_block - 1)?; - // Since this is a database <-> static file inconsistency, we commit the change - // straight away. - static_file_producer.commit()?; - } - // If static files are behind, then there was some corruption or loss of files.
This - // error will trigger an unwind, that will bring the database to the same height as the - // static files. - Ordering::Less => { - return Err(missing_static_data_error( - next_static_file_tx_num.saturating_sub(1), - &static_file_provider, - provider, - )?) - } - Ordering::Equal => {} - } - - debug!(target: "sync::stages::bodies", stage_progress = from_block, target = to_block, start_tx_id = next_tx_num, "Commencing sync"); + debug!(target: "sync::stages::bodies", stage_progress = from_block, target = to_block, "Commencing sync"); let buffer = self.buffer.take().ok_or(StageError::MissingDownloadBuffer)?; trace!(target: "sync::stages::bodies", bodies_len = buffer.len(), "Writing blocks"); - let mut highest_block = from_block; - for response in buffer { - // Write block - let block_number = response.block_number(); - - let block_indices = StoredBlockBodyIndices { - first_tx_num: next_tx_num, - tx_count: match &response { - BlockResponse::Full(block) => block.body.transactions.len() as u64, - BlockResponse::Empty(_) => 0, - }, - }; - - // Increment block on static file header. - if block_number > 0 { - let appended_block_number = static_file_producer.increment_block(block_number)?; - - if appended_block_number != block_number { - // This scenario indicates a critical error in the logic of adding new - // items. It should be treated as an `expect()` failure. - return Err(StageError::InconsistentBlockNumber { - segment: StaticFileSegment::Transactions, - database: block_number, - static_file: appended_block_number, - }) - } - } - - match response { - BlockResponse::Full(block) => { - // write transaction block index - if !block.body.transactions.is_empty() { - tx_block_cursor.append(block_indices.last_tx_num(), block.number)?; - } - - // Write transactions - for transaction in block.body.transactions { - let appended_tx_number = static_file_producer - .append_transaction(next_tx_num, &transaction.into())?; - - if appended_tx_number != next_tx_num { - // This scenario indicates a critical error in the logic of adding new - // items. It should be treated as an `expect()` failure. - return Err(StageError::InconsistentTxNumber { - segment: StaticFileSegment::Transactions, - database: next_tx_num, - static_file: appended_tx_number, - }) - } - - // Increment transaction id for each transaction. - next_tx_num += 1; - } - - // Write ommers if any - if !block.body.ommers.is_empty() { - ommers_cursor.append( - block_number, - StoredBlockOmmers { ommers: block.body.ommers }, - )?; - } - - // Write withdrawals if any - if let Some(withdrawals) = block.body.withdrawals { - if !withdrawals.is_empty() { - withdrawals_cursor - .append(block_number, StoredBlockWithdrawals { withdrawals })?; - } - } - } - BlockResponse::Empty(_) => {} - }; - - // insert block meta - block_indices_cursor.append(block_number, block_indices)?; - - highest_block = block_number; - } + let highest_block = buffer.last().map(|r| r.block_number()).unwrap_or(from_block); + + // Write bodies to database. + provider.append_block_bodies( + buffer + .into_iter() + .map(|response| (response.block_number(), response.into_body())) + .collect(), + // We are writing transactions directly to static files. 
+ StorageLocation::StaticFiles, + )?; // The stage is "done" if: // - We got fewer blocks than our target @@ -262,66 +234,8 @@ where ) -> Result { self.buffer.take(); - let static_file_provider = provider.static_file_provider(); - let tx = provider.tx_ref(); - // Cursors to unwind bodies, ommers - let mut body_cursor = tx.cursor_write::()?; - let mut ommers_cursor = tx.cursor_write::()?; - let mut withdrawals_cursor = tx.cursor_write::()?; - // Cursors to unwind transitions - let mut tx_block_cursor = tx.cursor_write::()?; - - let mut rev_walker = body_cursor.walk_back(None)?; - while let Some((number, block_meta)) = rev_walker.next().transpose()? { - if number <= input.unwind_to { - break - } - - // Delete the ommers entry if any - if ommers_cursor.seek_exact(number)?.is_some() { - ommers_cursor.delete_current()?; - } - - // Delete the withdrawals entry if any - if withdrawals_cursor.seek_exact(number)?.is_some() { - withdrawals_cursor.delete_current()?; - } - - // Delete all transaction to block values. - if !block_meta.is_empty() && - tx_block_cursor.seek_exact(block_meta.last_tx_num())?.is_some() - { - tx_block_cursor.delete_current()?; - } - - // Delete the current body value - rev_walker.delete_current()?; - } - - let mut static_file_producer = - static_file_provider.latest_writer(StaticFileSegment::Transactions)?; - - // Unwind from static files. Get the current last expected transaction from DB, and match it - // on static file - let db_tx_num = - body_cursor.last()?.map(|(_, block_meta)| block_meta.last_tx_num()).unwrap_or_default(); - let static_file_tx_num: u64 = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Transactions) - .unwrap_or_default(); - - // If there are more transactions on database, then we are missing static file data and we - // need to unwind further. - if db_tx_num > static_file_tx_num { - return Err(missing_static_data_error( - static_file_tx_num, - &static_file_provider, - provider, - )?) - } - - // Unwinds static file - static_file_producer - .prune_transactions(static_file_tx_num.saturating_sub(db_tx_num), input.unwind_to)?; + self.ensure_consistency(provider, Some(input.unwind_to))?; + provider.remove_bodies_above(input.unwind_to, StorageLocation::Both)?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) @@ -330,40 +244,6 @@ where } } -fn missing_static_data_error( - last_tx_num: TxNumber, - static_file_provider: &StaticFileProvider, - provider: &Provider, -) -> Result -where - Provider: BlockReader, -{ - let mut last_block = static_file_provider - .get_highest_static_file_block(StaticFileSegment::Transactions) - .unwrap_or_default(); - - // To be extra safe, we make sure that the last tx num matches the last block from its indices. - // If not, get it. - loop { - if let Some(indices) = provider.block_body_indices(last_block)? { - if indices.last_tx_num() <= last_tx_num { - break - } - } - if last_block == 0 { - break - } - last_block -= 1; - } - - let missing_block = Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); - - Ok(StageError::MissingStaticFileData { - block: missing_block, - segment: StaticFileSegment::Transactions, - }) -} - // TODO(alexey): ideally, we want to measure Bodies stage progress in bytes, but it's hard to know // beforehand how many bytes we need to download. So the good solution would be to measure the // progress in gas as a proxy to size. Execution stage uses a similar approach. 
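The bodies hunks above replace the per-table cursor bookkeeping with a single `ensure_consistency` pass followed by bulk `append_block_bodies`/`remove_bodies_above` calls on the provider. The decision at the heart of that pass is a three-way comparison of the next expected transaction number on each side. A minimal self-contained sketch of that pattern follows; the `Reconcile` enum and `reconcile` helper are illustrative stand-ins, not reth APIs (reth acts on the comparison inline):

    use std::cmp::Ordering;

    /// How to heal a database <-> static-file height mismatch (illustrative only).
    enum Reconcile {
        /// Static files are ahead: prune the excess transactions from them
        /// and commit immediately.
        PruneStaticFiles(u64),
        /// Static files are behind: report missing data so the pipeline
        /// unwinds the database down to the static-file height.
        UnwindDatabase,
        /// Both stores agree; nothing to do.
        InSync,
    }

    /// `next_db` and `next_static` are the next expected transaction numbers
    /// on each side, zero when the respective store is empty.
    fn reconcile(next_db: u64, next_static: u64) -> Reconcile {
        match next_static.cmp(&next_db) {
            // A previous run committed static files but not the database.
            Ordering::Greater => Reconcile::PruneStaticFiles(next_static - next_db),
            // Files were corrupted or lost; only a database unwind realigns them.
            Ordering::Less => Reconcile::UnwindDatabase,
            Ordering::Equal => Reconcile::InSync,
        }
    }

As the hunk above shows, the real `Ordering::Less` arm additionally tolerates an unwind already in progress whose target sits at or below the static-file height, since that unwind will fix the inconsistency on its own.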
@@ -608,9 +488,10 @@ mod tests { UnwindStageTestRunner, }, }; - use alloy_primitives::{BlockHash, BlockNumber, TxNumber, B256}; + use alloy_consensus::Header; + use alloy_primitives::{BlockNumber, TxNumber, B256}; use futures_util::Stream; - use reth_db::{static_file::HeaderMask, tables}; + use reth_db::{static_file::HeaderWithHashMask, tables}; use reth_db_api::{ cursor::DbCursorRO, models::{StoredBlockBodyIndices, StoredBlockOmmers}, @@ -623,7 +504,7 @@ mod tests { }, error::DownloadResult, }; - use reth_primitives::{BlockBody, Header, SealedBlock, SealedHeader, StaticFileSegment}; + use reth_primitives::{BlockBody, SealedBlock, SealedHeader, StaticFileSegment}; use reth_provider::{ providers::StaticFileWriter, test_utils::MockNodeTypesWithDB, HeaderProvider, ProviderFactory, StaticFileProviderFactory, TransactionsProvider, @@ -724,9 +605,7 @@ mod tests { body.tx_num_range().try_for_each(|tx_num| { let transaction = random_signed_tx(&mut rng); - static_file_producer - .append_transaction(tx_num, &transaction.into()) - .map(drop) + static_file_producer.append_transaction(tx_num, &transaction).map(drop) })?; if body.tx_count != 0 { @@ -889,6 +768,8 @@ mod tests { } impl BodyDownloader for TestBodyDownloader { + type Body = BlockBody; + fn set_download_range( &mut self, range: RangeInclusive, @@ -898,7 +779,7 @@ mod tests { for header in static_file_provider.fetch_range_iter( StaticFileSegment::Headers, *range.start()..*range.end() + 1, - |cursor, number| cursor.get_two::>(number.into()), + |cursor, number| cursor.get_two::>(number.into()), )? { let (header, hash) = header?; self.headers.push_back(SealedHeader::new(header, hash)); @@ -909,7 +790,7 @@ mod tests { } impl Stream for TestBodyDownloader { - type Item = BodyDownloaderResult; + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 88d5f8303788..ce969f2577d8 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -1,5 +1,6 @@ use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; -use alloy_primitives::{BlockNumber, Sealable}; +use alloy_consensus::{BlockHeader, Header}; +use alloy_primitives::BlockNumber; use num_traits::Zero; use reth_config::config::ExecutionConfig; use reth_db::{static_file::HeaderMask, tables}; @@ -10,14 +11,13 @@ use reth_evm::{ }; use reth_execution_types::Chain; use reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource}; -use reth_primitives::{Header, SealedHeader, StaticFileSegment}; -use reth_primitives_traits::format_gas_throughput; +use reth_primitives::{SealedHeader, StaticFileSegment}; +use reth_primitives_traits::{format_gas_throughput, Block, BlockBody, NodePrimitives}; use reth_provider::{ - providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, - writer::UnifiedStorageWriter, - BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, - ProviderError, StateChangeWriter, StateWriter, StaticFileProviderFactory, StatsReader, - TransactionVariant, + providers::{StaticFileProvider, StaticFileWriter}, + BlockHashReader, BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderError, StateCommitmentProvider, StateWriter, + StaticFileProviderFactory, StatsReader, StorageLocation, TransactionVariant, }; use reth_prune_types::PruneModes; use 
reth_revm::database::StateProviderDatabase; @@ -35,6 +35,8 @@ use std::{ }; use tracing::*; +use super::missing_static_data_error; + /// The execution stage executes all transactions and /// updates history indexes. /// @@ -169,14 +171,100 @@ impl ExecutionStage { } Ok(prune_modes) } + + /// Performs a consistency check on static files. + /// + /// This function compares the highest receipt number recorded in the database with that in the + /// static file to detect any discrepancies due to unexpected shutdowns or database rollbacks. + /// **If the height in the static file is higher**, it rolls back (unwinds) the static file. + /// **Conversely, if the height in the database is lower**, it triggers a rollback in the + /// database (by returning [`StageError`]) until the heights in both the database and static + /// file match. + fn ensure_consistency( + &self, + provider: &Provider, + checkpoint: u64, + unwind_to: Option, + ) -> Result<(), StageError> + where + Provider: StaticFileProviderFactory + DBProvider + BlockReader + HeaderProvider, + { + // If there's any receipt pruning configured, receipts are written directly to the database + // and inconsistencies are expected. + if self.prune_modes.has_receipts_pruning() { + return Ok(()) + } + + // Get next expected receipt number + let tx = provider.tx_ref(); + let next_receipt_num = tx + .cursor_read::()? + .seek_exact(checkpoint)? + .map(|(_, value)| value.next_tx_num()) + .unwrap_or(0); + + let static_file_provider = provider.static_file_provider(); + + // Get next expected receipt number in static files + let next_static_file_receipt_num = static_file_provider + .get_highest_static_file_tx(StaticFileSegment::Receipts) + .map(|num| num + 1) + .unwrap_or(0); + + // Check if we had any unexpected shutdown after committing to static files, but + // NOT committing to the database. + match next_static_file_receipt_num.cmp(&next_receipt_num) { + // It can be equal when it's a chain of empty blocks, but we still need to update the + // last block in the range. + Ordering::Greater | Ordering::Equal => { + let mut static_file_producer = + static_file_provider.latest_writer(StaticFileSegment::Receipts)?; + static_file_producer + .prune_receipts(next_static_file_receipt_num - next_receipt_num, checkpoint)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. + static_file_producer.commit()?; + } + Ordering::Less => { + // If we are already in the process of unwinding, this might be fine because we will + // fix the inconsistency right away. + if let Some(unwind_to) = unwind_to { + let next_receipt_num_after_unwind = provider + .tx_ref() + .get::(unwind_to)? + .map(|b| b.next_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?; + + if next_receipt_num_after_unwind > next_static_file_receipt_num { + // This means we need a deeper unwind. + } else { + return Ok(()) + } + } + + return Err(missing_static_data_error( + next_static_file_receipt_num.saturating_sub(1), + &static_file_provider, + provider, + StaticFileSegment::Receipts, + )?)
+ } + } + + Ok(()) + } } impl Stage for ExecutionStage where E: BlockExecutorProvider, - Provider: - DBProvider + BlockReader + StaticFileProviderFactory + StatsReader + StateChangeWriter, - for<'a> UnifiedStorageWriter<'a, Provider, StaticFileProviderRWRefMut<'a>>: StateWriter, + Provider: DBProvider + + BlockReader + + StaticFileProviderFactory + + StatsReader + + BlockHashReader + + StateWriter + + StateCommitmentProvider, { /// Return the id of the stage fn id(&self) -> StageId { @@ -204,25 +292,9 @@ where let prune_modes = self.adjust_prune_modes(provider, start_block, max_block)?; let static_file_provider = provider.static_file_provider(); - // We only use static files for Receipts, if there is no receipt pruning of any kind. - let static_file_producer = if self.prune_modes.receipts.is_none() && - self.prune_modes.receipts_log_filter.is_empty() - { - debug!(target: "sync::stages::execution", start = start_block, "Preparing static file producer"); - let mut producer = - prepare_static_file_producer(provider, &static_file_provider, start_block)?; - // Since there might be a database <-> static file inconsistency (read - // `prepare_static_file_producer` for context), we commit the change straight away. - producer.commit()?; - Some(producer) - } else { - None - }; + self.ensure_consistency(provider, input.checkpoint().block_number, None)?; - let db = StateProviderDatabase(LatestStateProviderRef::new( - provider.tx_ref(), - provider.static_file_provider(), - )); + let db = StateProviderDatabase(LatestStateProviderRef::new(provider)); let mut executor = self.executor_provider.batch_executor(db); executor.set_tip(max_block); executor.set_prune_modes(prune_modes); @@ -267,20 +339,17 @@ where fetch_block_duration += fetch_block_start.elapsed(); - cumulative_gas += block.gas_used; + cumulative_gas += block.header().gas_used(); // Configure the executor to use the current state. 
- trace!(target: "sync::stages::execution", number = block_number, txs = block.body.transactions.len(), "Executing block"); + trace!(target: "sync::stages::execution", number = block_number, txs = block.body().transactions().len(), "Executing block"); // Execute the block let execute_start = Instant::now(); self.metrics.metered_one((&block, td).into(), |input| { - let sealed = block.header.clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - executor.execute_and_verify_one(input).map_err(|error| StageError::Block { - block: Box::new(SealedHeader::new(header, seal)), + block: Box::new(SealedHeader::seal(block.header().clone())), error: BlockErrorKind::Execution(error), }) })?; @@ -304,7 +373,7 @@ where } stage_progress = block_number; - stage_checkpoint.progress.processed += block.gas_used; + stage_checkpoint.progress.processed += block.gas_used(); // If we have ExExes we need to save the block in memory for later if self.exex_manager_handle.has_exexs() { @@ -343,7 +412,7 @@ where // the `has_exexs` check here as well if !blocks.is_empty() { let blocks = blocks.into_iter().map(|block| { - let hash = block.header.hash_slow(); + let hash = block.header().hash_slow(); block.seal(hash) }); @@ -362,8 +431,7 @@ where let time = Instant::now(); // write output - let mut writer = UnifiedStorageWriter::new(provider, static_file_producer); - writer.write_to_storage(state, OriginalValuesKnown::Yes)?; + provider.write_state(state, OriginalValuesKnown::Yes, StorageLocation::StaticFiles)?; let db_write_duration = time.elapsed(); debug!( @@ -410,10 +478,13 @@ where }) } + self.ensure_consistency(provider, input.checkpoint.block_number, Some(unwind_to))?; + // Unwind account and storage changesets, as well as receipts. // // This also updates `PlainStorageState` and `PlainAccountState`. - let bundle_state_with_receipts = provider.take_state(range.clone())?; + let bundle_state_with_receipts = + provider.take_state_above(unwind_to, StorageLocation::Both)?; // Prepare the input for post unwind commit hook, where an `ExExNotification` will be sent. if self.exex_manager_handle.has_exexs() { @@ -434,25 +505,6 @@ where } } - let static_file_provider = provider.static_file_provider(); - - // Unwind all receipts for transactions in the block range - if self.prune_modes.receipts.is_none() && self.prune_modes.receipts_log_filter.is_empty() { - // We only use static files for Receipts, if there is no receipt pruning of any kind. - - // prepare_static_file_producer does a consistency check that will unwind static files - // if the expected highest receipt in the files is higher than the database. - // Which is essentially what happens here when we unwind this stage. - let _static_file_producer = - prepare_static_file_producer(provider, &static_file_provider, *range.start())?; - } else { - // If there is any kind of receipt pruning/filtering we use the database, since static - // files do not support filters. - // - // If we hit this case, the receipts have already been unwound by the call to - // `take_state`. - } - // Update the checkpoint. 
let mut stage_checkpoint = input.checkpoint.execution_stage_checkpoint(); if let Some(stage_checkpoint) = stage_checkpoint.as_mut() { @@ -486,8 +538,8 @@ where } } -fn execution_checkpoint( - provider: &StaticFileProvider, +fn execution_checkpoint( + provider: &StaticFileProvider, start_block: BlockNumber, max_block: BlockNumber, checkpoint: StageCheckpoint, @@ -553,8 +605,8 @@ fn execution_checkpoint( }) } -fn calculate_gas_used_from_headers( - provider: &StaticFileProvider, +fn calculate_gas_used_from_headers( + provider: &StaticFileProvider, range: RangeInclusive, ) -> Result { debug!(target: "sync::stages::execution", ?range, "Calculating gas used from headers"); @@ -578,85 +630,6 @@ fn calculate_gas_used_from_headers( Ok(gas_total) } -/// Returns a `StaticFileProviderRWRefMut` static file producer after performing a consistency -/// check. -/// -/// This function compares the highest receipt number recorded in the database with that in the -/// static file to detect any discrepancies due to unexpected shutdowns or database rollbacks. **If -/// the height in the static file is higher**, it rolls back (unwinds) the static file. -/// **Conversely, if the height in the database is lower**, it triggers a rollback in the database -/// (by returning [`StageError`]) until the heights in both the database and static file match. -fn prepare_static_file_producer<'a, 'b, Provider>( - provider: &'b Provider, - static_file_provider: &'a StaticFileProvider, - start_block: u64, -) -> Result, StageError> -where - Provider: DBProvider + BlockReader + HeaderProvider, - 'b: 'a, -{ - // Get next expected receipt number - let tx = provider.tx_ref(); - let next_receipt_num = tx - .cursor_read::()? - .seek_exact(start_block)? - .map(|(_, value)| value.first_tx_num) - .unwrap_or(0); - - // Get next expected receipt number in static files - let next_static_file_receipt_num = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Receipts) - .map(|num| num + 1) - .unwrap_or(0); - - let mut static_file_producer = - static_file_provider.get_writer(start_block, StaticFileSegment::Receipts)?; - - // Check if we had any unexpected shutdown after committing to static files, but - // NOT committing to database. - match next_static_file_receipt_num.cmp(&next_receipt_num) { - // It can be equal when it's a chain of empty blocks, but we still need to update the last - // block in the range. - Ordering::Greater | Ordering::Equal => static_file_producer.prune_receipts( - next_static_file_receipt_num - next_receipt_num, - start_block.saturating_sub(1), - )?, - Ordering::Less => { - let mut last_block = static_file_provider - .get_highest_static_file_block(StaticFileSegment::Receipts) - .unwrap_or(0); - - let last_receipt_num = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Receipts) - .unwrap_or(0); - - // To be extra safe, we make sure that the last receipt num matches the last block from - // its indices. If not, get it. - loop { - if let Some(indices) = provider.block_body_indices(last_block)? 
{ - if indices.last_tx_num() <= last_receipt_num { - break - } - } - if last_block == 0 { - break - } - last_block -= 1; - } - - let missing_block = - Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); - - return Err(StageError::MissingStaticFileData { - block: missing_block, - segment: StaticFileSegment::Receipts, - }) - } - } - - Ok(static_file_producer) -} - #[cfg(test)] mod tests { use super::*; @@ -902,7 +875,7 @@ mod tests { // Tests node with database and node with static files for mut mode in modes { - let provider = factory.database_provider_rw().unwrap(); + let mut provider = factory.database_provider_rw().unwrap(); if let Some(mode) = &mut mode { // Simulating a full node where we write receipts to database @@ -911,6 +884,7 @@ mod tests { let mut execution_stage = stage(); execution_stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.clone().unwrap_or_default()); let output = execution_stage.execute(&provider, input).unwrap(); provider.commit().unwrap(); @@ -975,9 +949,10 @@ mod tests { "Post changed of a account" ); - let provider = factory.database_provider_rw().unwrap(); + let mut provider = factory.database_provider_rw().unwrap(); let mut stage = stage(); - stage.prune_modes = mode.unwrap_or_default(); + stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.unwrap_or_default()); let _result = stage .unwind( @@ -1052,6 +1027,7 @@ mod tests { // Test Execution let mut execution_stage = stage(); execution_stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.clone().unwrap_or_default()); let result = execution_stage.execute(&provider, input).unwrap(); provider.commit().unwrap(); @@ -1059,7 +1035,8 @@ mod tests { // Test Unwind provider = factory.database_provider_rw().unwrap(); let mut stage = stage(); - stage.prune_modes = mode.unwrap_or_default(); + stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.clone().unwrap_or_default()); let result = stage .unwind( diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 1ca0e1aa1325..e6b1e548455f 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -61,7 +61,11 @@ impl AccountHashingStage { pub fn seed( provider: &reth_provider::DatabaseProvider, opts: SeedOpts, - ) -> Result, StageError> { + ) -> Result, StageError> + where + N::Primitives: + reth_primitives_traits::FullNodePrimitives, + { use alloy_primitives::U256; use reth_db_api::models::AccountBeforeTx; use reth_provider::{StaticFileProviderFactory, StaticFileWriter}; diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 49e687a96a1d..100fe4e979a7 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -1,7 +1,7 @@ use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use futures_util::StreamExt; use reth_config::config::EtlConfig; -use reth_consensus::Consensus; +use reth_consensus::HeaderValidator; use reth_db::{tables, transaction::DbTx, RawKey, RawTable, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, @@ -13,9 +13,8 @@ use reth_network_p2p::headers::{downloader::HeaderDownloader, error::HeadersDown use reth_primitives::{SealedHeader, StaticFileSegment}; use reth_primitives_traits::serde_bincode_compat; use reth_provider::{ - providers::{StaticFileProvider, 
StaticFileWriter}, - BlockHashReader, DBProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, - StaticFileProviderFactory, + providers::StaticFileWriter, BlockHashReader, DBProvider, HeaderProvider, HeaderSyncGap, + HeaderSyncGapProvider, StaticFileProviderFactory, }; use reth_stages_api::{ BlockErrorKind, CheckpointBlockRange, EntitiesCheckpoint, ExecInput, ExecOutput, @@ -49,7 +48,7 @@ pub struct HeaderStage { /// The tip for the stage. tip: watch::Receiver, /// Consensus client implementation - consensus: Arc, + consensus: Arc>, /// Current sync gap. sync_gap: Option, /// ETL collector with `HeaderHash` -> `BlockNumber` @@ -64,14 +63,14 @@ pub struct HeaderStage { impl HeaderStage where - Downloader: HeaderDownloader, + Downloader: HeaderDownloader
, { /// Create a new header stage pub fn new( database: Provider, downloader: Downloader, tip: watch::Receiver, - consensus: Arc, + consensus: Arc>, etl_config: EtlConfig, ) -> Self { Self { @@ -90,15 +89,16 @@ where /// /// Writes to static files ( `Header | HeaderTD | HeaderHash` ) and [`tables::HeaderNumbers`] /// database table. - fn write_headers( + fn write_headers + StaticFileProviderFactory>( &mut self, - provider: &impl DBProvider, - static_file_provider: StaticFileProvider, + provider: &P, ) -> Result { let total_headers = self.header_collector.len(); info!(target: "sync::stages::headers", total = total_headers, "Writing headers"); + let static_file_provider = provider.static_file_provider(); + // Consistency check of expected headers in static files vs DB is done on provider::sync_gap // when poll_execute_ready is polled. let mut last_header_number = static_file_provider @@ -194,7 +194,7 @@ where impl Stage for HeaderStage where P: HeaderSyncGapProvider, - D: HeaderDownloader, + D: HeaderDownloader
, Provider: DBProvider + StaticFileProviderFactory, { /// Return the id of the stage @@ -293,7 +293,7 @@ where // Write the headers and related tables to DB from ETL space let to_be_processed = self.hash_collector.len() as u64; - let last_header_number = self.write_headers(provider, provider.static_file_provider())?; + let last_header_number = self.write_headers(provider)?; // Clear ETL collectors self.hash_collector.clear(); @@ -392,7 +392,7 @@ mod tests { use crate::test_utils::{ stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, }; - use alloy_primitives::{Sealable, B256}; + use alloy_primitives::B256; use assert_matches::assert_matches; use reth_execution_types::ExecutionOutcome; use reth_primitives::{BlockBody, SealedBlock, SealedBlockWithSenders}; @@ -441,7 +441,9 @@ mod tests { } } - impl StageTestRunner for HeadersTestRunner { + impl + 'static> StageTestRunner + for HeadersTestRunner + { type S = HeaderStage, D>; fn db(&self) -> &TestStageDB { @@ -459,7 +461,9 @@ mod tests { } } - impl ExecuteStageTestRunner for HeadersTestRunner { + impl + 'static> ExecuteStageTestRunner + for HeadersTestRunner + { type Seed = Vec; fn seed_execution(&mut self, input: ExecInput) -> Result { @@ -505,9 +509,7 @@ mod tests { // validate the header let header = provider.header_by_number(block_num)?; assert!(header.is_some()); - let sealed = header.unwrap().seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + let header = SealedHeader::seal(header.unwrap()); assert_eq!(header.hash(), hash); // validate the header total difficulty @@ -537,7 +539,9 @@ mod tests { } } - impl UnwindStageTestRunner for HeadersTestRunner { + impl + 'static> UnwindStageTestRunner + for HeadersTestRunner + { fn validate_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError> { self.check_no_header_entry_above(input.unwind_to) } diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index d1d3496d917a..2d2503b53919 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use reth_codecs::Compact; use reth_consensus::ConsensusError; use reth_db::tables; @@ -276,10 +276,7 @@ where // Reset the checkpoint self.save_execution_checkpoint(provider, None)?; - let sealed = target_block.seal_slow(); - let (header, seal) = sealed.into_parts(); - - validate_state_root(trie_root, SealedHeader::new(header, seal), to_block)?; + validate_state_root(trie_root, SealedHeader::seal(target_block), to_block)?; Ok(ExecOutput { checkpoint: StageCheckpoint::new(to_block) @@ -332,10 +329,7 @@ where .header_by_number(input.unwind_to)? .ok_or_else(|| ProviderError::HeaderNotFound(input.unwind_to.into()))?; - let sealed = target.seal_slow(); - let (header, seal) = sealed.into_parts(); - - validate_state_root(block_root, SealedHeader::new(header, seal), input.unwind_to)?; + validate_state_root(block_root, SealedHeader::seal(target), input.unwind_to)?; // Validation passed, apply unwind changes to the database. 
provider.write_trie_updates(&updates)?; @@ -538,9 +532,7 @@ mod tests { .into_iter() .map(|(address, account)| (address, (account, std::iter::empty()))), ); - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - let sealed_head = SealedBlock { header: SealedHeader::new(header, seal), body }; + let sealed_head = SealedBlock { header: SealedHeader::seal(header), body }; let head_hash = sealed_head.hash(); let mut blocks = vec![sealed_head]; diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 4b9f9295103e..9d7cc685a7ef 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -296,8 +296,8 @@ mod tests { ) { // We recreate the static file provider, since consistency heals are done on fetching the // writer for the first time. - let static_file_provider = - StaticFileProvider::read_write(db.factory.static_file_provider().path()).unwrap(); + let mut static_file_provider = db.factory.static_file_provider(); + static_file_provider = StaticFileProvider::read_write(static_file_provider.path()).unwrap(); // Simulate corruption by removing `prune_count` rows from the data file without updating // its offset list and configuration. @@ -314,9 +314,10 @@ mod tests { // We recreate the static file provider, since consistency heals are done on fetching the // writer for the first time. + let mut static_file_provider = db.factory.static_file_provider(); + static_file_provider = StaticFileProvider::read_write(static_file_provider.path()).unwrap(); assert_eq!( - StaticFileProvider::read_write(db.factory.static_file_provider().path()) - .unwrap() + static_file_provider .check_consistency(&db.factory.database_provider_ro().unwrap(), is_full_node,), Ok(expected) ); diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index 8adf2fcad546..7e5d7af46eef 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -1,4 +1,5 @@ -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives::NodePrimitives; use reth_provider::{ BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, @@ -41,7 +42,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory, + + StaticFileProviderFactory>, { fn id(&self) -> StageId { StageId::Prune @@ -130,7 +131,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory, + + StaticFileProviderFactory>, { fn id(&self) -> StageId { StageId::PruneSenderRecovery @@ -171,6 +172,7 @@ mod tests { }; use alloy_primitives::B256; use reth_primitives::SealedBlock; + use reth_primitives_traits::SignedTransaction; use reth_provider::{ providers::StaticFileWriter, TransactionsProvider, TransactionsProviderExt, }; diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index a4eda6394c07..674d035021d6 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -1,13 +1,14 @@ use alloy_primitives::{Address, TxNumber}; use reth_config::config::SenderRecoveryConfig; use reth_consensus::ConsensusError; -use reth_db::{static_file::TransactionMask, tables, RawValue}; +use reth_db::{static_file::TransactionMask, table::Value, tables, RawValue}; use reth_db_api::{ cursor::DbCursorRW, transaction::{DbTx, DbTxMut}, 
DbTxUnwindExt, }; -use reth_primitives::{GotExpected, StaticFileSegment, TransactionSignedNoHash}; +use reth_primitives::{GotExpected, NodePrimitives, StaticFileSegment}; +use reth_primitives_traits::SignedTransaction; use reth_provider::{ BlockReader, DBProvider, HeaderProvider, ProviderError, PruneCheckpointReader, StaticFileProviderFactory, StatsReader, @@ -59,7 +60,7 @@ impl Stage for SenderRecoveryStage where Provider: DBProvider + BlockReader - + StaticFileProviderFactory + + StaticFileProviderFactory> + StatsReader + PruneCheckpointReader, { @@ -233,7 +234,9 @@ fn setup_range_recovery( provider: &Provider, ) -> mpsc::Sender, RecoveryResultSender)>> where - Provider: DBProvider + HeaderProvider + StaticFileProviderFactory, + Provider: DBProvider + + HeaderProvider + + StaticFileProviderFactory>, { let (tx_sender, tx_receiver) = mpsc::channel::, RecoveryResultSender)>>(); let static_file_provider = provider.static_file_provider(); @@ -254,9 +257,9 @@ where chunk_range.clone(), |cursor, number| { Ok(cursor - .get_one::>>( - number.into(), - )? + .get_one::::SignedTx>, + >>(number.into())? .map(|tx| (number, tx))) }, |_| true, @@ -300,17 +303,18 @@ where } #[inline] -fn recover_sender( - (tx_id, tx): (TxNumber, TransactionSignedNoHash), +fn recover_sender( + (tx_id, tx): (TxNumber, T), rlp_buf: &mut Vec, ) -> Result<(u64, Address), Box> { + rlp_buf.clear(); // We call [Signature::encode_and_recover_unchecked] because transactions run in the pipeline // are known to be valid - this means that we do not need to check whether or not the `s` // value is greater than `secp256k1n / 2` if past EIP-2. There are transactions // pre-homestead which have large `s` values, so using [Signature::recover_signer] here // would not be backwards-compatible. let sender = tx - .encode_and_recover_unchecked(rlp_buf) + .recover_signer_unchecked_with_buf(rlp_buf) .ok_or(SenderRecoveryStageError::FailedRecovery(FailedSenderRecoveryError { tx: tx_id }))?; Ok((tx_id, sender)) @@ -361,10 +365,16 @@ struct FailedSenderRecoveryError { #[cfg(test)] mod tests { + use super::*; + use crate::test_utils::{ + stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind, + TestRunnerError, TestStageDB, UnwindStageTestRunner, + }; use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use reth_db_api::cursor::DbCursorRO; use reth_primitives::{SealedBlock, TransactionSigned}; + use reth_primitives_traits::SignedTransaction; use reth_provider::{ providers::StaticFileWriter, DatabaseProviderFactory, PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, @@ -375,12 +385,6 @@ mod tests { self, random_block, random_block_range, BlockParams, BlockRangeParams, }; - use super::*; - use crate::test_utils::{ - stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind, - TestRunnerError, TestStageDB, UnwindStageTestRunner, - }; - stage_test_suite_ext!(SenderRecoveryTestRunner, sender_recovery); /// Execute a block range with a single transaction @@ -552,7 +556,7 @@ mod tests { blocks[..=max_pruned_block as usize] .iter() .map(|block| block.body.transactions.len() as u64) - .sum::(), + .sum(), ), prune_mode: PruneMode::Full, }, @@ -567,8 +571,8 @@ mod tests { processed: blocks[..=max_processed_block] .iter() .map(|block| block.body.transactions.len() as u64) - .sum::(), - total: blocks.iter().map(|block| block.body.transactions.len() as u64).sum::() + .sum(), + total: blocks.iter().map(|block| block.body.transactions.len() as u64).sum() } ); } @@ -667,11 +671,9 @@ 
mod tests { while let Some((_, body)) = body_cursor.next()? { for tx_id in body.tx_num_range() { let transaction: TransactionSigned = provider - .transaction_by_id_no_hash(tx_id)? - .map(|tx| TransactionSigned { - hash: Default::default(), // we don't require the hash - signature: tx.signature, - transaction: tx.transaction, + .transaction_by_id_unhashed(tx_id)? + .map(|tx| { + TransactionSigned::new_unhashed(tx.transaction, tx.signature) }) .expect("no transaction entry"); let signer = diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index 60c958abf862..fab10b0f9535 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -1,12 +1,15 @@ +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{TxHash, TxNumber}; use num_traits::Zero; use reth_config::config::{EtlConfig, TransactionLookupConfig}; -use reth_db::{tables, RawKey, RawValue}; +use reth_db::{table::Value, tables, RawKey, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, transaction::{DbTx, DbTxMut}, }; use reth_etl::Collector; +use reth_primitives::NodePrimitives; +use reth_primitives_traits::SignedTransaction; use reth_provider::{ BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, StatsReader, TransactionsProvider, TransactionsProviderExt, @@ -60,7 +63,7 @@ where + BlockReader + PruneCheckpointReader + StatsReader - + StaticFileProviderFactory + + StaticFileProviderFactory> + TransactionsProviderExt, { /// Return the id of the stage @@ -206,7 +209,7 @@ where for tx_id in body.tx_num_range() { // First delete the transaction and hash to id mapping if let Some(transaction) = static_file_provider.transaction_by_id(tx_id)? { - if tx_hash_number_cursor.seek_exact(transaction.hash())?.is_some() { + if tx_hash_number_cursor.seek_exact(transaction.trie_hash())?.is_some() { tx_hash_number_cursor.delete_current()?; } } @@ -383,7 +386,7 @@ mod tests { for block in &blocks[..=max_processed_block] { for transaction in &block.body.transactions { if block.number > max_pruned_block { - tx_hash_numbers.push((transaction.hash, tx_hash_number)); + tx_hash_numbers.push((transaction.hash(), tx_hash_number)); } tx_hash_number += 1; } @@ -416,8 +419,8 @@ mod tests { processed: blocks[..=max_processed_block] .iter() .map(|block| block.body.transactions.len() as u64) - .sum::(), - total: blocks.iter().map(|block| block.body.transactions.len() as u64).sum::() + .sum(), + total: blocks.iter().map(|block| block.body.transactions.len() as u64).sum() } ); } diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index caf039faca10..5aa1f3f880c3 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -1,5 +1,5 @@ //! Utils for `stages`. 
-use alloy_primitives::BlockNumber; +use alloy_primitives::{BlockNumber, TxNumber}; use reth_config::config::EtlConfig; use reth_db::BlockNumberList; use reth_db_api::{ @@ -10,7 +10,11 @@ use reth_db_api::{ DatabaseError, }; use reth_etl::Collector; -use reth_provider::DBProvider; +use reth_primitives::StaticFileSegment; +use reth_provider::{ + providers::StaticFileProvider, BlockReader, DBProvider, ProviderError, + StaticFileProviderFactory, +}; use reth_stages_api::StageError; use std::{collections::HashMap, hash::Hash, ops::RangeBounds}; use tracing::info; @@ -244,3 +248,36 @@ impl LoadMode { matches!(self, Self::Flush) } } + +/// Called when database is ahead of static files. Attempts to find the first block we are missing +/// transactions for. +pub(crate) fn missing_static_data_error( + last_tx_num: TxNumber, + static_file_provider: &StaticFileProvider, + provider: &Provider, + segment: StaticFileSegment, +) -> Result +where + Provider: BlockReader + StaticFileProviderFactory, +{ + let mut last_block = + static_file_provider.get_highest_static_file_block(segment).unwrap_or_default(); + + // To be extra safe, we make sure that the last tx num matches the last block from its indices. + // If not, get it. + loop { + if let Some(indices) = provider.block_body_indices(last_block)? { + if indices.last_tx_num() <= last_tx_num { + break + } + } + if last_block == 0 { + break + } + last_block -= 1; + } + + let missing_block = Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); + + Ok(StageError::MissingStaticFileData { block: missing_block, segment }) +} diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 4c43d4cdcd1d..2f9712f84364 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -15,7 +15,7 @@ use reth_db_api::{ DatabaseError as DbError, }; use reth_primitives::{ - Account, Receipt, SealedBlock, SealedHeader, StaticFileSegment, StorageEntry, + Account, EthPrimitives, Receipt, SealedBlock, SealedHeader, StaticFileSegment, StorageEntry, }; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, @@ -24,7 +24,7 @@ use reth_provider::{ }; use reth_storage_errors::provider::ProviderResult; use reth_testing_utils::generators::ChangeSet; -use std::{collections::BTreeMap, path::Path}; +use std::{collections::BTreeMap, fmt::Debug, path::Path}; use tempfile::TempDir; /// Test database that is used for testing stage implementations. @@ -142,7 +142,7 @@ impl TestStageDB { /// Insert header to static file if `writer` exists, otherwise to DB. pub fn insert_header( - writer: Option<&mut StaticFileProviderRWRefMut<'_>>, + writer: Option<&mut StaticFileProviderRWRefMut<'_, EthPrimitives>>, tx: &TX, header: &SealedHeader, td: U256, @@ -265,7 +265,7 @@ impl TestStageDB { let res = block.body.transactions.iter().try_for_each(|body_tx| { if let Some(txs_writer) = &mut txs_writer { - txs_writer.append_transaction(next_tx_num, &body_tx.clone().into())?; + txs_writer.append_transaction(next_tx_num, body_tx)?; } else { tx.put::(next_tx_num, body_tx.clone().into())? 
} diff --git a/crates/static-file/static-file/Cargo.toml b/crates/static-file/static-file/Cargo.toml index d22b116cdc59..89f60687895e 100644 --- a/crates/static-file/static-file/Cargo.toml +++ b/crates/static-file/static-file/Cargo.toml @@ -13,12 +13,14 @@ workspace = true [dependencies] # reth +reth-codecs.workspace = true reth-db.workspace = true reth-db-api.workspace = true reth-provider.workspace = true reth-storage-errors.workspace = true reth-tokio-util.workspace = true reth-prune-types.workspace = true +reth-primitives-traits.workspace = true reth-static-file-types.workspace = true reth-stages-types.workspace = true diff --git a/crates/static-file/static-file/src/lib.rs b/crates/static-file/static-file/src/lib.rs index 1bfe4134e954..6c95baaae920 100644 --- a/crates/static-file/static-file/src/lib.rs +++ b/crates/static-file/static-file/src/lib.rs @@ -7,14 +7,12 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod event; pub mod segments; mod static_file_producer; -pub use event::StaticFileProducerEvent; pub use static_file_producer::{ StaticFileProducer, StaticFileProducerInner, StaticFileProducerResult, - StaticFileProducerWithResult, StaticFileTargets, + StaticFileProducerWithResult, }; // Re-export for convenience. diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs index 54d5bee65cfd..e06e1f09a177 100644 --- a/crates/static-file/static-file/src/segments/headers.rs +++ b/crates/static-file/static-file/src/segments/headers.rs @@ -2,10 +2,7 @@ use crate::segments::Segment; use alloy_primitives::BlockNumber; use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; -use reth_provider::{ - providers::{StaticFileProvider, StaticFileWriter}, - DBProvider, -}; +use reth_provider::{providers::StaticFileWriter, DBProvider, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; @@ -14,7 +11,7 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Headers; -impl Segment for Headers { +impl Segment for Headers { fn segment(&self) -> StaticFileSegment { StaticFileSegment::Headers } @@ -22,9 +19,9 @@ impl Segment for Headers { fn copy_to_static_files( &self, provider: Provider, - static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()> { + let static_file_provider = provider.static_file_provider(); let mut static_file_writer = static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Headers)?; @@ -49,9 +46,7 @@ impl Segment for Headers { debug_assert_eq!(header_block, header_td_block); debug_assert_eq!(header_td_block, canonical_header_block); - let _static_file_block = - static_file_writer.append_header(&header, header_td.0, &canonical_header)?; - debug_assert_eq!(_static_file_block, header_block); + static_file_writer.append_header(&header, header_td.0, &canonical_header)?; } Ok(()) diff --git a/crates/static-file/static-file/src/segments/mod.rs b/crates/static-file/static-file/src/segments/mod.rs index 3d961c7b1197..fc79effdd5ac 100644 --- a/crates/static-file/static-file/src/segments/mod.rs +++ b/crates/static-file/static-file/src/segments/mod.rs @@ -10,22 +10,22 @@ mod receipts; pub use receipts::Receipts; use alloy_primitives::BlockNumber; -use reth_provider::providers::StaticFileProvider; +use reth_provider::StaticFileProviderFactory; use reth_static_file_types::StaticFileSegment; use 
reth_storage_errors::provider::ProviderResult;
 use std::ops::RangeInclusive;

 /// A segment represents moving some portion of the data to static files.
-pub trait Segment<Provider>: Send + Sync {
+pub trait Segment<Provider: StaticFileProviderFactory>: Send + Sync {
     /// Returns the [`StaticFileSegment`].
     fn segment(&self) -> StaticFileSegment;

-    /// Move data to static files for the provided block range. [`StaticFileProvider`] will handle
+    /// Move data to static files for the provided block range.
+    /// [`StaticFileProvider`](reth_provider::providers::StaticFileProvider) will handle
     /// the management of and writing to files.
     fn copy_to_static_files(
         &self,
         provider: Provider,
-        static_file_provider: StaticFileProvider,
         block_range: RangeInclusive<BlockNumber>,
     ) -> ProviderResult<()>;
 }
diff --git a/crates/static-file/static-file/src/segments/receipts.rs b/crates/static-file/static-file/src/segments/receipts.rs
index 4e2185a598a8..bd808b4d839b 100644
--- a/crates/static-file/static-file/src/segments/receipts.rs
+++ b/crates/static-file/static-file/src/segments/receipts.rs
@@ -3,8 +3,7 @@ use alloy_primitives::BlockNumber;
 use reth_db::tables;
 use reth_db_api::{cursor::DbCursorRO, transaction::DbTx};
 use reth_provider::{
-    providers::{StaticFileProvider, StaticFileWriter},
-    BlockReader, DBProvider,
+    providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory,
 };
 use reth_static_file_types::StaticFileSegment;
 use reth_storage_errors::provider::{ProviderError, ProviderResult};
@@ -14,7 +13,9 @@ use std::ops::RangeInclusive;
 #[derive(Debug, Default)]
 pub struct Receipts;

-impl<Provider: DBProvider + BlockReader> Segment<Provider> for Receipts {
+impl<Provider: StaticFileProviderFactory + DBProvider + BlockReader> Segment<Provider>
+    for Receipts
+{
     fn segment(&self) -> StaticFileSegment {
         StaticFileSegment::Receipts
     }
@@ -22,15 +23,14 @@ impl<Provider: DBProvider + BlockReader> Segment<Provider> for Receipts {
     fn copy_to_static_files(
         &self,
         provider: Provider,
-        static_file_provider: StaticFileProvider,
         block_range: RangeInclusive<BlockNumber>,
     ) -> ProviderResult<()> {
+        let static_file_provider = provider.static_file_provider();
         let mut static_file_writer =
             static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Receipts)?;

         for block in block_range {
-            let _static_file_block = static_file_writer.increment_block(block)?;
-            debug_assert_eq!(_static_file_block, block);
+            static_file_writer.increment_block(block)?;

             let block_body_indices = provider
                 .block_body_indices(block)?
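The hunks above rework the `Segment` trait: the static file provider is no longer passed in as a second argument, it is obtained from the database provider itself via `StaticFileProviderFactory`. A minimal sketch of what a custom segment looks like under the new shape (the `MySegment` type is hypothetical and the exact bounds are illustrative, not part of this diff):

```rust
use alloy_primitives::BlockNumber;
use reth_provider::{providers::StaticFileWriter, DBProvider, StaticFileProviderFactory};
use reth_static_file::segments::Segment;
use reth_static_file_types::StaticFileSegment;
use reth_storage_errors::provider::ProviderResult;
use std::ops::RangeInclusive;

/// Hypothetical segment, shown only to illustrate the reworked trait.
#[derive(Debug, Default)]
struct MySegment;

impl<Provider: StaticFileProviderFactory + DBProvider> Segment<Provider> for MySegment {
    fn segment(&self) -> StaticFileSegment {
        StaticFileSegment::Headers
    }

    fn copy_to_static_files(
        &self,
        provider: Provider,
        block_range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<()> {
        // The writer is now reached through the provider itself ...
        let static_file_provider = provider.static_file_provider();
        let mut writer =
            static_file_provider.get_writer(*block_range.start(), self.segment())?;
        for block in block_range {
            // ... instead of arriving as a second argument.
            writer.increment_block(block)?;
        }
        Ok(())
    }
}
```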
diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs
index 52e0ca8b575f..5b686cfe109f 100644
--- a/crates/static-file/static-file/src/segments/transactions.rs
+++ b/crates/static-file/static-file/src/segments/transactions.rs
@@ -1,10 +1,11 @@
 use crate::segments::Segment;
 use alloy_primitives::BlockNumber;
-use reth_db::tables;
+use reth_codecs::Compact;
+use reth_db::{table::Value, tables};
 use reth_db_api::{cursor::DbCursorRO, transaction::DbTx};
+use reth_primitives_traits::NodePrimitives;
 use reth_provider::{
-    providers::{StaticFileProvider, StaticFileWriter},
-    BlockReader, DBProvider,
+    providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory,
 };
 use reth_static_file_types::StaticFileSegment;
 use reth_storage_errors::provider::{ProviderError, ProviderResult};
@@ -14,7 +15,12 @@ use std::ops::RangeInclusive;
 #[derive(Debug, Default)]
 pub struct Transactions;

-impl<Provider: DBProvider + BlockReader> Segment<Provider> for Transactions {
+impl<Provider> Segment<Provider> for Transactions
+where
+    Provider: StaticFileProviderFactory<Primitives: NodePrimitives<SignedTx: Value + Compact>>
+        + DBProvider
+        + BlockReader,
+{
     fn segment(&self) -> StaticFileSegment {
         StaticFileSegment::Transactions
     }
@@ -24,22 +30,22 @@ impl<Provider: DBProvider + BlockReader> Segment<Provider> for Transactions {
     fn copy_to_static_files(
         &self,
         provider: Provider,
-        static_file_provider: StaticFileProvider,
         block_range: RangeInclusive<BlockNumber>,
     ) -> ProviderResult<()> {
+        let static_file_provider = provider.static_file_provider();
         let mut static_file_writer = static_file_provider
             .get_writer(*block_range.start(), StaticFileSegment::Transactions)?;

         for block in block_range {
-            let _static_file_block = static_file_writer.increment_block(block)?;
-            debug_assert_eq!(_static_file_block, block);
+            static_file_writer.increment_block(block)?;

             let block_body_indices = provider
                 .block_body_indices(block)?
                 .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?;

-            let mut transactions_cursor =
-                provider.tx_ref().cursor_read::<tables::Transactions>()?;
+            let mut transactions_cursor = provider.tx_ref().cursor_read::<tables::Transactions<
+                <Provider::Primitives as NodePrimitives>::SignedTx,
+            >>()?;
             let transactions_walker =
                 transactions_cursor.walk_range(block_body_indices.tx_num_range())?;
diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs
index 2c442aedfa34..371a344d8727 100644
--- a/crates/static-file/static-file/src/static_file_producer.rs
+++ b/crates/static-file/static-file/src/static_file_producer.rs
@@ -4,13 +4,16 @@ use crate::{segments, segments::Segment, StaticFileProducerEvent};
 use alloy_primitives::BlockNumber;
 use parking_lot::Mutex;
 use rayon::prelude::*;
+use reth_codecs::Compact;
+use reth_db::table::Value;
+use reth_primitives_traits::NodePrimitives;
 use reth_provider::{
     providers::StaticFileWriter, BlockReader, ChainStateBlockReader, DBProvider,
     DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory,
 };
 use reth_prune_types::PruneModes;
 use reth_stages_types::StageId;
-use reth_static_file_types::HighestStaticFiles;
+use reth_static_file_types::{HighestStaticFiles, StaticFileTargets};
 use reth_storage_errors::provider::ProviderResult;
 use reth_tokio_util::{EventSender, EventStream};
 use std::{
@@ -66,40 +69,6 @@ pub struct StaticFileProducerInner<Provider> {
     event_sender: EventSender<StaticFileProducerEvent>,
 }

-/// Static File targets, per data segment, measured in [`BlockNumber`].
-#[derive(Debug, Clone, Eq, PartialEq)]
-pub struct StaticFileTargets {
-    headers: Option<RangeInclusive<BlockNumber>>,
-    receipts: Option<RangeInclusive<BlockNumber>>,
-    transactions: Option<RangeInclusive<BlockNumber>>,
-}
-
-impl StaticFileTargets {
-    /// Returns `true` if any of the targets are [Some].
-    pub const fn any(&self) -> bool {
-        self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some()
-    }
-
-    // Returns `true` if all targets are either [`None`] or has beginning of the range equal to the
-    // highest static_file.
-    fn is_contiguous_to_highest_static_files(&self, static_files: HighestStaticFiles) -> bool {
-        [
-            (self.headers.as_ref(), static_files.headers),
-            (self.receipts.as_ref(), static_files.receipts),
-            (self.transactions.as_ref(), static_files.transactions),
-        ]
-        .iter()
-        .all(|(target_block_range, highest_static_fileted_block)| {
-            target_block_range.map_or(true, |target_block_range| {
-                *target_block_range.start() ==
-                    highest_static_fileted_block.map_or(0, |highest_static_fileted_block| {
-                        highest_static_fileted_block + 1
-                    })
-            })
-        })
-    }
-}
-
 impl<Provider> StaticFileProducerInner<Provider> {
     fn new(provider: Provider, prune_modes: PruneModes) -> Self {
         Self { provider, prune_modes, event_sender: Default::default() }
     }
 }

@@ -119,7 +88,12 @@ where
 impl<Provider> StaticFileProducerInner<Provider>
 where
     Provider: StaticFileProviderFactory
-        + DatabaseProviderFactory,
+        + DatabaseProviderFactory<
+            Provider: StaticFileProviderFactory<
+                Primitives: NodePrimitives<SignedTx: Value + Compact, Receipt: Value + Compact>,
+            > + StageCheckpointReader
+                + BlockReader,
+        >,
 {
     /// Listen for events on the `static_file_producer`.
     pub fn events(&self) -> EventStream<StaticFileProducerEvent> {
@@ -170,7 +144,7 @@ where
             // Create a new database transaction on every segment to prevent long-lived read-only
             // transactions
             let provider = self.provider.database_provider_ro()?.disable_long_read_transaction_safety();
-            segment.copy_to_static_files(provider, self.provider.static_file_provider(), block_range.clone())?;
+            segment.copy_to_static_files(provider, block_range.clone())?;

             let elapsed = start.elapsed(); // TODO(alexey): track in metrics
             debug!(target: "static_file", segment = %segment.segment(), ?block_range, ?elapsed, "Finished StaticFileProducer segment");
diff --git a/crates/static-file/static-file/src/event.rs b/crates/static-file/types/src/event.rs
similarity index 87%
rename from crates/static-file/static-file/src/event.rs
rename to crates/static-file/types/src/event.rs
index a11333ce53a2..1e5d2cb6032e 100644
--- a/crates/static-file/static-file/src/event.rs
+++ b/crates/static-file/types/src/event.rs
@@ -1,7 +1,7 @@
 use crate::StaticFileTargets;
 use std::time::Duration;

-/// An event emitted by a [`StaticFileProducer`][crate::StaticFileProducer].
+/// An event emitted by the static file producer.
 #[derive(Debug, PartialEq, Eq, Clone)]
 pub enum StaticFileProducerEvent {
     /// Emitted when static file producer started running.
diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs
index 6e954a781b71..4e9bf90f1c90 100644
--- a/crates/static-file/types/src/lib.rs
+++ b/crates/static-file/types/src/lib.rs
@@ -9,11 +9,14 @@
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]

 mod compression;
+mod event;
 mod segment;

 use alloy_primitives::BlockNumber;
 pub use compression::Compression;
+pub use event::StaticFileProducerEvent;
 pub use segment::{SegmentConfig, SegmentHeader, SegmentRangeInclusive, StaticFileSegment};
+use std::ops::RangeInclusive;

 /// Default static file block count.
pub const DEFAULT_BLOCKS_PER_STATIC_FILE: u64 = 500_000;
@@ -62,6 +65,43 @@ impl HighestStaticFiles {
     }
 }

+/// Static File targets, per data segment, measured in [`BlockNumber`].
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct StaticFileTargets {
+    /// Targeted range of headers.
+    pub headers: Option<RangeInclusive<BlockNumber>>,
+    /// Targeted range of receipts.
+    pub receipts: Option<RangeInclusive<BlockNumber>>,
+    /// Targeted range of transactions.
+    pub transactions: Option<RangeInclusive<BlockNumber>>,
+}
+
+impl StaticFileTargets {
+    /// Returns `true` if any of the targets are [`Some`].
+    pub const fn any(&self) -> bool {
+        self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some()
+    }
+
+    /// Returns `true` if all targets are either [`None`] or begin right after the corresponding
+    /// highest static file block.
+    pub fn is_contiguous_to_highest_static_files(&self, static_files: HighestStaticFiles) -> bool {
+        [
+            (self.headers.as_ref(), static_files.headers),
+            (self.receipts.as_ref(), static_files.receipts),
+            (self.transactions.as_ref(), static_files.transactions),
+        ]
+        .iter()
+        .all(|(target_block_range, highest_static_fileted_block)| {
+            target_block_range.map_or(true, |target_block_range| {
+                *target_block_range.start() ==
+                    highest_static_fileted_block.map_or(0, |highest_static_fileted_block| {
+                        highest_static_fileted_block + 1
+                    })
+            })
+        })
+    }
+}
+
 /// Each static file has a fixed number of blocks. This gives out the range where the requested
 /// block is positioned. Used for segment filename.
 pub const fn find_fixed_range(
diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml
index 20a0673dff62..57fe9f726c7b 100644
--- a/crates/storage/codecs/Cargo.toml
+++ b/crates/storage/codecs/Cargo.toml
@@ -81,7 +81,7 @@ serde = [
     "alloy-primitives/serde",
     "alloy-trie?/serde",
     "bytes/serde",
-    "op-alloy-consensus?/serde"
+    "op-alloy-consensus?/serde",
 ]
 arbitrary = [
     "alloy-consensus?/arbitrary",
diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs
index 284c6454f838..86d397ad24f0 100644
--- a/crates/storage/codecs/src/lib.rs
+++ b/crates/storage/codecs/src/lib.rs
@@ -17,13 +17,14 @@
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
 #![cfg_attr(not(feature = "std"), no_std)]

+extern crate alloc;
+
 pub use reth_codecs_derive::*;
 use serde as _;

 use alloy_primitives::{Address, Bloom, Bytes, FixedBytes, U256};
 use bytes::{Buf, BufMut};

-extern crate alloc;
 use alloc::vec::Vec;

 #[cfg(feature = "test-utils")]
@@ -33,6 +34,8 @@ pub mod alloy;
 #[cfg(any(test, feature = "alloy"))]
 mod alloy;

+pub mod txtype;
+
 #[cfg(any(test, feature = "test-utils"))]
 pub mod test_utils;
diff --git a/crates/storage/codecs/src/txtype.rs b/crates/storage/codecs/src/txtype.rs
new file mode 100644
index 000000000000..ce392b59cd08
--- /dev/null
+++ b/crates/storage/codecs/src/txtype.rs
@@ -0,0 +1,15 @@
+//! Commonly used constants for transaction types.
+
+/// Identifier parameter for legacy transaction
+pub const COMPACT_IDENTIFIER_LEGACY: usize = 0;
+
+/// Identifier parameter for EIP-2930 transaction
+pub const COMPACT_IDENTIFIER_EIP2930: usize = 1;
+
+/// Identifier parameter for EIP-1559 transaction
+pub const COMPACT_IDENTIFIER_EIP1559: usize = 2;
+
+/// For backwards compatibility purposes only 2 bits of the type are encoded in the identifier
+/// parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type is
+/// read from the buffer as a single byte.
+pub const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index f827e48c8c3c..3aa908a60093 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-codecs.workspace = true reth-db-models.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde", "reth-codec"] } reth-prune-types.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true @@ -25,6 +25,7 @@ reth-trie-common.workspace = true # ethereum alloy-primitives.workspace = true alloy-genesis.workspace = true +alloy-consensus.workspace = true # codecs modular-bitfield.workspace = true @@ -57,29 +58,27 @@ proptest-arbitrary-interop.workspace = true [features] test-utils = [ - "arbitrary", - "reth-primitives/test-utils", - "reth-primitives-traits/test-utils", - "reth-codecs/test-utils", - "reth-db-models/test-utils", - "reth-trie-common/test-utils", - "reth-prune-types/test-utils", - "reth-stages-types/test-utils" + "arbitrary", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-codecs/test-utils", + "reth-db-models/test-utils", + "reth-trie-common/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", ] arbitrary = [ - "reth-primitives/arbitrary", - "reth-db-models/arbitrary", - "dep:arbitrary", - "dep:proptest", - "reth-primitives-traits/arbitrary", - "reth-trie-common/arbitrary", - "alloy-primitives/arbitrary", - "parity-scale-codec/arbitrary", - "reth-codecs/arbitrary", - "reth-prune-types/arbitrary", - "reth-stages-types/arbitrary" -] -optimism = [ - "reth-primitives/optimism", - "reth-codecs/optimism" + "reth-primitives/arbitrary", + "reth-db-models/arbitrary", + "dep:arbitrary", + "dep:proptest", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-primitives/arbitrary", + "parity-scale-codec/arbitrary", + "reth-codecs/arbitrary", + "reth-prune-types/arbitrary", + "reth-stages-types/arbitrary", + "alloy-consensus/arbitrary", ] +optimism = ["reth-primitives/optimism", "reth-codecs/optimism"] diff --git a/crates/storage/db-api/src/models/blocks.rs b/crates/storage/db-api/src/models/blocks.rs index 7268d82dd3cc..0145ceb52b5b 100644 --- a/crates/storage/db-api/src/models/blocks.rs +++ b/crates/storage/db-api/src/models/blocks.rs @@ -1,8 +1,8 @@ //! Block related models and types. +use alloy_consensus::Header; use alloy_primitives::B256; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::Header; use serde::{Deserialize, Serialize}; /// The storage representation of a block's ommers. 
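The new `txtype` module above centralizes the compact encoding's two-bit transaction type identifier: values 0 through 2 map directly to legacy, EIP-2930, and EIP-1559, while `COMPACT_EXTENDED_IDENTIFIER_FLAG` (3) signals that the full type byte follows in the buffer. A sketch of how a decoder might consume these constants (the `read_tx_type` helper is hypothetical, shown only to illustrate the scheme):

```rust
use bytes::Buf;
use reth_codecs::txtype::{
    COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930,
    COMPACT_IDENTIFIER_LEGACY,
};

/// Illustrative-only helper: maps the two-bit compact identifier to a raw type byte,
/// falling back to a full byte read when the extended flag is set.
fn read_tx_type(identifier: usize, buf: &mut &[u8]) -> u8 {
    match identifier {
        COMPACT_IDENTIFIER_LEGACY => 0,
        COMPACT_IDENTIFIER_EIP2930 => 1,
        COMPACT_IDENTIFIER_EIP1559 => 2,
        // Only 2 bits fit in the identifier, so the full type byte follows in the buffer.
        COMPACT_EXTENDED_IDENTIFIER_FLAG => buf.get_u8(),
        _ => unreachable!("only 2 bits are encoded in the identifier"),
    }
}
```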
diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index fc3351b73b67..5d18711922ed 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -4,11 +4,12 @@ use crate::{ table::{Compress, Decode, Decompress, Encode}, DatabaseError, }; +use alloy_consensus::Header; use alloy_genesis::GenesisAccount; use alloy_primitives::{Address, Bytes, Log, B256, U256}; use reth_codecs::{add_arbitrary_tests, Compact}; use reth_primitives::{ - Account, Bytecode, Header, Receipt, StorageEntry, TransactionSignedNoHash, TxType, + Account, Bytecode, Receipt, StorageEntry, TransactionSigned, TransactionSignedNoHash, TxType, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; @@ -226,6 +227,7 @@ impl_compression_for_compact!( Bytecode, AccountBeforeTx, TransactionSignedNoHash, + TransactionSigned, CompactU256, StageCheckpoint, PruneCheckpoint, @@ -313,7 +315,7 @@ mod tests { fn test_ensure_backwards_compatibility() { use super::*; use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; - use reth_primitives::{Account, Receipt, ReceiptWithBloom}; + use reth_primitives::{Account, Receipt}; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, @@ -334,7 +336,6 @@ mod tests { assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); assert_eq!(Receipt::bitflag_encoded_bytes(), 1); - assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); @@ -356,7 +357,6 @@ mod tests { validate_bitflag_backwards_compat!(PruneMode, UnusedBits::Zero); validate_bitflag_backwards_compat!(PruneSegment, UnusedBits::Zero); validate_bitflag_backwards_compat!(Receipt, UnusedBits::Zero); - validate_bitflag_backwards_compat!(ReceiptWithBloom, UnusedBits::Zero); validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); diff --git a/crates/storage/db-api/src/table.rs b/crates/storage/db-api/src/table.rs index 963457af05c3..acdc8efc78fc 100644 --- a/crates/storage/db-api/src/table.rs +++ b/crates/storage/db-api/src/table.rs @@ -88,6 +88,9 @@ pub trait Table: Send + Sync + Debug + 'static { /// The table's name. const NAME: &'static str; + /// Whether the table is also a `DUPSORT` table. + const DUPSORT: bool; + /// Key element of `Table`. /// /// Sorting should be taken into account when encoding this. 
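The `DUPSORT` associated constant added to `Table` above makes every table declare statically whether it is also a dup-sorted table, matching the `RawTable`/`RawDupSort` impls later in this diff. A minimal hand-written table under the new trait shape might look like the sketch below (the table name and key/value choice are illustrative, and any trait bounds on `Key`/`Value` not shown in this hunk are elided):

```rust
use alloy_primitives::TxNumber;
use reth_db_api::table::Table;
use reth_primitives::Receipt;

/// Hypothetical stand-alone table, only to show the new associated const.
#[derive(Debug)]
struct MyReceipts;

impl Table for MyReceipts {
    const NAME: &'static str = "MyReceipts";
    // Declared up front now; dup-sorted tables would set this to `true` and
    // additionally implement `DupSort` with a `SubKey`.
    const DUPSORT: bool = false;

    type Key = TxNumber;
    type Value = Receipt;
}
```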
diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 8c930b22ef81..ec31edd06823 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -10,13 +10,11 @@ use reth_db_api::{transaction::DbTxMut, DatabaseError}; use reth_etl::Collector; use reth_primitives::{Account, Bytecode, GotExpected, Receipts, StaticFileSegment, StorageEntry}; use reth_provider::{ - errors::provider::ProviderResult, - providers::{StaticFileProvider, StaticFileWriter}, - writer::UnifiedStorageWriter, + errors::provider::ProviderResult, providers::StaticFileWriter, writer::UnifiedStorageWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, HashingWriter, HeaderProvider, HistoryWriter, - OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointWriter, StateChangeWriter, - StateWriter, StaticFileProviderFactory, TrieWriter, + OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointWriter, StateWriter, + StaticFileProviderFactory, StorageLocation, TrieWriter, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; @@ -72,11 +70,13 @@ impl From for InitDatabaseError { pub fn init_genesis(factory: &PF) -> Result where PF: DatabaseProviderFactory + StaticFileProviderFactory + ChainSpecProvider + BlockHashReader, - PF::ProviderRW: StageCheckpointWriter + PF::ProviderRW: StaticFileProviderFactory + + StageCheckpointWriter + HistoryWriter + HeaderProvider + HashingWriter - + StateChangeWriter + + StateWriter + + StateWriter + AsRef, { let chain = factory.chain_spec(); @@ -114,8 +114,7 @@ where insert_genesis_history(&provider_rw, alloc.iter())?; // Insert header - let static_file_provider = factory.static_file_provider(); - insert_genesis_header(&provider_rw, &static_file_provider, &chain)?; + insert_genesis_header(&provider_rw, &chain)?; insert_genesis_state(&provider_rw, alloc.iter())?; @@ -124,6 +123,7 @@ where provider_rw.save_stage_checkpoint(stage, Default::default())?; } + let static_file_provider = provider_rw.static_file_provider(); // Static file segments start empty, so we need to initialize the genesis block. let segment = StaticFileSegment::Receipts; static_file_provider.latest_writer(segment)?.increment_block(0)?; @@ -133,7 +133,7 @@ where // `commit_unwind`` will first commit the DB and then the static file provider, which is // necessary on `init_genesis`. 
- UnifiedStorageWriter::commit_unwind(provider_rw, static_file_provider)?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; Ok(hash) } @@ -144,7 +144,11 @@ pub fn insert_genesis_state<'a, 'b, Provider>( alloc: impl Iterator, ) -> ProviderResult<()> where - Provider: DBProvider + StateChangeWriter + HeaderProvider + AsRef, + Provider: StaticFileProviderFactory + + DBProvider + + HeaderProvider + + StateWriter + + AsRef, { insert_state(provider, alloc, 0) } @@ -156,7 +160,11 @@ pub fn insert_state<'a, 'b, Provider>( block: u64, ) -> ProviderResult<()> where - Provider: DBProvider + StateChangeWriter + HeaderProvider + AsRef, + Provider: StaticFileProviderFactory + + DBProvider + + HeaderProvider + + StateWriter + + AsRef, { let capacity = alloc.size_hint().1.unwrap_or(0); let mut state_init: BundleStateInit = HashMap::with_capacity(capacity); @@ -223,8 +231,7 @@ where Vec::new(), ); - let mut storage_writer = UnifiedStorageWriter::from_database(&provider); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + provider.write_state(execution_outcome, OriginalValuesKnown::Yes, StorageLocation::Database)?; trace!(target: "reth::cli", "Inserted state"); @@ -296,14 +303,14 @@ where /// Inserts header for the genesis state. pub fn insert_genesis_header( provider: &Provider, - static_file_provider: &StaticFileProvider, chain: &Spec, ) -> ProviderResult<()> where - Provider: DBProvider, + Provider: StaticFileProviderFactory + DBProvider, Spec: EthChainSpec, { let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash()); + let static_file_provider = provider.static_file_provider(); match static_file_provider.block_hash(0) { Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => { @@ -333,7 +340,8 @@ pub fn init_from_state_dump( etl_config: EtlConfig, ) -> eyre::Result where - Provider: DBProvider + Provider: StaticFileProviderFactory + + DBProvider + BlockNumReader + BlockHashReader + ChainSpecProvider @@ -341,8 +349,8 @@ where + HistoryWriter + HeaderProvider + HashingWriter - + StateChangeWriter + TrieWriter + + StateWriter + AsRef, { let block = provider_rw.last_block_number()?; @@ -457,11 +465,12 @@ fn dump_state( block: u64, ) -> Result<(), eyre::Error> where - Provider: DBProvider + Provider: StaticFileProviderFactory + + DBProvider + HeaderProvider + HashingWriter + HistoryWriter - + StateChangeWriter + + StateWriter + AsRef, { let accounts_len = collector.len(); @@ -584,7 +593,9 @@ struct GenesisAccountWithAddress { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::constants::{MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; + use alloy_consensus::constants::{ + HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, + }; use alloy_genesis::Genesis; use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA}; use reth_db::DatabaseEnv; @@ -595,7 +606,6 @@ mod tests { transaction::DbTx, Database, }; - use reth_primitives::HOLESKY_GENESIS_HASH; use reth_primitives_traits::IntegerList; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, diff --git a/crates/storage/db-models/Cargo.toml b/crates/storage/db-models/Cargo.toml index 59d95c2263d0..0997c08b784d 100644 --- a/crates/storage/db-models/Cargo.toml +++ b/crates/storage/db-models/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-codecs.workspace = true -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde", "reth-codec"] 
} # ethereum alloy-primitives.workspace = true diff --git a/crates/storage/db-models/src/blocks.rs b/crates/storage/db-models/src/blocks.rs index ed1d7fb67722..be7661c8b123 100644 --- a/crates/storage/db-models/src/blocks.rs +++ b/crates/storage/db-models/src/blocks.rs @@ -12,7 +12,7 @@ pub type NumTransactions = u64; /// /// It has the pointer to the transaction Number of the first /// transaction in the block and the total number of transactions. -#[derive(Debug, Default, Eq, PartialEq, Clone, Serialize, Deserialize, Compact)] +#[derive(Debug, Default, Eq, PartialEq, Clone, Copy, Serialize, Deserialize, Compact)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct StoredBlockBodyIndices { diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 324411613fcc..4a4eff471238 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -15,17 +15,18 @@ workspace = true # reth reth-db-api.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde", "reth-codec"] } reth-fs-util.workspace = true reth-storage-errors.workspace = true reth-nippy-jar.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true +reth-trie-common = { workspace = true, features = ["serde"] } reth-tracing.workspace = true -reth-trie-common.workspace = true # ethereum alloy-primitives.workspace = true +alloy-consensus.workspace = true # mdbx reth-libmdbx = { workspace = true, optional = true, features = [ @@ -47,9 +48,8 @@ page_size = { version = "0.6.0", optional = true } thiserror.workspace = true tempfile = { workspace = true, optional = true } derive_more.workspace = true -paste.workspace = true rustc-hash = { workspace = true, optional = true } -sysinfo = { version = "0.31", default-features = false, features = ["system"] } +sysinfo = { version = "0.32", default-features = false, features = ["system"] } parking_lot = { workspace = true, optional = true } # arbitrary utils @@ -90,31 +90,29 @@ mdbx = [ "dep:rustc-hash", ] test-utils = [ - "dep:tempfile", - "arbitrary", - "parking_lot", - "reth-primitives/test-utils", - "reth-primitives-traits/test-utils", - "reth-db-api/test-utils", - "reth-nippy-jar/test-utils", - "reth-trie-common/test-utils", - "reth-prune-types/test-utils", - "reth-stages-types/test-utils" + "dep:tempfile", + "arbitrary", + "parking_lot", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-db-api/test-utils", + "reth-nippy-jar/test-utils", + "reth-trie-common/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", ] bench = [] arbitrary = [ - "reth-primitives/arbitrary", - "reth-db-api/arbitrary", - "reth-primitives-traits/arbitrary", - "reth-trie-common/arbitrary", - "alloy-primitives/arbitrary", - "reth-prune-types/arbitrary", - "reth-stages-types/arbitrary" -] -optimism = [ - "reth-primitives/optimism", - "reth-db-api/optimism" + "reth-primitives/arbitrary", + "reth-db-api/arbitrary", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-primitives/arbitrary", + "reth-prune-types/arbitrary", + "reth-stages-types/arbitrary", + "alloy-consensus/arbitrary", ] +optimism = ["reth-primitives/optimism", "reth-db-api/optimism"] disable-lock = [] [[bench]] diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs 
index 78a3f7971dab..006213e4cb91 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -66,7 +66,7 @@ impl DatabaseEnvKind { } /// Arguments for database initialization. -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug)] pub struct DatabaseArguments { /// Client version that accesses the database. client_version: ClientVersion, @@ -99,6 +99,12 @@ pub struct DatabaseArguments { exclusive: Option, } +impl Default for DatabaseArguments { + fn default() -> Self { + Self::new(ClientVersion::default()) + } +} + impl DatabaseArguments { /// Create new database arguments with given client version. pub fn new(client_version: ClientVersion) -> Self { @@ -497,6 +503,7 @@ mod tests { test_utils::*, AccountChangeSets, }; + use alloy_consensus::Header; use alloy_primitives::{Address, B256, U256}; use reth_db_api::{ cursor::{DbDupCursorRO, DbDupCursorRW, ReverseWalker, Walker}, @@ -504,7 +511,7 @@ mod tests { table::{Encode, Table}, }; use reth_libmdbx::Error; - use reth_primitives::{Account, Header, StorageEntry}; + use reth_primitives::{Account, StorageEntry}; use reth_primitives_traits::IntegerList; use reth_storage_errors::db::{DatabaseWriteError, DatabaseWriteOperation}; use std::str::FromStr; diff --git a/crates/storage/db/src/lockfile.rs b/crates/storage/db/src/lockfile.rs index a87ab7393f1f..b28a83f11ca4 100644 --- a/crates/storage/db/src/lockfile.rs +++ b/crates/storage/db/src/lockfile.rs @@ -110,6 +110,7 @@ impl ProcessUID { let pid2 = sysinfo::Pid::from(pid); system.refresh_processes_specifics( sysinfo::ProcessesToUpdate::Some(&[pid2]), + true, ProcessRefreshKind::new(), ); system.process(pid2).map(|process| Self { pid, start_time: process.start_time() }) diff --git a/crates/storage/db/src/metrics.rs b/crates/storage/db/src/metrics.rs index fecd691ee5d7..2d908c68156f 100644 --- a/crates/storage/db/src/metrics.rs +++ b/crates/storage/db/src/metrics.rs @@ -347,7 +347,7 @@ impl OperationMetrics { // Record duration only for large values to prevent the performance hit of clock syscall // on small operations - if value_size.map_or(false, |size| size > LARGE_VALUE_THRESHOLD_BYTES) { + if value_size.is_some_and(|size| size > LARGE_VALUE_THRESHOLD_BYTES) { let start = Instant::now(); let result = f(); self.large_value_duration_seconds.record(start.elapsed()); diff --git a/crates/storage/db/src/static_file/mask.rs b/crates/storage/db/src/static_file/mask.rs index f5d35a193d70..38831ea34cab 100644 --- a/crates/storage/db/src/static_file/mask.rs +++ b/crates/storage/db/src/static_file/mask.rs @@ -1,38 +1,5 @@ use reth_db_api::table::Decompress; -/// Generic Mask helper struct for selecting specific column values to read and decompress. -/// -/// #### Explanation: -/// -/// A `NippyJar` static file row can contain multiple column values. To specify the column values -/// to be read, a mask is utilized. -/// -/// For example, a static file with three columns, if the first and last columns are queried, the -/// mask `0b101` would be passed. To select only the second column, the mask `0b010` would be used. -/// -/// Since each static file has its own column distribution, different wrapper types are necessary. -/// For instance, `B256` might be the third column in the `Header` segment, while being the second -/// column in another segment. Hence, `Mask` would only be applicable to one of these -/// scenarios. -/// -/// Alongside, the column selector traits (eg. 
[`ColumnSelectorOne`]) this provides a structured way -/// to tie the types to be decoded to the mask necessary to query them. -#[derive(Debug)] -pub struct Mask(std::marker::PhantomData<(FIRST, SECOND, THIRD)>); - -macro_rules! add_segments { - ($($segment:tt),+) => { - paste::paste! { - $( - #[doc = concat!("Mask for ", stringify!($segment), " static file segment. See [`Mask`] for more.")] - #[derive(Debug)] - pub struct [<$segment Mask>](Mask); - )+ - } - }; -} -add_segments!(Header, Receipt, Transaction); - /// Trait for specifying a mask to select one column value. pub trait ColumnSelectorOne { /// First desired column value @@ -66,21 +33,45 @@ pub trait ColumnSelectorThree { #[macro_export] /// Add mask to select `N` column values from a specific static file segment row. macro_rules! add_static_file_mask { - ($mask_struct:tt, $type1:ty, $mask:expr) => { - impl ColumnSelectorOne for $mask_struct<$type1> { + ($(#[$attr:meta])* $mask_struct:ident $(<$generic:ident>)?, $type1:ty, $mask:expr) => { + $(#[$attr])* + #[derive(Debug)] + pub struct $mask_struct$(<$generic>)?$((std::marker::PhantomData<$generic>))?; + + impl$(<$generic>)? ColumnSelectorOne for $mask_struct$(<$generic>)? + where + $type1: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + { type FIRST = $type1; const MASK: usize = $mask; } }; - ($mask_struct:tt, $type1:ty, $type2:ty, $mask:expr) => { - impl ColumnSelectorTwo for $mask_struct<$type1, $type2> { + ($(#[$attr:meta])* $mask_struct:ident $(<$generic:ident>)?, $type1:ty, $type2:ty, $mask:expr) => { + $(#[$attr])* + #[derive(Debug)] + pub struct $mask_struct$(<$generic>)?$((std::marker::PhantomData<$generic>))?; + + impl$(<$generic>)? ColumnSelectorTwo for $mask_struct$(<$generic>)? + where + $type1: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + $type2: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + { type FIRST = $type1; type SECOND = $type2; const MASK: usize = $mask; } }; - ($mask_struct:tt, $type1:ty, $type2:ty, $type3:ty, $mask:expr) => { - impl ColumnSelectorThree for $mask_struct<$type1, $type2, $type3> { + ($(#[$attr:meta])* $mask_struct:ident $(<$generic:ident>)?, $type1:ty, $type2:ty, $type3:ty, $mask:expr) => { + $(#[$attr])* + #[derive(Debug)] + pub struct $mask_struct$(<$generic>)?$((std::marker::PhantomData<$generic>))?; + + impl$(<$generic>)? ColumnSelectorThree for $mask_struct$(<$generic>)? 
+ where + $type1: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + $type2: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + $type3: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + { type FIRST = $type1; type SECOND = $type2; type THIRD = $type3; diff --git a/crates/storage/db/src/static_file/masks.rs b/crates/storage/db/src/static_file/masks.rs index ac2811a44d77..17833e7ee293 100644 --- a/crates/storage/db/src/static_file/masks.rs +++ b/crates/storage/db/src/static_file/masks.rs @@ -1,23 +1,44 @@ -use super::{ReceiptMask, TransactionMask}; use crate::{ add_static_file_mask, - static_file::mask::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask}, - HeaderTerminalDifficulties, RawValue, Receipts, Transactions, + static_file::mask::{ColumnSelectorOne, ColumnSelectorTwo}, + HeaderTerminalDifficulties, }; use alloy_primitives::BlockHash; use reth_db_api::table::Table; -use reth_primitives::Header; // HEADER MASKS -add_static_file_mask!(HeaderMask, Header, 0b001); -add_static_file_mask!(HeaderMask, ::Value, 0b010); -add_static_file_mask!(HeaderMask, BlockHash, 0b100); -add_static_file_mask!(HeaderMask, Header, BlockHash, 0b101); -add_static_file_mask!(HeaderMask, ::Value, BlockHash, 0b110); +add_static_file_mask! { + #[doc = "Mask for selecting a single header from Headers static file segment"] + HeaderMask, H, 0b001 +} +add_static_file_mask! { + #[doc = "Mask for selecting a total difficulty value from Headers static file segment"] + TotalDifficultyMask, ::Value, 0b010 +} +add_static_file_mask! { + #[doc = "Mask for selecting a block hash value from Headers static file segment"] + BlockHashMask, BlockHash, 0b100 +} +add_static_file_mask! { + #[doc = "Mask for selecting a header along with block hash from Headers static file segment"] + HeaderWithHashMask, H, BlockHash, 0b101 +} +add_static_file_mask! { + #[doc = "Mask for selecting a total difficulty along with block hash from Headers static file segment"] + TDWithHashMask, + ::Value, + BlockHash, + 0b110 +} // RECEIPT MASKS -add_static_file_mask!(ReceiptMask, ::Value, 0b1); +add_static_file_mask! { + #[doc = "Mask for selecting a single receipt from Receipts static file segment"] + ReceiptMask, R, 0b1 +} // TRANSACTION MASKS -add_static_file_mask!(TransactionMask, ::Value, 0b1); -add_static_file_mask!(TransactionMask, RawValue<::Value>, 0b1); +add_static_file_mask! { + #[doc = "Mask for selecting a single transaction from Transactions static file segment"] + TransactionMask, T, 0b1 +} diff --git a/crates/storage/db/src/static_file/mod.rs b/crates/storage/db/src/static_file/mod.rs index f27a574f640e..8491bd6ed779 100644 --- a/crates/storage/db/src/static_file/mod.rs +++ b/crates/storage/db/src/static_file/mod.rs @@ -17,6 +17,7 @@ use reth_primitives::{ }; mod masks; +pub use masks::*; /// Alias type for a map of [`StaticFileSegment`] and sorted lists of existing static file ranges. 
type SortedStaticFiles = @@ -38,7 +39,7 @@ pub fn iter_static_files(path: impl AsRef) -> Result>(); for entry in entries { - if entry.metadata().map_or(false, |metadata| metadata.is_file()) { + if entry.metadata().is_ok_and(|metadata| metadata.is_file()) { if let Some((segment, _)) = StaticFileSegment::parse_filename(&entry.file_name().to_string_lossy()) { diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index c697c3199095..a1fea62f0d8b 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -19,6 +19,7 @@ pub use raw::{RawDupSort, RawKey, RawTable, RawValue, TableRawRow}; #[cfg(feature = "mdbx")] pub(crate) mod utils; +use alloy_consensus::Header; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use reth_db_api::{ models::{ @@ -30,7 +31,7 @@ use reth_db_api::{ }, table::{Decode, DupSort, Encode, Table}, }; -use reth_primitives::{Account, Bytecode, Header, Receipt, StorageEntry, TransactionSignedNoHash}; +use reth_primitives::{Account, Bytecode, Receipt, StorageEntry, TransactionSignedNoHash}; use reth_primitives_traits::IntegerList; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; @@ -140,6 +141,7 @@ macro_rules! tables { $value: reth_db_api::table::Value + 'static { const NAME: &'static str = table_names::$name; + const DUPSORT: bool = tables!(@bool $($subkey)?); type Key = $key; type Value = $value; @@ -344,9 +346,9 @@ tables! { } /// Canonical only Stores transaction receipts. - table Receipts { + table Receipts { type Key = TxNumber; - type Value = Receipt; + type Value = R; } /// Stores all smart contract bytecodes. diff --git a/crates/storage/db/src/tables/raw.rs b/crates/storage/db/src/tables/raw.rs index 6b6de41613eb..453116ee5e35 100644 --- a/crates/storage/db/src/tables/raw.rs +++ b/crates/storage/db/src/tables/raw.rs @@ -14,6 +14,7 @@ pub struct RawTable { impl Table for RawTable { const NAME: &'static str = T::NAME; + const DUPSORT: bool = false; type Key = RawKey; type Value = RawValue; @@ -28,6 +29,7 @@ pub struct RawDupSort { impl Table for RawDupSort { const NAME: &'static str = T::NAME; + const DUPSORT: bool = true; type Key = RawKey; type Value = RawValue; diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index d60a2adb92bb..e69c0343f564 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -1,13 +1,9 @@ use crate::{db::DatabaseError, lockfile::StorageLockError, writer::UnifiedStorageWriterError}; -use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256, U256}; -use derive_more::Display; -use reth_primitives::{GotExpected, StaticFileSegment, TxHashOrNumber}; - -#[cfg(feature = "std")] -use std::path::PathBuf; - use alloc::{boxed::Box, string::String}; +use alloy_eips::{BlockHashOrNumber, HashOrNumber}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256}; +use derive_more::Display; +use reth_primitives::{GotExpected, StaticFileSegment}; /// Provider result type. pub type ProviderResult = Result; @@ -66,12 +62,12 @@ pub enum ProviderError { /// when required header related data was not found but was required. #[display("no header found for {_0:?}")] HeaderNotFound(BlockHashOrNumber), - /// The specific transaction is missing. + /// The specific transaction identified by hash or id is missing. 
#[display("no transaction found for {_0:?}")] - TransactionNotFound(TxHashOrNumber), - /// The specific receipt is missing + TransactionNotFound(HashOrNumber), + /// The specific receipt for a transaction identified by hash or id is missing #[display("no receipt found for {_0:?}")] - ReceiptNotFound(TxHashOrNumber), + ReceiptNotFound(HashOrNumber), /// Unable to find the best block. #[display("best block does not exist")] BestBlockNotFound, @@ -81,15 +77,6 @@ pub enum ProviderError { /// Unable to find the safe block. #[display("safe block does not exist")] SafeBlockNotFound, - /// Mismatch of sender and transaction. - #[display("mismatch of sender and transaction id {tx_id}")] - MismatchOfTransactionAndSenderId { - /// The transaction ID. - tx_id: TxNumber, - }, - /// Block body wrong transaction count. - #[display("stored block indices does not match transaction count")] - BlockBodyTransactionCount, /// Thrown when the cache service task dropped. #[display("cache service task stopped")] CacheServiceUnavailable, @@ -120,7 +107,7 @@ pub enum ProviderError { /// Static File is not found at specified path. #[cfg(feature = "std")] #[display("not able to find {_0} static file at {_1:?}")] - MissingStaticFilePath(StaticFileSegment, PathBuf), + MissingStaticFilePath(StaticFileSegment, std::path::PathBuf), /// Static File is not found for requested block. #[display("not able to find {_0} static file for block number {_1}")] MissingStaticFileBlock(StaticFileSegment, BlockNumber), @@ -133,12 +120,12 @@ pub enum ProviderError { /// Trying to insert data from an unexpected block number. #[display("trying to append data to {_0} as block #{_1} but expected block #{_2}")] UnexpectedStaticFileBlockNumber(StaticFileSegment, BlockNumber, BlockNumber), + /// Trying to insert data from an unexpected block number. + #[display("trying to append row to {_0} at index #{_1} but expected index #{_2}")] + UnexpectedStaticFileTxNumber(StaticFileSegment, TxNumber, TxNumber), /// Static File Provider was initialized as read-only. #[display("cannot get a writer on a read-only environment.")] ReadOnlyStaticFileAccess, - /// Error encountered when the block number conversion from U256 to u64 causes an overflow. - #[display("failed to convert block number U256 to u64: {_0}")] - BlockNumberOverflow(U256), /// Consistent view error. #[display("failed to initialize consistent view: {_0}")] ConsistentView(Box), @@ -146,6 +133,8 @@ pub enum ProviderError { StorageLockError(StorageLockError), /// Storage writer error. UnifiedStorageWriterError(UnifiedStorageWriterError), + /// Received invalid output from configured storage implementation. + InvalidStorageOutput, } impl From for ProviderError { diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 index 7b182325b314..0934fea1c16c 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 @@ -27,7 +27,7 @@ mdbx_chk \- MDBX checking tool .SH DESCRIPTION The .B mdbx_chk -utility intended to check an MDBX database file. +utility is intended to check an MDBX database file. .SH OPTIONS .TP .BR \-V @@ -55,7 +55,7 @@ check, including full check of all meta-pages and actual size of database file. .BR \-w Open environment in read-write mode and lock for writing while checking. This could be impossible if environment already used by another process(s) -in an incompatible read-write mode. 
This allow rollback to last steady commit +in an incompatible read-write mode. This allows rollback to last steady commit (in case environment was not closed properly) and then check transaction IDs of meta-pages. Otherwise, without \fB\-w\fP option environment will be opened in read-only mode. @@ -90,7 +90,7 @@ then forcibly loads ones by sequential access and tries to lock database pages i .TP .BR \-n Open MDBX environment(s) which do not use subdirectories. -This is legacy option. For now MDBX handles this automatically. +This is a legacy option. For now MDBX handles this automatically. .SH DIAGNOSTICS Exit status is zero if no errors occur. Errors result in a non-zero exit status diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h index 43960abfb4cd..2665931de527 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h @@ -136,7 +136,7 @@ are only a few cases of changing data. | _DELETING_||| |Key is absent → Error since no such key |\ref mdbx_del() or \ref mdbx_replace()|Error \ref MDBX_NOTFOUND| |Key exist → Delete by key |\ref mdbx_del() with the parameter `data = NULL`|Deletion| -|Key exist → Delete by key with with data matching check|\ref mdbx_del() with the parameter `data` filled with the value which should be match for deletion|Deletion or \ref MDBX_NOTFOUND if the value does not match| +|Key exist → Delete by key with data matching check|\ref mdbx_del() with the parameter `data` filled with the value which should be match for deletion|Deletion or \ref MDBX_NOTFOUND if the value does not match| |Delete at the current cursor position |\ref mdbx_cursor_del() with \ref MDBX_CURRENT flag|Deletion| |Extract (read & delete) value by the key |\ref mdbx_replace() with zero flag and parameter `new_data = NULL`|Returning a deleted value| @@ -1413,7 +1413,7 @@ enum MDBX_env_flags_t { * \ref mdbx_env_set_syncbytes() and \ref mdbx_env_set_syncperiod() functions * could be very useful with `MDBX_SAFE_NOSYNC` flag. * - * The number and volume of of disk IOPs with MDBX_SAFE_NOSYNC flag will + * The number and volume of disk IOPs with MDBX_SAFE_NOSYNC flag will * exactly the as without any no-sync flags. However, you should expect a * larger process's [work set](https://bit.ly/2kA2tFX) and significantly worse * a [locality of reference](https://bit.ly/2mbYq2J), due to the more @@ -2079,7 +2079,7 @@ enum MDBX_option_t { * for all processes interacting with the database. * * \details This defines the number of slots in the lock table that is used to - * track readers in the the environment. The default is about 100 for 4K + * track readers in the environment. The default is about 100 for 4K * system page size. Starting a read-only transaction normally ties a lock * table slot to the current thread until the environment closes or the thread * exits. If \ref MDBX_NOTLS is in use, \ref mdbx_txn_begin() instead ties the @@ -3343,7 +3343,7 @@ mdbx_limits_txnsize_max(intptr_t pagesize); * \ingroup c_settings * * \details This defines the number of slots in the lock table that is used to - * track readers in the the environment. The default is about 100 for 4K system + * track readers in the environment. The default is about 100 for 4K system * page size. Starting a read-only transaction normally ties a lock table slot * to the current thread until the environment closes or the thread exits. 
If
 * \ref MDBX_NOTLS is in use, \ref mdbx_txn_begin() instead ties the slot to the
@@ -5264,7 +5264,7 @@ LIBMDBX_API int mdbx_dbi_sequence(MDBX_txn *txn, MDBX_dbi dbi, uint64_t *result,
 * This returns a comparison as if the two data items were keys in the
 * specified database.
 *
- * \warning There ss a Undefined behavior if one of arguments is invalid.
+ * \warning There is undefined behavior if one of the arguments is invalid.
 *
 * \param [in] txn A transaction handle returned by \ref mdbx_txn_begin().
 * \param [in] dbi A database handle returned by \ref mdbx_dbi_open().
diff --git a/crates/storage/libmdbx-rs/src/flags.rs b/crates/storage/libmdbx-rs/src/flags.rs
index d733327cefa8..1457195be78c 100644
--- a/crates/storage/libmdbx-rs/src/flags.rs
+++ b/crates/storage/libmdbx-rs/src/flags.rs
@@ -56,7 +56,7 @@ pub enum SyncMode {
     /// flag could be used with [`Environment::sync()`](crate::Environment::sync) as alternatively
     /// for batch committing or nested transaction (in some cases).
     ///
-    /// The number and volume of of disk IOPs with [`SyncMode::SafeNoSync`] flag will exactly the
+    /// The number and volume of disk IOPs with the [`SyncMode::SafeNoSync`] flag will be exactly the same
     /// as without any no-sync flags. However, you should expect a larger process's work set
     /// and significantly worse a locality of reference, due to the more intensive allocation
     /// of previously unused pages and increase the size of the database.
diff --git a/crates/storage/libmdbx-rs/src/txn_manager.rs b/crates/storage/libmdbx-rs/src/txn_manager.rs
index 6afd4205a607..ae4a93724c41 100644
--- a/crates/storage/libmdbx-rs/src/txn_manager.rs
+++ b/crates/storage/libmdbx-rs/src/txn_manager.rs
@@ -5,7 +5,10 @@ use crate::{
 };
 use std::{
     ptr,
-    sync::mpsc::{sync_channel, Receiver, SyncSender},
+    sync::{
+        mpsc::{sync_channel, Receiver, SyncSender},
+        Arc,
+    },
 };

 #[derive(Copy, Clone, Debug)]
@@ -28,7 +31,7 @@ pub(crate) enum TxnManagerMessage {
 pub(crate) struct TxnManager {
     sender: SyncSender<TxnManagerMessage>,
     #[cfg(feature = "read-tx-timeouts")]
-    read_transactions: Option<ReadTransactions>,
+    read_transactions: Option<Arc<ReadTransactions>>,
 }

 impl TxnManager {
diff --git a/crates/storage/nippy-jar/Cargo.toml b/crates/storage/nippy-jar/Cargo.toml
index 26ae951695df..56f140afbda4 100644
--- a/crates/storage/nippy-jar/Cargo.toml
+++ b/crates/storage/nippy-jar/Cargo.toml
@@ -35,10 +35,5 @@ rand = { workspace = true, features = ["small_rng"] }
 tempfile.workspace = true

 [features]
-default = ["std"]
+default = []
 test-utils = []
-std = [
-    "thiserror/std",
-    "lz4_flex/std",
-    "serde/std"
-]
diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs
index b1d174feb2c3..98eddf22ee96 100644
--- a/crates/storage/nippy-jar/src/lib.rs
+++ b/crates/storage/nippy-jar/src/lib.rs
@@ -20,11 +20,6 @@ use std::{
     ops::Range,
     path::{Path, PathBuf},
 };
-
-// Windows specific extension for std::fs
-#[cfg(windows)]
-use std::os::windows::prelude::OpenOptionsExt;
-
 use tracing::*;

 /// Compression algorithms supported by `NippyJar`.
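As the `SyncMode::SafeNoSync` docs touched above note, lazily synced commits pair with an explicit `Environment::sync()` to establish durable commit points. A sketch of opening an environment in that mode, assuming the builder API `reth-libmdbx` exposes elsewhere in this repository (method names recalled from memory; treat the whole snippet as illustrative rather than a definitive usage):

```rust
use reth_libmdbx::{Environment, EnvironmentFlags, Mode, SyncMode};
use std::path::Path;

/// Open an environment whose commits are flushed lazily; a sketch only.
fn open_safe_no_sync(path: &Path) -> Result<Environment, reth_libmdbx::Error> {
    let mut builder = Environment::builder();
    builder.set_flags(EnvironmentFlags {
        // Commits stay in the OS page cache until an explicit sync point.
        mode: Mode::ReadWrite { sync_mode: SyncMode::SafeNoSync },
        ..Default::default()
    });
    let env = builder.open(path)?;
    // ... perform a batch of writes, then force a durable commit point:
    env.sync(true)?;
    Ok(env)
}
```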
diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 04a0bf42908e..2875b91149c5 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -17,6 +17,7 @@ reth-chainspec.workspace = true reth-blockchain-tree-api.workspace = true reth-execution-types.workspace = true reth-primitives = { workspace = true, features = ["reth-codec", "secp256k1"] } +reth-primitives-traits = { workspace = true, features = ["reth-codec"] } reth-fs-util.workspace = true reth-errors.workspace = true reth-storage-errors.workspace = true @@ -38,6 +39,7 @@ reth-node-types.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true revm.workspace = true # optimism @@ -65,7 +67,6 @@ strum.workspace = true # test-utils reth-ethereum-engine-primitives = { workspace = true, optional = true } -alloy-consensus = { workspace = true, optional = true } # parallel utils rayon.workspace = true @@ -88,44 +89,47 @@ alloy-consensus.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-execution-types/optimism", - "reth-optimism-primitives", - "reth-codecs/optimism", - "reth-db/optimism", - "reth-db-api/optimism", - "revm/optimism" + "reth-primitives/optimism", + "reth-execution-types/optimism", + "reth-optimism-primitives", + "reth-codecs/optimism", + "reth-db/optimism", + "reth-db-api/optimism", + "revm/optimism", ] serde = [ - "reth-execution-types/serde", - "reth-trie-db/serde", - "reth-trie/serde", - "alloy-consensus?/serde", - "alloy-eips/serde", - "alloy-primitives/serde", - "alloy-rpc-types-engine/serde", - "dashmap/serde", - "notify/serde", - "parking_lot/serde", - "rand/serde", - "revm/serde", - "reth-codecs/serde" + "dashmap/serde", + "notify/serde", + "parking_lot/serde", + "rand/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-rpc-types-engine/serde", + "revm/serde", + "reth-codecs/serde", + "reth-optimism-primitives?/serde", + "reth-primitives-traits/serde", + "reth-execution-types/serde", + "reth-trie-db/serde", + "reth-trie/serde", ] test-utils = [ - "reth-db/test-utils", - "reth-nippy-jar/test-utils", - "reth-trie/test-utils", - "reth-chain-state/test-utils", - "reth-ethereum-engine-primitives", - "alloy-consensus", - "reth-chainspec/test-utils", - "reth-evm/test-utils", - "reth-network-p2p/test-utils", - "reth-primitives/test-utils", - "reth-codecs/test-utils", - "reth-db-api/test-utils", - "reth-trie-db/test-utils", - "revm/test-utils", - "reth-prune-types/test-utils", - "reth-stages-types/test-utils" + "reth-db/test-utils", + "reth-nippy-jar/test-utils", + "reth-trie/test-utils", + "reth-chain-state/test-utils", + "reth-ethereum-engine-primitives", + "reth-chainspec/test-utils", + "reth-evm/test-utils", + "reth-network-p2p/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-codecs/test-utils", + "reth-db-api/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", + "reth-optimism-primitives?/arbitrary", ] diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 894a41620c52..deccdea2831f 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -46,6 +46,9 @@ pub use reth_chain_state::{ CanonStateNotifications, CanonStateSubscriptions, }; +// reexport traits to avoid breaking changes +pub use 
reth_storage_api::{HistoryWriter, StatsReader}; + pub(crate) fn to_range>(bounds: R) -> std::ops::Range { let start = match bounds.start_bound() { std::ops::Bound::Included(&v) => v, diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index dbfb4f7b872b..08f5e4680a2b 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -9,6 +9,7 @@ use crate::{ StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use alloy_consensus::Header; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, @@ -24,14 +25,16 @@ use reth_db::{models::BlockNumberAddress, transaction::DbTx, Database}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; -use reth_node_types::NodeTypesWithDB; +use reth_node_types::{BlockTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ - Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + Account, Block, BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, SealedBlock, + SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, + TransactionSigned, TransactionSignedNoHash, }; +use reth_primitives_traits::BlockBody as _; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{DBProvider, StorageChangeSetReader}; +use reth_storage_api::{DBProvider, NodePrimitivesProvider, StorageChangeSetReader}; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ @@ -54,7 +57,7 @@ pub struct BlockchainProvider2 { pub(crate) database: ProviderFactory, /// Tracks the chain info wrt forkchoice updates and in memory canonical /// state. - pub(crate) canonical_in_memory_state: CanonicalInMemoryState, + pub(crate) canonical_in_memory_state: CanonicalInMemoryState, } impl Clone for BlockchainProvider2 { @@ -114,7 +117,7 @@ impl BlockchainProvider2 { } /// Gets a clone of `canonical_in_memory_state`. - pub fn canonical_in_memory_state(&self) -> CanonicalInMemoryState { + pub fn canonical_in_memory_state(&self) -> CanonicalInMemoryState { self.canonical_in_memory_state.clone() } @@ -129,8 +132,8 @@ impl BlockchainProvider2 { /// This uses a given [`BlockState`] to initialize a state provider for that block. 
fn block_state_provider( &self, - state: &BlockState, - ) -> ProviderResult { + state: &BlockState, + ) -> ProviderResult> { let anchor_hash = state.anchor().hash; let latest_historical = self.database.history_by_block_hash(anchor_hash)?; Ok(state.state_provider(latest_historical)) @@ -142,11 +145,15 @@ impl BlockchainProvider2 { pub fn get_state( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>>> { self.consistent_provider()?.get_state(range) } } +impl NodePrimitivesProvider for BlockchainProvider2 { + type Primitives = N::Primitives; +} + impl DatabaseProviderFactory for BlockchainProvider2 { type DB = N::DB; type Provider = as DatabaseProviderFactory>::Provider; @@ -162,7 +169,7 @@ impl DatabaseProviderFactory for BlockchainProvider2 { } impl StaticFileProviderFactory for BlockchainProvider2 { - fn static_file_provider(&self) -> StaticFileProvider { + fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } } @@ -255,23 +262,33 @@ impl BlockIdReader for BlockchainProvider2 { } impl BlockReader for BlockchainProvider2 { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { self.consistent_provider()?.find_block_by_hash(hash, source) } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { self.consistent_provider()?.block(id) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block()) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block_with_senders()) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } @@ -296,7 +313,7 @@ impl BlockReader for BlockchainProvider2 { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.block_with_senders(id, transaction_kind) } @@ -304,53 +321,55 @@ impl BlockReader for BlockchainProvider2 { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.sealed_block_with_senders(id, transaction_kind) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.consistent_provider()?.block_range(range) } fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.consistent_provider()?.sealed_block_with_senders_range(range) } } impl TransactionsProvider for BlockchainProvider2 { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.consistent_provider()?.transaction_id(tx_hash) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.consistent_provider()?.transaction_by_id(id) } - fn transaction_by_id_no_hash( 
+ fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { - self.consistent_provider()?.transaction_by_id_no_hash(id) + ) -> ProviderResult> { + self.consistent_provider()?.transaction_by_id_unhashed(id) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.consistent_provider()?.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { self.consistent_provider()?.transaction_by_hash_with_meta(tx_hash) } @@ -361,21 +380,21 @@ impl TransactionsProvider for BlockchainProvider2 { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.consistent_provider()?.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.consistent_provider()?.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.consistent_provider()?.transactions_by_tx_range(range) } @@ -392,28 +411,33 @@ impl TransactionsProvider for BlockchainProvider2 { } impl ReceiptProvider for BlockchainProvider2 { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.consistent_provider()?.receipt(id) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { self.consistent_provider()?.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { self.consistent_provider()?.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.consistent_provider()?.receipts_by_tx_range(range) } } impl ReceiptProviderIdExt for BlockchainProvider2 { - fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { self.consistent_provider()?.receipts_by_block_id(block) } } @@ -628,7 +652,7 @@ impl StateProviderFactory for BlockchainProvider2 { } } -impl CanonChainTracker for BlockchainProvider2 +impl CanonChainTracker for BlockchainProvider2 where Self: BlockReader, { @@ -664,9 +688,9 @@ where impl BlockReaderIdExt for BlockchainProvider2 where - Self: BlockReader + ReceiptProviderIdExt, + Self: ReceiptProviderIdExt, { - fn block_by_id(&self, id: BlockId) -> ProviderResult> { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { self.consistent_provider()?.block_by_id(id) } @@ -694,13 +718,15 @@ where } } -impl CanonStateSubscriptions for BlockchainProvider2 { - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { +impl> CanonStateSubscriptions + for BlockchainProvider2 +{ + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.canonical_in_memory_state.subscribe_canon_state() } } -impl ForkChoiceSubscriptions for BlockchainProvider2 { +impl ForkChoiceSubscriptions for BlockchainProvider2 { fn subscribe_safe_block(&self) -> ForkChoiceNotifications { let receiver = self.canonical_in_memory_state.subscribe_safe_block(); ForkChoiceNotifications(receiver) @@ -738,6 +764,8 @@ impl AccountReader for BlockchainProvider2 { } impl StateReader for 
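The recurring change in these trait impls is that `BlockReader`, `TransactionsProvider` and `ReceiptProvider` now carry associated types (`type Block = BlockTy<N>`, `type Transaction = TxTy<N>`, `type Receipt = ReceiptTy<N>`) instead of hard-coded `reth_primitives` types. A minimal sketch of why that helps generic callers (stand-in trait, not the real one):

type ProviderResult<T> = Result<T, String>;

/// Stand-in for the provider traits after this change: the payload type is
/// chosen by the implementation, not fixed by the trait.
trait TransactionsProvider {
    type Transaction;
    fn transaction_by_id(&self, id: u64) -> ProviderResult<Option<Self::Transaction>>;
}

/// Generic code names the provider's transaction type without knowing it.
fn require_tx<P: TransactionsProvider>(p: &P, id: u64) -> ProviderResult<P::Transaction> {
    p.transaction_by_id(id)?.ok_or_else(|| format!("tx {id} not found"))
}

struct MockProvider;

impl TransactionsProvider for MockProvider {
    // A non-Ethereum node could plug a different transaction type in here.
    type Transaction = &'static str;
    fn transaction_by_id(&self, _id: u64) -> ProviderResult<Option<&'static str>> {
        Ok(Some("signed tx"))
    }
}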
BlockchainProvider2 { + type Receipt = ReceiptTy; + /// Re-constructs the [`ExecutionOutcome`] from in-memory and database state, if necessary. /// /// If data for the block does not exist, this will return [`None`]. @@ -747,19 +775,16 @@ impl StateReader for BlockchainProvider2 { /// inconsistent. Currently this can safely be called within the blockchain tree thread, /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the /// first place. - fn get_state(&self, block: BlockNumber) -> ProviderResult> { + fn get_state( + &self, + block: BlockNumber, + ) -> ProviderResult>> { StateReader::get_state(&self.consistent_provider()?, block) } } #[cfg(test)] mod tests { - use std::{ - ops::{Range, RangeBounds}, - sync::Arc, - time::Instant, - }; - use crate::{ providers::BlockchainProvider2, test_utils::{ @@ -788,7 +813,10 @@ mod tests { use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_errors::ProviderError; use reth_execution_types::{Chain, ExecutionOutcome}; - use reth_primitives::{Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash}; + use reth_primitives::{ + BlockExt, Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash, + }; + use reth_primitives_traits::{BlockBody as _, SignedTransaction}; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DatabaseProviderFactory, HeaderProvider, ReceiptProvider, @@ -800,7 +828,11 @@ mod tests { random_receipt, BlockParams, BlockRangeParams, }; use revm::db::BundleState; - use std::ops::Bound; + use std::{ + ops::{Bound, Range, RangeBounds}, + sync::Arc, + time::Instant, + }; const TEST_BLOCKS_COUNT: usize = 5; @@ -880,14 +912,18 @@ mod tests { .unwrap_or_default(); // Insert blocks into the database - for block in &database_blocks { + for (block, receipts) in database_blocks.iter().zip(&receipts) { // TODO: this should be moved inside `insert_historical_block`: let mut transactions_writer = static_file_provider.latest_writer(StaticFileSegment::Transactions)?; + let mut receipts_writer = + static_file_provider.latest_writer(StaticFileSegment::Receipts)?; transactions_writer.increment_block(block.number)?; - for tx in block.body.transactions() { - let tx: TransactionSignedNoHash = tx.clone().into(); - transactions_writer.append_transaction(tx_num, &tx)?; + receipts_writer.increment_block(block.number)?; + + for (tx, receipt) in block.body.transactions().iter().zip(receipts) { + transactions_writer.append_transaction(tx_num, tx)?; + receipts_writer.append_receipt(tx_num, receipt)?; tx_num += 1; } @@ -896,21 +932,8 @@ mod tests { )?; } - // Insert receipts into the static files - UnifiedStorageWriter::new( - &provider_rw, - Some(factory.static_file_provider().latest_writer(StaticFileSegment::Receipts)?), - ) - .append_receipts_from_blocks( - // The initial block number is required - database_blocks.first().map(|b| b.number).unwrap_or_default(), - receipts[..database_blocks.len()] - .iter() - .map(|vec| vec.clone().into_iter().map(Some).collect::>()), - )?; - // Commit to both storages: database and static files - UnifiedStorageWriter::commit(provider_rw, factory.static_file_provider())?; + UnifiedStorageWriter::commit(provider_rw)?; let provider = BlockchainProvider2::new(factory)?; @@ -996,10 +1019,9 @@ mod tests { // Push to disk let provider_rw = hook_provider.database_provider_rw().unwrap(); UnifiedStorageWriter::from(&provider_rw, &hook_provider.static_file_provider()) - 
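In the reworked test setup above, receipts are appended to their static-file segment in the same per-block loop as transactions, sharing one monotonically increasing tx number, instead of being bulk-written afterwards through `append_receipts_from_blocks`. A simplified sketch of that loop shape (toy writer, not the reth API):

/// Toy stand-in for a static-file segment writer.
#[derive(Default)]
struct SegmentWriter {
    rows: Vec<(u64, String)>,
}

impl SegmentWriter {
    fn increment_block(&mut self, _block_number: u64) {}
    fn append(&mut self, tx_num: u64, payload: &str) {
        self.rows.push((tx_num, payload.to_owned()));
    }
}

fn write_blocks(blocks: &[(u64, Vec<(&str, &str)>)]) -> (SegmentWriter, SegmentWriter) {
    let (mut txs_writer, mut receipts_writer) =
        (SegmentWriter::default(), SegmentWriter::default());
    let mut tx_num = 0;
    for (block_number, txs_and_receipts) in blocks {
        // Both segments advance block-by-block, as in the loop above.
        txs_writer.increment_block(*block_number);
        receipts_writer.increment_block(*block_number);
        // One receipt per transaction; both rows share the same tx number.
        for (tx, receipt) in txs_and_receipts {
            txs_writer.append(tx_num, tx);
            receipts_writer.append(tx_num, receipt);
            tx_num += 1;
        }
    }
    (txs_writer, receipts_writer)
}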
.save_blocks(&[lowest_memory_block]) - .unwrap(); - UnifiedStorageWriter::commit(provider_rw, hook_provider.static_file_provider()) + .save_blocks(vec![lowest_memory_block]) .unwrap(); + UnifiedStorageWriter::commit(provider_rw).unwrap(); // Remove from memory hook_provider.canonical_in_memory_state.remove_persisted_blocks(num_hash); @@ -2243,9 +2265,7 @@ mod tests { (transactions_by_tx_range, |block: &SealedBlock, _: &Vec>| block .body .transactions - .iter() - .map(|tx| Into::::into(tx.clone())) - .collect::>()), + .clone()), (receipts_by_tx_range, |block: &SealedBlock, receipts: &Vec>| receipts [block.number as usize] .clone()) @@ -2337,7 +2357,7 @@ mod tests { (block_range, |block: &SealedBlock| block.clone().unseal()), (block_with_senders_range, |block: &SealedBlock| block .clone() - .unseal() + .unseal::() .with_senders_unchecked(vec![])), (sealed_block_with_senders_range, |block: &SealedBlock| block .clone() @@ -2532,7 +2552,7 @@ mod tests { block_with_senders, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Number(block.number), TransactionVariant::WithHash), - block.clone().unseal().with_recovered_senders() + block.clone().unseal::().with_recovered_senders() ), (BlockHashOrNumber::Number(u64::MAX), TransactionVariant::WithHash) ), @@ -2541,7 +2561,7 @@ mod tests { block_with_senders, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Hash(block.hash()), TransactionVariant::WithHash), - block.clone().unseal().with_recovered_senders() + block.clone().unseal::().with_recovered_senders() ), (BlockHashOrNumber::Hash(B256::random()), TransactionVariant::WithHash) ), @@ -2551,7 +2571,12 @@ mod tests { |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Number(block.number), TransactionVariant::WithHash), Some( - block.clone().unseal().with_recovered_senders().unwrap().seal(block.hash()) + block + .clone() + .unseal::() + .with_recovered_senders() + .unwrap() + .seal(block.hash()) ) ), (BlockHashOrNumber::Number(u64::MAX), TransactionVariant::WithHash) @@ -2562,7 +2587,12 @@ mod tests { |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Hash(block.hash()), TransactionVariant::WithHash), Some( - block.clone().unseal().with_recovered_senders().unwrap().seal(block.hash()) + block + .clone() + .unseal::() + .with_recovered_senders() + .unwrap() + .seal(block.hash()) ) ), (BlockHashOrNumber::Hash(B256::random()), TransactionVariant::WithHash) @@ -2587,12 +2617,10 @@ mod tests { ), ( ONE, - transaction_by_id_no_hash, + transaction_by_id_unhashed, |block: &SealedBlock, tx_num: TxNumber, _: B256, _: &Vec>| ( tx_num, - Some(Into::::into( - block.body.transactions[test_tx_index].clone() - )) + Some(block.body.transactions[test_tx_index].clone()) ), u64::MAX ), diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index be6549033cde..652f6fb33fd2 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -9,7 +9,8 @@ use reth_primitives::{Account, Bytecode}; use reth_storage_api::{StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, TrieInput, }; /// A state provider that 
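The tests above switch from a bare `unseal()` to a turbofished `unseal::<…>()` (the concrete type parameter has been lost in this rendering of the diff, but it is the node's block type). Once the sealed wrapper is generic over what it unseals into, inference can fail at these call sites; a minimal illustration with toy types:

struct Sealed<H>(H);

impl<H> Sealed<H> {
    /// Generic unseal: the output block type is chosen by the caller.
    fn unseal<B: From<H>>(self) -> B {
        B::from(self.0)
    }
}

struct Block(u64);

impl From<u64> for Block {
    fn from(header: u64) -> Self {
        Self(header)
    }
}

fn main() {
    let sealed = Sealed(7u64);
    // `let b = sealed.unseal();` would not compile: `B` is ambiguous.
    let _block = sealed.unseal::<Block>();
}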
resolves to data from either a wrapped [`crate::ExecutionOutcome`] @@ -138,6 +139,17 @@ impl StorageRootProvider storage.extend(&hashed_storage); self.state_provider.storage_proof(address, slot, storage) } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + let mut storage = self.get_hashed_storage(address); + storage.extend(&hashed_storage); + self.state_provider.storage_multiproof(address, slots, storage) + } } impl StateProofProvider diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 98f7820e34a2..e70f4b4e5e1d 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -6,24 +6,30 @@ use crate::{ StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use alloy_consensus::Header; use alloy_eips::{ + eip2718::Encodable2718, eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber, }; -use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chain_state::{BlockState, CanonicalInMemoryState, MemoryOverlayStateProviderRef}; use reth_chainspec::{ChainInfo, EthereumHardforks}; use reth_db::models::BlockNumberAddress; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; +use reth_node_types::{BlockTy, ReceiptTy, TxTy}; use reth_primitives::{ - Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + Account, BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, + TransactionMeta, }; +use reth_primitives_traits::{Block, BlockBody}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{DatabaseProviderFactory, StateProvider, StorageChangeSetReader}; +use reth_storage_api::{ + DatabaseProviderFactory, NodePrimitivesProvider, StateProvider, StorageChangeSetReader, +}; use reth_storage_errors::provider::ProviderResult; use revm::{ db::states::PlainStorageRevert, @@ -43,13 +49,14 @@ use tracing::trace; /// CAUTION: Avoid holding this provider for too long or the inner database transaction will /// time-out. #[derive(Debug)] +#[doc(hidden)] // triggers ICE for `cargo docs` pub struct ConsistentProvider { /// Storage provider. storage_provider: as DatabaseProviderFactory>::Provider, /// Head block at time of [`Self`] creation - head_block: Option>, + head_block: Option>>, /// In-memory canonical state. This is not a snapshot, and can change! Use with caution. - canonical_in_memory_state: CanonicalInMemoryState, + canonical_in_memory_state: CanonicalInMemoryState, } impl ConsistentProvider { @@ -60,7 +67,7 @@ impl ConsistentProvider { /// view of memory and database. pub fn new( storage_provider_factory: ProviderFactory, - state: CanonicalInMemoryState, + state: CanonicalInMemoryState, ) -> ProviderResult { // Each one provides a snapshot at the time of instantiation, but its order matters. 
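The new `storage_multiproof` on the bundle-state provider earlier in this chunk follows the same overlay recipe as the existing `storage_proof`: merge the execution outcome's pending storage writes under the caller-supplied `HashedStorage`, then forward the merged view to the wrapped state provider. A simplified, self-contained sketch of that merge (plain maps instead of the real trie types):

use std::collections::HashMap;

/// Toy stand-in for `HashedStorage`: slot -> value.
#[derive(Clone, Default)]
struct HashedStorage(HashMap<u64, u64>);

impl HashedStorage {
    /// Entries from `other` win on conflict, mirroring
    /// `storage.extend(&hashed_storage)` above.
    fn extend(&mut self, other: &Self) {
        self.0.extend(other.0.iter().map(|(k, v)| (*k, *v)));
    }
}

/// Merge pending writes under the caller's overlay, then answer per slot.
/// (The real code forwards the merged storage to
/// `state_provider.storage_multiproof` instead of reading it directly.)
fn storage_multiproof(
    pending_writes: &HashedStorage, // from the wrapped execution outcome
    overlay: HashedStorage,         // caller-supplied hashed storage
    slots: &[u64],
) -> Vec<Option<u64>> {
    let mut storage = pending_writes.clone();
    storage.extend(&overlay);
    slots.iter().map(|slot| storage.0.get(slot).copied()).collect()
}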
// @@ -108,7 +115,7 @@ impl ConsistentProvider { Ok(self.block_state_provider_ref(state)?.boxed()) } else { trace!(target: "providers::blockchain", "Using database state for latest state provider"); - self.storage_provider.latest() + Ok(self.storage_provider.latest()) } } @@ -144,7 +151,7 @@ impl ConsistentProvider { pub fn get_state( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>>> { if range.is_empty() { return Ok(None) } @@ -302,7 +309,7 @@ impl ConsistentProvider { RangeInclusive, &mut P, ) -> ProviderResult>, - G: Fn(&BlockState, &mut P) -> Option, + G: Fn(&BlockState, &mut P) -> Option, P: FnMut(&T) -> bool, { // Each one provides a snapshot at the time of instantiation, but its order matters. @@ -394,8 +401,8 @@ impl ConsistentProvider { /// This uses a given [`BlockState`] to initialize a state provider for that block. fn block_state_provider_ref( &self, - state: &BlockState, - ) -> ProviderResult> { + state: &BlockState, + ) -> ProviderResult> { let anchor_hash = state.anchor().hash; let latest_historical = self.history_by_block_hash_ref(anchor_hash)?; let in_memory = state.chain().map(|block_state| block_state.block()).collect(); @@ -418,7 +425,7 @@ impl ConsistentProvider { &DatabaseProviderRO, RangeInclusive, ) -> ProviderResult>, - M: Fn(RangeInclusive, &BlockState) -> ProviderResult>, + M: Fn(RangeInclusive, &BlockState) -> ProviderResult>, { let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); let provider = &self.storage_provider; @@ -440,7 +447,7 @@ impl ConsistentProvider { let (start, end) = self.convert_range_bounds(range, || { in_mem_chain .iter() - .map(|b| b.block_ref().block().body.transactions.len() as u64) + .map(|b| b.block_ref().block().body.transactions().len() as u64) .sum::() + last_block_body_index.last_tx_num() }); @@ -472,7 +479,7 @@ impl ConsistentProvider { // Iterate from the lowest block to the highest in-memory chain for block_state in in_mem_chain.iter().rev() { - let block_tx_count = block_state.block_ref().block().body.transactions.len(); + let block_tx_count = block_state.block_ref().block().body.transactions().len(); let remaining = (tx_range.end() - tx_range.start() + 1) as usize; // If the transaction range start is equal or higher than the next block first @@ -514,7 +521,7 @@ impl ConsistentProvider { ) -> ProviderResult> where S: FnOnce(&DatabaseProviderRO) -> ProviderResult>, - M: Fn(usize, TxNumber, &BlockState) -> ProviderResult>, + M: Fn(usize, TxNumber, &BlockState) -> ProviderResult>, { let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); let provider = &self.storage_provider; @@ -546,10 +553,10 @@ impl ConsistentProvider { let executed_block = block_state.block_ref(); let block = executed_block.block(); - for tx_index in 0..block.body.transactions.len() { + for tx_index in 0..block.body.transactions().len() { match id { HashOrNumber::Hash(tx_hash) => { - if tx_hash == block.body.transactions[tx_index].hash() { + if tx_hash == block.body.transactions()[tx_index].trie_hash() { return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) } } @@ -581,7 +588,7 @@ impl ConsistentProvider { ) -> ProviderResult where S: FnOnce(&DatabaseProviderRO) -> ProviderResult, - M: Fn(&BlockState) -> ProviderResult, + M: Fn(&BlockState) -> ProviderResult, { if let Some(Some(block_state)) = self.head_block.as_ref().map(|b| b.block_on_chain(id)) { return fetch_from_block_state(block_state) @@ -610,8 +617,12 @@ impl ConsistentProvider { } } +impl 
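The `get_in_memory_or_storage_by_*` helpers above all share one dispatch shape: consult the snapshot of the in-memory chain first, and only fall back to the database closure on a miss. A condensed stand-in version:

type ProviderResult<T> = Result<T, String>;

/// Stand-in for a not-yet-persisted block held in memory.
struct BlockState {
    number: u64,
}

struct ConsistentView {
    /// Canonical blocks that have not been written to the database yet.
    in_memory: Vec<BlockState>,
}

impl ConsistentView {
    fn get_in_memory_or_storage_by_block<T>(
        &self,
        number: u64,
        fetch_from_db: impl FnOnce(u64) -> ProviderResult<Option<T>>,
        fetch_from_block_state: impl FnOnce(&BlockState) -> ProviderResult<Option<T>>,
    ) -> ProviderResult<Option<T>> {
        if let Some(state) = self.in_memory.iter().find(|b| b.number == number) {
            return fetch_from_block_state(state)
        }
        fetch_from_db(number)
    }
}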
NodePrimitivesProvider for ConsistentProvider { + type Primitives = N::Primitives; +} + impl StaticFileProviderFactory for ConsistentProvider { - fn static_file_provider(&self) -> StaticFileProvider { + fn static_file_provider(&self) -> StaticFileProvider { self.storage_provider.static_file_provider() } } @@ -774,7 +785,13 @@ impl BlockIdReader for ConsistentProvider { } impl BlockReader for ConsistentProvider { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { match source { BlockSource::Any | BlockSource::Canonical => { // Note: it's fine to return the unsealed block because the caller already has @@ -791,7 +808,7 @@ impl BlockReader for ConsistentProvider { } } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.block(id), @@ -799,15 +816,19 @@ impl BlockReader for ConsistentProvider { ) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block()) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block_with_senders()) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } @@ -820,7 +841,7 @@ impl BlockReader for ConsistentProvider { return Ok(Some(Vec::new())) } - Ok(Some(block_state.block_ref().block().body.ommers.clone())) + Ok(block_state.block_ref().block().body.ommers().map(|o| o.to_vec())) }, ) } @@ -846,7 +867,7 @@ impl BlockReader for ConsistentProvider { // Iterate from the lowest block in memory until our target block for state in block_state.chain().collect::>().into_iter().rev() { - let block_tx_count = state.block_ref().block.body.transactions.len() as u64; + let block_tx_count = state.block_ref().block.body.transactions().len() as u64; if state.block_ref().block().number == number { stored_indices.tx_count = block_tx_count; } else { @@ -869,7 +890,7 @@ impl BlockReader for ConsistentProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.block_with_senders(id, transaction_kind), @@ -881,7 +902,7 @@ impl BlockReader for ConsistentProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.sealed_block_with_senders(id, transaction_kind), @@ -889,7 +910,7 @@ impl BlockReader for ConsistentProvider { ) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.block_range(range), @@ -901,7 +922,7 @@ impl BlockReader for ConsistentProvider { fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.block_with_senders_range(range), @@ -913,7 
+934,7 @@ impl BlockReader for ConsistentProvider { fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.sealed_block_with_senders_range(range), @@ -924,6 +945,8 @@ impl BlockReader for ConsistentProvider { } impl TransactionsProvider for ConsistentProvider { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.get_in_memory_or_storage_by_tx( tx_hash.into(), @@ -932,29 +955,36 @@ impl TransactionsProvider for ConsistentProvider { ) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.get_in_memory_or_storage_by_tx( id.into(), |provider| provider.transaction_by_id(id), |tx_index, _, block_state| { - Ok(block_state.block_ref().block().body.transactions.get(tx_index).cloned()) + Ok(block_state + .block_ref() + .block() + .body + .transactions() + .get(tx_index) + .cloned() + .map(Into::into)) }, ) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_in_memory_or_storage_by_tx( id.into(), - |provider| provider.transaction_by_id_no_hash(id), + |provider| provider.transaction_by_id_unhashed(id), |tx_index, _, block_state| { Ok(block_state .block_ref() .block() .body - .transactions + .transactions() .get(tx_index) .cloned() .map(Into::into)) @@ -962,7 +992,7 @@ impl TransactionsProvider for ConsistentProvider { ) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(tx) = self.head_block.as_ref().and_then(|b| b.transaction_on_chain(hash)) { return Ok(Some(tx)) } @@ -973,7 +1003,7 @@ impl TransactionsProvider for ConsistentProvider { fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { if let Some((tx, meta)) = self.head_block.as_ref().and_then(|b| b.transaction_meta_on_chain(tx_hash)) { @@ -994,22 +1024,22 @@ impl TransactionsProvider for ConsistentProvider { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( id, |provider| provider.transactions_by_block(id), - |block_state| Ok(Some(block_state.block_ref().block().body.transactions.clone())), + |block_state| Ok(Some(block_state.block_ref().block().body().transactions().to_vec())), ) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.transactions_by_block_range(range), - |block_state, _| Some(block_state.block_ref().block().body.transactions.clone()), + |block_state, _| Some(block_state.block_ref().block().body().transactions().to_vec()), |_| true, ) } @@ -1017,16 +1047,12 @@ impl TransactionsProvider for ConsistentProvider { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_in_memory_or_storage_by_tx_range( range, |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), |index_range, block_state| { - Ok(block_state.block_ref().block().body.transactions[index_range] - .iter() - .cloned() - .map(Into::into) - .collect()) + 
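Several helpers above translate a global `TxNumber` into a (block, local index) pair by walking the in-memory chain oldest-first and subtracting each block's `body.transactions().len()`. The arithmetic in isolation (offsets are relative to the first in-memory transaction in this sketch):

/// Maps a tx offset (relative to the first in-memory transaction) to
/// `(block_offset, index_within_block)`, or `None` if it lies beyond the
/// in-memory chain and must be served from storage.
fn locate_tx(mut tx_offset: u64, tx_counts_oldest_first: &[u64]) -> Option<(usize, u64)> {
    for (block_offset, count) in tx_counts_oldest_first.iter().enumerate() {
        if tx_offset < *count {
            return Some((block_offset, tx_offset))
        }
        tx_offset -= count;
    }
    None
}

fn main() {
    // Blocks holding 2, 0 and 3 transactions: offset 3 is tx #1 of block 2.
    assert_eq!(locate_tx(3, &[2, 0, 3]), Some((2, 1)));
    // Offset 5 is past the in-memory chain -> fall back to the database.
    assert_eq!(locate_tx(5, &[2, 0, 3]), None);
}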
Ok(block_state.block_ref().block().body.transactions()[index_range].to_vec()) }, ) } @@ -1052,7 +1078,9 @@ impl TransactionsProvider for ConsistentProvider { } impl ReceiptProvider for ConsistentProvider { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.get_in_memory_or_storage_by_tx( id.into(), |provider| provider.receipt(id), @@ -1062,7 +1090,7 @@ impl ReceiptProvider for ConsistentProvider { ) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { for block_state in self.head_block.iter().flat_map(|b| b.chain()) { let executed_block = block_state.block_ref(); let block = executed_block.block(); @@ -1070,12 +1098,13 @@ impl ReceiptProvider for ConsistentProvider { // assuming 1:1 correspondence between transactions and receipts debug_assert_eq!( - block.body.transactions.len(), + block.body.transactions().len(), receipts.len(), "Mismatch between transaction and receipt count" ); - if let Some(tx_index) = block.body.transactions.iter().position(|tx| tx.hash() == hash) + if let Some(tx_index) = + block.body.transactions().iter().position(|tx| tx.trie_hash() == hash) { // safe to use tx_index for receipts due to 1:1 correspondence return Ok(receipts.get(tx_index).cloned()); @@ -1085,7 +1114,10 @@ impl ReceiptProvider for ConsistentProvider { self.storage_provider.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( block, |db_provider| db_provider.receipts_by_block(block), @@ -1096,7 +1128,7 @@ impl ReceiptProvider for ConsistentProvider { fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_in_memory_or_storage_by_tx_range( range, |db_provider, db_range| db_provider.receipts_by_tx_range(db_range), @@ -1108,7 +1140,7 @@ impl ReceiptProvider for ConsistentProvider { } impl ReceiptProviderIdExt for ConsistentProvider { - fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { match block { BlockId::Hash(rpc_block_hash) => { let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; @@ -1153,7 +1185,7 @@ impl WithdrawalsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.withdrawals_by_block(id, timestamp), - |block_state| Ok(block_state.block_ref().block().body.withdrawals.clone()), + |block_state| Ok(block_state.block_ref().block().body.withdrawals().cloned()), ) } @@ -1168,8 +1200,8 @@ impl WithdrawalsProvider for ConsistentProvider { .block_ref() .block() .body - .withdrawals - .clone() + .withdrawals() + .cloned() .and_then(|mut w| w.pop())) }, ) @@ -1276,7 +1308,7 @@ impl ChainSpecProvider for ConsistentProvider { } impl BlockReaderIdExt for ConsistentProvider { - fn block_by_id(&self, id: BlockId) -> ProviderResult> { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { BlockId::Number(num) => self.block_by_number_or_tag(num), BlockId::Hash(hash) => { @@ -1324,34 +1356,20 @@ impl BlockReaderIdExt for ConsistentProvider { Ok(self.canonical_in_memory_state.get_finalized_header()) } BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()), - BlockNumberOrTag::Earliest => 
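`receipt_by_hash` above relies on the 1:1 correspondence between a block's transactions and its receipts: once the transaction's position is found, the same index is used in the receipt list (guarded by a `debug_assert_eq!` on the two lengths). The lookup in isolation, with strings standing in for hashes and receipts:

/// Finds a receipt by transaction hash across in-memory blocks, where each
/// block is (tx hashes, receipts) with matching order and length.
fn receipt_by_hash<'a>(
    blocks: &'a [(Vec<&'a str>, Vec<&'a str>)],
    hash: &str,
) -> Option<&'a str> {
    for (tx_hashes, receipts) in blocks {
        debug_assert_eq!(
            tx_hashes.len(),
            receipts.len(),
            "Mismatch between transaction and receipt count"
        );
        if let Some(tx_index) = tx_hashes.iter().position(|h| *h == hash) {
            // Safe to reuse `tx_index` thanks to the 1:1 correspondence.
            return receipts.get(tx_index).copied()
        }
    }
    None // the real code falls back to the storage provider here
}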
self.header_by_number(0)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), + BlockNumberOrTag::Earliest => self + .header_by_number(0)? + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()), - BlockNumberOrTag::Number(num) => self.header_by_number(num)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), + BlockNumberOrTag::Number(num) => self + .header_by_number(num)? + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), } } fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { Ok(match id { BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }), + BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), }) } @@ -1483,6 +1501,8 @@ impl AccountReader for ConsistentProvider { } impl StateReader for ConsistentProvider { + type Receipt = ReceiptTy; + /// Re-constructs the [`ExecutionOutcome`] from in-memory and database state, if necessary. /// /// If data for the block does not exist, this will return [`None`]. @@ -1492,7 +1512,10 @@ impl StateReader for ConsistentProvider { /// inconsistent. Currently this can safely be called within the blockchain tree thread, /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the /// first place. - fn get_state(&self, block: BlockNumber) -> ProviderResult> { + fn get_state( + &self, + block: BlockNumber, + ) -> ProviderResult>> { if let Some(state) = self.head_block.as_ref().and_then(|b| b.block_on_chain(block.into())) { let state = state.block_ref().execution_outcome().clone(); Ok(Some(state)) diff --git a/crates/storage/provider/src/providers/database/chain.rs b/crates/storage/provider/src/providers/database/chain.rs new file mode 100644 index 000000000000..57bc2e0b5ce6 --- /dev/null +++ b/crates/storage/provider/src/providers/database/chain.rs @@ -0,0 +1,42 @@ +use crate::{providers::NodeTypesForProvider, DatabaseProvider}; +use reth_db::transaction::{DbTx, DbTxMut}; +use reth_node_types::FullNodePrimitives; +use reth_primitives::EthPrimitives; +use reth_storage_api::{ChainStorageReader, ChainStorageWriter, EthStorage}; + +/// Trait that provides access to implementations of [`ChainStorage`] +pub trait ChainStorage: Send + Sync { + /// Provides access to the chain reader. + fn reader(&self) -> impl ChainStorageReader, Primitives> + where + TX: DbTx + 'static, + Types: NodeTypesForProvider; + + /// Provides access to the chain writer. 
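The hunks above collapse the manual `seal_slow()` → `into_parts()` → `SealedHeader::new(...)` sequence into a single `SealedHeader::seal(h)` call. A stand-in showing the shape of the helper (toy hash function, not the real keccak-based sealing):

#[derive(Clone)]
struct Header {
    number: u64,
}

struct SealedHeader {
    header: Header,
    hash: u64,
}

impl SealedHeader {
    fn new(header: Header, hash: u64) -> Self {
        Self { header, hash }
    }

    /// Hash the header and wrap it in one step; call sites shrink from a
    /// three-line dance to `SealedHeader::seal(h)`.
    fn seal(header: Header) -> Self {
        let hash = header.number.wrapping_mul(0x9E3779B97F4A7C15); // toy "hash"
        Self::new(header, hash)
    }
}

fn main() {
    let sealed = SealedHeader::seal(Header { number: 42 });
    assert_eq!(sealed.header.number, 42);
    let _ = sealed.hash;
}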
+ fn writer(&self) -> impl ChainStorageWriter, Primitives> + where + TX: DbTxMut + DbTx + 'static, + Types: NodeTypesForProvider; +} + +impl ChainStorage for EthStorage { + fn reader( + &self, + ) -> impl ChainStorageReader, EthPrimitives> + where + TX: DbTx + 'static, + Types: NodeTypesForProvider, + { + self + } + + fn writer( + &self, + ) -> impl ChainStorageWriter, EthPrimitives> + where + TX: DbTxMut + DbTx + 'static, + Types: NodeTypesForProvider, + { + self + } +} diff --git a/crates/storage/provider/src/providers/database/metrics.rs b/crates/storage/provider/src/providers/database/metrics.rs index 7e9ee7202c01..4ee8f1ce5b12 100644 --- a/crates/storage/provider/src/providers/database/metrics.rs +++ b/crates/storage/provider/src/providers/database/metrics.rs @@ -22,14 +22,6 @@ impl Default for DurationsRecorder { } impl DurationsRecorder { - /// Saves the provided duration for future logging and instantly reports as a metric with - /// `action` label. - pub(crate) fn record_duration(&mut self, action: Action, duration: Duration) { - self.actions.push((action, duration)); - self.current_metrics.record_duration(action, duration); - self.latest = Some(self.start.elapsed()); - } - /// Records the duration since last record, saves it for future logging and instantly reports as /// a metric with `action` label. pub(crate) fn record_relative(&mut self, action: Action) { @@ -56,11 +48,6 @@ pub(crate) enum Action { InsertHeaders, InsertHeaderNumbers, InsertHeaderTerminalDifficulties, - InsertBlockOmmers, - InsertTransactionSenders, - InsertTransactions, - InsertTransactionHashNumbers, - InsertBlockWithdrawals, InsertBlockBodyIndices, InsertTransactionBlocks, GetNextTxNum, @@ -95,16 +82,6 @@ struct DatabaseProviderMetrics { insert_header_numbers: Histogram, /// Duration of insert header TD insert_header_td: Histogram, - /// Duration of insert block ommers - insert_block_ommers: Histogram, - /// Duration of insert tx senders - insert_tx_senders: Histogram, - /// Duration of insert transactions - insert_transactions: Histogram, - /// Duration of insert transaction hash numbers - insert_tx_hash_numbers: Histogram, - /// Duration of insert block withdrawals - insert_block_withdrawals: Histogram, /// Duration of insert block body indices insert_block_body_indices: Histogram, /// Duration of insert transaction blocks @@ -131,11 +108,6 @@ impl DatabaseProviderMetrics { Action::InsertHeaders => self.insert_headers.record(duration), Action::InsertHeaderNumbers => self.insert_header_numbers.record(duration), Action::InsertHeaderTerminalDifficulties => self.insert_header_td.record(duration), - Action::InsertBlockOmmers => self.insert_block_ommers.record(duration), - Action::InsertTransactionSenders => self.insert_tx_senders.record(duration), - Action::InsertTransactions => self.insert_transactions.record(duration), - Action::InsertTransactionHashNumbers => self.insert_tx_hash_numbers.record(duration), - Action::InsertBlockWithdrawals => self.insert_block_withdrawals.record(duration), Action::InsertBlockBodyIndices => self.insert_block_body_indices.record(duration), Action::InsertTransactionBlocks => self.insert_tx_blocks.record(duration), Action::GetNextTxNum => self.get_next_tx_num.record(duration), diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index bb532329ee31..3c22a1a73a23 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -7,6 +7,7 @@ use 
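The new `ChainStorage` trait above lets each node type supply its own reader and writer for chain-specific block bodies, with `EthStorage` simply returning itself for both roles. A non-generic, self-contained sketch of that indirection (the real trait is parameterized over the transaction and node types):

type ProviderResult<T> = Result<T, String>;

trait ChainStorageReader {
    type Body;
    fn read_body(&self, block_number: u64) -> ProviderResult<Option<Self::Body>>;
}

/// Stand-in for `ChainStorage`: hands out access to the reader.
trait ChainStorage {
    type Reader: ChainStorageReader;
    fn reader(&self) -> &Self::Reader;
}

#[derive(Default)]
struct EthStorage;

impl ChainStorageReader for EthStorage {
    type Body = Vec<String>; // e.g. the block's transactions
    fn read_body(&self, _block_number: u64) -> ProviderResult<Option<Self::Body>> {
        Ok(Some(Vec::new()))
    }
}

/// As in the diff, the Ethereum storage is its own reader.
impl ChainStorage for EthStorage {
    type Reader = Self;
    fn reader(&self) -> &Self {
        self
    }
}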
crate::{ PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use alloy_consensus::Header; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, @@ -18,14 +19,14 @@ use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::NodeTypesWithDB; +use reth_node_types::{BlockTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ - Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, + TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::TryIntoHistoricalStateProvider; +use reth_storage_api::{NodePrimitivesProvider, TryIntoHistoricalStateProvider}; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ @@ -43,6 +44,9 @@ use super::ProviderNodeTypes; mod metrics; +mod chain; +pub use chain::*; + /// A common provider that fetches data from a database or static file. /// /// This provider implements most provider or provider factory traits. @@ -52,22 +56,25 @@ pub struct ProviderFactory { /// Chain spec chain_spec: Arc, /// Static File Provider - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, /// Optional pruning configuration prune_modes: PruneModes, + /// The node storage handler. + storage: Arc, } impl fmt::Debug for ProviderFactory where - N: NodeTypesWithDB, + N: NodeTypesWithDB, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let Self { db, chain_spec, static_file_provider, prune_modes } = self; + let Self { db, chain_spec, static_file_provider, prune_modes, storage } = self; f.debug_struct("ProviderFactory") .field("db", &db) .field("chain_spec", &chain_spec) .field("static_file_provider", &static_file_provider) .field("prune_modes", &prune_modes) + .field("storage", &storage) .finish() } } @@ -77,9 +84,15 @@ impl ProviderFactory { pub fn new( db: N::DB, chain_spec: Arc, - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, ) -> Self { - Self { db, chain_spec, static_file_provider, prune_modes: PruneModes::none() } + Self { + db, + chain_spec, + static_file_provider, + prune_modes: PruneModes::none(), + storage: Default::default(), + } } /// Enables metrics on the static file provider. 
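`ProviderFactory` above gains a `storage` handler, default-constructed in its constructors and cloned into every provider it creates. Since it sits behind an `Arc`, the clone is cheap and every provider shares one instance; a reduced sketch:

use std::sync::Arc;

#[derive(Default, Debug)]
struct EthStorage;

struct ProviderFactory {
    /// Shared chain-storage handler, as in the new `storage` field above.
    storage: Arc<EthStorage>,
}

struct DatabaseProvider {
    storage: Arc<EthStorage>,
}

impl ProviderFactory {
    fn new() -> Self {
        // Mirrors `storage: Default::default()` in the constructors above.
        Self { storage: Arc::default() }
    }

    /// Every provider shares the same handler via a cheap `Arc` clone.
    fn provider(&self) -> DatabaseProvider {
        DatabaseProvider { storage: self.storage.clone() }
    }
}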
@@ -113,13 +126,14 @@ impl>> ProviderFactory { path: P, chain_spec: Arc, args: DatabaseArguments, - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, ) -> RethResult { Ok(Self { db: Arc::new(init_db(path, args).map_err(RethError::msg)?), chain_spec, static_file_provider, prune_modes: PruneModes::none(), + storage: Default::default(), }) } } @@ -138,6 +152,7 @@ impl ProviderFactory { self.chain_spec.clone(), self.static_file_provider.clone(), self.prune_modes.clone(), + self.storage.clone(), )) } @@ -152,6 +167,7 @@ impl ProviderFactory { self.chain_spec.clone(), self.static_file_provider.clone(), self.prune_modes.clone(), + self.storage.clone(), ))) } @@ -159,7 +175,7 @@ impl ProviderFactory { #[track_caller] pub fn latest(&self) -> ProviderResult { trace!(target: "providers::db", "Returning latest state provider"); - Ok(Box::new(LatestStateProvider::new(self.db.tx()?, self.static_file_provider()))) + Ok(Box::new(LatestStateProvider::new(self.database_provider_ro()?))) } /// Storage provider for state at that given block @@ -186,6 +202,10 @@ impl ProviderFactory { } } +impl NodePrimitivesProvider for ProviderFactory { + type Primitives = N::Primitives; +} + impl DatabaseProviderFactory for ProviderFactory { type DB = N::DB; type Provider = DatabaseProvider<::TX, N>; @@ -202,7 +222,7 @@ impl DatabaseProviderFactory for ProviderFactory { impl StaticFileProviderFactory for ProviderFactory { /// Returns static file provider - fn static_file_provider(&self) -> StaticFileProvider { + fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() } } @@ -335,23 +355,33 @@ impl BlockNumReader for ProviderFactory { } impl BlockReader for ProviderFactory { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { self.provider()?.find_block_by_hash(hash, source) } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { self.provider()?.block(id) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { self.provider()?.pending_block() } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { self.provider()?.pending_block_with_senders() } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { self.provider()?.pending_block_and_receipts() } @@ -370,7 +400,7 @@ impl BlockReader for ProviderFactory { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.block_with_senders(id, transaction_kind) } @@ -378,35 +408,37 @@ impl BlockReader for ProviderFactory { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.sealed_block_with_senders(id, transaction_kind) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.provider()?.block_range(range) } fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) 
-> ProviderResult>> { self.provider()?.sealed_block_with_senders_range(range) } } impl TransactionsProvider for ProviderFactory { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.provider()?.transaction_id(tx_hash) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, @@ -415,26 +447,26 @@ impl TransactionsProvider for ProviderFactory { ) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, - |static_file| static_file.transaction_by_id_no_hash(id), - || self.provider()?.transaction_by_id_no_hash(id), + |static_file| static_file.transaction_by_id_unhashed(id), + || self.provider()?.transaction_by_id_unhashed(id), ) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.provider()?.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { self.provider()?.transaction_by_hash_with_meta(tx_hash) } @@ -445,21 +477,21 @@ impl TransactionsProvider for ProviderFactory { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.provider()?.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.provider()?.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.provider()?.transactions_by_tx_range(range) } @@ -476,7 +508,8 @@ impl TransactionsProvider for ProviderFactory { } impl ReceiptProvider for ProviderFactory { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Receipts, id, @@ -485,18 +518,21 @@ impl ReceiptProvider for ProviderFactory { ) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { self.provider()?.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { self.provider()?.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Receipts, to_range(range), @@ -614,6 +650,7 @@ impl Clone for ProviderFactory { chain_spec: self.chain_spec.clone(), static_file_provider: self.static_file_provider.clone(), prune_modes: self.prune_modes.clone(), + storage: self.storage.clone(), } } } @@ -625,7 +662,7 @@ mod tests { providers::{StaticFileProvider, StaticFileWriter}, test_utils::{blocks::TEST_BLOCK, create_test_provider_factory, MockNodeTypesWithDB}, BlockHashReader, BlockNumReader, BlockWriter, DBProvider, HeaderSyncGapProvider, - TransactionsProvider, + StorageLocation, TransactionsProvider, }; use alloy_primitives::{TxNumber, B256, U256}; use 
assert_matches::assert_matches; @@ -637,6 +674,7 @@ mod tests { test_utils::{create_test_static_files_dir, ERROR_TEMPDIR}, }; use reth_primitives::StaticFileSegment; + use reth_primitives_traits::SignedTransaction; use reth_prune_types::{PruneMode, PruneModes}; use reth_storage_errors::provider::ProviderError; use reth_testing_utils::generators::{self, random_block, random_header, BlockParams}; @@ -696,14 +734,20 @@ mod tests { { let provider = factory.provider_rw().unwrap(); assert_matches!( - provider.insert_block(block.clone().try_seal_with_senders().unwrap()), + provider.insert_block( + block.clone().try_seal_with_senders().unwrap(), + StorageLocation::Database + ), Ok(_) ); assert_matches!( provider.transaction_sender(0), Ok(Some(sender)) if sender == block.body.transactions[0].recover_signer().unwrap() ); - assert_matches!(provider.transaction_id(block.body.transactions[0].hash), Ok(Some(0))); + assert_matches!( + provider.transaction_id(block.body.transactions[0].hash()), + Ok(Some(0)) + ); } { @@ -714,11 +758,14 @@ mod tests { }; let provider = factory.with_prune_modes(prune_modes).provider_rw().unwrap(); assert_matches!( - provider.insert_block(block.clone().try_seal_with_senders().unwrap(),), + provider.insert_block( + block.clone().try_seal_with_senders().unwrap(), + StorageLocation::Database + ), Ok(_) ); assert_matches!(provider.transaction_sender(0), Ok(None)); - assert_matches!(provider.transaction_id(block.body.transactions[0].hash), Ok(None)); + assert_matches!(provider.transaction_id(block.body.transactions[0].hash()), Ok(None)); } } @@ -735,7 +782,10 @@ mod tests { let provider = factory.provider_rw().unwrap(); assert_matches!( - provider.insert_block(block.clone().try_seal_with_senders().unwrap()), + provider.insert_block( + block.clone().try_seal_with_senders().unwrap(), + StorageLocation::Database + ), Ok(_) ); @@ -753,21 +803,6 @@ mod tests { let db_senders = provider.senders_by_tx_range(range); assert_eq!(db_senders, Ok(vec![])); - - let result = provider.take_block_transaction_range(0..=0); - assert_eq!( - result, - Ok(vec![( - 0, - block - .body - .transactions - .iter() - .cloned() - .map(|tx| tx.into_ecrecovered().unwrap()) - .collect() - )]) - ) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 9ba20306f379..cfbe20cf4b44 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1,26 +1,36 @@ use crate::{ bundle_state::StorageRevertsIter, - providers::{database::metrics, static_file::StaticFileWriter, StaticFileProvider}, + providers::{ + database::{chain::ChainStorage, metrics}, + static_file::StaticFileWriter, + NodeTypesForProvider, StaticFileProvider, + }, to_range, traits::{ AccountExtReader, BlockSource, ChangeSetReader, ReceiptProvider, StageCheckpointWriter, }, - writer::UnifiedStorageWriter, - AccountReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, - BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, DBProvider, EvmEnvProvider, - HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HistoricalStateProvider, - HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, - OriginalValuesKnown, ProviderError, PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, - StageCheckpointReader, StateChangeWriter, StateProviderBox, StateReader, StateWriter, - StaticFileProviderFactory, 
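`insert_block` in the tests above now takes a `StorageLocation` selecting which backend the write targets. A sketch of what such a selector looks like, consistent with the `remove_from.database()` / `remove_from.static_files()` checks further below (the exact variant set is assumed from those call sites):

/// Where a write (or removal) should be applied.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum StorageLocation {
    /// Database tables only.
    Database,
    /// Static files only.
    StaticFiles,
    /// Both backends.
    Both,
}

impl StorageLocation {
    fn database(&self) -> bool {
        matches!(self, Self::Database | Self::Both)
    }

    fn static_files(&self) -> bool {
        matches!(self, Self::StaticFiles | Self::Both)
    }
}

fn main() {
    let target = StorageLocation::Database;
    assert!(target.database() && !target.static_files());
}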
StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, + AccountReader, BlockBodyWriter, BlockExecutionWriter, BlockHashReader, BlockNumReader, + BlockReader, BlockWriter, BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, + DBProvider, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, + HeaderSyncGapProvider, HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, + LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, + PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, StageCheckpointReader, + StateCommitmentProvider, StateProviderBox, StateWriter, StaticFileProviderFactory, StatsReader, + StorageLocation, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, + TransactionsProviderExt, TrieWriter, WithdrawalsProvider, }; +use alloy_consensus::Header; use alloy_eips::{ + eip2718::Encodable2718, eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, }; -use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; -use itertools::{izip, Itertools}; +use alloy_primitives::{ + keccak256, + map::{hash_map, HashMap, HashSet}, + Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256, +}; +use itertools::Itertools; use rayon::slice::ParallelSliceMut; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_db::{ @@ -31,24 +41,27 @@ use reth_db_api::{ database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, - ShardedKey, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, + ShardedKey, StoredBlockBodyIndices, }, table::Table, transaction::{DbTx, DbTxMut}, - DatabaseError, DbTxUnwindExt, + DatabaseError, }; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; -use reth_node_types::NodeTypes; +use reth_node_types::{BlockTy, BodyTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives::{ - Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, - SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, - TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, + Account, BlockExt, BlockWithSenders, Bytecode, GotExpected, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, }; +use reth_primitives_traits::{Block as _, BlockBody as _, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider}; +use reth_storage_api::{ + BlockBodyReader, NodePrimitivesProvider, StateProvider, StorageChangeSetReader, + TryIntoHistoricalStateProvider, +}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, @@ -62,14 +75,13 @@ use revm::{ }; use std::{ cmp::Ordering, - collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, + collections::{BTreeMap, BTreeSet}, fmt::Debug, ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive}, sync::{mpsc, Arc}, - time::{Duration, Instant}, }; use tokio::sync::watch; -use tracing::{debug, error, trace, warn}; +use tracing::{debug, trace}; /// A 
[`DatabaseProvider`] that holds a read-only database transaction. pub type DatabaseProviderRO = DatabaseProvider<::TX, N>; @@ -134,9 +146,11 @@ pub struct DatabaseProvider { /// Chain spec chain_spec: Arc, /// Static File provider - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, /// Pruning configuration prune_modes: PruneModes, + /// Node storage handler. + storage: Arc, } impl DatabaseProvider { @@ -147,10 +161,10 @@ impl DatabaseProvider { } impl DatabaseProvider { - /// State provider for latest block - pub fn latest<'a>(&'a self) -> ProviderResult> { + /// State provider for latest state + pub fn latest<'a>(&'a self) -> Box { trace!(target: "providers::db", "Returning latest state provider"); - Ok(Box::new(LatestStateProviderRef::new(&self.tx, self.static_file_provider.clone()))) + Box::new(LatestStateProviderRef::new(self)) } /// Storage provider for state at that given block hash @@ -163,10 +177,7 @@ impl DatabaseProvider { if block_number == self.best_block_number().unwrap_or_default() && block_number == self.last_block_number().unwrap_or_default() { - return Ok(Box::new(LatestStateProviderRef::new( - &self.tx, - self.static_file_provider.clone(), - ))) + return Ok(Box::new(LatestStateProviderRef::new(self))) } // +1 as the changeset that we want is the one that was applied after this block. @@ -177,11 +188,7 @@ impl DatabaseProvider { let storage_history_prune_checkpoint = self.get_prune_checkpoint(PruneSegment::StorageHistory)?; - let mut state_provider = HistoricalStateProviderRef::new( - &self.tx, - block_number, - self.static_file_provider.clone(), - ); + let mut state_provider = HistoricalStateProviderRef::new(self, block_number); // If we pruned account or storage history, we can't return state on every historical block. // Instead, we should cap it at the latest prune checkpoint for corresponding prune segment. @@ -202,11 +209,21 @@ impl DatabaseProvider { Ok(Box::new(state_provider)) } + + #[cfg(feature = "test-utils")] + /// Sets the prune modes for provider. + pub fn set_prune_modes(&mut self, prune_modes: PruneModes) { + self.prune_modes = prune_modes; + } +} + +impl NodePrimitivesProvider for DatabaseProvider { + type Primitives = N::Primitives; } impl StaticFileProviderFactory for DatabaseProvider { /// Returns a static file provider - fn static_file_provider(&self) -> StaticFileProvider { + fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() } } @@ -226,10 +243,11 @@ impl DatabaseProvider { pub const fn new_rw( tx: TX, chain_spec: Arc, - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, prune_modes: PruneModes, + storage: Arc, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes } + Self { tx, chain_spec, static_file_provider, prune_modes, storage } } } @@ -239,6 +257,123 @@ impl AsRef for DatabaseProvider { } } +impl DatabaseProvider { + /// Unwinds trie state for the given range. + /// + /// This includes calculating the resulted state root and comparing it with the parent block + /// state root. + pub fn unwind_trie_state_range( + &self, + range: RangeInclusive, + ) -> ProviderResult<()> { + let changed_accounts = self + .tx + .cursor_read::()? + .walk_range(range.clone())? + .collect::, _>>()?; + + // Unwind account hashes. Add changed accounts to account prefix set. 
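`unwind_trie_state_range` above gathers every account and storage slot touched in the unwound range into trie prefix sets, so the incremental state-root computation that follows only revisits the affected subtries. The bookkeeping, isolated with toy key types (`unpack` mimics `Nibbles::unpack`):

use std::collections::{HashMap, HashSet};

type HashedKey = [u8; 4]; // toy stand-in for a 32-byte hashed key

#[derive(Default)]
struct TriePrefixSets {
    account_prefix_set: HashSet<Vec<u8>>,
    storage_prefix_sets: HashMap<HashedKey, HashSet<Vec<u8>>>,
    destroyed_accounts: HashSet<HashedKey>,
}

/// One nibble per element, like `Nibbles::unpack`.
fn unpack(key: &HashedKey) -> Vec<u8> {
    key.iter().flat_map(|b| [b >> 4, b & 0x0f]).collect()
}

fn collect_prefix_sets(
    changed_accounts: &[(HashedKey, bool /* destroyed? */)],
    changed_storages: &[(HashedKey, HashedKey /* slot */)],
) -> TriePrefixSets {
    let mut sets = TriePrefixSets::default();
    for (hashed_address, destroyed) in changed_accounts {
        sets.account_prefix_set.insert(unpack(hashed_address));
        if *destroyed {
            sets.destroyed_accounts.insert(*hashed_address);
        }
    }
    for (hashed_address, hashed_slot) in changed_storages {
        // A storage change also dirties the owning account's subtrie root.
        sets.account_prefix_set.insert(unpack(hashed_address));
        sets.storage_prefix_sets.entry(*hashed_address).or_default().insert(unpack(hashed_slot));
    }
    sets
}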
+ let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; + let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); + let mut destroyed_accounts = HashSet::default(); + for (hashed_address, account) in hashed_addresses { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + if account.is_none() { + destroyed_accounts.insert(hashed_address); + } + } + + // Unwind account history indices. + self.unwind_account_history_indices(changed_accounts.iter())?; + let storage_range = BlockNumberAddress::range(range.clone()); + + let changed_storages = self + .tx + .cursor_read::()? + .walk_range(storage_range)? + .collect::, _>>()?; + + // Unwind storage hashes. Add changed account and storage keys to corresponding prefix + // sets. + let mut storage_prefix_sets = HashMap::::default(); + let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; + for (hashed_address, hashed_slots) in storage_entries { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); + for slot in hashed_slots { + storage_prefix_set.insert(Nibbles::unpack(slot)); + } + storage_prefix_sets.insert(hashed_address, storage_prefix_set.freeze()); + } + + // Unwind storage history indices. + self.unwind_storage_history_indices(changed_storages.iter().copied())?; + + // Calculate the reverted merkle root. + // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets + // are pre-loaded. + let prefix_sets = TriePrefixSets { + account_prefix_set: account_prefix_set.freeze(), + storage_prefix_sets, + destroyed_accounts, + }; + let (new_state_root, trie_updates) = StateRoot::from_tx(&self.tx) + .with_prefix_sets(prefix_sets) + .root_with_updates() + .map_err(Into::::into)?; + + let parent_number = range.start().saturating_sub(1); + let parent_state_root = self + .header_by_number(parent_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? + .state_root; + + // state root should be always correct as we are reverting state. + // but for sake of double verification we will check it again. + if new_state_root != parent_state_root { + let parent_hash = self + .block_hash(parent_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; + return Err(ProviderError::UnwindStateRootMismatch(Box::new(RootMismatch { + root: GotExpected { got: new_state_root, expected: parent_state_root }, + block_number: parent_number, + block_hash: parent_hash, + }))) + } + self.write_trie_updates(&trie_updates)?; + + Ok(()) + } + + /// Removes receipts from all transactions starting with provided number (inclusive). + fn remove_receipts_from( + &self, + from_tx: TxNumber, + last_block: BlockNumber, + remove_from: StorageLocation, + ) -> ProviderResult<()> { + if remove_from.database() { + // iterate over block body and remove receipts + self.remove::>>(from_tx..)?; + } + + if remove_from.static_files() && !self.prune_modes.has_receipts_pruning() { + let static_file_receipt_num = + self.static_file_provider.get_highest_static_file_tx(StaticFileSegment::Receipts); + + let to_delete = static_file_receipt_num + .map(|static_num| (static_num + 1).saturating_sub(from_tx)) + .unwrap_or_default(); + + self.static_file_provider + .latest_writer(StaticFileSegment::Receipts)? 
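In `remove_receipts_from` above, the number of rows to prune from the receipts static file is derived from the segment's highest tx number: everything at or above `from_tx` goes. The arithmetic, checked in isolation:

/// Rows to delete from the receipts static-file segment, given the highest
/// tx number it currently holds (if any) and the first tx number to remove.
fn receipts_to_delete(highest_static_tx: Option<u64>, from_tx: u64) -> u64 {
    highest_static_tx
        .map(|static_num| (static_num + 1).saturating_sub(from_tx))
        .unwrap_or_default()
}

fn main() {
    assert_eq!(receipts_to_delete(Some(9), 5), 5); // rows 5..=9
    assert_eq!(receipts_to_delete(Some(9), 12), 0); // nothing at or above 12
    assert_eq!(receipts_to_delete(None, 0), 0); // empty segment
}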
+ .prune_receipts(to_delete, last_block)?; + } + + Ok(()) + } +} + impl TryIntoHistoricalStateProvider for DatabaseProvider { fn try_into_history_at_block( self, @@ -247,7 +382,7 @@ impl TryIntoHistoricalStateProvider for Databa if block_number == self.best_block_number().unwrap_or_default() && block_number == self.last_block_number().unwrap_or_default() { - return Ok(Box::new(LatestStateProvider::new(self.tx, self.static_file_provider))) + return Ok(Box::new(LatestStateProvider::new(self))) } // +1 as the changeset that we want is the one that was applied after this block. @@ -258,8 +393,7 @@ impl TryIntoHistoricalStateProvider for Databa let storage_history_prune_checkpoint = self.get_prune_checkpoint(PruneSegment::StorageHistory)?; - let mut state_provider = - HistoricalStateProvider::new(self.tx, block_number, self.static_file_provider); + let mut state_provider = HistoricalStateProvider::new(self, block_number); // If we pruned account or storage history, we can't return state on every historical block. // Instead, we should cap it at the latest prune checkpoint for corresponding prune segment. @@ -282,15 +416,17 @@ impl TryIntoHistoricalStateProvider for Databa } } -impl + 'static> - DatabaseProvider -{ +impl StateCommitmentProvider for DatabaseProvider { + type StateCommitment = N::StateCommitment; +} + +impl DatabaseProvider { // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. // #[cfg(any(test, feature = "test-utils"))] /// Inserts an historical block. **Used for setting up test environments** pub fn insert_historical_block( &self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders<::Block>, ) -> ProviderResult { let ttd = if block.number == 0 { block.difficulty @@ -314,7 +450,7 @@ impl + writer.append_header(block.header.as_ref(), ttd, &block.hash())?; - self.insert_block(block) + self.insert_block(block, StorageLocation::Database) } } @@ -365,15 +501,16 @@ where Ok(Vec::new()) } -impl DatabaseProvider { +impl DatabaseProvider { /// Creates a provider with an inner read-only transaction. pub const fn new( tx: TX, chain_spec: Arc, - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, prune_modes: PruneModes, + storage: Arc, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes } + Self { tx, chain_spec, static_file_provider, prune_modes, storage } } /// Consume `DbTx` or `DbTxMut`. @@ -395,14 +532,16 @@ impl DatabaseProvider { pub fn chain_spec(&self) -> &N::ChainSpec { &self.chain_spec } +} +impl DatabaseProvider { fn transactions_by_tx_range_with_cursor( &self, range: impl RangeBounds, cursor: &mut C, - ) -> ProviderResult> + ) -> ProviderResult>> where - C: DbCursorRO, + C: DbCursorRO>>, { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Transactions, @@ -416,7 +555,7 @@ impl DatabaseProvider { fn block_with_senders( &self, id: BlockHashOrNumber, - transaction_kind: TransactionVariant, + _transaction_kind: TransactionVariant, header_by_number: HF, construct_block: BF, ) -> ProviderResult> @@ -424,21 +563,11 @@ impl DatabaseProvider { N::ChainSpec: EthereumHardforks, H: AsRef
<Header>, HF: FnOnce(BlockNumber) -> ProviderResult<Option<H>>, - BF: FnOnce( - H, - Vec<TransactionSigned>, - Vec<Address>, - Vec<Header>, - Option<Withdrawals>, - ) -> ProviderResult<Option<B>>, + BF: FnOnce(H, BodyTy<N>, Vec<Address>
) -> ProviderResult>, { let Some(block_number) = self.convert_hash_or_number(id)? else { return Ok(None) }; let Some(header) = header_by_number(block_number)? else { return Ok(None) }; - let ommers = self.ommers(block_number.into())?.unwrap_or_default(); - let withdrawals = - self.withdrawals_by_block(block_number.into(), header.as_ref().timestamp)?; - // Get the block body // // If the body indices are not found, this means that the transactions either do not exist @@ -455,20 +584,14 @@ impl DatabaseProvider { (self.transactions_by_tx_range(tx_range.clone())?, self.senders_by_tx_range(tx_range)?) }; - let body = transactions - .into_iter() - .map(|tx| match transaction_kind { - TransactionVariant::NoHash => TransactionSigned { - // Caller explicitly asked for no hash, so we don't calculate it - hash: B256::ZERO, - signature: tx.signature, - transaction: tx.transaction, - }, - TransactionVariant::WithHash => tx.with_hash(), - }) - .collect(); + let body = self + .storage + .reader() + .read_block_bodies(self, vec![(header.as_ref(), transactions)])? + .pop() + .ok_or(ProviderError::InvalidStorageOutput)?; - construct_block(header, body, senders, ommers, withdrawals) + construct_block(header, body, senders) } /// Returns a range of blocks from the database. @@ -490,7 +613,7 @@ impl DatabaseProvider { N::ChainSpec: EthereumHardforks, H: AsRef
<Header>, HF: FnOnce(RangeInclusive<BlockNumber>) -> ProviderResult<Vec<H>>, - F: FnMut(H, Range<TxNumber>, Vec<Header>
, Option) -> ProviderResult, + F: FnMut(H, BodyTy, Range) -> ProviderResult, { if range.is_empty() { return Ok(Vec::new()) @@ -500,50 +623,41 @@ impl DatabaseProvider { let mut blocks = Vec::with_capacity(len); let headers = headers_range(range)?; - let mut ommers_cursor = self.tx.cursor_read::()?; - let mut withdrawals_cursor = self.tx.cursor_read::()?; + let mut tx_cursor = self.tx.cursor_read::>>()?; let mut block_body_cursor = self.tx.cursor_read::()?; + let mut present_headers = Vec::new(); for header in headers { - let header_ref = header.as_ref(); // If the body indices are not found, this means that the transactions either do // not exist in the database yet, or they do exit but are // not indexed. If they exist but are not indexed, we don't // have enough information to return the block anyways, so // we skip the block. if let Some((_, block_body_indices)) = - block_body_cursor.seek_exact(header_ref.number)? + block_body_cursor.seek_exact(header.as_ref().number)? { let tx_range = block_body_indices.tx_num_range(); - - // If we are past shanghai, then all blocks should have a withdrawal list, - // even if empty - let withdrawals = - if self.chain_spec.is_shanghai_active_at_timestamp(header_ref.timestamp) { - withdrawals_cursor - .seek_exact(header_ref.number)? - .map(|(_, w)| w.withdrawals) - .unwrap_or_default() - .into() - } else { - None - }; - let ommers = - if self.chain_spec.final_paris_total_difficulty(header_ref.number).is_some() { - Vec::new() - } else { - ommers_cursor - .seek_exact(header_ref.number)? - .map(|(_, o)| o.ommers) - .unwrap_or_default() - }; - - if let Ok(b) = assemble_block(header, tx_range, ommers, withdrawals) { - blocks.push(b); - } + present_headers.push((header, tx_range)); } } + let mut inputs = Vec::new(); + for (header, tx_range) in &present_headers { + let transactions = if tx_range.is_empty() { + Vec::new() + } else { + self.transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? + }; + + inputs.push((header.as_ref(), transactions)); + } + + let bodies = self.storage.reader().read_block_bodies(self, inputs)?; + + for ((header, tx_range), body) in present_headers.into_iter().zip(bodies) { + blocks.push(assemble_block(header, body, tx_range)?); + } + Ok(blocks) } @@ -567,34 +681,22 @@ impl DatabaseProvider { N::ChainSpec: EthereumHardforks, H: AsRef
<Header>, HF: Fn(RangeInclusive<BlockNumber>) -> ProviderResult<Vec<H>>, - BF: Fn( - H, - Vec<TransactionSigned>, - Vec<Header>, - Option<Withdrawals>, - Vec<Address>, - ) -> ProviderResult<B>, + BF: Fn(H, BodyTy<N>, Vec<Address>
) -> ProviderResult, { - let mut tx_cursor = self.tx.cursor_read::()?; let mut senders_cursor = self.tx.cursor_read::()?; - self.block_range(range, headers_range, |header, tx_range, ommers, withdrawals| { - let (body, senders) = if tx_range.is_empty() { - (Vec::new(), Vec::new()) + self.block_range(range, headers_range, |header, body, tx_range| { + let senders = if tx_range.is_empty() { + Vec::new() } else { - let body = self - .transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? - .into_iter() - .map(Into::into) - .collect::>(); // fetch senders from the senders table let known_senders = senders_cursor .walk_range(tx_range.clone())? .collect::, _>>()?; - let mut senders = Vec::with_capacity(body.len()); - for (tx_num, tx) in tx_range.zip(body.iter()) { + let mut senders = Vec::with_capacity(body.transactions().len()); + for (tx_num, tx) in tx_range.zip(body.transactions()) { match known_senders.get(&tx_num) { None => { // recover the sender from the transaction if not found @@ -607,101 +709,13 @@ impl DatabaseProvider { } } - (body, senders) + senders }; - assemble_block(header, body, ommers, withdrawals, senders) + assemble_block(header, body, senders) }) } - /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. - /// - /// 1. Iterate over the [`BlockBodyIndices`][tables::BlockBodyIndices] table to get all the - /// transaction ids. - /// 2. Iterate over the [`StorageChangeSets`][tables::StorageChangeSets] table and the - /// [`AccountChangeSets`][tables::AccountChangeSets] tables in reverse order to reconstruct - /// the changesets. - /// - In order to have both the old and new values in the changesets, we also access the - /// plain state tables. - /// 3. While iterating over the changeset tables, if we encounter a new account or storage slot, - /// we: - /// 1. Take the old value from the changeset - /// 2. Take the new value from the plain state - /// 3. Save the old value to the local state - /// 4. While iterating over the changeset tables, if we encounter an account/storage slot we - /// have seen before we: - /// 1. Take the old value from the changeset - /// 2. Take the new value from the local state - /// 3. Set the local state to the value in the changeset - /// - /// If the range is empty, or there are no blocks for the given range, then this returns `None`. - pub fn get_state( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - if range.is_empty() { - return Ok(None) - } - let start_block_number = *range.start(); - - // We are not removing block meta as it is used to get block changesets. - let block_bodies = self.get::(range.clone())?; - - // get transaction receipts - let Some(from_transaction_num) = block_bodies.first().map(|bodies| bodies.1.first_tx_num()) - else { - return Ok(None) - }; - let Some(to_transaction_num) = block_bodies.last().map(|bodies| bodies.1.last_tx_num()) - else { - return Ok(None) - }; - - let storage_range = BlockNumberAddress::range(range.clone()); - - let storage_changeset = self.get::(storage_range)?; - let account_changeset = self.get::(range)?; - - // This is not working for blocks that are not at tip. as plain state is not the last - // state of end range. We should rename the functions or add support to access - // History state. Accessing history state can be tricky but we are not gaining - // anything. 
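The sender lookup above falls back to signature recovery because senders may be pruned while transactions are not; a self-contained sketch of that fallback, with `recover` standing in for signer recovery on the signed-transaction type:

```rust
use alloy_primitives::Address;
use std::collections::BTreeMap;

/// Pairs each transaction with its sender, preferring the indexed
/// `TransactionSenders` entry and recovering from the signature otherwise.
fn fill_missing_senders<T>(
    tx_nums: impl Iterator<Item = u64>,
    txs: &[T],
    known_senders: &BTreeMap<u64, Address>,
    recover: impl Fn(&T) -> Option<Address>,
) -> Result<Vec<Address>, &'static str> {
    let mut senders = Vec::with_capacity(txs.len());
    for (tx_num, tx) in tx_nums.zip(txs) {
        match known_senders.get(&tx_num) {
            Some(sender) => senders.push(*sender),
            // Not indexed (e.g. pruned): recover the signer from the signature.
            None => senders.push(recover(tx).ok_or("sender recovery failed")?),
        }
    }
    Ok(senders)
}
```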
- let mut plain_accounts_cursor = self.tx.cursor_read::()?; - let mut plain_storage_cursor = self.tx.cursor_dup_read::()?; - - let (state, reverts) = self.populate_bundle_state( - account_changeset, - storage_changeset, - &mut plain_accounts_cursor, - &mut plain_storage_cursor, - )?; - - // iterate over block body and create ExecutionResult - let mut receipt_iter = - self.get::(from_transaction_num..=to_transaction_num)?.into_iter(); - - let mut receipts = Vec::with_capacity(block_bodies.len()); - // loop break if we are at the end of the blocks. - for (_, block_body) in block_bodies { - let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - if let Some((_, receipt)) = receipt_iter.next() { - block_receipts.push(Some(receipt)); - } - } - receipts.push(block_receipts); - } - - Ok(Some(ExecutionOutcome::new_init( - state, - reverts, - Vec::new(), - receipts.into(), - start_block_number, - Vec::new(), - ))) - } - /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the /// [`PlainAccountState`] and [`PlainStorageState`] tables, based on the given storage and /// account changesets. @@ -790,301 +804,31 @@ impl DatabaseProvider { Ok(self.tx.commit()?) } - /// Remove requested block transactions, without returning them. - /// - /// This will remove block data for the given range from the following tables: - /// * [`BlockBodyIndices`](tables::BlockBodyIndices) - /// * [`Transactions`](tables::Transactions) - /// * [`TransactionSenders`](tables::TransactionSenders) - /// * [`TransactionHashNumbers`](tables::TransactionHashNumbers) - /// * [`TransactionBlocks`](tables::TransactionBlocks) - pub fn remove_block_transaction_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult<()> { - // Raad range of block bodies to get all transactions id's of this range. - let block_bodies = self.take::(range)?; - - if block_bodies.is_empty() { - return Ok(()) - } - - // Compute the first and last tx ID in the range - let first_transaction = block_bodies.first().expect("If we have headers").1.first_tx_num(); - let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_num(); - - // If this is the case then all of the blocks in the range are empty - if last_transaction < first_transaction { - return Ok(()) - } - - // Get transactions so we can then remove - let transactions = self - .take::(first_transaction..=last_transaction)? - .into_iter() - .map(|(id, tx)| (id, tx.into())) - .collect::>(); - - // remove senders - self.remove::(first_transaction..=last_transaction)?; - - // Remove TransactionHashNumbers - let mut tx_hash_cursor = self.tx.cursor_write::()?; - for (_, tx) in &transactions { - if tx_hash_cursor.seek_exact(tx.hash())?.is_some() { - tx_hash_cursor.delete_current()?; - } - } - - // Remove TransactionBlocks index if there are transaction present - if !transactions.is_empty() { - let tx_id_range = transactions.first().unwrap().0..=transactions.last().unwrap().0; - self.remove::(tx_id_range)?; + /// Load shard and remove it. If list is empty, last shard was full or + /// there are no shards at all. + fn take_shard(&self, key: T::Key) -> ProviderResult> + where + T: Table, + { + let mut cursor = self.tx.cursor_read::()?; + let shard = cursor.seek_exact(key)?; + if let Some((shard_key, list)) = shard { + // delete old shard so new one can be inserted. 
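`take_shard` is one half of the sharded history-index scheme: per key, indices are stored as chunked block-number lists, and only the last shard may be partially full. A standalone model of the append path (plain `Vec<u64>` chunks and the shard-size constant are illustrative stand-ins for reth's compressed block-number lists and sharded-key tables):

```rust
/// Illustrative shard size; reth chunks history indices into fixed-size
/// shards, keying the final, possibly partial shard with a sentinel
/// (`u64::MAX`) so it can be found and extended on the next append.
const SHARD_LEN: usize = 2_000;

/// Appending loads the last shard (already removed from the table by
/// `take_shard`), extends it with the new block numbers, and rewrites the
/// combined list as full chunks plus one trailing partial chunk.
fn rechunk_shards(
    last_shard: Vec<u64>,
    new_indices: impl IntoIterator<Item = u64>,
) -> Vec<Vec<u64>> {
    let mut all = last_shard;
    all.extend(new_indices);
    all.chunks(SHARD_LEN).map(|chunk| chunk.to_vec()).collect()
}
```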
+ self.tx.delete::(shard_key, None)?; + let list = list.iter().collect::>(); + return Ok(list) } - - Ok(()) + Ok(Vec::new()) } - /// Get requested blocks transaction with senders, also removing them from the database + /// Insert history index to the database. /// - /// This will remove block data for the given range from the following tables: - /// * [`BlockBodyIndices`](tables::BlockBodyIndices) - /// * [`Transactions`](tables::Transactions) - /// * [`TransactionSenders`](tables::TransactionSenders) - /// * [`TransactionHashNumbers`](tables::TransactionHashNumbers) - /// * [`TransactionBlocks`](tables::TransactionBlocks) - pub fn take_block_transaction_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult)>> { - // Raad range of block bodies to get all transactions id's of this range. - let block_bodies = self.get::(range)?; - - if block_bodies.is_empty() { - return Ok(Vec::new()) - } - - // Compute the first and last tx ID in the range - let first_transaction = block_bodies.first().expect("If we have headers").1.first_tx_num(); - let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_num(); - - // If this is the case then all of the blocks in the range are empty - if last_transaction < first_transaction { - return Ok(block_bodies.into_iter().map(|(n, _)| (n, Vec::new())).collect()) - } - - // Get transactions and senders - let transactions = self - .take::(first_transaction..=last_transaction)? - .into_iter() - .map(|(id, tx)| (id, tx.into())) - .collect::>(); - - let mut senders = - self.take::(first_transaction..=last_transaction)?; - - recover_block_senders(&mut senders, &transactions, first_transaction, last_transaction)?; - - // Remove TransactionHashNumbers - let mut tx_hash_cursor = self.tx.cursor_write::()?; - for (_, tx) in &transactions { - if tx_hash_cursor.seek_exact(tx.hash())?.is_some() { - tx_hash_cursor.delete_current()?; - } - } - - // Remove TransactionBlocks index if there are transaction present - if !transactions.is_empty() { - let tx_id_range = transactions.first().unwrap().0..=transactions.last().unwrap().0; - self.remove::(tx_id_range)?; - } - - // Merge transaction into blocks - let mut block_tx = Vec::with_capacity(block_bodies.len()); - let mut senders = senders.into_iter(); - let mut transactions = transactions.into_iter(); - for (block_number, block_body) in block_bodies { - let mut one_block_tx = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - let tx = transactions.next(); - let sender = senders.next(); - - let recovered = match (tx, sender) { - (Some((tx_id, tx)), Some((sender_tx_id, sender))) => { - if tx_id == sender_tx_id { - Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender)) - } else { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - } - (Some((tx_id, _)), _) | (_, Some((tx_id, _))) => { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - (None, None) => Err(ProviderError::BlockBodyTransactionCount), - }?; - one_block_tx.push(recovered) - } - block_tx.push((block_number, one_block_tx)); - } - - Ok(block_tx) - } - - /// Remove the given range of blocks, without returning any of the blocks. 
- /// - /// This will remove block data for the given range from the following tables: - /// * [`HeaderNumbers`](tables::HeaderNumbers) - /// * [`CanonicalHeaders`](tables::CanonicalHeaders) - /// * [`BlockOmmers`](tables::BlockOmmers) - /// * [`BlockWithdrawals`](tables::BlockWithdrawals) - /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) - /// - /// This will also remove transaction data according to - /// [`remove_block_transaction_range`](Self::remove_block_transaction_range). - pub fn remove_block_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult<()> { - let block_headers = self.remove::(range.clone())?; - if block_headers == 0 { - return Ok(()) - } - - self.tx.unwind_table_by_walker::( - range.clone(), - )?; - self.remove::(range.clone())?; - self.remove::(range.clone())?; - self.remove::(range.clone())?; - self.remove_block_transaction_range(range.clone())?; - self.remove::(range)?; - - Ok(()) - } - - /// Remove the given range of blocks, and return them. - /// - /// This will remove block data for the given range from the following tables: - /// * [`HeaderNumbers`](tables::HeaderNumbers) - /// * [`CanonicalHeaders`](tables::CanonicalHeaders) - /// * [`BlockOmmers`](tables::BlockOmmers) - /// * [`BlockWithdrawals`](tables::BlockWithdrawals) - /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) - /// - /// This will also remove transaction data according to - /// [`take_block_transaction_range`](Self::take_block_transaction_range). - pub fn take_block_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult> - where - N::ChainSpec: EthereumHardforks, - { - // For blocks we need: - // - // - Headers - // - Bodies (transactions) - // - Uncles/ommers - // - Withdrawals - // - Signers - - let block_headers = self.take::(range.clone())?; - if block_headers.is_empty() { - return Ok(Vec::new()) - } - - self.tx.unwind_table_by_walker::( - range.clone(), - )?; - let block_header_hashes = self.take::(range.clone())?; - let block_ommers = self.take::(range.clone())?; - let block_withdrawals = self.take::(range.clone())?; - let block_tx = self.take_block_transaction_range(range.clone())?; - - let mut blocks = Vec::with_capacity(block_headers.len()); - - // rm HeaderTerminalDifficulties - self.remove::(range)?; - - // merge all into block - let block_header_iter = block_headers.into_iter(); - let block_header_hashes_iter = block_header_hashes.into_iter(); - let block_tx_iter = block_tx.into_iter(); - - // Ommers can be empty for some blocks - let mut block_ommers_iter = block_ommers.into_iter(); - let mut block_withdrawals_iter = block_withdrawals.into_iter(); - let mut block_ommers = block_ommers_iter.next(); - let mut block_withdrawals = block_withdrawals_iter.next(); - - for ((main_block_number, header), (_, header_hash), (_, tx)) in - izip!(block_header_iter, block_header_hashes_iter, block_tx_iter) - { - let header = SealedHeader::new(header, header_hash); - - let (transactions, senders) = tx.into_iter().map(|tx| tx.to_components()).unzip(); - - // Ommers can be missing - let mut ommers = Vec::new(); - if let Some((block_number, _)) = block_ommers.as_ref() { - if *block_number == main_block_number { - ommers = block_ommers.take().unwrap().1.ommers; - block_ommers = block_ommers_iter.next(); - } - }; - - // withdrawal can be missing - let shanghai_is_active = - self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp); - let mut withdrawals = Some(Withdrawals::default()); - if shanghai_is_active { - if 
let Some((block_number, _)) = block_withdrawals.as_ref() { - if *block_number == main_block_number { - withdrawals = Some(block_withdrawals.take().unwrap().1.withdrawals); - block_withdrawals = block_withdrawals_iter.next(); - } - } - } else { - withdrawals = None - } - - blocks.push(SealedBlockWithSenders { - block: SealedBlock { - header, - body: BlockBody { transactions, ommers, withdrawals }, - }, - senders, - }) - } - - Ok(blocks) - } - - /// Load shard and remove it. If list is empty, last shard was full or - /// there are no shards at all. - fn take_shard(&self, key: T::Key) -> ProviderResult> - where - T: Table, - { - let mut cursor = self.tx.cursor_read::()?; - let shard = cursor.seek_exact(key)?; - if let Some((shard_key, list)) = shard { - // delete old shard so new one can be inserted. - self.tx.delete::(shard_key, None)?; - let list = list.iter().collect::>(); - return Ok(list) - } - Ok(Vec::new()) - } - - /// Insert history index to the database. - /// - /// For each updated partial key, this function removes the last shard from - /// the database (if any), appends the new indices to it, chunks the resulting integer list and - /// inserts the new shards back into the database. - /// - /// This function is used by history indexing stages. - fn append_history_index( + /// For each updated partial key, this function removes the last shard from + /// the database (if any), appends the new indices to it, chunks the resulting integer list and + /// inserts the new shards back into the database. + /// + /// This function is used by history indexing stages. + fn append_history_index( &self, index_updates: impl IntoIterator)>, mut sharded_key_factory: impl FnMut(P, BlockNumber) -> T::Key, @@ -1401,10 +1145,14 @@ impl BlockNumReader for DatabaseProvider> BlockReader - for DatabaseProvider -{ - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { +impl BlockReader for DatabaseProvider { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { if source.is_canonical() { self.block(hash.into()) } else { @@ -1417,39 +1165,44 @@ impl> BlockReader /// If the header for this block is not found, this returns `None`. /// If the header is found, but the transactions either do not exist, or are not indexed, this /// will return None. - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { if let Some(number) = self.convert_hash_or_number(id)? { if let Some(header) = self.header_by_number(number)? { - let withdrawals = self.withdrawals_by_block(number.into(), header.timestamp)?; - let ommers = self.ommers(number.into())?.unwrap_or_default(); // If the body indices are not found, this means that the transactions either do not // exist in the database yet, or they do exit but are not indexed. // If they exist but are not indexed, we don't have enough // information to return the block anyways, so we return `None`. - let transactions = match self.transactions_by_block(number.into())? { - Some(transactions) => transactions, - None => return Ok(None), + let Some(transactions) = self.transactions_by_block(number.into())? else { + return Ok(None) }; - return Ok(Some(Block { - header, - body: BlockBody { transactions, ommers, withdrawals }, - })) + let body = self + .storage + .reader() + .read_block_bodies(self, vec![(&header, transactions)])? 
+ .pop() + .ok_or(ProviderError::InvalidStorageOutput)?; + + return Ok(Some(Self::Block::new(header, body))) } } Ok(None) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(None) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(None) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { Ok(None) } @@ -1488,13 +1241,13 @@ impl> BlockReader &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders( id, transaction_kind, |block_number| self.header_by_number(block_number), - |header, transactions, senders, ommers, withdrawals| { - Block { header, body: BlockBody { transactions, ommers, withdrawals } } + |header, body, senders| { + Self::Block::new(header, body) // Note: we're using unchecked here because we know the block contains valid txs // wrt to its height and can ignore the s value check so pre // EIP-2 txs are allowed @@ -1509,13 +1262,13 @@ impl> BlockReader &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders( id, transaction_kind, |block_number| self.sealed_header(block_number), - |header, transactions, senders, ommers, withdrawals| { - SealedBlock { header, body: BlockBody { transactions, ommers, withdrawals } } + |header, body, senders| { + SealedBlock { header, body } // Note: we're using unchecked here because we know the block contains valid txs // wrt to its height and can ignore the s value check so pre // EIP-2 txs are allowed @@ -1526,34 +1279,23 @@ impl> BlockReader ) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - let mut tx_cursor = self.tx.cursor_read::()?; + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.block_range( range, |range| self.headers_range(range), - |header, tx_range, ommers, withdrawals| { - let transactions = if tx_range.is_empty() { - Vec::new() - } else { - self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)? 
- .into_iter() - .map(Into::into) - .collect() - }; - Ok(Block { header, body: BlockBody { transactions, ommers, withdrawals } }) - }, + |header, body, _| Ok(Self::Block::new(header, body)), ) } fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders_range( range, |range| self.headers_range(range), - |header, transactions, ommers, withdrawals, senders| { - Block { header, body: BlockBody { transactions, ommers, withdrawals } } + |header, body, senders| { + Self::Block::new(header, body) .try_with_senders_unchecked(senders) .map_err(|_| ProviderError::SenderRecoveryError) }, @@ -1563,22 +1305,19 @@ impl> BlockReader fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders_range( range, |range| self.sealed_headers_range(range), - |header, transactions, ommers, withdrawals, senders| { - SealedBlockWithSenders::new( - SealedBlock { header, body: BlockBody { transactions, ommers, withdrawals } }, - senders, - ) - .ok_or(ProviderError::SenderRecoveryError) + |header, body, senders| { + SealedBlockWithSenders::new(SealedBlock { header, body }, senders) + .ok_or(ProviderError::SenderRecoveryError) }, ) } } -impl> TransactionsProviderExt +impl TransactionsProviderExt for DatabaseProvider { /// Recovers transaction hashes by walking through `Transactions` table and @@ -1592,7 +1331,7 @@ impl> Transaction tx_range, |static_file, range, _| static_file.transaction_hashes_by_range(range), |tx_range, _| { - let mut tx_cursor = self.tx.cursor_read::()?; + let mut tx_cursor = self.tx.cursor_read::>>()?; let tx_range_size = tx_range.clone().count(); let tx_walker = tx_cursor.walk_range(tx_range)?; @@ -1601,12 +1340,15 @@ impl> Transaction let mut transaction_count = 0; #[inline] - fn calculate_hash( - entry: Result<(TxNumber, TransactionSignedNoHash), DatabaseError>, + fn calculate_hash( + entry: Result<(TxNumber, T), DatabaseError>, rlp_buf: &mut Vec, - ) -> Result<(B256, TxNumber), Box> { + ) -> Result<(B256, TxNumber), Box> + where + T: Encodable2718, + { let (tx_id, tx) = entry.map_err(|e| Box::new(e.into()))?; - tx.transaction.eip2718_encode(&tx.signature, rlp_buf); + tx.encode_2718(rlp_buf); Ok((keccak256(rlp_buf), tx_id)) } @@ -1648,59 +1390,49 @@ impl> Transaction } // Calculates the hash of the given transaction -impl> TransactionsProvider - for DatabaseProvider -{ +impl TransactionsProvider for DatabaseProvider { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { Ok(self.tx.get::(tx_hash)?) 
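The reworked `calculate_hash` above is what unlocks the generic transaction type: a transaction hash is the keccak256 of the EIP-2718 typed encoding, so any `Encodable2718` value can be hashed without knowing the concrete `TransactionSigned` layout. Minimal form:

```rust
use alloy_eips::eip2718::Encodable2718;
use alloy_primitives::{keccak256, B256};

/// Hashes any EIP-2718-encodable transaction, reusing `rlp_buf` across
/// calls to avoid per-transaction allocations (as the range walker does).
fn tx_hash<T: Encodable2718>(tx: &T, rlp_buf: &mut Vec<u8>) -> B256 {
    rlp_buf.clear();
    tx.encode_2718(rlp_buf);
    keccak256(rlp_buf.as_slice())
}
```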
} - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, |static_file| static_file.transaction_by_id(id), - || Ok(self.tx.get::(id)?.map(Into::into)), + || Ok(self.tx.get::>(id)?), ) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, - |static_file| static_file.transaction_by_id_no_hash(id), - || Ok(self.tx.get::(id)?), + |static_file| static_file.transaction_by_id_unhashed(id), + || Ok(self.tx.get::>(id)?), ) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(id) = self.transaction_id(hash)? { - Ok(self.transaction_by_id_no_hash(id)?.map(|tx| TransactionSigned { - hash, - signature: tx.signature, - transaction: tx.transaction, - })) + Ok(self.transaction_by_id_unhashed(id)?) } else { Ok(None) } - .map(|tx| tx.map(Into::into)) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { let mut transaction_cursor = self.tx.cursor_read::()?; if let Some(transaction_id) = self.transaction_id(tx_hash)? { - if let Some(tx) = self.transaction_by_id_no_hash(transaction_id)? { - let transaction = TransactionSigned { - hash: tx_hash, - signature: tx.signature, - transaction: tx.transaction, - }; + if let Some(transaction) = self.transaction_by_id_unhashed(transaction_id)? { if let Some(block_number) = transaction_cursor.seek(transaction_id).map(|b| b.map(|(_, bn)| bn))? { @@ -1741,8 +1473,8 @@ impl> Transaction fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { - let mut tx_cursor = self.tx.cursor_read::()?; + ) -> ProviderResult>> { + let mut tx_cursor = self.tx.cursor_read::>()?; if let Some(block_number) = self.convert_hash_or_number(id)? { if let Some(body) = self.block_body_indices(block_number)? { @@ -1750,12 +1482,7 @@ impl> Transaction return if tx_range.is_empty() { Ok(Some(Vec::new())) } else { - Ok(Some( - self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)? - .into_iter() - .map(Into::into) - .collect(), - )) + Ok(Some(self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)?)) } } } @@ -1765,8 +1492,8 @@ impl> Transaction fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { - let mut tx_cursor = self.tx.cursor_read::()?; + ) -> ProviderResult>> { + let mut tx_cursor = self.tx.cursor_read::>()?; let mut results = Vec::new(); let mut body_cursor = self.tx.cursor_read::()?; for entry in body_cursor.walk_range(range)? { @@ -1778,7 +1505,6 @@ impl> Transaction results.push( self.transactions_by_tx_range_with_cursor(tx_num_range, &mut tx_cursor)? 
.into_iter() - .map(Into::into) .collect(), ); } @@ -1789,10 +1515,10 @@ impl> Transaction fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.transactions_by_tx_range_with_cursor( range, - &mut self.tx.cursor_read::()?, + &mut self.tx.cursor_read::>()?, ) } @@ -1808,19 +1534,19 @@ impl> Transaction } } -impl> ReceiptProvider - for DatabaseProvider -{ - fn receipt(&self, id: TxNumber) -> ProviderResult> { +impl ReceiptProvider for DatabaseProvider { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Receipts, id, |static_file| static_file.receipt(id), - || Ok(self.tx.get::(id)?), + || Ok(self.tx.get::>(id)?), ) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(id) = self.transaction_id(hash)? { self.receipt(id) } else { @@ -1828,7 +1554,10 @@ impl> ReceiptProv } } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { if let Some(number) = self.convert_hash_or_number(block)? { if let Some(body) = self.block_body_indices(number)? { let tx_range = body.tx_num_range(); @@ -1845,12 +1574,15 @@ impl> ReceiptProv fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Receipts, to_range(range), |static_file, range, _| static_file.receipts_by_tx_range(range), - |range, _| self.cursor_read_collect::(range).map_err(Into::into), + |range, _| { + self.cursor_read_collect::>(range) + .map_err(Into::into) + }, |_| true, ) } @@ -2075,7 +1807,77 @@ impl StorageReader for DatabaseProvider } } -impl StateChangeWriter for DatabaseProvider { +impl StateWriter + for DatabaseProvider +{ + type Receipt = ReceiptTy; + + fn write_state( + &self, + execution_outcome: ExecutionOutcome, + is_value_known: OriginalValuesKnown, + write_receipts_to: StorageLocation, + ) -> ProviderResult<()> { + let (plain_state, reverts) = + execution_outcome.bundle.to_plain_state_and_reverts(is_value_known); + + self.write_state_reverts(reverts, execution_outcome.first_block)?; + self.write_state_changes(plain_state)?; + + let mut bodies_cursor = self.tx.cursor_read::()?; + + let has_receipts_pruning = self.prune_modes.has_receipts_pruning() || + execution_outcome.receipts.iter().flatten().any(|receipt| receipt.is_none()); + + // Prepare receipts cursor if we are going to write receipts to the database + // + // We are writing to database if requested or if there's any kind of receipt pruning + // configured + let mut receipts_cursor = (write_receipts_to.database() || has_receipts_pruning) + .then(|| self.tx.cursor_write::>()) + .transpose()?; + + // Prepare receipts static writer if we are going to write receipts to static files + // + // We are writing to static files if requested and if there's no receipt pruning configured + let mut receipts_static_writer = (write_receipts_to.static_files() && + !has_receipts_pruning) + .then(|| { + self.static_file_provider + .get_writer(execution_outcome.first_block, StaticFileSegment::Receipts) + }) + .transpose()?; + + for (idx, receipts) in execution_outcome.receipts.into_iter().enumerate() { + let block_number = execution_outcome.first_block + idx as u64; + + // Increment block number for 
receipts static file writer + if let Some(writer) = receipts_static_writer.as_mut() { + writer.increment_block(block_number)?; + } + + let first_tx_index = bodies_cursor + .seek_exact(block_number)? + .map(|(_, indices)| indices.first_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; + + for (idx, receipt) in receipts.into_iter().enumerate() { + let receipt_idx = first_tx_index + idx as u64; + if let Some(receipt) = receipt { + if let Some(writer) = &mut receipts_static_writer { + writer.append_receipt(receipt_idx, &receipt)?; + } + + if let Some(cursor) = &mut receipts_cursor { + cursor.append(receipt_idx, receipt)?; + } + } + } + } + + Ok(()) + } + fn write_state_reverts( &self, reverts: PlainStateReverts, @@ -2265,9 +2067,15 @@ impl StateChangeWriter for DatabaseP /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. Set the local state to the value in the changeset - fn remove_state(&self, range: RangeInclusive) -> ProviderResult<()> { + fn remove_state_above( + &self, + block: BlockNumber, + remove_receipts_from: StorageLocation, + ) -> ProviderResult<()> { + let range = block + 1..=self.last_block_number()?; + if range.is_empty() { - return Ok(()) + return Ok(()); } // We are not removing block meta as it is used to get block changesets. @@ -2276,8 +2084,6 @@ impl StateChangeWriter for DatabaseP // get transaction receipts let from_transaction_num = block_bodies.first().expect("already checked if there are blocks").1.first_tx_num(); - let to_transaction_num = - block_bodies.last().expect("already checked if there are blocks").1.last_tx_num(); let storage_range = BlockNumberAddress::range(range.clone()); @@ -2330,8 +2136,7 @@ impl StateChangeWriter for DatabaseP } } - // iterate over block body and remove receipts - self.remove::(from_transaction_num..=to_transaction_num)?; + self.remove_receipts_from(from_transaction_num, block, remove_receipts_from)?; Ok(()) } @@ -2357,7 +2162,13 @@ impl StateChangeWriter for DatabaseP /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. Set the local state to the value in the changeset - fn take_state(&self, range: RangeInclusive) -> ProviderResult { + fn take_state_above( + &self, + block: BlockNumber, + remove_receipts_from: StorageLocation, + ) -> ProviderResult> { + let range = block + 1..=self.last_block_number()?; + if range.is_empty() { return Ok(ExecutionOutcome::default()) } @@ -2425,22 +2236,45 @@ impl StateChangeWriter for DatabaseP } } - // iterate over block body and create ExecutionResult - let mut receipt_iter = - self.take::(from_transaction_num..=to_transaction_num)?.into_iter(); + // Collect receipts into tuples (tx_num, receipt) to correctly handle pruned receipts + let mut receipts_iter = self + .static_file_provider + .get_range_with_static_file_or_database( + StaticFileSegment::Receipts, + from_transaction_num..to_transaction_num + 1, + |static_file, range, _| { + static_file + .receipts_by_tx_range(range.clone()) + .map(|r| range.into_iter().zip(r).collect()) + }, + |range, _| { + self.tx + .cursor_read::>()? + .walk_range(range)? + .map(|r| r.map_err(Into::into)) + .collect() + }, + |_| true, + )? + .into_iter() + .peekable(); let mut receipts = Vec::with_capacity(block_bodies.len()); // loop break if we are at the end of the blocks. 
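The receipt-destination logic in `write_state` above is worth spelling out: any configured receipt pruning forces receipts into the database, because static files must stay gapless. A simplified model (the enum mirrors reth's `StorageLocation` but is redefined here to stay self-contained):

```rust
/// Where a write should land; sketch of reth's `StorageLocation`.
#[derive(Clone, Copy)]
enum StorageLocation {
    Database,
    StaticFiles,
    Both,
}

impl StorageLocation {
    fn database(self) -> bool {
        matches!(self, Self::Database | Self::Both)
    }
    fn static_files(self) -> bool {
        matches!(self, Self::StaticFiles | Self::Both)
    }
}

/// Returns (write_to_database, write_to_static_files) for receipts:
/// pruning may leave holes, and holes are only representable in the
/// database, so it both forces the database on and the static files off.
fn receipt_targets(requested: StorageLocation, has_receipts_pruning: bool) -> (bool, bool) {
    (
        requested.database() || has_receipts_pruning,
        requested.static_files() && !has_receipts_pruning,
    )
}
```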
for (_, block_body) in block_bodies { let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - if let Some((_, receipt)) = receipt_iter.next() { - block_receipts.push(Some(receipt)); + for num in block_body.tx_num_range() { + if receipts_iter.peek().is_some_and(|(n, _)| *n == num) { + block_receipts.push(receipts_iter.next().map(|(_, r)| r)); + } else { + block_receipts.push(None); } } receipts.push(block_receipts); } + self.remove_receipts_from(from_transaction_num, block, remove_receipts_from)?; + Ok(ExecutionOutcome::new_init( state, reverts, @@ -2612,7 +2446,7 @@ impl HashingWriter for DatabaseProvi // Apply values to HashedState, and remove the account if it's None. let mut hashed_storage_keys: HashMap> = - HashMap::with_capacity(hashed_storages.len()); + HashMap::with_capacity_and_hasher(hashed_storages.len(), Default::default()); let mut hashed_storage = self.tx.cursor_dup_write::()?; for (hashed_address, key, value) in hashed_storages.into_iter().rev() { hashed_storage_keys.entry(hashed_address).or_default().insert(key); @@ -2898,217 +2732,62 @@ impl HistoryWriter for DatabaseProvi } } -impl StateReader for DatabaseProvider { - fn get_state(&self, block: BlockNumber) -> ProviderResult> { - self.get_state(block..=block) - } -} - -impl + 'static> - BlockExecutionWriter for DatabaseProvider +impl BlockExecutionWriter + for DatabaseProvider { - fn take_block_and_execution_range( + fn take_block_and_execution_above( &self, - range: RangeInclusive, - ) -> ProviderResult { - let changed_accounts = self - .tx - .cursor_read::()? - .walk_range(range.clone())? - .collect::, _>>()?; - - // Unwind account hashes. Add changed accounts to account prefix set. - let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; - let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); - let mut destroyed_accounts = HashSet::default(); - for (hashed_address, account) in hashed_addresses { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - if account.is_none() { - destroyed_accounts.insert(hashed_address); - } - } - - // Unwind account history indices. - self.unwind_account_history_indices(changed_accounts.iter())?; - let storage_range = BlockNumberAddress::range(range.clone()); - - let changed_storages = self - .tx - .cursor_read::()? - .walk_range(storage_range)? - .collect::, _>>()?; - - // Unwind storage hashes. Add changed account and storage keys to corresponding prefix - // sets. - let mut storage_prefix_sets = HashMap::::default(); - let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; - for (hashed_address, hashed_slots) in storage_entries { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); - for slot in hashed_slots { - storage_prefix_set.insert(Nibbles::unpack(slot)); - } - storage_prefix_sets.insert(hashed_address, storage_prefix_set.freeze()); - } - - // Unwind storage history indices. - self.unwind_storage_history_indices(changed_storages.iter().copied())?; - - // Calculate the reverted merkle root. - // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets - // are pre-loaded. 
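The peekable zip in `take_state_above` above deserves a standalone illustration, since it is what makes unwinds work with pruned receipts: receipts are keyed by transaction number, and any gap becomes a positional `None` within the block:

```rust
use std::ops::Range;

/// Groups a sparse stream of (tx number, receipt) pairs into per-block
/// `Vec<Option<R>>`, inserting `None` for pruned receipts so each block's
/// receipt list stays aligned with its transaction list.
fn group_receipts<R>(
    receipts: Vec<(u64, R)>,      // ascending by tx number, possibly with gaps
    blocks: &[(u64, Range<u64>)], // (block number, tx-number range)
) -> Vec<Vec<Option<R>>> {
    let mut iter = receipts.into_iter().peekable();
    let mut out = Vec::with_capacity(blocks.len());
    for (_, tx_range) in blocks {
        let mut block_receipts = Vec::with_capacity(tx_range.clone().count());
        for num in tx_range.clone() {
            if iter.peek().is_some_and(|(n, _)| *n == num) {
                block_receipts.push(iter.next().map(|(_, r)| r));
            } else {
                // Receipt was pruned: keep the slot to preserve alignment.
                block_receipts.push(None);
            }
        }
        out.push(block_receipts);
    }
    out
}
```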
- let prefix_sets = TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - storage_prefix_sets, - destroyed_accounts, - }; - let (new_state_root, trie_updates) = StateRoot::from_tx(&self.tx) - .with_prefix_sets(prefix_sets) - .root_with_updates() - .map_err(Into::::into)?; - - let parent_number = range.start().saturating_sub(1); - let parent_state_root = self - .header_by_number(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? - .state_root; - - // state root should be always correct as we are reverting state. - // but for sake of double verification we will check it again. - if new_state_root != parent_state_root { - let parent_hash = self - .block_hash(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; - return Err(ProviderError::UnwindStateRootMismatch(Box::new(RootMismatch { - root: GotExpected { got: new_state_root, expected: parent_state_root }, - block_number: parent_number, - block_hash: parent_hash, - }))) - } - self.write_trie_updates(&trie_updates)?; + block: BlockNumber, + remove_from: StorageLocation, + ) -> ProviderResult> { + let range = block + 1..=self.last_block_number()?; - // get blocks - let blocks = self.take_block_range(range.clone())?; - let unwind_to = blocks.first().map(|b| b.number.saturating_sub(1)); + self.unwind_trie_state_range(range.clone())?; // get execution res - let execution_state = self.take_state(range.clone())?; + let execution_state = self.take_state_above(block, remove_from)?; + + let blocks = self.sealed_block_with_senders_range(range)?; // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. - self.remove::(range)?; + self.remove_blocks_above(block, remove_from)?; // Update pipeline progress - if let Some(fork_number) = unwind_to { - self.update_pipeline_stages(fork_number, true)?; - } + self.update_pipeline_stages(block, true)?; Ok(Chain::new(blocks, execution_state, None)) } - fn remove_block_and_execution_range( + fn remove_block_and_execution_above( &self, - range: RangeInclusive, + block: BlockNumber, + remove_from: StorageLocation, ) -> ProviderResult<()> { - let changed_accounts = self - .tx - .cursor_read::()? - .walk_range(range.clone())? - .collect::, _>>()?; - - // Unwind account hashes. Add changed accounts to account prefix set. - let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; - let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); - let mut destroyed_accounts = HashSet::default(); - for (hashed_address, account) in hashed_addresses { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - if account.is_none() { - destroyed_accounts.insert(hashed_address); - } - } + let range = block + 1..=self.last_block_number()?; - // Unwind account history indices. - self.unwind_account_history_indices(changed_accounts.iter())?; - - let storage_range = BlockNumberAddress::range(range.clone()); - let changed_storages = self - .tx - .cursor_read::()? - .walk_range(storage_range)? - .collect::, _>>()?; - - // Unwind storage hashes. Add changed account and storage keys to corresponding prefix - // sets. 
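With the duplicated unwind bodies factored into `unwind_trie_state_range`, both `BlockExecutionWriter` methods reduce to the same ordered sequence. A sketch of that order (the trait below is hypothetical, bundling just the calls involved; the real methods live on `DatabaseProvider` and return `ProviderResult`s):

```rust
use std::ops::RangeInclusive;

/// Hypothetical trait naming the steps used by
/// `take_block_and_execution_above`; not part of reth's API.
trait UnwindOps {
    fn unwind_trie_state_range(&self, range: RangeInclusive<u64>);
    fn take_state_above(&self, block: u64);
    fn read_blocks_above(&self, block: u64);
    fn remove_blocks_above(&self, block: u64);
    fn update_pipeline_stages(&self, to: u64);
}

/// Ordering matters: reverted state and blocks must be read *before*
/// bodies are deleted, and stage checkpoints are reset last.
fn unwind_above(p: &impl UnwindOps, block: u64, tip: u64) {
    p.unwind_trie_state_range(block + 1..=tip); // revert hashed state, verify parent root
    p.take_state_above(block);                  // collect the reverted ExecutionOutcome
    p.read_blocks_above(block);                 // read unwound blocks while still present
    p.remove_blocks_above(block);               // drop headers/bodies above the new tip
    p.update_pipeline_stages(block);            // reset stage checkpoints to `block`
}
```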
- let mut storage_prefix_sets = HashMap::::default(); - let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; - for (hashed_address, hashed_slots) in storage_entries { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); - for slot in hashed_slots { - storage_prefix_set.insert(Nibbles::unpack(slot)); - } - storage_prefix_sets.insert(hashed_address, storage_prefix_set.freeze()); - } - - // Unwind storage history indices. - self.unwind_storage_history_indices(changed_storages.iter().copied())?; - - // Calculate the reverted merkle root. - // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets - // are pre-loaded. - let prefix_sets = TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - storage_prefix_sets, - destroyed_accounts, - }; - let (new_state_root, trie_updates) = StateRoot::from_tx(&self.tx) - .with_prefix_sets(prefix_sets) - .root_with_updates() - .map_err(Into::::into)?; - - let parent_number = range.start().saturating_sub(1); - let parent_state_root = self - .header_by_number(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? - .state_root; - - // state root should be always correct as we are reverting state. - // but for sake of double verification we will check it again. - if new_state_root != parent_state_root { - let parent_hash = self - .block_hash(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; - return Err(ProviderError::UnwindStateRootMismatch(Box::new(RootMismatch { - root: GotExpected { got: new_state_root, expected: parent_state_root }, - block_number: parent_number, - block_hash: parent_hash, - }))) - } - self.write_trie_updates(&trie_updates)?; - - // get blocks - let blocks = self.take_block_range(range.clone())?; - let unwind_to = blocks.first().map(|b| b.number.saturating_sub(1)); + self.unwind_trie_state_range(range)?; // remove execution res - self.remove_state(range.clone())?; + self.remove_state_above(block, remove_from)?; // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. - self.remove::(range)?; + self.remove_blocks_above(block, remove_from)?; // Update pipeline progress - if let Some(block_number) = unwind_to { - self.update_pipeline_stages(block_number, true)?; - } + self.update_pipeline_stages(block, true)?; Ok(()) } } -impl + 'static> BlockWriter +impl BlockWriter for DatabaseProvider { + type Block = BlockTy; + type Receipt = ReceiptTy; + /// Inserts the block into the database, always modifying the following tables: /// * [`CanonicalHeaders`](tables::CanonicalHeaders) /// * [`Headers`](tables::Headers) @@ -3131,22 +2810,13 @@ impl + /// [`TransactionHashNumbers`](tables::TransactionHashNumbers). fn insert_block( &self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, + write_to: StorageLocation, ) -> ProviderResult { let block_number = block.number; let mut durations_recorder = metrics::DurationsRecorder::default(); - self.tx.put::(block_number, block.hash())?; - durations_recorder.record_relative(metrics::Action::InsertCanonicalHeaders); - - // Put header with canonical hashes. 
- self.tx.put::(block_number, block.header.as_ref().clone())?; - durations_recorder.record_relative(metrics::Action::InsertHeaders); - - self.tx.put::(block.hash(), block_number)?; - durations_recorder.record_relative(metrics::Action::InsertHeaderNumbers); - // total difficulty let ttd = if block_number == 0 { block.difficulty @@ -3157,18 +2827,27 @@ impl + parent_ttd + block.difficulty }; - self.tx.put::(block_number, ttd.into())?; - durations_recorder.record_relative(metrics::Action::InsertHeaderTerminalDifficulties); + if write_to.database() { + self.tx.put::(block_number, block.hash())?; + durations_recorder.record_relative(metrics::Action::InsertCanonicalHeaders); - // insert body ommers data - if !block.body.ommers.is_empty() { - self.tx.put::( - block_number, - StoredBlockOmmers { ommers: block.block.body.ommers }, - )?; - durations_recorder.record_relative(metrics::Action::InsertBlockOmmers); + // Put header with canonical hashes. + self.tx.put::(block_number, block.header.as_ref().clone())?; + durations_recorder.record_relative(metrics::Action::InsertHeaders); + + self.tx.put::(block_number, ttd.into())?; + durations_recorder.record_relative(metrics::Action::InsertHeaderTerminalDifficulties); + } + + if write_to.static_files() { + let mut writer = + self.static_file_provider.get_writer(block_number, StaticFileSegment::Headers)?; + writer.append_header(&block.header, ttd, &block.hash())?; } + self.tx.put::(block.hash(), block_number)?; + durations_recorder.record_relative(metrics::Action::InsertHeaderNumbers); + let mut next_tx_num = self .tx .cursor_read::()? @@ -3178,84 +2857,25 @@ impl + durations_recorder.record_relative(metrics::Action::GetNextTxNum); let first_tx_num = next_tx_num; - let tx_count = block.block.body.transactions.len() as u64; + let tx_count = block.block.body.transactions().len() as u64; // Ensures we have all the senders for the block's transactions. 
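The total-difficulty value threaded through both the database and static-file branches above is a simple running sum; stated on its own:

```rust
use alloy_primitives::U256;

/// Genesis contributes only its own difficulty; every other block adds its
/// difficulty to the parent's total, which `insert_block` persists next to
/// the header (in `HeaderTerminalDifficulties` or the headers static file).
fn total_difficulty(block_number: u64, difficulty: U256, parent_ttd: U256) -> U256 {
    if block_number == 0 {
        difficulty
    } else {
        parent_ttd + difficulty
    }
}
```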
- let mut tx_senders_elapsed = Duration::default(); - let mut transactions_elapsed = Duration::default(); - let mut tx_hash_numbers_elapsed = Duration::default(); - for (transaction, sender) in - block.block.body.transactions.into_iter().zip(block.senders.iter()) + block.block.body.transactions().iter().zip(block.senders.iter()) { - let hash = transaction.hash(); - - if self - .prune_modes - .sender_recovery - .as_ref() - .filter(|prune_mode| prune_mode.is_full()) - .is_none() - { - let start = Instant::now(); - self.tx.put::(next_tx_num, *sender)?; - tx_senders_elapsed += start.elapsed(); - } + let hash = transaction.tx_hash(); - let start = Instant::now(); - self.tx.put::(next_tx_num, transaction.into())?; - let elapsed = start.elapsed(); - if elapsed > Duration::from_secs(1) { - warn!( - target: "providers::db", - ?block_number, - tx_num = %next_tx_num, - hash = %hash, - ?elapsed, - "Transaction insertion took too long" - ); + if self.prune_modes.sender_recovery.as_ref().is_none_or(|m| !m.is_full()) { + self.tx.put::(next_tx_num, *sender)?; } - transactions_elapsed += elapsed; - if self - .prune_modes - .transaction_lookup - .filter(|prune_mode| prune_mode.is_full()) - .is_none() - { - let start = Instant::now(); - self.tx.put::(hash, next_tx_num)?; - tx_hash_numbers_elapsed += start.elapsed(); + if self.prune_modes.transaction_lookup.is_none_or(|m| !m.is_full()) { + self.tx.put::(*hash, next_tx_num)?; } next_tx_num += 1; } - durations_recorder - .record_duration(metrics::Action::InsertTransactionSenders, tx_senders_elapsed); - durations_recorder - .record_duration(metrics::Action::InsertTransactions, transactions_elapsed); - durations_recorder.record_duration( - metrics::Action::InsertTransactionHashNumbers, - tx_hash_numbers_elapsed, - ); - if let Some(withdrawals) = block.block.body.withdrawals { - if !withdrawals.is_empty() { - self.tx.put::( - block_number, - StoredBlockWithdrawals { withdrawals }, - )?; - durations_recorder.record_relative(metrics::Action::InsertBlockWithdrawals); - } - } - - let block_indices = StoredBlockBodyIndices { first_tx_num, tx_count }; - self.tx.put::(block_number, block_indices.clone())?; - durations_recorder.record_relative(metrics::Action::InsertBlockBodyIndices); - - if !block_indices.is_empty() { - self.tx.put::(block_indices.last_tx_num(), block_number)?; - durations_recorder.record_relative(metrics::Action::InsertTransactionBlocks); - } + self.append_block_bodies(vec![(block_number, Some(block.block.body))], write_to)?; debug!( target: "providers::db", @@ -3264,14 +2884,177 @@ impl + "Inserted block" ); - Ok(block_indices) + Ok(StoredBlockBodyIndices { first_tx_num, tx_count }) + } + + fn append_block_bodies( + &self, + bodies: Vec<(BlockNumber, Option>)>, + write_transactions_to: StorageLocation, + ) -> ProviderResult<()> { + let Some(from_block) = bodies.first().map(|(block, _)| *block) else { return Ok(()) }; + + // Initialize writer if we will be writing transactions to staticfiles + let mut tx_static_writer = write_transactions_to + .static_files() + .then(|| { + self.static_file_provider.get_writer(from_block, StaticFileSegment::Transactions) + }) + .transpose()?; + + let mut block_indices_cursor = self.tx.cursor_write::()?; + let mut tx_block_cursor = self.tx.cursor_write::()?; + + // Initialize cursor if we will be writing transactions to database + let mut tx_cursor = write_transactions_to + .database() + .then(|| self.tx.cursor_write::>>()) + .transpose()?; + + // Get id for the next tx_num or zero if there are no transactions. 
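That derivation, standalone (the input is the greatest key currently in `TransactionBlocks`, if any):

```rust
/// The next transaction number is one past the highest one indexed so far;
/// an empty table starts numbering at zero.
fn next_tx_num(last_indexed: Option<u64>) -> u64 {
    last_indexed.map(|id| id + 1).unwrap_or_default()
}
```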
+ let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); + + for (block_number, body) in &bodies { + // Increment block on static file header. + if let Some(writer) = tx_static_writer.as_mut() { + writer.increment_block(*block_number)?; + } + + let tx_count = body.as_ref().map(|b| b.transactions().len() as u64).unwrap_or_default(); + let block_indices = StoredBlockBodyIndices { first_tx_num: next_tx_num, tx_count }; + + let mut durations_recorder = metrics::DurationsRecorder::default(); + + // insert block meta + block_indices_cursor.append(*block_number, block_indices)?; + + durations_recorder.record_relative(metrics::Action::InsertBlockBodyIndices); + + let Some(body) = body else { continue }; + + // write transaction block index + if !body.transactions().is_empty() { + tx_block_cursor.append(block_indices.last_tx_num(), *block_number)?; + durations_recorder.record_relative(metrics::Action::InsertTransactionBlocks); + } + + // write transactions + for transaction in body.transactions() { + if let Some(writer) = tx_static_writer.as_mut() { + writer.append_transaction(next_tx_num, transaction)?; + } + if let Some(cursor) = tx_cursor.as_mut() { + cursor.append(next_tx_num, transaction.clone())?; + } + + // Increment transaction id for each transaction. + next_tx_num += 1; + } + + debug!( + target: "providers::db", + ?block_number, + actions = ?durations_recorder.actions, + "Inserted block body" + ); + } + + self.storage.writer().write_block_bodies(self, bodies)?; + + Ok(()) + } + + fn remove_blocks_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult<()> { + let mut canonical_headers_cursor = self.tx.cursor_write::()?; + let mut rev_headers = canonical_headers_cursor.walk_back(None)?; + + while let Some(Ok((number, hash))) = rev_headers.next() { + if number <= block { + break + } + self.tx.delete::(hash, None)?; + rev_headers.delete_current()?; + } + self.remove::(block + 1..)?; + self.remove::(block + 1..)?; + + // First transaction to be removed + let unwind_tx_from = self + .tx + .get::(block)? + .map(|b| b.next_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; + + // Last transaction to be removed + let unwind_tx_to = self + .tx + .cursor_read::()? + .last()? + // shouldn't happen because this was OK above + .ok_or(ProviderError::BlockBodyIndicesNotFound(block))? + .1 + .last_tx_num(); + + if unwind_tx_from <= unwind_tx_to { + for (hash, _) in self.transaction_hashes_by_range(unwind_tx_from..(unwind_tx_to + 1))? { + self.tx.delete::(hash, None)?; + } + } + + self.remove::(unwind_tx_from..)?; + + self.remove_bodies_above(block, remove_transactions_from)?; + + Ok(()) + } + + fn remove_bodies_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult<()> { + self.storage.writer().remove_block_bodies_above(self, block)?; + + // First transaction to be removed + let unwind_tx_from = self + .tx + .get::(block)? 
+ .map(|b| b.next_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; + + self.remove::(block + 1..)?; + self.remove::(unwind_tx_from..)?; + + if remove_transactions_from.database() { + self.remove::>>(unwind_tx_from..)?; + } + + if remove_transactions_from.static_files() { + let static_file_tx_num = self + .static_file_provider + .get_highest_static_file_tx(StaticFileSegment::Transactions); + + let to_delete = static_file_tx_num + .map(|static_tx| (static_tx + 1).saturating_sub(unwind_tx_from)) + .unwrap_or_default(); + + self.static_file_provider + .latest_writer(StaticFileSegment::Transactions)? + .prune_transactions(to_delete, block)?; + } + + Ok(()) } /// TODO(joshie): this fn should be moved to `UnifiedStorageWriter` eventually fn append_blocks_with_state( &self, - blocks: Vec, - execution_outcome: ExecutionOutcome, + blocks: Vec>, + execution_outcome: ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, ) -> ProviderResult<()> { @@ -3289,16 +3072,11 @@ impl + // Insert the blocks for block in blocks { - self.insert_block(block)?; + self.insert_block(block, StorageLocation::Database)?; durations_recorder.record_relative(metrics::Action::InsertBlock); } - // Write state and changesets to the database. - // Must be written after blocks because of the receipt lookup. - // TODO: should _these_ be moved to storagewriter? seems like storagewriter should be - // _above_ db provider - let mut storage_writer = UnifiedStorageWriter::from_database(self); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; + self.write_state(execution_outcome, OriginalValuesKnown::No, StorageLocation::Database)?; durations_recorder.record_relative(metrics::Action::InsertState); // insert hashes and intermediate merkle nodes @@ -3346,7 +3124,7 @@ impl PruneCheckpointWriter for DatabaseProvider StatsReader for DatabaseProvider { +impl StatsReader for DatabaseProvider { fn count_entries(&self) -> ProviderResult { let db_entries = self.tx.entries::()?; let static_file_entries = match self.static_file_provider.count_entries::() { @@ -3418,79 +3196,3 @@ impl DBProvider for DatabaseProvider self.prune_modes_ref() } } - -/// Helper method to recover senders for any blocks in the db which do not have senders. This -/// compares the length of the input senders [`Vec`], with the length of given transactions [`Vec`], -/// and will add to the input senders vec if there are more transactions. -/// -/// NOTE: This will modify the input senders list, which is why a mutable reference is required. -fn recover_block_senders( - senders: &mut Vec<(u64, Address)>, - transactions: &[(u64, TransactionSigned)], - first_transaction: u64, - last_transaction: u64, -) -> ProviderResult<()> { - // Recover senders manually if not found in db - // NOTE: Transactions are always guaranteed to be in the database whereas - // senders might be pruned. - if senders.len() != transactions.len() { - if senders.len() > transactions.len() { - error!(target: "providers::db", senders=%senders.len(), transactions=%transactions.len(), - first_tx=%first_transaction, last_tx=%last_transaction, - "unexpected senders and transactions mismatch"); - } - let missing = transactions.len().saturating_sub(senders.len()); - senders.reserve(missing); - // Find all missing senders, their corresponding tx numbers and indexes to the original - // `senders` vector at which the recovered senders will be inserted. 
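// Aside: the helper being deleted in this hunk performs a merge-style walk over
// two lists that are both sorted by tx number. A minimal sketch of that walk,
// returning just the tx numbers that lack a stored sender (types simplified;
// this is an illustration, not the removed code itself):
fn missing_tx_numbers(transactions: &[u64], senders: &[u64]) -> Vec<u64> {
    let mut missing = Vec::new();
    let mut known = senders.iter().peekable();
    for tx in transactions {
        if known.peek() == Some(&tx) {
            // A sender is already stored for this tx number; advance both sides.
            known.next();
        } else {
            // No sender entry here: this transaction needs signature recovery.
            missing.push(*tx);
        }
    }
    missing
}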
- let mut missing_senders = Vec::with_capacity(missing); - { - let mut senders = senders.iter().peekable(); - - // `transactions` contain all entries. `senders` contain _some_ of the senders for - // these transactions. Both are sorted and indexed by `TxNumber`. - // - // The general idea is to iterate on both `transactions` and `senders`, and advance - // the `senders` iteration only if it matches the current `transactions` entry's - // `TxNumber`. Otherwise, add the transaction to the list of missing senders. - for (i, (tx_number, transaction)) in transactions.iter().enumerate() { - if let Some((sender_tx_number, _)) = senders.peek() { - if sender_tx_number == tx_number { - // If current sender's `TxNumber` matches current transaction's - // `TxNumber`, advance the senders iterator. - senders.next(); - } else { - // If current sender's `TxNumber` doesn't match current transaction's - // `TxNumber`, add it to missing senders. - missing_senders.push((i, tx_number, transaction)); - } - } else { - // If there's no more senders left, but we're still iterating over - // transactions, add them to missing senders - missing_senders.push((i, tx_number, transaction)); - } - } - } - - // Recover senders - let recovered_senders = TransactionSigned::recover_signers( - missing_senders.iter().map(|(_, _, tx)| *tx).collect::>(), - missing_senders.len(), - ) - .ok_or(ProviderError::SenderRecoveryError)?; - - // Insert recovered senders along with tx numbers at the corresponding indexes to the - // original `senders` vector - for ((i, tx_number, _), sender) in missing_senders.into_iter().zip(recovered_senders) { - // Insert will put recovered senders at necessary positions and shift the rest - senders.insert(i, (*tx_number, sender)); - } - - // Debug assertions which are triggered during the test to ensure that all senders are - // present and sorted - debug_assert_eq!(senders.len(), transactions.len(), "missing one or more senders"); - debug_assert!(senders.iter().tuple_windows().all(|(a, b)| a.0 < b.0), "senders not sorted"); - } - - Ok(()) -} diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index c859ddba8a54..6631b5b1b31a 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -3,15 +3,17 @@ use crate::{ BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, FullExecutionDataProvider, HeaderProvider, - ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - StageCheckpointReader, StateProviderBox, StateProviderFactory, StaticFileProviderFactory, - TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, + NodePrimitivesProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, + ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, StateProviderFactory, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, + WithdrawalsProvider, }; +use alloy_consensus::Header; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, }; -use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, 
BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, @@ -19,12 +21,13 @@ use reth_blockchain_tree_api::{ }; use reth_chain_state::{ChainInfoTracker, ForkChoiceNotifications, ForkChoiceSubscriptions}; use reth_chainspec::{ChainInfo, EthereumHardforks}; +use reth_db::table::Value; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::NodeTypesWithDB; +use reth_node_types::{BlockTy, FullNodePrimitives, NodeTypes, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ - Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + Account, BlockWithSenders, EthPrimitives, Receipt, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -36,6 +39,7 @@ use std::{ sync::Arc, time::Instant, }; + use tracing::trace; mod database; @@ -66,10 +70,52 @@ pub use blockchain_provider::BlockchainProvider2; mod consistent; pub use consistent::ConsistentProvider; +/// Helper trait to bound [`NodeTypes`] so that combined with database they satisfy +/// [`ProviderNodeTypes`]. +pub trait NodeTypesForProvider +where + Self: NodeTypes< + ChainSpec: EthereumHardforks, + Storage: ChainStorage, + Primitives: FullNodePrimitives< + SignedTx: Value, + Receipt: Value, + BlockHeader = alloy_consensus::Header, + >, + >, +{ +} + +impl NodeTypesForProvider for T where + T: NodeTypes< + ChainSpec: EthereumHardforks, + Storage: ChainStorage, + Primitives: FullNodePrimitives< + SignedTx: Value, + Receipt: Value, + BlockHeader = alloy_consensus::Header, + >, + > +{ +} + /// Helper trait keeping common requirements of providers for [`NodeTypesWithDB`]. -pub trait ProviderNodeTypes: NodeTypesWithDB {} +pub trait ProviderNodeTypes +where + Self: NodeTypesForProvider + NodeTypesWithDB, +{ +} +impl ProviderNodeTypes for T where T: NodeTypesForProvider + NodeTypesWithDB {} -impl ProviderNodeTypes for T where T: NodeTypesWithDB {} +/// A helper trait with requirements for [`NodeTypesForProvider`] to be used within legacy +/// blockchain tree. +pub trait NodeTypesForTree: NodeTypesForProvider {} +impl NodeTypesForTree for T where T: NodeTypesForProvider {} + +/// Helper trait with requirements for [`ProviderNodeTypes`] to be used within legacy blockchain +/// tree. +pub trait TreeNodeTypes: ProviderNodeTypes + NodeTypesForTree {} +impl TreeNodeTypes for T where T: ProviderNodeTypes + NodeTypesForTree {} /// The main type for interacting with the blockchain. /// @@ -81,9 +127,9 @@ pub struct BlockchainProvider { /// Provider type used to access the database. database: ProviderFactory, /// The blockchain tree instance. - tree: Arc, + tree: Arc>, /// Tracks the chain info wrt forkchoice updates - chain_info: ChainInfoTracker, + chain_info: ChainInfoTracker, } impl Clone for BlockchainProvider { @@ -99,7 +145,7 @@ impl Clone for BlockchainProvider { impl BlockchainProvider { /// Sets the treeviewer for the provider. #[doc(hidden)] - pub fn with_tree(mut self, tree: Arc) -> Self { + pub fn with_tree(mut self, tree: Arc>) -> Self { self.tree = tree; self } @@ -111,7 +157,7 @@ impl BlockchainProvider { /// if it exists. 
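// Aside: `NodeTypesForProvider`, `ProviderNodeTypes`, `NodeTypesForTree` and
// `TreeNodeTypes` above all use the "trait alias" idiom: an otherwise empty
// trait that carries a bound set, plus a blanket impl so every satisfying type
// opts in automatically. A minimal standalone sketch with illustrative names:
trait StorageBackend: Send + Sync {}

trait ProviderReady: StorageBackend + Clone {}

// Blanket impl: anything meeting the bounds is automatically `ProviderReady`,
// so downstream code can write one bound instead of repeating the full set.
impl<T> ProviderReady for T where T: StorageBackend + Clone {}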
pub fn with_blocks( database: ProviderFactory, - tree: Arc, + tree: Arc>, latest: SealedHeader, finalized: Option, safe: Option, @@ -121,7 +167,10 @@ impl BlockchainProvider { /// Create a new provider using only the database and the tree, fetching the latest header from /// the database to initialize the provider. - pub fn new(database: ProviderFactory, tree: Arc) -> ProviderResult { + pub fn new( + database: ProviderFactory, + tree: Arc>, + ) -> ProviderResult { let provider = database.provider()?; let best = provider.chain_info()?; let latest_header = provider @@ -188,6 +237,10 @@ where } } +impl NodePrimitivesProvider for BlockchainProvider { + type Primitives = N::Primitives; +} + impl DatabaseProviderFactory for BlockchainProvider { type DB = N::DB; type Provider = as DatabaseProviderFactory>::Provider; @@ -203,7 +256,7 @@ impl DatabaseProviderFactory for BlockchainProvider { } impl StaticFileProviderFactory for BlockchainProvider { - fn static_file_provider(&self) -> StaticFileProvider { + fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } } @@ -295,8 +348,14 @@ impl BlockIdReader for BlockchainProvider { } } -impl BlockReader for BlockchainProvider { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { +impl BlockReader for BlockchainProvider { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { let block = match source { BlockSource::Any => { // check database first @@ -315,22 +374,26 @@ impl BlockReader for BlockchainProvider { Ok(block) } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { match id { BlockHashOrNumber::Hash(hash) => self.find_block_by_hash(hash, BlockSource::Any), BlockHashOrNumber::Number(num) => self.database.block_by_number(num), } } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.tree.pending_block()) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(self.tree.pending_block_with_senders()) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { Ok(self.tree.pending_block_and_receipts()) } @@ -355,7 +418,7 @@ impl BlockReader for BlockchainProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.block_with_senders(id, transaction_kind) } @@ -363,53 +426,55 @@ impl BlockReader for BlockchainProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.sealed_block_with_senders(id, transaction_kind) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.database.block_range(range) } fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.sealed_block_with_senders_range(range) } } impl TransactionsProvider for BlockchainProvider { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { 
self.database.transaction_id(tx_hash) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.database.transaction_by_id(id) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { - self.database.transaction_by_id_no_hash(id) + ) -> ProviderResult> { + self.database.transaction_by_id_unhashed(id) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.database.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { self.database.transaction_by_hash_with_meta(tx_hash) } @@ -420,21 +485,21 @@ impl TransactionsProvider for BlockchainProvider { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.database.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.database.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.database.transactions_by_tx_range(range) } @@ -451,27 +516,32 @@ impl TransactionsProvider for BlockchainProvider { } impl ReceiptProvider for BlockchainProvider { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.database.receipt(id) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { self.database.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { self.database.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.database.receipts_by_tx_range(range) } } -impl ReceiptProviderIdExt for BlockchainProvider { +impl ReceiptProviderIdExt for BlockchainProvider { fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { match block { BlockId::Hash(rpc_block_hash) => { @@ -808,7 +878,7 @@ impl BlockReaderIdExt for BlockchainProvider where Self: BlockReader + ReceiptProviderIdExt, { - fn block_by_id(&self, id: BlockId) -> ProviderResult> { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { BlockId::Number(num) => self.block_by_number_or_tag(num), BlockId::Hash(hash) => { @@ -847,34 +917,20 @@ where BlockNumberOrTag::Latest => Ok(Some(self.chain_info.get_canonical_head())), BlockNumberOrTag::Finalized => Ok(self.chain_info.get_finalized_header()), BlockNumberOrTag::Safe => Ok(self.chain_info.get_safe_header()), - BlockNumberOrTag::Earliest => self.header_by_number(0)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), + BlockNumberOrTag::Earliest => self + .header_by_number(0)? 
+ .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), BlockNumberOrTag::Pending => Ok(self.tree.pending_header()), - BlockNumberOrTag::Number(num) => self.header_by_number(num)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), + BlockNumberOrTag::Number(num) => self + .header_by_number(num)? + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), } } fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { Ok(match id { BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }), + BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), }) } @@ -907,7 +963,7 @@ impl BlockchainTreePendingStateProvider for BlockchainProv } impl CanonStateSubscriptions for BlockchainProvider { - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.tree.subscribe_to_canonical_state() } } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 56a1d057e704..ad36a4a5ab3e 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -1,6 +1,6 @@ use crate::{ - providers::{state::macros::delegate_provider_impls, StaticFileProvider}, - AccountReader, BlockHashReader, ProviderError, StateProvider, StateRootProvider, + providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, + ProviderError, StateProvider, StateRootProvider, }; use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{ @@ -14,14 +14,17 @@ use reth_db_api::{ table::Table, transaction::DbTx, }; -use reth_primitives::{Account, Bytecode, StaticFileSegment}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_primitives::{Account, Bytecode}; +use reth_storage_api::{ + BlockNumReader, DBProvider, StateCommitmentProvider, StateProofProvider, StorageRootProvider, +}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ proof::{Proof, StorageProof}, updates::TrieUpdates, witness::TrieWitness, - AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, + AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageMultiProof, + StorageRoot, TrieInput, }; use reth_trie_db::{ DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot, @@ -41,15 +44,13 @@ use std::fmt::Debug; /// - [`tables::AccountChangeSets`] /// - [`tables::StorageChangeSets`] #[derive(Debug)] -pub struct HistoricalStateProviderRef<'b, TX: DbTx> { - /// Transaction - tx: &'b TX, +pub struct HistoricalStateProviderRef<'b, Provider> { + /// Database provider + provider: &'b Provider, /// Block number is main index for the history state of accounts and storages. block_number: BlockNumber, /// Lowest blocks at which different parts of the state are available. 
lowest_available_blocks: LowestAvailableBlocks, - /// Static File provider - static_file_provider: StaticFileProvider, } #[derive(Debug, Eq, PartialEq)] @@ -60,25 +61,22 @@ pub enum HistoryInfo { MaybeInPlainState, } -impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { +impl<'b, Provider: DBProvider + BlockNumReader + StateCommitmentProvider> + HistoricalStateProviderRef<'b, Provider> +{ /// Create new `StateProvider` for historical block number - pub fn new( - tx: &'b TX, - block_number: BlockNumber, - static_file_provider: StaticFileProvider, - ) -> Self { - Self { tx, block_number, lowest_available_blocks: Default::default(), static_file_provider } + pub fn new(provider: &'b Provider, block_number: BlockNumber) -> Self { + Self { provider, block_number, lowest_available_blocks: Default::default() } } /// Create new `StateProvider` for historical block number and lowest block numbers at which /// account & storage histories are available. pub const fn new_with_lowest_available_blocks( - tx: &'b TX, + provider: &'b Provider, block_number: BlockNumber, lowest_available_blocks: LowestAvailableBlocks, - static_file_provider: StaticFileProvider, ) -> Self { - Self { tx, block_number, lowest_available_blocks, static_file_provider } + Self { provider, block_number, lowest_available_blocks } } /// Lookup an account in the `AccountsHistory` table @@ -117,15 +115,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { /// Checks and returns `true` if distance to historical block exceeds the provided limit. fn check_distance_against_limit(&self, limit: u64) -> ProviderResult { let tip = self - .tx - .cursor_read::()? - .last()? - .map(|(tip, _)| tip) - .or_else(|| { - self.static_file_provider.get_highest_static_file_block(StaticFileSegment::Headers) - }) - .ok_or(ProviderError::BestBlockNotFound)?; + let tip = self.provider.last_block_number()?; Ok(tip.saturating_sub(self.block_number) > limit) } @@ -146,7 +136,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { ); } - Ok(HashedPostState::from_reverts(self.tx, self.block_number)?) + Ok(HashedPostState::from_reverts(self.tx(), self.block_number)?) } /// Retrieve revert hashed storage for this history provider and target address. @@ -163,7 +153,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { ); } - Ok(HashedStorage::from_reverts(self.tx, address, self.block_number)?) + Ok(HashedStorage::from_reverts(self.tx(), address, self.block_number)?) } fn history_info( @@ -175,7 +165,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { where T: Table, { - let mut cursor = self.tx.cursor_read::()?; + let mut cursor = self.tx().cursor_read::()?; // Lookup the history chunk in the history index. If the key does not appear in the // index, the first chunk for the next key will be returned so we filter out chunks that @@ -248,13 +238,21 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { } } -impl AccountReader for HistoricalStateProviderRef<'_, TX> { +impl HistoricalStateProviderRef<'_, Provider> { + fn tx(&self) -> &Provider::Tx { + self.provider.tx_ref() + } +} + +impl AccountReader + for HistoricalStateProviderRef<'_, Provider> +{ /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { match self.account_history_lookup(address)? { HistoryInfo::NotYetWritten => Ok(None), HistoryInfo::InChangeset(changeset_block_number) => Ok(self - .tx + .tx() .cursor_dup_read::()? .seek_by_key_subkey(changeset_block_number, address)?
.filter(|acc| acc.address == address) @@ -264,21 +262,18 @@ impl AccountReader for HistoricalStateProviderRef<'_, TX> { })? .info), HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => { - Ok(self.tx.get::(address)?) + Ok(self.tx().get::(address)?) } } } } -impl BlockHashReader for HistoricalStateProviderRef<'_, TX> { +impl BlockHashReader + for HistoricalStateProviderRef<'_, Provider> +{ /// Get block hash by number. fn block_hash(&self, number: u64) -> ProviderResult> { - self.static_file_provider.get_with_static_file_or_database( - StaticFileSegment::Headers, - number, - |static_file| static_file.block_hash(number), - || Ok(self.tx.get::(number)?), - ) + self.provider.block_hash(number) } fn canonical_hashes_range( @@ -286,37 +281,23 @@ impl BlockHashReader for HistoricalStateProviderRef<'_, TX> { start: BlockNumber, end: BlockNumber, ) -> ProviderResult> { - self.static_file_provider.get_range_with_static_file_or_database( - StaticFileSegment::Headers, - start..end, - |static_file, range, _| static_file.canonical_hashes_range(range.start, range.end), - |range, _| { - self.tx - .cursor_read::() - .map(|mut cursor| { - cursor - .walk_range(range)? - .map(|result| result.map(|(_, hash)| hash).map_err(Into::into)) - .collect::>>() - })? - .map_err(Into::into) - }, - |_| true, - ) + self.provider.canonical_hashes_range(start, end) } } -impl StateRootProvider for HistoricalStateProviderRef<'_, TX> { +impl StateRootProvider + for HistoricalStateProviderRef<'_, Provider> +{ fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { let mut revert_state = self.revert_state()?; revert_state.extend(hashed_state); - StateRoot::overlay_root(self.tx, revert_state) + StateRoot::overlay_root(self.tx(), revert_state) .map_err(|err| ProviderError::Database(err.into())) } fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult { input.prepend(self.revert_state()?); - StateRoot::overlay_root_from_nodes(self.tx, input) + StateRoot::overlay_root_from_nodes(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } @@ -326,7 +307,7 @@ impl StateRootProvider for HistoricalStateProviderRef<'_, TX> { ) -> ProviderResult<(B256, TrieUpdates)> { let mut revert_state = self.revert_state()?; revert_state.extend(hashed_state); - StateRoot::overlay_root_with_updates(self.tx, revert_state) + StateRoot::overlay_root_with_updates(self.tx(), revert_state) .map_err(|err| ProviderError::Database(err.into())) } @@ -335,12 +316,14 @@ impl StateRootProvider for HistoricalStateProviderRef<'_, TX> { mut input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { input.prepend(self.revert_state()?); - StateRoot::overlay_root_from_nodes_with_updates(self.tx, input) + StateRoot::overlay_root_from_nodes_with_updates(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } } -impl StorageRootProvider for HistoricalStateProviderRef<'_, TX> { +impl StorageRootProvider + for HistoricalStateProviderRef<'_, Provider> +{ fn storage_root( &self, address: Address, @@ -348,7 +331,7 @@ impl StorageRootProvider for HistoricalStateProviderRef<'_, TX> { ) -> ProviderResult { let mut revert_storage = self.revert_storage(address)?; revert_storage.extend(&hashed_storage); - StorageRoot::overlay_root(self.tx, address, revert_storage) + StorageRoot::overlay_root(self.tx(), address, revert_storage) .map_err(|err| ProviderError::Database(err.into())) } @@ -360,12 +343,26 @@ impl StorageRootProvider for HistoricalStateProviderRef<'_, TX> { ) -> ProviderResult { let mut revert_storage = 
self.revert_storage(address)?; revert_storage.extend(&hashed_storage); - StorageProof::overlay_storage_proof(self.tx, address, slot, revert_storage) + StorageProof::overlay_storage_proof(self.tx(), address, slot, revert_storage) + .map_err(Into::::into) + } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + let mut revert_storage = self.revert_storage(address)?; + revert_storage.extend(&hashed_storage); + StorageProof::overlay_storage_multiproof(self.tx(), address, slots, revert_storage) .map_err(Into::::into) } } -impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { +impl StateProofProvider + for HistoricalStateProviderRef<'_, Provider> +{ /// Get account and storage proofs. fn proof( &self, @@ -374,7 +371,7 @@ impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { slots: &[B256], ) -> ProviderResult { input.prepend(self.revert_state()?); - Proof::overlay_account_proof(self.tx, input, address, slots) + Proof::overlay_account_proof(self.tx(), input, address, slots) .map_err(Into::::into) } @@ -384,7 +381,7 @@ impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { targets: HashMap>, ) -> ProviderResult { input.prepend(self.revert_state()?); - Proof::overlay_multiproof(self.tx, input, targets).map_err(Into::::into) + Proof::overlay_multiproof(self.tx(), input, targets).map_err(Into::::into) } fn witness( @@ -393,11 +390,13 @@ impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { target: HashedPostState, ) -> ProviderResult> { input.prepend(self.revert_state()?); - TrieWitness::overlay_witness(self.tx, input, target).map_err(Into::::into) + TrieWitness::overlay_witness(self.tx(), input, target).map_err(Into::::into) } } -impl StateProvider for HistoricalStateProviderRef<'_, TX> { +impl + StateProvider for HistoricalStateProviderRef<'_, Provider> +{ /// Get storage. fn storage( &self, @@ -407,7 +406,7 @@ impl StateProvider for HistoricalStateProviderRef<'_, TX> { match self.storage_history_lookup(address, storage_key)? { HistoryInfo::NotYetWritten => Ok(None), HistoryInfo::InChangeset(changeset_block_number) => Ok(Some( - self.tx + self.tx() .cursor_dup_read::()? .seek_by_key_subkey((changeset_block_number, address).into(), storage_key)? .filter(|entry| entry.key == storage_key) @@ -419,7 +418,7 @@ impl StateProvider for HistoricalStateProviderRef<'_, TX> { .value, )), HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => Ok(self - .tx + .tx() .cursor_dup_read::()? .seek_by_key_subkey(address, storage_key)? .filter(|entry| entry.key == storage_key) @@ -430,32 +429,28 @@ impl StateProvider for HistoricalStateProviderRef<'_, TX> { /// Get account code by its hash fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { - self.tx.get::(code_hash).map_err(Into::into) + self.tx().get::(code_hash).map_err(Into::into) } } /// State provider for a given block number. /// For more detailed description, see [`HistoricalStateProviderRef`]. #[derive(Debug)] -pub struct HistoricalStateProvider { - /// Database transaction - tx: TX, +pub struct HistoricalStateProvider { + /// Database provider. + provider: Provider, /// State at the block number is the main indexer of the state. block_number: BlockNumber, /// Lowest blocks at which different parts of the state are available. 
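// Aside: the historical reads in this file all funnel through one three-way
// decision, `HistoryInfo`. A minimal sketch of the dispatch for a storage slot
// (table access elided; the enum and return values below are illustrative and
// only mirror the `match` in `storage()` above):
enum HistoryInfoSketch {
    NotYetWritten,
    InChangeset(u64),
    InPlainState,
    MaybeInPlainState,
}

fn storage_source(info: HistoryInfoSketch) -> Option<&'static str> {
    match info {
        // The slot did not exist yet at the target block.
        HistoryInfoSketch::NotYetWritten => None,
        // The old value was captured by the changeset written at that block.
        HistoryInfoSketch::InChangeset(_block) => Some("StorageChangeSets"),
        // Never overwritten since (or history unavailable): read plain state.
        HistoryInfoSketch::InPlainState | HistoryInfoSketch::MaybeInPlainState => {
            Some("PlainStorageState")
        }
    }
}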
lowest_available_blocks: LowestAvailableBlocks, - /// Static File provider - static_file_provider: StaticFileProvider, } -impl HistoricalStateProvider { +impl + HistoricalStateProvider +{ /// Create new `StateProvider` for historical block number - pub fn new( - tx: TX, - block_number: BlockNumber, - static_file_provider: StaticFileProvider, - ) -> Self { - Self { tx, block_number, lowest_available_blocks: Default::default(), static_file_provider } + pub fn new(provider: Provider, block_number: BlockNumber) -> Self { + Self { provider, block_number, lowest_available_blocks: Default::default() } } /// Set the lowest block number at which the account history is available. @@ -478,18 +473,17 @@ impl HistoricalStateProvider { /// Returns a new provider that takes the `TX` as reference #[inline(always)] - fn as_ref(&self) -> HistoricalStateProviderRef<'_, TX> { + const fn as_ref(&self) -> HistoricalStateProviderRef<'_, Provider> { HistoricalStateProviderRef::new_with_lowest_available_blocks( - &self.tx, + &self.provider, self.block_number, self.lowest_available_blocks, - self.static_file_provider.clone(), ) } } // Delegates all provider impls to [HistoricalStateProviderRef] -delegate_provider_impls!(HistoricalStateProvider where [TX: DbTx]); +delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + StateCommitmentProvider]); /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. @@ -525,7 +519,6 @@ mod tests { providers::state::historical::{HistoryInfo, LowestAvailableBlocks}, test_utils::create_test_provider_factory, AccountReader, HistoricalStateProvider, HistoricalStateProviderRef, StateProvider, - StaticFileProviderFactory, }; use alloy_primitives::{address, b256, Address, B256, U256}; use reth_db::{tables, BlockNumberList}; @@ -534,6 +527,10 @@ mod tests { transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, StorageEntry}; + use reth_storage_api::{ + BlockHashReader, BlockNumReader, DBProvider, DatabaseProviderFactory, + StateCommitmentProvider, + }; use reth_storage_errors::provider::ProviderError; const ADDRESS: Address = address!("0000000000000000000000000000000000000001"); @@ -542,7 +539,9 @@ mod tests { const fn assert_state_provider() {} #[allow(dead_code)] - const fn assert_historical_state_provider() { + const fn assert_historical_state_provider< + T: DBProvider + BlockNumReader + BlockHashReader + StateCommitmentProvider, + >() { assert_state_provider::>(); } @@ -550,7 +549,6 @@ mod tests { fn history_provider_get_account() { let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap().into_tx(); - let static_file_provider = factory.static_file_provider(); tx.put::( ShardedKey { key: ADDRESS, highest_block_number: 7 }, @@ -610,63 +608,46 @@ mod tests { tx.put::(HIGHER_ADDRESS, higher_acc_plain).unwrap(); tx.commit().unwrap(); - let tx = factory.provider().unwrap().into_tx(); + let db = factory.provider().unwrap(); // run + assert_eq!(HistoricalStateProviderRef::new(&db, 1).basic_account(ADDRESS), Ok(None)); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1, static_file_provider.clone()) - .basic_account(ADDRESS), - Ok(None) - ); - assert_eq!( - HistoricalStateProviderRef::new(&tx, 2, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 2).basic_account(ADDRESS), Ok(Some(acc_at3)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 3, static_file_provider.clone()) - 
.basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 3).basic_account(ADDRESS), Ok(Some(acc_at3)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 4, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 4).basic_account(ADDRESS), Ok(Some(acc_at7)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 7, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 7).basic_account(ADDRESS), Ok(Some(acc_at7)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 9, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 9).basic_account(ADDRESS), Ok(Some(acc_at10)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 10, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 10).basic_account(ADDRESS), Ok(Some(acc_at10)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 11, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 11).basic_account(ADDRESS), Ok(Some(acc_at15)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 16, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 16).basic_account(ADDRESS), Ok(Some(acc_plain)) ); + assert_eq!(HistoricalStateProviderRef::new(&db, 1).basic_account(HIGHER_ADDRESS), Ok(None)); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1, static_file_provider.clone()) - .basic_account(HIGHER_ADDRESS), - Ok(None) - ); - assert_eq!( - HistoricalStateProviderRef::new(&tx, 1000, static_file_provider) - .basic_account(HIGHER_ADDRESS), + HistoricalStateProviderRef::new(&db, 1000).basic_account(HIGHER_ADDRESS), Ok(Some(higher_acc_plain)) ); } @@ -675,7 +656,6 @@ mod tests { fn history_provider_get_storage() { let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap().into_tx(); - let static_file_provider = factory.static_file_provider(); tx.put::( StorageShardedKey { @@ -722,57 +702,44 @@ mod tests { tx.put::(HIGHER_ADDRESS, higher_entry_plain).unwrap(); tx.commit().unwrap(); - let tx = factory.provider().unwrap().into_tx(); + let db = factory.provider().unwrap(); // run + assert_eq!(HistoricalStateProviderRef::new(&db, 0).storage(ADDRESS, STORAGE), Ok(None)); assert_eq!( - HistoricalStateProviderRef::new(&tx, 0, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), - Ok(None) - ); - assert_eq!( - HistoricalStateProviderRef::new(&tx, 3, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 3).storage(ADDRESS, STORAGE), Ok(Some(U256::ZERO)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 4, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 4).storage(ADDRESS, STORAGE), Ok(Some(entry_at7.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 7, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 7).storage(ADDRESS, STORAGE), Ok(Some(entry_at7.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 9, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 9).storage(ADDRESS, STORAGE), Ok(Some(entry_at10.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 10, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 10).storage(ADDRESS, STORAGE), Ok(Some(entry_at10.value)) ); assert_eq!( - 
HistoricalStateProviderRef::new(&tx, 11, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 11).storage(ADDRESS, STORAGE), Ok(Some(entry_at15.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 16, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 16).storage(ADDRESS, STORAGE), Ok(Some(entry_plain.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1, static_file_provider.clone()) - .storage(HIGHER_ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 1).storage(HIGHER_ADDRESS, STORAGE), Ok(None) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1000, static_file_provider) - .storage(HIGHER_ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 1000).storage(HIGHER_ADDRESS, STORAGE), Ok(Some(higher_entry_plain.value)) ); } @@ -780,19 +747,17 @@ mod tests { #[test] fn history_provider_unavailable() { let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap().into_tx(); - let static_file_provider = factory.static_file_provider(); + let db = factory.database_provider_rw().unwrap(); // provider block_number < lowest available block number, // i.e. state at provider block is pruned let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( - &tx, + &db, 2, LowestAvailableBlocks { account_history_block_number: Some(3), storage_history_block_number: Some(3), }, - static_file_provider.clone(), ); assert_eq!( provider.account_history_lookup(ADDRESS), @@ -806,13 +771,12 @@ mod tests { // provider block_number == lowest available block number, // i.e. state at provider block is available let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( - &tx, + &db, 2, LowestAvailableBlocks { account_history_block_number: Some(2), storage_history_block_number: Some(2), }, - static_file_provider.clone(), ); assert_eq!(provider.account_history_lookup(ADDRESS), Ok(HistoryInfo::MaybeInPlainState)); assert_eq!( @@ -823,13 +787,12 @@ mod tests { // provider block_number == lowest available block number, // i.e. 
state at provider block is available let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( - &tx, + &db, 2, LowestAvailableBlocks { account_history_block_number: Some(1), storage_history_block_number: Some(1), }, - static_file_provider, ); assert_eq!(provider.account_history_lookup(ADDRESS), Ok(HistoryInfo::MaybeInPlainState)); assert_eq!( diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index fdcbfc4937fe..a2ec4972d105 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -1,24 +1,24 @@ use crate::{ - providers::{state::macros::delegate_provider_impls, StaticFileProvider}, - AccountReader, BlockHashReader, StateProvider, StateRootProvider, + providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, + StateProvider, StateRootProvider, }; use alloy_primitives::{ map::{HashMap, HashSet}, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, }; use reth_db::tables; -use reth_db_api::{ - cursor::{DbCursorRO, DbDupCursorRO}, - transaction::DbTx, +use reth_db_api::{cursor::DbDupCursorRO, transaction::DbTx}; +use reth_primitives::{Account, Bytecode}; +use reth_storage_api::{ + DBProvider, StateCommitmentProvider, StateProofProvider, StorageRootProvider, }; -use reth_primitives::{Account, Bytecode, StaticFileSegment}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{ proof::{Proof, StorageProof}, updates::TrieUpdates, witness::TrieWitness, - AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, + AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageMultiProof, + StorageRoot, TrieInput, }; use reth_trie_db::{ DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, @@ -26,37 +26,33 @@ use reth_trie_db::{ }; /// State provider over latest state that takes tx reference. +/// +/// Wraps a [`DBProvider`] to get access to database. #[derive(Debug)] -pub struct LatestStateProviderRef<'b, TX: DbTx> { - /// database transaction - tx: &'b TX, - /// Static File provider - static_file_provider: StaticFileProvider, -} +pub struct LatestStateProviderRef<'b, Provider>(&'b Provider); -impl<'b, TX: DbTx> LatestStateProviderRef<'b, TX> { +impl<'b, Provider: DBProvider> LatestStateProviderRef<'b, Provider> { /// Create new state provider - pub const fn new(tx: &'b TX, static_file_provider: StaticFileProvider) -> Self { - Self { tx, static_file_provider } + pub const fn new(provider: &'b Provider) -> Self { + Self(provider) + } + + fn tx(&self) -> &Provider::Tx { + self.0.tx_ref() } } -impl AccountReader for LatestStateProviderRef<'_, TX> { +impl AccountReader for LatestStateProviderRef<'_, Provider> { /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { - self.tx.get::(address).map_err(Into::into) + self.tx().get::(address).map_err(Into::into) } } -impl BlockHashReader for LatestStateProviderRef<'_, TX> { +impl BlockHashReader for LatestStateProviderRef<'_, Provider> { /// Get block hash by number. 
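// Aside: `LatestStateProviderRef` above is now a single-field newtype over a
// provider reference, with a private `tx()` accessor doing the delegation. A
// minimal sketch of the wrap-and-delegate shape (the trait and names here are
// invented for illustration):
trait TxAccess {
    type Tx;
    fn tx_ref(&self) -> &Self::Tx;
}

struct RefWrapper<'a, P>(&'a P);

impl<'a, P: TxAccess> RefWrapper<'a, P> {
    fn new(provider: &'a P) -> Self {
        Self(provider)
    }

    // Mirrors the `tx()` convenience accessor used throughout this file.
    fn tx(&self) -> &P::Tx {
        self.0.tx_ref()
    }
}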
fn block_hash(&self, number: u64) -> ProviderResult> { - self.static_file_provider.get_with_static_file_or_database( - StaticFileSegment::Headers, - number, - |static_file| static_file.block_hash(number), - || Ok(self.tx.get::(number)?), - ) + self.0.block_hash(number) } fn canonical_hashes_range( @@ -64,34 +60,20 @@ impl BlockHashReader for LatestStateProviderRef<'_, TX> { start: BlockNumber, end: BlockNumber, ) -> ProviderResult> { - self.static_file_provider.get_range_with_static_file_or_database( - StaticFileSegment::Headers, - start..end, - |static_file, range, _| static_file.canonical_hashes_range(range.start, range.end), - |range, _| { - self.tx - .cursor_read::() - .map(|mut cursor| { - cursor - .walk_range(range)? - .map(|result| result.map(|(_, hash)| hash).map_err(Into::into)) - .collect::>>() - })? - .map_err(Into::into) - }, - |_| true, - ) + self.0.canonical_hashes_range(start, end) } } -impl StateRootProvider for LatestStateProviderRef<'_, TX> { +impl StateRootProvider + for LatestStateProviderRef<'_, Provider> +{ fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { - StateRoot::overlay_root(self.tx, hashed_state) + StateRoot::overlay_root(self.tx(), hashed_state) .map_err(|err| ProviderError::Database(err.into())) } fn state_root_from_nodes(&self, input: TrieInput) -> ProviderResult { - StateRoot::overlay_root_from_nodes(self.tx, input) + StateRoot::overlay_root_from_nodes(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } @@ -99,7 +81,7 @@ impl StateRootProvider for LatestStateProviderRef<'_, TX> { &self, hashed_state: HashedPostState, ) -> ProviderResult<(B256, TrieUpdates)> { - StateRoot::overlay_root_with_updates(self.tx, hashed_state) + StateRoot::overlay_root_with_updates(self.tx(), hashed_state) .map_err(|err| ProviderError::Database(err.into())) } @@ -107,18 +89,20 @@ impl StateRootProvider for LatestStateProviderRef<'_, TX> { &self, input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { - StateRoot::overlay_root_from_nodes_with_updates(self.tx, input) + StateRoot::overlay_root_from_nodes_with_updates(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } } -impl StorageRootProvider for LatestStateProviderRef<'_, TX> { +impl StorageRootProvider + for LatestStateProviderRef<'_, Provider> +{ fn storage_root( &self, address: Address, hashed_storage: HashedStorage, ) -> ProviderResult { - StorageRoot::overlay_root(self.tx, address, hashed_storage) + StorageRoot::overlay_root(self.tx(), address, hashed_storage) .map_err(|err| ProviderError::Database(err.into())) } @@ -128,19 +112,31 @@ impl StorageRootProvider for LatestStateProviderRef<'_, TX> { slot: B256, hashed_storage: HashedStorage, ) -> ProviderResult { - StorageProof::overlay_storage_proof(self.tx, address, slot, hashed_storage) + StorageProof::overlay_storage_proof(self.tx(), address, slot, hashed_storage) + .map_err(Into::::into) + } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + StorageProof::overlay_storage_multiproof(self.tx(), address, slots, hashed_storage) .map_err(Into::::into) } } -impl StateProofProvider for LatestStateProviderRef<'_, TX> { +impl StateProofProvider + for LatestStateProviderRef<'_, Provider> +{ fn proof( &self, input: TrieInput, address: Address, slots: &[B256], ) -> ProviderResult { - Proof::overlay_account_proof(self.tx, input, address, slots) + Proof::overlay_account_proof(self.tx(), input, address, slots) .map_err(Into::::into) } @@ 
-149,7 +145,7 @@ impl StateProofProvider for LatestStateProviderRef<'_, TX> { input: TrieInput, targets: HashMap>, ) -> ProviderResult { - Proof::overlay_multiproof(self.tx, input, targets).map_err(Into::::into) + Proof::overlay_multiproof(self.tx(), input, targets).map_err(Into::::into) } fn witness( @@ -157,18 +153,20 @@ impl StateProofProvider for LatestStateProviderRef<'_, TX> { input: TrieInput, target: HashedPostState, ) -> ProviderResult> { - TrieWitness::overlay_witness(self.tx, input, target).map_err(Into::::into) + TrieWitness::overlay_witness(self.tx(), input, target).map_err(Into::::into) } } -impl StateProvider for LatestStateProviderRef<'_, TX> { +impl StateProvider + for LatestStateProviderRef<'_, Provider> +{ /// Get storage. fn storage( &self, account: Address, storage_key: StorageKey, ) -> ProviderResult> { - let mut cursor = self.tx.cursor_dup_read::()?; + let mut cursor = self.tx().cursor_dup_read::()?; if let Some(entry) = cursor.seek_by_key_subkey(account, storage_key)? { if entry.key == storage_key { return Ok(Some(entry.value)) @@ -179,34 +177,29 @@ impl StateProvider for LatestStateProviderRef<'_, TX> { /// Get account code by its hash fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { - self.tx.get::(code_hash).map_err(Into::into) + self.tx().get::(code_hash).map_err(Into::into) } } /// State provider for the latest state. #[derive(Debug)] -pub struct LatestStateProvider { - /// database transaction - db: TX, - /// Static File provider - static_file_provider: StaticFileProvider, -} +pub struct LatestStateProvider(Provider); -impl LatestStateProvider { +impl LatestStateProvider { /// Create new state provider - pub const fn new(db: TX, static_file_provider: StaticFileProvider) -> Self { - Self { db, static_file_provider } + pub const fn new(db: Provider) -> Self { + Self(db) } /// Returns a new provider that takes the `TX` as reference #[inline(always)] - fn as_ref(&self) -> LatestStateProviderRef<'_, TX> { - LatestStateProviderRef::new(&self.db, self.static_file_provider.clone()) + const fn as_ref(&self) -> LatestStateProviderRef<'_, Provider> { + LatestStateProviderRef::new(&self.0) } } // Delegates all provider impls to [LatestStateProviderRef] -delegate_provider_impls!(LatestStateProvider where [TX: DbTx]); +delegate_provider_impls!(LatestStateProvider where [Provider: DBProvider + BlockHashReader + StateCommitmentProvider]); #[cfg(test)] mod tests { @@ -214,7 +207,9 @@ mod tests { const fn assert_state_provider() {} #[allow(dead_code)] - const fn assert_latest_state_provider() { + const fn assert_latest_state_provider< + T: DBProvider + BlockHashReader + StateCommitmentProvider, + >() { assert_state_provider::>(); } } diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index b90924354c43..f2648fb15e6a 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -50,6 +50,7 @@ macro_rules! delegate_provider_impls { StorageRootProvider $(where [$($generics)*])? 
{ fn storage_root(&self, address: alloy_primitives::Address, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; fn storage_proof(&self, address: alloy_primitives::Address, slot: alloy_primitives::B256, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; + fn storage_multiproof(&self, address: alloy_primitives::Address, slots: &[alloy_primitives::B256], storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; } StateProofProvider $(where [$($generics)*])? { fn proof(&self, input: reth_trie::TrieInput, address: alloy_primitives::Address, slots: &[alloy_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 8d1dbd117cfb..659b093d9d6a 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -6,44 +6,59 @@ use crate::{ to_range, BlockHashReader, BlockNumReader, HeaderProvider, ReceiptProvider, TransactionsProvider, }; -use alloy_eips::BlockHashOrNumber; +use alloy_consensus::Header; +use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chainspec::ChainInfo; -use reth_db::static_file::{HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}; -use reth_db_api::models::CompactU256; -use reth_primitives::{ - Header, Receipt, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, +use reth_db::{ + static_file::{ + BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, + TDWithHashMask, TotalDifficultyMask, TransactionMask, + }, + table::Decompress, }; +use reth_node_types::NodePrimitives; +use reth_primitives::{transaction::recover_signers, SealedHeader, TransactionMeta}; +use reth_primitives_traits::SignedTransaction; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ + fmt::Debug, ops::{Deref, RangeBounds}, sync::Arc, }; /// Provider over a specific `NippyJar` and range. #[derive(Debug)] -pub struct StaticFileJarProvider<'a> { +pub struct StaticFileJarProvider<'a, N> { /// Main static file segment jar: LoadedJarRef<'a>, /// Another kind of static file segment to help query data from the main one. auxiliary_jar: Option>, + /// Metrics for the static files. metrics: Option>, + /// Node primitives + _pd: std::marker::PhantomData, } -impl<'a> Deref for StaticFileJarProvider<'a> { +impl<'a, N: NodePrimitives> Deref for StaticFileJarProvider<'a, N> { type Target = LoadedJarRef<'a>; fn deref(&self) -> &Self::Target { &self.jar } } -impl<'a> From> for StaticFileJarProvider<'a> { +impl<'a, N: NodePrimitives> From> for StaticFileJarProvider<'a, N> { fn from(value: LoadedJarRef<'a>) -> Self { - StaticFileJarProvider { jar: value, auxiliary_jar: None, metrics: None } + StaticFileJarProvider { + jar: value, + auxiliary_jar: None, + metrics: None, + _pd: Default::default(), + } } } -impl<'a> StaticFileJarProvider<'a> { +impl<'a, N: NodePrimitives> StaticFileJarProvider<'a, N> { /// Provides a cursor for more granular data access. 
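// Aside: the `_pd: PhantomData<N>` field added to `StaticFileJarProvider`
// above is the standard way to carry a type parameter that only surfaces in
// associated types, never in stored data. Minimal sketch (names illustrative):
use std::marker::PhantomData;

struct JarProviderSketch<N> {
    bytes: Vec<u8>,
    // Zero-sized marker tying the struct to `N` without storing an `N`.
    _pd: PhantomData<N>,
}

impl<N> JarProviderSketch<N> {
    fn new(bytes: Vec<u8>) -> Self {
        Self { bytes, _pd: PhantomData }
    }
}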
pub fn cursor<'b>(&'b self) -> ProviderResult> where @@ -75,11 +90,11 @@ impl<'a> StaticFileJarProvider<'a> { } } -impl HeaderProvider for StaticFileJarProvider<'_> { +impl HeaderProvider for StaticFileJarProvider<'_, N> { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { Ok(self .cursor()? - .get_two::>(block_hash.into())? + .get_two::>(block_hash.into())? .filter(|(_, hash)| hash == block_hash) .map(|(header, _)| header)) } @@ -91,13 +106,13 @@ impl HeaderProvider for StaticFileJarProvider<'_> { fn header_td(&self, block_hash: &BlockHash) -> ProviderResult> { Ok(self .cursor()? - .get_two::>(block_hash.into())? + .get_two::(block_hash.into())? .filter(|(_, hash)| hash == block_hash) .map(|(td, _)| td.into())) } fn header_td_by_number(&self, num: BlockNumber) -> ProviderResult> { - Ok(self.cursor()?.get_one::>(num.into())?.map(Into::into)) + Ok(self.cursor()?.get_one::(num.into())?.map(Into::into)) } fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { @@ -118,7 +133,7 @@ impl HeaderProvider for StaticFileJarProvider<'_> { fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { Ok(self .cursor()? - .get_two::>(number.into())? + .get_two::>(number.into())? .map(|(header, hash)| SealedHeader::new(header, hash))) } @@ -134,7 +149,7 @@ impl HeaderProvider for StaticFileJarProvider<'_> { for number in range { if let Some((header, hash)) = - cursor.get_two::>(number.into())? + cursor.get_two::>(number.into())? { let sealed = SealedHeader::new(header, hash); if !predicate(&sealed) { @@ -147,9 +162,9 @@ impl HeaderProvider for StaticFileJarProvider<'_> { } } -impl BlockHashReader for StaticFileJarProvider<'_> { +impl BlockHashReader for StaticFileJarProvider<'_, N> { fn block_hash(&self, number: u64) -> ProviderResult> { - self.cursor()?.get_one::>(number.into()) + self.cursor()?.get_one::(number.into()) } fn canonical_hashes_range( @@ -161,7 +176,7 @@ impl BlockHashReader for StaticFileJarProvider<'_> { let mut hashes = Vec::with_capacity((end - start) as usize); for number in start..end { - if let Some(hash) = cursor.get_one::>(number.into())? { + if let Some(hash) = cursor.get_one::(number.into())? { hashes.push(hash) } } @@ -169,7 +184,7 @@ impl BlockHashReader for StaticFileJarProvider<'_> { } } -impl BlockNumReader for StaticFileJarProvider<'_> { +impl BlockNumReader for StaticFileJarProvider<'_, N> { fn chain_info(&self) -> ProviderResult { // Information on live database Err(ProviderError::UnsupportedProvider) @@ -189,45 +204,43 @@ impl BlockNumReader for StaticFileJarProvider<'_> { let mut cursor = self.cursor()?; Ok(cursor - .get_one::>((&hash).into())? + .get_one::((&hash).into())? .and_then(|res| (res == hash).then(|| cursor.number()).flatten())) } } -impl TransactionsProvider for StaticFileJarProvider<'_> { +impl> TransactionsProvider + for StaticFileJarProvider<'_, N> +{ + type Transaction = N::SignedTx; + fn transaction_id(&self, hash: TxHash) -> ProviderResult> { let mut cursor = self.cursor()?; Ok(cursor - .get_one::>((&hash).into())? - .and_then(|res| (res.hash() == hash).then(|| cursor.number()).flatten())) + .get_one::>((&hash).into())? + .and_then(|res| (res.trie_hash() == hash).then(|| cursor.number()).flatten())) } - fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { - Ok(self - .cursor()? - .get_one::>(num.into())? 
- .map(|tx| tx.with_hash())) + fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, num: TxNumber, - ) -> ProviderResult> { - self.cursor()?.get_one::>(num.into()) + ) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { - Ok(self - .cursor()? - .get_one::>((&hash).into())? - .map(|tx| tx.with_hash())) + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + self.cursor()?.get_one::>((&hash).into()) } fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { // Information required on indexing table [`tables::TransactionBlocks`] Err(ProviderError::UnsupportedProvider) } @@ -240,7 +253,7 @@ impl TransactionsProvider for StaticFileJarProvider<'_> { fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Related to indexing tables. Live database should get the tx_range and call static file // provider with `transactions_by_tx_range` instead. Err(ProviderError::UnsupportedProvider) @@ -249,7 +262,7 @@ impl TransactionsProvider for StaticFileJarProvider<'_> { fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Related to indexing tables. Live database should get the tx_range and call static file // provider with `transactions_by_tx_range` instead. Err(ProviderError::UnsupportedProvider) @@ -258,15 +271,13 @@ impl TransactionsProvider for StaticFileJarProvider<'_> { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { let range = to_range(range); let mut cursor = self.cursor()?; let mut txes = Vec::with_capacity((range.end - range.start) as usize); for num in range { - if let Some(tx) = - cursor.get_one::>(num.into())? - { + if let Some(tx) = cursor.get_one::>(num.into())? { txes.push(tx) } } @@ -278,24 +289,27 @@ impl TransactionsProvider for StaticFileJarProvider<'_> { range: impl RangeBounds, ) -> ProviderResult> { let txs = self.transactions_by_tx_range(range)?; - TransactionSignedNoHash::recover_signers(&txs, txs.len()) - .ok_or(ProviderError::SenderRecoveryError) + recover_signers(&txs, txs.len()).ok_or(ProviderError::SenderRecoveryError) } fn transaction_sender(&self, num: TxNumber) -> ProviderResult> { Ok(self .cursor()? - .get_one::>(num.into())? + .get_one::>(num.into())? .and_then(|tx| tx.recover_signer())) } } -impl ReceiptProvider for StaticFileJarProvider<'_> { - fn receipt(&self, num: TxNumber) -> ProviderResult> { - self.cursor()?.get_one::>(num.into()) +impl> + ReceiptProvider for StaticFileJarProvider<'_, N> +{ + type Receipt = N::Receipt; + + fn receipt(&self, num: TxNumber) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(tx_static_file) = &self.auxiliary_jar { if let Some(num) = tx_static_file.transaction_id(hash)? { return self.receipt(num) @@ -304,7 +318,10 @@ impl ReceiptProvider for StaticFileJarProvider<'_> { Ok(None) } - fn receipts_by_block(&self, _block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + _block: BlockHashOrNumber, + ) -> ProviderResult>> { // Related to indexing tables. 
StaticFile should get the tx_range and call static file // provider with `receipt()` instead for each Err(ProviderError::UnsupportedProvider) @@ -313,13 +330,13 @@ impl ReceiptProvider for StaticFileJarProvider<'_> { fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { let range = to_range(range); let mut cursor = self.cursor()?; let mut receipts = Vec::with_capacity((range.end - range.start) as usize); for num in range { - if let Some(tx) = cursor.get_one::>(num.into())? { + if let Some(tx) = cursor.get_one::>(num.into())? { receipts.push(tx) } } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index cb270a6da46f..3b49f8d401f9 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -7,7 +7,9 @@ use crate::{ ReceiptProvider, StageCheckpointReader, StatsReader, TransactionVariant, TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, }; +use alloy_consensus::Header; use alloy_eips::{ + eip2718::Encodable2718, eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, }; @@ -18,29 +20,35 @@ use parking_lot::RwLock; use reth_chainspec::{ChainInfo, ChainSpecProvider}; use reth_db::{ lockfile::StorageLock, - static_file::{iter_static_files, HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}, + static_file::{ + iter_static_files, BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, + StaticFileCursor, TDWithHashMask, TransactionMask, + }, + table::{Decompress, Value}, tables, }; use reth_db_api::{ - cursor::DbCursorRO, - models::{CompactU256, StoredBlockBodyIndices}, - table::Table, - transaction::DbTx, + cursor::DbCursorRO, models::StoredBlockBodyIndices, table::Table, transaction::DbTx, }; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; +use reth_node_types::{FullNodePrimitives, NodePrimitives}; use reth_primitives::{ static_file::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, }, - Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + transaction::recover_signers, + BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, + StaticFileSegment, TransactionMeta, TransactionSignedNoHash, }; +use reth_primitives_traits::SignedTransaction; use reth_stages_types::{PipelineTarget, StageId}; use reth_storage_api::DBProvider; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, + fmt::Debug, + marker::PhantomData, ops::{Deref, Range, RangeBounds, RangeInclusive}, path::{Path, PathBuf}, sync::{mpsc, Arc}, @@ -76,10 +84,16 @@ impl StaticFileAccess { } /// [`StaticFileProvider`] manages all existing [`StaticFileJarProvider`]. -#[derive(Debug, Clone)] -pub struct StaticFileProvider(pub(crate) Arc); +#[derive(Debug)] +pub struct StaticFileProvider(pub(crate) Arc>); + +impl Clone for StaticFileProvider { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} -impl StaticFileProvider { +impl StaticFileProvider { /// Creates a new [`StaticFileProvider`]. 
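The manual `Clone` just above is worth pausing on: with the new `N` type parameter, `#[derive(Clone)]` would quietly require `N: Clone`, even though only the inner `Arc` is cloned. A minimal standalone sketch of the pattern, with hypothetical names:

use std::{marker::PhantomData, sync::Arc};

// Hypothetical reduction of the pattern above: deriving `Clone` on the
// newtype would demand `N: Clone`, but cloning only bumps the `Arc`
// refcount, and `N` exists purely as a marker.
#[derive(Debug)]
struct Inner<N> {
    _pd: PhantomData<N>,
}

#[derive(Debug)]
struct Provider<N>(Arc<Inner<N>>);

impl<N> Clone for Provider<N> {
    fn clone(&self) -> Self {
        // No `N: Clone` bound required: only the pointer is duplicated.
        Self(Arc::clone(&self.0))
    }
}

The same reasoning explains the hand-written `Default` for `StaticFileWriters` further down in writer.rs.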
fn new(path: impl AsRef, access: StaticFileAccess) -> ProviderResult { let provider = Self(Arc::new(StaticFileProviderInner::new(path, access)?)); @@ -190,8 +204,8 @@ impl StaticFileProvider { } } -impl Deref for StaticFileProvider { - type Target = StaticFileProviderInner; +impl Deref for StaticFileProvider { + type Target = StaticFileProviderInner; fn deref(&self) -> &Self::Target { &self.0 @@ -200,7 +214,7 @@ impl Deref for StaticFileProvider { /// [`StaticFileProviderInner`] manages all existing [`StaticFileJarProvider`]. #[derive(Debug)] -pub struct StaticFileProviderInner { +pub struct StaticFileProviderInner { /// Maintains a map which allows for concurrent access to different `NippyJars`, over different /// segments and ranges. map: DashMap<(BlockNumber, StaticFileSegment), LoadedJar>, @@ -211,7 +225,8 @@ pub struct StaticFileProviderInner { /// Directory where `static_files` are located path: PathBuf, /// Maintains a writer set of [`StaticFileSegment`]. - writers: StaticFileWriters, + writers: StaticFileWriters, + /// Metrics for the static files. metrics: Option>, /// Access rights of the provider. access: StaticFileAccess, @@ -219,9 +234,11 @@ pub struct StaticFileProviderInner { blocks_per_file: u64, /// Write lock for when access is [`StaticFileAccess::RW`]. _lock_file: Option, + /// Node primitives + _pd: PhantomData, } -impl StaticFileProviderInner { +impl StaticFileProviderInner { /// Creates a new [`StaticFileProviderInner`]. fn new(path: impl AsRef, access: StaticFileAccess) -> ProviderResult { let _lock_file = if access.is_read_write() { @@ -240,6 +257,7 @@ impl StaticFileProviderInner { access, blocks_per_file: DEFAULT_BLOCKS_PER_STATIC_FILE, _lock_file, + _pd: Default::default(), }; Ok(provider) @@ -256,7 +274,7 @@ impl StaticFileProviderInner { } } -impl StaticFileProvider { +impl StaticFileProvider { /// Set a custom number of blocks per file. #[cfg(any(test, feature = "test-utils"))] pub fn with_custom_blocks_per_file(self, blocks_per_file: u64) -> Self { @@ -322,7 +340,7 @@ impl StaticFileProvider { segment: StaticFileSegment, block: BlockNumber, path: Option<&Path>, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_segment_provider( segment, || self.get_segment_ranges_from_block(segment, block), @@ -337,7 +355,7 @@ impl StaticFileProvider { segment: StaticFileSegment, tx: TxNumber, path: Option<&Path>, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_segment_provider( segment, || self.get_segment_ranges_from_transaction(segment, tx), @@ -354,7 +372,7 @@ impl StaticFileProvider { segment: StaticFileSegment, fn_range: impl Fn() -> Option, path: Option<&Path>, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // If we have a path, then get the block range from its name. // Otherwise, check `self.available_static_files` let block_range = match path { @@ -425,12 +443,12 @@ impl StaticFileProvider { &self, segment: StaticFileSegment, fixed_block_range: &SegmentRangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult> { let key = (fixed_block_range.end(), segment); // Avoid using `entry` directly to avoid a write lock in the common case. 
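That last comment describes the cache discipline the lookup continued below implements: attempt a read-locked `get` first, and only fall back to an insert on a miss. The idea in isolation, with hypothetical key and value types:

use dashmap::DashMap;

// `get` only takes a shard read lock, so the common case (jar already
// cached) never serializes concurrent readers; `entry` would take the
// shard write lock even when the key is present.
fn get_or_load(map: &DashMap<u64, String>, key: u64) -> String {
    if let Some(cached) = map.get(&key) {
        return cached.clone()
    }
    // Miss: build the value and insert it under the shard write lock.
    map.entry(key).or_insert_with(|| format!("jar-{key}")).clone()
}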
trace!(target: "provider::static_file", ?segment, ?fixed_block_range, "Getting provider"); - let mut provider: StaticFileJarProvider<'_> = if let Some(jar) = self.map.get(&key) { + let mut provider: StaticFileJarProvider<'_, N> = if let Some(jar) = self.map.get(&key) { trace!(target: "provider::static_file", ?segment, ?fixed_block_range, "Jar found in cache"); jar.into() } else { @@ -923,7 +941,7 @@ impl StaticFileProvider { pub fn find_static_file( &self, segment: StaticFileSegment, - func: impl Fn(StaticFileJarProvider<'_>) -> ProviderResult>, + func: impl Fn(StaticFileJarProvider<'_, N>) -> ProviderResult>, ) -> ProviderResult> { if let Some(highest_block) = self.get_highest_static_file_block(segment) { let mut range = self.find_fixed_range(highest_block); @@ -1093,7 +1111,7 @@ impl StaticFileProvider { }; if static_file_upper_bound - .map_or(false, |static_file_upper_bound| static_file_upper_bound >= number) + .is_some_and(|static_file_upper_bound| static_file_upper_bound >= number) { return fetch_from_static_file(self) } @@ -1166,30 +1184,35 @@ impl StaticFileProvider { /// Helper trait to manage different [`StaticFileProviderRW`] of an `Arc ProviderResult>; + ) -> ProviderResult>; /// Returns a mutable reference to a [`StaticFileProviderRW`] of the latest /// [`StaticFileSegment`]. fn latest_writer( &self, segment: StaticFileSegment, - ) -> ProviderResult>; + ) -> ProviderResult>; /// Commits all changes of all [`StaticFileProviderRW`] of all [`StaticFileSegment`]. fn commit(&self) -> ProviderResult<()>; } -impl StaticFileWriter for StaticFileProvider { +impl StaticFileWriter for StaticFileProvider { + type Primitives = N; + fn get_writer( &self, block: BlockNumber, segment: StaticFileSegment, - ) -> ProviderResult> { + ) -> ProviderResult> { if self.access.is_read_only() { return Err(ProviderError::ReadOnlyStaticFileAccess) } @@ -1203,7 +1226,7 @@ impl StaticFileWriter for StaticFileProvider { fn latest_writer( &self, segment: StaticFileSegment, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_writer(self.get_highest_static_file_block(segment).unwrap_or_default(), segment) } @@ -1212,12 +1235,12 @@ impl StaticFileWriter for StaticFileProvider { } } -impl HeaderProvider for StaticFileProvider { +impl HeaderProvider for StaticFileProvider { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.find_static_file(StaticFileSegment::Headers, |jar_provider| { Ok(jar_provider .cursor()? - .get_two::>(block_hash.into())? + .get_two::>(block_hash.into())? .and_then(|(header, hash)| { if &hash == block_hash { return Some(header) @@ -1243,7 +1266,7 @@ impl HeaderProvider for StaticFileProvider { self.find_static_file(StaticFileSegment::Headers, |jar_provider| { Ok(jar_provider .cursor()? - .get_two::>(block_hash.into())? + .get_two::(block_hash.into())? .and_then(|(td, hash)| (&hash == block_hash).then_some(td.0))) }) } @@ -1291,7 +1314,7 @@ impl HeaderProvider for StaticFileProvider { to_range(range), |cursor, number| { Ok(cursor - .get_two::>(number.into())? + .get_two::>(number.into())? 
.map(|(header, hash)| SealedHeader::new(header, hash))) }, predicate, @@ -1299,7 +1322,7 @@ impl HeaderProvider for StaticFileProvider { } } -impl BlockHashReader for StaticFileProvider { +impl BlockHashReader for StaticFileProvider { fn block_hash(&self, num: u64) -> ProviderResult> { self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None)?.block_hash(num) } @@ -1312,14 +1335,18 @@ impl BlockHashReader for StaticFileProvider { self.fetch_range_with_predicate( StaticFileSegment::Headers, start..end, - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::(number.into()), |_| true, ) } } -impl ReceiptProvider for StaticFileProvider { - fn receipt(&self, num: TxNumber) -> ProviderResult> { +impl> ReceiptProvider + for StaticFileProvider +{ + type Receipt = N::Receipt; + + fn receipt(&self, num: TxNumber) -> ProviderResult> { self.get_segment_provider_from_transaction(StaticFileSegment::Receipts, num, None) .and_then(|provider| provider.receipt(num)) .or_else(|err| { @@ -1331,31 +1358,36 @@ impl ReceiptProvider for StaticFileProvider { }) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(num) = self.transaction_id(hash)? { return self.receipt(num) } Ok(None) } - fn receipts_by_block(&self, _block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + _block: BlockHashOrNumber, + ) -> ProviderResult>> { unreachable!() } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.fetch_range_with_predicate( StaticFileSegment::Receipts, to_range(range), - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::>(number.into()), |_| true, ) } } -impl TransactionsProviderExt for StaticFileProvider { +impl> TransactionsProviderExt + for StaticFileProvider +{ fn transaction_hashes_by_range( &self, tx_range: Range, @@ -1390,7 +1422,7 @@ impl TransactionsProviderExt for StaticFileProvider { chunk_range, |cursor, number| { Ok(cursor - .get_one::>(number.into())? + .get_one::>(number.into())? .map(|transaction| { rlp_buf.clear(); let _ = channel_tx @@ -1416,13 +1448,17 @@ impl TransactionsProviderExt for StaticFileProvider { } } -impl TransactionsProvider for StaticFileProvider { +impl> TransactionsProvider + for StaticFileProvider +{ + type Transaction = N::SignedTx; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.find_static_file(StaticFileSegment::Transactions, |jar_provider| { let mut cursor = jar_provider.cursor()?; if cursor - .get_one::>((&tx_hash).into())? - .and_then(|tx| (tx.hash() == tx_hash).then_some(tx)) + .get_one::>((&tx_hash).into())? 
+ .and_then(|tx| (tx.trie_hash() == tx_hash).then_some(tx)) .is_some() { Ok(cursor.number()) @@ -1432,7 +1468,7 @@ impl TransactionsProvider for StaticFileProvider { }) } - fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, num, None) .and_then(|provider| provider.transaction_by_id(num)) .or_else(|err| { @@ -1444,12 +1480,12 @@ impl TransactionsProvider for StaticFileProvider { }) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, num: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, num, None) - .and_then(|provider| provider.transaction_by_id_no_hash(num)) + .and_then(|provider| provider.transaction_by_id_unhashed(num)) .or_else(|err| { if let ProviderError::MissingStaticFileTx(_, _) = err { Ok(None) @@ -1459,20 +1495,19 @@ impl TransactionsProvider for StaticFileProvider { }) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.find_static_file(StaticFileSegment::Transactions, |jar_provider| { Ok(jar_provider .cursor()? - .get_one::>((&hash).into())? - .map(|tx| tx.with_hash()) - .and_then(|tx| (tx.hash_ref() == &hash).then_some(tx))) + .get_one::>((&hash).into())? + .and_then(|tx| (tx.trie_hash() == hash).then_some(tx))) }) } fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1485,7 +1520,7 @@ impl TransactionsProvider for StaticFileProvider { fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1493,7 +1528,7 @@ impl TransactionsProvider for StaticFileProvider { fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1501,13 +1536,11 @@ impl TransactionsProvider for StaticFileProvider { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.fetch_range_with_predicate( StaticFileSegment::Transactions, to_range(range), - |cursor, number| { - cursor.get_one::>(number.into()) - }, + |cursor, number| cursor.get_one::>(number.into()), |_| true, ) } @@ -1517,18 +1550,17 @@ impl TransactionsProvider for StaticFileProvider { range: impl RangeBounds, ) -> ProviderResult> { let txes = self.transactions_by_tx_range(range)?; - TransactionSignedNoHash::recover_signers(&txes, txes.len()) - .ok_or(ProviderError::SenderRecoveryError) + recover_signers(&txes, txes.len()).ok_or(ProviderError::SenderRecoveryError) } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - Ok(self.transaction_by_id_no_hash(id)?.and_then(|tx| tx.recover_signer())) + Ok(self.transaction_by_id_unhashed(id)?.and_then(|tx| tx.recover_signer())) } } /* Cannot be successfully implemented but must exist for trait requirements */ -impl BlockNumReader for StaticFileProvider { +impl BlockNumReader for StaticFileProvider { fn chain_info(&self) -> ProviderResult { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) @@ -1550,32 +1582,38 @@ impl 
BlockNumReader for StaticFileProvider { } } -impl BlockReader for StaticFileProvider { +impl> BlockReader for StaticFileProvider { + type Block = N::Block; + fn find_block_by_hash( &self, _hash: B256, _source: BlockSource, - ) -> ProviderResult> { + ) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn block(&self, _id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, _id: BlockHashOrNumber) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1594,7 +1632,7 @@ impl BlockReader for StaticFileProvider { &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1603,12 +1641,12 @@ impl BlockReader for StaticFileProvider { &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1616,19 +1654,19 @@ impl BlockReader for StaticFileProvider { fn block_with_senders_range( &self, _range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { Err(ProviderError::UnsupportedProvider) } fn sealed_block_with_senders_range( &self, _range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { Err(ProviderError::UnsupportedProvider) } } -impl WithdrawalsProvider for StaticFileProvider { +impl WithdrawalsProvider for StaticFileProvider { fn withdrawals_by_block( &self, _id: BlockHashOrNumber, @@ -1644,7 +1682,7 @@ impl WithdrawalsProvider for StaticFileProvider { } } -impl StatsReader for StaticFileProvider { +impl StatsReader for StaticFileProvider { fn count_entries(&self) -> ProviderResult { match T::NAME { tables::CanonicalHeaders::NAME | @@ -1654,7 +1692,7 @@ impl StatsReader for StaticFileProvider { .map(|block| block + 1) .unwrap_or_default() as usize), - tables::Receipts::NAME => Ok(self + tables::Receipts::::NAME => Ok(self .get_highest_static_file_tx(StaticFileSegment::Receipts) .map(|receipts| receipts + 1) .unwrap_or_default() as usize), @@ -1670,11 +1708,14 @@ impl StatsReader for StaticFileProvider { /// Calculates the tx hash for the given transaction and its id. 
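The helper named by this doc comment is rewritten below to hash any `Encodable2718` value instead of destructuring `TransactionSignedNoHash`; the operation it performs is just keccak256 over the typed-envelope encoding. A standalone sketch, assuming alloy's `Encodable2718` and `keccak256`:

use alloy_eips::eip2718::Encodable2718;
use alloy_primitives::{keccak256, B256};

// An EIP-2718 transaction hash is keccak256 of the typed envelope. The
// buffer is threaded through so batch callers, like the range hashing
// above, can reuse a single allocation.
fn envelope_hash<T: Encodable2718>(tx: &T, buf: &mut Vec<u8>) -> B256 {
    buf.clear();
    tx.encode_2718(buf);
    keccak256(buf.as_slice())
}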
#[inline] -fn calculate_hash( - entry: (TxNumber, TransactionSignedNoHash), +fn calculate_hash( + entry: (TxNumber, T), rlp_buf: &mut Vec, -) -> Result<(B256, TxNumber), Box> { +) -> Result<(B256, TxNumber), Box> +where + T: Encodable2718, +{ let (tx_id, tx) = entry; - tx.transaction.eip2718_encode(&tx.signature, rlp_buf); + tx.encode_2718(rlp_buf); Ok((keccak256(rlp_buf), tx_id)) } diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 52eb6ed666ea..71c6bf755e22 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -55,8 +55,10 @@ impl Deref for LoadedJar { #[cfg(test)] mod tests { use super::*; - use crate::{test_utils::create_test_provider_factory, HeaderProvider}; - use alloy_consensus::Transaction; + use crate::{ + test_utils::create_test_provider_factory, HeaderProvider, StaticFileProviderFactory, + }; + use alloy_consensus::{Header, Transaction}; use alloy_primitives::{BlockHash, TxNumber, B256, U256}; use rand::seq::SliceRandom; use reth_db::{ @@ -66,7 +68,7 @@ mod tests { use reth_db_api::transaction::DbTxMut; use reth_primitives::{ static_file::{find_fixed_range, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE}, - Header, Receipt, TransactionSignedNoHash, + EthPrimitives, Receipt, TransactionSigned, }; use reth_storage_api::{ReceiptProvider, TransactionsProvider}; use reth_testing_utils::generators::{self, random_header_range}; @@ -116,7 +118,7 @@ mod tests { // Create StaticFile { - let manager = StaticFileProvider::read_write(static_files_path.path()).unwrap(); + let manager = factory.static_file_provider(); let mut writer = manager.latest_writer(StaticFileSegment::Headers).unwrap(); let mut td = U256::ZERO; @@ -131,7 +133,7 @@ mod tests { // Use providers to query Header data and compare if it matches { let db_provider = factory.provider().unwrap(); - let manager = StaticFileProvider::read_write(static_files_path.path()).unwrap(); + let manager = db_provider.static_file_provider(); let jar_provider = manager .get_segment_provider_from_block(StaticFileSegment::Headers, 0, Some(&static_file)) .unwrap(); @@ -170,7 +172,7 @@ mod tests { // [ Headers Creation and Commit ] { - let sf_rw = StaticFileProvider::read_write(&static_dir) + let sf_rw = StaticFileProvider::::read_write(&static_dir) .expect("Failed to create static file provider") .with_custom_blocks_per_file(blocks_per_file); @@ -189,8 +191,8 @@ mod tests { // Helper function to prune headers and validate truncation results fn prune_and_validate( - writer: &mut StaticFileProviderRWRefMut<'_>, - sf_rw: &StaticFileProvider, + writer: &mut StaticFileProviderRWRefMut<'_, EthPrimitives>, + sf_rw: &StaticFileProvider, static_dir: impl AsRef, prune_count: u64, expected_tip: Option, @@ -302,20 +304,20 @@ mod tests { /// * `10..=19`: no txs/receipts /// * `20..=29`: only one tx/receipt fn setup_tx_based_scenario( - sf_rw: &StaticFileProvider, + sf_rw: &StaticFileProvider, segment: StaticFileSegment, blocks_per_file: u64, ) { fn setup_block_ranges( - writer: &mut StaticFileProviderRWRefMut<'_>, - sf_rw: &StaticFileProvider, + writer: &mut StaticFileProviderRWRefMut<'_, EthPrimitives>, + sf_rw: &StaticFileProvider, segment: StaticFileSegment, block_range: &Range, mut tx_count: u64, next_tx_num: &mut u64, ) { let mut receipt = Receipt::default(); - let mut tx = TransactionSignedNoHash::default(); + let mut tx = TransactionSigned::default(); for block in 
block_range.clone() { writer.increment_block(block).unwrap(); @@ -413,7 +415,7 @@ mod tests { #[allow(clippy::too_many_arguments)] fn prune_and_validate( - sf_rw: &StaticFileProvider, + sf_rw: &StaticFileProvider, static_dir: impl AsRef, segment: StaticFileSegment, prune_count: u64, diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 8c31c021f218..6f5335ec6657 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -2,18 +2,21 @@ use super::{ manager::StaticFileProviderInner, metrics::StaticFileProviderMetrics, StaticFileProvider, }; use crate::providers::static_file::metrics::StaticFileProviderOperation; +use alloy_consensus::Header; use alloy_primitives::{BlockHash, BlockNumber, TxNumber, U256}; use parking_lot::{lock_api::RwLockWriteGuard, RawRwLock, RwLock}; use reth_codecs::Compact; use reth_db_api::models::CompactU256; use reth_nippy_jar::{NippyJar, NippyJarError, NippyJarWriter}; +use reth_node_types::NodePrimitives; use reth_primitives::{ static_file::{SegmentHeader, SegmentRangeInclusive}, - Header, Receipt, StaticFileSegment, TransactionSignedNoHash, + Receipt, StaticFileSegment, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ borrow::Borrow, + fmt::Debug, path::{Path, PathBuf}, sync::{Arc, Weak}, time::Instant, @@ -24,19 +27,29 @@ use tracing::debug; /// /// WARNING: Trying to use more than one writer for the same segment type **will result in a /// deadlock**. -#[derive(Debug, Default)] -pub(crate) struct StaticFileWriters { - headers: RwLock>, - transactions: RwLock>, - receipts: RwLock>, +#[derive(Debug)] +pub(crate) struct StaticFileWriters { + headers: RwLock>>, + transactions: RwLock>>, + receipts: RwLock>>, +} + +impl Default for StaticFileWriters { + fn default() -> Self { + Self { + headers: Default::default(), + transactions: Default::default(), + receipts: Default::default(), + } + } } -impl StaticFileWriters { +impl StaticFileWriters { pub(crate) fn get_or_create( &self, segment: StaticFileSegment, - create_fn: impl FnOnce() -> ProviderResult, - ) -> ProviderResult> { + create_fn: impl FnOnce() -> ProviderResult>, + ) -> ProviderResult> { let mut write_guard = match segment { StaticFileSegment::Headers => self.headers.write(), StaticFileSegment::Transactions => self.transactions.write(), @@ -63,19 +76,19 @@ impl StaticFileWriters { /// Mutable reference to a [`StaticFileProviderRW`] behind a [`RwLockWriteGuard`]. 
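Before the guard wrapper this doc comment introduces, note the shape of `StaticFileWriters` above: one `RwLock<Option<_>>` per segment, filled lazily. A reduced sketch of `get_or_create`, using `parking_lot`'s guard mapping instead of the `StaticFileProviderRWRefMut` wrapper but equivalent in effect:

use parking_lot::{MappedRwLockWriteGuard, RwLock, RwLockWriteGuard};

// One lock per segment keeps writers for different segments independent;
// the `Option` lets each writer be constructed on first use.
struct Writers<W> {
    headers: RwLock<Option<W>>,
}

impl<W> Writers<W> {
    fn get_or_create(&self, create: impl FnOnce() -> W) -> MappedRwLockWriteGuard<'_, W> {
        let mut guard = self.headers.write();
        if guard.is_none() {
            *guard = Some(create());
        }
        RwLockWriteGuard::map(guard, |slot| slot.as_mut().expect("just initialized"))
    }
}

This also makes the deadlock warning above concrete: the write guard lives as long as the returned handle, so requesting the same segment's writer twice on one thread blocks on itself.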
#[derive(Debug)] -pub struct StaticFileProviderRWRefMut<'a>( - pub(crate) RwLockWriteGuard<'a, RawRwLock, Option>, +pub struct StaticFileProviderRWRefMut<'a, N>( + pub(crate) RwLockWriteGuard<'a, RawRwLock, Option>>, ); -impl std::ops::DerefMut for StaticFileProviderRWRefMut<'_> { +impl std::ops::DerefMut for StaticFileProviderRWRefMut<'_, N> { fn deref_mut(&mut self) -> &mut Self::Target { // This is always created by [`StaticFileWriters::get_or_create`] self.0.as_mut().expect("static file writer provider should be init") } } -impl std::ops::Deref for StaticFileProviderRWRefMut<'_> { - type Target = StaticFileProviderRW; +impl std::ops::Deref for StaticFileProviderRWRefMut<'_, N> { + type Target = StaticFileProviderRW; fn deref(&self) -> &Self::Target { // This is always created by [`StaticFileWriters::get_or_create`] @@ -85,11 +98,11 @@ impl std::ops::Deref for StaticFileProviderRWRefMut<'_> { #[derive(Debug)] /// Extends `StaticFileProvider` with writing capabilities -pub struct StaticFileProviderRW { +pub struct StaticFileProviderRW { /// Reference back to the provider. We need [Weak] here because [`StaticFileProviderRW`] is /// stored in a [`dashmap::DashMap`] inside the parent [`StaticFileProvider`], which is an /// [Arc]. If we were to use an [Arc] here, we would create a reference cycle. - reader: Weak, + reader: Weak>, /// A [`NippyJarWriter`] instance. writer: NippyJarWriter, /// Path to opened file. @@ -103,7 +116,7 @@ pub struct StaticFileProviderRW { prune_on_commit: Option<(u64, Option)>, } -impl StaticFileProviderRW { +impl StaticFileProviderRW { /// Creates a new [`StaticFileProviderRW`] for a [`StaticFileSegment`]. /// /// Before use, transaction-based segments should ensure the block end range is the expected /// one. pub fn new( segment: StaticFileSegment, block: BlockNumber, - reader: Weak, + reader: Weak>, metrics: Option>, ) -> ProviderResult { let (writer, data_path) = Self::open(segment, block, reader.clone(), metrics.clone())?; @@ -132,7 +145,7 @@ impl StaticFileProviderRW { fn open( segment: StaticFileSegment, block: u64, - reader: Weak, + reader: Weak>, metrics: Option>, ) -> ProviderResult<(NippyJarWriter, PathBuf)> { let start = Instant::now(); @@ -307,10 +320,7 @@ impl StaticFileProviderRW { /// and create the next one if we are past the end range. /// /// Returns the current [`BlockNumber`] as seen in the static file. 
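The `Weak` back-reference documented in the struct above is the standard way to break a parent-child `Arc` cycle; stripped to its essentials, with hypothetical types:

use std::sync::{Arc, Weak};

// The parent owns its children and each child points back through `Weak`,
// so dropping the last external `Arc<Parent>` frees the whole graph.
struct Parent {
    children: Vec<Child>,
}

struct Child {
    parent: Weak<Parent>,
}

impl Child {
    fn parent(&self) -> Arc<Parent> {
        // Mirrors `upgrade_provider_to_strong_reference` near the end of
        // writer.rs: a writer is assumed never to outlive its provider.
        self.parent.upgrade().expect("parent dropped")
    }
}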
- pub fn increment_block( - &mut self, - expected_block_number: BlockNumber, - ) -> ProviderResult { + pub fn increment_block(&mut self, expected_block_number: BlockNumber) -> ProviderResult<()> { let segment = self.writer.user_header().segment(); self.check_next_block_number(expected_block_number)?; @@ -337,7 +347,7 @@ impl StaticFileProviderRW { } } - let block = self.writer.user_header_mut().increment_block(); + self.writer.user_header_mut().increment_block(); if let Some(metrics) = &self.metrics { metrics.record_segment_operation( segment, @@ -346,7 +356,7 @@ impl StaticFileProviderRW { ); } - Ok(block) + Ok(()) } /// Verifies if the incoming block number matches the next expected block number @@ -488,16 +498,24 @@ impl StaticFileProviderRW { &mut self, tx_num: TxNumber, value: V, - ) -> ProviderResult { - if self.writer.user_header().tx_range().is_none() { - self.writer.user_header_mut().set_tx_range(tx_num, tx_num); - } else { + ) -> ProviderResult<()> { + if let Some(range) = self.writer.user_header().tx_range() { + let next_tx = range.end() + 1; + if next_tx != tx_num { + return Err(ProviderError::UnexpectedStaticFileTxNumber( + self.writer.user_header().segment(), + tx_num, + next_tx, + )) + } self.writer.user_header_mut().increment_tx(); + } else { + self.writer.user_header_mut().set_tx_range(tx_num, tx_num); } self.append_column(value)?; - Ok(self.writer.user_header().tx_end().expect("qed")) + Ok(()) } /// Appends header to static file. @@ -511,13 +529,13 @@ impl StaticFileProviderRW { header: &Header, total_difficulty: U256, hash: &BlockHash, - ) -> ProviderResult { + ) -> ProviderResult<()> { let start = Instant::now(); self.ensure_no_queued_prune()?; debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Headers); - let block_number = self.increment_block(header.number)?; + self.increment_block(header.number)?; self.append_column(header)?; self.append_column(CompactU256::from(total_difficulty))?; @@ -531,7 +549,7 @@ impl StaticFileProviderRW { ); } - Ok(block_number) + Ok(()) } /// Appends transaction to static file. @@ -540,16 +558,15 @@ impl StaticFileProviderRW { /// empty blocks and this function wouldn't be called. /// /// Returns the current [`TxNumber`] as seen in the static file. - pub fn append_transaction( - &mut self, - tx_num: TxNumber, - tx: &TransactionSignedNoHash, - ) -> ProviderResult { + pub fn append_transaction(&mut self, tx_num: TxNumber, tx: &N::SignedTx) -> ProviderResult<()> + where + N::SignedTx: Compact, + { let start = Instant::now(); self.ensure_no_queued_prune()?; debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Transactions); - let result = self.append_with_tx_number(tx_num, tx)?; + self.append_with_tx_number(tx_num, tx)?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( @@ -559,7 +576,7 @@ impl StaticFileProviderRW { ); } - Ok(result) + Ok(()) } /// Appends receipt to static file. @@ -568,16 +585,15 @@ impl StaticFileProviderRW { /// empty blocks and this function wouldn't be called. /// /// Returns the current [`TxNumber`] as seen in the static file. 
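The reworked `append_with_tx_number` above replaces an unconditional `increment_tx` with a checked append; its control flow in isolation looks like this, with a plain tuple standing in for `SegmentHeader`'s tx range:

// Every append must extend the inclusive tx range by exactly one; a gap or
// repeat reports the offending number and the expected one, the analogue of
// `ProviderError::UnexpectedStaticFileTxNumber` above.
fn record_tx(range: &mut Option<(u64, u64)>, tx_num: u64) -> Result<(), (u64, u64)> {
    match range {
        // First append seeds a single-element range.
        None => *range = Some((tx_num, tx_num)),
        Some((_, end)) => {
            let next = *end + 1;
            if next != tx_num {
                return Err((tx_num, next))
            }
            *end = tx_num;
        }
    }
    Ok(())
}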
- pub fn append_receipt( - &mut self, - tx_num: TxNumber, - receipt: &Receipt, - ) -> ProviderResult { + pub fn append_receipt(&mut self, tx_num: TxNumber, receipt: &N::Receipt) -> ProviderResult<()> + where + N::Receipt: Compact, + { let start = Instant::now(); self.ensure_no_queued_prune()?; debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Receipts); - let result = self.append_with_tx_number(tx_num, receipt)?; + self.append_with_tx_number(tx_num, receipt)?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( @@ -587,7 +603,7 @@ impl StaticFileProviderRW { ); } - Ok(result) + Ok(()) } /// Appends multiple receipts to the static file. @@ -615,7 +631,8 @@ impl StaticFileProviderRW { for receipt_result in receipts_iter { let (tx_num, receipt) = receipt_result?; - tx_number = self.append_with_tx_number(tx_num, receipt.borrow())?; + self.append_with_tx_number(tx_num, receipt.borrow())?; + tx_number = tx_num; count += 1; } @@ -750,7 +767,7 @@ impl StaticFileProviderRW { Ok(()) } - fn reader(&self) -> StaticFileProvider { + fn reader(&self) -> StaticFileProvider { Self::upgrade_provider_to_strong_reference(&self.reader) } @@ -763,8 +780,8 @@ impl StaticFileProviderRW { /// active. In reality, it's impossible to detach the [`StaticFileProviderRW`] from the /// [`StaticFileProvider`]. fn upgrade_provider_to_strong_reference( - provider: &Weak, - ) -> StaticFileProvider { + provider: &Weak>, + ) -> StaticFileProvider { provider.upgrade().map(StaticFileProvider).expect("StaticFileProvider is dropped") } diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 19a6cbf6a5c4..fdded2807aab 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -2,18 +2,18 @@ use crate::{DBProvider, DatabaseProviderRW, ExecutionOutcome}; use alloy_consensus::{TxLegacy, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{ - b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, Sealable, TxKind, B256, - U256, + b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, TxKind, B256, U256, }; +use alloy_consensus::Header; use alloy_eips::eip4895::{Withdrawal, Withdrawals}; use alloy_primitives::PrimitiveSignature as Signature; use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_node_types::NodeTypes; use reth_primitives::{ - Account, BlockBody, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - Transaction, TransactionSigned, TxType, + Account, BlockBody, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, + TransactionSigned, TxType, }; use reth_trie::root::{state_root_unhashed, storage_root_unhashed}; use revm::{db::BundleState, primitives::AccountInfo}; @@ -88,9 +88,14 @@ pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| SealedBlo hex!("cf7b274520720b50e6a4c3e5c4d553101f44945396827705518ce17cb7219a42").into(), ), body: BlockBody { - transactions: vec![TransactionSigned { - hash: hex!("3541dd1d17e76adeb25dcf2b0a9b60a1669219502e58dcf26a2beafbfb550397").into(), - signature: Signature::new( + transactions: vec![TransactionSigned::new( + Transaction::Legacy(TxLegacy { + gas_price: 10, + gas_limit: 400_000, + to: TxKind::Call(hex!("095e7baea6a6c7c4c2dfeb977efac326af552d87").into()), + ..Default::default() + }), + Signature::new( U256::from_str( "51983300959770368863831494747186777928121405155922056726144551509338672451120", ) @@ -101,13 
+106,8 @@ pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| SealedBlo .unwrap(), false, ), - transaction: Transaction::Legacy(TxLegacy { - gas_price: 10, - gas_limit: 400_000, - to: TxKind::Call(hex!("095e7baea6a6c7c4c2dfeb977efac326af552d87").into()), - ..Default::default() - }), - }], + b256!("3541dd1d17e76adeb25dcf2b0a9b60a1669219502e58dcf26a2beafbfb550397"), + )], ..Default::default() }, }); @@ -233,9 +233,7 @@ fn block1(number: BlockNumber) -> (SealedBlockWithSenders, ExecutionOutcome) { header.number = number; header.state_root = state_root; header.parent_hash = B256::ZERO; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x30; 20])] }, execution_outcome) } @@ -299,9 +297,7 @@ fn block2( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } @@ -365,9 +361,7 @@ fn block3( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } @@ -456,9 +450,7 @@ fn block4( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } @@ -544,9 +536,7 @@ fn block5( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 6e4331566db5..12c0330ac0e0 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1,11 +1,11 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - ChainSpecProvider, ChangeSetReader, DatabaseProvider, EvmEnvProvider, HeaderProvider, - ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, StateReader, - StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, + ChainSpecProvider, ChangeSetReader, DatabaseProvider, EthStorage, EvmEnvProvider, + HeaderProvider, ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, + StateReader, StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_consensus::constants::EMPTY_ROOT_HASH; +use alloy_consensus::{constants::EMPTY_ROOT_HASH, 
Header}; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, BlockId, BlockNumberOrTag, @@ -13,8 +13,7 @@ use alloy_eips::{ use alloy_primitives::{ keccak256, map::{HashMap, HashSet}, - Address, BlockHash, BlockNumber, Bytes, Sealable, StorageKey, StorageValue, TxHash, TxNumber, - B256, U256, + Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, }; use parking_lot::Mutex; use reth_chainspec::{ChainInfo, ChainSpec}; @@ -24,18 +23,18 @@ use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; use reth_node_types::NodeTypes; use reth_primitives::{ - Account, Block, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, SealedBlock, + Account, Block, BlockWithSenders, Bytecode, EthPrimitives, GotExpected, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, }; +use reth_primitives_traits::SignedTransaction; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ DatabaseProviderFactory, StageCheckpointReader, StateProofProvider, StorageRootProvider, }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, - TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, StorageProof, TrieInput, }; use reth_trie_db::MerklePatriciaTrie; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; @@ -159,9 +158,10 @@ impl MockEthProvider { pub struct MockNode; impl NodeTypes for MockNode { - type Primitives = (); + type Primitives = EthPrimitives; type ChainSpec = ChainSpec; type StateCommitment = MerklePatriciaTrie; + type Storage = EthStorage; } impl DatabaseProviderFactory for MockEthProvider { @@ -218,11 +218,7 @@ impl HeaderProvider for MockEthProvider { } fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { - Ok(self.header_by_number(number)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - })) + Ok(self.header_by_number(number)?.map(SealedHeader::seal)) } fn sealed_headers_while( @@ -233,11 +229,7 @@ impl HeaderProvider for MockEthProvider { Ok(self .headers_range(range)? 
.into_iter() - .map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) + .map(SealedHeader::seal) .take_while(|h| predicate(h)) .collect()) } @@ -252,6 +244,8 @@ impl ChainSpecProvider for MockEthProvider { } impl TransactionsProvider for MockEthProvider { + type Transaction = TransactionSigned; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { let lock = self.blocks.lock(); let tx_number = lock @@ -263,7 +257,7 @@ impl TransactionsProvider for MockEthProvider { Ok(tx_number) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { let lock = self.blocks.lock(); let transaction = lock.values().flat_map(|block| &block.body.transactions).nth(id as usize).cloned(); @@ -271,16 +265,13 @@ impl TransactionsProvider for MockEthProvider { Ok(transaction) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); - let transaction = lock - .values() - .flat_map(|block| &block.body.transactions) - .nth(id as usize) - .map(|tx| Into::::into(tx.clone())); + let transaction = + lock.values().flat_map(|block| &block.body.transactions).nth(id as usize).cloned(); Ok(transaction) } @@ -294,7 +285,7 @@ impl TransactionsProvider for MockEthProvider { fn transaction_by_hash_with_meta( &self, hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); for (block_hash, block) in lock.iter() { for (index, tx) in block.body.transactions.iter().enumerate() { @@ -330,14 +321,14 @@ impl TransactionsProvider for MockEthProvider { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(self.block(id)?.map(|b| b.body.transactions)) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // init btreemap so we can return in order let mut map = BTreeMap::new(); for (_, block) in self.blocks.lock().iter() { @@ -352,14 +343,14 @@ impl TransactionsProvider for MockEthProvider { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); let transactions = lock .values() .flat_map(|block| &block.body.transactions) .enumerate() .filter(|&(tx_number, _)| range.contains(&(tx_number as TxNumber))) - .map(|(_, tx)| tx.clone().into()) + .map(|(_, tx)| tx.clone()) .collect(); Ok(transactions) @@ -388,6 +379,8 @@ impl TransactionsProvider for MockEthProvider { } impl ReceiptProvider for MockEthProvider { + type Receipt = Receipt; + fn receipt(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } @@ -480,6 +473,8 @@ impl BlockIdReader for MockEthProvider { } impl BlockReader for MockEthProvider { + type Block = Block; + fn find_block_by_hash( &self, hash: B256, @@ -566,14 +561,7 @@ impl BlockReaderIdExt for MockEthProvider { } fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { - self.header_by_id(id)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ) + self.header_by_id(id)?.map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))) } fn header_by_id(&self, id: BlockId) -> ProviderResult> { @@ -654,6 +642,15 @@ impl StorageRootProvider for MockEthProvider { ) -> ProviderResult { Ok(StorageProof::new(slot)) } + + fn 
storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(StorageMultiProof::empty()) + } } impl StateProofProvider for MockEthProvider { @@ -833,6 +830,8 @@ impl ChangeSetReader for MockEthProvider { } impl StateReader for MockEthProvider { + type Receipt = Receipt; + fn get_state(&self, _block: BlockNumber) -> ProviderResult> { Ok(None) } diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index c0e80930b318..2c3795573c20 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -1,4 +1,7 @@ -use crate::{providers::StaticFileProvider, HashingWriter, ProviderFactory, TrieWriter}; +use crate::{ + providers::{ProviderNodeTypes, StaticFileProvider}, + HashingWriter, ProviderFactory, TrieWriter, +}; use alloy_primitives::B256; use reth_chainspec::{ChainSpec, MAINNET}; use reth_db::{ @@ -6,7 +9,7 @@ use reth_db::{ DatabaseEnv, }; use reth_errors::ProviderResult; -use reth_node_types::{NodeTypesWithDB, NodeTypesWithDBAdapter}; +use reth_node_types::NodeTypesWithDBAdapter; use reth_primitives::{Account, StorageEntry}; use reth_trie::StateRoot; use reth_trie_db::DatabaseStateRoot; @@ -22,10 +25,11 @@ pub use reth_chain_state::test_utils::TestCanonStateSubscriptions; /// Mock [`reth_node_types::NodeTypes`] for testing. pub type MockNodeTypes = reth_node_types::AnyNodeTypesWithEngine< - (), + reth_primitives::EthPrimitives, reth_ethereum_engine_primitives::EthEngineTypes, reth_chainspec::ChainSpec, reth_trie_db::MerklePatriciaTrie, + crate::EthStorage, >; /// Mock [`reth_node_types::NodeTypesWithDB`] for testing. @@ -51,7 +55,7 @@ pub fn create_test_provider_factory_with_chain_spec( } /// Inserts the genesis alloc from the provided chain spec into the trie. 
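Stepping back from the mock provider: the recurring move in this diff is that reader traits stop hardcoding `reth_primitives` structs and instead declare associated item types, which `EthPrimitives`-based nodes then pin to the old concrete types. The shape of the pattern, reduced to a sketch:

// Each provider trait names its own item type...
trait ReceiptProvider {
    type Receipt: Send + Sync;
    fn receipt(&self, id: u64) -> Option<Self::Receipt>;
}

// ...and an implementation fixes it per node configuration.
struct EthReceipt; // stand-in for the concrete `Receipt` struct

struct MockProvider;

impl ReceiptProvider for MockProvider {
    type Receipt = EthReceipt;

    fn receipt(&self, _id: u64) -> Option<Self::Receipt> {
        None
    }
}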
-pub fn insert_genesis>( +pub fn insert_genesis>( provider_factory: &ProviderFactory, chain_spec: Arc, ) -> ProviderResult { diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 7c3848b4a53c..ff6b3fccbe10 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -4,6 +4,7 @@ use std::{ sync::Arc, }; +use alloy_consensus::Header; use alloy_eips::{ eip4895::{Withdrawal, Withdrawals}, BlockHashOrNumber, BlockId, BlockNumberOrTag, @@ -21,13 +22,12 @@ use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_errors::ProviderError; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - Account, Block, BlockWithSenders, Bytecode, Header, Receipt, SealedBlock, + Account, Block, BlockWithSenders, Bytecode, EthPrimitives, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_storage_api::{NodePrimitivesProvider, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, @@ -92,6 +92,8 @@ impl BlockNumReader for NoopProvider { } impl BlockReader for NoopProvider { + type Block = Block; + fn find_block_by_hash( &self, hash: B256, @@ -192,29 +194,31 @@ impl BlockIdReader for NoopProvider { } impl TransactionsProvider for NoopProvider { + type Transaction = TransactionSigned; + fn transaction_id(&self, _tx_hash: TxHash) -> ProviderResult> { Ok(None) } - fn transaction_by_id(&self, _id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, _id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(None) } - fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult> { Ok(None) } fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(None) } @@ -225,21 +229,21 @@ impl TransactionsProvider for NoopProvider { fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(None) } fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(Vec::default()) } fn transactions_by_tx_range( &self, _range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(Vec::default()) } @@ -256,6 +260,7 @@ impl TransactionsProvider for NoopProvider { } impl ReceiptProvider for NoopProvider { + type Receipt = Receipt; fn receipt(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } @@ -368,6 +373,15 @@ impl StorageRootProvider for NoopProvider { ) -> ProviderResult { Ok(reth_trie::StorageProof::new(slot)) } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(reth_trie::StorageMultiProof::empty()) + } } impl StateProofProvider for NoopProvider { @@ -555,8 +569,12 @@ impl PruneCheckpointReader for NoopProvider { } } +impl NodePrimitivesProvider for NoopProvider { + type Primitives = EthPrimitives; +} + impl 
StaticFileProviderFactory for NoopProvider { - fn static_file_provider(&self) -> StaticFileProvider { + fn static_file_provider(&self) -> StaticFileProvider { StaticFileProvider::read_only(PathBuf::default(), false).unwrap() } } diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 7202c405f068..d12f240e6164 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -1,44 +1,143 @@ use alloy_primitives::BlockNumber; use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_node_types::NodePrimitives; use reth_primitives::SealedBlockWithSenders; +use reth_storage_api::NodePrimitivesProvider; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; -use std::ops::RangeInclusive; -/// BlockExecution Writer -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait BlockExecutionWriter: BlockWriter + Send + Sync { - /// Take range of blocks and its execution result - fn take_block_and_execution_range( +/// An enum that represents the storage location for a piece of data. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum StorageLocation { + /// Write only to static files. + StaticFiles, + /// Write only to the database. + Database, + /// Write to both the database and static files. + Both, +} + +impl StorageLocation { + /// Returns true if the storage location includes static files. + pub const fn static_files(&self) -> bool { + matches!(self, Self::StaticFiles | Self::Both) + } + + /// Returns true if the storage location includes the database. + pub const fn database(&self) -> bool { + matches!(self, Self::Database | Self::Both) + } +} + +/// `BlockExecution` Writer +pub trait BlockExecutionWriter: + NodePrimitivesProvider> + BlockWriter + Send + Sync +{ + /// Take all of the blocks above the provided number and their execution result + /// + /// The passed block number will stay in the database. + /// + /// Accepts [`StorageLocation`] specifying from where should transactions and receipts be + /// removed. + fn take_block_and_execution_above( &self, - range: RangeInclusive, - ) -> ProviderResult; + block: BlockNumber, + remove_from: StorageLocation, + ) -> ProviderResult>; - /// Remove range of blocks and its execution result - fn remove_block_and_execution_range( + /// Remove all of the blocks above the provided number and their execution result + /// + /// The passed block number will stay in the database. + /// + /// Accepts [`StorageLocation`] specifying from where should transactions and receipts be + /// removed. + fn remove_block_and_execution_above( &self, - range: RangeInclusive, + block: BlockNumber, + remove_from: StorageLocation, ) -> ProviderResult<()>; } +impl BlockExecutionWriter for &T { + fn take_block_and_execution_above( + &self, + block: BlockNumber, + remove_from: StorageLocation, + ) -> ProviderResult> { + (*self).take_block_and_execution_above(block, remove_from) + } + + fn remove_block_and_execution_above( + &self, + block: BlockNumber, + remove_from: StorageLocation, + ) -> ProviderResult<()> { + (*self).remove_block_and_execution_above(block, remove_from) + } +} + /// This just receives state, or [`ExecutionOutcome`], from the provider #[auto_impl::auto_impl(&, Arc, Box)] pub trait StateReader: Send + Sync { + /// Receipt type in [`ExecutionOutcome`]. 
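The `StorageLocation` helpers introduced above let write and unwind paths branch on destination without matching the enum everywhere. A usage sketch, with the enum repeated so the example stands alone and a hypothetical unwind body:

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum StorageLocation {
    StaticFiles,
    Database,
    Both,
}

impl StorageLocation {
    const fn static_files(&self) -> bool {
        matches!(self, Self::StaticFiles | Self::Both)
    }
    const fn database(&self) -> bool {
        matches!(self, Self::Database | Self::Both)
    }
}

// `Both` naturally triggers both branches, so callers never have to
// enumerate the three variants by hand.
fn remove_receipts_above(block: u64, remove_from: StorageLocation) {
    if remove_from.database() {
        println!("unwind Receipts table above block {block}");
    }
    if remove_from.static_files() {
        println!("truncate receipts static-file segment above block {block}");
    }
}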
+ type Receipt: Send + Sync; + /// Get the [`ExecutionOutcome`] for the given block - fn get_state(&self, block: BlockNumber) -> ProviderResult>; + fn get_state( + &self, + block: BlockNumber, + ) -> ProviderResult>>; } /// Block Writer #[auto_impl::auto_impl(&, Arc, Box)] pub trait BlockWriter: Send + Sync { + /// The body this writer can write. + type Block: reth_primitives_traits::Block; + /// The receipt type for [`ExecutionOutcome`]. + type Receipt: Send + Sync; + /// Insert full block and make it canonical. Parent tx num and transition id is taken from /// parent block in database. /// /// Return [StoredBlockBodyIndices] that contains indices of the first and last transactions and /// transition in the block. - fn insert_block(&self, block: SealedBlockWithSenders) - -> ProviderResult; + /// + /// Accepts [`StorageLocation`] value which specifies where transactions and headers should be + /// written. + fn insert_block( + &self, + block: SealedBlockWithSenders, + write_to: StorageLocation, + ) -> ProviderResult; + + /// Appends a batch of block bodies extending the canonical chain. This is invoked during + /// `Bodies` stage and does not write to `TransactionHashNumbers` and `TransactionSenders` + /// tables which are populated on later stages. + /// + /// Bodies are passed as [`Option`]s, if body is `None` the corresponding block is empty. + fn append_block_bodies( + &self, + bodies: Vec<(BlockNumber, Option<::Body>)>, + write_transactions_to: StorageLocation, + ) -> ProviderResult<()>; + + /// Removes all blocks above the given block number from the database. + /// + /// Note: This does not remove state or execution data. + fn remove_blocks_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult<()>; + + /// Removes all block bodies above the given block number from the database. + fn remove_bodies_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult<()>; /// Appends a batch of sealed blocks to the blockchain, including sender information, and /// updates the post-state. @@ -56,8 +155,8 @@ pub trait BlockWriter: Send + Sync { /// Returns `Ok(())` on success, or an error if any operation fails. fn append_blocks_with_state( &self, - blocks: Vec, - execution_outcome: ExecutionOutcome, + blocks: Vec>, + execution_outcome: ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, ) -> ProviderResult<()>; diff --git a/crates/storage/provider/src/traits/finalized_block.rs b/crates/storage/provider/src/traits/finalized_block.rs deleted file mode 100644 index 98a6d9d0e343..000000000000 --- a/crates/storage/provider/src/traits/finalized_block.rs +++ /dev/null @@ -1,23 +0,0 @@ -use alloy_primitives::BlockNumber; -use reth_errors::ProviderResult; - -/// Functionality to read the last known chain blocks from the database. -pub trait ChainStateBlockReader: Send + Sync { - /// Returns the last finalized block number. - /// - /// If no finalized block has been written yet, this returns `None`. - fn last_finalized_block_number(&self) -> ProviderResult>; - /// Returns the last safe block number. - /// - /// If no safe block has been written yet, this returns `None`. - fn last_safe_block_number(&self) -> ProviderResult>; -} - -/// Functionality to write the last known chain blocks to the database. -pub trait ChainStateBlockWriter: Send + Sync { - /// Saves the given finalized block number in the DB. 
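One detail of the `BlockWriter` additions above deserves a sketch: `append_block_bodies` takes bodies as `Option`s, where `None` marks an empty block that still advances the block ranges. A hypothetical walk over that input, with a stand-in body type:

struct Body {
    transactions: Vec<String>, // stand-in for real signed transactions
}

// Every entry advances the block height, but only `Some` bodies append
// transaction rows, which is how empty blocks stay representable.
fn append_block_bodies(bodies: Vec<(u64, Option<Body>)>) {
    for (block_number, body) in bodies {
        println!("increment block to {block_number}");
        if let Some(body) = body {
            println!("append {} transactions", body.transactions.len());
        }
    }
}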
- fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; - - /// Saves the given safe block number in the DB. - fn save_safe_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; -} diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 4998e9741656..0d28f83739b0 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -7,13 +7,15 @@ use crate::{ }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; use reth_chainspec::EthereumHardforks; -use reth_node_types::NodeTypesWithDB; +use reth_node_types::{BlockTy, NodeTypesWithDB, ReceiptTy, TxTy}; +use reth_storage_api::NodePrimitivesProvider; /// Helper trait to unify all provider traits for simplicity. pub trait FullProvider: DatabaseProviderFactory + + NodePrimitivesProvider + StaticFileProviderFactory - + BlockReaderIdExt + + BlockReaderIdExt, Block = BlockTy, Receipt = ReceiptTy> + AccountReader + StateProviderFactory + EvmEnvProvider @@ -30,8 +32,9 @@ pub trait FullProvider: impl FullProvider for T where T: DatabaseProviderFactory + + NodePrimitivesProvider + StaticFileProviderFactory - + BlockReaderIdExt + + BlockReaderIdExt, Block = BlockTy, Receipt = ReceiptTy> + AccountReader + StateProviderFactory + EvmEnvProvider diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index c31c7c1e2f21..d82e97d1db79 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -9,37 +9,19 @@ pub use reth_evm::provider::EvmEnvProvider; mod block; pub use block::*; -mod chain_info; -pub use chain_info::CanonChainTracker; - mod header_sync_gap; pub use header_sync_gap::{HeaderSyncGap, HeaderSyncGapProvider}; mod state; -pub use state::{StateChangeWriter, StateWriter}; +pub use state::StateWriter; pub use reth_chainspec::ChainSpecProvider; -mod hashing; -pub use hashing::HashingWriter; - -mod trie; -pub use trie::{StorageTrieWriter, TrieWriter}; - -mod history; -pub use history::HistoryWriter; - mod static_file_provider; pub use static_file_provider::StaticFileProviderFactory; -mod stats; -pub use stats::StatsReader; - mod full; pub use full::{FullProvider, FullRpcProvider}; mod tree_viewer; pub use tree_viewer::TreeViewer; - -mod finalized_block; -pub use finalized_block::{ChainStateBlockReader, ChainStateBlockWriter}; diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index 3d62b1886e88..2c4ee2cfa8d3 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -6,21 +6,23 @@ use revm::db::{ states::{PlainStateReverts, StateChangeset}, OriginalValuesKnown, }; -use std::ops::RangeInclusive; -/// A helper trait for [`ExecutionOutcome`] to write state and receipts to storage. +use super::StorageLocation; + +/// A trait specifically for writing state changes or reverts pub trait StateWriter { - /// Write the data and receipts to the database or static files if `static_file_producer` is + /// Receipt type included into [`ExecutionOutcome`]. + type Receipt; + + /// Write the state and receipts to the database or static files if `static_file_producer` is /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. 
-    fn write_to_storage(
-        &mut self,
-        execution_outcome: ExecutionOutcome,
+    fn write_state(
+        &self,
+        execution_outcome: ExecutionOutcome<Self::Receipt>,
         is_value_known: OriginalValuesKnown,
+        write_receipts_to: StorageLocation,
     ) -> ProviderResult<()>;
-}
 
-/// A trait specifically for writing state changes or reverts
-pub trait StateChangeWriter {
     /// Write state reverts to the database.
     ///
     /// NOTE: Reverts will delete all wiped storage from plain state.
@@ -36,9 +38,19 @@ pub trait StateChangeWriter {
     /// Writes the hashed state changes to the database
     fn write_hashed_state(&self, hashed_state: &HashedPostStateSorted) -> ProviderResult<()>;
 
-    /// Remove the block range of state.
-    fn remove_state(&self, range: RangeInclusive<BlockNumber>) -> ProviderResult<()>;
+    /// Remove the block range of state above the given block. The state of the passed block is
+    /// not removed.
+    fn remove_state_above(
+        &self,
+        block: BlockNumber,
+        remove_receipts_from: StorageLocation,
+    ) -> ProviderResult<()>;
 
-    /// Take the block range of state, recreating the [`ExecutionOutcome`].
-    fn take_state(&self, range: RangeInclusive<BlockNumber>) -> ProviderResult<ExecutionOutcome>;
+    /// Take the block range of state, recreating the [`ExecutionOutcome`]. The state of the
+    /// passed block is not removed.
+    fn take_state_above(
+        &self,
+        block: BlockNumber,
+        remove_receipts_from: StorageLocation,
+    ) -> ProviderResult<ExecutionOutcome<Self::Receipt>>;
 }
diff --git a/crates/storage/provider/src/traits/static_file_provider.rs b/crates/storage/provider/src/traits/static_file_provider.rs
index 24d69569205c..9daab7e5a8f9 100644
--- a/crates/storage/provider/src/traits/static_file_provider.rs
+++ b/crates/storage/provider/src/traits/static_file_provider.rs
@@ -1,7 +1,9 @@
+use reth_storage_api::NodePrimitivesProvider;
+
 use crate::providers::StaticFileProvider;
 
 /// Static file provider factory.
-pub trait StaticFileProviderFactory {
+pub trait StaticFileProviderFactory: NodePrimitivesProvider {
     /// Create new instance of static file provider.
-    fn static_file_provider(&self) -> StaticFileProvider;
+    fn static_file_provider(&self) -> StaticFileProvider<Self::Primitives>;
 }
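The `StateWriter` rework above replaces the range-based `remove_state`/`take_state` with exclusive `*_above` variants and threads the receipt destination through every call. A hedged usage sketch; the helper and the `reth_provider` import path are assumptions:

```rust
// Sketch only: `StateWriter` and `StorageLocation` as reworked above.
use alloy_primitives::BlockNumber;
use reth_execution_types::ExecutionOutcome;
use reth_provider::{StateWriter, StorageLocation};
use reth_storage_errors::provider::ProviderResult;

/// Pop all state above `target` and get back the recreated outcome for the
/// removed range; the state of `target` itself is preserved. Receipts for the
/// removed range are deleted from the database in this example.
fn pop_state_above<W: StateWriter>(
    writer: &W,
    target: BlockNumber,
) -> ProviderResult<ExecutionOutcome<W::Receipt>> {
    writer.take_state_above(target, StorageLocation::Database)
}
```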
diff --git a/crates/storage/provider/src/traits/trie.rs b/crates/storage/provider/src/traits/trie.rs
deleted file mode 100644
index 2edb4e072dd3..000000000000
--- a/crates/storage/provider/src/traits/trie.rs
+++ /dev/null
@@ -1,36 +0,0 @@
-use std::collections::HashMap;
-
-use alloy_primitives::B256;
-use auto_impl::auto_impl;
-use reth_storage_errors::provider::ProviderResult;
-use reth_trie::updates::{StorageTrieUpdates, TrieUpdates};
-
-/// Trie Writer
-#[auto_impl(&, Arc, Box)]
-pub trait TrieWriter: Send + Sync {
-    /// Writes trie updates to the database.
-    ///
-    /// Returns the number of entries modified.
-    fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult<usize>;
-}
-
-/// Storage Trie Writer
-#[auto_impl(&, Arc, Box)]
-pub trait StorageTrieWriter: Send + Sync {
-    /// Writes storage trie updates from the given storage trie map.
-    ///
-    /// First sorts the storage trie updates by the hashed address key, writing in sorted order.
-    ///
-    /// Returns the number of entries modified.
-    fn write_storage_trie_updates(
-        &self,
-        storage_tries: &HashMap<B256, StorageTrieUpdates>,
-    ) -> ProviderResult<usize>;
-
-    /// Writes storage trie updates for the given hashed address.
-    fn write_individual_storage_trie_updates(
-        &self,
-        hashed_address: B256,
-        updates: &StorageTrieUpdates,
-    ) -> ProviderResult<usize>;
-}
diff --git a/crates/storage/provider/src/writer/database.rs b/crates/storage/provider/src/writer/database.rs
deleted file mode 100644
index 1436fb8a6ab9..000000000000
--- a/crates/storage/provider/src/writer/database.rs
+++ /dev/null
@@ -1,29 +0,0 @@
-use alloy_primitives::{BlockNumber, TxNumber};
-use reth_db::{
-    cursor::{DbCursorRO, DbCursorRW},
-    tables,
-};
-use reth_errors::ProviderResult;
-use reth_primitives::Receipt;
-use reth_storage_api::ReceiptWriter;
-
-pub(crate) struct DatabaseWriter<'a, W>(pub(crate) &'a mut W);
-
-impl<W> ReceiptWriter for DatabaseWriter<'_, W>
-where
-    W: DbCursorRO<tables::Receipts> + DbCursorRW<tables::Receipts>,
-{
-    fn append_block_receipts(
-        &mut self,
-        first_tx_index: TxNumber,
-        _: BlockNumber,
-        receipts: Vec<Option<Receipt>>,
-    ) -> ProviderResult<()> {
-        for (tx_idx, receipt) in receipts.into_iter().enumerate() {
-            if let Some(receipt) = receipt {
-                self.0.append(first_tx_index + tx_idx as u64, receipt)?;
-            }
-        }
-        Ok(())
-    }
-}
diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs
index 37092a5dd51e..02e912050d5e 100644
--- a/crates/storage/provider/src/writer/mod.rs
+++ b/crates/storage/provider/src/writer/mod.rs
@@ -1,36 +1,19 @@
 use crate::{
-    providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter as SfWriter},
-    writer::static_file::StaticFileWriter,
-    BlockExecutionWriter, BlockWriter, HistoryWriter, StateChangeWriter, StateWriter, TrieWriter,
+    providers::{StaticFileProvider, StaticFileWriter as SfWriter},
+    BlockExecutionWriter, BlockWriter, HistoryWriter, StateWriter, StaticFileProviderFactory,
+    StorageLocation, TrieWriter,
 };
-use alloy_primitives::{BlockNumber, B256, U256};
+use alloy_consensus::BlockHeader;
 use reth_chain_state::ExecutedBlock;
-use reth_db::{
-    cursor::DbCursorRO,
-    models::CompactU256,
-    tables,
-    transaction::{DbTx, DbTxMut},
-};
-use reth_errors::{ProviderError, ProviderResult};
-use reth_execution_types::ExecutionOutcome;
-use reth_primitives::{Header, SealedBlock, StaticFileSegment, TransactionSignedNoHash};
-use reth_stages_types::{StageCheckpoint, StageId};
-use reth_storage_api::{
-    DBProvider, HeaderProvider, ReceiptWriter, StageCheckpointWriter, TransactionsProviderExt,
-};
+use reth_db::transaction::{DbTx, DbTxMut};
+use reth_errors::ProviderResult;
+use reth_primitives::{NodePrimitives, StaticFileSegment};
+use reth_primitives_traits::SignedTransaction;
+use reth_storage_api::{DBProvider, StageCheckpointWriter, TransactionsProviderExt};
 use reth_storage_errors::writer::UnifiedStorageWriterError;
 use revm::db::OriginalValuesKnown;
-use std::{borrow::Borrow, sync::Arc};
-use tracing::{debug, instrument};
-
-mod database;
-mod static_file;
-use database::DatabaseWriter;
-
-enum StorageType<C = (), S = ()> {
-    Database(C),
-    StaticFile(S),
-}
+use std::sync::Arc;
+use tracing::debug;
 
 /// [`UnifiedStorageWriter`] is responsible for managing the writing to storage with both database
 /// and static file providers.
@@ -83,14 +66,6 @@ impl<'a, ProviderDB, ProviderSF> UnifiedStorageWriter<'a, ProviderDB, ProviderSF>
         self.static_file.as_ref().expect("should exist")
     }
 
-    /// Returns a mutable reference to the static file instance.
-    ///
-    /// # Panics
-    /// If the static file instance is not set.
-    fn static_file_mut(&mut self) -> &mut ProviderSF {
-        self.static_file.as_mut().expect("should exist")
-    }
-
     /// Ensures that the static file instance is set.
     ///
     /// # Returns
@@ -114,15 +89,13 @@ impl UnifiedStorageWriter<'_, (), ()> {
     /// start-up.
     ///
     /// NOTE: If unwinding data from storage, use `commit_unwind` instead!
-    pub fn commit<P>(
-        database: impl Into<P> + AsRef<P>,
-        static_file: StaticFileProvider,
-    ) -> ProviderResult<()>
+    pub fn commit<P>(provider: P) -> ProviderResult<()>
     where
-        P: DBProvider,
+        P: DBProvider + StaticFileProviderFactory,
     {
+        let static_file = provider.static_file_provider();
         static_file.commit()?;
-        database.into().into_tx().commit()?;
+        provider.commit()?;
         Ok(())
     }
@@ -134,33 +107,36 @@
     /// checkpoints on the next start-up.
     ///
     /// NOTE: Should only be used after unwinding data from storage!
-    pub fn commit_unwind<P>(
-        database: impl Into<P> + AsRef<P>,
-        static_file: StaticFileProvider,
-    ) -> ProviderResult<()>
+    pub fn commit_unwind<P>(provider: P) -> ProviderResult<()>
     where
-        P: DBProvider,
+        P: DBProvider + StaticFileProviderFactory,
     {
-        database.into().into_tx().commit()?;
+        let static_file = provider.static_file_provider();
+        provider.commit()?;
         static_file.commit()?;
         Ok(())
     }
 }
 
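Note the deliberate ordering asymmetry in the two functions above: `commit` flushes static files before the database transaction, while `commit_unwind` commits the database first. When appending, a database checkpoint must never point at static-file data that has not yet been made durable; when unwinding, the database must stop referencing pruned blocks before static files drop them. A usage sketch under the same bounds (the helper name and `reth_provider` import path are assumptions):

```rust
// Sketch only: bounds match the new `commit` signature above.
use reth_provider::{StaticFileProviderFactory, UnifiedStorageWriter};
use reth_storage_api::DBProvider;
use reth_storage_errors::provider::ProviderResult;

/// Commit pending writes in the append-safe order: static files first, then
/// the database transaction.
fn persist<P>(provider: P) -> ProviderResult<()>
where
    P: DBProvider + StaticFileProviderFactory,
{
    // ... block/state writes through `provider` would happen here ...
    UnifiedStorageWriter::commit(provider)
}
```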
-impl<ProviderDB> UnifiedStorageWriter<'_, ProviderDB, &StaticFileProvider>
+impl<ProviderDB> UnifiedStorageWriter<'_, ProviderDB, &StaticFileProvider<ProviderDB::Primitives>>
 where
     ProviderDB: DBProvider
         + BlockWriter
         + TransactionsProviderExt
-        + StateChangeWriter
         + TrieWriter
+        + StateWriter
         + HistoryWriter
         + StageCheckpointWriter
         + BlockExecutionWriter
-        + AsRef<ProviderDB>,
+        + AsRef<ProviderDB>
+        + StaticFileProviderFactory,
 {
     /// Writes executed blocks and receipts to storage.
-    pub fn save_blocks(&self, blocks: &[ExecutedBlock]) -> ProviderResult<()> {
+    pub fn save_blocks<N>(&self, blocks: Vec<ExecutedBlock<N>>) -> ProviderResult<()>
+    where
+        N: NodePrimitives,
+        ProviderDB: BlockWriter<Block = N::Block> + StateWriter<Receipt = N::Receipt>,
+    {
         if blocks.is_empty() {
             debug!(target: "provider::storage_writer", "Attempted to write empty block range");
             return Ok(())
@@ -168,23 +144,14 @@ where
         // NOTE: checked non-empty above
         let first_block = blocks.first().unwrap().block();
-        let last_block = blocks.last().unwrap().block().clone();
-        let first_number = first_block.number;
-        let last_block_number = last_block.number;
-        debug!(target: "provider::storage_writer", block_count = %blocks.len(), "Writing blocks and execution data to storage");
+        let last_block = blocks.last().unwrap().block();
+        let first_number = first_block.number();
+        let last_block_number = last_block.number();
 
-        // Only write receipts to static files if there is no receipt pruning configured.
-        let mut state_writer = if self.database().prune_modes_ref().has_receipts_pruning() {
-            UnifiedStorageWriter::from_database(self.database())
-        } else {
-            UnifiedStorageWriter::from(
-                self.database(),
-                self.static_file().get_writer(first_block.number, StaticFileSegment::Receipts)?,
-            )
-        };
+        debug!(target: "provider::storage_writer", block_count = %blocks.len(), "Writing blocks and execution data to storage");
 
-        // TODO: remove all the clones and do performant / batched writes for each type of object
+        // TODO: Do performant / batched writes for each type of object
         // instead of a loop over all blocks,
         // meaning:
         // * blocks
         // * receipts
         // * changesets
         // * trie updates (cannot naively extend, need helper)
         // * indices (already done basically)
         // Insert the blocks
-        for block in blocks {
-            let sealed_block =
-                block.block().clone().try_with_senders_unchecked(block.senders().clone()).unwrap();
-            self.database().insert_block(sealed_block)?;
-            self.save_header_and_transactions(block.block.clone())?;
+        for ExecutedBlock { block, senders, execution_output, hashed_state, trie } in blocks {
+            let sealed_block = Arc::unwrap_or_clone(block)
+                .try_with_senders_unchecked(Arc::unwrap_or_clone(senders))
+                .unwrap();
+            self.database().insert_block(sealed_block, StorageLocation::Both)?;
 
             // Write state and changesets to the database.
             // Must be written after blocks because of the receipt lookup.
-            let execution_outcome = block.execution_outcome().clone();
-            state_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?;
+            self.database().write_state(
+                Arc::unwrap_or_clone(execution_output),
+                OriginalValuesKnown::No,
+                StorageLocation::StaticFiles,
+            )?;
 
             // insert hashes and intermediate merkle nodes
-            {
-                let trie_updates = block.trie_updates().clone();
-                let hashed_state = block.hashed_state();
-                self.database().write_hashed_state(&hashed_state.clone().into_sorted())?;
-                self.database().write_trie_updates(&trie_updates)?;
-            }
+            self.database()
+                .write_hashed_state(&Arc::unwrap_or_clone(hashed_state).into_sorted())?;
+            self.database().write_trie_updates(&trie)?;
         }
 
         // update history indices
@@ -224,76 +191,20 @@ where
         Ok(())
     }
 
-    /// Writes the header & transactions to static files, and updates their respective checkpoints
-    /// on database.
-    #[instrument(level = "trace", skip_all, fields(block = ?block.num_hash()) target = "storage")]
-    fn save_header_and_transactions(&self, block: Arc<SealedBlock>) -> ProviderResult<()> {
-        debug!(target: "provider::storage_writer", "Writing headers and transactions.");
-
-        {
-            let header_writer =
-                self.static_file().get_writer(block.number, StaticFileSegment::Headers)?;
-            let mut storage_writer = UnifiedStorageWriter::from(self.database(), header_writer);
-            let td = storage_writer.append_headers_from_blocks(
-                block.header().number,
-                std::iter::once(&(block.header(), block.hash())),
-            )?;
-
-            debug!(target: "provider::storage_writer", block_num=block.number, "Updating transaction metadata after writing");
-            self.database()
-                .tx_ref()
-                .put::<tables::HeaderTerminalDifficulties>(block.number, CompactU256(td))?;
-            self.database()
-                .save_stage_checkpoint(StageId::Headers, StageCheckpoint::new(block.number))?;
-        }
-
-        {
-            let transactions_writer =
-                self.static_file().get_writer(block.number, StaticFileSegment::Transactions)?;
-            let mut storage_writer =
-                UnifiedStorageWriter::from(self.database(), transactions_writer);
-            let no_hash_transactions = block
-                .body
-                .transactions
-                .clone()
-                .into_iter()
-                .map(TransactionSignedNoHash::from)
-                .collect();
-            storage_writer.append_transactions_from_blocks(
-                block.header().number,
-                std::iter::once(&no_hash_transactions),
-            )?;
-            self.database()
-                .save_stage_checkpoint(StageId::Bodies, StageCheckpoint::new(block.number))?;
-        }
-
-        Ok(())
-    }
-
     /// Removes all block, transaction and receipt data above the given block number from the
     /// database and static files. This is exclusive, i.e., it only removes blocks above
     /// `block_number`, and does not remove `block_number`.
     pub fn remove_blocks_above(&self, block_number: u64) -> ProviderResult<()> {
+        // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block
+        debug!(target: "provider::storage_writer", ?block_number, "Removing blocks from database above block_number");
+        self.database().remove_block_and_execution_above(block_number, StorageLocation::Both)?;
+
         // Get highest static file block for the total block range
         let highest_static_file_block = self
             .static_file()
             .get_highest_static_file_block(StaticFileSegment::Headers)
             .expect("todo: error handling, headers should exist");
 
-        // Get the total txs for the block range, so we have the correct number of columns for
-        // receipts and transactions
-        // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block
-        let tx_range = self
-            .database()
-            .transaction_range_by_block_range(block_number + 1..=highest_static_file_block)?;
-        let total_txs = tx_range.end().saturating_sub(*tx_range.start());
-
-        // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block
-        debug!(target: "provider::storage_writer", ?block_number, "Removing blocks from database above block_number");
-        self.database().remove_block_and_execution_range(
-            block_number + 1..=self.database().last_block_number()?,
-        )?;
-
         // IMPORTANT: we use `highest_static_file_block.saturating_sub(block_number)` to make sure
         // we remove only what is ABOVE the block.
         //
@@ -304,236 +215,6 @@ where
             .get_writer(block_number, StaticFileSegment::Headers)?
             .prune_headers(highest_static_file_block.saturating_sub(block_number))?;
 
-        self.static_file()
-            .get_writer(block_number, StaticFileSegment::Transactions)?
-            .prune_transactions(total_txs, block_number)?;
-
-        if !self.database().prune_modes_ref().has_receipts_pruning() {
-            self.static_file()
-                .get_writer(block_number, StaticFileSegment::Receipts)?
-                .prune_receipts(total_txs, block_number)?;
-        }
-
-        Ok(())
-    }
-}
-
-impl<ProviderDB> UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_>>
-where
-    ProviderDB: DBProvider + HeaderProvider,
-{
-    /// Ensures that the static file writer is set and of the right [`StaticFileSegment`] variant.
-    ///
-    /// # Returns
-    /// - `Ok(())` if the static file writer is set.
-    /// - `Err(StorageWriterError::MissingStaticFileWriter)` if the static file instance is not set.
-    fn ensure_static_file_segment(
-        &self,
-        segment: StaticFileSegment,
-    ) -> Result<(), UnifiedStorageWriterError> {
-        match &self.static_file {
-            Some(writer) => {
-                if writer.user_header().segment() == segment {
-                    Ok(())
-                } else {
-                    Err(UnifiedStorageWriterError::IncorrectStaticFileWriter(
-                        writer.user_header().segment(),
-                        segment,
-                    ))
-                }
-            }
-            None => Err(UnifiedStorageWriterError::MissingStaticFileWriter),
-        }
-    }
-
-    /// Appends headers to static files, using the
-    /// [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) table to determine the
-    /// total difficulty of the parent block during header insertion.
-    ///
-    /// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a
-    /// writer for the Headers segment.
-    pub fn append_headers_from_blocks<I, H>(
-        &mut self,
-        initial_block_number: BlockNumber,
-        headers: impl Iterator<Item = I>,
-    ) -> ProviderResult<U256>
-    where
-        I: Borrow<(H, B256)>,
-        H: Borrow<Header>,
-    {
-        self.ensure_static_file_segment(StaticFileSegment::Headers)?;
-
-        let mut td = self
-            .database()
-            .header_td_by_number(initial_block_number)?
-            .ok_or(ProviderError::TotalDifficultyNotFound(initial_block_number))?;
-
-        for pair in headers {
-            let (header, hash) = pair.borrow();
-            let header = header.borrow();
-            td += header.difficulty;
-            self.static_file_mut().append_header(header, td, hash)?;
-        }
-
-        Ok(td)
-    }
-
-    /// Appends transactions to static files, using the
-    /// [`BlockBodyIndices`](tables::BlockBodyIndices) table to determine the transaction number
-    /// when appending to static files.
-    ///
-    /// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a
-    /// writer for the Transactions segment.
-    pub fn append_transactions_from_blocks<T>(
-        &mut self,
-        initial_block_number: BlockNumber,
-        transactions: impl Iterator<Item = T>,
-    ) -> ProviderResult<()>
-    where
-        T: Borrow<Vec<TransactionSignedNoHash>>,
-    {
-        self.ensure_static_file_segment(StaticFileSegment::Transactions)?;
-
-        let mut bodies_cursor =
-            self.database().tx_ref().cursor_read::<tables::BlockBodyIndices>()?;
-
-        let mut last_tx_idx = None;
-        for (idx, transactions) in transactions.enumerate() {
-            let block_number = initial_block_number + idx as u64;
-
-            let mut first_tx_index =
-                bodies_cursor.seek_exact(block_number)?.map(|(_, indices)| indices.first_tx_num());
-
-            // If there are no indices, that means there have been no transactions
-            //
-            // So instead of returning an error, use zero
-            if block_number == initial_block_number && first_tx_index.is_none() {
-                first_tx_index = Some(0);
-            }
-
-            let mut tx_index = first_tx_index
-                .or(last_tx_idx)
-                .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?;
-
-            for tx in transactions.borrow() {
-                self.static_file_mut().append_transaction(tx_index, tx)?;
-                tx_index += 1;
-            }
-
-            self.static_file_mut().increment_block(block_number)?;
-
-            // update index
-            last_tx_idx = Some(tx_index);
-        }
-        Ok(())
-    }
-}
-
-impl<ProviderDB> UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_>>
-where
-    ProviderDB: DBProvider + HeaderProvider,
-{
-    /// Appends receipts block by block.
-    ///
-    /// ATTENTION: If called from [`UnifiedStorageWriter`] without a static file producer, it will
-    /// always write them to database. Otherwise, it will look into the pruning configuration to
-    /// decide.
-    ///
-    /// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a
-    /// writer for the Receipts segment.
-    ///
-    /// # Parameters
-    /// - `initial_block_number`: The starting block number.
-    /// - `blocks`: An iterator over blocks, each block having a vector of optional receipts. If
-    ///   `receipt` is `None`, it has been pruned.
-    pub fn append_receipts_from_blocks(
-        &mut self,
-        initial_block_number: BlockNumber,
-        blocks: impl Iterator<Item = Vec<Option<Receipt>>>,
-    ) -> ProviderResult<()> {
-        let mut bodies_cursor =
-            self.database().tx_ref().cursor_read::<tables::BlockBodyIndices>()?;
-
-        // We write receipts to database in two situations:
-        // * If we are in live sync. In this case, `UnifiedStorageWriter` is built without a static
-        //   file writer.
-        // * If there is any kind of receipt pruning
-        let mut storage_type = if self.static_file.is_none() ||
-            self.database().prune_modes_ref().has_receipts_pruning()
-        {
-            StorageType::Database(self.database().tx_ref().cursor_write::<tables::Receipts>()?)
- } else { - self.ensure_static_file_segment(StaticFileSegment::Receipts)?; - StorageType::StaticFile(self.static_file_mut()) - }; - - let mut last_tx_idx = None; - for (idx, receipts) in blocks.enumerate() { - let block_number = initial_block_number + idx as u64; - - let mut first_tx_index = - bodies_cursor.seek_exact(block_number)?.map(|(_, indices)| indices.first_tx_num()); - - // If there are no indices, that means there have been no transactions - // - // So instead of returning an error, use zero - if block_number == initial_block_number && first_tx_index.is_none() { - first_tx_index = Some(0); - } - - let first_tx_index = first_tx_index - .or(last_tx_idx) - .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; - - // update for empty blocks - last_tx_idx = Some(first_tx_index); - - match &mut storage_type { - StorageType::Database(cursor) => { - DatabaseWriter(cursor).append_block_receipts( - first_tx_index, - block_number, - receipts, - )?; - } - StorageType::StaticFile(sf) => { - StaticFileWriter(*sf).append_block_receipts( - first_tx_index, - block_number, - receipts, - )?; - } - }; - } - - Ok(()) - } -} - -impl StateWriter - for UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_>> -where - ProviderDB: DBProvider + StateChangeWriter + HeaderProvider, -{ - /// Write the data and receipts to the database or static files if `static_file_producer` is - /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. - fn write_to_storage( - &mut self, - execution_outcome: ExecutionOutcome, - is_value_known: OriginalValuesKnown, - ) -> ProviderResult<()> { - let (plain_state, reverts) = - execution_outcome.bundle.to_plain_state_and_reverts(is_value_known); - - self.database().write_state_reverts(reverts, execution_outcome.first_block)?; - - self.append_receipts_from_blocks( - execution_outcome.first_block, - execution_outcome.receipts.into_iter(), - )?; - - self.database().write_state_changes(plain_state)?; - Ok(()) } } @@ -551,6 +232,7 @@ mod tests { models::{AccountBeforeTx, BlockNumberAddress}, transaction::{DbTx, DbTxMut}, }; + use reth_execution_types::ExecutionOutcome; use reth_primitives::{Account, Receipt, Receipts, StorageEntry}; use reth_storage_api::DatabaseProviderFactory; use reth_trie::{ @@ -813,9 +495,8 @@ mod tests { let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); // Check plain storage state @@ -914,9 +595,8 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 2, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); assert_eq!( @@ -982,9 +662,8 @@ mod tests { let outcome = ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write 
bundle state to DB"); let mut state = State::builder().with_bundle_update().build(); @@ -1129,10 +808,10 @@ mod tests { let bundle = state.take_bundle(); - let outcome = ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + let outcome: ExecutionOutcome = + ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut storage_changeset_cursor = provider @@ -1296,9 +975,8 @@ mod tests { init_state.merge_transitions(BundleRetention::Reverts); let outcome = ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut state = State::builder().with_bundle_update().build(); @@ -1344,9 +1022,8 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut storage_changeset_cursor = provider @@ -1375,7 +1052,7 @@ mod tests { #[test] fn revert_to_indices() { - let base = ExecutionOutcome { + let base: ExecutionOutcome = ExecutionOutcome { bundle: BundleState::default(), receipts: vec![vec![Some(Receipt::default()); 2]; 7].into(), first_block: 10, @@ -1441,7 +1118,7 @@ mod tests { assert_eq!( StateRoot::overlay_root( tx, - ExecutionOutcome::new( + ExecutionOutcome::::new( state.bundle_state.clone(), Receipts::default(), 0, @@ -1592,7 +1269,7 @@ mod tests { .build(); assert_eq!(previous_state.reverts.len(), 1); - let mut test = ExecutionOutcome { + let mut test: ExecutionOutcome = ExecutionOutcome { bundle: present_state, receipts: vec![vec![Some(Receipt::default()); 2]; 1].into(), first_block: 2, diff --git a/crates/storage/provider/src/writer/static_file.rs b/crates/storage/provider/src/writer/static_file.rs deleted file mode 100644 index 5514e211e58f..000000000000 --- a/crates/storage/provider/src/writer/static_file.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::providers::StaticFileProviderRWRefMut; -use alloy_primitives::{BlockNumber, TxNumber}; -use reth_errors::ProviderResult; -use reth_primitives::Receipt; -use reth_storage_api::ReceiptWriter; - -pub(crate) struct StaticFileWriter<'a, W>(pub(crate) &'a mut W); - -impl ReceiptWriter for StaticFileWriter<'_, StaticFileProviderRWRefMut<'_>> { - fn append_block_receipts( - &mut self, - first_tx_index: TxNumber, - block_number: BlockNumber, - receipts: Vec>, - ) -> ProviderResult<()> { - // Increment block on static file header. 
- self.0.increment_block(block_number)?; - let receipts = receipts.iter().enumerate().map(|(tx_idx, receipt)| { - Ok(( - first_tx_index + tx_idx as u64, - receipt - .as_ref() - .expect("receipt should not be filtered when saving to static files."), - )) - }); - self.0.append_receipts(receipts)?; - Ok(()) - } -} diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 0ae8b284588e..ba2ccf1b1573 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -18,14 +18,18 @@ reth-db-models.workspace = true reth-db-api.workspace = true reth-execution-types.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true reth-trie.workspace = true +reth-trie-db.workspace = true +reth-db.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-rpc-types-engine.workspace = true auto_impl.workspace = true diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 01238be745e2..204e9027da28 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -2,12 +2,11 @@ use crate::{ BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; +use alloy_consensus::Header; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use reth_db_models::StoredBlockBodyIndices; -use reth_primitives::{ - Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, -}; +use reth_primitives::{BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader}; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; @@ -46,7 +45,6 @@ impl BlockSource { /// /// If not requested otherwise, implementers of this trait should prioritize fetching blocks from /// the database. -#[auto_impl::auto_impl(&, Arc)] pub trait BlockReader: BlockNumReader + HeaderProvider @@ -56,32 +54,46 @@ pub trait BlockReader: + Send + Sync { + /// The block type this provider reads. + type Block: reth_primitives_traits::Block< + Body: reth_primitives_traits::BlockBody, + >; + /// Tries to find in the given block source. /// /// Note: this only operates on the hash because the number might be ambiguous. /// /// Returns `None` if block is not found. - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult>; + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult>; /// Returns the block with given id from the database. /// /// Returns `None` if block is not found. - fn block(&self, id: BlockHashOrNumber) -> ProviderResult>; + fn block(&self, id: BlockHashOrNumber) -> ProviderResult>; /// Returns the pending block if available /// - /// Note: This returns a [SealedBlock] because it's expected that this is sealed by the provider - /// and the caller does not know the hash. - fn pending_block(&self) -> ProviderResult>; + /// Note: This returns a [`SealedBlockFor`] because it's expected that this is sealed by the + /// provider and the caller does not know the hash. 
+ fn pending_block(&self) -> ProviderResult>>; /// Returns the pending block if available /// - /// Note: This returns a [SealedBlockWithSenders] because it's expected that this is sealed by + /// Note: This returns a [`SealedBlockWithSenders`] because it's expected that this is sealed by /// the provider and the caller does not know the hash. - fn pending_block_with_senders(&self) -> ProviderResult>; + fn pending_block_with_senders( + &self, + ) -> ProviderResult>>; /// Returns the pending block and receipts if available. - fn pending_block_and_receipts(&self) -> ProviderResult)>>; + #[allow(clippy::type_complexity)] + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>>; /// Returns the ommers/uncle headers of the given block from the database. /// @@ -91,14 +103,14 @@ pub trait BlockReader: /// Returns the block with matching hash from the database. /// /// Returns `None` if block is not found. - fn block_by_hash(&self, hash: B256) -> ProviderResult> { + fn block_by_hash(&self, hash: B256) -> ProviderResult> { self.block(hash.into()) } /// Returns the block with matching number from database. /// /// Returns `None` if block is not found. - fn block_by_number(&self, num: u64) -> ProviderResult> { + fn block_by_number(&self, num: u64) -> ProviderResult> { self.block(num.into()) } @@ -116,7 +128,7 @@ pub trait BlockReader: &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>; + ) -> ProviderResult>>; /// Returns the sealed block with senders with matching number or hash from database. /// @@ -127,26 +139,164 @@ pub trait BlockReader: &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>; + ) -> ProviderResult>>; /// Returns all blocks in the given inclusive range. /// /// Note: returns only available blocks - fn block_range(&self, range: RangeInclusive) -> ProviderResult>; + fn block_range(&self, range: RangeInclusive) -> ProviderResult>; /// Returns a range of blocks from the database, along with the senders of each /// transaction in the blocks. fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>; + ) -> ProviderResult>>; /// Returns a range of sealed blocks from the database, along with the senders of each /// transaction in the blocks. 
fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>; + ) -> ProviderResult>>; +} + +impl BlockReader for std::sync::Arc { + type Block = T::Block; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { + T::find_block_by_hash(self, hash, source) + } + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + T::block(self, id) + } + fn pending_block(&self) -> ProviderResult>> { + T::pending_block(self) + } + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { + T::pending_block_with_senders(self) + } + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { + T::pending_block_and_receipts(self) + } + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + T::ommers(self, id) + } + fn block_by_hash(&self, hash: B256) -> ProviderResult> { + T::block_by_hash(self, hash) + } + fn block_by_number(&self, num: u64) -> ProviderResult> { + T::block_by_number(self, num) + } + fn block_body_indices(&self, num: u64) -> ProviderResult> { + T::block_body_indices(self, num) + } + fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::block_with_senders(self, id, transaction_kind) + } + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::sealed_block_with_senders(self, id, transaction_kind) + } + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + T::block_range(self, range) + } + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::block_with_senders_range(self, range) + } + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::sealed_block_with_senders_range(self, range) + } +} + +impl BlockReader for &T { + type Block = T::Block; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { + T::find_block_by_hash(self, hash, source) + } + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + T::block(self, id) + } + fn pending_block(&self) -> ProviderResult>> { + T::pending_block(self) + } + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { + T::pending_block_with_senders(self) + } + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { + T::pending_block_and_receipts(self) + } + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + T::ommers(self, id) + } + fn block_by_hash(&self, hash: B256) -> ProviderResult> { + T::block_by_hash(self, hash) + } + fn block_by_number(&self, num: u64) -> ProviderResult> { + T::block_by_number(self, num) + } + fn block_body_indices(&self, num: u64) -> ProviderResult> { + T::block_body_indices(self, num) + } + fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::block_with_senders(self, id, transaction_kind) + } + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::sealed_block_with_senders(self, id, transaction_kind) + } + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + T::block_range(self, range) + } + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::block_with_senders_range(self, range) + } + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { 
+ T::sealed_block_with_senders_range(self, range) + } } /// Trait extension for `BlockReader`, for types that implement `BlockId` conversion. @@ -159,12 +309,11 @@ pub trait BlockReader: /// so this trait can only be implemented for types that implement `BlockIdReader`. The /// `BlockIdReader` methods should be used to resolve `BlockId`s to block numbers or hashes, and /// retrieving the block should be done using the type's `BlockReader` methods. -#[auto_impl::auto_impl(&, Arc)] pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns the block with matching tag from the database /// /// Returns `None` if block is not found. - fn block_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + fn block_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.block(num.into())) } @@ -203,7 +352,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns the block with the matching [`BlockId`] from the database. /// /// Returns `None` if block is not found. - fn block_by_id(&self, id: BlockId) -> ProviderResult>; + fn block_by_id(&self, id: BlockId) -> ProviderResult>; /// Returns the block with senders with matching [`BlockId`]. /// @@ -214,7 +363,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { &self, id: BlockId, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { match id { BlockId::Hash(hash) => { self.block_with_senders(hash.block_hash.into(), transaction_kind) @@ -243,14 +392,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { ) -> ProviderResult> { self.convert_block_number(id)? .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into()))? - .map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ) + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))) } /// Returns the sealed header with the matching `BlockId` from the database. @@ -273,3 +415,24 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns `None` if block is not found. fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>; } + +/// Functionality to read the last known chain blocks from the database. +pub trait ChainStateBlockReader: Send + Sync { + /// Returns the last finalized block number. + /// + /// If no finalized block has been written yet, this returns `None`. + fn last_finalized_block_number(&self) -> ProviderResult>; + /// Returns the last safe block number. + /// + /// If no safe block has been written yet, this returns `None`. + fn last_safe_block_number(&self) -> ProviderResult>; +} + +/// Functionality to write the last known chain blocks to the database. +pub trait ChainStateBlockWriter: Send + Sync { + /// Saves the given finalized block number in the DB. + fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; + + /// Saves the given safe block number in the DB. 
+    fn save_safe_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>;
+}
diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs
new file mode 100644
index 000000000000..9b9c24c68633
--- /dev/null
+++ b/crates/storage/storage-api/src/chain.rs
@@ -0,0 +1,169 @@
+use crate::DBProvider;
+use alloy_primitives::BlockNumber;
+use reth_chainspec::{ChainSpecProvider, EthereumHardforks};
+use reth_db::{
+    cursor::{DbCursorRO, DbCursorRW},
+    models::{StoredBlockOmmers, StoredBlockWithdrawals},
+    tables,
+    transaction::{DbTx, DbTxMut},
+    DbTxUnwindExt,
+};
+use reth_primitives_traits::{Block, BlockBody, FullNodePrimitives};
+use reth_storage_errors::provider::ProviderResult;
+
+/// Trait that implements how block bodies are written to the storage.
+///
+/// Note: Within the current abstraction, this should only write to tables unrelated to
+/// transactions. Writing of transactions is handled separately.
+#[auto_impl::auto_impl(&, Arc)]
+pub trait BlockBodyWriter<Provider, Body: Send + Sync> {
+    /// Writes a set of block bodies to the storage.
+    fn write_block_bodies(
+        &self,
+        provider: &Provider,
+        bodies: Vec<(BlockNumber, Option<Body>)>,
+    ) -> ProviderResult<()>;
+
+    /// Removes all block bodies above the given block number from the database.
+    fn remove_block_bodies_above(
+        &self,
+        provider: &Provider,
+        block: BlockNumber,
+    ) -> ProviderResult<()>;
+}
+
+/// Trait that implements how chain-specific types are written to the storage.
+pub trait ChainStorageWriter<Provider, Primitives: FullNodePrimitives>:
+    BlockBodyWriter<Provider, <Primitives::Block as Block>::Body>
+{
+}
+impl<T, Provider, Primitives: FullNodePrimitives> ChainStorageWriter<Provider, Primitives> for T where
+    T: BlockBodyWriter<Provider, <Primitives::Block as Block>::Body>
+{
+}
+
+/// Input for reading a block body. Contains a header of block being read and a list of
+/// pre-fetched transactions.
+pub type ReadBodyInput<'a, B> =
+    (&'a <B as Block>::Header, Vec<<<B as Block>::Body as BlockBody>::Transaction>);
+
+/// Trait that implements how block bodies are read from the storage.
+///
+/// Note: Within the current abstraction, transactions persistence is handled separately, thus
+/// this trait is provided with transactions read beforehand and is expected to construct the
+/// block body from those transactions and additional data read from elsewhere.
+#[auto_impl::auto_impl(&, Arc)]
+pub trait BlockBodyReader<Provider> {
+    /// The block type.
+    type Block: Block;
+
+    /// Receives a list of block headers along with block transactions and returns the block
+    /// bodies.
+    fn read_block_bodies(
+        &self,
+        provider: &Provider,
+        inputs: Vec<ReadBodyInput<'_, Self::Block>>,
+    ) -> ProviderResult<Vec<<Self::Block as Block>::Body>>;
+}
+
+/// Trait that implements how chain-specific types are read from storage.
+pub trait ChainStorageReader<Provider, Primitives: FullNodePrimitives>:
+    BlockBodyReader<Provider, Block = Primitives::Block>
+{
+}
+impl<T, Provider, Primitives: FullNodePrimitives> ChainStorageReader<Provider, Primitives> for T where
+    T: BlockBodyReader<Provider, Block = Primitives::Block>
+{
+}
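The point of the `BlockBodyWriter`/`BlockBodyReader` split is that transaction persistence stays on the shared path while chains supply only their extra body tables. As a hedged sketch under the definitions above, a chain whose bodies carry nothing beyond transactions could implement the writer side as a no-op (`NoopBodyStorage` is an invented name, and the bounds mirror the reconstruction above):

```rust
// Sketch only: `BlockBodyWriter` as defined above; names here are invented.
use alloy_primitives::BlockNumber;
use reth_storage_errors::provider::ProviderResult;

#[derive(Debug, Default, Clone, Copy)]
struct NoopBodyStorage;

impl<Provider, Body: Send + Sync> BlockBodyWriter<Provider, Body> for NoopBodyStorage {
    fn write_block_bodies(
        &self,
        _provider: &Provider,
        _bodies: Vec<(BlockNumber, Option<Body>)>,
    ) -> ProviderResult<()> {
        // Transactions were already written by the shared transaction path;
        // this chain has no ommers/withdrawals-style tables to maintain.
        Ok(())
    }

    fn remove_block_bodies_above(
        &self,
        _provider: &Provider,
        _block: BlockNumber,
    ) -> ProviderResult<()> {
        Ok(())
    }
}
```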
+/// Ethereum storage implementation.
+#[derive(Debug, Default, Clone, Copy)]
+pub struct EthStorage;
+
+impl<Provider> BlockBodyWriter<Provider, reth_primitives::BlockBody> for EthStorage
+where
+    Provider: DBProvider<Tx: DbTxMut>,
+{
+    fn write_block_bodies(
+        &self,
+        provider: &Provider,
+        bodies: Vec<(u64, Option<reth_primitives::BlockBody>)>,
+    ) -> ProviderResult<()> {
+        let mut ommers_cursor = provider.tx_ref().cursor_write::<tables::BlockOmmers>()?;
+        let mut withdrawals_cursor =
+            provider.tx_ref().cursor_write::<tables::BlockWithdrawals>()?;
+
+        for (block_number, body) in bodies {
+            let Some(body) = body else { continue };
+
+            // Write ommers if any
+            if !body.ommers.is_empty() {
+                ommers_cursor.append(block_number, StoredBlockOmmers { ommers: body.ommers })?;
+            }
+
+            // Write withdrawals if any
+            if let Some(withdrawals) = body.withdrawals {
+                if !withdrawals.is_empty() {
+                    withdrawals_cursor
+                        .append(block_number, StoredBlockWithdrawals { withdrawals })?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    fn remove_block_bodies_above(
+        &self,
+        provider: &Provider,
+        block: BlockNumber,
+    ) -> ProviderResult<()> {
+        provider.tx_ref().unwind_table_by_num::<tables::BlockOmmers>(block)?;
+        provider.tx_ref().unwind_table_by_num::<tables::BlockWithdrawals>(block)?;
+
+        Ok(())
+    }
+}
+
+impl<Provider> BlockBodyReader<Provider> for EthStorage
+where
+    Provider: DBProvider + ChainSpecProvider<ChainSpec: EthereumHardforks>,
+{
+    type Block = reth_primitives::Block;
+
+    fn read_block_bodies(
+        &self,
+        provider: &Provider,
+        inputs: Vec<ReadBodyInput<'_, Self::Block>>,
+    ) -> ProviderResult<Vec<<Self::Block as Block>::Body>> {
+        // TODO: Ideally storage should hold its own copy of chain spec
+        let chain_spec = provider.chain_spec();
+
+        let mut ommers_cursor = provider.tx_ref().cursor_read::<tables::BlockOmmers>()?;
+        let mut withdrawals_cursor =
+            provider.tx_ref().cursor_read::<tables::BlockWithdrawals>()?;
+
+        let mut bodies = Vec::with_capacity(inputs.len());
+
+        for (header, transactions) in inputs {
+            // If we are past shanghai, then all blocks should have a withdrawal list,
+            // even if empty
+            let withdrawals = if chain_spec.is_shanghai_active_at_timestamp(header.timestamp) {
+                withdrawals_cursor
+                    .seek_exact(header.number)?
+ .map(|(_, w)| w.withdrawals) + .unwrap_or_default() + .into() + } else { + None + }; + let ommers = if chain_spec.final_paris_total_difficulty(header.number).is_some() { + Vec::new() + } else { + ommers_cursor.seek_exact(header.number)?.map(|(_, o)| o.ommers).unwrap_or_default() + }; + + bodies.push(reth_primitives::BlockBody { transactions, ommers, withdrawals }); + } + + Ok(bodies) + } +} diff --git a/crates/storage/provider/src/traits/chain_info.rs b/crates/storage/storage-api/src/chain_info.rs similarity index 100% rename from crates/storage/provider/src/traits/chain_info.rs rename to crates/storage/storage-api/src/chain_info.rs diff --git a/crates/storage/provider/src/traits/hashing.rs b/crates/storage/storage-api/src/hashing.rs similarity index 96% rename from crates/storage/provider/src/traits/hashing.rs rename to crates/storage/storage-api/src/hashing.rs index c6958aa4d644..7cd30a82510c 100644 --- a/crates/storage/provider/src/traits/hashing.rs +++ b/crates/storage/storage-api/src/hashing.rs @@ -1,10 +1,10 @@ -use alloy_primitives::{Address, BlockNumber, B256}; +use alloy_primitives::{map::HashMap, Address, BlockNumber, B256}; use auto_impl::auto_impl; use reth_db::models::{AccountBeforeTx, BlockNumberAddress}; use reth_primitives::{Account, StorageEntry}; use reth_storage_errors::provider::ProviderResult; use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, + collections::{BTreeMap, BTreeSet}, ops::{RangeBounds, RangeInclusive}, }; diff --git a/crates/storage/storage-api/src/header.rs b/crates/storage/storage-api/src/header.rs index 7202f51ddf1f..c068f7c1d295 100644 --- a/crates/storage/storage-api/src/header.rs +++ b/crates/storage/storage-api/src/header.rs @@ -1,6 +1,7 @@ +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockHash, BlockNumber, U256}; -use reth_primitives::{Header, SealedHeader}; +use reth_primitives::SealedHeader; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; diff --git a/crates/storage/provider/src/traits/history.rs b/crates/storage/storage-api/src/history.rs similarity index 100% rename from crates/storage/provider/src/traits/history.rs rename to crates/storage/storage-api/src/history.rs diff --git a/crates/storage/storage-api/src/legacy.rs b/crates/storage/storage-api/src/legacy.rs new file mode 100644 index 000000000000..e53a5d8bfa2b --- /dev/null +++ b/crates/storage/storage-api/src/legacy.rs @@ -0,0 +1,83 @@ +//! Traits used by the legacy execution engine. +//! +//! This module is scheduled for removal in the future. + +use alloy_eips::BlockNumHash; +use alloy_primitives::{BlockHash, BlockNumber}; +use auto_impl::auto_impl; +use reth_execution_types::ExecutionOutcome; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; + +/// Blockchain trait provider that gives access to the blockchain state that is not yet committed +/// (pending). +pub trait BlockchainTreePendingStateProvider: Send + Sync { + /// Returns a state provider that includes all state changes of the given (pending) block hash. + /// + /// In other words, the state provider will return the state after all transactions of the given + /// hash have been executed. + fn pending_state_provider( + &self, + block_hash: BlockHash, + ) -> ProviderResult> { + self.find_pending_state_provider(block_hash) + .ok_or(ProviderError::StateForHashNotFound(block_hash)) + } + + /// Returns state provider if a matching block exists. 
+ fn find_pending_state_provider( + &self, + block_hash: BlockHash, + ) -> Option>; +} + +/// Provides data required for post-block execution. +/// +/// This trait offers methods to access essential post-execution data, including the state changes +/// in accounts and storage, as well as block hashes for both the pending and canonical chains. +/// +/// The trait includes: +/// * [`ExecutionOutcome`] - Captures all account and storage changes in the pending chain. +/// * Block hashes - Provides access to the block hashes of both the pending chain and canonical +/// blocks. +#[auto_impl(&, Box)] +pub trait ExecutionDataProvider: Send + Sync { + /// Return the execution outcome. + fn execution_outcome(&self) -> &ExecutionOutcome; + /// Return block hash by block number of pending or canonical chain. + fn block_hash(&self, block_number: BlockNumber) -> Option; +} + +impl ExecutionDataProvider for ExecutionOutcome { + fn execution_outcome(&self) -> &ExecutionOutcome { + self + } + + /// Always returns [None] because we don't have any information about the block header. + fn block_hash(&self, _block_number: BlockNumber) -> Option { + None + } +} + +/// Fork data needed for execution on it. +/// +/// It contains a canonical fork, the block on what pending chain was forked from. +#[auto_impl(&, Box)] +pub trait BlockExecutionForkProvider { + /// Return canonical fork, the block on what post state was forked from. + /// + /// Needed to create state provider. + fn canonical_fork(&self) -> BlockNumHash; +} + +/// Provides comprehensive post-execution state data required for further execution. +/// +/// This trait is used to create a state provider over the pending state and is a combination of +/// [`ExecutionDataProvider`] and [`BlockExecutionForkProvider`]. +/// +/// The pending state includes: +/// * `ExecutionOutcome`: Contains all changes to accounts and storage within the pending chain. +/// * Block hashes: Represents hashes of both the pending chain and canonical blocks. +/// * Canonical fork: Denotes the block from which the pending chain forked. +pub trait FullExecutionDataProvider: ExecutionDataProvider + BlockExecutionForkProvider {} + +impl FullExecutionDataProvider for T where T: ExecutionDataProvider + BlockExecutionForkProvider {} diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 4e589242a91e..4c5d2ab02e7d 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -22,6 +22,9 @@ pub use block_id::*; mod block_hash; pub use block_hash::*; +mod chain; +pub use chain::*; + mod header; pub use header::*; @@ -46,6 +49,9 @@ pub use transactions::*; mod trie; pub use trie::*; +mod chain_info; +pub use chain_info::*; + mod withdrawals; pub use withdrawals::*; @@ -53,3 +59,17 @@ mod database_provider; pub use database_provider::*; pub mod noop; + +mod history; +pub use history::*; + +mod hashing; +pub use hashing::*; +mod stats; +pub use stats::*; + +mod legacy; +pub use legacy::*; + +mod primitives; +pub use primitives::*; diff --git a/crates/storage/storage-api/src/primitives.rs b/crates/storage/storage-api/src/primitives.rs new file mode 100644 index 000000000000..ae2a72e6e531 --- /dev/null +++ b/crates/storage/storage-api/src/primitives.rs @@ -0,0 +1,8 @@ +use reth_primitives::NodePrimitives; + +/// Provider implementation that knows configured [`NodePrimitives`]. +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait NodePrimitivesProvider { + /// The node primitive types. 
+ type Primitives: NodePrimitives; +} diff --git a/crates/storage/storage-api/src/receipts.rs b/crates/storage/storage-api/src/receipts.rs index 06c6103ee9bb..67257cce67ce 100644 --- a/crates/storage/storage-api/src/receipts.rs +++ b/crates/storage/storage-api/src/receipts.rs @@ -1,33 +1,38 @@ use crate::BlockIdReader; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; -use alloy_primitives::{BlockNumber, TxHash, TxNumber}; -use reth_primitives::Receipt; +use alloy_primitives::{TxHash, TxNumber}; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; -/// Client trait for fetching [Receipt] data . +/// Client trait for fetching receipt data. #[auto_impl::auto_impl(&, Arc)] pub trait ReceiptProvider: Send + Sync { + /// The receipt type. + type Receipt: Send + Sync; + /// Get receipt by transaction number /// /// Returns `None` if the transaction is not found. - fn receipt(&self, id: TxNumber) -> ProviderResult>; + fn receipt(&self, id: TxNumber) -> ProviderResult>; /// Get receipt by transaction hash. /// /// Returns `None` if the transaction is not found. - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult>; + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult>; /// Get receipts by block num or hash. /// /// Returns `None` if the block is not found. - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>>; + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>>; /// Get receipts by tx range. fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult>; + ) -> ProviderResult>; } /// Trait extension for `ReceiptProvider`, for types that implement `BlockId` conversion. @@ -40,10 +45,9 @@ pub trait ReceiptProvider: Send + Sync { /// so this trait can only be implemented for types that implement `BlockIdReader`. The /// `BlockIdReader` methods should be used to resolve `BlockId`s to block numbers or hashes, and /// retrieving the receipts should be done using the type's `ReceiptProvider` methods. -#[auto_impl::auto_impl(&, Arc)] pub trait ReceiptProviderIdExt: ReceiptProvider + BlockIdReader { /// Get receipt by block id - fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { let id = match block { BlockId::Hash(hash) => BlockHashOrNumber::Hash(hash.block_hash), BlockId::Number(num_tag) => { @@ -64,24 +68,7 @@ pub trait ReceiptProviderIdExt: ReceiptProvider + BlockIdReader { fn receipts_by_number_or_tag( &self, number_or_tag: BlockNumberOrTag, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.receipts_by_block_id(number_or_tag.into()) } } - -/// Writer trait for writing [`Receipt`] data. -pub trait ReceiptWriter { - /// Appends receipts for a block. - /// - /// # Parameters - /// - `first_tx_index`: The transaction number of the first receipt in the block. - /// - `block_number`: The block number to which the receipts belong. - /// - `receipts`: A vector of optional receipts in the block. If `None`, it means they were - /// pruned. 
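With `Receipt` lifted into an associated type in the `ReceiptProvider` hunk above, downstream code can stay generic over the node's configured receipt without naming `reth_primitives::Receipt`. A hedged sketch; the helper is illustrative and the `reth_storage_api` import path matches this diff:

```rust
// Sketch only: `ReceiptProvider` with its new associated `Receipt` type.
use alloy_eips::BlockHashOrNumber;
use reth_storage_api::ReceiptProvider;
use reth_storage_errors::provider::ProviderResult;

/// Count receipts in a block for any configured receipt type.
fn receipt_count<P: ReceiptProvider>(
    provider: &P,
    block: BlockHashOrNumber,
) -> ProviderResult<usize> {
    // `receipts_by_block` returns `None` when the block itself is unknown.
    Ok(provider.receipts_by_block(block)?.map_or(0, |receipts| receipts.len()))
}
```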
- fn append_block_receipts( - &mut self, - first_tx_index: TxNumber, - block_number: BlockNumber, - receipts: Vec>, - ) -> ProviderResult<()>; -} diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index d37940f04787..0cb26d307434 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -3,12 +3,12 @@ use super::{ StorageRootProvider, }; use alloy_consensus::constants::KECCAK_EMPTY; -use alloy_eips::{BlockId, BlockNumHash, BlockNumberOrTag}; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, StorageValue, B256, U256}; use auto_impl::auto_impl; -use reth_execution_types::ExecutionOutcome; use reth_primitives::Bytecode; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use reth_storage_errors::provider::ProviderResult; +use reth_trie_db::StateCommitment; /// Type alias of boxed [`StateProvider`]. pub type StateProviderBox = Box; @@ -82,6 +82,12 @@ pub trait StateProvider: } } +/// Trait implemented for database providers that can provide the [`StateCommitment`] type. +pub trait StateCommitmentProvider { + /// The [`StateCommitment`] type that can be used to perform state commitment operations. + type StateCommitment: StateCommitment; +} + /// Trait implemented for database providers that can be converted into a historical state provider. pub trait TryIntoHistoricalStateProvider { /// Returns a historical [`StateProvider`] indexed by the given historic block number. @@ -167,77 +173,3 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { /// If the block couldn't be found, returns `None`. fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult>; } - -/// Blockchain trait provider that gives access to the blockchain state that is not yet committed -/// (pending). -pub trait BlockchainTreePendingStateProvider: Send + Sync { - /// Returns a state provider that includes all state changes of the given (pending) block hash. - /// - /// In other words, the state provider will return the state after all transactions of the given - /// hash have been executed. - fn pending_state_provider( - &self, - block_hash: BlockHash, - ) -> ProviderResult> { - self.find_pending_state_provider(block_hash) - .ok_or(ProviderError::StateForHashNotFound(block_hash)) - } - - /// Returns state provider if a matching block exists. - fn find_pending_state_provider( - &self, - block_hash: BlockHash, - ) -> Option>; -} - -/// Provides data required for post-block execution. -/// -/// This trait offers methods to access essential post-execution data, including the state changes -/// in accounts and storage, as well as block hashes for both the pending and canonical chains. -/// -/// The trait includes: -/// * [`ExecutionOutcome`] - Captures all account and storage changes in the pending chain. -/// * Block hashes - Provides access to the block hashes of both the pending chain and canonical -/// blocks. -#[auto_impl(&, Box)] -pub trait ExecutionDataProvider: Send + Sync { - /// Return the execution outcome. - fn execution_outcome(&self) -> &ExecutionOutcome; - /// Return block hash by block number of pending or canonical chain. - fn block_hash(&self, block_number: BlockNumber) -> Option; -} - -impl ExecutionDataProvider for ExecutionOutcome { - fn execution_outcome(&self) -> &ExecutionOutcome { - self - } - - /// Always returns [None] because we don't have any information about the block header. 
- fn block_hash(&self, _block_number: BlockNumber) -> Option { - None - } -} - -/// Fork data needed for execution on it. -/// -/// It contains a canonical fork, the block on what pending chain was forked from. -#[auto_impl(&, Box)] -pub trait BlockExecutionForkProvider { - /// Return canonical fork, the block on what post state was forked from. - /// - /// Needed to create state provider. - fn canonical_fork(&self) -> BlockNumHash; -} - -/// Provides comprehensive post-execution state data required for further execution. -/// -/// This trait is used to create a state provider over the pending state and is a combination of -/// [`ExecutionDataProvider`] and [`BlockExecutionForkProvider`]. -/// -/// The pending state includes: -/// * `ExecutionOutcome`: Contains all changes to accounts and storage within the pending chain. -/// * Block hashes: Represents hashes of both the pending chain and canonical blocks. -/// * Canonical fork: Denotes the block from which the pending chain forked. -pub trait FullExecutionDataProvider: ExecutionDataProvider + BlockExecutionForkProvider {} - -impl FullExecutionDataProvider for T where T: ExecutionDataProvider + BlockExecutionForkProvider {} diff --git a/crates/storage/provider/src/traits/stats.rs b/crates/storage/storage-api/src/stats.rs similarity index 100% rename from crates/storage/provider/src/traits/stats.rs rename to crates/storage/storage-api/src/stats.rs diff --git a/crates/storage/storage-api/src/transactions.rs b/crates/storage/storage-api/src/transactions.rs index f2c44e9e140b..3bb20b7e161a 100644 --- a/crates/storage/storage-api/src/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -1,7 +1,8 @@ -use crate::{BlockNumReader, BlockReader}; +use crate::{BlockNumReader, BlockReader, ReceiptProvider}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockNumber, TxHash, TxNumber}; -use reth_primitives::{TransactionMeta, TransactionSigned, TransactionSignedNoHash}; +use reth_primitives::TransactionMeta; +use reth_primitives_traits::SignedTransaction; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::ops::{Range, RangeBounds, RangeInclusive}; @@ -18,9 +19,12 @@ pub enum TransactionVariant { WithHash, } -/// Client trait for fetching [TransactionSigned] related data. +/// Client trait for fetching transactions related data. #[auto_impl::auto_impl(&, Arc)] pub trait TransactionsProvider: BlockNumReader + Send + Sync { + /// The transaction type this provider reads. + type Transaction: Send + Sync + SignedTransaction; + /// Get internal transaction identifier by transaction hash. /// /// This is the inverse of [TransactionsProvider::transaction_by_id]. @@ -28,23 +32,21 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult>; /// Get transaction by id, computes hash every time so more expensive. - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult>; + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult>; /// Get transaction by id without computing the hash. - fn transaction_by_id_no_hash( - &self, - id: TxNumber, - ) -> ProviderResult>; + fn transaction_by_id_unhashed(&self, id: TxNumber) + -> ProviderResult>; /// Get transaction by transaction hash. 
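Likewise for `TransactionsProvider`: the new associated `Transaction` type (bounded by `SignedTransaction`) lets downstream code forward whatever transaction type the provider serves. A sketch with a hypothetical helper:

```rust
use alloy_primitives::TxHash;
use reth_storage_api::TransactionsProvider;
use reth_storage_errors::provider::ProviderResult;

/// Hypothetical helper: look up a transaction by hash, generic over the
/// provider's `Transaction` type.
fn fetch_tx<P: TransactionsProvider>(
    provider: &P,
    hash: TxHash,
) -> ProviderResult<Option<P::Transaction>> {
    provider.transaction_by_hash(hash)
}
```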
- fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult>; + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult>; /// Get transaction by transaction hash and additional metadata of the block the transaction was /// mined in fn transaction_by_hash_with_meta( &self, hash: TxHash, - ) -> ProviderResult>; + ) -> ProviderResult>; /// Get transaction block number fn transaction_block(&self, id: TxNumber) -> ProviderResult>; @@ -53,19 +55,19 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { fn transactions_by_block( &self, block: BlockHashOrNumber, - ) -> ProviderResult>>; + ) -> ProviderResult>>; /// Get transactions by block range. fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>>; + ) -> ProviderResult>>; /// Get transactions by tx range. fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult>; + ) -> ProviderResult>; /// Get Senders from a tx range. fn senders_by_tx_range( @@ -79,7 +81,13 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { fn transaction_sender(&self, id: TxNumber) -> ProviderResult>; } -/// Client trait for fetching additional [TransactionSigned] related data. +/// A helper type alias to access [`TransactionsProvider::Transaction`]. +pub type ProviderTx
<P> = <P as TransactionsProvider>::Transaction; + +/// A helper type alias to access [`ReceiptProvider::Receipt`]. +pub type ProviderReceipt<P> = <P as ReceiptProvider>
::Receipt; + +/// Client trait for fetching additional transactions related data. #[auto_impl::auto_impl(&, Arc)] pub trait TransactionsProviderExt: BlockReader + Send + Sync { /// Get transactions range by block range. diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index f7d41066d069..ee1ca1de1800 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -4,7 +4,8 @@ use alloy_primitives::{ }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, + updates::{StorageTrieUpdates, TrieUpdates}, + AccountProof, HashedPostState, HashedStorage, MultiProof, StorageMultiProof, StorageProof, TrieInput, }; @@ -56,6 +57,14 @@ pub trait StorageRootProvider: Send + Sync { slot: B256, hashed_storage: HashedStorage, ) -> ProviderResult; + + /// Returns the storage multiproof for target slots. + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult; } /// A type that can generate state proof on top of a given post state. @@ -85,3 +94,33 @@ pub trait StateProofProvider: Send + Sync { target: HashedPostState, ) -> ProviderResult>; } + +/// Trie Writer +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait TrieWriter: Send + Sync { + /// Writes trie updates to the database. + /// + /// Returns the number of entries modified. + fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult; +} + +/// Storage Trie Writer +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait StorageTrieWriter: Send + Sync { + /// Writes storage trie updates from the given storage trie map. + /// + /// First sorts the storage trie updates by the hashed address key, writing in sorted order. + /// + /// Returns the number of entries modified. + fn write_storage_trie_updates( + &self, + storage_tries: &HashMap, + ) -> ProviderResult; + + /// Writes storage trie updates for the given hashed address. + fn write_individual_storage_trie_updates( + &self, + hashed_address: B256, + updates: &StorageTrieUpdates, + ) -> ProviderResult; +} diff --git a/crates/tasks/Cargo.toml b/crates/tasks/Cargo.toml index 82c80c0932b8..68d8e9589791 100644 --- a/crates/tasks/Cargo.toml +++ b/crates/tasks/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # async tokio = { workspace = true, features = ["sync", "rt"] } -tracing-futures = "0.2" +tracing-futures.workspace = true futures-util.workspace = true # metrics diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index 28b5eaba9ffb..340e925ec56b 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -111,6 +111,13 @@ dyn_clone::clone_trait_object!(TaskSpawner); #[non_exhaustive] pub struct TokioTaskExecutor; +impl TokioTaskExecutor { + /// Converts the instance to a boxed [`TaskSpawner`]. 
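A usage sketch for the `boxed` convenience added just below (assumes an active Tokio runtime and that `TokioTaskExecutor` implements `Default`):

```rust
use reth_tasks::{TaskSpawner, TokioTaskExecutor};

/// Components that store a `Box<dyn TaskSpawner>` can be handed the default
/// executor without boxing at every call site.
fn spawn_greeting(spawner: Box<dyn TaskSpawner>) {
    spawner.spawn(Box::pin(async {
        println!("spawned via TaskSpawner");
    }));
}

fn main() {
    let rt = tokio::runtime::Runtime::new().unwrap();
    let _guard = rt.enter();
    spawn_greeting(TokioTaskExecutor::default().boxed());
}
```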
+ pub fn boxed(self) -> Box { + Box::new(self) + } +} + impl TaskSpawner for TokioTaskExecutor { fn spawn(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()> { tokio::task::spawn(fut) diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 1bfb10d86d77..214633188167 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -17,6 +17,8 @@ reth-chain-state.workspace = true reth-chainspec.workspace = true reth-eth-wire-types.workspace = true reth-primitives = { workspace = true, features = ["c-kzg", "secp256k1"] } +reth-primitives-traits.workspace = true +reth-payload-util.workspace = true reth-execution-types.workspace = true reth-fs-util.workspace = true reth-storage-api.workspace = true @@ -84,7 +86,8 @@ serde = [ "parking_lot/serde", "rand?/serde", "revm/serde", - "smallvec/serde" + "smallvec/serde", + "reth-primitives-traits/serde", ] test-utils = [ "rand", @@ -94,7 +97,8 @@ test-utils = [ "reth-chainspec/test-utils", "reth-primitives/test-utils", "reth-provider/test-utils", - "revm/test-utils" + "revm/test-utils", + "reth-primitives-traits/test-utils", ] arbitrary = [ "proptest", @@ -107,7 +111,8 @@ arbitrary = [ "alloy-primitives/arbitrary", "bitflags/arbitrary", "revm/arbitrary", - "smallvec/arbitrary" + "reth-primitives-traits/arbitrary", + "smallvec/arbitrary", ] [[bench]] diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 9d02276db85a..67c36a659981 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -75,10 +75,7 @@ impl BlobStore for DiskFileBlobStore { } fn cleanup(&self) -> BlobStoreCleanupStat { - let txs_to_delete = { - let mut txs_to_delete = self.inner.txs_to_delete.write(); - std::mem::take(&mut *txs_to_delete) - }; + let txs_to_delete = std::mem::take(&mut *self.inner.txs_to_delete.write()); let mut stat = BlobStoreCleanupStat::default(); let mut subsize = 0; debug!(target:"txpool::blob", num_blobs=%txs_to_delete.len(), "Removing blobs from disk"); @@ -554,4 +551,136 @@ mod tests { assert_eq!(store.data_size_hint(), Some(0)); assert_eq!(store.inner.size_tracker.num_blobs.load(Ordering::Relaxed), 0); } + + #[test] + fn disk_insert_and_retrieve() { + let (store, _dir) = tmp_store(); + + let (tx, blob) = rng_blobs(1).into_iter().next().unwrap(); + store.insert(tx, blob.clone()).unwrap(); + + assert!(store.is_cached(&tx)); + let retrieved_blob = store.get(tx).unwrap().map(Arc::unwrap_or_clone).unwrap(); + assert_eq!(retrieved_blob, blob); + } + + #[test] + fn disk_delete_blob() { + let (store, _dir) = tmp_store(); + + let (tx, blob) = rng_blobs(1).into_iter().next().unwrap(); + store.insert(tx, blob).unwrap(); + assert!(store.is_cached(&tx)); + + store.delete(tx).unwrap(); + assert!(store.inner.txs_to_delete.read().contains(&tx)); + store.cleanup(); + + let result = store.get(tx).unwrap(); + assert_eq!( + result, + Some(Arc::new(BlobTransactionSidecar { + blobs: vec![], + commitments: vec![], + proofs: vec![] + })) + ); + } + + #[test] + fn disk_insert_all_and_delete_all() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(5); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs.clone()).unwrap(); + + for (tx, _) in &blobs { + assert!(store.is_cached(tx)); + } + + store.delete_all(txs.clone()).unwrap(); + store.cleanup(); + + for tx in txs { + let result = store.get(tx).unwrap(); + assert_eq!( + result, + Some(Arc::new(BlobTransactionSidecar { + 
blobs: vec![], + commitments: vec![], + proofs: vec![] + })) + ); + } + } + + #[test] + fn disk_get_all_blobs() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(3); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs.clone()).unwrap(); + + let retrieved_blobs = store.get_all(txs.clone()).unwrap(); + for (tx, blob) in retrieved_blobs { + assert!(blobs.contains(&(tx, Arc::unwrap_or_clone(blob)))); + } + + store.delete_all(txs).unwrap(); + store.cleanup(); + } + + #[test] + fn disk_get_exact_blobs_success() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(3); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs.clone()).unwrap(); + + let retrieved_blobs = store.get_exact(txs).unwrap(); + for (retrieved_blob, (_, original_blob)) in retrieved_blobs.into_iter().zip(blobs) { + assert_eq!(Arc::unwrap_or_clone(retrieved_blob), original_blob); + } + } + + #[test] + fn disk_get_exact_blobs_failure() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(2); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs).unwrap(); + + // Try to get a blob that was never inserted + let missing_tx = TxHash::random(); + let result = store.get_exact(vec![txs[0], missing_tx]); + assert!(result.is_err()); + } + + #[test] + fn disk_data_size_hint() { + let (store, _dir) = tmp_store(); + assert_eq!(store.data_size_hint(), Some(0)); + + let blobs = rng_blobs(2); + store.insert_all(blobs).unwrap(); + assert!(store.data_size_hint().unwrap() > 0); + } + + #[test] + fn disk_cleanup_stat() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(3); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs).unwrap(); + + store.delete_all(txs).unwrap(); + let stat = store.cleanup(); + assert_eq!(stat.delete_succeed, 3); + assert_eq!(stat.delete_failed, 0); + } } diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index f1612bcd022e..a21cea6e06c4 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -152,7 +152,7 @@ impl PartialEq for BlobStoreSize { } /// Statistics for the cleanup operation. -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct BlobStoreCleanupStat { /// the number of successfully deleted blobs pub delete_succeed: usize, diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index f22dcf5706e5..3fdcbe8b4eae 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -1,7 +1,9 @@ //! Support for maintaining the blob pool. +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{BlockNumber, B256}; use reth_execution_types::ChainBlocks; +use reth_primitives_traits::{Block, BlockBody, SignedTransaction, TxType}; use std::collections::BTreeMap; /// The type that is used to track canonical blob transactions. @@ -37,13 +39,17 @@ impl BlobStoreCanonTracker { /// /// Note: In case this is a chain that's part of a reorg, this replaces previously tracked /// blocks. 
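Independent of the generic-`Block` signature change that follows, the tracker's bookkeeping can be driven directly. A sketch, assuming `BlobStoreCanonTracker` and `BlobStoreUpdates` are exported from `reth_transaction_pool::blobstore` (hash values arbitrary):

```rust
use alloy_primitives::B256;
use reth_transaction_pool::blobstore::{BlobStoreCanonTracker, BlobStoreUpdates};

fn evictable_blob_hashes() {
    let mut tracker = BlobStoreCanonTracker::default();

    // record which blob transactions were mined in which block
    tracker.add_blocks([
        (1u64, vec![B256::random()]),
        (2u64, vec![B256::random(), B256::random()]),
    ]);

    // once a block is finalized, its blob hashes can be evicted from the store
    if let BlobStoreUpdates::Finalized(hashes) = tracker.on_finalized_block(1) {
        assert_eq!(hashes.len(), 1);
        // forward `hashes` to `BlobStore::delete_all`, then run `cleanup`
    }
}
```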
- pub fn add_new_chain_blocks(&mut self, blocks: &ChainBlocks<'_>) { + pub fn add_new_chain_blocks(&mut self, blocks: &ChainBlocks<'_, B>) + where + B: Block>, + { let blob_txs = blocks.iter().map(|(num, block)| { let iter = block .body .transactions() - .filter(|tx| tx.transaction.is_eip4844()) - .map(|tx| tx.hash); + .iter() + .filter(|tx| tx.tx_type().is_eip4844()) + .map(|tx| tx.trie_hash()); (*num, iter) }); self.add_blocks(blob_txs); @@ -82,6 +88,7 @@ pub enum BlobStoreUpdates { #[cfg(test)] mod tests { use alloy_consensus::Header; + use alloy_primitives::PrimitiveSignature as Signature; use reth_execution_types::Chain; use reth_primitives::{ BlockBody, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, @@ -127,22 +134,22 @@ mod tests { ), body: BlockBody { transactions: vec![ - TransactionSigned { - hash: tx1_hash, - transaction: Transaction::Eip4844(Default::default()), - ..Default::default() - }, - TransactionSigned { - hash: tx2_hash, - transaction: Transaction::Eip4844(Default::default()), - ..Default::default() - }, + TransactionSigned::new( + Transaction::Eip4844(Default::default()), + Signature::test_signature(), + tx1_hash, + ), + TransactionSigned::new( + Transaction::Eip4844(Default::default()), + Signature::test_signature(), + tx2_hash, + ), // Another transaction that is not EIP-4844 - TransactionSigned { - hash: B256::random(), - transaction: Transaction::Eip7702(Default::default()), - ..Default::default() - }, + TransactionSigned::new( + Transaction::Eip7702(Default::default()), + Signature::test_signature(), + B256::random(), + ), ], ..Default::default() }, @@ -160,16 +167,16 @@ mod tests { ), body: BlockBody { transactions: vec![ - TransactionSigned { - hash: tx3_hash, - transaction: Transaction::Eip1559(Default::default()), - ..Default::default() - }, - TransactionSigned { - hash: tx2_hash, - transaction: Transaction::Eip2930(Default::default()), - ..Default::default() - }, + TransactionSigned::new( + Transaction::Eip1559(Default::default()), + Signature::test_signature(), + tx3_hash, + ), + TransactionSigned::new( + Transaction::Eip2930(Default::default()), + Signature::test_signature(), + tx2_hash, + ), ], ..Default::default() }, @@ -178,7 +185,7 @@ mod tests { }; // Extract blocks from the chain - let chain = Chain::new(vec![block1, block2], Default::default(), None); + let chain: Chain = Chain::new(vec![block1, block2], Default::default(), None); let blocks = chain.into_inner().0; // Add new chain blocks to the tracker diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 8d11d7595b14..1c383e8edf01 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -156,7 +156,6 @@ use alloy_primitives::{Address, TxHash, B256, U256}; use aquamarine as _; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; -use reth_primitives::PooledTransactionsElement; use reth_storage_api::StateProviderFactory; use std::{collections::HashSet, sync::Arc}; use tokio::sync::mpsc::Receiver; @@ -409,18 +408,32 @@ where &self, max: usize, ) -> Vec>> { - self.pooled_transactions().into_iter().take(max).collect() + self.pool.pooled_transactions_max(max) } fn get_pooled_transaction_elements( &self, tx_hashes: Vec, limit: GetPooledTransactionLimit, - ) -> Vec { + ) -> Vec<<::Transaction as PoolTransaction>::Pooled> { self.pool.get_pooled_transaction_elements(tx_hashes, limit) } - fn get_pooled_transaction_element(&self, tx_hash: TxHash) -> Option { + fn get_pooled_transactions_as
<P>( + &self, + tx_hashes: Vec<TxHash>, + limit: GetPooledTransactionLimit, + ) -> Vec<P> + where + <Self::Transaction as PoolTransaction>::Pooled: Into<P>
, + { + self.pool.get_pooled_transactions_as(tx_hashes, limit) + } + + fn get_pooled_transaction_element( + &self, + tx_hash: TxHash, + ) -> Option<<::Transaction as PoolTransaction>::Pooled> { self.pool.get_pooled_transaction_element(tx_hash) } @@ -441,6 +454,13 @@ where self.pool.pending_transactions() } + fn pending_transactions_max( + &self, + max: usize, + ) -> Vec>> { + self.pool.pending_transactions_max(max) + } + fn queued_transactions(&self) -> Vec>> { self.pool.queued_transactions() } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 608f8d5745a2..02f218d4b098 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -5,10 +5,10 @@ use crate::{ error::PoolError, metrics::MaintainPoolMetrics, traits::{CanonicalStateUpdate, TransactionPool, TransactionPoolExt}, - BlockInfo, PoolTransaction, + BlockInfo, PoolTransaction, PoolUpdateKind, }; use alloy_eips::BlockNumberOrTag; -use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable}; +use alloy_primitives::{Address, BlockHash, BlockNumber}; use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, @@ -21,6 +21,7 @@ use reth_primitives::{ PooledTransactionsElementEcRecovered, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, }; +use reth_primitives_traits::SignedTransaction; use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskSpawner; use std::{ @@ -106,9 +107,7 @@ pub async fn maintain_transaction_pool( let MaintainPoolConfig { max_update_depth, max_reload_accounts, .. } = config; // ensure the pool points to latest state if let Ok(Some(latest)) = client.header_by_number_or_tag(BlockNumberOrTag::Latest) { - let sealed = latest.seal_slow(); - let (header, seal) = sealed.into_parts(); - let latest = SealedHeader::new(header, seal); + let latest = SealedHeader::seal(latest); let chain_spec = client.chain_spec(); let info = BlockInfo { block_gas_limit: latest.gas_limit, @@ -319,7 +318,7 @@ pub async fn maintain_transaction_pool( // find all transactions that were mined in the old chain but not in the new chain let pruned_old_transactions = old_blocks .transactions_ecrecovered() - .filter(|tx| !new_mined_transactions.contains(&tx.hash)) + .filter(|tx| !new_mined_transactions.contains(tx.tx_hash())) .filter_map(|tx| { if tx.is_eip4844() { // reorged blobs no longer include the blob, which is necessary for @@ -327,7 +326,7 @@ pub async fn maintain_transaction_pool( // been validated previously, we still need the blob in order to // accurately set the transaction's // encoded-length which is propagated over the network. 
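Put differently: a reorged EIP-4844 transaction is only worth re-injecting if its sidecar is still in the blob store, since the pooled wire format must carry the blob again. Schematically (helper name hypothetical):

```rust
use alloy_eips::eip4844::BlobTransactionSidecar;
use alloy_primitives::TxHash;
use reth_transaction_pool::TransactionPool;
use std::sync::Arc;

/// Hypothetical helper mirroring the reorg path above: fetch the sidecar that
/// must travel with the transaction again; `None` means the tx is dropped.
fn sidecar_for_reinsertion<P: TransactionPool>(
    pool: &P,
    hash: TxHash,
) -> Option<BlobTransactionSidecar> {
    pool.get_blob(hash).ok().flatten().map(Arc::unwrap_or_clone)
}
```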
- pool.get_blob(tx.hash) + pool.get_blob(TransactionSigned::hash(&tx)) .ok() .flatten() .map(Arc::unwrap_or_clone) @@ -354,6 +353,7 @@ pub async fn maintain_transaction_pool( changed_accounts, // all transactions mined in the new chain need to be removed from the pool mined_transactions: new_blocks.transaction_hashes().collect(), + update_kind: PoolUpdateKind::Reorg, }; pool.on_canonical_state_change(update); @@ -436,6 +436,7 @@ pub async fn maintain_transaction_pool( pending_block_blob_fee, changed_accounts, mined_transactions, + update_kind: PoolUpdateKind::Commit, }; pool.on_canonical_state_change(update); diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index cf2270978abe..3a068d3a5936 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -13,8 +13,8 @@ use crate::{ validate::ValidTransaction, AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPoolTransaction, EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, - PooledTransactionsElement, PropagatedTransactions, TransactionEvents, TransactionOrigin, - TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, + PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, + TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; use alloy_eips::{ eip1559::ETHEREUM_BLOCK_GAS_LIMIT, @@ -135,14 +135,25 @@ impl TransactionPool for NoopTransactionPool { &self, _tx_hashes: Vec, _limit: GetPooledTransactionLimit, - ) -> Vec { + ) -> Vec<::Pooled> { + vec![] + } + + fn get_pooled_transactions_as( + &self, + _tx_hashes: Vec, + _limit: GetPooledTransactionLimit, + ) -> Vec + where + ::Pooled: Into, + { vec![] } fn get_pooled_transaction_element( &self, _tx_hash: TxHash, - ) -> Option { + ) -> Option<::Pooled> { None } @@ -163,6 +174,13 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn pending_transactions_max( + &self, + _max: usize, + ) -> Vec>> { + vec![] + } + fn queued_transactions(&self) -> Vec>> { vec![] } diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 17165611794e..a4c91aae7268 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -1,12 +1,13 @@ use crate::{ + error::{Eip4844PoolTransactionError, InvalidPoolTransactionError}, identifier::{SenderId, TransactionId}, pool::pending::PendingTransaction, - PayloadTransactions, PoolTransaction, TransactionOrdering, ValidPoolTransaction, + PoolTransaction, TransactionOrdering, ValidPoolTransaction, }; -use alloy_consensus::Transaction; use alloy_primitives::Address; use core::fmt; -use reth_primitives::TransactionSignedEcRecovered; +use reth_payload_util::PayloadTransactions; +use reth_primitives::{InvalidTransactionError, TransactionSignedEcRecovered}; use std::{ collections::{BTreeMap, BTreeSet, HashSet, VecDeque}, sync::Arc, @@ -27,8 +28,8 @@ pub(crate) struct BestTransactionsWithFees { } impl crate::traits::BestTransactions for BestTransactionsWithFees { - fn mark_invalid(&mut self, tx: &Self::Item) { - BestTransactions::mark_invalid(&mut self.best, tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + BestTransactions::mark_invalid(&mut self.best, tx, kind) } fn no_updates(&mut self) { @@ -60,7 +61,11 @@ impl Iterator for BestTransactionsWithFees { { return Some(best); } - crate::traits::BestTransactions::mark_invalid(self, &best); + 
crate::traits::BestTransactions::mark_invalid( + self, + &best, + InvalidPoolTransactionError::Underpriced, + ); } } } @@ -95,7 +100,11 @@ pub(crate) struct BestTransactions { impl BestTransactions { /// Mark the transaction and it's descendants as invalid. - pub(crate) fn mark_invalid(&mut self, tx: &Arc>) { + pub(crate) fn mark_invalid( + &mut self, + tx: &Arc>, + _kind: InvalidPoolTransactionError, + ) { self.invalid.insert(tx.sender_id()); } @@ -154,8 +163,8 @@ impl BestTransactions { } impl crate::traits::BestTransactions for BestTransactions { - fn mark_invalid(&mut self, tx: &Self::Item) { - Self::mark_invalid(self, tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + Self::mark_invalid(self, tx, kind) } fn no_updates(&mut self) { @@ -199,7 +208,12 @@ impl Iterator for BestTransactions { if self.skip_blobs && best.transaction.transaction.is_eip4844() { // blobs should be skipped, marking them as invalid will ensure that no dependent // transactions are returned - self.mark_invalid(&best.transaction) + self.mark_invalid( + &best.transaction, + InvalidPoolTransactionError::Eip4844( + Eip4844PoolTransactionError::NoEip4844Blobs, + ), + ) } else { return Some(best.transaction) } @@ -280,7 +294,10 @@ where if (self.predicate)(&best) { return Some(best) } - self.best.mark_invalid(&best); + self.best.mark_invalid( + &best, + InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + ); } } } @@ -290,8 +307,8 @@ where I: crate::traits::BestTransactions, P: FnMut(&::Item) -> bool + Send, { - fn mark_invalid(&mut self, tx: &Self::Item) { - crate::traits::BestTransactions::mark_invalid(&mut self.best, tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + crate::traits::BestTransactions::mark_invalid(&mut self.best, tx, kind) } fn no_updates(&mut self) { @@ -379,8 +396,8 @@ where I: crate::traits::BestTransactions>>, T: PoolTransaction, { - fn mark_invalid(&mut self, tx: &Self::Item) { - self.inner.mark_invalid(tx) + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + self.inner.mark_invalid(tx, kind) } fn no_updates(&mut self) { @@ -395,139 +412,16 @@ where } } -/// An implementation of [`crate::traits::PayloadTransactions`] that yields -/// a pre-defined set of transactions. -/// -/// This is useful to put a sequencer-specified set of transactions into the block -/// and compose it with the rest of the transactions. -#[derive(Debug)] -pub struct PayloadTransactionsFixed { - transactions: Vec, - index: usize, -} - -impl PayloadTransactionsFixed { - /// Constructs a new [`PayloadTransactionsFixed`]. - pub fn new(transactions: Vec) -> Self { - Self { transactions, index: Default::default() } - } - - /// Constructs a new [`PayloadTransactionsFixed`] with a single transaction. - pub fn single(transaction: T) -> Self { - Self { transactions: vec![transaction], index: Default::default() } - } -} - -impl PayloadTransactions for PayloadTransactionsFixed { - fn next(&mut self, _ctx: ()) -> Option { - (self.index < self.transactions.len()).then(|| { - let tx = self.transactions[self.index].clone(); - self.index += 1; - tx - }) - } - - fn mark_invalid(&mut self, _sender: Address, _nonce: u64) {} -} - -/// Wrapper over [`crate::traits::PayloadTransactions`] that combines transactions from multiple -/// `PayloadTransactions` iterators and keeps track of the gas for both of iterators. 
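With the new signature, consumers state why a candidate is rejected instead of silently skipping it. A caller-side sketch (the fee threshold is hypothetical):

```rust
use reth_transaction_pool::{
    error::InvalidPoolTransactionError, BestTransactions, PoolTransaction, TransactionPool,
};

/// Sketch: walk the best-transaction iterator and mark underpriced candidates,
/// which also invalidates their descendants.
fn skip_underpriced<P: TransactionPool>(pool: &P) {
    let mut best = pool.best_transactions();
    while let Some(tx) = best.next() {
        if tx.transaction.max_fee_per_gas() < 1_000_000_000 {
            best.mark_invalid(&tx, InvalidPoolTransactionError::Underpriced);
        }
    }
}
```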
-/// -/// We can't use [`Iterator::chain`], because: -/// (a) we need to propagate the `mark_invalid` and `no_updates` -/// (b) we need to keep track of the gas -/// -/// Notes that [`PayloadTransactionsChain`] fully drains the first iterator -/// before moving to the second one. -/// -/// If the `before` iterator has transactions that are not fitting into the block, -/// the after iterator will get propagated a `mark_invalid` call for each of them. -#[derive(Debug)] -pub struct PayloadTransactionsChain { - /// Iterator that will be used first - before: B, - /// Allowed gas for the transactions from `before` iterator. If `None`, no gas limit is - /// enforced. - before_max_gas: Option, - /// Gas used by the transactions from `before` iterator - before_gas: u64, - /// Iterator that will be used after `before` iterator - after: A, - /// Allowed gas for the transactions from `after` iterator. If `None`, no gas limit is - /// enforced. - after_max_gas: Option, - /// Gas used by the transactions from `after` iterator - after_gas: u64, -} - -impl PayloadTransactionsChain { - /// Constructs a new [`PayloadTransactionsChain`]. - pub fn new( - before: B, - before_max_gas: Option, - after: A, - after_max_gas: Option, - ) -> Self { - Self { - before, - before_max_gas, - before_gas: Default::default(), - after, - after_max_gas, - after_gas: Default::default(), - } - } -} - -impl PayloadTransactions for PayloadTransactionsChain -where - B: PayloadTransactions, - A: PayloadTransactions, -{ - fn next(&mut self, ctx: ()) -> Option { - while let Some(tx) = self.before.next(ctx) { - if let Some(before_max_gas) = self.before_max_gas { - if self.before_gas + tx.transaction.gas_limit() <= before_max_gas { - self.before_gas += tx.transaction.gas_limit(); - return Some(tx); - } - self.before.mark_invalid(tx.signer(), tx.transaction.nonce()); - self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); - } else { - return Some(tx); - } - } - - while let Some(tx) = self.after.next(ctx) { - if let Some(after_max_gas) = self.after_max_gas { - if self.after_gas + tx.transaction.gas_limit() <= after_max_gas { - self.after_gas += tx.transaction.gas_limit(); - return Some(tx); - } - self.after.mark_invalid(tx.signer(), tx.transaction.nonce()); - } else { - return Some(tx); - } - } - - None - } - - fn mark_invalid(&mut self, sender: Address, nonce: u64) { - self.before.mark_invalid(sender, nonce); - self.after.mark_invalid(sender, nonce); - } -} - #[cfg(test)] mod tests { use super::*; use crate::{ pool::pending::PendingPool, test_utils::{MockOrdering, MockTransaction, MockTransactionFactory}, - Priority, + BestTransactions, Priority, }; use alloy_primitives::U256; + use reth_payload_util::{PayloadTransactionsChain, PayloadTransactionsFixed}; #[test] fn test_best_iter() { @@ -573,7 +467,10 @@ mod tests { // mark the first tx as invalid let invalid = best.independent.iter().next().unwrap(); - best.mark_invalid(&invalid.transaction.clone()); + best.mark_invalid( + &invalid.transaction.clone(), + InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + ); // iterator is empty assert!(best.next().is_none()); @@ -598,7 +495,11 @@ mod tests { > = Box::new(pool.best()); let tx = Iterator::next(&mut best).unwrap(); - crate::traits::BestTransactions::mark_invalid(&mut *best, &tx); + crate::traits::BestTransactions::mark_invalid( + &mut *best, + &tx, + InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + ); assert!(Iterator::next(&mut best).is_none()); } @@ -1020,5 
+921,127 @@ mod tests { assert_eq!(block.next(()).unwrap().signer(), address_regular); } + #[test] + fn test_best_with_fees_iter_no_blob_fee_required() { + // Tests transactions without blob fees where base fees are checked. + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 0; // No blob fee requirement + + // Insert transactions with max_fee_per_gas above the base fee + for nonce in 0..5 { + let tx = MockTransaction::eip1559() + .rng_hash() + .with_nonce(nonce) + .with_max_fee(base_fee as u128 + 5); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + // All transactions should be returned as no blob fee requirement is imposed + for nonce in 0..5 { + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.nonce(), nonce); + } + + // Ensure no more transactions are left + assert!(best.next().is_none()); + } + + #[test] + fn test_best_with_fees_iter_mix_of_blob_and_non_blob_transactions() { + // Tests mixed scenarios with both blob and non-blob transactions. + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 15; + + // Add a non-blob transaction that satisfies the base fee + let tx_non_blob = + MockTransaction::eip1559().rng_hash().with_nonce(0).with_max_fee(base_fee as u128 + 5); + pool.add_transaction(Arc::new(f.validated(tx_non_blob.clone())), 0); + + // Add a blob transaction that satisfies both base fee and blob fee + let tx_blob = MockTransaction::eip4844() + .rng_hash() + .with_nonce(1) + .with_max_fee(base_fee as u128 + 5) + .with_blob_fee(base_fee_per_blob_gas as u128 + 5); + pool.add_transaction(Arc::new(f.validated(tx_blob.clone())), 0); + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + // Verify both transactions are returned + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.transaction, tx_non_blob); + + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.transaction, tx_blob); + + // Ensure no more transactions are left + assert!(best.next().is_none()); + } + + #[test] + fn test_best_transactions_with_skipping_blobs() { + // Tests the skip_blobs functionality to ensure blob transactions are skipped. 
+ let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add a blob transaction + let tx_blob = MockTransaction::eip4844().rng_hash().with_nonce(0).with_blob_fee(100); + let valid_blob_tx = f.validated(tx_blob); + pool.add_transaction(Arc::new(valid_blob_tx), 0); + + // Add a non-blob transaction + let tx_non_blob = MockTransaction::eip1559().rng_hash().with_nonce(1).with_max_fee(200); + let valid_non_blob_tx = f.validated(tx_non_blob.clone()); + pool.add_transaction(Arc::new(valid_non_blob_tx), 0); + + let mut best = pool.best(); + best.skip_blobs(); + + // Only the non-blob transaction should be returned + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.transaction, tx_non_blob); + + // Ensure no more transactions are left + assert!(best.next().is_none()); + } + + #[test] + fn test_best_transactions_no_updates() { + // Tests the no_updates functionality to ensure it properly clears the + // new_transaction_receiver. + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add a transaction + let tx = MockTransaction::eip1559().rng_hash().with_nonce(0).with_max_fee(100); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + + let mut best = pool.best(); + + // Use a broadcast channel for transaction updates + let (_tx_sender, tx_receiver) = + tokio::sync::broadcast::channel::>(1000); + best.new_transaction_receiver = Some(tx_receiver); + + // Ensure receiver is set + assert!(best.new_transaction_receiver.is_some()); + + // Call no_updates to clear the receiver + best.no_updates(); + + // Ensure receiver is cleared + assert!(best.new_transaction_receiver.is_none()); + } + // TODO: Same nonce test } diff --git a/crates/transaction-pool/src/pool/blob.rs b/crates/transaction-pool/src/pool/blob.rs index ac39c6ab781a..e6c0cb245c3f 100644 --- a/crates/transaction-pool/src/pool/blob.rs +++ b/crates/transaction-pool/src/pool/blob.rs @@ -693,4 +693,102 @@ mod tests { ); } } + + #[test] + fn test_empty_pool_operations() { + let mut pool: BlobTransactions = BlobTransactions::default(); + + // Ensure pool is empty + assert!(pool.is_empty()); + assert_eq!(pool.len(), 0); + assert_eq!(pool.size(), 0); + + // Attempt to remove a non-existent transaction + let non_existent_id = TransactionId::new(0.into(), 0); + assert!(pool.remove_transaction(&non_existent_id).is_none()); + + // Check contains method on empty pool + assert!(!pool.contains(&non_existent_id)); + } + + #[test] + fn test_transaction_removal() { + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + + // Add a transaction + let tx = factory.validated_arc(MockTransaction::eip4844()); + let tx_id = *tx.id(); + pool.add_transaction(tx); + + // Remove the transaction + let removed = pool.remove_transaction(&tx_id); + assert!(removed.is_some()); + assert_eq!(*removed.unwrap().id(), tx_id); + assert!(pool.is_empty()); + } + + #[test] + fn test_satisfy_attributes_empty_pool() { + let pool: BlobTransactions = BlobTransactions::default(); + let attributes = BestTransactionsAttributes { blob_fee: Some(100), basefee: 100 }; + // Satisfy attributes on an empty pool should return an empty vector + let satisfied = pool.satisfy_attributes(attributes); + assert!(satisfied.is_empty()); + } + + #[test] + #[should_panic(expected = "transaction is not a blob tx")] + fn test_add_non_blob_transaction() { + // Ensure that adding a non-blob 
transaction causes a panic + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + let tx = factory.validated_arc(MockTransaction::eip1559()); // Not a blob transaction + pool.add_transaction(tx); + } + + #[test] + #[should_panic(expected = "transaction already included")] + fn test_add_duplicate_blob_transaction() { + // Ensure that adding a duplicate blob transaction causes a panic + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + let tx = factory.validated_arc(MockTransaction::eip4844()); + pool.add_transaction(tx.clone()); // First addition + pool.add_transaction(tx); // Attempt to add the same transaction again + } + + #[test] + fn test_remove_transactions_until_limit() { + // Test truncating the pool until it satisfies the given size limit + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + let tx1 = factory.validated_arc(MockTransaction::eip4844().with_size(100)); + let tx2 = factory.validated_arc(MockTransaction::eip4844().with_size(200)); + let tx3 = factory.validated_arc(MockTransaction::eip4844().with_size(300)); + + // Add transactions to the pool + pool.add_transaction(tx1); + pool.add_transaction(tx2); + pool.add_transaction(tx3); + + // Set a size limit that requires truncation + let limit = SubPoolLimit { max_txs: 2, max_size: 300 }; + let removed = pool.truncate_pool(limit); + + // Check that only one transaction was removed to satisfy the limit + assert_eq!(removed.len(), 1); + assert_eq!(pool.len(), 2); + assert!(pool.size() <= limit.max_size); + } + + #[test] + fn test_empty_pool_invariants() { + // Ensure that the invariants hold for an empty pool + let pool: BlobTransactions = BlobTransactions::default(); + pool.assert_invariants(); + assert!(pool.is_empty()); + assert_eq!(pool.size(), 0); + assert_eq!(pool.len(), 0); + } } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 76b2490b12fa..b5391b6e8d73 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -78,7 +78,8 @@ use crate::{ PoolTransaction, PropagatedTransactions, TransactionOrigin, }, validate::{TransactionValidationOutcome, ValidPoolTransaction}, - CanonicalStateUpdate, PoolConfig, TransactionOrdering, TransactionValidator, + CanonicalStateUpdate, EthPoolTransaction, PoolConfig, TransactionOrdering, + TransactionValidator, }; use alloy_primitives::{Address, TxHash, B256}; use best::BestTransactions; @@ -87,9 +88,6 @@ use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use alloy_eips::eip4844::BlobTransactionSidecar; -use reth_primitives::{ - BlobTransaction, PooledTransactionsElement, TransactionSigned, TransactionSignedEcRecovered, -}; use std::{ collections::{HashMap, HashSet}, fmt, @@ -108,7 +106,6 @@ use crate::{ }; pub use best::{ BestPayloadTransactions, BestTransactionFilter, BestTransactionsWithPrioritizedSenders, - PayloadTransactionsChain, PayloadTransactionsFixed, }; pub use blob::{blob_tx_priority, fee_delta}; pub use events::{FullTransactionEvent, TransactionEvent}; @@ -305,60 +302,76 @@ where self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).collect() } - /// Returns the [`BlobTransaction`] for the given transaction if the sidecar exists. + /// Returns only the first `max` transactions in the pool. 
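A caller-side sketch of the capped accessor whose internal implementation follows below (the trait mirrors it for the pending sub-pool via `pending_transactions_max`):

```rust
use reth_transaction_pool::{TransactionPool, ValidPoolTransaction};
use std::sync::Arc;

/// Fetch at most `max` propagatable transactions; the pool now stops iterating
/// early instead of collecting the full set and truncating afterwards.
fn first_pooled<P: TransactionPool>(
    pool: &P,
    max: usize,
) -> Vec<Arc<ValidPoolTransaction<P::Transaction>>> {
    pool.pooled_transactions_max(max)
}
```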
+ pub(crate) fn pooled_transactions_max( + &self, + max: usize, + ) -> Vec>> { + self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).take(max).collect() + } + + /// Converts the internally tracked transaction to the pooled format. /// - /// Caution: this assumes the given transaction is eip-4844 - fn get_blob_transaction(&self, transaction: TransactionSigned) -> Option { - if let Ok(Some(sidecar)) = self.blob_store.get(transaction.hash()) { - if let Ok(blob) = - BlobTransaction::try_from_signed(transaction, Arc::unwrap_or_clone(sidecar)) - { - return Some(blob) - } + /// If the transaction is an EIP-4844 transaction, the blob sidecar is fetched from the blob + /// store and attached to the transaction. + fn to_pooled_transaction( + &self, + transaction: Arc>, + ) -> Option<<::Transaction as PoolTransaction>::Pooled> + where + ::Transaction: EthPoolTransaction, + { + if transaction.is_eip4844() { + let sidecar = self.blob_store.get(*transaction.hash()).ok()??; + transaction.transaction.clone().try_into_pooled_eip4844(sidecar) + } else { + transaction + .transaction + .clone() + .try_into_pooled() + .inspect_err(|err| { + debug!( + target: "txpool", %err, + "failed to convert transaction to pooled element; skipping", + ); + }) + .ok() } - None } - /// Returns converted [`PooledTransactionsElement`] for the given transaction hashes. + /// Returns pooled transactions for the given transaction hashes. pub(crate) fn get_pooled_transaction_elements( &self, tx_hashes: Vec, limit: GetPooledTransactionLimit, - ) -> Vec + ) -> Vec<<::Transaction as PoolTransaction>::Pooled> where - ::Transaction: - PoolTransaction>, + ::Transaction: EthPoolTransaction, + { + self.get_pooled_transactions_as(tx_hashes, limit) + } + + /// Returns pooled transactions for the given transaction hashes as the requested type. + pub(crate) fn get_pooled_transactions_as
<P>( + &self, + tx_hashes: Vec<TxHash>, + limit: GetPooledTransactionLimit, + ) -> Vec<P> + where + <V as TransactionValidator>::Transaction: EthPoolTransaction, + <<V as TransactionValidator>::Transaction as PoolTransaction>::Pooled: Into<P>
, { let transactions = self.get_all(tx_hashes); let mut elements = Vec::with_capacity(transactions.len()); let mut size = 0; for transaction in transactions { let encoded_len = transaction.encoded_length(); - let recovered: TransactionSignedEcRecovered = - transaction.transaction.clone().into_consensus().into(); - let tx = recovered.into_signed(); - let pooled = if tx.is_eip4844() { - // for EIP-4844 transactions, we need to fetch the blob sidecar from the blob store - if let Some(blob) = self.get_blob_transaction(tx) { - PooledTransactionsElement::BlobTransaction(blob) - } else { - continue - } - } else { - match PooledTransactionsElement::try_from(tx) { - Ok(element) => element, - Err(err) => { - debug!( - target: "txpool", %err, - "failed to convert transaction to pooled element; skipping", - ); - continue - } - } + let Some(pooled) = self.to_pooled_transaction(transaction) else { + continue; }; size += encoded_len; - elements.push(pooled); + elements.push(pooled.into()); if limit.exceeds(size) { break @@ -368,25 +381,15 @@ where elements } - /// Returns converted [`PooledTransactionsElement`] for the given transaction hash. + /// Returns converted pooled transaction for the given transaction hash. pub(crate) fn get_pooled_transaction_element( &self, tx_hash: TxHash, - ) -> Option + ) -> Option<<::Transaction as PoolTransaction>::Pooled> where - ::Transaction: - PoolTransaction>, + ::Transaction: EthPoolTransaction, { - self.get(&tx_hash).and_then(|transaction| { - let recovered: TransactionSignedEcRecovered = - transaction.transaction.clone().into_consensus().into(); - let tx = recovered.into_signed(); - if tx.is_eip4844() { - self.get_blob_transaction(tx).map(PooledTransactionsElement::BlobTransaction) - } else { - PooledTransactionsElement::try_from(tx).ok() - } - }) + self.get(&tx_hash).and_then(|tx| self.to_pooled_transaction(tx)) } /// Updates the entire pool after a new block was executed. @@ -394,7 +397,9 @@ where trace!(target: "txpool", ?update, "updating pool on canonical state change"); let block_info = update.block_info(); - let CanonicalStateUpdate { new_tip, changed_accounts, mined_transactions, .. } = update; + let CanonicalStateUpdate { + new_tip, changed_accounts, mined_transactions, update_kind, .. + } = update; self.validator.on_new_head_block(new_tip); let changed_senders = self.changed_senders(changed_accounts.into_iter()); @@ -404,6 +409,7 @@ where block_info, mined_transactions, changed_senders, + update_kind, ); // This will discard outdated transactions based on the account's nonce @@ -684,6 +690,14 @@ where self.get_pool_data().best_transactions_with_attributes(best_transactions_attributes) } + /// Returns only the first `max` transactions in the pending pool. + pub(crate) fn pending_transactions_max( + &self, + max: usize, + ) -> Vec>> { + self.get_pool_data().pending_transactions_iter().take(max).collect() + } + /// Returns all transactions from the pending sub-pool pub(crate) fn pending_transactions(&self) -> Vec>> { self.get_pool_data().pending_transactions() @@ -1300,7 +1314,7 @@ mod tests { // Insert the sidecar into the blob store if the current index is within the blob limit. if n < blob_limit.max_txs { - blob_store.insert(tx.get_hash(), sidecar.clone()).unwrap(); + blob_store.insert(*tx.get_hash(), sidecar.clone()).unwrap(); } // Add the transaction to the pool with external origin and valid outcome. 
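The upshot of the `Into`-bound plumbing above, seen from a consumer (a sketch; generic parameter names arbitrary):

```rust
use alloy_primitives::TxHash;
use reth_transaction_pool::{GetPooledTransactionLimit, PoolTransaction, TransactionPool};

/// Request pooled transactions in whatever representation the network layer
/// wants, as long as the pool's `Pooled` type converts into it.
fn pooled_for_response<Pool, Out>(pool: &Pool, hashes: Vec<TxHash>) -> Vec<Out>
where
    Pool: TransactionPool,
    <Pool::Transaction as PoolTransaction>::Pooled: Into<Out>,
{
    pool.get_pooled_transactions_as(hashes, GetPooledTransactionLimit::None)
}
```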
diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index 407f04fd5be3..29216af47d02 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -916,4 +916,146 @@ mod tests { SenderTransactionCount { count: 1, last_submission_id: 3 } ); } + + #[test] + fn test_pool_size() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Create a transaction with a specific size and add it to the pool + let tx = f.validated_arc(MockTransaction::eip1559().set_size(1024).clone()); + pool.add_transaction(tx); + + // Assert that the reported size of the pool is correct + assert_eq!(pool.size(), 1024); + } + + #[test] + fn test_pool_len() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Initially, the pool should have zero transactions + assert_eq!(pool.len(), 0); + + // Add a transaction to the pool and check the length + let tx = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx); + assert_eq!(pool.len(), 1); + } + + #[test] + fn test_pool_contains() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Create a transaction and get its ID + let tx = f.validated_arc(MockTransaction::eip1559()); + let tx_id = *tx.id(); + + // Before adding, the transaction should not be in the pool + assert!(!pool.contains(&tx_id)); + + // After adding, the transaction should be present in the pool + pool.add_transaction(tx); + assert!(pool.contains(&tx_id)); + } + + #[test] + fn test_get_transaction() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add a transaction to the pool and get its ID + let tx = f.validated_arc(MockTransaction::eip1559()); + let tx_id = *tx.id(); + pool.add_transaction(tx.clone()); + + // Retrieve the transaction using `get()` and assert it matches the added transaction + let retrieved = pool.get(&tx_id).expect("Transaction should exist in the pool"); + assert_eq!(retrieved.transaction.id(), tx.id()); + } + + #[test] + fn test_all_transactions() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add two transactions to the pool + let tx1 = f.validated_arc(MockTransaction::eip1559()); + let tx2 = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx1.clone()); + pool.add_transaction(tx2.clone()); + + // Collect all transaction IDs from the pool + let all_txs: Vec<_> = pool.all().map(|tx| *tx.id()).collect(); + assert_eq!(all_txs.len(), 2); + + // Check that the IDs of both transactions are present + assert!(all_txs.contains(tx1.id())); + assert!(all_txs.contains(tx2.id())); + } + + #[test] + fn test_truncate_pool_edge_case() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add two transactions to the pool + let tx1 = f.validated_arc(MockTransaction::eip1559()); + let tx2 = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx1); + pool.add_transaction(tx2); + + // Set a limit that matches the current number of transactions + let limit = SubPoolLimit { max_txs: 2, max_size: usize::MAX }; + let removed = pool.truncate_pool(limit); + + // No transactions should be removed + assert!(removed.is_empty()); + + // Set a stricter limit that requires truncating one transaction + let limit = SubPoolLimit { max_txs: 1, max_size: usize::MAX }; + let removed = 
pool.truncate_pool(limit); + + // One transaction should be removed, and the pool should have one left + assert_eq!(removed.len(), 1); + assert_eq!(pool.len(), 1); + } + + #[test] + fn test_satisfy_base_fee_transactions() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add two transactions with different max fees + let tx1 = f.validated_arc(MockTransaction::eip1559().set_max_fee(100).clone()); + let tx2 = f.validated_arc(MockTransaction::eip1559().set_max_fee(200).clone()); + pool.add_transaction(tx1); + pool.add_transaction(tx2.clone()); + + // Check that only the second transaction satisfies the base fee requirement + let satisfied = pool.satisfy_base_fee_transactions(150); + assert_eq!(satisfied.len(), 1); + assert_eq!(satisfied[0].id(), tx2.id()) + } + + #[test] + fn test_remove_transaction() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add a transaction to the pool and get its ID + let tx = f.validated_arc(MockTransaction::eip1559()); + let tx_id = *tx.id(); + pool.add_transaction(tx); + + // Ensure the transaction is in the pool before removal + assert!(pool.contains(&tx_id)); + + // Remove the transaction and check that it is no longer in the pool + let removed = pool.remove_transaction(&tx_id); + assert!(removed.is_some()); + assert!(!pool.contains(&tx_id)); + } } diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index f4bce8c85a63..89e673aad998 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -8,7 +8,7 @@ use crate::{ }; use std::{ cmp::Ordering, - collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap}, + collections::{hash_map::Entry, BTreeMap, HashMap}, ops::Bound::Unbounded, sync::Arc, }; @@ -34,8 +34,6 @@ pub struct PendingPool { submission_id: u64, /// _All_ Transactions that are currently inside the pool grouped by their identifier. by_id: BTreeMap>, - /// _All_ transactions sorted by priority - all: BTreeSet>, /// The highest nonce transactions for each sender - like the `independent` set, but the /// highest instead of lowest nonce. 
highest_nonces: HashMap>, @@ -61,7 +59,6 @@ impl PendingPool { ordering, submission_id: 0, by_id: Default::default(), - all: Default::default(), independent_transactions: Default::default(), highest_nonces: Default::default(), size_of: Default::default(), @@ -78,7 +75,6 @@ impl PendingPool { fn clear_transactions(&mut self) -> BTreeMap> { self.independent_transactions.clear(); self.highest_nonces.clear(); - self.all.clear(); self.size_of.reset(); std::mem::take(&mut self.by_id) } @@ -194,7 +190,6 @@ impl PendingPool { } else { self.size_of += tx.transaction.size(); self.update_independents_and_highest_nonces(&tx); - self.all.insert(tx.clone()); self.by_id.insert(id, tx); } } @@ -240,7 +235,6 @@ impl PendingPool { self.size_of += tx.transaction.size(); self.update_independents_and_highest_nonces(&tx); - self.all.insert(tx.clone()); self.by_id.insert(id, tx); } } @@ -307,7 +301,6 @@ impl PendingPool { let tx = PendingTransaction { submission_id, transaction: tx, priority }; self.update_independents_and_highest_nonces(&tx); - self.all.insert(tx.clone()); // send the new transaction to any existing pendingpool static file iterators if self.new_transaction_notifier.receiver_count() > 0 { @@ -337,7 +330,6 @@ impl PendingPool { let tx = self.by_id.remove(id)?; self.size_of -= tx.transaction.size(); - self.all.remove(&tx); if let Some(highest) = self.highest_nonces.get(&id.sender) { if highest.transaction.nonce() == id.nonce { @@ -538,13 +530,12 @@ impl PendingPool { /// Asserts that the bijection between `by_id` and `all` is valid. #[cfg(any(test, feature = "test-utils"))] pub(crate) fn assert_invariants(&self) { - assert_eq!(self.by_id.len(), self.all.len(), "by_id.len() != all.len()"); assert!( - self.independent_transactions.len() <= self.all.len(), + self.independent_transactions.len() <= self.by_id.len(), "independent.len() > all.len()" ); assert!( - self.highest_nonces.len() <= self.all.len(), + self.highest_nonces.len() <= self.by_id.len(), "independent_descendants.len() > all.len()" ); assert_eq!( @@ -880,4 +871,104 @@ mod tests { } } } + + #[test] + fn test_empty_pool_behavior() { + let mut pool = PendingPool::::new(MockOrdering::default()); + + // Ensure the pool is empty + assert!(pool.is_empty()); + assert_eq!(pool.len(), 0); + assert_eq!(pool.size(), 0); + + // Verify that attempting to truncate an empty pool does not panic and returns an empty vec + let removed = pool.truncate_pool(SubPoolLimit { max_txs: 10, max_size: 1000 }); + assert!(removed.is_empty()); + + // Verify that retrieving transactions from an empty pool yields nothing + let all_txs: Vec<_> = pool.all().collect(); + assert!(all_txs.is_empty()); + } + + #[test] + fn test_add_remove_transaction() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add a transaction and check if it's in the pool + let tx = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx.clone(), 0); + assert!(pool.contains(tx.id())); + assert_eq!(pool.len(), 1); + + // Remove the transaction and ensure it's no longer in the pool + let removed_tx = pool.remove_transaction(tx.id()).unwrap(); + assert_eq!(removed_tx.id(), tx.id()); + assert!(!pool.contains(tx.id())); + assert_eq!(pool.len(), 0); + } + + #[test] + fn test_reorder_on_basefee_update() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add two transactions with different fees + let tx1 = 
f.validated_arc(MockTransaction::eip1559().inc_price()); + let tx2 = f.validated_arc(MockTransaction::eip1559().inc_price_by(20)); + pool.add_transaction(tx1.clone(), 0); + pool.add_transaction(tx2.clone(), 0); + + // Ensure the transactions are in the correct order + let mut best = pool.best(); + assert_eq!(best.next().unwrap().hash(), tx2.hash()); + assert_eq!(best.next().unwrap().hash(), tx1.hash()); + + // Update the base fee to a value higher than tx1's fee, causing it to be removed + let removed = pool.update_base_fee((tx1.max_fee_per_gas() + 1) as u64); + assert_eq!(removed.len(), 1); + assert_eq!(removed[0].hash(), tx1.hash()); + + // Verify that only tx2 remains in the pool + assert_eq!(pool.len(), 1); + assert!(pool.contains(tx2.id())); + assert!(!pool.contains(tx1.id())); + } + + #[test] + #[should_panic(expected = "transaction already included")] + fn test_handle_duplicates() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add the same transaction twice and ensure it only appears once + let tx = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx.clone(), 0); + assert!(pool.contains(tx.id())); + assert_eq!(pool.len(), 1); + + // Attempt to add the same transaction again, which should be ignored + pool.add_transaction(tx, 0); + } + + #[test] + fn test_update_blob_fee() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add transactions with varying blob fees + let tx1 = f.validated_arc(MockTransaction::eip4844().set_blob_fee(50).clone()); + let tx2 = f.validated_arc(MockTransaction::eip4844().set_blob_fee(150).clone()); + pool.add_transaction(tx1.clone(), 0); + pool.add_transaction(tx2.clone(), 0); + + // Update the blob fee to a value that causes tx1 to be removed + let removed = pool.update_blob_fee(100); + assert_eq!(removed.len(), 1); + assert_eq!(removed[0].hash(), tx1.hash()); + + // Verify that only tx2 remains in the pool + assert!(pool.contains(tx2.id())); + assert!(!pool.contains(tx1.id())); + } } diff --git a/crates/transaction-pool/src/pool/state.rs b/crates/transaction-pool/src/pool/state.rs index d0a3b10f8cb9..d65fc05b03f6 100644 --- a/crates/transaction-pool/src/pool/state.rs +++ b/crates/transaction-pool/src/pool/state.rs @@ -46,8 +46,6 @@ bitflags::bitflags! { } } -// === impl TxState === - impl TxState { /// The state of a transaction is considered `pending`, if the transaction has: /// - _No_ parked ancestors @@ -89,8 +87,6 @@ pub enum SubPool { Pending, } -// === impl SubPool === - impl SubPool { /// Whether this transaction is to be moved to the pending sub-pool. 
#[inline] @@ -126,16 +122,15 @@ impl SubPool { impl From for SubPool { fn from(value: TxState) -> Self { if value.is_pending() { - return Self::Pending - } - if value.is_blob() { + Self::Pending + } else if value.is_blob() { // all _non-pending_ blob transactions are in the blob sub-pool - return Self::Blob + Self::Blob + } else if value.bits() < TxState::BASE_FEE_POOL_BITS.bits() { + Self::Queued + } else { + Self::BaseFee } - if value.bits() < TxState::BASE_FEE_POOL_BITS.bits() { - return Self::Queued - } - Self::BaseFee } } @@ -204,4 +199,61 @@ mod tests { assert!(state.is_blob()); assert!(!state.is_pending()); } + + #[test] + fn test_tx_state_no_nonce_gap() { + let mut state = TxState::default(); + state |= TxState::NO_NONCE_GAPS; + assert!(!state.has_nonce_gap()); + } + + #[test] + fn test_tx_state_with_nonce_gap() { + let state = TxState::default(); + assert!(state.has_nonce_gap()); + } + + #[test] + fn test_tx_state_enough_balance() { + let mut state = TxState::default(); + state.insert(TxState::ENOUGH_BALANCE); + assert!(state.contains(TxState::ENOUGH_BALANCE)); + } + + #[test] + fn test_tx_state_not_too_much_gas() { + let mut state = TxState::default(); + state.insert(TxState::NOT_TOO_MUCH_GAS); + assert!(state.contains(TxState::NOT_TOO_MUCH_GAS)); + } + + #[test] + fn test_tx_state_enough_fee_cap_block() { + let mut state = TxState::default(); + state.insert(TxState::ENOUGH_FEE_CAP_BLOCK); + assert!(state.contains(TxState::ENOUGH_FEE_CAP_BLOCK)); + } + + #[test] + fn test_tx_base_fee() { + let state = TxState::BASE_FEE_POOL_BITS; + assert_eq!(SubPool::BaseFee, state.into()); + } + + #[test] + fn test_blob_transaction_only() { + let state = TxState::BLOB_TRANSACTION; + assert_eq!(SubPool::Blob, state.into()); + assert!(state.is_blob()); + assert!(!state.is_pending()); + } + + #[test] + fn test_blob_transaction_with_base_fee_bits() { + let mut state = TxState::BASE_FEE_POOL_BITS; + state.insert(TxState::BLOB_TRANSACTION); + assert_eq!(SubPool::Blob, state.into()); + assert!(state.is_blob()); + assert!(!state.is_pending()); + } } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 1d35f742ab6e..86bf5f741c3d 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -15,7 +15,7 @@ use crate::{ AddedPendingTransaction, AddedTransaction, OnNewCanonicalStateOutcome, }, traits::{BestTransactionsAttributes, BlockInfo, PoolSize}, - PoolConfig, PoolResult, PoolTransaction, PriceBumpConfig, TransactionOrdering, + PoolConfig, PoolResult, PoolTransaction, PoolUpdateKind, PriceBumpConfig, TransactionOrdering, ValidPoolTransaction, U256, }; use alloy_consensus::constants::{ @@ -76,6 +76,8 @@ pub struct TxPool { all_transactions: AllTransactions, /// Transaction pool metrics metrics: TxPoolMetrics, + /// The last update kind that was applied to the pool. 
+ latest_update_kind: Option, } // === impl TxPool === @@ -92,6 +94,7 @@ impl TxPool { all_transactions: AllTransactions::new(&config), config, metrics: Default::default(), + latest_update_kind: None, } } @@ -315,7 +318,7 @@ impl TxPool { // blob pool that are valid with the lower blob fee if best_transactions_attributes .blob_fee - .map_or(false, |fee| fee < self.all_transactions.pending_fees.blob_fee as u64) + .is_some_and(|fee| fee < self.all_transactions.pending_fees.blob_fee as u64) { let unlocked_by_blob_fee = self.blob_pool.satisfy_attributes(best_transactions_attributes); @@ -479,6 +482,7 @@ impl TxPool { block_info: BlockInfo, mined_transactions: Vec, changed_senders: HashMap, + update_kind: PoolUpdateKind, ) -> OnNewCanonicalStateOutcome { // update block info let block_hash = block_info.last_seen_block_hash; @@ -497,6 +501,9 @@ impl TxPool { self.update_transaction_type_metrics(); self.metrics.performed_state_updates.increment(1); + // Update the latest update kind + self.latest_update_kind = Some(update_kind); + OnNewCanonicalStateOutcome { block_hash, mined: mined_transactions, promoted, discarded } } @@ -650,7 +657,7 @@ impl TxPool { InsertErr::Overdraft { transaction } => Err(PoolError::new( *transaction.hash(), PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::Overdraft { - cost: transaction.cost(), + cost: *transaction.cost(), balance: on_chain_balance, }), )), @@ -1222,7 +1229,7 @@ impl AllTransactions { tx.state.insert(TxState::NO_NONCE_GAPS); tx.state.insert(TxState::NO_PARKED_ANCESTORS); tx.cumulative_cost = U256::ZERO; - if tx.transaction.cost() > info.balance { + if tx.transaction.cost() > &info.balance { // sender lacks sufficient funds to pay for this transaction tx.state.remove(TxState::ENOUGH_BALANCE); } else { @@ -1321,7 +1328,7 @@ impl AllTransactions { id: *tx.transaction.id(), hash: *tx.transaction.hash(), current: current_pool, - destination: Destination::Pool(tx.subpool), + destination: tx.subpool.into(), }) } } @@ -1439,7 +1446,7 @@ impl AllTransactions { fn contains_conflicting_transaction(&self, tx: &ValidPoolTransaction) -> bool { self.txs_iter(tx.transaction_id.sender) .next() - .map_or(false, |(_, existing)| tx.tx_type_conflicts_with(&existing.transaction)) + .is_some_and(|(_, existing)| tx.tx_type_conflicts_with(&existing.transaction)) } /// Additional checks for a new transaction. @@ -1535,7 +1542,7 @@ impl AllTransactions { } } } - } else if new_blob_tx.cost() > on_chain_balance { + } else if new_blob_tx.cost() > &on_chain_balance { // the transaction would go into overdraft return Err(InsertErr::Overdraft { transaction: Arc::new(new_blob_tx) }) } @@ -1731,7 +1738,7 @@ impl AllTransactions { id: *id, hash: *tx.transaction.hash(), current: current_pool, - destination: Destination::Pool(tx.subpool), + destination: tx.subpool.into(), }) } } @@ -2479,8 +2486,7 @@ mod tests { let tx = MockTransaction::eip1559().inc_price().inc_limit(); let first = f.validated(tx.clone()); pool.insert_tx(first, on_chain_balance, on_chain_nonce).unwrap(); - let tx = - MockTransaction::eip4844().set_sender(tx.get_sender()).inc_price_by(100).inc_limit(); + let tx = MockTransaction::eip4844().set_sender(tx.sender()).inc_price_by(100).inc_limit(); let blob = f.validated(tx); let err = pool.insert_tx(blob, on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::TxTypeConflict { .. 
}), "{err:?}"); @@ -2495,8 +2501,7 @@ mod tests { let tx = MockTransaction::eip4844().inc_price().inc_limit(); let first = f.validated(tx.clone()); pool.insert_tx(first, on_chain_balance, on_chain_nonce).unwrap(); - let tx = - MockTransaction::eip1559().set_sender(tx.get_sender()).inc_price_by(100).inc_limit(); + let tx = MockTransaction::eip1559().set_sender(tx.sender()).inc_price_by(100).inc_limit(); let tx = f.validated(tx); let err = pool.insert_tx(tx, on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::TxTypeConflict { .. }), "{err:?}"); @@ -2615,7 +2620,7 @@ mod tests { assert_eq!( pool.max_account_slots, - pool.tx_count(f.ids.sender_id(&tx.get_sender()).unwrap()) + pool.tx_count(f.ids.sender_id(tx.get_sender()).unwrap()) ); let err = @@ -2647,7 +2652,7 @@ mod tests { assert_eq!( pool.max_account_slots, - pool.tx_count(f.ids.sender_id(&tx.get_sender()).unwrap()) + pool.tx_count(f.ids.sender_id(tx.get_sender()).unwrap()) ); pool.insert_tx( @@ -2822,7 +2827,7 @@ mod tests { let mut changed_senders = HashMap::default(); changed_senders.insert( id.sender, - SenderInfo { state_nonce: next.get_nonce(), balance: U256::from(1_000) }, + SenderInfo { state_nonce: next.nonce(), balance: U256::from(1_000) }, ); let outcome = pool.update_accounts(changed_senders); assert_eq!(outcome.discarded.len(), 1); diff --git a/crates/transaction-pool/src/pool/update.rs b/crates/transaction-pool/src/pool/update.rs index a5cce8291fab..d62b1792e7b7 100644 --- a/crates/transaction-pool/src/pool/update.rs +++ b/crates/transaction-pool/src/pool/update.rs @@ -26,3 +26,9 @@ pub(crate) enum Destination { /// Move transaction to pool Pool(SubPool), } + +impl From for Destination { + fn from(sub_pool: SubPool) -> Self { + Self::Pool(sub_pool) + } +} diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index 858098ec91ad..95a179aec814 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -199,7 +199,7 @@ impl TransactionBuilder { /// Signs the provided transaction using the specified signer and returns a signed transaction. fn signed(transaction: Transaction, signer: B256) -> TransactionSigned { let signature = sign_message(signer, transaction.signature_hash()).unwrap(); - TransactionSigned::from_transaction_and_signature(transaction, signature) + TransactionSigned::new_unhashed(transaction, signature) } /// Sets the signer for the transaction builder. diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 92f74665279d..afa1638c8516 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -28,7 +28,7 @@ use reth_primitives::{ transaction::TryFromRecoveredTransactionError, PooledTransactionsElementEcRecovered, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, }; - +use reth_primitives_traits::InMemorySize; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; /// A transaction pool implementation using [`MockOrdering`] for transaction ordering. @@ -59,6 +59,8 @@ macro_rules! set_value { *$field = new_value; } } + // Ensure the tx cost is always correct after each mutation. + $this.update_cost(); }; } @@ -69,7 +71,7 @@ macro_rules! get_value { MockTransaction::Legacy { $field, .. } | MockTransaction::Eip1559 { $field, .. } | MockTransaction::Eip4844 { $field, .. } | - MockTransaction::Eip2930 { $field, .. 
} => $field.clone(), + MockTransaction::Eip2930 { $field, .. } => $field, } }; } @@ -91,7 +93,7 @@ macro_rules! make_setters_getters { } /// Gets the value of the specified field. - pub fn [](&self) -> $t { + pub const fn [](&self) -> &$t { get_value!(self => $name) } )*} @@ -123,6 +125,8 @@ pub enum MockTransaction { input: Bytes, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, /// EIP-2930 transaction type. Eip2930 { @@ -148,6 +152,8 @@ pub enum MockTransaction { access_list: AccessList, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, /// EIP-1559 transaction type. Eip1559 { @@ -175,6 +181,8 @@ pub enum MockTransaction { input: Bytes, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, /// EIP-4844 transaction type. Eip4844 { @@ -206,6 +214,8 @@ pub enum MockTransaction { sidecar: BlobTransactionSidecar, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, } @@ -235,6 +245,7 @@ impl MockTransaction { value: Default::default(), input: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -252,6 +263,7 @@ impl MockTransaction { gas_price: 0, access_list: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -270,6 +282,7 @@ impl MockTransaction { input: Bytes::new(), access_list: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -290,6 +303,7 @@ impl MockTransaction { access_list: Default::default(), sidecar: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -560,6 +574,19 @@ impl MockTransaction { pub const fn is_eip2930(&self) -> bool { matches!(self, Self::Eip2930 { .. }) } + + fn update_cost(&mut self) { + match self { + Self::Legacy { cost, gas_limit, gas_price, value, .. } | + Self::Eip2930 { cost, gas_limit, gas_price, value, .. } => { + *cost = U256::from(*gas_limit) * U256::from(*gas_price) + *value + } + Self::Eip1559 { cost, gas_limit, max_fee_per_gas, value, .. } | + Self::Eip4844 { cost, gas_limit, max_fee_per_gas, value, .. } => { + *cost = U256::from(*gas_limit) * U256::from(*max_fee_per_gas) + *value + } + }; + } } impl PoolTransaction for MockTransaction { @@ -581,48 +608,39 @@ impl PoolTransaction for MockTransaction { pooled.into() } + fn try_consensus_into_pooled( + tx: Self::Consensus, + ) -> Result { + Self::Pooled::try_from(tx).map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing) + } + fn hash(&self) -> &TxHash { - match self { - Self::Legacy { hash, .. } | - Self::Eip1559 { hash, .. } | - Self::Eip4844 { hash, .. } | - Self::Eip2930 { hash, .. } => hash, - } + self.get_hash() } fn sender(&self) -> Address { - match self { - Self::Legacy { sender, .. } | - Self::Eip1559 { sender, .. } | - Self::Eip4844 { sender, .. } | - Self::Eip2930 { sender, .. } => *sender, - } + *self.get_sender() } fn nonce(&self) -> u64 { - match self { - Self::Legacy { nonce, .. } | - Self::Eip1559 { nonce, .. } | - Self::Eip4844 { nonce, .. } | - Self::Eip2930 { nonce, .. 
} => *nonce, - } + *self.get_nonce() } - fn cost(&self) -> U256 { + // Having `get_cost` from `make_setters_getters` would be cleaner but we didn't + // want to also generate the error-prone cost setters. For now cost should be + // correct at construction and auto-updated per field update via `update_cost`, + // not to be manually set. + fn cost(&self) -> &U256 { match self { - Self::Legacy { gas_price, value, gas_limit, .. } | - Self::Eip2930 { gas_limit, gas_price, value, .. } => { - U256::from(*gas_limit) * U256::from(*gas_price) + *value - } - Self::Eip1559 { max_fee_per_gas, value, gas_limit, .. } | - Self::Eip4844 { max_fee_per_gas, value, gas_limit, .. } => { - U256::from(*gas_limit) * U256::from(*max_fee_per_gas) + *value - } + Self::Legacy { cost, .. } | + Self::Eip2930 { cost, .. } | + Self::Eip1559 { cost, .. } | + Self::Eip4844 { cost, .. } => cost, } } fn gas_limit(&self) -> u64 { - self.get_gas_limit() + *self.get_gas_limit() } fn max_fee_per_gas(&self) -> u128 { @@ -703,22 +721,12 @@ impl PoolTransaction for MockTransaction { /// Returns the input data associated with the transaction. fn input(&self) -> &[u8] { - match self { - Self::Legacy { .. } => &[], - Self::Eip1559 { input, .. } | - Self::Eip4844 { input, .. } | - Self::Eip2930 { input, .. } => input, - } + self.get_input() } /// Returns the size of the transaction. fn size(&self) -> usize { - match self { - Self::Legacy { size, .. } | - Self::Eip1559 { size, .. } | - Self::Eip4844 { size, .. } | - Self::Eip2930 { size, .. } => *size, - } + *self.get_size() } /// Returns the transaction type as a byte identifier. @@ -762,6 +770,14 @@ impl EthPoolTransaction for MockTransaction { } } + fn try_into_pooled_eip4844(self, sidecar: Arc) -> Option { + Self::Pooled::try_from_blob_transaction( + self.into_consensus(), + Arc::unwrap_or_clone(sidecar), + ) + .ok() + } + fn validate_blob( &self, _blob: &BlobTransactionSidecar, @@ -808,6 +824,7 @@ impl TryFrom for MockTransaction { value, input, size, + cost: U256::from(gas_limit) * U256::from(gas_price) + value, }), Transaction::Eip2930(TxEip2930 { chain_id, @@ -830,6 +847,7 @@ impl TryFrom for MockTransaction { input, access_list, size, + cost: U256::from(gas_limit) * U256::from(gas_price) + value, }), Transaction::Eip1559(TxEip1559 { chain_id, @@ -854,6 +872,7 @@ impl TryFrom for MockTransaction { input, access_list, size, + cost: U256::from(gas_limit) * U256::from(max_fee_per_gas) + value, }), Transaction::Eip4844(TxEip4844 { chain_id, @@ -882,6 +901,7 @@ impl TryFrom for MockTransaction { access_list, sidecar: BlobTransactionSidecar::default(), size, + cost: U256::from(gas_limit) * U256::from(max_fee_per_gas) + value, }), _ => unreachable!("Invalid transaction type"), } @@ -898,11 +918,8 @@ impl From for MockTransaction { impl From for TransactionSignedEcRecovered { fn from(tx: MockTransaction) -> Self { - let signed_tx = TransactionSigned { - hash: *tx.hash(), - signature: Signature::test_signature(), - transaction: tx.clone().into(), - }; + let signed_tx = + TransactionSigned::new(tx.clone().into(), Signature::test_signature(), *tx.hash()); Self::from_signed_transaction(signed_tx, tx.sender()) } @@ -913,28 +930,24 @@ impl From for Transaction { match mock { MockTransaction::Legacy { chain_id, - hash: _, - sender: _, nonce, gas_price, gas_limit, to, value, input, - size: _, + .. 
} => Self::Legacy(TxLegacy { chain_id, nonce, gas_price, gas_limit, to, value, input }), MockTransaction::Eip2930 { chain_id, - hash: _, - sender: _, nonce, - to, + gas_price, gas_limit, - input, + to, value, - gas_price, access_list, - size: _, + input, + .. } => Self::Eip2930(TxEip2930 { chain_id, nonce, @@ -947,17 +960,15 @@ impl From for Transaction { }), MockTransaction::Eip1559 { chain_id, - hash: _, - sender: _, nonce, + gas_limit, max_fee_per_gas, max_priority_fee_per_gas, - gas_limit, to, value, access_list, input, - size: _, + .. } => Self::Eip1559(TxEip1559 { chain_id, nonce, @@ -971,19 +982,17 @@ impl From for Transaction { }), MockTransaction::Eip4844 { chain_id, - hash: _, - sender: _, nonce, + gas_limit, max_fee_per_gas, max_priority_fee_per_gas, - max_fee_per_blob_gas, - gas_limit, to, value, access_list, - input, sidecar, - size: _, + max_fee_per_blob_gas, + input, + .. } => Self::Eip4844(TxEip4844 { chain_id, nonce, @@ -1008,107 +1017,13 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { use proptest::prelude::Strategy; use proptest_arbitrary_interop::arb; - arb::<(Transaction, Address, B256)>() - .prop_map(|(tx, sender, tx_hash)| match &tx { - Transaction::Legacy(TxLegacy { - chain_id, - nonce, - gas_price, - gas_limit, - to, - value, - input, - }) => Self::Legacy { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - gas_price: *gas_price, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - size: tx.size(), - }, - - Transaction::Eip2930(TxEip2930 { - chain_id, - nonce, - gas_price, - gas_limit, - to, - value, - access_list, - input, - }) => Self::Eip2930 { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - gas_price: *gas_price, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - access_list: access_list.clone(), - size: tx.size(), - }, - Transaction::Eip1559(TxEip1559 { - chain_id, - nonce, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - to, - value, - input, - access_list, - }) => Self::Eip1559 { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - max_fee_per_gas: *max_fee_per_gas, - max_priority_fee_per_gas: *max_priority_fee_per_gas, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - access_list: access_list.clone(), - size: tx.size(), - }, - Transaction::Eip4844(TxEip4844 { - chain_id, - nonce, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - to, - value, - input, - max_fee_per_blob_gas, - access_list, - blob_versioned_hashes: _, - }) => Self::Eip4844 { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - max_fee_per_gas: *max_fee_per_gas, - max_priority_fee_per_gas: *max_priority_fee_per_gas, - max_fee_per_blob_gas: *max_fee_per_blob_gas, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - access_list: access_list.clone(), - // only generate a sidecar if it is a 4844 tx - also for the sake of - // performance just use a default sidecar - sidecar: BlobTransactionSidecar::default(), - size: tx.size(), - }, - #[allow(unreachable_patterns)] - _ => unimplemented!(), + arb::<(TransactionSigned, Address)>() + .prop_map(|(signed_transaction, signer)| { + TransactionSignedEcRecovered::from_signed_transaction(signed_transaction, signer) + .try_into() + .expect( + "Failed to create an Arbitrary MockTransaction via TransactionSignedEcRecovered", + ) }) .boxed() } @@ -1127,8 +1042,8 @@ pub struct MockTransactionFactory { impl MockTransactionFactory { 
/// Generates a transaction ID for the given [`MockTransaction`]. pub fn tx_id(&mut self, tx: &MockTransaction) -> TransactionId { - let sender = self.ids.sender_id_or_create(tx.get_sender()); - TransactionId::new(sender, tx.get_nonce()) + let sender = self.ids.sender_id_or_create(tx.sender()); + TransactionId::new(sender, tx.nonce()) } /// Validates a [`MockTransaction`] and returns a [`MockValidTx`]. diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 185c08c109a8..b5fc0db5204d 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1,5 +1,3 @@ -#![allow(deprecated)] - use crate::{ blobstore::BlobStoreError, error::{InvalidPoolTransactionError, PoolResult}, @@ -22,8 +20,10 @@ use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use reth_primitives::{ kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSignedEcRecovered, + PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSigned, + TransactionSignedEcRecovered, }; +use reth_primitives_traits::SignedTransaction; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::{ @@ -232,17 +232,41 @@ pub trait TransactionPool: Send + Sync + Clone { &self, tx_hashes: Vec<TxHash>, limit: GetPooledTransactionLimit, - ) -> Vec<PooledTransactionsElement>; + ) -> Vec<<Self::Transaction as PoolTransaction>::Pooled>; - /// Returns converted [PooledTransactionsElement] for the given transaction hash. + /// Returns the pooled transaction variant for the given transaction hash as the requested type. + fn get_pooled_transactions_as<P>( + &self, + tx_hashes: Vec<TxHash>, + limit: GetPooledTransactionLimit, + ) -> Vec<P> + where + <Self::Transaction as PoolTransaction>::Pooled: Into<P>; + + /// Returns the pooled transaction variant for the given transaction hash. /// /// This adheres to the expected behavior of /// [`GetPooledTransactions`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09): /// /// If the transaction is a blob transaction, the sidecar will be included. /// + /// It is expected that this variant represents the valid p2p format for full transactions. + /// E.g. for EIP-4844 transactions this is the consensus transaction format with the blob + /// sidecar. + /// /// Consumer: P2P - fn get_pooled_transaction_element(&self, tx_hash: TxHash) -> Option<PooledTransactionsElement>; + fn get_pooled_transaction_element( + &self, + tx_hash: TxHash, + ) -> Option<<Self::Transaction as PoolTransaction>::Pooled>; + + /// Returns the pooled transaction variant for the given transaction hash as the requested type. + fn get_pooled_transaction_as<P>(&self, tx_hash: TxHash) -> Option<P> + where + <Self::Transaction as PoolTransaction>::Pooled: Into<P>, + { + self.get_pooled_transaction_element(tx_hash).map(Into::into) + } /// Returns an iterator that yields transactions that are ready for block production. /// @@ -271,6 +295,15 @@ pub trait TransactionPool: Send + Sync + Clone { /// Consumer: RPC fn pending_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>; + /// Returns the first `max` transactions that can be included in the next block. + /// See + /// + /// Consumer: Block production + fn pending_transactions_max( + &self, + max: usize, + ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>; + /// Returns all transactions that can be included in _future_ blocks. /// /// This and [Self::pending_transactions] are mutually exclusive.
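Aside (not part of the patch): a minimal sketch of how a block builder might consume the bounded accessor introduced in the hunk above. It assumes the crate-root re-exports of `TransactionPool` and `ValidPoolTransaction`, and that `pending_transactions_max` yields transactions in the pool's ready order; both are assumptions based on the signatures shown in this diff.

use reth_transaction_pool::{TransactionPool, ValidPoolTransaction};
use std::sync::Arc;

/// Collect at most `max` ready transactions without cloning the entire
/// pending set, which is the motivation for `pending_transactions_max`.
fn best_n<P: TransactionPool>(
    pool: &P,
    max: usize,
) -> Vec<Arc<ValidPoolTransaction<P::Transaction>>> {
    // Bounded variant of `pending_transactions`: stops after `max` entries.
    pool.pending_transactions_max(max)
}

A caller that previously did `pool.pending_transactions().into_iter().take(n)` can switch to this accessor and avoid materializing the full pending set first.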
@@ -485,7 +518,7 @@ pub trait TransactionPoolExt: TransactionPool { /// /// ## Fee changes /// - /// The [CanonicalStateUpdate] includes the base and blob fee of the pending block, which + /// The [`CanonicalStateUpdate`] includes the base and blob fee of the pending block, which /// affects the dynamic fee requirement of pending transactions in the pool. /// /// ## EIP-4844 Blob transactions @@ -553,6 +586,11 @@ impl<T: PoolTransaction> AllPoolTransactions<T> { pub fn queued_recovered(&self) -> impl Iterator<Item = TransactionSignedEcRecovered> + '_ { self.queued.iter().map(|tx| tx.transaction.clone().into()) } + + /// Returns an iterator over all transactions, both pending and queued. + pub fn all(&self) -> impl Iterator<Item = TransactionSignedEcRecovered> + '_ { + self.pending.iter().chain(self.queued.iter()).map(|tx| tx.transaction.clone().into()) + } } impl<T: PoolTransaction> Default for AllPoolTransactions<T> { @@ -671,6 +709,15 @@ impl TransactionOrigin { } } +/// Represents the kind of update to the canonical state. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PoolUpdateKind { + /// The update was due to a block commit. + Commit, + /// The update was due to a reorganization. + Reorg, +} + /// Represents changes after a new canonical block or range of canonical blocks was added to the /// chain. /// @@ -695,6 +742,8 @@ pub struct CanonicalStateUpdate<'a> { pub changed_accounts: Vec<ChangedAccount>, /// All mined transactions in the block range. pub mined_transactions: Vec<B256>, + /// The kind of update to the canonical state. + pub update_kind: PoolUpdateKind, } impl CanonicalStateUpdate<'_> { @@ -757,7 +806,7 @@ pub trait BestTransactions: Iterator + Send { /// Implementers must ensure all subsequent transactions _don't_ depend on this transaction. /// In other words, this must remove the given transaction _and_ drain all transactions that /// depend on it. - fn mark_invalid(&mut self, transaction: &Self::Item); + fn mark_invalid(&mut self, transaction: &Self::Item, kind: InvalidPoolTransactionError); /// An iterator may be able to receive additional pending transactions that weren't present in /// the pool when it was created. @@ -819,8 +868,8 @@ impl<T> BestTransactions for Box<T> where T: BestTransactions + ?Sized, { - fn mark_invalid(&mut self, transaction: &Self::Item) { - (**self).mark_invalid(transaction); + fn mark_invalid(&mut self, transaction: &Self::Item, kind: InvalidPoolTransactionError) { + (**self).mark_invalid(transaction, kind) } fn no_updates(&mut self) { @@ -838,7 +887,7 @@ where /// A no-op implementation that yields no transactions. impl<T: Send> BestTransactions for std::iter::Empty<T> { - fn mark_invalid(&mut self, _tx: &T) {} + fn mark_invalid(&mut self, _tx: &T, _kind: InvalidPoolTransactionError) {} fn no_updates(&mut self) {} @@ -906,16 +955,24 @@ impl BestTransactionsAttributes { } } -/// Trait for transaction types used inside the pool +/// Trait for transaction types used inside the pool. +/// +/// This supports two transaction formats: +/// - Consensus format: the form the transaction takes when it is included in a block. +/// - Pooled format: the form the transaction takes when it is gossiped around the network. +/// +/// This distinction is necessary for the EIP-4844 blob transactions, which require an additional +/// sidecar when they are gossiped around the network. It is expected that the `Consensus` format is +/// a subset of the `Pooled` format. pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// Associated error type for the `try_from_consensus` method.
- type TryFromConsensusError; + type TryFromConsensusError: fmt::Display; /// Associated type representing the raw consensus variant of the transaction. type Consensus: From + TryInto; /// Associated type representing the recovered pooled variant of the transaction. - type Pooled: Into; + type Pooled: Encodable2718 + Into; /// Define a method to convert from the `Consensus` type to `Self` fn try_from_consensus(tx: Self::Consensus) -> Result { @@ -932,6 +989,16 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { pooled.into() } + /// Tries to convert the `Consensus` type into the `Pooled` type. + fn try_into_pooled(self) -> Result { + Self::try_consensus_into_pooled(self.into_consensus()) + } + + /// Tries to convert the `Consensus` type into the `Pooled` type. + fn try_consensus_into_pooled( + tx: Self::Consensus, + ) -> Result; + /// Hash of the transaction. fn hash(&self) -> &TxHash; @@ -947,7 +1014,7 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// For legacy transactions: `gas_price * gas_limit + tx_value`. /// For EIP-4844 blob transactions: `max_fee_per_gas * gas_limit + tx_value + /// max_blob_fee_per_gas * blob_gas_used`. - fn cost(&self) -> U256; + fn cost(&self) -> &U256; /// Amount of gas that should be used in executing this transaction. This is paid up-front. fn gas_limit(&self) -> u64; @@ -1046,11 +1113,19 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { } } -/// Super trait for transactions that can be converted to and from Eth transactions +/// Super trait for transactions that can be converted to and from Eth transactions intended for the +/// ethereum style pool. +/// +/// This extends the [`PoolTransaction`] trait with additional methods that are specific to the +/// Ethereum pool. pub trait EthPoolTransaction: PoolTransaction< - Consensus: From + Into, - Pooled: From + Into, + Consensus: From + + Into + + Into, + Pooled: From + + Into + + Into, > { /// Extracts the blob sidecar from the transaction. @@ -1059,6 +1134,13 @@ pub trait EthPoolTransaction: /// Returns the number of blobs this transaction has. fn blob_count(&self) -> usize; + /// A specialization for the EIP-4844 transaction type. + /// Tries to reattach the blob sidecar to the transaction. + /// + /// This returns an option, but callers should ensure that the transaction is an EIP-4844 + /// transaction: [`PoolTransaction::is_eip4844`]. + fn try_into_pooled_eip4844(self, sidecar: Arc) -> Option; + /// Validates the blob sidecar of the transaction with the given settings. fn validate_blob( &self, @@ -1075,9 +1157,9 @@ pub trait EthPoolTransaction: /// This type is essentially a wrapper around [`TransactionSignedEcRecovered`] with additional /// fields derived from the transaction that are frequently used by the pools for ordering. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct EthPooledTransaction { - /// `EcRecovered` transaction info - pub(crate) transaction: TransactionSignedEcRecovered, +pub struct EthPooledTransaction { + /// `EcRecovered` transaction, the consensus format. + pub(crate) transaction: T, /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. /// For legacy transactions: `gas_price * gas_limit + tx_value`. @@ -1093,30 +1175,6 @@ pub struct EthPooledTransaction { pub(crate) blob_sidecar: EthBlobTransactionSidecar, } -/// Represents the blob sidecar of the [`EthPooledTransaction`]. 
-#[derive(Debug, Clone, PartialEq, Eq)] -pub enum EthBlobTransactionSidecar { - /// This transaction does not have a blob sidecar - None, - /// This transaction has a blob sidecar (EIP-4844) but it is missing - /// - /// It was either extracted after being inserted into the pool or re-injected after reorg - /// without the blob sidecar - Missing, - /// The eip-4844 transaction was pulled from the network and still has its blob sidecar - Present(BlobTransactionSidecar), -} - -impl EthBlobTransactionSidecar { - /// Returns the blob sidecar if it is present - pub const fn maybe_sidecar(&self) -> Option<&BlobTransactionSidecar> { - match self { - Self::Present(sidecar) => Some(sidecar), - _ => None, - } - } -} - impl EthPooledTransaction { /// Create new instance of [Self]. /// @@ -1125,34 +1183,20 @@ impl EthPooledTransaction { pub fn new(transaction: TransactionSignedEcRecovered, encoded_length: usize) -> Self { let mut blob_sidecar = EthBlobTransactionSidecar::None; - #[allow(unreachable_patterns)] - let gas_cost = match &transaction.transaction { - Transaction::Legacy(t) => { - U256::from(t.gas_price).saturating_mul(U256::from(t.gas_limit)) - } - Transaction::Eip2930(t) => { - U256::from(t.gas_price).saturating_mul(U256::from(t.gas_limit)) - } - Transaction::Eip1559(t) => { - U256::from(t.max_fee_per_gas).saturating_mul(U256::from(t.gas_limit)) - } - Transaction::Eip4844(t) => { - blob_sidecar = EthBlobTransactionSidecar::Missing; - U256::from(t.max_fee_per_gas).saturating_mul(U256::from(t.gas_limit)) - } - Transaction::Eip7702(t) => { - U256::from(t.max_fee_per_gas).saturating_mul(U256::from(t.gas_limit)) - } - _ => U256::ZERO, - }; - let mut cost = transaction.value(); - cost = cost.saturating_add(gas_cost); + let gas_cost = U256::from(transaction.transaction.max_fee_per_gas()) + .saturating_mul(U256::from(transaction.transaction.gas_limit())); + + let mut cost = gas_cost.saturating_add(transaction.value()); if let Some(blob_tx) = transaction.as_eip4844() { // Add max blob cost using saturating math to avoid overflow cost = cost.saturating_add(U256::from( blob_tx.max_fee_per_blob_gas.saturating_mul(blob_tx.blob_gas() as u128), )); + + // because the blob sidecar is not included in this transaction variant, mark it as + // missing + blob_sidecar = EthBlobTransactionSidecar::Missing; } Self { transaction, cost, encoded_length, blob_sidecar } @@ -1193,9 +1237,15 @@ impl PoolTransaction for EthPooledTransaction { type Pooled = PooledTransactionsElementEcRecovered; + fn try_consensus_into_pooled( + tx: Self::Consensus, + ) -> Result { + Self::Pooled::try_from(tx).map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing) + } + /// Returns hash of the transaction. fn hash(&self) -> &TxHash { - self.transaction.hash_ref() + self.transaction.tx_hash() } /// Returns the Sender of the transaction. @@ -1214,8 +1264,8 @@ impl PoolTransaction for EthPooledTransaction { /// For legacy transactions: `gas_price * gas_limit + tx_value`. /// For EIP-4844 blob transactions: `max_fee_per_gas * gas_limit + tx_value + /// max_blob_fee_per_gas * blob_gas_used`. - fn cost(&self) -> U256 { - self.cost + fn cost(&self) -> &U256 { + &self.cost } /// Amount of gas that should be used in executing this transaction. This is paid up-front. @@ -1229,15 +1279,7 @@ impl PoolTransaction for EthPooledTransaction { /// /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). 
fn max_fee_per_gas(&self) -> u128 { - #[allow(unreachable_patterns)] - match &self.transaction.transaction { - Transaction::Legacy(tx) => tx.gas_price, - Transaction::Eip2930(tx) => tx.gas_price, - Transaction::Eip1559(tx) => tx.max_fee_per_gas, - Transaction::Eip4844(tx) => tx.max_fee_per_gas, - Transaction::Eip7702(tx) => tx.max_fee_per_gas, - _ => 0, - } + self.transaction.transaction.max_fee_per_gas() } fn access_list(&self) -> Option<&AccessList> { @@ -1248,14 +1290,7 @@ impl PoolTransaction for EthPooledTransaction { /// /// This will return `None` for non-EIP1559 transactions fn max_priority_fee_per_gas(&self) -> Option { - #[allow(unreachable_patterns, clippy::match_same_arms)] - match &self.transaction.transaction { - Transaction::Legacy(_) | Transaction::Eip2930(_) => None, - Transaction::Eip1559(tx) => Some(tx.max_priority_fee_per_gas), - Transaction::Eip4844(tx) => Some(tx.max_priority_fee_per_gas), - Transaction::Eip7702(tx) => Some(tx.max_priority_fee_per_gas), - _ => None, - } + self.transaction.transaction.max_priority_fee_per_gas() } fn max_fee_per_blob_gas(&self) -> Option { @@ -1323,6 +1358,14 @@ impl EthPoolTransaction for EthPooledTransaction { } } + fn try_into_pooled_eip4844(self, sidecar: Arc) -> Option { + PooledTransactionsElementEcRecovered::try_from_blob_transaction( + self.into_consensus(), + Arc::unwrap_or_clone(sidecar), + ) + .ok() + } + fn validate_blob( &self, sidecar: &BlobTransactionSidecar, @@ -1375,6 +1418,30 @@ impl From for TransactionSignedEcRecovered { } } +/// Represents the blob sidecar of the [`EthPooledTransaction`]. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum EthBlobTransactionSidecar { + /// This transaction does not have a blob sidecar + None, + /// This transaction has a blob sidecar (EIP-4844) but it is missing + /// + /// It was either extracted after being inserted into the pool or re-injected after reorg + /// without the blob sidecar + Missing, + /// The eip-4844 transaction was pulled from the network and still has its blob sidecar + Present(BlobTransactionSidecar), +} + +impl EthBlobTransactionSidecar { + /// Returns the blob sidecar if it is present + pub const fn maybe_sidecar(&self) -> Option<&BlobTransactionSidecar> { + match self { + Self::Present(sidecar) => Some(sidecar), + _ => None, + } + } +} + /// Represents the current status of the pool. #[derive(Debug, Clone, Copy, Default)] pub struct PoolSize { @@ -1501,24 +1568,6 @@ impl Stream for NewSubpoolTransactionStream { } } -/// Iterator that returns transactions for the block building process in the order they should be -/// included in the block. -/// -/// Can include transactions from the pool and other sources (alternative pools, -/// sequencer-originated transactions, etc.). -pub trait PayloadTransactions { - /// Returns the next transaction to include in the block. - fn next( - &mut self, - // In the future, `ctx` can include access to state for block building purposes. - ctx: (), - ) -> Option; - - /// Exclude descendants of the transaction with given sender and nonce from the iterator, - /// because this transaction won't be included in the block. 
- fn mark_invalid(&mut self, sender: Address, nonce: u64); -} - #[cfg(test)] mod tests { use super::*; @@ -1574,7 +1623,7 @@ mod tests { ..Default::default() }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); let transaction = TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); @@ -1596,7 +1645,7 @@ mod tests { ..Default::default() }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); let transaction = TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); @@ -1618,7 +1667,7 @@ mod tests { ..Default::default() }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); let transaction = TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); @@ -1642,7 +1691,7 @@ mod tests { ..Default::default() }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); let transaction = TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 300); @@ -1666,7 +1715,7 @@ mod tests { ..Default::default() }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); let transaction = TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 62e9f3f2917d..ca7452225755 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -17,7 +17,8 @@ use alloy_consensus::constants::{ }; use alloy_eips::eip4844::MAX_BLOBS_PER_BLOCK; use reth_chainspec::{ChainSpec, EthereumHardforks}; -use reth_primitives::{GotExpected, InvalidTransactionError, SealedBlock}; +use reth_primitives::{InvalidTransactionError, SealedBlock}; +use reth_primitives_traits::GotExpected; use reth_storage_api::{AccountReader, StateProviderFactory}; use reth_tasks::TaskSpawner; use revm::{ @@ -106,6 +107,19 @@ where } /// A [`TransactionValidator`] implementation that validates ethereum transactions. +/// +/// It supports all known ethereum transaction types: +/// - Legacy +/// - EIP-2930 +/// - EIP-1559 +/// - EIP-4844 +/// - EIP-7702 +/// +/// And enforces additional constraints such as: +/// - Maximum transaction size +/// - Maximum gas limit +/// +/// And adheres to the configured [`LocalTransactionConfig`].
#[derive(Debug)] pub(crate) struct EthTransactionValidatorInner<Client, T> { /// Spec of the chain @@ -383,11 +397,12 @@ where let cost = transaction.cost(); // Checks for max cost - if cost > account.balance { + if cost > &account.balance { + let expected = *cost; return TransactionValidationOutcome::Invalid( transaction, InvalidTransactionError::InsufficientFunds( - GotExpected { got: account.balance, expected: cost }.into(), + GotExpected { got: account.balance, expected }.into(), ) .into(), ) diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 6a3b0b96e976..35e3a85537ee 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -312,7 +312,7 @@ impl<T: PoolTransaction> ValidPoolTransaction<T> { /// /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. /// For legacy transactions: `gas_price * gas_limit + tx_value`. - pub fn cost(&self) -> U256 { + pub fn cost(&self) -> &U256 { self.transaction.cost() } @@ -453,9 +453,11 @@ impl<T: PoolTransaction> Clone for ValidPoolTransaction<T> { impl<T: PoolTransaction> fmt::Debug for ValidPoolTransaction<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ValidPoolTransaction") + .field("id", &self.transaction_id) + .field("propagate", &self.propagate) + .field("origin", &self.origin) .field("hash", self.transaction.hash()) - .field("provides", &self.transaction_id) - .field("raw_tx", &self.transaction) + .field("tx", &self.transaction) .finish() } } diff --git a/crates/transaction-pool/tests/it/blobs.rs b/crates/transaction-pool/tests/it/blobs.rs index 0cdc6d088c04..9417c62278b7 100644 --- a/crates/transaction-pool/tests/it/blobs.rs +++ b/crates/transaction-pool/tests/it/blobs.rs @@ -3,7 +3,7 @@ use reth_transaction_pool::{ error::PoolErrorKind, test_utils::{MockTransaction, MockTransactionFactory, TestPoolBuilder}, - TransactionOrigin, TransactionPool, + PoolTransaction, TransactionOrigin, TransactionPool, }; #[tokio::test(flavor = "multi_thread")] @@ -16,23 +16,22 @@ async fn blobs_exclusive() { .add_transaction(TransactionOrigin::External, blob_tx.transaction.clone()) .await .unwrap(); - assert_eq!(hash, blob_tx.transaction.get_hash()); + assert_eq!(hash, *blob_tx.transaction.get_hash()); let mut best_txns = txpool.best_transactions(); assert_eq!(best_txns.next().unwrap().transaction.get_hash(), blob_tx.transaction.get_hash()); assert!(best_txns.next().is_none()); - let eip1559_tx = MockTransaction::eip1559() - .set_sender(blob_tx.transaction.get_sender()) - .inc_price_by(10_000); + let eip1559_tx = + MockTransaction::eip1559().set_sender(blob_tx.transaction.sender()).inc_price_by(10_000); let res = txpool.add_transaction(TransactionOrigin::External, eip1559_tx.clone()).await.unwrap_err(); - assert_eq!(res.hash, eip1559_tx.get_hash()); + assert_eq!(res.hash, *eip1559_tx.get_hash()); match res.kind { PoolErrorKind::ExistingConflictingTransactionType(addr, tx_type) => { - assert_eq!(addr, eip1559_tx.get_sender()); + assert_eq!(addr, eip1559_tx.sender()); assert_eq!(tx_type, eip1559_tx.tx_type()); } _ => unreachable!(), diff --git a/crates/transaction-pool/tests/it/evict.rs b/crates/transaction-pool/tests/it/evict.rs index fea50962fd9e..3b74b8cb2300 100644 --- a/crates/transaction-pool/tests/it/evict.rs +++ b/crates/transaction-pool/tests/it/evict.rs @@ -8,7 +8,8 @@ use reth_transaction_pool::{ test_utils::{ MockFeeRange, MockTransactionDistribution, MockTransactionRatio, TestPool, TestPoolBuilder, }, - BlockInfo, PoolConfig, SubPoolLimit, TransactionOrigin, TransactionPool,
TransactionPoolExt, + BlockInfo, PoolConfig, PoolTransaction, SubPoolLimit, TransactionOrigin, TransactionPool, + TransactionPoolExt, }; #[tokio::test(flavor = "multi_thread")] @@ -87,7 +88,7 @@ async fn only_blobs_eviction() { let set = set.into_vec(); // ensure that the first nonce is 0 - assert_eq!(set[0].get_nonce(), 0); + assert_eq!(set[0].nonce(), 0); // and finally insert it into the pool let results = pool.add_transactions(TransactionOrigin::External, set).await; @@ -194,7 +195,7 @@ async fn mixed_eviction() { ); let set = set.into_inner().into_vec(); - assert_eq!(set[0].get_nonce(), 0); + assert_eq!(set[0].nonce(), 0); let results = pool.add_transactions(TransactionOrigin::External, set).await; for (i, result) in results.iter().enumerate() { diff --git a/crates/transaction-pool/tests/it/listeners.rs b/crates/transaction-pool/tests/it/listeners.rs index ad13af22a6a2..0f8a0b19e2bc 100644 --- a/crates/transaction-pool/tests/it/listeners.rs +++ b/crates/transaction-pool/tests/it/listeners.rs @@ -33,11 +33,11 @@ async fn txpool_listener_all() { let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); assert_matches!( all_tx_events.next().await, - Some(FullTransactionEvent::Pending(hash)) if hash == transaction.transaction.get_hash() + Some(FullTransactionEvent::Pending(hash)) if hash == *transaction.transaction.get_hash() ); } diff --git a/crates/transaction-pool/tests/it/pending.rs b/crates/transaction-pool/tests/it/pending.rs index 0b6349b24cc3..be559c71eec4 100644 --- a/crates/transaction-pool/tests/it/pending.rs +++ b/crates/transaction-pool/tests/it/pending.rs @@ -12,7 +12,7 @@ async fn txpool_new_pending_txs() { let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); let mut best_txns = txpool.best_transactions(); assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash()); @@ -20,6 +20,6 @@ async fn txpool_new_pending_txs() { let transaction = mock_tx_factory.create_eip1559(); let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash()); } diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 0616e2597109..8b0d930b0c2c 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -12,21 +12,26 @@ description = "Commonly used types for trie usage in reth." 
workspace = true [dependencies] -reth-primitives-traits.workspace = true -reth-codecs.workspace = true - +# alloy alloy-primitives.workspace = true alloy-rlp = { workspace = true, features = ["arrayvec"] } -alloy-trie = { workspace = true, features = ["serde"] } +alloy-trie.workspace = true alloy-consensus.workspace = true alloy-genesis.workspace = true + +reth-primitives-traits.workspace = true +reth-codecs.workspace = true revm-primitives.workspace = true bytes.workspace = true derive_more.workspace = true -serde.workspace = true itertools.workspace = true -nybbles = { workspace = true, features = ["serde", "rlp"] } +nybbles = { workspace = true, features = ["rlp"] } + +# `serde` feature +serde = { workspace = true, optional = true } + +serde_with = { workspace = true, optional = true } # `test-utils` feature hash-db = { version = "=0.15.2", optional = true } @@ -34,27 +39,56 @@ plain_hasher = { version = "0.2", optional = true } arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] +reth-primitives-traits = { workspace = true, features = ["serde"] } +alloy-primitives = { workspace = true, features = ["getrandom"] } +alloy-trie = { workspace = true, features = ["arbitrary", "serde"] } +hash-db = "=0.15.2" +plain_hasher = "0.2" arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true -hash-db = "=0.15.2" -plain_hasher = "0.2" +criterion.workspace = true +bincode.workspace = true +serde.workspace = true +serde_json.workspace = true +serde_with.workspace = true [features] +serde = [ + "dep:serde", + "bytes/serde", + "nybbles/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "alloy-trie/serde", + "revm-primitives/serde", + "reth-primitives-traits/serde", + "reth-codecs/serde" +] +serde-bincode-compat = [ + "serde", + "reth-primitives-traits/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat", + "dep:serde_with" +] test-utils = [ - "dep:plain_hasher", - "dep:hash-db", - "arbitrary", - "reth-primitives-traits/test-utils", - "reth-codecs/test-utils" + "dep:plain_hasher", + "dep:hash-db", + "arbitrary", + "reth-primitives-traits/test-utils", + "reth-codecs/test-utils", ] arbitrary = [ - "alloy-trie/arbitrary", - "dep:arbitrary", - "reth-primitives-traits/arbitrary", - "alloy-consensus/arbitrary", - "alloy-primitives/arbitrary", - "nybbles/arbitrary", - "revm-primitives/arbitrary", - "reth-codecs/arbitrary" + "alloy-trie/arbitrary", + "dep:arbitrary", + "reth-primitives-traits/arbitrary", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary", + "nybbles/arbitrary", + "revm-primitives/arbitrary", + "reth-codecs/arbitrary", ] + +[[bench]] +name = "prefix_set" +harness = false diff --git a/crates/trie/trie/benches/prefix_set.rs b/crates/trie/common/benches/prefix_set.rs similarity index 99% rename from crates/trie/trie/benches/prefix_set.rs rename to crates/trie/common/benches/prefix_set.rs index cae08d129f68..b61d58e02729 100644 --- a/crates/trie/trie/benches/prefix_set.rs +++ b/crates/trie/common/benches/prefix_set.rs @@ -7,7 +7,7 @@ use proptest::{ strategy::ValueTree, test_runner::{basic_result_cache, TestRunner}, }; -use reth_trie::{ +use reth_trie_common::{ prefix_set::{PrefixSet, PrefixSetMut}, Nibbles, }; diff --git a/crates/trie/common/src/constants.rs b/crates/trie/common/src/constants.rs new file mode 100644 index 000000000000..471b8bd9dcc6 --- /dev/null +++ b/crates/trie/common/src/constants.rs @@ -0,0 +1,24 @@ +/// The maximum size of RLP encoded 
trie account in bytes. +/// 2 (header) + 4 * 1 (field lens) + 8 (nonce) + 32 * 3 (balance, storage root, code hash) +pub const TRIE_ACCOUNT_RLP_MAX_SIZE: usize = 110; + +#[cfg(test)] +mod tests { + use super::*; + use crate::TrieAccount; + use alloy_primitives::{B256, U256}; + use alloy_rlp::Encodable; + + #[test] + fn account_rlp_max_size() { + let account = TrieAccount { + nonce: u64::MAX, + balance: U256::MAX, + storage_root: B256::from_slice(&[u8::MAX; 32]), + code_hash: B256::from_slice(&[u8::MAX; 32]), + }; + let mut encoded = Vec::new(); + account.encode(&mut encoded); + assert_eq!(encoded.len(), TRIE_ACCOUNT_RLP_MAX_SIZE); + } +} diff --git a/crates/trie/common/src/hash_builder/state.rs b/crates/trie/common/src/hash_builder/state.rs index c5cae21a1a3d..ec6b102d44ec 100644 --- a/crates/trie/common/src/hash_builder/state.rs +++ b/crates/trie/common/src/hash_builder/state.rs @@ -3,11 +3,11 @@ use alloy_trie::{hash_builder::HashBuilderValue, nodes::RlpNode, HashBuilder}; use bytes::Buf; use nybbles::Nibbles; use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; /// The hash builder state for storing in the database. /// Check the `reth-trie` crate for more info on hash builder. -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( feature = "arbitrary", derive(arbitrary::Arbitrary), diff --git a/crates/trie/common/src/key.rs b/crates/trie/common/src/key.rs index 9e440d199fa1..71f8019bff54 100644 --- a/crates/trie/common/src/key.rs +++ b/crates/trie/common/src/key.rs @@ -1,5 +1,4 @@ -use alloy_primitives::B256; -use revm_primitives::keccak256; +use alloy_primitives::{keccak256, B256}; /// Trait for hashing keys in state. pub trait KeyHasher: Default + Clone + Send + Sync + 'static { diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index 7645ebd3a1cb..6647de67811c 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -11,6 +11,10 @@ /// The implementation of hash builder. pub mod hash_builder; +/// Constants related to the trie computation. +mod constants; +pub use constants::*; + mod account; pub use account::TrieAccount; @@ -26,6 +30,10 @@ pub use storage::StorageTrieEntry; mod subnode; pub use subnode::StoredSubNode; +/// The implementation of a container for storing intermediate changes to a trie. +/// The container indicates when the trie has been modified. +pub mod prefix_set; + mod proofs; #[cfg(any(test, feature = "test-utils"))] pub use proofs::triehash; @@ -33,4 +41,19 @@ pub use proofs::*; pub mod root; +/// Buffer for trie updates. +pub mod updates; + +/// Bincode-compatible serde implementations for trie types. +/// +/// `bincode` crate allows for more efficient serialization of trie types, because it allows +/// non-string map keys. 
+/// +/// Read more: +#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +pub mod serde_bincode_compat { + pub use super::updates::serde_bincode_compat as updates; +} + +/// Re-export pub use alloy_trie::{nodes::*, proof, BranchNodeCompact, HashBuilder, TrieMask, EMPTY_ROOT_HASH}; diff --git a/crates/trie/common/src/nibbles.rs b/crates/trie/common/src/nibbles.rs index cf94f135f54b..2d4e34b3e3bf 100644 --- a/crates/trie/common/src/nibbles.rs +++ b/crates/trie/common/src/nibbles.rs @@ -1,24 +1,12 @@ use bytes::Buf; use derive_more::Deref; use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; pub use nybbles::Nibbles; /// The representation of nibbles of the merkle trie stored in the database. -#[derive( - Clone, - Debug, - Default, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - derive_more::Index, -)] +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, derive_more::Index)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "test-utils", derive(arbitrary::Arbitrary))] pub struct StoredNibbles(pub Nibbles); @@ -74,7 +62,8 @@ impl Compact for StoredNibbles { } /// The representation of nibbles of the merkle trie stored in the database. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord, Hash, Deref)] +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Deref)] +#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "test-utils", derive(arbitrary::Arbitrary))] pub struct StoredNibblesSubKey(pub Nibbles); @@ -120,3 +109,97 @@ impl Compact for StoredNibblesSubKey { (Self(Nibbles::from_nibbles_unchecked(&buf[..len])), &buf[65..]) } } + +#[cfg(test)] +mod tests { + use super::*; + use bytes::BytesMut; + + #[test] + fn test_stored_nibbles_from_nibbles() { + let nibbles = Nibbles::from_nibbles_unchecked(vec![0x12, 0x34, 0x56]); + let stored = StoredNibbles::from(nibbles.clone()); + assert_eq!(stored.0, nibbles); + } + + #[test] + fn test_stored_nibbles_from_vec() { + let bytes = vec![0x12, 0x34, 0x56]; + let stored = StoredNibbles::from(bytes.clone()); + assert_eq!(stored.0.as_slice(), bytes.as_slice()); + } + + #[test] + fn test_stored_nibbles_equality() { + let bytes = vec![0x12, 0x34]; + let stored = StoredNibbles::from(bytes.clone()); + assert_eq!(stored, *bytes.as_slice()); + } + + #[test] + fn test_stored_nibbles_partial_cmp() { + let stored = StoredNibbles::from(vec![0x12, 0x34]); + let other = vec![0x12, 0x35]; + assert!(stored < *other.as_slice()); + } + + #[test] + fn test_stored_nibbles_to_compact() { + let stored = StoredNibbles::from(vec![0x12, 0x34]); + let mut buf = BytesMut::with_capacity(10); + let len = stored.to_compact(&mut buf); + assert_eq!(len, 2); + assert_eq!(buf, &vec![0x12, 0x34][..]); + } + + #[test] + fn test_stored_nibbles_from_compact() { + let buf = vec![0x12, 0x34, 0x56]; + let (stored, remaining) = StoredNibbles::from_compact(&buf, 2); + assert_eq!(stored.0.as_slice(), &[0x12, 0x34]); + assert_eq!(remaining, &[0x56]); + } + + #[test] + fn test_stored_nibbles_subkey_from_nibbles() { + let nibbles = Nibbles::from_nibbles_unchecked(vec![0x12, 0x34]); + let subkey = StoredNibblesSubKey::from(nibbles.clone()); + assert_eq!(subkey.0, nibbles); + } + + #[test] + fn test_stored_nibbles_subkey_to_compact() { + let subkey = StoredNibblesSubKey::from(vec![0x12, 0x34]); + let mut buf = BytesMut::with_capacity(65); + let len = subkey.to_compact(&mut buf); + 
assert_eq!(len, 65); + assert_eq!(buf[..2], [0x12, 0x34]); + assert_eq!(buf[64], 2); // Length byte + } + + #[test] + fn test_stored_nibbles_subkey_from_compact() { + let mut buf = vec![0x12, 0x34]; + buf.resize(65, 0); + buf[64] = 2; + let (subkey, remaining) = StoredNibblesSubKey::from_compact(&buf, 65); + assert_eq!(subkey.0.as_slice(), &[0x12, 0x34]); + assert_eq!(remaining, &[] as &[u8]); + } + + #[test] + fn test_serialization_stored_nibbles() { + let stored = StoredNibbles::from(vec![0x12, 0x34]); + let serialized = serde_json::to_string(&stored).unwrap(); + let deserialized: StoredNibbles = serde_json::from_str(&serialized).unwrap(); + assert_eq!(stored, deserialized); + } + + #[test] + fn test_serialization_stored_nibbles_subkey() { + let subkey = StoredNibblesSubKey::from(vec![0x12, 0x34]); + let serialized = serde_json::to_string(&subkey).unwrap(); + let deserialized: StoredNibblesSubKey = serde_json::from_str(&serialized).unwrap(); + assert_eq!(subkey, deserialized); + } +} diff --git a/crates/trie/trie/src/prefix_set.rs b/crates/trie/common/src/prefix_set.rs similarity index 97% rename from crates/trie/trie/src/prefix_set.rs rename to crates/trie/common/src/prefix_set.rs index d904ef38fdd5..2536a41ff0c0 100644 --- a/crates/trie/trie/src/prefix_set.rs +++ b/crates/trie/common/src/prefix_set.rs @@ -1,9 +1,9 @@ use crate::Nibbles; -use alloy_primitives::B256; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, +use alloy_primitives::{ + map::{HashMap, HashSet}, + B256, }; +use std::sync::Arc; /// Collection of mutable prefix sets. #[derive(Clone, Default, Debug)] @@ -73,7 +73,7 @@ pub struct TriePrefixSets { /// # Examples /// /// ``` -/// use reth_trie::{prefix_set::PrefixSetMut, Nibbles}; +/// use reth_trie_common::{prefix_set::PrefixSetMut, Nibbles}; /// /// let mut prefix_set_mut = PrefixSetMut::default(); /// prefix_set_mut.insert(Nibbles::from_nibbles_unchecked(&[0xa, 0xb])); @@ -211,8 +211,8 @@ impl PrefixSet { } impl<'a> IntoIterator for &'a PrefixSet { - type Item = &'a reth_trie_common::Nibbles; - type IntoIter = std::slice::Iter<'a, reth_trie_common::Nibbles>; + type Item = &'a Nibbles; + type IntoIter = std::slice::Iter<'a, Nibbles>; fn into_iter(self) -> Self::IntoIter { self.iter() } diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index a94b2b96fbdf..78659116c3e2 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -2,7 +2,11 @@ use crate::{Nibbles, TrieAccount}; use alloy_consensus::constants::KECCAK_EMPTY; -use alloy_primitives::{keccak256, Address, Bytes, B256, U256}; +use alloy_primitives::{ + keccak256, + map::{hash_map, HashMap}, + Address, Bytes, B256, U256, +}; use alloy_rlp::{encode_fixed_size, Decodable, EMPTY_STRING_CODE}; use alloy_trie::{ nodes::TrieNode, @@ -11,13 +15,11 @@ use alloy_trie::{ }; use itertools::Itertools; use reth_primitives_traits::Account; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; /// The state multiproof of target accounts and multiproofs of their storage tries. /// Multiproof is effectively a state subtrie that only contains the nodes /// in the paths of target accounts. -#[derive(Clone, Default, Debug)] +#[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct MultiProof { /// State trie multiproof for requested accounts. pub account_subtree: ProofNodes, @@ -26,6 +28,31 @@ pub struct MultiProof { } impl MultiProof { + /// Return the account proof nodes for the given account path. 
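Editor's note: the recurring derive change in these files follows a single pattern. A minimal toy illustration (assumes a crate with an optional `serde` feature; `StoredPath` is a made-up type):

```rust
// serde derives are compiled only for tests or when the optional `serde`
// feature is enabled, so default builds carry no serde dependency.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
pub struct StoredPath(pub Vec<u8>);
```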
+    pub fn account_proof_nodes(&self, path: &Nibbles) -> Vec<(Nibbles, Bytes)> {
+        self.account_subtree.matching_nodes_sorted(path)
+    }
+
+    /// Return the storage proof nodes for the given storage slots of the account path.
+    pub fn storage_proof_nodes(
+        &self,
+        hashed_address: B256,
+        slots: impl IntoIterator<Item = B256>,
+    ) -> Vec<(B256, Vec<(Nibbles, Bytes)>)> {
+        self.storages
+            .get(&hashed_address)
+            .map(|storage_mp| {
+                slots
+                    .into_iter()
+                    .map(|slot| {
+                        let nibbles = Nibbles::unpack(slot);
+                        (slot, storage_mp.subtree.matching_nodes_sorted(&nibbles))
+                    })
+                    .collect()
+            })
+            .unwrap_or_default()
+    }
+
     /// Construct the account proof from the multiproof.
     pub fn account_proof(
         &self,
@@ -37,10 +64,9 @@ impl MultiProof {
 
         // Retrieve the account proof.
         let proof = self
-            .account_subtree
-            .matching_nodes_iter(&nibbles)
-            .sorted_by(|a, b| a.0.cmp(b.0))
-            .map(|(_, node)| node.clone())
+            .account_proof_nodes(&nibbles)
+            .into_iter()
+            .map(|(_, node)| node)
             .collect::<Vec<_>>();
 
         // Inspect the last node in the proof. If it's a leaf node with matching suffix,
@@ -76,10 +102,28 @@ impl MultiProof {
         }
         Ok(AccountProof { address, info, proof, storage_root, storage_proofs })
     }
+
+    /// Extends this multiproof with another one, merging both account and storage
+    /// proofs.
+    pub fn extend(&mut self, other: Self) {
+        self.account_subtree.extend_from(other.account_subtree);
+
+        for (hashed_address, storage) in other.storages {
+            match self.storages.entry(hashed_address) {
+                hash_map::Entry::Occupied(mut entry) => {
+                    debug_assert_eq!(entry.get().root, storage.root);
+                    entry.get_mut().subtree.extend_from(storage.subtree);
+                }
+                hash_map::Entry::Vacant(entry) => {
+                    entry.insert(storage);
+                }
+            }
+        }
+    }
 }
 
 /// The merkle multiproof of storage trie.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct StorageMultiProof {
     /// Storage trie root.
     pub root: B256,
@@ -129,8 +173,9 @@ impl StorageMultiProof {
 }
 
 /// The merkle proof with the relevant account info.
-#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(any(test, feature = "serde"), serde(rename_all = "camelCase"))]
 pub struct AccountProof {
     /// The address associated with the account.
     pub address: Address,
@@ -185,7 +230,8 @@ impl AccountProof {
 }
 
 /// The merkle proof of the storage entry.
-#[derive(Clone, PartialEq, Eq, Default, Debug, Serialize, Deserialize)]
+#[derive(Clone, PartialEq, Eq, Default, Debug)]
+#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
 pub struct StorageProof {
     /// The raw storage key.
     pub key: B256,
@@ -234,11 +280,12 @@ impl StorageProof {
 #[cfg(any(test, feature = "test-utils"))]
 pub mod triehash {
     use alloy_primitives::{keccak256, B256};
+    use alloy_rlp::RlpEncodable;
     use hash_db::Hasher;
     use plain_hasher::PlainHasher;
 
     /// A [Hasher] that calculates a keccak256 hash of the given data.
- #[derive(Default, Debug, Clone, PartialEq, Eq)] + #[derive(Default, Debug, Clone, PartialEq, Eq, RlpEncodable)] #[non_exhaustive] pub struct KeccakHasher; @@ -254,3 +301,61 @@ pub mod triehash { } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_multiproof_extend_account_proofs() { + let mut proof1 = MultiProof::default(); + let mut proof2 = MultiProof::default(); + + let addr1 = B256::random(); + let addr2 = B256::random(); + + proof1.account_subtree.insert( + Nibbles::unpack(addr1), + alloy_rlp::encode_fixed_size(&U256::from(42)).to_vec().into(), + ); + proof2.account_subtree.insert( + Nibbles::unpack(addr2), + alloy_rlp::encode_fixed_size(&U256::from(43)).to_vec().into(), + ); + + proof1.extend(proof2); + + assert!(proof1.account_subtree.contains_key(&Nibbles::unpack(addr1))); + assert!(proof1.account_subtree.contains_key(&Nibbles::unpack(addr2))); + } + + #[test] + fn test_multiproof_extend_storage_proofs() { + let mut proof1 = MultiProof::default(); + let mut proof2 = MultiProof::default(); + + let addr = B256::random(); + let root = B256::random(); + + let mut subtree1 = ProofNodes::default(); + subtree1.insert( + Nibbles::from_nibbles(vec![0]), + alloy_rlp::encode_fixed_size(&U256::from(42)).to_vec().into(), + ); + proof1.storages.insert(addr, StorageMultiProof { root, subtree: subtree1 }); + + let mut subtree2 = ProofNodes::default(); + subtree2.insert( + Nibbles::from_nibbles(vec![1]), + alloy_rlp::encode_fixed_size(&U256::from(43)).to_vec().into(), + ); + proof2.storages.insert(addr, StorageMultiProof { root, subtree: subtree2 }); + + proof1.extend(proof2); + + let storage = proof1.storages.get(&addr).unwrap(); + assert_eq!(storage.root, root); + assert!(storage.subtree.contains_key(&Nibbles::from_nibbles(vec![0]))); + assert!(storage.subtree.contains_key(&Nibbles::from_nibbles(vec![1]))); + } +} diff --git a/crates/trie/common/src/root.rs b/crates/trie/common/src/root.rs index 20f3ba1366d5..982dec98837f 100644 --- a/crates/trie/common/src/root.rs +++ b/crates/trie/common/src/root.rs @@ -7,49 +7,6 @@ use alloy_trie::HashBuilder; use itertools::Itertools; use nybbles::Nibbles; -/// Adjust the index of an item for rlp encoding. -pub const fn adjust_index_for_rlp(i: usize, len: usize) -> usize { - if i > 0x7f { - i - } else if i == 0x7f || i + 1 == len { - 0 - } else { - i + 1 - } -} - -/// Compute a trie root of the collection of rlp encodable items. -pub fn ordered_trie_root(items: &[T]) -> B256 { - ordered_trie_root_with_encoder(items, |item, buf| item.encode(buf)) -} - -/// Compute a trie root of the collection of items with a custom encoder. -pub fn ordered_trie_root_with_encoder(items: &[T], mut encode: F) -> B256 -where - F: FnMut(&T, &mut Vec), -{ - if items.is_empty() { - return alloy_trie::EMPTY_ROOT_HASH; - } - - let mut value_buffer = Vec::new(); - - let mut hb = HashBuilder::default(); - let items_len = items.len(); - for i in 0..items_len { - let index = adjust_index_for_rlp(i, items_len); - - let index_buffer = alloy_rlp::encode_fixed_size(&index); - - value_buffer.clear(); - encode(&items[index], &mut value_buffer); - - hb.add_leaf(Nibbles::unpack(&index_buffer), &value_buffer); - } - - hb.root() -} - /// Hashes and sorts account keys, then proceeds to calculating the root hash of the state /// represented as MPT. /// See [`state_root_unsorted`] for more info. 
diff --git a/crates/trie/common/src/storage.rs b/crates/trie/common/src/storage.rs
index b61abb116888..cf2945d9101a 100644
--- a/crates/trie/common/src/storage.rs
+++ b/crates/trie/common/src/storage.rs
@@ -1,9 +1,9 @@
 use super::{BranchNodeCompact, StoredNibblesSubKey};
 use reth_codecs::Compact;
-use serde::{Deserialize, Serialize};
 
 /// Account storage trie node.
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)]
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
+#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
 pub struct StorageTrieEntry {
     /// The nibbles of the intermediate node
     pub nibbles: StoredNibblesSubKey,
diff --git a/crates/trie/trie/src/updates.rs b/crates/trie/common/src/updates.rs
similarity index 87%
rename from crates/trie/trie/src/updates.rs
rename to crates/trie/common/src/updates.rs
index 6d1bcab63d8f..6f80eb16553e 100644
--- a/crates/trie/trie/src/updates.rs
+++ b/crates/trie/common/src/updates.rs
@@ -1,16 +1,21 @@
-use crate::{walker::TrieWalker, BranchNodeCompact, HashBuilder, Nibbles};
-use alloy_primitives::B256;
-use std::collections::{HashMap, HashSet};
+use crate::{BranchNodeCompact, HashBuilder, Nibbles};
+use alloy_primitives::{
+    map::{HashMap, HashSet},
+    B256,
+};
 
 /// The aggregation of trie updates.
 #[derive(PartialEq, Eq, Clone, Default, Debug)]
-#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
 pub struct TrieUpdates {
-    #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_map"))]
-    pub(crate) account_nodes: HashMap<Nibbles, BranchNodeCompact>,
-    #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_set"))]
-    pub(crate) removed_nodes: HashSet<Nibbles>,
-    pub(crate) storage_tries: HashMap<B256, StorageTrieUpdates>,
+    /// Collection of updated intermediate account nodes indexed by full path.
+    #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_map"))]
+    pub account_nodes: HashMap<Nibbles, BranchNodeCompact>,
+    /// Collection of removed intermediate account nodes indexed by full path.
+    #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_set"))]
+    pub removed_nodes: HashSet<Nibbles>,
+    /// Collection of updated storage tries indexed by the hashed address.
+    pub storage_tries: HashMap<B256, StorageTrieUpdates>,
 }
 
 impl TrieUpdates {
@@ -75,20 +80,19 @@ impl TrieUpdates {
     }
 
     /// Finalize state trie updates.
-    pub fn finalize<C>(
+    pub fn finalize(
         &mut self,
-        walker: TrieWalker<C>,
         hash_builder: HashBuilder,
+        removed_keys: HashSet<Nibbles>,
         destroyed_accounts: HashSet<B256>,
     ) {
-        // Retrieve deleted keys from trie walker.
-        let (_, removed_node_keys) = walker.split();
-        self.removed_nodes.extend(exclude_empty(removed_node_keys));
-
         // Retrieve updated nodes from hash builder.
         let (_, updated_nodes) = hash_builder.split();
         self.account_nodes.extend(exclude_empty_from_pair(updated_nodes));
 
+        // Add deleted node paths.
+        self.removed_nodes.extend(exclude_empty(removed_keys));
+
         // Add deleted storage tries for destroyed accounts.
         for destroyed in destroyed_accounts {
             self.storage_tries.entry(destroyed).or_default().set_deleted(true);
@@ -110,16 +114,16 @@ impl TrieUpdates {
 
 /// Trie updates for storage trie of a single account.
 #[derive(PartialEq, Eq, Clone, Default, Debug)]
-#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
 pub struct StorageTrieUpdates {
     /// Flag indicating whether the trie was deleted.
-    pub(crate) is_deleted: bool,
+    pub is_deleted: bool,
     /// Collection of updated storage trie nodes.
-    #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_map"))]
-    pub(crate) storage_nodes: HashMap<Nibbles, BranchNodeCompact>,
+    #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_map"))]
+    pub storage_nodes: HashMap<Nibbles, BranchNodeCompact>,
     /// Collection of removed storage trie nodes.
-    #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_set"))]
-    pub(crate) removed_nodes: HashSet<Nibbles>,
+    #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_set"))]
+    pub removed_nodes: HashSet<Nibbles>,
 }
 
 #[cfg(feature = "test-utils")]
@@ -198,14 +202,13 @@ impl StorageTrieUpdates {
     }
 
     /// Finalize storage trie updates by taking updates from the walker and hash builder.
-    pub fn finalize<C>(&mut self, walker: TrieWalker<C>, hash_builder: HashBuilder) {
-        // Retrieve deleted keys from trie walker.
-        let (_, removed_keys) = walker.split();
-        self.removed_nodes.extend(exclude_empty(removed_keys));
-
+    pub fn finalize(&mut self, hash_builder: HashBuilder, removed_keys: HashSet<Nibbles>) {
         // Retrieve updated nodes from hash builder.
         let (_, updated_nodes) = hash_builder.split();
         self.storage_nodes.extend(exclude_empty_from_pair(updated_nodes));
+
+        // Add deleted node paths.
+        self.removed_nodes.extend(exclude_empty(removed_keys));
     }
 
     /// Convert storage trie updates into [`StorageTrieUpdatesSorted`].
@@ -224,11 +227,10 @@ impl StorageTrieUpdates {
 /// hex-encoded packed representation.
 ///
 /// This also sorts the set before serializing.
-#[cfg(feature = "serde")]
+#[cfg(any(test, feature = "serde"))]
 mod serde_nibbles_set {
-    use std::collections::HashSet;
-
-    use reth_trie_common::Nibbles;
+    use crate::Nibbles;
+    use alloy_primitives::map::HashSet;
     use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer};
 
     pub(super) fn serialize<S>(map: &HashSet<Nibbles>, serializer: S) -> Result<S::Ok, S::Error>
@@ -261,17 +263,16 @@
 /// hex-encoded packed representation.
 ///
 /// This also sorts the map's keys before encoding and serializing.
-#[cfg(feature = "serde")]
+#[cfg(any(test, feature = "serde"))]
 mod serde_nibbles_map {
-    use std::{collections::HashMap, marker::PhantomData};
-
-    use alloy_primitives::hex;
-    use reth_trie_common::Nibbles;
+    use crate::Nibbles;
+    use alloy_primitives::{hex, map::HashMap};
     use serde::{
         de::{Error, MapAccess, Visitor},
         ser::SerializeMap,
         Deserialize, Deserializer, Serialize, Serializer,
     };
+    use std::marker::PhantomData;
 
     pub(super) fn serialize<S, T>(
         map: &HashMap<Nibbles, T>,
@@ -315,7 +316,10 @@
         where
             A: MapAccess<'de>,
         {
-            let mut result = HashMap::with_capacity(map.size_hint().unwrap_or(0));
+            let mut result = HashMap::with_capacity_and_hasher(
+                map.size_hint().unwrap_or(0),
+                Default::default(),
+            );
 
             while let Some((key, value)) = map.next_entry::<String, T>()? {
                 let decoded_key =
@@ -337,9 +341,13 @@
 /// Sorted trie updates used for lookups and insertions.
 #[derive(PartialEq, Eq, Clone, Default, Debug)]
 pub struct TrieUpdatesSorted {
-    pub(crate) account_nodes: Vec<(Nibbles, BranchNodeCompact)>,
-    pub(crate) removed_nodes: HashSet<Nibbles>,
-    pub(crate) storage_tries: HashMap<B256, StorageTrieUpdatesSorted>,
+    /// Sorted collection of updated state nodes with corresponding paths.
+    pub account_nodes: Vec<(Nibbles, BranchNodeCompact)>,
+    /// The set of removed state node keys.
+    pub removed_nodes: HashSet<Nibbles>,
+    /// Storage tries stored by hashed address of the account
+    /// the trie belongs to.
+    pub storage_tries: HashMap<B256, StorageTrieUpdatesSorted>,
 }
 
 impl TrieUpdatesSorted {
@@ -362,9 +370,12 @@ impl TrieUpdatesSorted {
 
 /// Sorted trie updates used for lookups and insertions.
 #[derive(PartialEq, Eq, Clone, Default, Debug)]
 pub struct StorageTrieUpdatesSorted {
-    pub(crate) is_deleted: bool,
-    pub(crate) storage_nodes: Vec<(Nibbles, BranchNodeCompact)>,
-    pub(crate) removed_nodes: HashSet<Nibbles>,
+    /// Flag indicating whether the trie has been deleted/wiped.
+    pub is_deleted: bool,
+    /// Sorted collection of updated storage nodes with corresponding paths.
+    pub storage_nodes: Vec<(Nibbles, BranchNodeCompact)>,
+    /// The set of removed storage node keys.
+    pub removed_nodes: HashSet<Nibbles>,
 }
 
 impl StorageTrieUpdatesSorted {
@@ -397,23 +408,22 @@ fn exclude_empty_from_pair<V>(
 }
 
 /// Bincode-compatible trie updates type serde implementations.
-#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))]
+#[cfg(feature = "serde-bincode-compat")]
 pub mod serde_bincode_compat {
-    use std::{
-        borrow::Cow,
-        collections::{HashMap, HashSet},
+    use crate::{BranchNodeCompact, Nibbles};
+    use alloy_primitives::{
+        map::{HashMap, HashSet},
+        B256,
     };
-
-    use alloy_primitives::B256;
-    use reth_trie_common::{BranchNodeCompact, Nibbles};
     use serde::{Deserialize, Deserializer, Serialize, Serializer};
     use serde_with::{DeserializeAs, SerializeAs};
+    use std::borrow::Cow;
 
     /// Bincode-compatible [`super::TrieUpdates`] serde implementation.
     ///
     /// Intended to use with the [`serde_with::serde_as`] macro in the following way:
     /// ```rust
-    /// use reth_trie::{serde_bincode_compat, updates::TrieUpdates};
+    /// use reth_trie_common::{serde_bincode_compat, updates::TrieUpdates};
     /// use serde::{Deserialize, Serialize};
     /// use serde_with::serde_as;
     ///
@@ -477,7 +487,7 @@
     ///
     /// Intended to use with the [`serde_with::serde_as`] macro in the following way:
     /// ```rust
-    /// use reth_trie::{serde_bincode_compat, updates::StorageTrieUpdates};
+    /// use reth_trie_common::{serde_bincode_compat, updates::StorageTrieUpdates};
     /// use serde::{Deserialize, Serialize};
     /// use serde_with::serde_as;
     ///
@@ -538,12 +548,12 @@
 
     #[cfg(test)]
     mod tests {
-        use crate::updates::StorageTrieUpdates;
-
-        use super::super::{serde_bincode_compat, TrieUpdates};
-
+        use crate::{
+            serde_bincode_compat,
+            updates::{StorageTrieUpdates, TrieUpdates},
+            BranchNodeCompact, Nibbles,
+        };
         use alloy_primitives::B256;
-        use reth_trie_common::{BranchNodeCompact, Nibbles};
         use serde::{Deserialize, Serialize};
         use serde_with::serde_as;
 
@@ -552,7 +562,7 @@
         #[serde_as]
         #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
         struct Data {
-            #[serde_as(as = "serde_bincode_compat::TrieUpdates")]
+            #[serde_as(as = "serde_bincode_compat::updates::TrieUpdates")]
             trie_updates: TrieUpdates,
         }
 
@@ -585,7 +595,7 @@
         #[serde_as]
         #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
         struct Data {
-            #[serde_as(as = "serde_bincode_compat::StorageTrieUpdates")]
+            #[serde_as(as = "serde_bincode_compat::updates::StorageTrieUpdates")]
             trie_updates: StorageTrieUpdates,
         }
 
diff --git a/crates/trie/db/Cargo.toml b/crates/trie/db/Cargo.toml
index 55fa9a851b17..2fbdf1d57561 100644
--- a/crates/trie/db/Cargo.toml
+++ b/crates/trie/db/Cargo.toml
@@ -18,7 +18,6 @@ reth-execution-errors.workspace = true
 reth-db.workspace = true
 reth-db-api.workspace = true
 reth-storage-errors.workspace = true
-reth-trie-common.workspace = true
 reth-trie.workspace = true
 
 revm.workspace = true
@@ -68,15
+67,17 @@ similar-asserts.workspace = true metrics = ["reth-metrics", "reth-trie/metrics", "dep:metrics"] serde = [ "dep:serde", - "reth-provider/serde", - "reth-trie/serde", + "similar-asserts/serde", + "revm/serde", "alloy-consensus/serde", "alloy-primitives/serde", - "revm/serde", - "similar-asserts/serde" + "reth-trie/serde", + "reth-trie-common/serde", + "reth-provider/serde", ] test-utils = [ "triehash", + "revm/test-utils", "reth-trie-common/test-utils", "reth-chainspec/test-utils", "reth-primitives/test-utils", @@ -84,5 +85,4 @@ test-utils = [ "reth-db-api/test-utils", "reth-provider/test-utils", "reth-trie/test-utils", - "revm/test-utils" ] diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs index cd50503bc703..ac8c3b05304c 100644 --- a/crates/trie/db/src/prefix_set.rs +++ b/crates/trie/db/src/prefix_set.rs @@ -8,8 +8,10 @@ use reth_db_api::{ DatabaseError, }; use reth_primitives::StorageEntry; -use reth_trie::prefix_set::{PrefixSetMut, TriePrefixSets}; -use reth_trie_common::Nibbles; +use reth_trie::{ + prefix_set::{PrefixSetMut, TriePrefixSets}, + Nibbles, +}; use std::{ collections::{HashMap, HashSet}, ops::RangeInclusive, diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs index 9bf08fe136f7..99c87bf05ebf 100644 --- a/crates/trie/db/src/proof.rs +++ b/crates/trie/db/src/proof.rs @@ -10,9 +10,8 @@ use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, proof::{Proof, StorageProof}, trie_cursor::InMemoryTrieCursorFactory, - HashedPostStateSorted, HashedStorage, MultiProof, TrieInput, + AccountProof, HashedPostStateSorted, HashedStorage, MultiProof, StorageMultiProof, TrieInput, }; -use reth_trie_common::AccountProof; /// Extends [`Proof`] with operations specific for working with a database transaction. pub trait DatabaseProof<'a, TX> { @@ -96,7 +95,15 @@ pub trait DatabaseStorageProof<'a, TX> { address: Address, slot: B256, storage: HashedStorage, - ) -> Result; + ) -> Result; + + /// Generates the storage multiproof for target slots based on [`TrieInput`]. 
+ fn overlay_storage_multiproof( + tx: &'a TX, + address: Address, + slots: &[B256], + storage: HashedStorage, + ) -> Result; } impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> @@ -111,12 +118,12 @@ impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> address: Address, slot: B256, storage: HashedStorage, - ) -> Result { + ) -> Result { let hashed_address = keccak256(address); let prefix_set = storage.construct_prefix_set(); let state_sorted = HashedPostStateSorted::new( Default::default(), - HashMap::from([(hashed_address, storage.into_sorted())]), + HashMap::from_iter([(hashed_address, storage.into_sorted())]), ); Self::from_tx(tx, address) .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( @@ -126,4 +133,26 @@ impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> .with_prefix_set_mut(prefix_set) .storage_proof(slot) } + + fn overlay_storage_multiproof( + tx: &'a TX, + address: Address, + slots: &[B256], + storage: HashedStorage, + ) -> Result { + let hashed_address = keccak256(address); + let targets = slots.iter().map(keccak256).collect(); + let prefix_set = storage.construct_prefix_set(); + let state_sorted = HashedPostStateSorted::new( + Default::default(), + HashMap::from_iter([(hashed_address, storage.into_sorted())]), + ); + Self::from_tx(tx, address) + .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(tx), + &state_sorted, + )) + .with_prefix_set_mut(prefix_set) + .storage_multiproof(targets) + } } diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs index 0d2171604d5f..6e2cea5051d0 100644 --- a/crates/trie/db/src/state.rs +++ b/crates/trie/db/src/state.rs @@ -265,8 +265,7 @@ mod tests { use alloy_primitives::{hex, map::HashMap, Address, U256}; use reth_db::test_utils::create_test_rw_db; use reth_db_api::database::Database; - use reth_primitives::revm_primitives::AccountInfo; - use revm::db::BundleState; + use revm::{db::BundleState, primitives::AccountInfo}; #[test] fn from_bundle_state_with_rayon() { diff --git a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs index bfded342ba04..b364e9a86f14 100644 --- a/crates/trie/db/src/trie_cursor.rs +++ b/crates/trie/db/src/trie_cursor.rs @@ -11,9 +11,8 @@ use reth_storage_errors::db::DatabaseError; use reth_trie::{ trie_cursor::{TrieCursor, TrieCursorFactory}, updates::StorageTrieUpdates, - BranchNodeCompact, Nibbles, StoredNibbles, StoredNibblesSubKey, + BranchNodeCompact, Nibbles, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey, }; -use reth_trie_common::StorageTrieEntry; /// Wrapper struct for database transaction implementing trie cursor factory trait. 
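Editor's note on the repeated `HashMap::from(...)` → `HashMap::from_iter(...)` rewrites in this and the following test files: `From<[(K, V); N]>` is only implemented for std's default `RandomState` hasher, while `FromIterator` works for any `S: BuildHasher + Default`. The `alloy_primitives::map` aliases use a non-default hasher, so `from` stops compiling. A toy demonstration with std types only:

```rust
use std::collections::HashMap;
use std::hash::{BuildHasherDefault, DefaultHasher};

// A map alias with a non-default build hasher, mimicking alloy's map types.
type FastMap<K, V> = HashMap<K, V, BuildHasherDefault<DefaultHasher>>;

fn main() {
    // let m: FastMap<u8, u8> = HashMap::from([(1, 2)]); // does not compile
    let m: FastMap<u8, u8> = FastMap::from_iter([(1, 2)]); // works for any hasher
    assert_eq!(m[&1], 2);
}
```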
#[derive(Debug)] diff --git a/crates/trie/db/tests/proof.rs b/crates/trie/db/tests/proof.rs index 79a2ce96fced..eedeee276db8 100644 --- a/crates/trie/db/tests/proof.rs +++ b/crates/trie/db/tests/proof.rs @@ -6,8 +6,7 @@ use alloy_rlp::EMPTY_STRING_CODE; use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET}; use reth_primitives::Account; use reth_provider::test_utils::{create_test_provider_factory, insert_genesis}; -use reth_trie::{proof::Proof, Nibbles}; -use reth_trie_common::{AccountProof, StorageProof}; +use reth_trie::{proof::Proof, AccountProof, Nibbles, StorageProof}; use reth_trie_db::DatabaseProof; use std::{ str::FromStr, diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs index aee264364798..4c614d83be6c 100644 --- a/crates/trie/db/tests/trie.rs +++ b/crates/trie/db/tests/trie.rs @@ -1,13 +1,14 @@ #![allow(missing_docs)] use alloy_consensus::EMPTY_ROOT_HASH; -use alloy_primitives::{hex_literal::hex, keccak256, Address, B256, U256}; +use alloy_primitives::{hex_literal::hex, keccak256, map::HashMap, Address, B256, U256}; +use alloy_rlp::Encodable; use proptest::{prelude::ProptestConfig, proptest}; use proptest_arbitrary_interop::arb; use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, - transaction::DbTxMut, + transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, StorageEntry}; use reth_provider::{ @@ -15,25 +16,15 @@ use reth_provider::{ StorageTrieWriter, TrieWriter, }; use reth_trie::{ - prefix_set::PrefixSetMut, + prefix_set::{PrefixSetMut, TriePrefixSets}, test_utils::{state_root, state_root_prehashed, storage_root, storage_root_prehashed}, - BranchNodeCompact, StateRoot, StorageRoot, TrieMask, + triehash::KeccakHasher, + updates::StorageTrieUpdates, + BranchNodeCompact, HashBuilder, IntermediateStateRootState, Nibbles, StateRoot, + StateRootProgress, StorageRoot, TrieAccount, TrieMask, }; -use reth_trie_common::triehash::KeccakHasher; use reth_trie_db::{DatabaseStateRoot, DatabaseStorageRoot}; -use std::{ - collections::{BTreeMap, HashMap}, - ops::Mul, - str::FromStr, - sync::Arc, -}; - -use alloy_rlp::Encodable; -use reth_db_api::transaction::DbTx; -use reth_trie::{ - prefix_set::TriePrefixSets, updates::StorageTrieUpdates, HashBuilder, - IntermediateStateRootState, Nibbles, StateRootProgress, TrieAccount, -}; +use std::{collections::BTreeMap, ops::Mul, str::FromStr, sync::Arc}; fn insert_account( tx: &impl DbTxMut, diff --git a/crates/trie/db/tests/walker.rs b/crates/trie/db/tests/walker.rs index dd4bcd6da8fc..06355ff6d489 100644 --- a/crates/trie/db/tests/walker.rs +++ b/crates/trie/db/tests/walker.rs @@ -5,9 +5,9 @@ use reth_db::tables; use reth_db_api::{cursor::DbCursorRW, transaction::DbTxMut}; use reth_provider::test_utils::create_test_provider_factory; use reth_trie::{ - prefix_set::PrefixSetMut, trie_cursor::TrieCursor, walker::TrieWalker, StorageTrieEntry, + prefix_set::PrefixSetMut, trie_cursor::TrieCursor, walker::TrieWalker, BranchNodeCompact, + Nibbles, StorageTrieEntry, }; -use reth_trie_common::{BranchNodeCompact, Nibbles}; use reth_trie_db::{DatabaseAccountTrieCursor, DatabaseStorageTrieCursor}; #[test] diff --git a/crates/trie/db/tests/witness.rs b/crates/trie/db/tests/witness.rs index 8e00472b4738..385f6269f394 100644 --- a/crates/trie/db/tests/witness.rs +++ b/crates/trie/db/tests/witness.rs @@ -27,7 +27,7 @@ fn includes_empty_node_preimage() { assert_eq!( TrieWitness::from_tx(provider.tx_ref()) .compute(HashedPostState { - accounts: 
HashMap::from([(hashed_address, Some(Account::default()))]), + accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]), storages: HashMap::default(), }) .unwrap(), @@ -44,8 +44,8 @@ fn includes_empty_node_preimage() { let witness = TrieWitness::from_tx(provider.tx_ref()) .compute(HashedPostState { - accounts: HashMap::from([(hashed_address, Some(Account::default()))]), - storages: HashMap::from([( + accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]), + storages: HashMap::from_iter([( hashed_address, HashedStorage::from_iter(false, [(hashed_slot, U256::from(1))]), )]), @@ -80,12 +80,16 @@ fn includes_nodes_for_destroyed_storage_nodes() { .multiproof(HashMap::from_iter([(hashed_address, HashSet::from_iter([hashed_slot]))])) .unwrap(); - let witness = TrieWitness::from_tx(provider.tx_ref()) - .compute(HashedPostState { - accounts: HashMap::from([(hashed_address, Some(Account::default()))]), - storages: HashMap::from([(hashed_address, HashedStorage::from_iter(true, []))]), // destroyed - }) - .unwrap(); + let witness = + TrieWitness::from_tx(provider.tx_ref()) + .compute(HashedPostState { + accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]), + storages: HashMap::from_iter([( + hashed_address, + HashedStorage::from_iter(true, []), + )]), // destroyed + }) + .unwrap(); assert!(witness.contains_key(&state_root)); for node in multiproof.account_subtree.values() { assert_eq!(witness.get(&keccak256(node)), Some(node)); @@ -126,8 +130,8 @@ fn correctly_decodes_branch_node_values() { let witness = TrieWitness::from_tx(provider.tx_ref()) .compute(HashedPostState { - accounts: HashMap::from([(hashed_address, Some(Account::default()))]), - storages: HashMap::from([( + accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]), + storages: HashMap::from_iter([( hashed_address, HashedStorage::from_iter( false, diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index d1ffe49dd0ad..a9300efa9b0d 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -5,15 +5,14 @@ use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use proptest_arbitrary_interop::arb; use reth_primitives::Account; use reth_provider::{ - providers::ConsistentDbView, test_utils::create_test_provider_factory, StateChangeWriter, - TrieWriter, + providers::ConsistentDbView, test_utils::create_test_provider_factory, StateWriter, TrieWriter, }; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StateRoot, TrieInput, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseStateRoot}; -use reth_trie_parallel::parallel_root::ParallelStateRoot; +use reth_trie_parallel::root::ParallelStateRoot; use std::collections::HashMap; pub fn calculate_state_root(c: &mut Criterion) { diff --git a/crates/trie/parallel/src/lib.rs b/crates/trie/parallel/src/lib.rs index 25fcb4bac3a5..5be2a658387c 100644 --- a/crates/trie/parallel/src/lib.rs +++ b/crates/trie/parallel/src/lib.rs @@ -14,10 +14,10 @@ pub use storage_root_targets::StorageRootTargets; pub mod stats; /// Implementation of parallel state root computation. -pub mod parallel_root; +pub mod root; /// Implementation of parallel proof computation. -pub mod parallel_proof; +pub mod proof; /// Parallel state root metrics. 
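Editor's note: with `parallel_root`/`parallel_proof` renamed to `root`/`proof`, downstream imports shift as below (paths taken from this patch's own bench update; the snippet assumes `reth-trie-parallel` as a dependency and is not runnable on its own):

```rust
// Before this patch:
// use reth_trie_parallel::parallel_proof::ParallelProof;
// use reth_trie_parallel::parallel_root::ParallelStateRoot;

// After this patch:
use reth_trie_parallel::proof::ParallelProof;
use reth_trie_parallel::root::ParallelStateRoot;
```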
#[cfg(feature = "metrics")]
diff --git a/crates/trie/parallel/src/parallel_proof.rs b/crates/trie/parallel/src/proof.rs
similarity index 68%
rename from crates/trie/parallel/src/parallel_proof.rs
rename to crates/trie/parallel/src/proof.rs
index 9c7d6b6b8b37..f285079f2526 100644
--- a/crates/trie/parallel/src/parallel_proof.rs
+++ b/crates/trie/parallel/src/proof.rs
@@ -1,7 +1,8 @@
-use crate::{
-    parallel_root::ParallelStateRootError, stats::ParallelTrieTracker, StorageRootTargets,
-};
-use alloy_primitives::{map::HashSet, B256};
+use crate::{root::ParallelStateRootError, stats::ParallelTrieTracker, StorageRootTargets};
+use alloy_primitives::{
+    map::{HashMap, HashSet},
+    B256,
+};
 use alloy_rlp::{BufMut, Encodable};
 use itertools::Itertools;
 use reth_db::DatabaseError;
@@ -16,12 +17,12 @@ use reth_trie::{
     proof::StorageProof,
     trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory},
     walker::TrieWalker,
-    HashBuilder, MultiProof, Nibbles, TrieAccount, TrieInput,
+    HashBuilder, MultiProof, Nibbles, TrieAccount, TrieInput, TRIE_ACCOUNT_RLP_MAX_SIZE,
 };
 use reth_trie_common::proof::ProofRetainer;
 use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory};
-use std::{collections::HashMap, sync::Arc};
-use tracing::debug;
+use std::sync::Arc;
+use tracing::{debug, error};
 
 #[cfg(feature = "metrics")]
 use crate::metrics::ParallelStateRootMetrics;
@@ -32,7 +33,7 @@ pub struct ParallelProof<Factory> {
     /// Consistent view of the database.
     view: ConsistentDbView<Factory>,
     /// Trie input.
-    input: TrieInput,
+    input: Arc<TrieInput>,
     /// Parallel state root metrics.
     #[cfg(feature = "metrics")]
     metrics: ParallelStateRootMetrics,
@@ -40,7 +41,7 @@ impl<Factory> ParallelProof<Factory> {
     /// Create new state proof generator.
-    pub fn new(view: ConsistentDbView<Factory>, input: TrieInput) -> Self {
+    pub fn new(view: ConsistentDbView<Factory>, input: Arc<TrieInput>) -> Self {
         Self {
             view,
             input,
@@ -61,8 +62,8 @@ where
     ) -> Result<MultiProof, ParallelStateRootError> {
         let mut tracker = ParallelTrieTracker::default();
 
-        let trie_nodes_sorted = Arc::new(self.input.nodes.into_sorted());
-        let hashed_state_sorted = Arc::new(self.input.state.into_sorted());
+        let trie_nodes_sorted = self.input.nodes.clone().into_sorted();
+        let hashed_state_sorted = self.input.state.clone().into_sorted();
 
         // Extend prefix sets with targets
         let mut prefix_sets = self.input.prefix_sets.clone();
@@ -125,7 +126,9 @@ where
                     ))
                 })
             })();
-            let _ = tx.send(result);
+            if let Err(err) = tx.send(result) {
+                error!(target: "trie::parallel", ?hashed_address, err_content = ?err.0, "Failed to send proof result");
+            }
         });
         storage_proofs.insert(hashed_address, rx);
     }
@@ -152,7 +155,7 @@ where
         let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer);
 
         let mut storages = HashMap::default();
-        let mut account_rlp = Vec::with_capacity(128);
+        let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE);
         let mut account_node_iter = TrieNodeIter::new(
             walker,
             hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?,
@@ -212,3 +215,85 @@ where
         Ok(MultiProof { account_subtree: hash_builder.take_proof_nodes(), storages })
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use alloy_primitives::{keccak256, map::DefaultHashBuilder, Address, U256};
+    use rand::Rng;
+    use reth_primitives::{Account, StorageEntry};
+    use reth_provider::{test_utils::create_test_provider_factory, HashingWriter};
+    use reth_trie::proof::Proof;
+
+    #[test]
+    fn random_parallel_proof() {
+        let factory = create_test_provider_factory();
+        let consistent_view = ConsistentDbView::new(factory.clone(), None);
+
+        let mut rng = rand::thread_rng();
+        let state = (0..100)
+            .map(|_| {
+                let address = Address::random();
+                let account =
+                    Account { balance: U256::from(rng.gen::<u64>()), ..Default::default() };
+                let mut storage = HashMap::<B256, U256>::default();
+                let has_storage = rng.gen_bool(0.7);
+                if has_storage {
+                    for _ in 0..100 {
+                        storage.insert(
+                            B256::from(U256::from(rng.gen::<u64>())),
+                            U256::from(rng.gen::<u64>()),
+                        );
+                    }
+                }
+                (address, (account, storage))
+            })
+            .collect::<HashMap<_, _>>();
+
+        {
+            let provider_rw = factory.provider_rw().unwrap();
+            provider_rw
+                .insert_account_for_hashing(
+                    state.iter().map(|(address, (account, _))| (*address, Some(*account))),
+                )
+                .unwrap();
+            provider_rw
+                .insert_storage_for_hashing(state.iter().map(|(address, (_, storage))| {
+                    (
+                        *address,
+                        storage
+                            .iter()
+                            .map(|(slot, value)| StorageEntry { key: *slot, value: *value }),
+                    )
+                }))
+                .unwrap();
+            provider_rw.commit().unwrap();
+        }
+
+        let mut targets =
+            HashMap::<B256, HashSet<B256>, DefaultHashBuilder>::default();
+        for (address, (_, storage)) in state.iter().take(10) {
+            let hashed_address = keccak256(*address);
+            let mut target_slots = HashSet::<B256>::default();
+
+            for (slot, _) in storage.iter().take(5) {
+                target_slots.insert(*slot);
+            }
+
+            if !target_slots.is_empty() {
+                targets.insert(hashed_address, target_slots);
+            }
+        }
+
+        let provider_rw = factory.provider_rw().unwrap();
+        let trie_cursor_factory = DatabaseTrieCursorFactory::new(provider_rw.tx_ref());
+        let hashed_cursor_factory = DatabaseHashedCursorFactory::new(provider_rw.tx_ref());
+
+        assert_eq!(
+            ParallelProof::new(consistent_view, Default::default())
+                .multiproof(targets.clone())
+                .unwrap(),
+            Proof::new(trie_cursor_factory, hashed_cursor_factory).multiproof(targets).unwrap()
+        );
+    }
+}
diff --git a/crates/trie/parallel/src/parallel_root.rs b/crates/trie/parallel/src/root.rs
similarity index 96%
rename from crates/trie/parallel/src/parallel_root.rs
rename to crates/trie/parallel/src/root.rs
index e432b91062ca..8d2b18f5e111 100644
--- a/crates/trie/parallel/src/parallel_root.rs
+++ b/crates/trie/parallel/src/root.rs
@@ -4,6 +4,7 @@ use crate::{stats::ParallelTrieTracker, storage_root_targets::StorageRootTargets
 use alloy_primitives::B256;
 use alloy_rlp::{BufMut, Encodable};
 use itertools::Itertools;
+use reth_db::DatabaseError;
 use reth_execution_errors::StorageRootError;
 use reth_provider::{
     providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError,
@@ -14,7 +15,7 @@ use reth_trie::{
     trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory},
     updates::TrieUpdates,
     walker::TrieWalker,
-    HashBuilder, Nibbles, StorageRoot, TrieAccount, TrieInput,
+    HashBuilder, Nibbles, StorageRoot, TrieAccount, TrieInput, TRIE_ACCOUNT_RLP_MAX_SIZE,
 };
 use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory};
 use std::{collections::HashMap, sync::Arc};
@@ -149,7 +150,7 @@ where
         );
 
         let mut hash_builder = HashBuilder::default().with_updates(retain_updates);
-        let mut account_rlp = Vec::with_capacity(128);
+        let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE);
         while let Some(node) = account_node_iter.try_next().map_err(ProviderError::Database)?
        {
            match node {
                TrieElement::Branch(node) => {
@@ -193,11 +194,8 @@ where
 
         let root = hash_builder.root();
 
-        trie_updates.finalize(
-            account_node_iter.walker,
-            hash_builder,
-            prefix_sets.destroyed_accounts,
-        );
+        let removed_keys = account_node_iter.walker.take_removed_keys();
+        trie_updates.finalize(hash_builder, removed_keys, prefix_sets.destroyed_accounts);
 
         let stats = tracker.finish();
 
@@ -228,6 +226,9 @@ pub enum ParallelStateRootError {
     /// Provider error.
     #[error(transparent)]
     Provider(#[from] ProviderError),
+    /// Other unspecified error.
+    #[error("{_0}")]
+    Other(String),
 }
 
 impl From<ParallelStateRootError> for ProviderError {
@@ -237,6 +238,7 @@ impl From<ParallelStateRootError> for ProviderError {
             ParallelStateRootError::StorageRoot(StorageRootError::Database(error)) => {
                 Self::Database(error)
             }
+            ParallelStateRootError::Other(other) => Self::Database(DatabaseError::Other(other)),
         }
     }
 }
diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml
index 1c5bb7d8a33e..efd68020ccd7 100644
--- a/crates/trie/sparse/Cargo.toml
+++ b/crates/trie/sparse/Cargo.toml
@@ -14,9 +14,9 @@ workspace = true
 
 [dependencies]
 # reth
-reth-tracing.workspace = true
+reth-primitives-traits.workspace = true
 reth-trie-common.workspace = true
-reth-trie.workspace = true
+reth-tracing.workspace = true
 
 # alloy
 alloy-primitives.workspace = true
@@ -27,14 +27,17 @@ smallvec = { workspace = true, features = ["const_new"] }
 thiserror.workspace = true
 
 [dev-dependencies]
+reth-primitives-traits = { workspace = true, features = ["arbitrary"] }
 reth-testing-utils.workspace = true
 reth-trie = { workspace = true, features = ["test-utils"] }
 reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] }
 
+arbitrary.workspace = true
 assert_matches.workspace = true
 criterion.workspace = true
 itertools.workspace = true
 pretty_assertions = "1.4"
+proptest-arbitrary-interop.workspace = true
 proptest.workspace = true
 rand.workspace = true
diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs
index 30ce566fb5f6..d8d210c1b19d 100644
--- a/crates/trie/sparse/benches/root.rs
+++ b/crates/trie/sparse/benches/root.rs
@@ -146,7 +146,7 @@ pub fn calculate_root_from_leaves_repeated(c: &mut Criterion) {
                     hb.root();
 
                     if storage_updates.peek().is_some() {
-                        trie_updates.finalize(node_iter.walker, hb);
+                        trie_updates.finalize(hb, node_iter.walker.take_removed_keys());
                     }
                 }
             },
diff --git a/crates/trie/sparse/src/errors.rs b/crates/trie/sparse/src/errors.rs
index 506b206fdd79..a38a92395d9b 100644
--- a/crates/trie/sparse/src/errors.rs
+++ b/crates/trie/sparse/src/errors.rs
@@ -1,7 +1,7 @@
 //! Errors for sparse trie.
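Editor's note: both `finalize` signatures lost their `TrieWalker` parameter; as the `root.rs` and bench hunks above show, callers now drain removed keys from the walker first. A toy model of the new control flow (all types below are stand-ins, not reth's):

```rust
use std::collections::HashSet;

struct Walker {
    removed: HashSet<u8>,
}

impl Walker {
    // Mirrors `TrieWalker::take_removed_keys`: detach deletions from the walker.
    fn take_removed_keys(&mut self) -> HashSet<u8> {
        std::mem::take(&mut self.removed)
    }
}

#[derive(Default)]
struct Updates {
    removed_nodes: HashSet<u8>,
}

impl Updates {
    // Mirrors the new `finalize`: removed keys arrive as a plain argument.
    fn finalize(&mut self, removed_keys: HashSet<u8>) {
        self.removed_nodes.extend(removed_keys);
    }
}

fn main() {
    let mut walker = Walker { removed: HashSet::from([1, 2]) };
    let mut updates = Updates::default();
    let removed = walker.take_removed_keys(); // caller detaches keys first
    updates.finalize(removed);
    assert_eq!(updates.removed_nodes.len(), 2);
}
```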
 use alloy_primitives::{Bytes, B256};
-use reth_trie::Nibbles;
+use reth_trie_common::Nibbles;
 use thiserror::Error;
 
 use crate::SparseNode;
diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs
index 126e05e85824..549a86733f87 100644
--- a/crates/trie/sparse/src/state.rs
+++ b/crates/trie/sparse/src/state.rs
@@ -1,22 +1,43 @@
-use std::iter::Peekable;
-
-use crate::{SparseStateTrieError, SparseStateTrieResult, SparseTrie};
+use crate::{
+    RevealedSparseTrie, SparseStateTrieError, SparseStateTrieResult, SparseTrie, SparseTrieError,
+};
 use alloy_primitives::{
     map::{HashMap, HashSet},
     Bytes, B256,
 };
-use alloy_rlp::Decodable;
-use reth_trie::{Nibbles, TrieNode};
+use alloy_rlp::{Decodable, Encodable};
+use reth_primitives_traits::Account;
+use reth_trie_common::{
+    updates::{StorageTrieUpdates, TrieUpdates},
+    MultiProof, Nibbles, TrieAccount, TrieNode, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE,
+};
+use std::iter::Peekable;
 
 /// Sparse state trie representing lazy-loaded Ethereum state trie.
-#[derive(Default, Debug)]
+#[derive(Debug)]
 pub struct SparseStateTrie {
     /// Sparse account trie.
-    pub(crate) state: SparseTrie,
+    state: SparseTrie,
     /// Sparse storage tries.
-    pub(crate) storages: HashMap<B256, SparseTrie>,
+    storages: HashMap<B256, SparseTrie>,
     /// Collection of revealed account and storage keys.
-    pub(crate) revealed: HashMap<B256, HashSet<B256>>,
+    revealed: HashMap<B256, HashSet<B256>>,
+    /// Flag indicating whether trie updates should be retained.
+    retain_updates: bool,
+    /// Reusable buffer for RLP encoding of trie accounts.
+    account_rlp_buf: Vec<u8>,
+}
+
+impl Default for SparseStateTrie {
+    fn default() -> Self {
+        Self {
+            state: Default::default(),
+            storages: Default::default(),
+            revealed: Default::default(),
+            retain_updates: false,
+            account_rlp_buf: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE),
+        }
+    }
 }
 
 impl SparseStateTrie {
@@ -25,6 +46,12 @@ impl SparseStateTrie {
         Self { state, ..Default::default() }
     }
 
+    /// Set the retention of branch node updates and deletions.
+    pub const fn with_updates(mut self, retain_updates: bool) -> Self {
+        self.retain_updates = retain_updates;
+        self
+    }
+
     /// Returns `true` if account was already revealed.
     pub fn is_account_revealed(&self, account: &B256) -> bool {
         self.revealed.contains_key(account)
@@ -32,7 +59,12 @@ impl SparseStateTrie {
 
     /// Returns `true` if storage slot for account was already revealed.
     pub fn is_storage_slot_revealed(&self, account: &B256, slot: &B256) -> bool {
-        self.revealed.get(account).map_or(false, |slots| slots.contains(slot))
+        self.revealed.get(account).is_some_and(|slots| slots.contains(slot))
+    }
+
+    /// Returns mutable reference to storage sparse trie if it was revealed.
+    pub fn storage_trie_mut(&mut self, account: &B256) -> Option<&mut RevealedSparseTrie> {
+        self.storages.get_mut(account).and_then(|e| e.as_revealed_mut())
     }
 
     /// Reveal unknown trie paths from provided leaf path and its proof for the account.
@@ -42,12 +74,16 @@ impl SparseStateTrie {
         account: B256,
         proof: impl IntoIterator<Item = (Nibbles, Bytes)>,
     ) -> SparseStateTrieResult<()> {
+        if self.is_account_revealed(&account) {
+            return Ok(());
+        }
+
         let mut proof = proof.into_iter().peekable();
 
-        let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) };
+        let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) };
 
         // Reveal root node if it wasn't already.
-        let trie = self.state.reveal_root(root_node)?;
+        let trie = self.state.reveal_root(root_node, self.retain_updates)?;
 
         // Reveal the remaining proof nodes.
        for (path, bytes) in proof {
@@ -69,12 +105,20 @@ impl SparseStateTrie {
         slot: B256,
         proof: impl IntoIterator<Item = (Nibbles, Bytes)>,
     ) -> SparseStateTrieResult<()> {
+        if self.is_storage_slot_revealed(&account, &slot) {
+            return Ok(());
+        }
+
         let mut proof = proof.into_iter().peekable();
 
-        let Some(root_node) = self.validate_proof(&mut proof)? else { return Ok(()) };
+        let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) };
 
         // Reveal root node if it wasn't already.
-        let trie = self.storages.entry(account).or_default().reveal_root(root_node)?;
+        let trie = self
+            .storages
+            .entry(account)
+            .or_default()
+            .reveal_root(root_node, self.retain_updates)?;
 
         // Reveal the remaining proof nodes.
         for (path, bytes) in proof {
@@ -88,8 +132,56 @@ impl SparseStateTrie {
         Ok(())
     }
 
+    /// Reveal unknown trie paths from multiproof and the list of included accounts and slots.
+    /// NOTE: This method does not extensively validate the proof.
+    pub fn reveal_multiproof(
+        &mut self,
+        targets: HashMap<B256, HashSet<B256>>,
+        multiproof: MultiProof,
+    ) -> SparseStateTrieResult<()> {
+        let account_subtree = multiproof.account_subtree.into_nodes_sorted();
+        let mut account_nodes = account_subtree.into_iter().peekable();
+
+        if let Some(root_node) = self.validate_root_node(&mut account_nodes)? {
+            // Reveal root node if it wasn't already.
+            let trie = self.state.reveal_root(root_node, self.retain_updates)?;
+
+            // Reveal the remaining proof nodes.
+            for (path, bytes) in account_nodes {
+                let node = TrieNode::decode(&mut &bytes[..])?;
+                trie.reveal_node(path, node)?;
+            }
+        }
+
+        for (account, storage_subtree) in multiproof.storages {
+            let storage_subtree = storage_subtree.subtree.into_nodes_sorted();
+            let mut storage_nodes = storage_subtree.into_iter().peekable();
+
+            if let Some(root_node) = self.validate_root_node(&mut storage_nodes)? {
+                // Reveal root node if it wasn't already.
+                let trie = self
+                    .storages
+                    .entry(account)
+                    .or_default()
+                    .reveal_root(root_node, self.retain_updates)?;
+
+                // Reveal the remaining proof nodes.
+                for (path, bytes) in storage_nodes {
+                    let node = TrieNode::decode(&mut &bytes[..])?;
+                    trie.reveal_node(path, node)?;
+                }
+            }
+        }
+
+        for (account, slots) in targets {
+            self.revealed.entry(account).or_default().extend(slots);
+        }
+
+        Ok(())
+    }
+
     /// Validates the root node of the proof and returns it if it exists and is valid.
-    fn validate_proof<I: Iterator<Item = (Nibbles, Bytes)>>(
+    fn validate_root_node<I: Iterator<Item = (Nibbles, Bytes)>>(
         &self,
         proof: &mut Peekable<I>,
     ) -> SparseStateTrieResult<Option<TrieNode>> {
@@ -110,30 +202,139 @@ impl SparseStateTrie {
         Ok(Some(root_node))
     }
 
-    /// Update the leaf node.
-    pub fn update_leaf(&mut self, path: Nibbles, value: Vec<u8>) -> SparseStateTrieResult<()> {
+    /// Update or remove trie account based on new account info. This method will either recompute
+    /// the storage root based on the updated storage trie or look it up from the existing leaf
+    /// value.
+    ///
+    /// If the new account info and storage trie are empty, the account leaf will be removed.
+    pub fn update_account(&mut self, address: B256, account: Account) -> SparseStateTrieResult<()> {
+        let nibbles = Nibbles::unpack(address);
+        let storage_root = if let Some(storage_trie) = self.storages.get_mut(&address) {
+            storage_trie.root().ok_or(SparseTrieError::Blind)?
+        } else if self.revealed.contains_key(&address) {
+            let state = self.state.as_revealed_mut().ok_or(SparseTrieError::Blind)?;
+            // The account was revealed, either...
+            if let Some(value) = state.get_leaf_value(&nibbles) {
+                // ...it exists and we should take its current storage root or...
+                TrieAccount::decode(&mut &value[..])?.storage_root
+            } else {
+                // ...the account is newly created and the storage trie is empty.
+                EMPTY_ROOT_HASH
+            }
+        } else {
+            return Err(SparseTrieError::Blind.into())
+        };
+
+        if account.is_empty() && storage_root == EMPTY_ROOT_HASH {
+            self.remove_account_leaf(&nibbles)
+        } else {
+            self.account_rlp_buf.clear();
+            TrieAccount::from((account, storage_root)).encode(&mut self.account_rlp_buf);
+            self.update_account_leaf(nibbles, self.account_rlp_buf.clone())
+        }
+    }
+
+    /// Update the account leaf node.
+    pub fn update_account_leaf(
+        &mut self,
+        path: Nibbles,
+        value: Vec<u8>,
+    ) -> SparseStateTrieResult<()> {
         self.state.update_leaf(path, value)?;
         Ok(())
     }
 
-    /// Returns sparse trie root if the trie has been revealed.
-    pub fn root(&mut self) -> Option<B256> {
-        self.state.root()
+    /// Remove the account leaf node.
+    pub fn remove_account_leaf(&mut self, path: &Nibbles) -> SparseStateTrieResult<()> {
+        self.state.remove_leaf(path)?;
+        Ok(())
+    }
+
+    /// Update the leaf node of a storage trie at the provided address.
+    pub fn update_storage_leaf(
+        &mut self,
+        address: B256,
+        slot: Nibbles,
+        value: Vec<u8>,
+    ) -> SparseStateTrieResult<()> {
+        self.storages.entry(address).or_default().update_leaf(slot, value)?;
+        Ok(())
+    }
+
+    /// Remove the leaf node of a storage trie at the provided address.
+    pub fn remove_storage_leaf(
+        &mut self,
+        address: B256,
+        slot: &Nibbles,
+    ) -> SparseStateTrieResult<()> {
+        self.storages.entry(address).or_default().remove_leaf(slot)?;
+        Ok(())
+    }
+
+    /// Wipe the storage trie at the provided address.
+    pub fn wipe_storage(&mut self, address: B256) -> SparseStateTrieResult<()> {
+        if let Some(trie) = self.storages.get_mut(&address) {
+            trie.wipe()?;
+        }
+        Ok(())
+    }
+
+    /// Calculates the hashes of the nodes below the provided level.
+    pub fn calculate_below_level(&mut self, level: usize) {
+        self.state.calculate_below_level(level);
     }
 
     /// Returns storage sparse trie root if the trie has been revealed.
     pub fn storage_root(&mut self, account: B256) -> Option<B256> {
         self.storages.get_mut(&account).and_then(|trie| trie.root())
     }
+
+    /// Returns sparse trie root if the trie has been revealed.
+    pub fn root(&mut self) -> Option<B256> {
+        self.state.root()
+    }
+
+    /// Returns [`TrieUpdates`] by taking the updates from the revealed sparse tries.
+    ///
+    /// Returns `None` if the accounts trie is not revealed.
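Editor's note: the heart of `update_account` above is a three-way storage-root lookup. A toy decision table follows (everything here is an illustrative stand-in; `Err("blind")` plays the role of `SparseTrieError::Blind`):

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
struct Root(u8);
const EMPTY_ROOT_HASH: Root = Root(0);

fn storage_root(
    revealed_storage_trie: Option<Root>, // a storage trie we can recompute from
    account_revealed: bool,
    existing_leaf_root: Option<Root>, // storage root decoded from the current leaf
) -> Result<Root, &'static str> {
    if let Some(root) = revealed_storage_trie {
        Ok(root) // recomputed from the updated storage trie
    } else if account_revealed {
        // fall back to the current leaf value, or empty for new accounts
        Ok(existing_leaf_root.unwrap_or(EMPTY_ROOT_HASH))
    } else {
        Err("blind") // neither the storage trie nor the account is revealed
    }
}

fn main() {
    assert!(storage_root(None, false, None).is_err());
    assert_eq!(storage_root(None, true, None), Ok(EMPTY_ROOT_HASH));
    assert_eq!(storage_root(Some(Root(7)), false, None), Ok(Root(7)));
}
```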
+    pub fn take_trie_updates(&mut self) -> Option<TrieUpdates> {
+        self.state.as_revealed_mut().map(|state| {
+            let updates = state.take_updates();
+            TrieUpdates {
+                account_nodes: updates.updated_nodes,
+                removed_nodes: updates.removed_nodes,
+                storage_tries: self
+                    .storages
+                    .iter_mut()
+                    .map(|(address, trie)| {
+                        let trie = trie.as_revealed_mut().unwrap();
+                        let updates = trie.take_updates();
+                        let updates = StorageTrieUpdates {
+                            is_deleted: updates.wiped,
+                            storage_nodes: updates.updated_nodes,
+                            removed_nodes: updates.removed_nodes,
+                        };
+                        (*address, updates)
+                    })
+                    .filter(|(_, updates)| !updates.is_empty())
+                    .collect(),
+            }
+        })
+    }
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
-    use alloy_primitives::Bytes;
+    use alloy_primitives::{b256, Bytes, U256};
     use alloy_rlp::EMPTY_STRING_CODE;
+    use arbitrary::Arbitrary;
     use assert_matches::assert_matches;
-    use reth_trie::HashBuilder;
+    use rand::{rngs::StdRng, Rng, SeedableRng};
+    use reth_primitives_traits::Account;
+    use reth_trie::{
+        updates::StorageTrieUpdates, BranchNodeCompact, HashBuilder, TrieAccount, TrieMask,
+        EMPTY_ROOT_HASH,
+    };
     use reth_trie_common::proof::ProofRetainer;
 
     #[test]
@@ -141,7 +342,7 @@ mod tests {
         let sparse = SparseStateTrie::default();
         let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))];
         assert_matches!(
-            sparse.validate_proof(&mut proof.into_iter().peekable()),
+            sparse.validate_root_node(&mut proof.into_iter().peekable()),
             Err(SparseStateTrieError::InvalidRootNode { .. })
         );
     }
@@ -154,7 +355,7 @@ mod tests {
             (Nibbles::from_nibbles([0x1]), Bytes::new()),
         ];
         assert_matches!(
-            sparse.validate_proof(&mut proof.into_iter().peekable()),
+            sparse.validate_root_node(&mut proof.into_iter().peekable()),
             Err(SparseStateTrieError::InvalidRootNode { .. })
         );
     }
@@ -191,4 +392,139 @@
             HashMap::from_iter([(Default::default(), SparseTrie::revealed_empty())])
         );
     }
+
+    #[test]
+    fn take_trie_updates() {
+        reth_tracing::init_test_tracing();
+
+        // let mut rng = generators::rng();
+        let mut rng = StdRng::seed_from_u64(1);
+
+        let mut bytes = [0u8; 1024];
+        rng.fill(bytes.as_mut_slice());
+
+        let slot_1 = b256!("1000000000000000000000000000000000000000000000000000000000000000");
+        let slot_path_1 = Nibbles::unpack(slot_1);
+        let value_1 = U256::from(rng.gen::<u64>());
+        let slot_2 = b256!("1100000000000000000000000000000000000000000000000000000000000000");
+        let slot_path_2 = Nibbles::unpack(slot_2);
+        let value_2 = U256::from(rng.gen::<u64>());
+        let slot_3 = b256!("2000000000000000000000000000000000000000000000000000000000000000");
+        let slot_path_3 = Nibbles::unpack(slot_3);
+        let value_3 = U256::from(rng.gen::<u64>());
+
+        let mut storage_hash_builder =
+            HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter([
+                slot_path_1.clone(),
+                slot_path_2.clone(),
+            ]));
+        storage_hash_builder.add_leaf(slot_path_1.clone(), &alloy_rlp::encode_fixed_size(&value_1));
+        storage_hash_builder.add_leaf(slot_path_2.clone(), &alloy_rlp::encode_fixed_size(&value_2));
+
+        let storage_root = storage_hash_builder.root();
+        let proof_nodes = storage_hash_builder.take_proof_nodes();
+        let storage_proof_1 = proof_nodes.matching_nodes_sorted(&slot_path_1);
+        let storage_proof_2 = proof_nodes.matching_nodes_sorted(&slot_path_2);
+
+        let address_1 = b256!("1000000000000000000000000000000000000000000000000000000000000000");
+        let address_path_1 = Nibbles::unpack(address_1);
+        let account_1 = Account::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap();
+        let mut trie_account_1 = TrieAccount::from((account_1,
storage_root)); + let address_2 = b256!("1100000000000000000000000000000000000000000000000000000000000000"); + let address_path_2 = Nibbles::unpack(address_2); + let account_2 = Account::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(); + let mut trie_account_2 = TrieAccount::from((account_2, EMPTY_ROOT_HASH)); + + let mut hash_builder = + HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter([ + address_path_1.clone(), + address_path_2.clone(), + ])); + hash_builder.add_leaf(address_path_1.clone(), &alloy_rlp::encode(trie_account_1)); + hash_builder.add_leaf(address_path_2.clone(), &alloy_rlp::encode(trie_account_2)); + + let root = hash_builder.root(); + let proof_nodes = hash_builder.take_proof_nodes(); + let proof_1 = proof_nodes.matching_nodes_sorted(&address_path_1); + let proof_2 = proof_nodes.matching_nodes_sorted(&address_path_2); + + let mut sparse = SparseStateTrie::default().with_updates(true); + sparse.reveal_account(address_1, proof_1).unwrap(); + sparse.reveal_account(address_2, proof_2).unwrap(); + sparse.reveal_storage_slot(address_1, slot_1, storage_proof_1.clone()).unwrap(); + sparse.reveal_storage_slot(address_1, slot_2, storage_proof_2.clone()).unwrap(); + sparse.reveal_storage_slot(address_2, slot_1, storage_proof_1).unwrap(); + sparse.reveal_storage_slot(address_2, slot_2, storage_proof_2).unwrap(); + + assert_eq!(sparse.root(), Some(root)); + + let address_3 = b256!("2000000000000000000000000000000000000000000000000000000000000000"); + let address_path_3 = Nibbles::unpack(address_3); + let account_3 = Account { nonce: account_1.nonce + 1, ..account_1 }; + let trie_account_3 = TrieAccount::from((account_3, EMPTY_ROOT_HASH)); + + sparse.update_account_leaf(address_path_3, alloy_rlp::encode(trie_account_3)).unwrap(); + + sparse.update_storage_leaf(address_1, slot_path_3, alloy_rlp::encode(value_3)).unwrap(); + trie_account_1.storage_root = sparse.storage_root(address_1).unwrap(); + sparse.update_account_leaf(address_path_1, alloy_rlp::encode(trie_account_1)).unwrap(); + + sparse.wipe_storage(address_2).unwrap(); + trie_account_2.storage_root = sparse.storage_root(address_2).unwrap(); + sparse.update_account_leaf(address_path_2, alloy_rlp::encode(trie_account_2)).unwrap(); + + sparse.root(); + + let sparse_updates = sparse.take_trie_updates().unwrap(); + // TODO(alexey): assert against real state root calculation updates + pretty_assertions::assert_eq!( + sparse_updates, + TrieUpdates { + account_nodes: HashMap::from_iter([ + ( + Nibbles::default(), + BranchNodeCompact { + state_mask: TrieMask::new(0b110), + tree_mask: TrieMask::new(0b000), + hash_mask: TrieMask::new(0b010), + hashes: vec![b256!( + "4c4ffbda3569fcf2c24ea2000b4cec86ef8b92cbf9ff415db43184c0f75a212e" + )], + root_hash: Some(b256!( + "60944bd29458529c3065d19f63c6e3d5269596fd3b04ca2e7b318912dc89ca4c" + )) + }, + ), + ]), + storage_tries: HashMap::from_iter([ + ( + b256!("1000000000000000000000000000000000000000000000000000000000000000"), + StorageTrieUpdates { + is_deleted: false, + storage_nodes: HashMap::from_iter([( + Nibbles::default(), + BranchNodeCompact { + state_mask: TrieMask::new(0b110), + tree_mask: TrieMask::new(0b000), + hash_mask: TrieMask::new(0b010), + hashes: vec![b256!("5bc8b4fdf51839c1e18b8d6a4bd3e2e52c9f641860f0e4d197b68c2679b0e436")], + root_hash: Some(b256!("c44abf1a9e1a92736ac479b20328e8d7998aa8838b6ef52620324c9ce85e3201")) + } + )]), + removed_nodes: HashSet::default() + } + ), + ( + b256!("1100000000000000000000000000000000000000000000000000000000000000"), 
+ StorageTrieUpdates {
+ is_deleted: true,
+ storage_nodes: HashMap::default(),
+ removed_nodes: HashSet::default()
+ }
+ )
+ ]),
+ removed_nodes: HashSet::default()
+ }
+ );
+ }
}
diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs
index 9db1dff53131..97446680df44 100644
--- a/crates/trie/sparse/src/trie.rs
+++ b/crates/trie/sparse/src/trie.rs
@@ -1,17 +1,18 @@
use crate::{SparseTrieError, SparseTrieResult};
-use alloy_primitives::{hex, keccak256, map::HashMap, B256};
+use alloy_primitives::{
+ hex, keccak256,
+ map::{HashMap, HashSet},
+ B256,
+};
use alloy_rlp::Decodable;
use reth_tracing::tracing::debug;
-use reth_trie::{
- prefix_set::{PrefixSet, PrefixSetMut},
- RlpNode,
-};
use reth_trie_common::{
- BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, TrieMask, TrieNode, CHILD_INDEX_RANGE,
- EMPTY_ROOT_HASH,
+ prefix_set::{PrefixSet, PrefixSetMut},
+ BranchNodeCompact, BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, RlpNode, TrieMask,
+ TrieNode, CHILD_INDEX_RANGE, EMPTY_ROOT_HASH,
};
use smallvec::SmallVec;
-use std::fmt;
+use std::{borrow::Cow, fmt};

/// Inner representation of the sparse trie.
/// Sparse trie is blind by default until nodes are revealed.
@@ -21,13 +22,13 @@ pub enum SparseTrie {
#[default]
Blind,
/// The trie nodes have been revealed.
- Revealed(RevealedSparseTrie),
+ Revealed(Box<RevealedSparseTrie>),
}

impl SparseTrie {
/// Creates new revealed empty trie.
pub fn revealed_empty() -> Self {
- Self::Revealed(RevealedSparseTrie::default())
+ Self::Revealed(Box::default())
}

/// Returns `true` if the sparse trie has no revealed nodes.
@@ -49,9 +50,13 @@ impl SparseTrie {
/// # Returns
///
/// Mutable reference to [`RevealedSparseTrie`].
- pub fn reveal_root(&mut self, root: TrieNode) -> SparseTrieResult<&mut RevealedSparseTrie> {
+ pub fn reveal_root(
+ &mut self,
+ root: TrieNode,
+ retain_updates: bool,
+ ) -> SparseTrieResult<&mut RevealedSparseTrie> {
if self.is_blind() {
- *self = Self::Revealed(RevealedSparseTrie::from_root(root)?)
+ *self = Self::Revealed(Box::new(RevealedSparseTrie::from_root(root, retain_updates)?))
}
Ok(self.as_revealed_mut().unwrap())
}
@@ -63,10 +68,29 @@
Ok(())
}

+ /// Remove the leaf node.
+ pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> {
+ let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?;
+ revealed.remove_leaf(path)?;
+ Ok(())
+ }
+
+ /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node.
+ pub fn wipe(&mut self) -> SparseTrieResult<()> {
+ let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?;
+ revealed.wipe();
+ Ok(())
+ }
+
/// Calculates and returns the trie root if the trie has been revealed.
pub fn root(&mut self) -> Option<B256> {
Some(self.as_revealed_mut()?.root())
}
+
+ /// Calculates the hashes of the nodes below the provided level.
+ pub fn calculate_below_level(&mut self, level: usize) {
+ self.as_revealed_mut().unwrap().update_rlp_node_level(level);
+ }
}

/// The representation of revealed sparse trie.
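`reveal_root` now threads a `retain_updates` flag down to the revealed trie, and the `Revealed` variant is boxed so the enum itself stays small. A minimal usage sketch (not part of the patch; it assumes the crate is consumed as `reth_trie_sparse` and that `SparseTrie::update_leaf` keeps the signature used elsewhere in this file):

    use alloy_primitives::B256;
    use reth_trie_common::{Nibbles, TrieNode};
    use reth_trie_sparse::{SparseTrie, SparseTrieResult};

    fn root_with_updates(root_node: TrieNode) -> SparseTrieResult<B256> {
        let mut trie = SparseTrie::default(); // starts out Blind
        // `retain_updates = true` makes the revealed trie record branch node
        // updates and deletions while it recomputes hashes.
        trie.reveal_root(root_node, true)?;
        // Dummy leaf: a full-width path and an RLP empty-string value.
        trie.update_leaf(Nibbles::unpack(B256::ZERO), vec![0x80])?;
        // `root()` is `None` only while the trie is still blind.
        Ok(trie.root().expect("revealed above"))
    }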
@@ -87,6 +111,8 @@ pub struct RevealedSparseTrie {
prefix_set: PrefixSetMut,
/// Reusable buffer for RLP encoding of nodes.
rlp_buf: Vec<u8>,
+ /// Retained trie updates.
+ updates: Option<SparseTrieUpdates>,
}

impl fmt::Debug for RevealedSparseTrie {
@@ -96,6 +122,7 @@ impl fmt::Debug for RevealedSparseTrie {
.field("values", &self.values)
.field("prefix_set", &self.prefix_set)
.field("rlp_buf", &hex::encode(&self.rlp_buf))
+ .field("updates", &self.updates)
.finish()
}
}
@@ -107,26 +134,51 @@ impl Default for RevealedSparseTrie {
fn default() -> Self {
Self {
nodes: HashMap::default(),
values: HashMap::default(),
prefix_set: PrefixSetMut::default(),
rlp_buf: Vec::new(),
+ updates: None,
}
}
}

impl RevealedSparseTrie {
/// Create new revealed sparse trie from the given root node.
- pub fn from_root(node: TrieNode) -> SparseTrieResult<Self> {
+ pub fn from_root(node: TrieNode, retain_updates: bool) -> SparseTrieResult<Self> {
let mut this = Self {
nodes: HashMap::default(),
values: HashMap::default(),
prefix_set: PrefixSetMut::default(),
rlp_buf: Vec::new(),
- };
+ updates: None,
+ }
+ .with_updates(retain_updates);
this.reveal_node(Nibbles::default(), node)?;
Ok(this)
}

+ /// Set the retention of branch node updates and deletions.
+ pub fn with_updates(mut self, retain_updates: bool) -> Self {
+ if retain_updates {
+ self.updates = Some(SparseTrieUpdates::default());
+ }
+ self
+ }
+
+ /// Returns a reference to the retained sparse node updates without taking them.
+ pub fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> {
+ self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed)
+ }
+
+ /// Returns a reference to the leaf value if present.
+ pub fn get_leaf_value(&self, path: &Nibbles) -> Option<&Vec<u8>> {
+ self.values.get(path)
+ }
+
+ /// Takes and returns the retained sparse node updates
+ pub fn take_updates(&mut self) -> SparseTrieUpdates {
+ self.updates.take().unwrap_or_default()
+ }
+
/// Reveal the trie node only if it was not known already.
pub fn reveal_node(&mut self, path: Nibbles, node: TrieNode) -> SparseTrieResult<()> {
- // TODO: revise all inserts to not overwrite existing entries
match node {
TrieNode::EmptyRoot => {
debug_assert!(path.is_empty());
@@ -146,10 +198,7 @@
match self.nodes.get(&path) {
// Blinded and non-existent nodes can be replaced.
Some(SparseNode::Hash(_)) | None => {
- self.nodes.insert(
- path,
- SparseNode::Branch { state_mask: branch.state_mask, hash: None },
- );
+ self.nodes.insert(path, SparseNode::new_branch(branch.state_mask));
}
// Branch node already exists, or an extension node was placed where a
// branch node was before.
@@ -165,7 +214,7 @@
let mut child_path = path.clone();
child_path.extend_from_slice_unchecked(&ext.key);
self.reveal_node_or_hash(child_path, &ext.child)?;
- self.nodes.insert(path, SparseNode::Extension { key: ext.key, hash: None });
+ self.nodes.insert(path, SparseNode::new_ext(ext.key));
}
// Extension node already exists, or an extension node was placed where a branch
// node was before.
@@ -390,7 +439,7 @@
SparseNode::Branch { .. } => removed_node.node,
}
}
- SparseNode::Branch { mut state_mask, hash: _ } => {
+ SparseNode::Branch { mut state_mask, hash: _, store_in_db_trie: _ } => {
// If the node is a branch node, we need to check the number of children left
// after deleting the child at the given nibble.
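The retention API just above is builder-style: updates accumulate only after `with_updates(true)`, `updates_ref` borrows them (yielding an empty owned set when retention is off), and `take_updates` drains them. Since `take_updates` takes the inner `Option`, retention stops after draining until it is re-enabled. A sketch under the same crate-name assumption as before:

    use reth_trie_common::Nibbles;
    use reth_trie_sparse::{RevealedSparseTrie, SparseTrieResult};

    fn drain_updates() -> SparseTrieResult<()> {
        let mut trie = RevealedSparseTrie::default().with_updates(true);
        trie.update_leaf(Nibbles::unpack([0x11u8; 32]), vec![0x80])?;
        let _root = trie.root();
        // Borrow without consuming, then drain for persistence.
        let pending = trie.updates_ref().into_owned();
        let drained = trie.take_updates();
        assert_eq!(pending, drained);
        Ok(())
    }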
@@ -452,6 +501,10 @@ impl RevealedSparseTrie { self.nodes.remove(&child_path); } + if let Some(updates) = self.updates.as_mut() { + updates.removed_nodes.insert(removed_path.clone()); + } + new_node } // If more than one child is left set in the branch, we just re-insert it @@ -529,7 +582,7 @@ impl RevealedSparseTrie { let unset_branch_nibble = self .nodes .get(&child_path) - .map_or(false, move |node| match node { + .is_some_and(move |node| match node { SparseNode::Leaf { key, .. } => { // Get full path of the leaf node child_path.extend_from_slice_unchecked(key); @@ -553,16 +606,24 @@ impl RevealedSparseTrie { Ok(nodes) } + /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. + pub fn wipe(&mut self) { + let updates_retained = self.updates.is_some(); + *self = Self::default(); + self.prefix_set = PrefixSetMut::all(); + self.updates = updates_retained.then(SparseTrieUpdates::wiped); + } + /// Return the root of the sparse trie. /// Updates all remaining dirty nodes before calculating the root. pub fn root(&mut self) -> B256 { // take the current prefix set. let mut prefix_set = std::mem::take(&mut self.prefix_set).freeze(); - let root_rlp = self.rlp_node_allocate(Nibbles::default(), &mut prefix_set); - if let Some(root_hash) = root_rlp.as_hash() { + let rlp_node = self.rlp_node_allocate(Nibbles::default(), &mut prefix_set); + if let Some(root_hash) = rlp_node.as_hash() { root_hash } else { - keccak256(root_rlp) + keccak256(rlp_node) } } @@ -608,7 +669,7 @@ impl RevealedSparseTrie { paths.push((path, level + 1)); } } - SparseNode::Branch { state_mask, hash } => { + SparseNode::Branch { state_mask, hash, .. } => { if hash.is_some() && !prefix_set.contains(&path) { continue } @@ -644,48 +705,70 @@ impl RevealedSparseTrie { let mut prefix_set_contains = |path: &Nibbles| *is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path)); - let rlp_node = match self.nodes.get_mut(&path).unwrap() { - SparseNode::Empty => RlpNode::word_rlp(&EMPTY_ROOT_HASH), - SparseNode::Hash(hash) => RlpNode::word_rlp(hash), + let (rlp_node, calculated, node_type) = match self.nodes.get_mut(&path).unwrap() { + SparseNode::Empty => { + (RlpNode::word_rlp(&EMPTY_ROOT_HASH), false, SparseNodeType::Empty) + } + SparseNode::Hash(hash) => (RlpNode::word_rlp(hash), false, SparseNodeType::Hash), SparseNode::Leaf { key, hash } => { self.rlp_buf.clear(); let mut path = path.clone(); path.extend_from_slice_unchecked(key); if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - RlpNode::word_rlp(&hash) + (RlpNode::word_rlp(&hash), false, SparseNodeType::Leaf) } else { let value = self.values.get(&path).unwrap(); let rlp_node = LeafNodeRef { key, value }.rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); - rlp_node + (rlp_node, true, SparseNodeType::Leaf) } } SparseNode::Extension { key, hash } => { let mut child_path = path.clone(); child_path.extend_from_slice_unchecked(key); if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - RlpNode::word_rlp(&hash) - } else if buffers.rlp_node_stack.last().map_or(false, |e| e.0 == child_path) { - let (_, child) = buffers.rlp_node_stack.pop().unwrap(); + ( + RlpNode::word_rlp(&hash), + false, + SparseNodeType::Extension { store_in_db_trie: true }, + ) + } else if buffers.rlp_node_stack.last().is_some_and(|e| e.0 == child_path) { + let (_, child, _, node_type) = buffers.rlp_node_stack.pop().unwrap(); self.rlp_buf.clear(); let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf); *hash = 
rlp_node.as_hash(); - rlp_node + + ( + rlp_node, + true, + SparseNodeType::Extension { + // Inherit the `store_in_db_trie` flag from the child node, which is + // always the branch node + store_in_db_trie: node_type.store_in_db_trie(), + }, + ) } else { // need to get rlp node for child first buffers.path_stack.extend([(path, is_in_prefix_set), (child_path, None)]); continue } } - SparseNode::Branch { state_mask, hash } => { - if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - buffers.rlp_node_stack.push((path, RlpNode::word_rlp(&hash))); + SparseNode::Branch { state_mask, hash, store_in_db_trie } => { + if let Some((hash, store_in_db_trie)) = + hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) + { + buffers.rlp_node_stack.push(( + path, + RlpNode::word_rlp(&hash), + false, + SparseNodeType::Branch { store_in_db_trie }, + )); continue } buffers.branch_child_buf.clear(); // Walk children in a reverse order from `f` to `0`, so we pop the `0` first - // from the stack. + // from the stack and keep walking in the sorted order. for bit in CHILD_INDEX_RANGE.rev() { if state_mask.is_bit_set(bit) { let mut child = path.clone(); @@ -698,11 +781,42 @@ impl RevealedSparseTrie { .branch_value_stack_buf .resize(buffers.branch_child_buf.len(), Default::default()); let mut added_children = false; + + // TODO(alexey): set the `TrieMask` bits directly + let mut tree_mask_values = Vec::new(); + let mut hash_mask_values = Vec::new(); + let mut hashes = Vec::new(); for (i, child_path) in buffers.branch_child_buf.iter().enumerate() { - if buffers.rlp_node_stack.last().map_or(false, |e| &e.0 == child_path) { - let (_, child) = buffers.rlp_node_stack.pop().unwrap(); - // Insert children in the resulting buffer in a normal order, because - // initially we iterated in reverse. + if buffers.rlp_node_stack.last().is_some_and(|e| &e.0 == child_path) { + let (_, child, calculated, node_type) = + buffers.rlp_node_stack.pop().unwrap(); + + // Update the masks only if we need to retain trie updates + if self.updates.is_some() { + // Set the trie mask + if node_type.store_in_db_trie() { + // A branch or an extension node explicitly set the + // `store_in_db_trie` flag + tree_mask_values.push(true); + } else { + // Set the flag according to whether a child node was + // pre-calculated + // (`calculated = false`), meaning that it wasn't in the + // database + tree_mask_values.push(!calculated); + } + + // Set the hash mask. If a child node has a hash value AND is a + // branch node, set the hash mask and save the hash. + let hash = child.as_hash().filter(|_| node_type.is_branch()); + hash_mask_values.push(hash.is_some()); + if let Some(hash) = hash { + hashes.push(hash); + } + } + + // Insert children in the resulting buffer in a normal order, + // because initially we iterated in reverse. 
buffers.branch_value_stack_buf
[buffers.branch_child_buf.len() - i - 1] = child;
added_children = true;
@@ -717,21 +831,101 @@
}

self.rlp_buf.clear();
- let rlp_node = BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask)
- .rlp(&mut self.rlp_buf);
+ let branch_node_ref =
+ BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask);
+ let rlp_node = branch_node_ref.rlp(&mut self.rlp_buf);
*hash = rlp_node.as_hash();
- rlp_node
+
+ let store_in_db_trie_value = if let Some(updates) = self.updates.as_mut() {
+ let mut tree_mask_values = tree_mask_values.into_iter().rev();
+ let mut hash_mask_values = hash_mask_values.into_iter().rev();
+ let mut tree_mask = TrieMask::default();
+ let mut hash_mask = TrieMask::default();
+ for (i, child) in branch_node_ref.children() {
+ if child.is_some() {
+ if hash_mask_values.next().unwrap() {
+ hash_mask.set_bit(i);
+ }
+ if tree_mask_values.next().unwrap() {
+ tree_mask.set_bit(i);
+ }
+ }
+ }
+
+ // Store in DB trie if there are either any children that are stored in the
+ // DB trie, or any children represent hashed values
+ let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty();
+ if store_in_db_trie {
+ hashes.reverse();
+ let branch_node = BranchNodeCompact::new(
+ *state_mask,
+ tree_mask,
+ hash_mask,
+ hashes,
+ hash.filter(|_| path.len() == 0),
+ );
+ updates.updated_nodes.insert(path.clone(), branch_node);
+ }
+
+ store_in_db_trie
+ } else {
+ false
+ };
+ *store_in_db_trie = Some(store_in_db_trie_value);
+
+ (
+ rlp_node,
+ true,
+ SparseNodeType::Branch { store_in_db_trie: store_in_db_trie_value },
+ )
}
};
- buffers.rlp_node_stack.push((path, rlp_node));
+ buffers.rlp_node_stack.push((path, rlp_node, calculated, node_type));
}

+ debug_assert_eq!(buffers.rlp_node_stack.len(), 1);
buffers.rlp_node_stack.pop().unwrap().1
}
}

+/// Enum representing sparse trie node type.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+enum SparseNodeType {
+ /// Empty trie node.
+ Empty,
+ /// The hash of the node that was not revealed.
+ Hash,
+ /// Sparse leaf node.
+ Leaf,
+ /// Sparse extension node.
+ Extension {
+ /// A flag indicating whether the extension node should be stored in the database.
+ store_in_db_trie: bool,
+ },
+ /// Sparse branch node.
+ Branch {
+ /// A flag indicating whether the branch node should be stored in the database.
+ store_in_db_trie: bool,
+ },
+}
+
+impl SparseNodeType {
+ const fn is_branch(&self) -> bool {
+ matches!(self, Self::Branch { .. })
+ }
+
+ const fn store_in_db_trie(&self) -> bool {
+ match *self {
+ Self::Extension { store_in_db_trie } | Self::Branch { store_in_db_trie } => {
+ store_in_db_trie
+ }
+ _ => false,
+ }
+ }
+}
+
/// Enum representing trie nodes in sparse trie.
-#[derive(PartialEq, Eq, Clone, Debug)]
+#[derive(Debug, Clone, PartialEq, Eq)]
pub enum SparseNode {
/// Empty trie node.
Empty,
@@ -760,6 +954,9 @@ pub enum SparseNode {
/// Pre-computed hash of the sparse node.
/// Can be reused unless this trie path has been updated.
hash: Option<B256>,
+ /// Pre-computed flag indicating whether the trie node should be stored in the database.
+ /// Can be reused unless this trie path has been updated.
+ store_in_db_trie: Option<bool>,
},
}

@@ -776,7 +973,7 @@ impl SparseNode {
/// Create new [`SparseNode::Branch`] from state mask.
pub const fn new_branch(state_mask: TrieMask) -> Self {
- Self::Branch { state_mask, hash: None }
+ Self::Branch { state_mask, hash: None, store_in_db_trie: None }
}

/// Create new [`SparseNode::Branch`] with two bits set.
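For orientation, the mask folding in the branch arm above can be pictured in isolation. The per-child booleans are collected while walking children from `f` down to `0`, so they are reversed before bits are set for each occupied child index. A self-contained illustration (assumed helper, mirroring the hunk's logic rather than quoting it):

    use reth_trie_common::TrieMask;

    fn fold_masks(
        state_mask: TrieMask,
        tree_bits: Vec<bool>,
        hash_bits: Vec<bool>,
    ) -> (TrieMask, TrieMask) {
        let mut tree_mask = TrieMask::default();
        let mut hash_mask = TrieMask::default();
        let mut tree_iter = tree_bits.into_iter().rev();
        let mut hash_iter = hash_bits.into_iter().rev();
        for i in 0..16u8 {
            // Only occupied children consume a collected value.
            if state_mask.is_bit_set(i) {
                if tree_iter.next().unwrap() {
                    tree_mask.set_bit(i);
                }
                if hash_iter.next().unwrap() {
                    hash_mask.set_bit(i);
                }
            }
        }
        (tree_mask, hash_mask)
    }

For `state_mask = TrieMask::new(0b110)` the values were collected for child 2 first, then child 1; after the reversal, the first consumed value lands on child 1 and the second on child 2, matching the ascending bit order of `BranchNodeCompact`.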
@@ -785,7 +982,7 @@ impl SparseNode {
// set bits for both children
(1u16 << bit_a) | (1u16 << bit_b),
);
- Self::Branch { state_mask, hash: None }
+ Self::Branch { state_mask, hash: None, store_in_db_trie: None }
}

/// Create new [`SparseNode::Extension`] from the key slice.
@@ -812,7 +1009,7 @@ struct RlpNodeBuffers {
/// Stack of paths we need rlp nodes for and whether the path is in the prefix set.
path_stack: Vec<(Nibbles, Option<bool>)>,
/// Stack of rlp nodes
- rlp_node_stack: Vec<(Nibbles, RlpNode)>,
+ rlp_node_stack: Vec<(Nibbles, RlpNode, bool, SparseNodeType)>,
/// Reusable branch child path
branch_child_buf: SmallVec<[Nibbles; 16]>,
/// Reusable branch value stack
@@ -831,37 +1028,108 @@ impl RlpNodeBuffers {
}
}

+/// The aggregation of sparse trie updates.
+#[derive(Debug, Clone, Default, PartialEq, Eq)]
+pub struct SparseTrieUpdates {
+ pub(crate) updated_nodes: HashMap<Nibbles, BranchNodeCompact>,
+ pub(crate) removed_nodes: HashSet<Nibbles>,
+ pub(crate) wiped: bool,
+}
+
+impl SparseTrieUpdates {
+ /// Create new wiped sparse trie updates.
+ pub fn wiped() -> Self {
+ Self { wiped: true, ..Default::default() }
+ }
+}
+
#[cfg(test)]
mod tests {
- use std::collections::BTreeMap;
-
use super::*;
use alloy_primitives::{map::HashSet, U256};
+ use alloy_rlp::Encodable;
use assert_matches::assert_matches;
use itertools::Itertools;
use prop::sample::SizeRange;
use proptest::prelude::*;
+ use proptest_arbitrary_interop::arb;
use rand::seq::IteratorRandom;
- use reth_trie::{BranchNode, ExtensionNode, LeafNode};
+ use reth_primitives_traits::Account;
+ use reth_trie::{
+ hashed_cursor::{noop::NoopHashedAccountCursor, HashedPostStateAccountCursor},
+ node_iter::{TrieElement, TrieNodeIter},
+ trie_cursor::noop::NoopAccountTrieCursor,
+ walker::TrieWalker,
+ BranchNode, ExtensionNode, HashedPostState, LeafNode, TrieAccount,
+ };
use reth_trie_common::{
proof::{ProofNodes, ProofRetainer},
HashBuilder,
};
+ use std::collections::BTreeMap;
+
+ /// Pad nibbles to the length of a B256 hash with zeros on the left.
+ fn pad_nibbles_left(nibbles: Nibbles) -> Nibbles {
+ let mut base =
+ Nibbles::from_nibbles_unchecked(vec![0; B256::len_bytes() * 2 - nibbles.len()]);
+ base.extend_from_slice_unchecked(&nibbles);
+ base
+ }
+
+ /// Pad nibbles to the length of a B256 hash with zeros on the right.
+ fn pad_nibbles_right(mut nibbles: Nibbles) -> Nibbles {
+ nibbles.extend_from_slice_unchecked(&vec![0; B256::len_bytes() * 2 - nibbles.len()]);
+ nibbles
+ }

/// Calculate the state root by feeding the provided state to the hash builder and retaining the
/// proofs for the provided targets.
///
/// Returns the state root and the retained proof nodes.
- fn hash_builder_root_with_proofs<V: AsRef<[u8]>>(
- state: impl IntoIterator<Item = (Nibbles, V)>,
+ fn run_hash_builder(
+ state: impl IntoIterator<Item = (Nibbles, Account)> + Clone,
proof_targets: impl IntoIterator<Item = Nibbles>,
- ) -> (B256, ProofNodes) {
- let mut hash_builder =
- HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter(proof_targets));
- for (key, value) in state {
- hash_builder.add_leaf(key, value.as_ref());
+ ) -> HashBuilder {
+ let mut account_rlp = Vec::new();
+
+ let mut hash_builder = HashBuilder::default()
+ .with_updates(true)
+ .with_proof_retainer(ProofRetainer::from_iter(proof_targets));
+
+ let mut prefix_set = PrefixSetMut::default();
+ prefix_set.extend_keys(state.clone().into_iter().map(|(nibbles, _)| nibbles));
+ let walker = TrieWalker::new(NoopAccountTrieCursor::default(), prefix_set.freeze())
+ .with_deletions_retained(true);
+ let hashed_post_state = HashedPostState::default()
+ .with_accounts(state.into_iter().map(|(nibbles, account)| {
+ (nibbles.pack().into_inner().unwrap().into(), Some(account))
+ }))
+ .into_sorted();
+ let mut node_iter = TrieNodeIter::new(
+ walker,
+ HashedPostStateAccountCursor::new(
+ NoopHashedAccountCursor::default(),
+ hashed_post_state.accounts(),
+ ),
+ );
+
+ while let Some(node) = node_iter.try_next().unwrap() {
+ match node {
+ TrieElement::Branch(branch) => {
+ hash_builder.add_branch(branch.key, branch.value, branch.children_are_in_trie);
+ }
+ TrieElement::Leaf(key, account) => {
+ let account = TrieAccount::from((account, EMPTY_ROOT_HASH));
+ account.encode(&mut account_rlp);
+
+ hash_builder.add_leaf(Nibbles::unpack(key), &account_rlp);
+ account_rlp.clear();
+ }
+ }
}
- (hash_builder.root(), hash_builder.take_proof_nodes())
+ hash_builder.root();
+
+ hash_builder
}

/// Assert that the sparse trie nodes and the proof nodes from the hash builder are equal.
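One subtlety in the helper above: the hash builder keys leaves by hashed `B256` addresses, while these tests address leaves by full-width `Nibbles` paths, so the helper packs each path back into a key. A small sketch of that round trip (hypothetical value; assumes 64-nibble paths such as those produced by `pad_nibbles_left`/`pad_nibbles_right`):

    use alloy_primitives::B256;
    use reth_trie_common::Nibbles;

    let path = Nibbles::unpack(B256::repeat_byte(0xab));
    // `pack()` yields the byte form; `into_inner()` only succeeds for
    // full-width (32-byte) paths, which is why the tests pad their keys.
    let key: B256 = path.pack().into_inner().unwrap().into();
    assert_eq!(key, B256::repeat_byte(0xab));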
@@ -915,58 +1183,80 @@
#[test]
fn sparse_trie_empty_update_one() {
- let path = Nibbles::unpack(B256::with_last_byte(42));
- let value = alloy_rlp::encode_fixed_size(&U256::from(1));
+ let key = Nibbles::unpack(B256::with_last_byte(42));
+ let value = || Account::default();
+ let value_encoded = || {
+ let mut account_rlp = Vec::new();
+ TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp);
+ account_rlp
+ };

- let (hash_builder_root, hash_builder_proof_nodes) =
- hash_builder_root_with_proofs([(path.clone(), &value)], [path.clone()]);
+ let mut hash_builder = run_hash_builder([(key.clone(), value())], [key.clone()]);

- let mut sparse = RevealedSparseTrie::default();
- sparse.update_leaf(path, value.to_vec()).unwrap();
+ let mut sparse = RevealedSparseTrie::default().with_updates(true);
+ sparse.update_leaf(key, value_encoded()).unwrap();
let sparse_root = sparse.root();
+ let sparse_updates = sparse.take_updates();

- assert_eq!(sparse_root, hash_builder_root);
- assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+ assert_eq!(sparse_root, hash_builder.root());
+ assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap());
+ assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes());
}

#[test]
fn sparse_trie_empty_update_multiple_lower_nibbles() {
+ reth_tracing::init_test_tracing();
+
let paths = (0..=16).map(|b| Nibbles::unpack(B256::with_last_byte(b))).collect::<Vec<_>>();
- let value = alloy_rlp::encode_fixed_size(&U256::from(1));
+ let value = || Account::default();
+ let value_encoded = || {
+ let mut account_rlp = Vec::new();
+ TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp);
+ account_rlp
+ };

- let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs(
- paths.iter().cloned().zip(std::iter::repeat_with(|| value.clone())),
+ let mut hash_builder = run_hash_builder(
+ paths.iter().cloned().zip(std::iter::repeat_with(value)),
paths.clone(),
);

- let mut sparse = RevealedSparseTrie::default();
+ let mut sparse = RevealedSparseTrie::default().with_updates(true);
for path in &paths {
- sparse.update_leaf(path.clone(), value.to_vec()).unwrap();
+ sparse.update_leaf(path.clone(), value_encoded()).unwrap();
}
let sparse_root = sparse.root();
+ let sparse_updates = sparse.take_updates();

- assert_eq!(sparse_root, hash_builder_root);
- assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+ assert_eq!(sparse_root, hash_builder.root());
+ assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap());
+ assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes());
}

#[test]
fn sparse_trie_empty_update_multiple_upper_nibbles() {
let paths = (239..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::<Vec<_>>();
- let value = alloy_rlp::encode_fixed_size(&U256::from(1));
+ let value = || Account::default();
+ let value_encoded = || {
+ let mut account_rlp = Vec::new();
+ TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp);
+ account_rlp
+ };

- let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs(
- paths.iter().cloned().zip(std::iter::repeat_with(|| value.clone())),
+ let mut hash_builder = run_hash_builder(
+ paths.iter().cloned().zip(std::iter::repeat_with(value)),
paths.clone(),
);

- let mut sparse = RevealedSparseTrie::default();
+ let mut sparse = RevealedSparseTrie::default().with_updates(true);
for path in &paths {
- sparse.update_leaf(path.clone(), value.to_vec()).unwrap();
+ sparse.update_leaf(path.clone(), value_encoded()).unwrap();
}
let sparse_root = sparse.root();
+ let sparse_updates = sparse.take_updates();

- assert_eq!(sparse_root, hash_builder_root);
- assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+ assert_eq!(sparse_root, hash_builder.root());
+ assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap());
+ assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes());
}

#[test]
@@ -980,55 +1270,79 @@
})
})
.collect::<Vec<_>>();
- let value = alloy_rlp::encode_fixed_size(&U256::from(1));
+ let value = || Account::default();
+ let value_encoded = || {
+ let mut account_rlp = Vec::new();
+ TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp);
+ account_rlp
+ };

- let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs(
- paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(|| value.clone())),
+ let mut hash_builder = run_hash_builder(
+ paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(value)),
paths.clone(),
);

- let mut sparse = RevealedSparseTrie::default();
+ let mut sparse = RevealedSparseTrie::default().with_updates(true);
for path in &paths {
- sparse.update_leaf(path.clone(), value.to_vec()).unwrap();
+ sparse.update_leaf(path.clone(), value_encoded()).unwrap();
}
let sparse_root = sparse.root();
+ let sparse_updates = sparse.take_updates();

- assert_eq!(sparse_root, hash_builder_root);
- assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+ assert_eq!(sparse_root, hash_builder.root());
+ pretty_assertions::assert_eq!(
+ BTreeMap::from_iter(sparse_updates.updated_nodes),
+ BTreeMap::from_iter(hash_builder.updated_branch_nodes.take().unwrap())
+ );
+ assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes());
}

#[test]
fn sparse_trie_empty_update_repeated() {
let paths = (0..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::<Vec<_>>();
- let old_value = alloy_rlp::encode_fixed_size(&U256::from(1));
- let new_value = alloy_rlp::encode_fixed_size(&U256::from(2));
+ let old_value = Account { nonce: 1, ..Default::default() };
+ let old_value_encoded = {
+ let mut account_rlp = Vec::new();
+ TrieAccount::from((old_value, EMPTY_ROOT_HASH)).encode(&mut account_rlp);
+ account_rlp
+ };
+ let new_value = Account { nonce: 2, ..Default::default() };
+ let new_value_encoded = {
+ let mut account_rlp = Vec::new();
+ TrieAccount::from((new_value, EMPTY_ROOT_HASH)).encode(&mut account_rlp);
+ account_rlp
+ };

- let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs(
- paths.iter().cloned().zip(std::iter::repeat_with(|| old_value.clone())),
+ let mut hash_builder = run_hash_builder(
+ paths.iter().cloned().zip(std::iter::repeat_with(|| old_value)),
paths.clone(),
);

- let mut sparse = RevealedSparseTrie::default();
+ let mut sparse = RevealedSparseTrie::default().with_updates(true);
for path in &paths {
- sparse.update_leaf(path.clone(), old_value.to_vec()).unwrap();
+ sparse.update_leaf(path.clone(), old_value_encoded.clone()).unwrap();
}
let sparse_root = sparse.root();
+ let sparse_updates = sparse.updates_ref();

- assert_eq!(sparse_root, hash_builder_root);
- assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+ assert_eq!(sparse_root, hash_builder.root());
+ assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap());
+
assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes());

- let (hash_builder_root, hash_builder_proof_nodes) = hash_builder_root_with_proofs(
- paths.iter().cloned().zip(std::iter::repeat_with(|| new_value.clone())),
+ let mut hash_builder = run_hash_builder(
+ paths.iter().cloned().zip(std::iter::repeat_with(|| new_value)),
paths.clone(),
);

for path in &paths {
- sparse.update_leaf(path.clone(), new_value.to_vec()).unwrap();
+ sparse.update_leaf(path.clone(), new_value_encoded.clone()).unwrap();
}
let sparse_root = sparse.root();
+ let sparse_updates = sparse.take_updates();

- assert_eq!(sparse_root, hash_builder_root);
- assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+ assert_eq!(sparse_root, hash_builder.root());
+ assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap());
+ assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder.take_proof_nodes());
}

#[test]
@@ -1286,7 +1600,7 @@
TrieMask::new(0b11),
));

- let mut sparse = RevealedSparseTrie::from_root(branch.clone()).unwrap();
+ let mut sparse = RevealedSparseTrie::from_root(branch.clone(), false).unwrap();

// Reveal a branch node and one of its children
//
@@ -1311,30 +1625,43 @@
// to test the sparse trie updates.
const KEY_NIBBLES_LEN: usize = 3;

- fn test(updates: Vec<(HashMap<Nibbles, Vec<u8>>, HashSet<Nibbles>)>) {
+ fn test(updates: Vec<(HashMap<Nibbles, Account>, HashSet<Nibbles>)>) {
{
let mut state = BTreeMap::default();
- let mut sparse = RevealedSparseTrie::default();
+ let mut sparse = RevealedSparseTrie::default().with_updates(true);
for (update, keys_to_delete) in updates {
// Insert state updates into the sparse trie and calculate the root
- for (key, value) in update.clone() {
- sparse.update_leaf(key, value).unwrap();
+ for (key, account) in update.clone() {
+ let account = TrieAccount::from((account, EMPTY_ROOT_HASH));
+ let mut account_rlp = Vec::new();
+ account.encode(&mut account_rlp);
+ sparse.update_leaf(key, account_rlp).unwrap();
}
- let sparse_root = sparse.root();
+ // We need to clone the sparse trie, so that all updated branch nodes are
+ // preserved, and not only those that were changed after the last call to
+ // `root()`.
+ let mut updated_sparse = sparse.clone();
+ let sparse_root = updated_sparse.root();
+ let sparse_updates = updated_sparse.take_updates();

// Insert state updates into the hash builder and calculate the root
state.extend(update);
- let (hash_builder_root, hash_builder_proof_nodes) =
- hash_builder_root_with_proofs(
- state.clone(),
- state.keys().cloned().collect::<Vec<_>>(),
- );
+ let mut hash_builder =
+ run_hash_builder(state.clone(), state.keys().cloned().collect::<Vec<_>>());

// Assert that the sparse trie root matches the hash builder root
- assert_eq!(sparse_root, hash_builder_root);
+ assert_eq!(sparse_root, hash_builder.root());
+ // Assert that the sparse trie updates match the hash builder updates
+ pretty_assertions::assert_eq!(
+ sparse_updates.updated_nodes,
+ hash_builder.updated_branch_nodes.take().unwrap()
+ );
// Assert that the sparse trie nodes match the hash builder proof nodes
- assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+ assert_eq_sparse_trie_proof_nodes(
+ &updated_sparse,
+ hash_builder.take_proof_nodes(),
+ );

// Delete some keys from both the hash builder and the sparse trie and check
// that the sparse trie root still matches the hash builder root
@@ -1343,34 +1670,36 @@
sparse.remove_leaf(&key).unwrap();
}

- let sparse_root = sparse.root();
+ // We need to clone the sparse trie, so that all updated branch nodes are
+ // preserved, and not only those that were changed after the last call to
+ // `root()`.
+ let mut updated_sparse = sparse.clone();
+ let sparse_root = updated_sparse.root();
+ let sparse_updates = updated_sparse.take_updates();

- let (hash_builder_root, hash_builder_proof_nodes) =
- hash_builder_root_with_proofs(
- state.clone(),
- state.keys().cloned().collect::<Vec<_>>(),
- );
+ let mut hash_builder =
+ run_hash_builder(state.clone(), state.keys().cloned().collect::<Vec<_>>());

// Assert that the sparse trie root matches the hash builder root
- assert_eq!(sparse_root, hash_builder_root);
+ assert_eq!(sparse_root, hash_builder.root());
+ // Assert that the sparse trie updates match the hash builder updates
+ pretty_assertions::assert_eq!(
+ sparse_updates.updated_nodes,
+ hash_builder.updated_branch_nodes.take().unwrap()
+ );
// Assert that the sparse trie nodes match the hash builder proof nodes
- assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes);
+ assert_eq_sparse_trie_proof_nodes(
+ &updated_sparse,
+ hash_builder.take_proof_nodes(),
+ );
}
}
}

- /// Pad nibbles of length [`KEY_NIBBLES_LEN`] with zeros to the length of a B256 hash.
- fn pad_nibbles(nibbles: Nibbles) -> Nibbles {
- let mut base =
- Nibbles::from_nibbles_unchecked([0; { B256::len_bytes() / 2 - KEY_NIBBLES_LEN }]);
- base.extend_from_slice_unchecked(&nibbles);
- base
- }
-
fn transform_updates(
- updates: Vec<HashMap<Nibbles, Vec<u8>>>,
+ updates: Vec<HashMap<Nibbles, Account>>,
mut rng: impl Rng,
- ) -> Vec<(HashMap<Nibbles, Vec<u8>>, HashSet<Nibbles>)> {
+ ) -> Vec<(HashMap<Nibbles, Account>, HashSet<Nibbles>)> {
let mut keys = HashSet::new();
updates
.into_iter()
@@ -1393,8 +1722,8 @@
proptest!(ProptestConfig::with_cases(10), |(
updates in proptest::collection::vec(
proptest::collection::hash_map(
- any_with::<Nibbles>(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles),
- any::<Vec<u8>>(),
+ any_with::<Nibbles>(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles_right),
+ arb::<Account>(),
1..100,
).prop_map(HashMap::from_iter),
1..100,
)
@@ -1417,24 +1746,29 @@
/// replacing it.
#[test] fn sparse_trie_reveal_node_1() { - let key1 = || Nibbles::from_nibbles_unchecked([0x00]); - let key2 = || Nibbles::from_nibbles_unchecked([0x01]); - let key3 = || Nibbles::from_nibbles_unchecked([0x02]); - let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x02])); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; // Generate the proof for the root node and initialize the sparse trie with it - let (_, proof_nodes) = hash_builder_root_with_proofs( - [(key1(), value()), (key3(), value())], - [Nibbles::default()], - ); + let proof_nodes = + run_hash_builder([(key1(), value()), (key3(), value())], [Nibbles::default()]) + .take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + false, ) .unwrap(); // Generate the proof for the first key and reveal it in the sparse trie - let (_, proof_nodes) = - hash_builder_root_with_proofs([(key1(), value()), (key3(), value())], [key1()]); + let proof_nodes = + run_hash_builder([(key1(), value()), (key3(), value())], [key1()]).take_proof_nodes(); for (path, node) in proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1446,7 +1780,7 @@ mod tests { ); // Insert the leaf for the second key - sparse.update_leaf(key2(), value().to_vec()).unwrap(); + sparse.update_leaf(key2(), value_encoded()).unwrap(); // Check that the branch node was updated and another nibble was set assert_eq!( @@ -1455,8 +1789,8 @@ mod tests { ); // Generate the proof for the third key and reveal it in the sparse trie - let (_, proof_nodes_3) = - hash_builder_root_with_proofs([(key1(), value()), (key3(), value())], [key3()]); + let proof_nodes_3 = + run_hash_builder([(key1(), value()), (key3(), value())], [key3()]).take_proof_nodes(); for (path, node) in proof_nodes_3.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1469,10 +1803,11 @@ mod tests { // Generate the nodes for the full trie with all three key using the hash builder, and // compare them to the sparse trie - let (_, proof_nodes) = hash_builder_root_with_proofs( + let proof_nodes = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], [key1(), key2(), key3()], - ); + ) + .take_proof_nodes(); assert_eq_sparse_trie_proof_nodes(&sparse, proof_nodes); } @@ -1489,27 +1824,30 @@ mod tests { /// into an extension node, so it should ignore this node. 
#[test] fn sparse_trie_reveal_node_2() { - let key1 = || Nibbles::from_nibbles_unchecked([0x00, 0x00]); - let key2 = || Nibbles::from_nibbles_unchecked([0x01, 0x01]); - let key3 = || Nibbles::from_nibbles_unchecked([0x01, 0x02]); - let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x00])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x01])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x02])); + let value = || Account::default(); // Generate the proof for the root node and initialize the sparse trie with it - let (_, proof_nodes) = hash_builder_root_with_proofs( + let proof_nodes = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], [Nibbles::default()], - ); + ) + .take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + false, ) .unwrap(); // Generate the proof for the children of the root branch node and reveal it in the sparse // trie - let (_, proof_nodes) = hash_builder_root_with_proofs( + let proof_nodes = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], [key1(), Nibbles::from_nibbles_unchecked([0x01])], - ); + ) + .take_proof_nodes(); for (path, node) in proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1530,10 +1868,9 @@ mod tests { ); // Generate the proof for the third key and reveal it in the sparse trie - let (_, proof_nodes) = hash_builder_root_with_proofs( - [(key1(), value()), (key2(), value()), (key3(), value())], - [key2()], - ); + let proof_nodes = + run_hash_builder([(key1(), value()), (key2(), value()), (key3(), value())], [key2()]) + .take_proof_nodes(); for (path, node) in proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1555,18 +1892,23 @@ mod tests { /// overwritten with the extension node from the proof. 
#[test] fn sparse_trie_reveal_node_3() { - let key1 = || Nibbles::from_nibbles_unchecked([0x00, 0x01]); - let key2 = || Nibbles::from_nibbles_unchecked([0x00, 0x02]); - let key3 = || Nibbles::from_nibbles_unchecked([0x01, 0x00]); - let value = || alloy_rlp::encode_fixed_size(&B256::repeat_byte(1)); + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x01])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x02])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x00])); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; // Generate the proof for the root node and initialize the sparse trie with it - let (_, proof_nodes) = hash_builder_root_with_proofs( - [(key1(), value()), (key2(), value())], - [Nibbles::default()], - ); + let proof_nodes = + run_hash_builder([(key1(), value()), (key2(), value())], [Nibbles::default()]) + .take_proof_nodes(); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + false, ) .unwrap(); @@ -1577,17 +1919,17 @@ mod tests { ); // Insert the leaf with a different prefix - sparse.update_leaf(key3(), value().to_vec()).unwrap(); + sparse.update_leaf(key3(), value_encoded()).unwrap(); // Check that the extension node was turned into a branch node assert_matches!( sparse.nodes.get(&Nibbles::default()), - Some(SparseNode::Branch { state_mask, hash: None }) if *state_mask == TrieMask::new(0b11) + Some(SparseNode::Branch { state_mask, hash: None, store_in_db_trie: None }) if *state_mask == TrieMask::new(0b11) ); // Generate the proof for the first key and reveal it in the sparse trie - let (_, proof_nodes) = - hash_builder_root_with_proofs([(key1(), value()), (key2(), value())], [key1()]); + let proof_nodes = + run_hash_builder([(key1(), value()), (key2(), value())], [key1()]).take_proof_nodes(); for (path, node) in proof_nodes.nodes_sorted() { sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap()).unwrap(); } @@ -1595,7 +1937,7 @@ mod tests { // Check that the branch node wasn't overwritten by the extension node in the proof assert_matches!( sparse.nodes.get(&Nibbles::default()), - Some(SparseNode::Branch { state_mask, hash: None }) if *state_mask == TrieMask::new(0b11) + Some(SparseNode::Branch { state_mask, hash: None, store_in_db_trie: None }) if *state_mask == TrieMask::new(0b11) ); } @@ -1671,4 +2013,67 @@ mod tests { ] ); } + + #[test] + fn hash_builder_branch_hash_mask() { + let key1 = || pad_nibbles_left(Nibbles::from_nibbles_unchecked([0x00])); + let key2 = || pad_nibbles_left(Nibbles::from_nibbles_unchecked([0x01])); + let value = || Account { bytecode_hash: Some(B256::repeat_byte(1)), ..Default::default() }; + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; + + let mut hash_builder = + run_hash_builder([(key1(), value()), (key2(), value())], [Nibbles::default()]); + let mut sparse = RevealedSparseTrie::default(); + sparse.update_leaf(key1(), value_encoded()).unwrap(); + sparse.update_leaf(key2(), value_encoded()).unwrap(); + let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); + + assert_eq!(sparse_root, hash_builder.root()); + assert_eq!(sparse_updates.updated_nodes, hash_builder.updated_branch_nodes.take().unwrap()); + } + + #[test] + fn 
sparse_trie_wipe() { + let mut sparse = RevealedSparseTrie::default().with_updates(true); + + let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); + + // Extension (Key = 5) – Level 0 + // └── Branch (Mask = 1011) – Level 1 + // ├── 0 -> Extension (Key = 23) – Level 2 + // │ └── Branch (Mask = 0101) – Level 3 + // │ ├── 1 -> Leaf (Key = 1, Path = 50231) – Level 4 + // │ └── 3 -> Leaf (Key = 3, Path = 50233) – Level 4 + // ├── 2 -> Leaf (Key = 013, Path = 52013) – Level 2 + // └── 3 -> Branch (Mask = 0101) – Level 2 + // ├── 1 -> Leaf (Key = 3102, Path = 53102) – Level 3 + // └── 3 -> Branch (Mask = 1010) – Level 3 + // ├── 0 -> Leaf (Key = 3302, Path = 53302) – Level 4 + // └── 2 -> Leaf (Key = 3320, Path = 53320) – Level 4 + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); + + sparse.wipe(); + + assert_eq!(sparse.root(), EMPTY_ROOT_HASH); + } } diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 134a3055c2b7..c1c3ae4dd876 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -25,6 +25,7 @@ revm.workspace = true alloy-rlp.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-trie.workspace = true # tracing tracing.workspace = true @@ -41,12 +42,6 @@ metrics = { workspace = true, optional = true } # `test-utils` feature triehash = { version = "0.8", optional = true } -# `serde` feature -serde = { workspace = true, optional = true } - -# `serde-bincode-compat` feature -serde_with = { workspace = true, optional = true } - [dev-dependencies] # reth reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } @@ -60,33 +55,24 @@ proptest.workspace = true proptest-arbitrary-interop.workspace = true serde_json.workspace = true criterion.workspace = true -bincode.workspace = true [features] metrics = ["reth-metrics", "dep:metrics"] serde = [ - "dep:serde", - "alloy-consensus/serde", "alloy-primitives/serde", - "revm/serde" -] -serde-bincode-compat = [ - "serde_with", - "reth-primitives/serde-bincode-compat", - "alloy-consensus/serde-bincode-compat" + "alloy-consensus/serde", + "alloy-trie/serde", + "revm/serde", + "reth-trie-common/serde" ] test-utils = [ "triehash", - "reth-trie-common/test-utils", - "reth-primitives/test-utils", "revm/test-utils", + "reth-primitives/test-utils", + "reth-trie-common/test-utils", "reth-stages-types/test-utils" ] -[[bench]] -name = "prefix_set" -harness = false - [[bench]] name = "hash_post_state" harness = false diff --git a/crates/trie/trie/benches/trie_root.rs b/crates/trie/trie/benches/trie_root.rs index ad169936463a..893e6e9e9994 100644 --- a/crates/trie/trie/benches/trie_root.rs +++ b/crates/trie/trie/benches/trie_root.rs @@ -44,7 +44,8 @@ criterion_main!(benches); mod implementations { use super::*; use alloy_rlp::Encodable; - use reth_trie_common::{root::adjust_index_for_rlp, HashBuilder, Nibbles}; + use alloy_trie::root::adjust_index_for_rlp; + use reth_trie_common::{HashBuilder, Nibbles}; pub fn 
trie_hash_ordered_trie_root(receipts: &[ReceiptWithBloom]) -> B256 {
triehash::ordered_trie_root::<KeccakHasher, _>(receipts.iter().map(|receipt| {
diff --git a/crates/trie/trie/src/forward_cursor.rs b/crates/trie/trie/src/forward_cursor.rs
index 6db214bb51a3..745fc351b904 100644
--- a/crates/trie/trie/src/forward_cursor.rs
+++ b/crates/trie/trie/src/forward_cursor.rs
@@ -30,7 +30,7 @@ where
/// exhausted. Returns the first entry for which `comparator` returns `false` or `None`.
fn advance_while_false(&mut self, comparator: impl Fn(&K) -> bool) -> Option<(K, V)> {
let mut entry = self.entries.get(self.index);
- while entry.map_or(false, |entry| comparator(&entry.0)) {
+ while entry.is_some_and(|entry| comparator(&entry.0)) {
self.index += 1;
entry = self.entries.get(self.index);
}
diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs
index 678914191527..e0689d450873 100644
--- a/crates/trie/trie/src/hashed_cursor/post_state.rs
+++ b/crates/trie/trie/src/hashed_cursor/post_state.rs
@@ -3,10 +3,9 @@ use crate::{
forward_cursor::ForwardInMemoryCursor, HashedAccountsSorted, HashedPostStateSorted,
HashedStorageSorted,
};
-use alloy_primitives::{B256, U256};
+use alloy_primitives::{map::HashSet, B256, U256};
use reth_primitives::Account;
use reth_storage_errors::db::DatabaseError;
-use std::collections::HashSet;

/// The hashed cursor factory for the post state.
#[derive(Clone, Debug)]
@@ -82,14 +81,14 @@ where
// It's an exact match, return the account from post state without looking up in the
// database.
- if post_state_entry.map_or(false, |entry| entry.0 == key) {
+ if post_state_entry.is_some_and(|entry| entry.0 == key) {
return Ok(post_state_entry)
}

// It's not an exact match, reposition to the first greater or equal account that wasn't
// cleared.
let mut db_entry = self.cursor.seek(key)?;
- while db_entry.as_ref().map_or(false, |(address, _)| self.is_account_cleared(address)) {
+ while db_entry.as_ref().is_some_and(|(address, _)| self.is_account_cleared(address)) {
db_entry = self.cursor.next()?;
}

@@ -103,7 +102,7 @@ where
// If post state was given precedence or account was cleared, move the cursor forward.
let mut db_entry = self.cursor.seek(last_account)?;
- while db_entry.as_ref().map_or(false, |(address, _)| {
+ while db_entry.as_ref().is_some_and(|(address, _)| {
address <= &last_account || self.is_account_cleared(address)
}) {
db_entry = self.cursor.next()?;
}
@@ -200,14 +199,14 @@ where
let post_state_cursor =
post_state_storage.map(|s| ForwardInMemoryCursor::new(&s.non_zero_valued_slots));
let cleared_slots = post_state_storage.map(|s| &s.zero_valued_slots);
- let storage_wiped = post_state_storage.map_or(false, |s| s.wiped);
+ let storage_wiped = post_state_storage.is_some_and(|s| s.wiped);
Self { cursor, post_state_cursor, cleared_slots, storage_wiped, last_slot: None }
}

/// Check if the slot was zeroed out in the post state.
/// The database is not checked since it already has no zero-valued slots.
fn is_slot_zero_valued(&self, slot: &B256) -> bool {
- self.cleared_slots.map_or(false, |s| s.contains(slot))
+ self.cleared_slots.is_some_and(|s| s.contains(slot))
}

/// Find the storage entry in post state or database that's greater or equal to provided subkey.
@@ -217,14 +216,14 @@ where
// If database storage was wiped or it's an exact match,
// return the storage slot from post state without looking up in the database.
- if self.storage_wiped || post_state_entry.map_or(false, |entry| entry.0 == subkey) { + if self.storage_wiped || post_state_entry.is_some_and(|entry| entry.0 == subkey) { return Ok(post_state_entry) } // It's not an exact match and storage was not wiped, // reposition to the first greater or equal account. let mut db_entry = self.cursor.seek(subkey)?; - while db_entry.as_ref().map_or(false, |entry| self.is_slot_zero_valued(&entry.0)) { + while db_entry.as_ref().is_some_and(|entry| self.is_slot_zero_valued(&entry.0)) { db_entry = self.cursor.next()?; } @@ -248,7 +247,7 @@ where let mut db_entry = self.cursor.seek(last_slot)?; while db_entry .as_ref() - .map_or(false, |entry| entry.0 == last_slot || self.is_slot_zero_valued(&entry.0)) + .is_some_and(|entry| entry.0 == last_slot || self.is_slot_zero_valued(&entry.0)) { db_entry = self.cursor.next()?; } diff --git a/crates/trie/trie/src/input.rs b/crates/trie/trie/src/input.rs index 18f9ada2f4ab..ea71558c2c1f 100644 --- a/crates/trie/trie/src/input.rs +++ b/crates/trie/trie/src/input.rs @@ -1,7 +1,7 @@ use crate::{prefix_set::TriePrefixSetsMut, updates::TrieUpdates, HashedPostState}; /// Inputs for trie-related computations. -#[derive(Default, Debug)] +#[derive(Default, Debug, Clone)] pub struct TrieInput { /// The collection of cached in-memory intermediate trie nodes that /// can be reused for computation. diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index bb568ae8b8cf..1e7eeb9b52b8 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -13,10 +13,6 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -/// The implementation of a container for storing intermediate changes to a trie. -/// The container indicates when the trie has been modified. -pub mod prefix_set; - /// The implementation of forward-only in-memory cursor. pub mod forward_cursor; @@ -50,9 +46,6 @@ pub mod witness; mod trie; pub use trie::{StateRoot, StorageRoot}; -/// Buffer for trie updates. -pub mod updates; - /// Utilities for state root checkpoint progress. mod progress; pub use progress::{IntermediateStateRootState, StateRootProgress}; @@ -63,17 +56,6 @@ pub mod stats; // re-export for convenience pub use reth_trie_common::*; -/// Bincode-compatible serde implementations for trie types. -/// -/// `bincode` crate allows for more efficient serialization of trie types, because it allows -/// non-string map keys. -/// -/// Read more: -#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] -pub mod serde_bincode_compat { - pub use super::updates::serde_bincode_compat as updates; -} - /// Trie calculation metrics. 
#[cfg(feature = "metrics")] pub mod metrics; diff --git a/crates/trie/trie/src/node_iter.rs b/crates/trie/trie/src/node_iter.rs index feebe36e16e9..60219eedd7cb 100644 --- a/crates/trie/trie/src/node_iter.rs +++ b/crates/trie/trie/src/node_iter.rs @@ -106,7 +106,7 @@ where if let Some((hashed_key, value)) = self.current_hashed_entry.take() { // If the walker's key is less than the unpacked hashed key, // reset the checked status and continue - if self.walker.key().map_or(false, |key| key < &Nibbles::unpack(hashed_key)) { + if self.walker.key().is_some_and(|key| key < &Nibbles::unpack(hashed_key)) { self.current_walker_key_checked = false; continue } diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof.rs index e99d686aca7f..34315416cb8d 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -4,7 +4,7 @@ use crate::{ prefix_set::{PrefixSetMut, TriePrefixSetsMut}, trie_cursor::TrieCursorFactory, walker::TrieWalker, - HashBuilder, Nibbles, + HashBuilder, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use alloy_primitives::{ keccak256, @@ -103,8 +103,11 @@ where let retainer = targets.keys().map(Nibbles::unpack).collect(); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); - let mut storages = HashMap::default(); - let mut account_rlp = Vec::with_capacity(128); + // Initialize all storage multiproofs as empty. + // Storage multiproofs for non empty tries will be overwritten if necessary. + let mut storages: HashMap<_, _> = + targets.keys().map(|key| (*key, StorageMultiProof::empty())).collect(); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut account_node_iter = TrieNodeIter::new(walker, hashed_account_cursor); while let Some(account_node) = account_node_iter.try_next()? { match account_node { @@ -132,6 +135,8 @@ where account.encode(&mut account_rlp as &mut dyn BufMut); hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); + + // Overwrite storage multiproof. storages.insert(hashed_address, storage_multiproof); } } diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index eca126744e96..fdfb86a53ddb 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -2,15 +2,16 @@ use crate::{ prefix_set::{PrefixSetMut, TriePrefixSetsMut}, Nibbles, }; -use alloy_primitives::{keccak256, Address, B256, U256}; +use alloy_primitives::{ + keccak256, + map::{hash_map, HashMap, HashSet}, + Address, B256, U256, +}; use itertools::Itertools; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_primitives::Account; use revm::db::{states::CacheAccount, AccountStatus, BundleAccount}; -use std::{ - borrow::Cow, - collections::{hash_map, HashMap, HashSet}, -}; +use std::borrow::Cow; /// Representation of in-memory hashed state. 
#[derive(PartialEq, Eq, Clone, Default, Debug)]
@@ -41,8 +42,8 @@ impl HashedPostState {
})
.collect::<Vec<(B256, (Option<Account>, HashedStorage))>>();

- let mut accounts = HashMap::with_capacity(hashed.len());
- let mut storages = HashMap::with_capacity(hashed.len());
+ let mut accounts = HashMap::with_capacity_and_hasher(hashed.len(), Default::default());
+ let mut storages = HashMap::with_capacity_and_hasher(hashed.len(), Default::default());
for (address, (account, storage)) in hashed {
accounts.insert(address, account);
storages.insert(address, storage);
}
@@ -68,8 +69,8 @@ impl HashedPostState {
})
.collect::<Vec<(B256, (Option<Account>, HashedStorage))>>();

- let mut accounts = HashMap::with_capacity(hashed.len());
- let mut storages = HashMap::with_capacity(hashed.len());
+ let mut accounts = HashMap::with_capacity_and_hasher(hashed.len(), Default::default());
+ let mut storages = HashMap::with_capacity_and_hasher(hashed.len(), Default::default());
for (address, (account, storage)) in hashed {
accounts.insert(address, account);
storages.insert(address, storage);
@@ -79,7 +80,10 @@ impl HashedPostState {

/// Construct [`HashedPostState`] from a single [`HashedStorage`].
pub fn from_hashed_storage(hashed_address: B256, storage: HashedStorage) -> Self {
- Self { accounts: HashMap::default(), storages: HashMap::from([(hashed_address, storage)]) }
+ Self {
+ accounts: HashMap::default(),
+ storages: HashMap::from_iter([(hashed_address, storage)]),
+ }
}

/// Set account entries on hashed state.
@@ -121,7 +125,8 @@ impl HashedPostState {
}

// Populate storage prefix sets.
- let mut storage_prefix_sets = HashMap::with_capacity(self.storages.len());
+ let mut storage_prefix_sets =
+ HashMap::with_capacity_and_hasher(self.storages.len(), Default::default());
for (hashed_address, hashed_storage) in &self.storages {
account_prefix_set.insert(Nibbles::unpack(hashed_address));
storage_prefix_sets.insert(*hashed_address, hashed_storage.construct_prefix_set());
diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs
index 1bf8cf1ce797..28517b23e90f 100644
--- a/crates/trie/trie/src/trie.rs
+++ b/crates/trie/trie/src/trie.rs
@@ -7,7 +7,7 @@ use crate::{
trie_cursor::TrieCursorFactory,
updates::{StorageTrieUpdates, TrieUpdates},
walker::TrieWalker,
- HashBuilder, Nibbles, TrieAccount,
+ HashBuilder, Nibbles, TrieAccount, TRIE_ACCOUNT_RLP_MAX_SIZE,
};
use alloy_consensus::EMPTY_ROOT_HASH;
use alloy_primitives::{keccak256, Address, B256};
@@ -178,7 +178,7 @@ where
}
};

- let mut account_rlp = Vec::with_capacity(128);
+ let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE);
let mut hashed_entries_walked = 0;
let mut updated_storage_nodes = 0;
while let Some(node) = account_node_iter.try_next()? {
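The `with_capacity` to `with_capacity_and_hasher` rewrites follow from the import switch in this file: the map types under `alloy_primitives::map` use a non-default build hasher, so the std-style constructor tied to `RandomState` is not available. Sketch:

    use alloy_primitives::{map::HashMap, B256};

    // `Default::default()` supplies the map's own build hasher.
    let mut accounts: HashMap<B256, u64> =
        HashMap::with_capacity_and_hasher(16, Default::default());
    accounts.insert(B256::ZERO, 1);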
{ @@ -258,11 +258,8 @@ where let root = hash_builder.root(); - trie_updates.finalize( - account_node_iter.walker, - hash_builder, - self.prefix_sets.destroyed_accounts, - ); + let removed_keys = account_node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys, self.prefix_sets.destroyed_accounts); let stats = tracker.finish(); @@ -434,7 +431,8 @@ where let root = hash_builder.root(); let mut trie_updates = StorageTrieUpdates::default(); - trie_updates.finalize(storage_node_iter.walker, hash_builder); + let removed_keys = storage_node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys); let stats = tracker.finish(); diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index 851670f4267a..fa59b70d1fd9 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -3,10 +3,9 @@ use crate::{ forward_cursor::ForwardInMemoryCursor, updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, }; -use alloy_primitives::B256; +use alloy_primitives::{map::HashSet, B256}; use reth_storage_errors::db::DatabaseError; use reth_trie_common::{BranchNodeCompact, Nibbles}; -use std::collections::HashSet; /// The trie cursor factory for the trie updates. #[derive(Debug, Clone)] @@ -79,13 +78,13 @@ impl<'a, C: TrieCursor> InMemoryAccountTrieCursor<'a, C> { exact: bool, ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> { let in_memory = self.in_memory_cursor.seek(&key); - if exact && in_memory.as_ref().map_or(false, |entry| entry.0 == key) { + if exact && in_memory.as_ref().is_some_and(|entry| entry.0 == key) { return Ok(in_memory) } // Reposition the cursor to the first greater or equal node that wasn't removed. let mut db_entry = self.cursor.seek(key.clone())?; - while db_entry.as_ref().map_or(false, |entry| self.removed_nodes.contains(&entry.0)) { + while db_entry.as_ref().is_some_and(|entry| self.removed_nodes.contains(&entry.0)) { db_entry = self.cursor.next()?; } @@ -105,7 +104,7 @@ impl<'a, C: TrieCursor> InMemoryAccountTrieCursor<'a, C> { let mut db_entry = self.cursor.seek(last.clone())?; while db_entry .as_ref() - .map_or(false, |entry| entry.0 < last || self.removed_nodes.contains(&entry.0)) + .is_some_and(|entry| entry.0 < last || self.removed_nodes.contains(&entry.0)) { db_entry = self.cursor.next()?; } @@ -184,7 +183,7 @@ impl<'a, C> InMemoryStorageTrieCursor<'a, C> { ) -> Self { let in_memory_cursor = updates.map(|u| ForwardInMemoryCursor::new(&u.storage_nodes)); let removed_nodes = updates.map(|u| &u.removed_nodes); - let storage_trie_cleared = updates.map_or(false, |u| u.is_deleted); + let storage_trie_cleared = updates.is_some_and(|u| u.is_deleted); Self { hashed_address, cursor, @@ -204,16 +203,17 @@ impl<C: TrieCursor> InMemoryStorageTrieCursor<'_, C> { ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> { let in_memory = self.in_memory_cursor.as_mut().and_then(|c| c.seek(&key)); if self.storage_trie_cleared || - (exact && in_memory.as_ref().map_or(false, |entry| entry.0 == key)) + (exact && in_memory.as_ref().is_some_and(|entry| entry.0 == key)) { return Ok(in_memory.filter(|(nibbles, _)| !exact || nibbles == &key)) } // Reposition the cursor to the first greater or equal node that wasn't removed.
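A note on the `with_capacity_and_hasher` changes in the `HashedPostState` hunks above: once the map type comes from `alloy_primitives::map` (which is generic over a non-default hasher), std's `with_capacity` constructor is no longer available, because that constructor is only defined for maps using `RandomState`. A std-only sketch of the same situation, with `FastMap` as an invented alias:

```rust
use std::collections::{hash_map::DefaultHasher, HashMap};
use std::hash::BuildHasherDefault;

// Any map alias with a pinned, non-default hasher loses `with_capacity`,
// because that constructor is only implemented for `RandomState` maps.
type FastMap<K, V> = HashMap<K, V, BuildHasherDefault<DefaultHasher>>;

fn main() {
    // `FastMap::with_capacity(8)` would not compile; the hasher must be passed in.
    let mut storages: FastMap<u64, &str> =
        FastMap::with_capacity_and_hasher(8, Default::default());
    storages.insert(1, "storage-entry");
    assert_eq!(storages.len(), 1);
}
```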
let mut db_entry = self.cursor.seek(key.clone())?; - while db_entry.as_ref().map_or(false, |entry| { - self.removed_nodes.as_ref().map_or(false, |r| r.contains(&entry.0)) - }) { + while db_entry + .as_ref() + .is_some_and(|entry| self.removed_nodes.as_ref().is_some_and(|r| r.contains(&entry.0))) + { db_entry = self.cursor.next()?; } @@ -234,8 +234,8 @@ impl<C: TrieCursor> InMemoryStorageTrieCursor<'_, C> { // Reposition the cursor to the first greater or equal node that wasn't removed. let mut db_entry = self.cursor.seek(last.clone())?; - while db_entry.as_ref().map_or(false, |entry| { - entry.0 < last || self.removed_nodes.as_ref().map_or(false, |r| r.contains(&entry.0)) + while db_entry.as_ref().is_some_and(|entry| { + entry.0 < last || self.removed_nodes.as_ref().is_some_and(|r| r.contains(&entry.0)) }) { db_entry = self.cursor.next()?; } diff --git a/crates/trie/trie/src/trie_cursor/subnode.rs b/crates/trie/trie/src/trie_cursor/subnode.rs index 9d5a2770b268..c928028eb157 100644 --- a/crates/trie/trie/src/trie_cursor/subnode.rs +++ b/crates/trie/trie/src/trie_cursor/subnode.rs @@ -89,7 +89,7 @@ impl CursorSubNode { /// Returns `true` if the current nibble has a root hash. pub fn hash_flag(&self) -> bool { - self.node.as_ref().map_or(false, |node| match self.nibble { + self.node.as_ref().is_some_and(|node| match self.nibble { // This guy has it -1 => node.root_hash.is_some(), // Or get it from the children diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index aaff293b379d..d1c5247966da 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -3,9 +3,8 @@ use crate::{ trie_cursor::{CursorSubNode, TrieCursor}, BranchNodeCompact, Nibbles, }; -use alloy_primitives::B256; +use alloy_primitives::{map::HashSet, B256}; use reth_storage_errors::db::DatabaseError; -use std::collections::HashSet; #[cfg(feature = "metrics")] use crate::metrics::WalkerMetrics; @@ -58,8 +57,13 @@ impl<C> TrieWalker<C> { /// Split the walker into stack and trie updates. pub fn split(mut self) -> (Vec<CursorSubNode>, HashSet<Nibbles>) { - let keys = self.removed_keys.take(); - (self.stack, keys.unwrap_or_default()) + let keys = self.take_removed_keys(); + (self.stack, keys) + } + + /// Take removed keys from the walker. + pub fn take_removed_keys(&mut self) -> HashSet<Nibbles> { + self.removed_keys.take().unwrap_or_default() } /// Prints the current stack of trie nodes. @@ -88,7 +92,7 @@ impl<C> TrieWalker<C> { /// Indicates whether the children of the current node are present in the trie. pub fn children_are_in_trie(&self) -> bool { - self.stack.last().map_or(false, |n| n.tree_flag()) + self.stack.last().is_some_and(|n| n.tree_flag()) } /// Returns the next unprocessed key in the trie. @@ -112,7 +116,7 @@ impl<C> TrieWalker<C> { self.can_skip_current_node = self .stack .last() - .map_or(false, |node| !self.changes.contains(node.full_key()) && node.hash_flag()); + .is_some_and(|node| !self.changes.contains(node.full_key()) && node.hash_flag()); } } diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 39d82a7bda7a..46f85c4d82e4 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -3,7 +3,7 @@ use crate::{ prefix_set::TriePrefixSetsMut, proof::{Proof, StorageProof}, trie_cursor::TrieCursorFactory, - HashedPostState, + HashedPostState, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{ @@ -97,7 +97,7 @@ where // Attempt to compute state root from proofs and gather additional // information for the witness.
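`take_removed_keys`, added to the walker just above, is a small application of `Option::take`: the keys are moved out exactly once, leaving `None` behind so the walker itself stays usable. A toy reproduction (the `Walker` struct here is hypothetical):

```rust
use std::collections::HashSet;

struct Walker {
    removed_keys: Option<HashSet<String>>,
}

impl Walker {
    /// Move the removed keys out, defaulting to an empty set.
    fn take_removed_keys(&mut self) -> HashSet<String> {
        self.removed_keys.take().unwrap_or_default()
    }
}

fn main() {
    let mut walker = Walker { removed_keys: Some(HashSet::from(["0x01".to_owned()])) };

    // The first call moves the set out...
    assert_eq!(walker.take_removed_keys().len(), 1);
    // ...so a second call observes `None` and yields an empty default.
    assert!(walker.take_removed_keys().is_empty());
}
```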
- let mut account_rlp = Vec::with_capacity(128); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut account_trie_nodes = BTreeMap::default(); for (hashed_address, hashed_slots) in proof_targets { let storage_multiproof = account_multiproof @@ -118,16 +118,15 @@ where account_rlp.clone() }); let key = Nibbles::unpack(hashed_address); - account_trie_nodes.extend( - self.target_nodes( - key.clone(), - value, - account_multiproof - .account_subtree - .matching_nodes_iter(&key) - .sorted_by(|a, b| a.0.cmp(b.0)), - )?, - ); + account_trie_nodes.extend(target_nodes( + key.clone(), + value, + Some(&mut self.witness), + account_multiproof + .account_subtree + .matching_nodes_iter(&key) + .sorted_by(|a, b| a.0.cmp(b.0)), + )?); // Gather and record storage trie nodes for this account. let mut storage_trie_nodes = BTreeMap::default(); @@ -138,19 +137,18 @@ where .and_then(|s| s.storage.get(&hashed_slot)) .filter(|v| !v.is_zero()) .map(|v| alloy_rlp::encode_fixed_size(v).to_vec()); - storage_trie_nodes.extend( - self.target_nodes( - slot_nibbles.clone(), - slot_value, - storage_multiproof - .subtree - .matching_nodes_iter(&slot_nibbles) - .sorted_by(|a, b| a.0.cmp(b.0)), - )?, - ); + storage_trie_nodes.extend(target_nodes( + slot_nibbles.clone(), + slot_value, + Some(&mut self.witness), + storage_multiproof + .subtree + .matching_nodes_iter(&slot_nibbles) + .sorted_by(|a, b| a.0.cmp(b.0)), + )?); } - Self::next_root_from_proofs(storage_trie_nodes, |key: Nibbles| { + next_root_from_proofs(storage_trie_nodes, |key: Nibbles| { // Right pad the target with 0s. let mut padded_key = key.pack(); padded_key.resize(32, 0); @@ -177,7 +175,7 @@ where })?; } - Self::next_root_from_proofs(account_trie_nodes, |key: Nibbles| { + next_root_from_proofs(account_trie_nodes, |key: Nibbles| { // Right pad the target with 0s. let mut padded_key = key.pack(); padded_key.resize(32, 0); @@ -197,63 +195,6 @@ where Ok(self.witness) } - /// Decodes and unrolls all nodes from the proof. Returns only sibling nodes - /// in the path of the target and the final leaf node with updated value. - fn target_nodes<'b>( - &mut self, - key: Nibbles, - value: Option<Vec<u8>>, - proof: impl IntoIterator<Item = (&'b Nibbles, &'b Bytes)>, - ) -> Result<BTreeMap<Nibbles, Either<B256, Vec<u8>>>, TrieWitnessError> { - let mut trie_nodes = BTreeMap::default(); - let mut proof_iter = proof.into_iter().enumerate().peekable(); - while let Some((idx, (path, encoded))) = proof_iter.next() { - // Record the node in witness. - self.witness.insert(keccak256(encoded.as_ref()), encoded.clone()); - - let mut next_path = path.clone(); - match TrieNode::decode(&mut &encoded[..])?
{ - TrieNode::Branch(branch) => { - next_path.push(key[path.len()]); - let children = branch_node_children(path.clone(), &branch); - for (child_path, value) in children { - if !key.starts_with(&child_path) { - let value = if value.len() < B256::len_bytes() { - Either::Right(value.to_vec()) - } else { - Either::Left(B256::from_slice(&value[1..])) - }; - trie_nodes.insert(child_path, value); - } - } - } - TrieNode::Extension(extension) => { - next_path.extend_from_slice(&extension.key); - } - TrieNode::Leaf(leaf) => { - next_path.extend_from_slice(&leaf.key); - if next_path != key { - trie_nodes.insert( - next_path.clone(), - Either::Right(leaf.value.as_slice().to_vec()), - ); - } - } - TrieNode::EmptyRoot => { - if idx != 0 || proof_iter.peek().is_some() { - return Err(TrieWitnessError::UnexpectedEmptyRoot(next_path)) - } - } - }; - } - - if let Some(value) = value { - trie_nodes.insert(key, Either::Right(value)); - } - - Ok(trie_nodes) - } - /// Retrieve proof targets for incoming hashed state. /// This method will aggregate all accounts and slots present in the hash state as well as /// select all existing slots from the database for the accounts that have been destroyed. @@ -272,84 +213,140 @@ where let mut storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor(*hashed_address)?; // position cursor at the start - if let Some((hashed_slot, _)) = storage_cursor.seek(B256::ZERO)? { - storage_keys.insert(hashed_slot); - } - while let Some((hashed_slot, _)) = storage_cursor.next()? { + let mut current_entry = storage_cursor.seek(B256::ZERO)?; + while let Some((hashed_slot, _)) = current_entry { storage_keys.insert(hashed_slot); + current_entry = storage_cursor.next()?; } } proof_targets.insert(*hashed_address, storage_keys); } Ok(proof_targets) } +} + +/// Decodes and unrolls all nodes from the proof. Returns only sibling nodes +/// in the path of the target and the final leaf node with updated value. +pub fn target_nodes<'b>( + key: Nibbles, + value: Option<Vec<u8>>, + mut witness: Option<&mut HashMap<B256, Bytes>>, + proof: impl IntoIterator<Item = (&'b Nibbles, &'b Bytes)>, +) -> Result<BTreeMap<Nibbles, Either<B256, Vec<u8>>>, TrieWitnessError> { + let mut trie_nodes = BTreeMap::default(); + let mut proof_iter = proof.into_iter().enumerate().peekable(); + while let Some((idx, (path, encoded))) = proof_iter.next() { + // Record the node in witness. + if let Some(witness) = witness.as_mut() { + witness.insert(keccak256(encoded.as_ref()), encoded.clone()); + } - fn next_root_from_proofs( - trie_nodes: BTreeMap<Nibbles, Either<B256, Vec<u8>>>, - mut trie_node_provider: impl FnMut(Nibbles) -> Result<Bytes, TrieWitnessError>, - ) -> Result<B256, TrieWitnessError> { - // Ignore branch child hashes in the path of leaves or lower child hashes. - let mut keys = trie_nodes.keys().peekable(); - let mut ignored = HashSet::<Nibbles>::default(); - while let Some(key) = keys.next() { - if keys.peek().map_or(false, |next| next.starts_with(key)) { - ignored.insert(key.clone()); + let mut next_path = path.clone(); + match TrieNode::decode(&mut &encoded[..])?
{ + TrieNode::Branch(branch) => { + next_path.push(key[path.len()]); + let children = branch_node_children(path.clone(), &branch); + for (child_path, value) in children { + if !key.starts_with(&child_path) { + let value = if value.len() < B256::len_bytes() { + Either::Right(value.to_vec()) + } else { + Either::Left(B256::from_slice(&value[1..])) + }; + trie_nodes.insert(child_path, value); + } + } + } + TrieNode::Extension(extension) => { + next_path.extend_from_slice(&extension.key); } + TrieNode::Leaf(leaf) => { + next_path.extend_from_slice(&leaf.key); + if next_path != key { + trie_nodes + .insert(next_path.clone(), Either::Right(leaf.value.as_slice().to_vec())); + } + } + TrieNode::EmptyRoot => { + if idx != 0 || proof_iter.peek().is_some() { + return Err(TrieWitnessError::UnexpectedEmptyRoot(next_path)) + } + } + }; + } + + if let Some(value) = value { + trie_nodes.insert(key, Either::Right(value)); + } + + Ok(trie_nodes) +} + +/// Computes the next root hash of a trie by processing a set of trie nodes and +/// their provided values. +pub fn next_root_from_proofs( + trie_nodes: BTreeMap<Nibbles, Either<B256, Vec<u8>>>, + mut trie_node_provider: impl FnMut(Nibbles) -> Result<Bytes, TrieWitnessError>, +) -> Result<B256, TrieWitnessError> { + // Ignore branch child hashes in the path of leaves or lower child hashes. + let mut keys = trie_nodes.keys().peekable(); + let mut ignored = HashSet::<Nibbles>::default(); + while let Some(key) = keys.next() { + if keys.peek().is_some_and(|next| next.starts_with(key)) { + ignored.insert(key.clone()); } + } - let mut hash_builder = HashBuilder::default(); - let mut trie_nodes = trie_nodes.into_iter().filter(|e| !ignored.contains(&e.0)).peekable(); - while let Some((path, value)) = trie_nodes.next() { - match value { - Either::Left(branch_hash) => { - let parent_branch_path = path.slice(..path.len() - 1); - if hash_builder.key.starts_with(&parent_branch_path) || - trie_nodes - .peek() - .map_or(false, |next| next.0.starts_with(&parent_branch_path)) - { - hash_builder.add_branch(path, branch_hash, false); - } else { - // Parent is a branch node that needs to be turned into an extension node. - let mut path = path.clone(); - loop { - let node = trie_node_provider(path.clone())?; - match TrieNode::decode(&mut &node[..])? { - TrieNode::Branch(branch) => { - let children = branch_node_children(path, &branch); - for (child_path, value) in children { - if value.len() < B256::len_bytes() { - hash_builder.add_leaf(child_path, value); - } else { - let hash = B256::from_slice(&value[1..]); - hash_builder.add_branch(child_path, hash, false); - } + let mut hash_builder = HashBuilder::default(); + let mut trie_nodes = trie_nodes.into_iter().filter(|e| !ignored.contains(&e.0)).peekable(); + while let Some((path, value)) = trie_nodes.next() { + match value { + Either::Left(branch_hash) => { + let parent_branch_path = path.slice(..path.len() - 1); + if hash_builder.key.starts_with(&parent_branch_path) || + trie_nodes.peek().is_some_and(|next| next.0.starts_with(&parent_branch_path)) + { + hash_builder.add_branch(path, branch_hash, false); + } else { + // Parent is a branch node that needs to be turned into an extension node. + let mut path = path.clone(); + loop { + let node = trie_node_provider(path.clone())?; + match TrieNode::decode(&mut &node[..])?
{ + TrieNode::Branch(branch) => { + let children = branch_node_children(path, &branch); + for (child_path, value) in children { + if value.len() < B256::len_bytes() { + hash_builder.add_leaf(child_path, value); + } else { + let hash = B256::from_slice(&value[1..]); + hash_builder.add_branch(child_path, hash, false); } - break - } - TrieNode::Leaf(leaf) => { - let mut child_path = path; - child_path.extend_from_slice(&leaf.key); - hash_builder.add_leaf(child_path, &leaf.value); - break - } - TrieNode::Extension(ext) => { - path.extend_from_slice(&ext.key); - } - TrieNode::EmptyRoot => { - return Err(TrieWitnessError::UnexpectedEmptyRoot(path)) - } + break + } + TrieNode::Leaf(leaf) => { + let mut child_path = path; + child_path.extend_from_slice(&leaf.key); + hash_builder.add_leaf(child_path, &leaf.value); + break + } + TrieNode::Extension(ext) => { + path.extend_from_slice(&ext.key); + } + TrieNode::EmptyRoot => { + return Err(TrieWitnessError::UnexpectedEmptyRoot(path)) } } } } - Either::Right(leaf_value) => { - hash_builder.add_leaf(path, &leaf_value); - } + } + Either::Right(leaf_value) => { + hash_builder.add_leaf(path, &leaf_value); } } - Ok(hash_builder.root()) } + Ok(hash_builder.root()) } /// Returned branch node children with keys in order. diff --git a/deny.toml b/deny.toml index e8f60461c852..8d0807f9de5c 100644 --- a/deny.toml +++ b/deny.toml @@ -4,8 +4,12 @@ [advisories] yanked = "warn" ignore = [ - # proc-macro-error 1.0.4 unmaintained https://rustsec.org/advisories/RUSTSEC-2024-0370 - "RUSTSEC-2024-0370" + # https://rustsec.org/advisories/RUSTSEC-2024-0379 used by boa (js-tracer) + "RUSTSEC-2024-0379", + # https://rustsec.org/advisories/RUSTSEC-2024-0384 used by sse example + "RUSTSEC-2024-0384", + # https://rustsec.org/advisories/RUSTSEC-2024-0388 used by ssz, will be removed https://github.com/sigp/ethereum_ssz/pull/34 + "RUSTSEC-2024-0388" ] # This section is considered when running `cargo deny check bans`. diff --git a/docs/crates/db.md b/docs/crates/db.md index 79eeae5ee4ff..688f7ea76cc0 100644 --- a/docs/crates/db.md +++ b/docs/crates/db.md @@ -212,7 +212,7 @@ pub trait DbTxMut: Send + Sync { Let's take a look at the `DbTx` and `DbTxMut` traits in action. -Revisiting the `DatabaseProvider` struct as an exampl, the `DatabaseProvider::header_by_number()` function uses the `DbTx::get()` function to get a header from the `Headers` table. +Revisiting the `DatabaseProvider` struct as an example, the `DatabaseProvider::header_by_number()` function uses the `DbTx::get()` function to get a header from the `Headers` table. [File: crates/storage/provider/src/providers/database/provider.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/provider/src/providers/database/provider.rs#L1319-L1336) @@ -267,7 +267,7 @@ let mut headers_cursor = provider.tx_ref().cursor_read::<tables::Headers>()?; let headers_walker = headers_cursor.walk_range(block_range.clone())?; ``` -Lets look at an examples of how cursors are used. The code snippet below contains the `unwind` method from the `BodyStage` defined in the `stages` crate. +Let's look at an example of how cursors are used. The code snippet below contains the `unwind` method from the `BodyStage` defined in the `stages` crate.
This function is responsible for unwinding any changes to the database if there is an error when executing the body stage within the Reth pipeline. [File: crates/stages/stages/src/stages/bodies.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/stages/stages/src/stages/bodies.rs#L267-L345) @@ -330,7 +330,7 @@ While this is a brief look at how cursors work in the context of database tables ## Summary -This chapter was packed with information, so lets do a quick review. The database is comprised of tables, with each table being a collection of key-value pairs representing various pieces of data in the blockchain. Any struct that implements the `Database` trait can view, update or delete entries in the various tables. The database design leverages nested traits and generic associated types to provide methods to interact with each table in the database. +This chapter was packed with information, so let's do a quick review. The database is comprised of tables, with each table being a collection of key-value pairs representing various pieces of data in the blockchain. Any struct that implements the `Database` trait can view, update or delete entries in the various tables. The database design leverages nested traits and generic associated types to provide methods to interact with each table in the database.
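The summary above compresses a lot of design. As a quick illustration, here is a hedged, std-only toy model of the pattern it describes (a marker type per table plus a transaction trait generic over tables); the names `Table`, `Headers`, and `Tx` are invented for this sketch and are not reth's actual definitions:

```rust
use std::collections::HashMap;

// A table is a marker type that fixes its key and value types at compile time.
trait Table {
    type Key: std::hash::Hash + Eq;
    type Value: Clone;
    const NAME: &'static str;
}

// Stand-in for a headers table: block number -> encoded header.
struct Headers;
impl Table for Headers {
    type Key = u64;
    type Value = String;
    const NAME: &'static str = "Headers";
}

// A read-only "transaction" exposing `get` generically over any table with
// these key/value types; reth generalizes this further with associated types.
struct Tx {
    headers: HashMap<u64, String>,
}

impl Tx {
    fn get<T: Table<Key = u64, Value = String>>(&self, key: T::Key) -> Option<T::Value> {
        self.headers.get(&key).cloned()
    }
}

fn main() {
    let tx = Tx { headers: HashMap::from([(1, "header-1".to_owned())]) };
    assert_eq!(tx.get::<Headers>(1).as_deref(), Some("header-1"));
}
```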
diff --git a/docs/crates/network.md b/docs/crates/network.md index a6ac24305658..be2c7cb3b143 100644 --- a/docs/crates/network.md +++ b/docs/crates/network.md @@ -991,9 +991,9 @@ fn import_transactions(&mut self, peer_id: PeerId, transactions: Vec { // transaction was already inserted entry.get_mut().push(peer_id); diff --git a/docs/crates/stages.md b/docs/crates/stages.md index c7815b453b4e..14666c1f44f9 100644 --- a/docs/crates/stages.md +++ b/docs/crates/stages.md @@ -43,7 +43,7 @@ pub trait Stage: Send + Sync { } ``` -To get a better idea of what is happening at each part of the pipeline, lets walk through what is going on under the hood within the `execute()` function at each stage, starting with `HeaderStage`. +To get a better idea of what is happening at each part of the pipeline, let's walk through what is going on under the hood within the `execute()` function at each stage, starting with `HeaderStage`.
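The network.md snippet above shows the map `Entry` pattern for tracking which peers announced a transaction. A small std-only reconstruction of that pattern (hashes and peer ids are dummy values):

```rust
use std::collections::{hash_map::Entry, HashMap};

fn main() {
    let mut transactions_by_peers: HashMap<&str, Vec<u32>> = HashMap::new();

    for (tx_hash, peer_id) in [("0xaa", 1), ("0xbb", 2), ("0xaa", 3)] {
        match transactions_by_peers.entry(tx_hash) {
            // transaction was already inserted: remember the additional peer
            Entry::Occupied(mut entry) => entry.get_mut().push(peer_id),
            // first time this transaction is seen
            Entry::Vacant(entry) => {
                entry.insert(vec![peer_id]);
            }
        }
    }

    assert_eq!(transactions_by_peers["0xaa"], vec![1, 3]);
}
```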
diff --git a/docs/design/metrics.md b/docs/design/metrics.md index 0ac1f71c90da..cc386a112516 100644 --- a/docs/design/metrics.md +++ b/docs/design/metrics.md @@ -42,7 +42,7 @@ There will only ever exist one description per metric `KeyName`; it is not possi The `metrics` crate provides three macros per metric variant: `register_<metric>!`, `<metric>!`, and `describe_<metric>!`. Prefer to use these where possible, since they generate the code necessary to register and update metrics under various conditions. - The `register_<metric>!` macro simply creates the metric and returns a handle to it (e.g. a `Counter`). These metric structs are thread-safe and cheap to clone. -- The `<metric>!` macro registers the metric if it does not exist, and updates it's value. +- The `<metric>!` macro registers the metric if it does not exist, and updates its value. - The `describe_<metric>!` macro adds an end-user description for the metric. How the metrics are exposed to the end-user is determined by the CLI. diff --git a/docs/repo/labels.md b/docs/repo/labels.md index 6b3dba97ee6d..6772b828ffcd 100644 --- a/docs/repo/labels.md +++ b/docs/repo/labels.md @@ -30,7 +30,7 @@ For easier at-a-glance communication of the status of issues and PRs the followi - https://github.com/paradigmxyz/reth/labels/S-duplicate - https://github.com/paradigmxyz/reth/labels/S-wontfix -**Misc.** +**Miscellaneous** - https://github.com/paradigmxyz/reth/labels/S-needs-triage - https://github.com/paradigmxyz/reth/labels/S-controversial diff --git a/docs/repo/layout.md b/docs/repo/layout.md index f78abe961227..dcb475e020eb 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -132,7 +132,7 @@ The IPC transport lives in [`rpc/ipc`](../../crates/rpc/ipc). - Supported transports: HTTP, WS, IPC - Supported namespaces: `eth_`, `engine_`, `debug_` - [`rpc/rpc-eth-api`](../../crates/rpc/rpc-eth-api/): Reth RPC 'eth' namespace API (including interface and implementation), this crate is re-exported by `rpc/rpc-api` -- [`rpc/rpc-eth-types`](../../crates/rpc/rpc-eth-types/): Types `supporting implementation` of 'eth' namespace RPC server API +- [`rpc/rpc-eth-types`](../../crates/rpc/rpc-eth-types/): Types `supporting the implementation` of 'eth' namespace RPC server API - [`rpc/rpc-server-types`](../../crates/rpc/rpc-server-types/): RPC server types and constants #### Utilities Crates @@ -159,7 +159,7 @@ These crates define primitive types or algorithms. ### Optimism -Crates related to the Optimism rollup are lives in [optimism](../../crates/optimism/). +Crates related to the Optimism rollup live in [optimism](../../crates/optimism/).
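A hedged sketch of the three macro families the metrics.md passage describes, assuming the `metrics` crate's 0.21-era API (the generation that still ships `register_*!` and `describe_*!`); exact macro signatures vary between crate versions, so treat the calls below as illustrative:

```rust
use metrics::{counter, describe_counter, register_counter};

fn main() {
    // `describe_counter!` attaches the end-user description to the metric name.
    describe_counter!("example_requests_total", "Total example requests processed");

    // `register_counter!` creates the metric and returns a cheap, thread-safe handle.
    let handle = register_counter!("example_requests_total");
    handle.increment(1);

    // `counter!` registers the metric if it does not exist, then updates its value.
    counter!("example_requests_total", 1);
}
```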
### Misc diff --git a/examples/beacon-api-sidecar-fetcher/Cargo.toml b/examples/beacon-api-sidecar-fetcher/Cargo.toml index 47a2a181f7e5..d9590f87e07a 100644 --- a/examples/beacon-api-sidecar-fetcher/Cargo.toml +++ b/examples/beacon-api-sidecar-fetcher/Cargo.toml @@ -11,6 +11,7 @@ reth-node-ethereum.workspace = true alloy-rpc-types-beacon.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true clap.workspace = true eyre.workspace = true diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index 2436ee0210e4..5ab851191843 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -1,4 +1,5 @@ use crate::BeaconSidecarConfig; +use alloy_consensus::Transaction as _; use alloy_primitives::B256; use alloy_rpc_types_beacon::sidecar::{BeaconBlobBundle, SidecarIterator}; use eyre::Result; @@ -97,6 +98,7 @@ where fn process_block(&mut self, block: &SealedBlockWithSenders) { let txs: Vec<_> = block .transactions() + .iter() .filter(|tx| tx.is_eip4844()) .map(|tx| (tx.clone(), tx.blob_versioned_hashes().unwrap().len())) .collect(); @@ -190,6 +192,7 @@ where for (_, block) in old.blocks().iter() { let txs: Vec = block .transactions() + .iter() .filter(|tx: &&reth::primitives::TransactionSigned| { tx.is_eip4844() }) diff --git a/examples/bsc-p2p/src/main.rs b/examples/bsc-p2p/src/main.rs index e46ea4bec357..9e83f34e92f3 100644 --- a/examples/bsc-p2p/src/main.rs +++ b/examples/bsc-p2p/src/main.rs @@ -14,7 +14,9 @@ use chainspec::{boot_nodes, bsc_chain_spec}; use reth_discv4::Discv4ConfigBuilder; -use reth_network::{NetworkConfig, NetworkEvent, NetworkEventListenerProvider, NetworkManager}; +use reth_network::{ + EthNetworkPrimitives, NetworkConfig, NetworkEvent, NetworkEventListenerProvider, NetworkManager, +}; use reth_network_api::PeersInfo; use reth_primitives::{ForkHash, ForkId}; use reth_tracing::{ @@ -62,7 +64,7 @@ async fn main() { // latest BSC forkId, we need to override this to allow connections from BSC nodes let fork_id = ForkId { hash: ForkHash([0x07, 0xb5, 0x43, 0x28]), next: 0 }; net_cfg.fork_filter.set_current_fork_id(fork_id); - let net_manager = NetworkManager::new(net_cfg).await.unwrap(); + let net_manager = NetworkManager::::new(net_cfg).await.unwrap(); // The network handle is our entrypoint into the network. 
let net_handle = net_manager.handle().clone(); diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index 43e5f7428f63..ccba73afbc1d 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -15,7 +15,7 @@ use reth::{ providers::ProviderError, revm::{ interpreter::Host, - primitives::{Env, TransactTo, TxEnv}, + primitives::{address, Address, Bytes, Env, EnvWithHandlerCfg, TransactTo, TxEnv, U256}, Database, DatabaseCommit, Evm, State, }, }; @@ -26,12 +26,7 @@ use reth_evm::execute::{ }; use reth_evm_ethereum::EthEvmConfig; use reth_node_ethereum::{node::EthereumAddOns, BasicBlockExecutorProvider, EthereumNode}; -use reth_primitives::{ - revm_primitives::{ - address, Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, U256, - }, - BlockWithSenders, Receipt, -}; +use reth_primitives::{BlockWithSenders, Receipt}; use std::{fmt::Display, sync::Arc}; pub const SYSTEM_ADDRESS: Address = address!("fffffffffffffffffffffffffffffffffffffffe"); @@ -135,10 +130,7 @@ where header: &alloy_consensus::Header, total_difficulty: U256, ) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); - + let (cfg, block_env) = self.evm_config.cfg_and_block_env(header, total_difficulty); EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) } } diff --git a/examples/custom-dev-node/src/main.rs b/examples/custom-dev-node/src/main.rs index 7fa44418c523..42bb83782aa3 100644 --- a/examples/custom-dev-node/src/main.rs +++ b/examples/custom-dev-node/src/main.rs @@ -50,7 +50,7 @@ async fn main() -> eyre::Result<()> { let head = notifications.next().await.unwrap(); - let tx = head.tip().transactions().next().unwrap(); + let tx = &head.tip().transactions()[0]; assert_eq!(tx.hash(), hash); println!("mined transaction: {hash}"); Ok(()) diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml index 9afd16bea160..d6642a8edfe5 100644 --- a/examples/custom-engine-types/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -10,7 +10,6 @@ reth.workspace = true reth-chainspec.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true -reth-primitives.workspace = true reth-payload-builder.workspace = true reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 704ecb7e3c4b..f9ac5c238659 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -17,11 +17,6 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] -use std::{convert::Infallible, sync::Arc}; - -use serde::{Deserialize, Serialize}; -use thiserror::Error; - use alloy_eips::eip4895::Withdrawals; use alloy_genesis::Genesis; use alloy_primitives::{Address, B256}; @@ -33,7 +28,7 @@ use alloy_rpc_types::{ Withdrawal, }; use reth::{ - api::PayloadTypes, + api::{InvalidPayloadAttributesError, PayloadTypes}, builder::{ components::{ComponentsBuilder, PayloadServiceBuilder}, node::{NodeTypes, NodeTypesWithEngine}, @@ -42,8 +37,13 @@ use reth::{ PayloadBuilderConfig, }, network::NetworkHandle, - providers::{CanonStateSubscriptions, StateProviderFactory}, - rpc::eth::EthApi, + 
payload::ExecutionPayloadValidator, + primitives::{Block, EthPrimitives, SealedBlockFor}, + providers::{CanonStateSubscriptions, EthStorage, StateProviderFactory}, + rpc::{ + eth::EthApi, + types::engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}, + }, tasks::TaskManager, transaction_pool::TransactionPool, }; @@ -71,6 +71,9 @@ use reth_payload_builder::{ }; use reth_tracing::{RethTracer, Tracer}; use reth_trie_db::MerklePatriciaTrie; +use serde::{Deserialize, Serialize}; +use std::{convert::Infallible, sync::Arc}; +use thiserror::Error; /// A custom payload attributes type. #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -170,19 +173,34 @@ impl EngineTypes for CustomEngineTypes { /// Custom engine validator #[derive(Debug, Clone)] pub struct CustomEngineValidator { - chain_spec: Arc, + inner: ExecutionPayloadValidator, +} + +impl CustomEngineValidator { + /// Instantiates a new validator. + pub const fn new(chain_spec: Arc) -> Self { + Self { inner: ExecutionPayloadValidator::new(chain_spec) } + } + + /// Returns the chain spec used by the validator. + #[inline] + fn chain_spec(&self) -> &ChainSpec { + self.inner.chain_spec() + } } impl EngineValidator for CustomEngineValidator where T: EngineTypes, { + type Block = Block; + fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, payload_or_attrs: PayloadOrAttributes<'_, T::PayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, payload_or_attrs) + validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs) } fn ensure_well_formed_attributes( @@ -190,7 +208,7 @@ where version: EngineApiMessageVersion, attributes: &T::PayloadAttributes, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, attributes.into())?; + validate_version_specific_fields(self.chain_spec(), version, attributes.into())?; // custom validation logic - ensure that the custom field is not zero if attributes.custom == 0 { @@ -201,6 +219,23 @@ where Ok(()) } + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError> { + self.inner.ensure_well_formed_payload(payload, sidecar) + } + + fn validate_payload_attributes_against_header( + &self, + _attr: &::PayloadAttributes, + _header: &::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + // skip default timestamp validation + Ok(()) + } } /// Custom engine validator builder @@ -217,7 +252,7 @@ where type Validator = CustomEngineValidator; async fn build(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { - Ok(CustomEngineValidator { chain_spec: ctx.config.chain.clone() }) + Ok(CustomEngineValidator::new(ctx.config.chain.clone())) } } @@ -227,9 +262,10 @@ struct MyCustomNode; /// Configure the node types impl NodeTypes for MyCustomNode { - type Primitives = (); + type Primitives = EthPrimitives; type ChainSpec = ChainSpec; type StateCommitment = MerklePatriciaTrie; + type Storage = EthStorage; } /// Configure the node types with the custom engine types @@ -254,7 +290,14 @@ pub type MyNodeAddOns = RpcAddOns< /// This provides a preset configuration for the node impl Node for MyCustomNode where - N: FullNodeTypes>, + N: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = CustomEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Storage = EthStorage, + >, + >, { type ComponentsBuilder = ComponentsBuilder< N, @@ -291,7 +334,11 
@@ pub struct CustomPayloadServiceBuilder; impl PayloadServiceBuilder for CustomPayloadServiceBuilder where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = CustomEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + >, >, Pool: TransactionPool + Unpin + 'static, { diff --git a/examples/custom-evm/Cargo.toml b/examples/custom-evm/Cargo.toml index 53563ab9575b..e763a932eabf 100644 --- a/examples/custom-evm/Cargo.toml +++ b/examples/custom-evm/Cargo.toml @@ -16,6 +16,7 @@ reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true eyre.workspace = true tokio.workspace = true diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 16aad63c0932..b9a4fc26a95b 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -2,6 +2,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_consensus::Header; use alloy_genesis::Genesis; use alloy_primitives::{address, Address, Bytes, U256}; use reth::{ @@ -10,12 +11,11 @@ use reth::{ BuilderContext, NodeBuilder, }, payload::{EthBuiltPayload, EthPayloadBuilderAttributes}, - primitives::revm_primitives::{Env, PrecompileResult}, revm::{ handler::register::EvmHandler, inspector_handle_register, precompile::{Precompile, PrecompileOutput, PrecompileSpecId}, - primitives::BlockEnv, + primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv}, ContextPrecompiles, Database, Evm, EvmBuilder, GetInspector, }, rpc::types::engine::PayloadAttributes, @@ -33,10 +33,7 @@ use reth_node_ethereum::{ node::{EthereumAddOns, EthereumPayloadBuilder}, BasicBlockExecutorProvider, EthExecutionStrategyFactory, EthereumNode, }; -use reth_primitives::{ - revm_primitives::{CfgEnvWithHandlerCfg, TxEnv}, - Header, TransactionSigned, -}; +use reth_primitives::{EthPrimitives, TransactionSigned}; use reth_tracing::{RethTracer, Tracer}; use std::{convert::Infallible, sync::Arc}; @@ -184,7 +181,7 @@ pub struct MyPayloadBuilder { impl PayloadServiceBuilder for MyPayloadBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, Pool: TransactionPool + Unpin + 'static, Types::Engine: PayloadTypes< diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index d00b8a70224a..7924aabd8692 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -7,6 +7,7 @@ use reth::{ builder::{components::PoolBuilder, BuilderContext, FullNodeTypes}, chainspec::ChainSpec, cli::Cli, + primitives::EthPrimitives, providers::CanonStateSubscriptions, transaction_pool::{ blobstore::InMemoryBlobStore, EthTransactionPool, TransactionValidationTaskExecutor, @@ -47,7 +48,7 @@ pub struct CustomPoolBuilder { /// This will be used to build the transaction pool and its maintenance tasks during launch. 
impl PoolBuilder for CustomPoolBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type Pool = EthTransactionPool; diff --git a/examples/custom-payload-builder/src/generator.rs b/examples/custom-payload-builder/src/generator.rs index 2e264d017a3b..da48a0754f9c 100644 --- a/examples/custom-payload-builder/src/generator.rs +++ b/examples/custom-payload-builder/src/generator.rs @@ -9,7 +9,7 @@ use reth::{ use reth_basic_payload_builder::{BasicPayloadJobGeneratorConfig, PayloadBuilder, PayloadConfig}; use reth_node_api::PayloadBuilderAttributes; use reth_payload_builder::{PayloadBuilderError, PayloadJobGenerator}; -use reth_primitives::SealedHeader; +use reth_primitives::{BlockExt, SealedHeader}; use std::sync::Arc; /// The generator type that creates new jobs that builds empty blocks. @@ -48,7 +48,11 @@ impl EmptyBlockPayloadJobGenerator PayloadJobGenerator for EmptyBlockPayloadJobGenerator where - Client: StateProviderFactory + BlockReaderIdExt + Clone + Unpin + 'static, + Client: StateProviderFactory + + BlockReaderIdExt + + Clone + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + Unpin + 'static, Builder: PayloadBuilder + Unpin + 'static, diff --git a/examples/custom-payload-builder/src/main.rs b/examples/custom-payload-builder/src/main.rs index e46b969adaa1..6047da0dd1ba 100644 --- a/examples/custom-payload-builder/src/main.rs +++ b/examples/custom-payload-builder/src/main.rs @@ -24,6 +24,7 @@ use reth_chainspec::ChainSpec; use reth_node_api::NodeTypesWithEngine; use reth_node_ethereum::{node::EthereumAddOns, EthEngineTypes, EthEvmConfig, EthereumNode}; use reth_payload_builder::PayloadBuilderService; +use reth_primitives::EthPrimitives; pub mod generator; pub mod job; @@ -34,7 +35,13 @@ pub struct CustomPayloadBuilder; impl PayloadServiceBuilder for CustomPayloadBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = EthEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + >, + >, Pool: TransactionPool + Unpin + 'static, { async fn spawn_payload_service( diff --git a/examples/custom-rlpx-subprotocol/src/main.rs b/examples/custom-rlpx-subprotocol/src/main.rs index e16f71071c8c..702d0e8cf5ef 100644 --- a/examples/custom-rlpx-subprotocol/src/main.rs +++ b/examples/custom-rlpx-subprotocol/src/main.rs @@ -14,8 +14,8 @@ use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use reth::builder::NodeHandle; use reth_network::{ - config::SecretKey, protocol::IntoRlpxSubProtocol, NetworkConfig, NetworkManager, - NetworkProtocols, + config::SecretKey, protocol::IntoRlpxSubProtocol, EthNetworkPrimitives, NetworkConfig, + NetworkManager, NetworkProtocols, }; use reth_network_api::{test_utils::PeersHandleProvider, NetworkInfo}; use reth_node_ethereum::EthereumNode; @@ -53,7 +53,7 @@ fn main() -> eyre::Result<()> { .build_with_noop_provider(node.chain_spec()); // spawn the second network instance - let subnetwork = NetworkManager::new(net_cfg).await?; + let subnetwork = NetworkManager::::new(net_cfg).await?; let subnetwork_peer_id = *subnetwork.peer_id(); let subnetwork_peer_addr = subnetwork.local_addr(); let subnetwork_handle = subnetwork.peers_handle(); diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index c3e30fa1cee8..9f95fb51d91a 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -1,10 +1,10 @@ -use alloy_primitives::{Address, Sealable, B256}; +use alloy_primitives::{Address, B256}; use alloy_rpc_types_eth::{Filter, 
FilteredParams}; use reth_chainspec::ChainSpecBuilder; use reth_db::{open_db_read_only, DatabaseEnv}; use reth_node_ethereum::EthereumNode; use reth_node_types::NodeTypesWithDBAdapter; -use reth_primitives::SealedHeader; +use reth_primitives::{BlockExt, SealedHeader, TransactionSigned}; use reth_provider::{ providers::StaticFileProvider, AccountReader, BlockReader, BlockSource, HeaderProvider, ProviderFactory, ReceiptProvider, StateProvider, TransactionsProvider, @@ -63,9 +63,7 @@ fn header_provider_example(provider: T, number: u64) -> eyre: // We can convert a header to a sealed header which contains the hash w/o needing to re-compute // it every time. - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - let sealed_header = SealedHeader::new(header, seal); + let sealed_header = SealedHeader::seal(header); // Can also query the header by hash! let header_by_hash = @@ -85,7 +83,9 @@ fn header_provider_example(provider: T, number: u64) -> eyre: } /// The `TransactionsProvider` allows querying transaction-related information -fn txs_provider_example(provider: T) -> eyre::Result<()> { +fn txs_provider_example>( + provider: T, +) -> eyre::Result<()> { // Try the 5th tx let txid = 5; @@ -94,16 +94,17 @@ fn txs_provider_example(provider: T) -> eyre::Result<() // Can query the tx by hash let tx_by_hash = - provider.transaction_by_hash(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?; + provider.transaction_by_hash(tx.hash())?.ok_or(eyre::eyre!("txhash not found"))?; assert_eq!(tx, tx_by_hash); // Can query the tx by hash with info about the block it was included in - let (tx, meta) = - provider.transaction_by_hash_with_meta(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?; - assert_eq!(tx.hash, meta.tx_hash); + let (tx, meta) = provider + .transaction_by_hash_with_meta(tx.hash())? + .ok_or(eyre::eyre!("txhash not found"))?; + assert_eq!(tx.hash(), meta.tx_hash); // Can reverse lookup the key too - let id = provider.transaction_id(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?; + let id = provider.transaction_id(tx.hash())?.ok_or(eyre::eyre!("txhash not found"))?; assert_eq!(id, txid); // Can find the block of a transaction given its key @@ -118,7 +119,10 @@ fn txs_provider_example(provider: T) -> eyre::Result<() } /// The `BlockReader` allows querying the headers-related tables. -fn block_provider_example(provider: T, number: u64) -> eyre::Result<()> { +fn block_provider_example>( + provider: T, + number: u64, +) -> eyre::Result<()> { // Can query a block by number let block = provider.block(number.into())?.ok_or(eyre::eyre!("block num not found"))?; assert_eq!(block.number, number); @@ -161,7 +165,11 @@ fn block_provider_example(provider: T, number: u64) -> eyre::Res } /// The `ReceiptProvider` allows querying the receipts tables. 
-fn receipts_provider_example( +fn receipts_provider_example< + T: ReceiptProvider + + TransactionsProvider + + HeaderProvider, +>( provider: T, ) -> eyre::Result<()> { let txid = 5; @@ -173,7 +181,7 @@ fn receipts_provider_example TransactionValidationOutcome { // Always return valid TransactionValidationOutcome::Valid { - balance: transaction.cost(), + balance: *transaction.cost(), state_nonce: transaction.nonce(), transaction: ValidTransaction::Valid(transaction), propagate: false, diff --git a/examples/network/src/main.rs b/examples/network/src/main.rs index 1d8f436f318f..bd4f232a754c 100644 --- a/examples/network/src/main.rs +++ b/examples/network/src/main.rs @@ -8,7 +8,8 @@ use futures::StreamExt; use reth_network::{ - config::rng_secret_key, NetworkConfig, NetworkEventListenerProvider, NetworkManager, + config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkEventListenerProvider, + NetworkManager, }; use reth_provider::test_utils::NoopProvider; @@ -24,7 +25,7 @@ async fn main() -> eyre::Result<()> { let config = NetworkConfig::builder(local_key).mainnet_boot_nodes().build(client); // create the network instance - let network = NetworkManager::new(config).await?; + let network = NetworkManager::::new(config).await?; // get a handle to the network to interact with it let handle = network.handle().clone(); diff --git a/examples/polygon-p2p/src/main.rs b/examples/polygon-p2p/src/main.rs index 6078ae14cb85..bcc17a24f8d2 100644 --- a/examples/polygon-p2p/src/main.rs +++ b/examples/polygon-p2p/src/main.rs @@ -12,7 +12,8 @@ use chain_cfg::{boot_nodes, head, polygon_chain_spec}; use reth_discv4::Discv4ConfigBuilder; use reth_network::{ - config::NetworkMode, NetworkConfig, NetworkEvent, NetworkEventListenerProvider, NetworkManager, + config::NetworkMode, EthNetworkPrimitives, NetworkConfig, NetworkEvent, + NetworkEventListenerProvider, NetworkManager, }; use reth_tracing::{ tracing::info, tracing_subscriber::filter::LevelFilter, LayerInfo, LogFormat, RethTracer, @@ -57,7 +58,7 @@ async fn main() { discv4_cfg.add_boot_nodes(boot_nodes()).lookup_interval(interval); let net_cfg = net_cfg.set_discovery_v4(discv4_cfg.build()); - let net_manager = NetworkManager::new(net_cfg).await.unwrap(); + let net_manager = NetworkManager::::new(net_cfg).await.unwrap(); // The network handle is our entrypoint into the network. let net_handle = net_manager.handle(); diff --git a/examples/rpc-db/src/myrpc_ext.rs b/examples/rpc-db/src/myrpc_ext.rs index e38b6fc24d37..6cc7a4142f5f 100644 --- a/examples/rpc-db/src/myrpc_ext.rs +++ b/examples/rpc-db/src/myrpc_ext.rs @@ -22,7 +22,7 @@ pub struct MyRpcExt { impl MyRpcExtApiServer for MyRpcExt where - Provider: BlockReaderIdExt + 'static, + Provider: BlockReaderIdExt + 'static, { /// Showcasing how to implement a custom rpc method /// using the provider. 
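Several example programs above now spell out `NetworkManager::<EthNetworkPrimitives>::new(...)`. The turbofish is needed because nothing else at those call sites pins the network-primitives type parameter. A minimal sketch with an invented `Manager<P>` standing in for the real type:

```rust
use std::marker::PhantomData;

struct EthPrimitives;

struct Manager<P> {
    _primitives: PhantomData<P>,
}

impl<P> Manager<P> {
    fn new() -> Self {
        Self { _primitives: PhantomData }
    }
}

fn main() {
    // `let m = Manager::new();` alone would be ambiguous: `P` never appears in
    // the arguments, so the caller must name it, e.g. via the turbofish.
    let _manager = Manager::<EthPrimitives>::new();
}
```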
diff --git a/examples/stateful-precompile/Cargo.toml b/examples/stateful-precompile/Cargo.toml index 47a784c36e14..478886d061f8 100644 --- a/examples/stateful-precompile/Cargo.toml +++ b/examples/stateful-precompile/Cargo.toml @@ -15,6 +15,7 @@ reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true eyre.workspace = true parking_lot.workspace = true diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index 371fbf4f78bc..f683af4e430a 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -2,17 +2,21 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_consensus::Header; use alloy_genesis::Genesis; use alloy_primitives::{Address, Bytes, U256}; use parking_lot::RwLock; use reth::{ api::NextBlockEnvAttributes, builder::{components::ExecutorBuilder, BuilderContext, NodeBuilder}, - primitives::revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv}, revm::{ handler::register::EvmHandler, inspector_handle_register, precompile::{Precompile, PrecompileSpecId}, + primitives::{ + BlockEnv, CfgEnvWithHandlerCfg, Env, PrecompileResult, SpecId, StatefulPrecompileMut, + TxEnv, + }, ContextPrecompile, ContextPrecompiles, Database, Evm, EvmBuilder, GetInspector, }, tasks::TaskManager, @@ -24,10 +28,7 @@ use reth_node_ethereum::{ node::EthereumAddOns, BasicBlockExecutorProvider, EthEvmConfig, EthExecutionStrategyFactory, EthereumNode, }; -use reth_primitives::{ - revm_primitives::{SpecId, StatefulPrecompileMut}, - Header, TransactionSigned, -}; +use reth_primitives::TransactionSigned; use reth_tracing::{RethTracer, Tracer}; use schnellru::{ByLength, LruMap}; use std::{collections::HashMap, convert::Infallible, sync::Arc}; diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index de46f62675c3..2fc0c7512441 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -38,6 +38,7 @@ revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } alloy-rlp.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true +alloy-consensus.workspace = true walkdir = "2.3.3" serde.workspace = true diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 2b6b3baa81ed..292b32e8ce0f 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -1,6 +1,7 @@ //! 
Shared models for use crate::{assert::assert_equal, Error}; +use alloy_consensus::Header as RethHeader; use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{keccak256, Address, Bloom, Bytes, B256, B64, U256}; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; @@ -9,9 +10,7 @@ use reth_db_api::{ cursor::DbDupCursorRO, transaction::{DbTx, DbTxMut}, }; -use reth_primitives::{ - Account as RethAccount, Bytecode, Header as RethHeader, SealedHeader, StorageEntry, -}; +use reth_primitives::{Account as RethAccount, Bytecode, SealedHeader, StorageEntry}; use serde::Deserialize; use std::{collections::BTreeMap, ops::Deref}; diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml index 3e0f58a7bd08..a6197d7e0cf7 100644 --- a/testing/testing-utils/Cargo.toml +++ b/testing/testing-utils/Cargo.toml @@ -12,7 +12,7 @@ repository.workspace = true workspace = true [dependencies] -reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives = { workspace = true, features = ["secp256k1", "arbitrary"] } alloy-genesis.workspace = true alloy-primitives.workspace = true @@ -24,3 +24,4 @@ secp256k1 = { workspace = true, features = ["rand"] } [dev-dependencies] alloy-eips.workspace = true +reth-primitives-traits .workspace = true diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index c24840a26333..9963b447e96d 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -1,14 +1,14 @@ //! Generators for different data structures like block headers, block bodies and ranges of those. -use alloy_consensus::{Transaction as _, TxLegacy}; +use alloy_consensus::{Header, Transaction as _, TxLegacy}; use alloy_eips::eip4895::{Withdrawal, Withdrawals}; -use alloy_primitives::{Address, BlockNumber, Bytes, Sealable, TxKind, B256, U256}; +use alloy_primitives::{Address, BlockNumber, Bytes, TxKind, B256, U256}; pub use rand::Rng; use rand::{ distributions::uniform::SampleRange, rngs::StdRng, seq::SliceRandom, thread_rng, SeedableRng, }; use reth_primitives::{ - proofs, sign_message, Account, BlockBody, Header, Log, Receipt, SealedBlock, SealedHeader, + proofs, sign_message, Account, BlockBody, Log, Receipt, SealedBlock, SealedHeader, StorageEntry, Transaction, TransactionSigned, }; use secp256k1::{Keypair, Secp256k1}; @@ -99,16 +99,14 @@ pub fn random_header_range( /// /// The header is assumed to not be correct if validated. pub fn random_header(rng: &mut R, number: u64, parent: Option) -> SealedHeader { - let header = reth_primitives::Header { + let header = alloy_consensus::Header { number, nonce: rng.gen(), difficulty: U256::from(rng.gen::()), parent_hash: parent.unwrap_or_default(), ..Default::default() }; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) + SealedHeader::seal(header) } /// Generates a random legacy [Transaction]. @@ -151,7 +149,7 @@ pub fn sign_tx_with_key_pair(key_pair: Keypair, tx: Transaction) -> TransactionS let signature = sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); - TransactionSigned::from_transaction_and_signature(tx, signature) + TransactionSigned::new_unhashed(tx, signature) } /// Generates a set of [Keypair]s based on the desired count. 
@@ -203,7 +201,7 @@ pub fn random_block(rng: &mut R, number: u64, block_params: BlockParams) }); let withdrawals_root = withdrawals.as_ref().map(|w| proofs::calculate_withdrawals_root(w)); - let sealed = Header { + let header = Header { parent_hash: block_params.parent.unwrap_or_default(), number, gas_used: total_gas, @@ -215,13 +213,10 @@ pub fn random_block(rng: &mut R, number: u64, block_params: BlockParams) requests_hash: None, withdrawals_root, ..Default::default() - } - .seal_slow(); - - let (header, seal) = sealed.into_parts(); + }; SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { transactions, ommers, withdrawals: withdrawals.map(Withdrawals::new) }, } } @@ -458,6 +453,7 @@ mod tests { use alloy_eips::eip2930::AccessList; use alloy_primitives::{hex, PrimitiveSignature as Signature}; use reth_primitives::public_key_to_address; + use reth_primitives_traits::SignedTransaction; use std::str::FromStr; #[test] @@ -484,7 +480,7 @@ mod tests { sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), signature_hash) .unwrap(); - let signed = TransactionSigned::from_transaction_and_signature(tx.clone(), signature); + let signed = TransactionSigned::new_unhashed(tx.clone(), signature); let recovered = signed.recover_signer().unwrap(); let expected = public_key_to_address(key_pair.public_key());
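Finally, the generator hunks replace the three-step `seal_slow()` / `into_parts()` / `SealedHeader::new(...)` dance with the one-shot `SealedHeader::seal(header)`. A std-only toy of the same idea, pairing a value with a hash computed once at construction (`Sealed` is invented here, and std's `DefaultHasher` stands in for keccak):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

struct Sealed<T> {
    value: T,
    hash: u64,
}

impl<T: Hash> Sealed<T> {
    /// One-step equivalent of hash-then-split-then-reassemble.
    fn seal(value: T) -> Self {
        let mut hasher = DefaultHasher::new();
        value.hash(&mut hasher);
        let hash = hasher.finish();
        Self { value, hash }
    }
}

fn main() {
    let sealed = Sealed::seal("header");
    // The hash is computed once at seal time and can be reused afterwards.
    assert_eq!(sealed.hash, Sealed::seal("header").hash);
    assert_eq!(sealed.value, "header");
}
```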