diff --git a/.config/zepter.yaml b/.config/zepter.yaml
new file mode 100644
index 00000000000..22b0bf609e6
--- /dev/null
+++ b/.config/zepter.yaml
@@ -0,0 +1,41 @@
+version:
+  format: 1
+  # Minimum zepter version that is expected to work. This is just for printing a nice error
+  # message when someone tries to use an older version.
+  binary: 0.13.2
+
+# The examples in the following comments assume crate `A` to have a dependency on crate `B`.
+workflows:
+  check:
+    - [
+        "lint",
+        # Check that `A` activates the features of `B`.
+        "propagate-feature",
+        # These are the features to check:
+        "--features=std,optimism,dev,asm-keccak,jemalloc,jemalloc-prof,tracy-allocator,serde-bincode-compat,serde,test-utils,arbitrary,bench",
+        # Do not try to add a new section into `[features]` of `A` only because `B` exposes that feature. There are edge cases where this is still needed, but we can add them manually.
+        "--left-side-feature-missing=ignore",
+        # Ignore the case that `A` is outside of the workspace. Otherwise it will report errors in external dependencies that we have no influence on.
+
+        "--left-side-outside-workspace=ignore",
+        # Auxiliary flags:
+        "--offline",
+        "--locked",
+        "--show-path",
+        "--quiet",
+      ]
+  default:
+    # Running `zepter` with no subcommand will check & fix.
+    - [$check.0, "--fix"]
+
+# Will be displayed when any workflow fails:
+help:
+  text: |
+    Reth uses the Zepter CLI to detect abnormalities in Cargo features, e.g. missing propagation.
+
+    It looks like one or more checks failed; please check the console output.
+
+    You can try to automatically address them by installing zepter (`cargo install zepter --locked`) and simply running `zepter` in the workspace root.
+  links:
+    - "https://github.com/paradigmxyz/reth/pull/11888"
+    - "https://github.com/ggwpez/zepter"
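For orientation, the pattern the `propagate-feature` check above enforces looks roughly like the Cargo manifest sketch below. Crates `a` and `b` are hypothetical, not taken from the reth workspace:

```toml
# a/Cargo.toml — hypothetical crate `a` depending on crate `b`,
# where both crates expose a `std` feature
[dependencies]
b = { version = "0.1", default-features = false }

[features]
# What `propagate-feature` enforces: enabling `a/std` must also enable `b/std`.
# Without the `b/std` entry, zepter reports a missing propagation, and running
# `zepter` with no subcommand (the `default` workflow above) inserts it.
std = ["b/std"]
```
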
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 488e6c90cf7..5a1d1df7261 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -20,11 +20,11 @@ crates/fs-util/ @onbjerg @emhane
 crates/metrics/ @onbjerg
 crates/net/ @emhane @mattsse @Rjected
 crates/net/downloaders/ @onbjerg @rkrasiuk @emhane
-crates/node/ @mattsse @Rjected @onbjerg
+crates/node/ @mattsse @Rjected @onbjerg @emhane @klkvr
 crates/optimism/ @mattsse @Rjected @fgimenez @emhane
 crates/payload/ @mattsse @Rjected
-crates/primitives/ @Rjected
-crates/primitives-traits/ @Rjected @joshieDo
+crates/primitives/ @Rjected @emhane @mattsse @klkvr
+crates/primitives-traits/ @Rjected @joshieDo @emhane @mattsse @klkvr
 crates/prune/ @shekhirin @joshieDo
 crates/revm/ @mattsse @rakita
 crates/rpc/ @mattsse @Rjected @emhane
diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml
index 1142a5bf251..b01d4518f75 100644
--- a/.github/ISSUE_TEMPLATE/bug.yml
+++ b/.github/ISSUE_TEMPLATE/bug.yml
@@ -60,6 +60,18 @@ body:
         - Mac (Apple Silicon)
         - Windows (x86)
         - Windows (ARM)
+  - type: dropdown
+    id: container_type
+    attributes:
+      label: Container Type
+      description: Were you running it in a container?
+      multiple: true
+      options:
+        - Not running in a container
+        - Docker
+        - Kubernetes
+        - LXC/LXD
+        - Other
     validations:
       required: true
   - type: textarea
diff --git a/.github/assets/check_rv32imac.sh b/.github/assets/check_rv32imac.sh
new file mode 100755
index 00000000000..075ffb6dc40
--- /dev/null
+++ b/.github/assets/check_rv32imac.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+set +e  # Disable immediate exit on error
+
+# Array of crates to check
+crates_to_check=(
+  reth-codecs-derive
+  reth-ethereum-forks
+  reth-ethereum-primitives
+  reth-primitives-traits
+  reth-optimism-forks
+  reth-network-peers
+  # reth-evm
+  # reth-primitives
+  # reth-optimism-chainspec
+)
+
+# Array to hold the results
+results=()
+# Flag to track if any command fails
+any_failed=0
+
+for crate in "${crates_to_check[@]}"; do
+  cmd="cargo +stable build -p $crate --target riscv32imac-unknown-none-elf --no-default-features"
+
+  if [ -n "$CI" ]; then
+    echo "::group::$cmd"
+  else
+    printf "\n%s:\n  %s\n" "$crate" "$cmd"
+  fi
+
+  set +e  # Disable immediate exit on error
+  # Run the command and capture the return code
+  $cmd
+  ret_code=$?
+  set -e  # Re-enable immediate exit on error
+
+  # Store the result in the results array
+  if [ $ret_code -eq 0 ]; then
+    results+=("1:✅:$crate")
+  else
+    results+=("2:❌:$crate")
+    any_failed=1
+  fi
+
+  if [ -n "$CI" ]; then
+    echo "::endgroup::"
+  fi
+done
+
+# Sort the results by status and then by crate name
+IFS=$'\n' sorted_results=($(sort <<<"${results[*]}"))
+unset IFS
+
+# Print summary
+echo -e "\nSummary of build results:"
+for result in "${sorted_results[@]}"; do
+  status="${result#*:}"
+  status="${status%%:*}"
+  crate="${result##*:}"
+  echo "$status $crate"
+done
+
+# Exit with a non-zero status if any command failed
+exit $any_failed
diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh
index 1b1c0641fc0..971327f0cb2 100755
--- a/.github/assets/check_wasm.sh
+++ b/.github/assets/check_wasm.sh
@@ -3,17 +3,18 @@ set +e # Disable immediate exit on error
 # Array of crates to compile
 crates=($(cargo metadata --format-version=1 --no-deps | jq -r '.packages[].name' | grep '^reth' | sort))
+
 # Array of crates to exclude
+# Used with the `contains` function.
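+# (Illustrative call, assuming a helper of the form `contains needle "${haystack[@]}"`:
+#   contains "$crate" "${exclude_crates[@]}" && continue
+# The directive below silences SC2034, shellcheck's unused-variable warning,
+# since the array is only read via that helper.)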
+# shellcheck disable=SC2034 exclude_crates=( # The following are not working yet, but known to be fixable reth-exex-types # https://github.com/paradigmxyz/reth/issues/9946 # The following require investigation if they can be fixed - reth-auto-seal-consensus reth-basic-payload-builder reth-beacon-consensus reth-bench reth-blockchain-tree - reth-chain-state reth-cli reth-cli-commands reth-cli-runner @@ -24,23 +25,19 @@ exclude_crates=( reth-dns-discovery reth-downloaders reth-e2e-test-utils - reth-engine-primitives reth-engine-service reth-engine-tree reth-engine-util reth-eth-wire reth-ethereum-cli - reth-ethereum-engine-primitives reth-ethereum-payload-builder reth-etl - reth-evm-ethereum reth-exex reth-exex-test-utils reth-ipc reth-net-nat reth-network reth-node-api - reth-node-types reth-node-builder reth-node-core reth-node-ethereum @@ -50,8 +47,7 @@ exclude_crates=( reth-optimism-node reth-optimism-payload-builder reth-optimism-rpc - reth-payload-builder - reth-payload-primitives + reth-optimism-primitives reth-rpc reth-rpc-api reth-rpc-api-testing-util @@ -73,6 +69,8 @@ exclude_crates=( reth-static-file # tokio reth-transaction-pool # c-kzg reth-trie-parallel # tokio + reth-testing-utils + reth-network-peers ) # Array to hold the results diff --git a/.github/assets/hive/Dockerfile b/.github/assets/hive/Dockerfile index 9f75ba6f1cf..25b71bf2187 100644 --- a/.github/assets/hive/Dockerfile +++ b/.github/assets/hive/Dockerfile @@ -5,4 +5,5 @@ COPY dist/reth /usr/local/bin COPY LICENSE-* ./ EXPOSE 30303 30303/udp 9001 8545 8546 -ENTRYPOINT ["/usr/local/bin/reth"] \ No newline at end of file +ENV RUST_LOG=debug +ENTRYPOINT ["/usr/local/bin/reth"] diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index d4b3d2bcbd3..ec7bd054900 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -41,8 +41,6 @@ engine-withdrawals: - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org (Paris) (reth) - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org Sync (Paris) (reth) -# https://github.com/paradigmxyz/reth/issues/8305 -# https://github.com/paradigmxyz/reth/issues/6217 engine-api: [] # https://github.com/paradigmxyz/reth/issues/8305 @@ -58,6 +56,4 @@ engine-cancun: - Invalid NewPayload, Incomplete VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - Invalid NewPayload, Extra VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) -# https://github.com/paradigmxyz/reth/issues/8579 -sync: - - sync reth -> reth +sync: [] diff --git a/.github/assets/hive/no_sim_build.diff b/.github/assets/hive/no_sim_build.diff index 0b109efe7cd..6127a4ecb73 100644 --- a/.github/assets/hive/no_sim_build.diff +++ b/.github/assets/hive/no_sim_build.diff @@ -1,24 +1,23 @@ diff --git a/internal/libdocker/builder.go b/internal/libdocker/builder.go -index 4731c9d..d717f52 100644 +index e4bf99b6..2023f7e2 100644 --- a/internal/libdocker/builder.go +++ b/internal/libdocker/builder.go -@@ -7,9 +7,7 @@ import ( - "fmt" +@@ -8,7 +8,6 @@ import ( "io" "io/fs" + "log/slog" - "os" "path/filepath" -- "strings" - - "github.com/ethereum/hive/internal/libhive" - docker "github.com/fsouza/go-dockerclient" -@@ -53,24 +51,8 @@ func (b *Builder) BuildClientImage(ctx context.Context, client libhive.ClientDes + "slices" + "strings" +@@ -49,25 +48,8 @@ func (b *Builder) BuildClientImage(ctx context.Context, client libhive.ClientDes // BuildSimulatorImage 
builds a docker image of a simulator. - func (b *Builder) BuildSimulatorImage(ctx context.Context, name string) (string, error) { + func (b *Builder) BuildSimulatorImage(ctx context.Context, name string, buildArgs map[string]string) (string, error) { - dir := b.config.Inventory.SimulatorDirectory(name) - buildContextPath := dir - buildDockerfile := "Dockerfile" +- - // build context dir of simulator can be overridden with "hive_context.txt" file containing the desired build path - if contextPathBytes, err := os.ReadFile(filepath.Join(filepath.FromSlash(dir), "hive_context.txt")); err == nil { - buildContextPath = filepath.Join(dir, strings.TrimSpace(string(contextPathBytes))) @@ -32,14 +31,14 @@ index 4731c9d..d717f52 100644 - } - } tag := fmt.Sprintf("hive/simulators/%s:latest", name) -- err := b.buildImage(ctx, buildContextPath, buildDockerfile, tag, nil) +- err := b.buildImage(ctx, buildContextPath, buildDockerfile, tag, buildArgs) - return tag, err + return tag, nil } // BuildImage creates a container by archiving the given file system, diff --git a/internal/libdocker/proxy.go b/internal/libdocker/proxy.go -index a53e5af..0bb2ea9 100644 +index d3a14ae6..8779671e 100644 --- a/internal/libdocker/proxy.go +++ b/internal/libdocker/proxy.go @@ -16,7 +16,7 @@ const hiveproxyTag = "hive/hiveproxy" diff --git a/.github/assets/kurtosis_network_params.yaml b/.github/assets/kurtosis_network_params.yaml index 9c104de4950..e8cc1b51dc8 100644 --- a/.github/assets/kurtosis_network_params.yaml +++ b/.github/assets/kurtosis_network_params.yaml @@ -2,8 +2,6 @@ participants: - el_type: geth cl_type: lighthouse - el_type: reth - el_extra_params: - - --engine.experimental el_image: "ghcr.io/paradigmxyz/reth:kurtosis-ci" cl_type: teku additional_services: diff --git a/.github/assets/kurtosis_op_network_params.yaml b/.github/assets/kurtosis_op_network_params.yaml new file mode 100644 index 00000000000..0e1516cc889 --- /dev/null +++ b/.github/assets/kurtosis_op_network_params.yaml @@ -0,0 +1,15 @@ +ethereum_package: + participants: + - el_type: reth + cl_type: lighthouse +optimism_package: + chains: + - participants: + - el_type: op-geth + cl_type: op-node + - el_type: op-reth + el_image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci" + cl_type: op-node + batcher_params: + extra_params: + - "--throttle-interval=0" diff --git a/.github/workflows/compact.yml b/.github/workflows/compact.yml new file mode 100644 index 00000000000..484b27c820d --- /dev/null +++ b/.github/workflows/compact.yml @@ -0,0 +1,47 @@ +# Ensures that `Compact` codec changes are backwards compatible. +# +# 1) checkout `main` +# 2) randomly generate and serialize to disk many different type vectors with `Compact` (eg. Header, Transaction, etc) +# 3) checkout `pr` +# 4) deserialize previously generated test vectors + +on: + + pull_request: + merge_group: + push: + branches: [main] + +env: + CARGO_TERM_COLOR: always + +name: compact-codec +jobs: + compact-codec: + runs-on: + group: Reth + strategy: + matrix: + bin: + - cargo run --bin reth --features "dev" + - cargo run --bin op-reth --features "optimism dev" --manifest-path crates/optimism/bin/Cargo.toml + steps: + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Checkout base + uses: actions/checkout@v4 + with: + ref: ${{ github.base_ref || 'main' }} + # On `main` branch, generates test vectors and serializes them to disk using `Compact`. 
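+      # A sketch of the round-trip being guarded (method shapes follow the
+      # `Compact` trait; illustrative, not the actual test-vectors code):
+      #   let len = value.to_compact(&mut buf);           // written on `main`
+      #   let (decoded, _) = T::from_compact(&buf, len);  // read on the PR branch
+      #   assert_eq!(value, decoded);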
+ - name: Generate compact vectors + run: | + ${{ matrix.bin }} -- test-vectors compact --write + - name: Checkout PR + uses: actions/checkout@v4 + with: + clean: false + # On incoming merge try to read and decode previously generated vectors with `Compact` + - name: Read vectors + run: ${{ matrix.bin }} -- test-vectors compact --read diff --git a/.github/workflows/deny.yml b/.github/workflows/deny.yml deleted file mode 100644 index f85484ca2ec..00000000000 --- a/.github/workflows/deny.yml +++ /dev/null @@ -1,27 +0,0 @@ -# Runs `cargo-deny` when modifying `Cargo.lock`. - -name: deny - -on: - push: - branches: [main] - paths: [Cargo.lock] - pull_request: - branches: [main] - paths: [Cargo.lock] - merge_group: - -env: - CARGO_TERM_COLOR: always - -concurrency: deny-${{ github.head_ref || github.run_id }} - -jobs: - deny: - name: deny - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: EmbarkStudios/cargo-deny-action@v2 - with: - command: check all diff --git a/.github/workflows/eth-sync.yml b/.github/workflows/eth-sync.yml deleted file mode 100644 index f473e29a57c..00000000000 --- a/.github/workflows/eth-sync.yml +++ /dev/null @@ -1,53 +0,0 @@ -# Runs an ethereum mainnet sync test. - -name: eth-sync-test - -on: - pull_request: - merge_group: - push: - branches: [ main ] - -env: - CARGO_TERM_COLOR: always - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - sync: - name: sync / 100k blocks - # Only run sync tests in merge groups - if: github.event_name == 'merge_group' - runs-on: - group: Reth - env: - RUST_LOG: info,sync=error - RUST_BACKTRACE: 1 - timeout-minutes: 60 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - name: Build reth - run: | - cargo install --features asm-keccak,jemalloc --path bin/reth - - name: Run sync - run: | - reth node \ - --debug.tip 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 \ - --debug.max-block 100000 \ - --debug.terminate - - name: Verify the target block hash - run: | - reth db get static-file headers 100000 \ - | grep 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 - - name: Run stage unwind for 100 blocks - run: | - reth stage unwind num-blocks 100 - - name: Run stage unwind to block hash - run: | - reth stage unwind to-block 0x52e0509d33a988ef807058e2980099ee3070187f7333aae12b64d4d675f34c5a diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 6c50923d3e6..b8d3f378fca 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -5,8 +5,8 @@ name: hive on: workflow_dispatch: schedule: - # every day - - cron: "0 0 * * *" + # run every 12 hours + - cron: "0 */12 * * *" env: CARGO_TERM_COLOR: always diff --git a/.github/workflows/kurtosis-op.yml b/.github/workflows/kurtosis-op.yml new file mode 100644 index 00000000000..c7307d10c7b --- /dev/null +++ b/.github/workflows/kurtosis-op.yml @@ -0,0 +1,121 @@ +# Runs simple OP stack setup in Kurtosis + +name: kurtosis-op + +on: + workflow_dispatch: + schedule: + # every day + - cron: "0 1 * * *" + +env: + CARGO_TERM_COLOR: always + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + prepare-reth: + if: github.repository == 'paradigmxyz/reth' + timeout-minutes: 45 + runs-on: + group: Reth + steps: + - uses: actions/checkout@v4 + - run: mkdir artifacts + - uses: 
dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Build reth + run: | + cargo build --features optimism,asm-keccak --profile hivetests --bin op-reth --manifest-path crates/optimism/bin/Cargo.toml --locked + mkdir dist && cp ./target/hivetests/op-reth ./dist/reth + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Build and export reth image + uses: docker/build-push-action@v6 + with: + context: . + file: .github/assets/hive/Dockerfile + tags: ghcr.io/paradigmxyz/op-reth:kurtosis-ci + outputs: type=docker,dest=./artifacts/reth_image.tar + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Upload reth image + uses: actions/upload-artifact@v4 + with: + name: artifacts + path: ./artifacts + + test: + timeout-minutes: 60 + strategy: + fail-fast: false + name: run kurtosis + runs-on: + group: Reth + needs: + - prepare-reth + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download reth image + uses: actions/download-artifact@v4 + with: + name: artifacts + path: /tmp + + - name: Load Docker image + run: | + docker load -i /tmp/reth_image.tar & + wait + docker image ls -a + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + + - name: Run kurtosis + run: | + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install kurtosis-cli + kurtosis engine start + kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params.yaml + ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') + GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number') + RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number') + echo "GETH_RPC=http://127.0.0.1:$GETH_PORT" >> $GITHUB_ENV + echo "RETH_RPC=http://127.0.0.1:$RETH_PORT" >> $GITHUB_ENV + + - name: Assert that clients advance + run: | + for i in {1..100}; do + sleep 5 + BLOCK_GETH=$(cast bn --rpc-url $GETH_RPC) + BLOCK_RETH=$(cast bn --rpc-url $RETH_RPC) + + if [ $BLOCK_GETH -ge 100 ] && [ $BLOCK_RETH -ge 100 ] ; then exit 0; fi + echo "Waiting for clients to advance..., Reth: $BLOCK_RETH Geth: $BLOCK_GETH" + done + kurtosis service logs -a op-devnet op-el-2-op-reth-op-node-op-kurtosis + kurtosis service logs -a op-devnet op-cl-2-op-node-op-reth-op-kurtosis + exit 1 + + + notify-on-error: + needs: test + if: failure() + runs-on: + group: Reth + steps: + - name: Slack Webhook Action + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_COLOR: ${{ job.status }} + SLACK_MESSAGE: "Failed run: https://github.com/paradigmxyz/reth/actions/runs/${{ github.run_id }}" + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/kurtosis.yml b/.github/workflows/kurtosis.yml index 74d26dbd3ee..3e1b7432111 100644 --- a/.github/workflows/kurtosis.yml +++ b/.github/workflows/kurtosis.yml @@ -5,8 +5,8 @@ name: kurtosis on: workflow_dispatch: schedule: - # every day - - cron: "0 1 * * *" + # run every 12 hours + - cron: "0 */12 * * *" env: CARGO_TERM_COLOR: always diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index efa38857e06..418fd4cc4e6 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -18,10 +18,10 @@ jobs: matrix: include: - type: 
ethereum - args: --bin reth --workspace + args: --bin reth --workspace --lib --examples --tests --benches --locked features: "ethereum asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" - type: optimism - args: --bin op-reth --workspace + args: --bin op-reth --workspace --lib --examples --tests --benches --locked features: "optimism asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" - type: book args: --manifest-path book/sources/Cargo.toml --workspace --bins @@ -71,10 +71,26 @@ jobs: - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - - uses: dcarbone/install-jq-action@v2 + - uses: dcarbone/install-jq-action@v3 - name: Run Wasm checks run: .github/assets/check_wasm.sh + riscv: + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + target: riscv32imac-unknown-none-elf + - uses: taiki-e/install-action@cargo-hack + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - uses: dcarbone/install-jq-action@v3 + - name: Run RISC-V checks + run: .github/assets/check_rv32imac.sh + crate-checks: runs-on: ubuntu-latest timeout-minutes: 30 @@ -103,7 +119,7 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@master with: - toolchain: "1.81" # MSRV + toolchain: "1.82" # MSRV - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -163,7 +179,7 @@ jobs: - uses: dtolnay/rust-toolchain@nightly - uses: dtolnay/rust-toolchain@master with: - toolchain: "1.81" # MSRV + toolchain: "1.82" # MSRV - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -219,6 +235,25 @@ jobs: env: RUSTFLAGS: -D warnings + # Check crates correctly propagate features + feature-propagation: + runs-on: ubuntu-latest + timeout-minutes: 20 + steps: + - uses: actions/checkout@v4 + - name: fetch deps + run: | + # Eagerly pull dependencies + time cargo metadata --format-version=1 --locked > /dev/null + - name: run zepter + run: | + cargo install zepter -f --locked + zepter --version + time zepter run check + + deny: + uses: ithacaxyz/ci/.github/workflows/deny.yml@main + lint-success: name: lint success runs-on: ubuntu-latest @@ -236,6 +271,8 @@ jobs: - grafana - no-test-deps - features + - feature-propagation + - deny timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/.github/workflows/op-sync.yml b/.github/workflows/op-sync.yml deleted file mode 100644 index 2a223391d71..00000000000 --- a/.github/workflows/op-sync.yml +++ /dev/null @@ -1,55 +0,0 @@ -# Runs a base mainnet sync test. 
- -name: op-sync-test - -on: - pull_request: - merge_group: - push: - branches: [ main ] - -env: - CARGO_TERM_COLOR: always - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - sync: - name: op sync / 10k blocks - # Only run sync tests in merge groups - if: github.event_name == 'merge_group' - runs-on: - group: Reth - env: - RUST_LOG: info,sync=error - RUST_BACKTRACE: 1 - timeout-minutes: 60 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - name: Build op-reth - run: make install-op - - name: Run sync - # https://basescan.org/block/10000 - run: | - op-reth node \ - --chain base \ - --debug.tip 0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7 \ - --debug.max-block 10000 \ - --debug.terminate - - name: Verify the target block hash - run: | - op-reth db --chain base get static-file headers 10000 \ - | grep 0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7 - - name: Run stage unwind for 100 blocks - run: | - op-reth stage --chain base unwind num-blocks 100 - - name: Run stage unwind to block hash - run: | - op-reth stage --chain base unwind to-block 0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de - diff --git a/.github/workflows/release-dist.yml b/.github/workflows/release-dist.yml index 2142360e039..f7df80e81f9 100644 --- a/.github/workflows/release-dist.yml +++ b/.github/workflows/release-dist.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Update Homebrew formula - uses: dawidd6/action-homebrew-bump-formula@v3 + uses: dawidd6/action-homebrew-bump-formula@v4 with: token: ${{ secrets.HOMEBREW }} no_fork: true diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml new file mode 100644 index 00000000000..531d04b2e48 --- /dev/null +++ b/.github/workflows/sync.yml @@ -0,0 +1,63 @@ +# Runs sync tests. 
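+#
+# The matrix below folds the former eth-sync and op-sync workflows (deleted
+# above) into one job: each entry pins a binary, chain, tip hash, max block,
+# and unwind target, so mainnet (reth, 100k blocks) and base (op-reth, 10k
+# blocks) share the same build/sync/verify/unwind steps.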
+ +name: sync test + +on: + merge_group: + +env: + CARGO_TERM_COLOR: always + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + sync: + name: sync (${{ matrix.chain.bin }}) + runs-on: + group: Reth + env: + RUST_LOG: info,sync=error + RUST_BACKTRACE: 1 + timeout-minutes: 60 + strategy: + matrix: + chain: + - build: install + bin: reth + chain: mainnet + tip: "0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4" + block: 100000 + unwind-target: "0x52e0509d33a988ef807058e2980099ee3070187f7333aae12b64d4d675f34c5a" + - build: install-op + bin: op-reth + chain: base + tip: "0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7" + block: 10000 + unwind-target: "0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de" + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Build ${{ matrix.chain.bin }} + run: make ${{ matrix.chain.build }} + - name: Run sync + run: | + ${{ matrix.chain.bin }} node \ + --chain ${{ matrix.chain.chain }} \ + --debug.tip ${{ matrix.chain.tip }} \ + --debug.max-block ${{ matrix.chain.block }} \ + --debug.terminate + - name: Verify the target block hash + run: | + ${{ matrix.chain.bin }} db --chain ${{ matrix.chain.chain }} get static-file headers ${{ matrix.chain.block }} \ + | grep ${{ matrix.chain.tip }} + - name: Run stage unwind for 100 blocks + run: | + ${{ matrix.chain.bin }} stage --chain ${{ matrix.chain.chain }} unwind num-blocks 100 + - name: Run stage unwind to block hash + run: | + ${{ matrix.chain.bin }} stage --chain ${{ matrix.chain.chain }} unwind to-block ${{ matrix.chain.unwind-target }} diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index defd9a6f535..4c927df8be0 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -35,11 +35,11 @@ jobs: partition: 2 total_partitions: 2 - type: optimism - args: --features "asm-keccak optimism" --locked + args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" --exclude "reth-ethereum-*" --exclude "*-ethereum" partition: 1 total_partitions: 2 - type: optimism - args: --features "asm-keccak optimism" --locked + args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" --exclude "reth-ethereum-*" --exclude "*-ethereum" partition: 2 total_partitions: 2 - type: book @@ -61,7 +61,8 @@ jobs: - name: Run tests run: | cargo nextest run \ - ${{ matrix.args }} --workspace --exclude ef-tests \ + ${{ matrix.args }} --workspace \ + --exclude ef-tests --no-tests=warn \ --partition hash:${{ matrix.partition }}/2 \ -E "!kind(test)" diff --git a/Cargo.lock b/Cargo.lock index 740810d1b1b..fb25ea0121e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -74,6 +74,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "aligned-vec" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e0966165eaf052580bd70eb1b32cb3d6245774c0104d1b2793e9650bf83b52a" +dependencies = [ + "equator", +] + [[package]] name = "alloc-no-stdlib" version = "2.0.4" @@ -91,15 +100,15 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.18" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +checksum = 
"683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.1.38" +version = "0.1.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "156bfc5dcd52ef9a5f33381701fa03310317e14c65093a9430d3e3557b08dcd3" +checksum = "a0161082e0edd9013d23083465cc04b20e44b7a15646d36ba7b0cdb7cd6fe18f" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -112,34 +121,70 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705687d5bfd019fee57cf9e206b27b30a9a9617535d5590a02b171e813208f8e" +checksum = "a101d4d016f47f13890a74290fdd17b05dd175191d9337bc600791fb96e4dea8" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", + "alloy-trie", "arbitrary", "auto_impl", "c-kzg", - "derive_more 1.0.0", + "derive_more", + "rand 0.8.5", "serde", "serde_with", ] +[[package]] +name = "alloy-consensus-any" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa60357dda9a3d0f738f18844bd6d0f4a5924cc5cf00bfad2ff1369897966123" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-contract" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2869e4fb31331d3b8c58c7db567d1e4e4e94ef64640beda3b6dd9b7045690941" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-sol-types", + "alloy-transport", + "futures", + "futures-util", + "thiserror 2.0.5", +] + [[package]] name = "alloy-dyn-abi" -version = "0.8.8" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6228abfc751a29cde117b0879b805a3e0b3b641358f063272c83ca459a56886" +checksum = "80759b3f57b3b20fa7cd8fef6479930fc95461b58ff8adea6e87e618449c8a1d" dependencies = [ "alloy-json-abi", "alloy-primitives", "alloy-sol-type-parser", "alloy-sol-types", "const-hex", - "derive_more 1.0.0", + "derive_more", "itoa", "serde", "serde_json", @@ -161,13 +206,14 @@ dependencies = [ [[package]] name = "alloy-eip7702" -version = "0.1.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea59dc42102bc9a1905dc57901edc6dd48b9f38115df86c7d252acba70d71d04" +checksum = "4c986539255fb839d1533c128e190e557e52ff652c9ef62939e233a81dd93f7e" dependencies = [ "alloy-primitives", "alloy-rlp", "arbitrary", + "derive_more", "k256", "rand 0.8.5", "serde", @@ -176,9 +222,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ffb906284a1e1f63c4607da2068c8197458a352d0b3e9796e67353d72a9be85" +checksum = "8b6755b093afef5925f25079dd5a7c8d096398b804ba60cb5275397b06b31689" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -187,7 +233,9 @@ dependencies = [ "alloy-serde", "arbitrary", "c-kzg", - "derive_more 1.0.0", + "derive_more", + "ethereum_ssz", + "ethereum_ssz_derive", "once_cell", "serde", "sha2 0.10.8", @@ -195,20 +243,21 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8429cf4554eed9b40feec7f4451113e76596086447550275e3def933faf47ce3" +checksum = 
"aeec8e6eab6e52b7c9f918748c9b811e87dbef7312a2e3a2ca1729a92966a6af" dependencies = [ "alloy-primitives", "alloy-serde", + "alloy-trie", "serde", ] [[package]] name = "alloy-json-abi" -version = "0.8.8" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d46eb5871592c216d39192499c95a99f7175cb94104f88c307e6dc960676d9f1" +checksum = "ac4b22b3e51cac09fd2adfcc73b55f447b4df669f983c13f7894ec82b607c63f" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -218,29 +267,31 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8fa8a1a3c4cbd221f2b8e3693aeb328fca79a757fe556ed08e47bbbc2a70db7" +checksum = "4fa077efe0b834bcd89ff4ba547f48fb081e4fdc3673dd7da1b295a2cf2bb7b7" dependencies = [ "alloy-primitives", "alloy-sol-types", "serde", "serde_json", - "thiserror", + "thiserror 2.0.5", "tracing", ] [[package]] name = "alloy-network" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fa23a6a9d612b52e402c995f2d582c25165ec03ac6edf64c861a76bc5b87cd" +checksum = "209a1882a08e21aca4aac6e2a674dc6fcf614058ef8cb02947d63782b1899552" dependencies = [ "alloy-consensus", + "alloy-consensus-any", "alloy-eips", "alloy-json-rpc", "alloy-network-primitives", "alloy-primitives", + "alloy-rpc-types-any", "alloy-rpc-types-eth", "alloy-serde", "alloy-signer", @@ -248,14 +299,16 @@ dependencies = [ "async-trait", "auto_impl", "futures-utils-wasm", - "thiserror", + "serde", + "serde_json", + "thiserror 2.0.5", ] [[package]] name = "alloy-network-primitives" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "801492711d4392b2ccf5fc0bc69e299fa1aab15167d74dcaa9aab96a54f684bd" +checksum = "c20219d1ad261da7a6331c16367214ee7ded41d001fabbbd656fbf71898b2773" dependencies = [ "alloy-consensus", "alloy-eips", @@ -266,9 +319,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1334a738aa1710cb8227441b3fcc319202ce78e967ef37406940242df4a454" +checksum = "bffcf33dd319f21cd6f066d81cbdef0326d4bdaaf7cfe91110bc090707858e9f" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -276,16 +329,16 @@ dependencies = [ "rand 0.8.5", "serde_json", "tempfile", - "thiserror", + "thiserror 2.0.5", "tracing", "url", ] [[package]] name = "alloy-primitives" -version = "0.8.8" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f35429a652765189c1c5092870d8360ee7b7769b09b06d89ebaefd34676446" +checksum = "9db948902dfbae96a73c2fbf1f7abec62af034ab883e4c777c3fd29702bd6e2c" dependencies = [ "alloy-rlp", "arbitrary", @@ -293,12 +346,12 @@ dependencies = [ "cfg-if", "const-hex", "derive_arbitrary", - "derive_more 1.0.0", + "derive_more", "foldhash", "getrandom 0.2.15", - "hashbrown 0.15.0", + "hashbrown 0.15.2", "hex-literal", - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "k256", "keccak-asm", @@ -307,7 +360,7 @@ dependencies = [ "proptest-derive", "rand 0.8.5", "ruint", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "sha3", "tiny-keccak", @@ -315,9 +368,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fcfaa4ffec0af04e3555686b8aacbcdf7d13638133a0672749209069750f78a6" +checksum = "9eefa6f4c798ad01f9b4202d02cea75f5ec11fa180502f4701e2b47965a8c0bb" dependencies = [ "alloy-chains", "alloy-consensus", @@ -341,21 +394,24 @@ dependencies = [ "futures", "futures-utils-wasm", "lru", + "parking_lot", "pin-project", "reqwest", + "schnellru", "serde", "serde_json", - "thiserror", + "thiserror 2.0.5", "tokio", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-pubsub" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f32cef487122ae75c91eb50154c70801d71fabdb976fec6c49e0af5e6486ab15" +checksum = "aac9a7210e0812b1d814118f426f57eb7fc260a419224dd1c76d169879c06907" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -372,9 +428,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26154390b1d205a4a7ac7352aa2eb4f81f391399d4e2f546fb81a2f8bb383f62" +checksum = "f542548a609dca89fcd72b3b9f355928cf844d4363c5eed9c5273a3dd225e097" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -383,20 +439,20 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.8" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d0f2d905ebd295e7effec65e5f6868d153936130ae718352771de3e7d03c75c" +checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "alloy-rpc-client" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "370143ed581aace6e663342d21d209c6b2e34ee6142f7d6675adb518deeaf0dc" +checksum = "ed30bf1041e84cabc5900f52978ca345dd9969f2194a945e6fdec25b0620705c" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -414,13 +470,14 @@ dependencies = [ "tower 0.5.1", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-rpc-types" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ffc534b7919e18f35e3aa1f507b6f3d9d92ec298463a9f6beaac112809d8d06" +checksum = "5ab686b0fa475d2a4f5916c5f07797734a691ec58e44f0f55d4746ea39cbcefb" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -431,9 +488,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb520ed46cc5b7d8c014a73fdd77b6a310383a2a5c0a5ae3c9b8055881f062b7" +checksum = "1f0874a976ccdf83a178ad93b64bec5b8c91a47428d714d544ca70258acfa07b" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -443,34 +500,47 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d780adaa5d95b07ad92006b2feb68ecfa7e2015f7d5976ceaac4c906c73ebd07" +checksum = "d33bc190844626c08e21897736dbd7956ab323c09e6f141b118d1c8b7aff689e" dependencies = [ "alloy-primitives", + "alloy-rpc-types-eth", "alloy-serde", "serde", ] +[[package]] +name = "alloy-rpc-types-any" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "200661999b6e235d9840be5d60a6e8ae2f0af9eb2a256dd378786744660e36ec" +dependencies = [ + "alloy-consensus-any", + "alloy-rpc-types-eth", + "alloy-serde", +] + [[package]] name = "alloy-rpc-types-beacon" 
-version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a8dc5980fe30203d698627cddb5f0cedc57f900c8b5e1229c8b9448e37acb4a" +checksum = "cc37861dc8cbf5da35d346139fbe6e03ee7823cc21138a2c4a590d3b0b4b24be" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", + "alloy-serde", "serde", "serde_with", - "thiserror", + "thiserror 2.0.5", ] [[package]] name = "alloy-rpc-types-debug" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59d8f8c5bfb160081a772f1f68eb9a37e8929c4ef74e5d01f5b78c2b645a5c5e" +checksum = "f0294b553785eb3fa7fff2e8aec45e82817258e7e6c9365c034a90cb6baeebc9" dependencies = [ "alloy-primitives", "serde", @@ -478,16 +548,18 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0285c4c09f838ab830048b780d7f4a4f460f309aa1194bb049843309524c64c" +checksum = "5d297268357e3eae834ddd6888b15f764cbc0f4b3be9265f5f6ec239013f3d68" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", - "derive_more 1.0.0", + "derive_more", + "ethereum_ssz", + "ethereum_ssz_derive", "jsonrpsee-types", "jsonwebtoken", "rand 0.8.5", @@ -497,18 +569,20 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413f4aa3ccf2c3e4234a047c5fa4727916d7daf25a89f9b765df0ba09784fd87" +checksum = "a0600b8b5e2dc0cab12cbf91b5a885c35871789fb7b3a57b434bd4fced5b7a8b" dependencies = [ "alloy-consensus", + "alloy-consensus-any", "alloy-eips", "alloy-network-primitives", "alloy-primitives", "alloy-rlp", "alloy-serde", "alloy-sol-types", - "derive_more 1.0.0", + "arbitrary", + "derive_more", "itertools 0.13.0", "jsonrpsee-types", "serde", @@ -517,12 +591,13 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cec23ce56c869eec5f6b6fd6a8a92b5aa0cfaf8d7be3a96502e537554dc7430" +checksum = "093d618d5a42808e7ae26062f415a1e816fc27d3d32662c6ed52d0871b154894" dependencies = [ "alloy-eips", "alloy-primitives", + "alloy-rpc-types-eth", "alloy-serde", "serde", "serde_json", @@ -530,23 +605,23 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "017cad3e5793c5613588c1f9732bcbad77e820ba7d0feaba3527749f856fdbc5" +checksum = "4e073ab0e67429c60be281e181731132fd07d82e091c10c29ace6935101034bb" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", "serde", "serde_json", - "thiserror", + "thiserror 2.0.5", ] [[package]] name = "alloy-rpc-types-txpool" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b230e321c416be7f50530159392b4c41a45596d40d97e185575bcd0b545e521" +checksum = "7435f6bfb93912f16d64bb61f4278fa698469e054784f477337ef87ec0b2527b" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -556,9 +631,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dff0ab1cdd43ca001e324dc27ee0e8606bd2161d6623c63e0e0b8c4dfc13600" +checksum = 
"9afa753a97002a33b2ccb707d9f15f31c81b8c1b786c95b73cc62bb1d1fd0c3f" dependencies = [ "alloy-primitives", "arbitrary", @@ -568,23 +643,23 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd4e0ad79c81a27ca659be5d176ca12399141659fef2bcbfdc848da478f4504" +checksum = "9b2cbff01a673936c2efd7e00d4c0e9a4dbbd6d600e2ce298078d33efbb19cd7" dependencies = [ "alloy-primitives", "async-trait", "auto_impl", "elliptic-curve", "k256", - "thiserror", + "thiserror 2.0.5", ] [[package]] name = "alloy-signer-local" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494e0a256f3e99f2426f994bcd1be312c02cb8f88260088dacb33a8b8936475f" +checksum = "bd6d988cb6cd7d2f428a74476515b1a6e901e08c796767f9f93311ab74005c8b" dependencies = [ "alloy-consensus", "alloy-network", @@ -595,61 +670,61 @@ dependencies = [ "coins-bip39", "k256", "rand 0.8.5", - "thiserror", + "thiserror 2.0.5", ] [[package]] name = "alloy-sol-macro" -version = "0.8.8" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2395336745358cc47207442127c47c63801a7065ecc0aa928da844f8bb5576" +checksum = "3bfd7853b65a2b4f49629ec975fee274faf6dff15ab8894c620943398ef283c0" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "alloy-sol-macro-expander" -version = "0.8.8" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed5047c9a241df94327879c2b0729155b58b941eae7805a7ada2e19436e6b39" +checksum = "82ec42f342d9a9261699f8078e57a7a4fda8aaa73c1a212ed3987080e6a9cd13" dependencies = [ "alloy-sol-macro-input", "const-hex", "heck", - "indexmap 2.6.0", + "indexmap 2.7.0", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.8.8" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dee02a81f529c415082235129f0df8b8e60aa1601b9c9298ffe54d75f57210b" +checksum = "ed2c50e6a62ee2b4f7ab3c6d0366e5770a21cad426e109c2f40335a1b3aff3df" dependencies = [ "const-hex", "dunce", "heck", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.8.8" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f631f0bd9a9d79619b27c91b6b1ab2c4ef4e606a65192369a1ee05d40dcf81cc" +checksum = "ac17c6e89a50fb4a758012e4b409d9a0ba575228e69b539fe37d7a1bd507ca4a" dependencies = [ "serde", "winnow", @@ -657,9 +732,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "0.8.8" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2841af22d99e2c0f82a78fe107b6481be3dd20b89bfb067290092794734343a" +checksum = "c9dc0fffe397aa17628160e16b89f704098bf3c9d74d5d369ebc239575936de5" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -670,9 +745,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ac3e97dad3d31770db0fc89bd6a63b789fbae78963086733f960cf32c483904" +checksum = "d69d36982b9e46075ae6b792b0f84208c6c2c15ad49f6c500304616ef67b70e0" dependencies = [ "alloy-json-rpc", "base64 
0.22.1", @@ -680,18 +755,19 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror", + "thiserror 2.0.5", "tokio", "tower 0.5.1", "tracing", "url", + "wasmtimer", ] [[package]] name = "alloy-transport-http" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b367dcccada5b28987c2296717ee04b9a5637aacd78eacb1726ef211678b5212" +checksum = "2e02ffd5d93ffc51d72786e607c97de3b60736ca3e636ead0ec1f7dce68ea3fd" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -704,9 +780,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b90cf9cde7f2fce617da52768ee28f522264b282d148384a4ca0ea85af04fa3a" +checksum = "1b6f8b87cb84bae6d81ae6604b37741c8116f84f9784a0ecc6038c302e679d23" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -723,9 +799,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.4.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7153b88690de6a50bba81c11e1d706bc41dbb90126d607404d60b763f6a3947f" +checksum = "9c085c4e1e7680b723ffc558f61a22c061ed3f70eb3436f93f3936779c59cec1" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -741,16 +817,16 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.7.2" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdd7f8b3a7c65ca09b3c7bdd7c7d72d7423d026f5247eda96af53d24e58315c1" +checksum = "3a5fd8fea044cc9a8c8a50bb6f28e31f0385d820f116c5b98f6f4e55d6e5590b" dependencies = [ "alloy-primitives", "alloy-rlp", "arbitrary", "arrayvec", "derive_arbitrary", - "derive_more 1.0.0", + "derive_more", "nybbles", "proptest", "proptest-derive", @@ -782,9 +858,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.15" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -797,63 +873,63 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.4" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +checksum = 
"2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.89" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" [[package]] name = "aquamarine" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21cc1548309245035eb18aa7f0967da6bc65587005170c56e6ef2788a4cf3f4e" +checksum = "0f50776554130342de4836ba542aa85a4ddb361690d7e8df13774d7284c3d5c2" dependencies = [ "include_dir", "itertools 0.10.5", - "proc-macro-error", + "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" dependencies = [ "derive_arbitrary", ] @@ -1022,9 +1098,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.15" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e26a9844c659a2a293d239c7910b752f8487fe122c6c8bd1659bf85a6507c302" +checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" dependencies = [ "brotli", "flate2", @@ -1036,6 +1112,17 @@ dependencies = [ "zstd-safe", ] +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "async-sse" version = "5.1.0" @@ -1069,7 +1156,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -1080,7 +1167,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -1118,7 +1205,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -1129,11 +1216,11 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "backon" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4fa97bb310c33c811334143cf64c5bb2b7b3c06e453db6b095d7061eff8f113" +checksum = "ba5289ec98f68f28dd809fd601059e6aa908bb8f6108620930828283d4ee23d7" dependencies = [ - "fastrand 2.1.1", + "fastrand 2.2.0", "tokio", ] @@ -1224,7 +1311,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -1319,9 +1406,9 @@ dependencies = [ "bitflags 2.6.0", "boa_interner", "boa_macros", - "indexmap 2.6.0", + "indexmap 2.7.0", "num-bigint", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", ] [[package]] @@ -1345,7 +1432,7 @@ dependencies = [ "fast-float", "hashbrown 0.14.5", "icu_normalizer", - "indexmap 2.6.0", + "indexmap 2.7.0", "intrusive-collections", "itertools 0.13.0", "num-bigint", @@ -1357,7 +1444,7 @@ dependencies = [ 
"portable-atomic", "rand 0.8.5", "regress", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "ryu-js", "serde", "serde_json", @@ -1365,7 +1452,7 @@ dependencies = [ "static_assertions", "tap", "thin-vec", - "thiserror", + "thiserror 1.0.69", "time", ] @@ -1391,10 +1478,10 @@ dependencies = [ "boa_gc", "boa_macros", "hashbrown 0.14.5", - "indexmap 2.6.0", + "indexmap 2.7.0", "once_cell", "phf", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "static_assertions", ] @@ -1406,7 +1493,7 @@ checksum = "240f4126219a83519bad05c9a40bfc0303921eeb571fc2d7e44c17ffac99d3f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", "synstructure", ] @@ -1426,7 +1513,7 @@ dependencies = [ "num-bigint", "num-traits", "regress", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", ] [[package]] @@ -1443,7 +1530,7 @@ checksum = "ae85205289bab1f2c7c8a30ddf0541cf89ba2ff7dbd144feef50bbfa664288d4" dependencies = [ "fast-float", "paste", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "sptr", "static_assertions", ] @@ -1488,6 +1575,17 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "bstr" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a68f1f47cdf0ec8ee4b941b2eee2a80cb796db73118c0dd09ac63fbe405be22" +dependencies = [ + "memchr", + "regex-automata 0.4.9", + "serde", +] + [[package]] name = "bumpalo" version = "3.16.0" @@ -1502,9 +1600,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.19.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" +checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" dependencies = [ "bytemuck_derive", ] @@ -1517,7 +1615,7 @@ checksum = "bcfcc3cd946cb52f0bbfdbbcfa2f4e24f75ebb6c0e1002f7c25904fada18b9ec" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -1528,9 +1626,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.2" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" dependencies = [ "serde", ] @@ -1561,9 +1659,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" dependencies = [ "serde", ] @@ -1579,7 +1677,7 @@ dependencies = [ "semver 1.0.23", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -1605,9 +1703,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.30" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b16803a61b81d9eabb7eae2588776c4c1e584b738ede45fdbb4c972cec1e9945" +checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d" dependencies = [ "jobserver", "libc", @@ -1635,6 +1733,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" version = "0.4.38" @@ -1700,9 +1804,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.20" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" +checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" dependencies = [ "clap_builder", "clap_derive", @@ -1710,9 +1814,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.20" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" +checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" dependencies = [ "anstream", "anstyle", @@ -1729,14 +1833,14 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "clap_lex" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "coins-bip32" @@ -1751,7 +1855,7 @@ dependencies = [ "k256", "serde", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -1767,7 +1871,7 @@ dependencies = [ "pbkdf2", "rand 0.8.5", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -1786,14 +1890,14 @@ dependencies = [ "serde", "sha2 0.10.8", "sha3", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "colorchoice" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "combine" @@ -1807,14 +1911,14 @@ dependencies = [ [[package]] name = "comfy-table" -version = "7.1.1" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7" +checksum = "24f165e7b643266ea80cb858aed492ad9280e3e05ce24d4a99d7d7b889b6a4d9" dependencies = [ - "crossterm 0.27.0", + "crossterm", "strum", "strum_macros", - "unicode-width", + "unicode-width 0.2.0", ] [[package]] @@ -1863,9 +1967,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586" +checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" dependencies = [ "cfg-if", "cpufeatures", @@ -1882,9 +1986,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const_format" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c655d81ff1114fb0dcdea9225ea9f0cc712a6f8d189378e82bdf62a473a64b" +checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" dependencies = [ "const_format_proc_macros", "konst", @@ -1892,9 +1996,9 @@ dependencies = [ [[package]] name = "const_format_proc_macros" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eff1a44b93f47b1bac19a27932f5c591e43d1ba357ee4f61526c8a25603f0eb1" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" dependencies = [ "proc-macro2", "quote", @@ -1920,6 +2024,16 @@ dependencies = [ "libc", ] +[[package]] +name = "core-foundation" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -1946,9 +2060,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.14" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" dependencies = [ "libc", ] @@ -2017,9 +2131,9 @@ dependencies = [ [[package]] name = "critical-section" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f64009896348fc5af4222e9cf7d7d82a95a256c634ebcf61c53e4ea461422242" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" [[package]] name = "crossbeam-channel" @@ -2055,19 +2169,6 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" -[[package]] -name = "crossterm" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" -dependencies = [ - "bitflags 2.6.0", - "crossterm_winapi", - "libc", - "parking_lot 0.12.3", - "winapi", -] - [[package]] name = "crossterm" version = "0.28.1" @@ -2076,8 +2177,8 @@ checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ "bitflags 2.6.0", "crossterm_winapi", - "mio 1.0.2", - "parking_lot 0.12.3", + "mio 1.0.3", + "parking_lot", "rustix", "signal-hook", "signal-hook-mio", @@ -2134,9 +2235,9 @@ dependencies = [ [[package]] name = "csv" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" +checksum = "acdc4883a9c96732e4733212c01447ebd805833b7275a73ca3ee080fd77afdaf" dependencies = [ "csv-core", "itoa", @@ -2186,7 +2287,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -2210,7 +2311,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -2221,7 +2322,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -2234,7 +2335,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.10", + "parking_lot_core", ] [[package]] @@ -2248,7 +2349,8 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.10", + "parking_lot_core", + "serde", ] [[package]] @@ -2294,11 +2396,12 @@ dependencies = [ [[package]] name = "delay_map" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4355c25cbf99edcb6b4a0e906f6bdc6956eda149e84455bea49696429b2f8e8" +checksum = 
"df941644b671f05f59433e481ba0d31ac10e3667de725236a4c0d587c496fba1" dependencies = [ "futures", + "tokio", "tokio-util", ] @@ -2335,24 +2438,13 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", -] - -[[package]] -name = "derive_more" -version = "0.99.18" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" +checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -2373,7 +2465,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", "unicode-xid", ] @@ -2448,9 +2540,9 @@ dependencies = [ [[package]] name = "discv5" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f569b8c367554666c8652305621e8bae3634a2ff5c6378081d5bd8c399c99f23" +checksum = "23e6b70634e26c909d1edbb3142b3eaf3b89da0e52f284f00ca7c80d9901ad9e" dependencies = [ "aes", "aes-gcm", @@ -2469,13 +2561,13 @@ dependencies = [ "lru", "more-asserts", "multiaddr", - "parking_lot 0.11.2", + "parking_lot", "rand 0.8.5", "smallvec", - "socket2 0.4.10", + "socket2", "tokio", "tracing", - "uint", + "uint 0.10.0", "zeroize", ] @@ -2487,7 +2579,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -2555,8 +2647,10 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "rayon", @@ -2571,7 +2665,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror", + "thiserror 2.0.5", "walkdir", ] @@ -2635,7 +2729,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -2646,7 +2740,27 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", +] + +[[package]] +name = "equator" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c35da53b5a021d2484a7cc49b2ac7f2d840f8236a286f84202369bd338d761ea" +dependencies = [ + "equator-macro", +] + +[[package]] +name = "equator-macro" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bf679796c0322556351f287a51b49e48f7c4986e727b5dd78c972d30e2e16cc" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", ] [[package]] @@ -2657,12 +2771,52 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", +] + +[[package]] +name = "ethereum_serde_utils" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70cbccfccf81d67bff0ab36e591fa536c8a935b078a7b0e58c1d00d418332fc9" 
+dependencies = [ + "alloy-primitives", + "hex", + "serde", + "serde_derive", + "serde_json", +] + +[[package]] +name = "ethereum_ssz" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "036c84bd29bff35e29bbee3c8fc0e2fb95db12b6f2f3cae82a827fbc97256f3a" +dependencies = [ + "alloy-primitives", + "ethereum_serde_utils", + "itertools 0.13.0", + "serde", + "serde_derive", + "smallvec", + "typenum", +] + +[[package]] +name = "ethereum_ssz_derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dc8e67e1f770f5aa4c2c2069aaaf9daee7ac21bed357a71b911b37a58966cfb" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.90", ] [[package]] @@ -2675,6 +2829,7 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" name = "example-beacon-api-sidecar-fetcher" version = "0.1.0" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-beacon", "clap", @@ -2685,7 +2840,7 @@ dependencies = [ "reth-node-ethereum", "serde", "serde_json", - "thiserror", + "thiserror 2.0.5", ] [[package]] @@ -2721,7 +2876,24 @@ dependencies = [ ] [[package]] -name = "example-custom-dev-node" +name = "example-custom-beacon-withdrawals" +version = "0.0.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-sol-macro", + "alloy-sol-types", + "eyre", + "reth", + "reth-chainspec", + "reth-evm", + "reth-evm-ethereum", + "reth-node-ethereum", + "reth-primitives", +] + +[[package]] +name = "example-custom-dev-node" version = "0.0.0" dependencies = [ "alloy-genesis", @@ -2740,6 +2912,7 @@ dependencies = [ name = "example-custom-engine-types" version = "0.0.0" dependencies = [ + "alloy-eips", "alloy-genesis", "alloy-primitives", "alloy-rpc-types", @@ -2752,10 +2925,10 @@ dependencies = [ "reth-node-core", "reth-node-ethereum", "reth-payload-builder", - "reth-primitives", "reth-tracing", + "reth-trie-db", "serde", - "thiserror", + "thiserror 2.0.5", "tokio", ] @@ -2763,6 +2936,7 @@ dependencies = [ name = "example-custom-evm" version = "0.0.0" dependencies = [ + "alloy-consensus", "alloy-genesis", "alloy-primitives", "eyre", @@ -2781,8 +2955,9 @@ dependencies = [ name = "example-custom-inspector" version = "0.0.0" dependencies = [ + "alloy-eips", "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types-eth", "clap", "futures-util", "reth", @@ -2804,6 +2979,7 @@ dependencies = [ name = "example-custom-payload-builder" version = "0.0.0" dependencies = [ + "alloy-eips", "alloy-primitives", "eyre", "futures-util", @@ -2831,8 +3007,6 @@ dependencies = [ "reth-network", "reth-network-api", "reth-node-ethereum", - "reth-primitives", - "reth-provider", "tokio", "tokio-stream", "tracing", @@ -2842,8 +3016,9 @@ dependencies = [ name = "example-db-access" version = "0.0.0" dependencies = [ + "alloy-consensus", "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types-eth", "eyre", "reth-chainspec", "reth-db", @@ -2857,6 +3032,7 @@ dependencies = [ name = "example-manual-p2p" version = "0.0.0" dependencies = [ + "alloy-consensus", "eyre", "futures", "reth-chainspec", @@ -2920,8 +3096,8 @@ dependencies = [ "reth-chainspec", "reth-discv4", "reth-network", + "reth-network-api", "reth-primitives", - "reth-provider", "reth-tracing", "secp256k1", "serde_json", @@ -2948,10 +3124,11 @@ dependencies = [ name = "example-stateful-precompile" version = "0.0.0" dependencies = [ + "alloy-consensus", "alloy-genesis", "alloy-primitives", "eyre", - "parking_lot 0.12.3", + "parking_lot", "reth", 
"reth-chainspec", "reth-node-api", @@ -3002,9 +3179,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" [[package]] name = "fastrlp" @@ -3024,7 +3201,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" dependencies = [ "libc", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -3081,9 +3258,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.34" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" +checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" dependencies = [ "crc32fast", "miniz_oxide", @@ -3202,7 +3379,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -3253,9 +3430,9 @@ checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" [[package]] name = "generator" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb949699c3e4df3a183b1d2142cb24277057055ed23c68ed58894f76c517223" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" dependencies = [ "cfg-if", "libc", @@ -3270,6 +3447,7 @@ version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ + "serde", "typenum", "version_check", "zeroize", @@ -3336,7 +3514,7 @@ dependencies = [ "pin-project", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -3380,9 +3558,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" dependencies = [ "atomic-waker", "bytes", @@ -3390,7 +3568,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.6.0", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -3437,9 +3615,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.0" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" dependencies = [ "allocator-api2", "equivalent", @@ -3449,9 +3627,9 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.8.4" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ "hashbrown 0.14.5", ] @@ -3499,6 +3677,54 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" +[[package]] +name = "hickory-proto" +version = 
"0.25.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d063c0692ee669aa6d261988aa19ca5510f1cc40e4f211024f50c888499a35d7" +dependencies = [ + "async-recursion", + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.8.5", + "serde", + "thiserror 2.0.5", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.25.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42bc352e4412fb657e795f79b4efcf2bd60b59ee5ca0187f3554194cd1107a27" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "moka", + "once_cell", + "parking_lot", + "rand 0.8.5", + "resolv-conf", + "serde", + "smallvec", + "thiserror 2.0.5", + "tokio", + "tracing", +] + [[package]] name = "hkdf" version = "0.12.4" @@ -3551,9 +3777,9 @@ dependencies = [ [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -3585,9 +3811,9 @@ dependencies = [ [[package]] name = "http-range-header" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a397c49fec283e3d6211adbe480be95aae5f304cfb923e9970e08956d5168a" +checksum = "9171a2ea8a68358193d15dd5d70c1c10a2afc3e7e4c5bc92bc9f025cebd7359c" [[package]] name = "http-types" @@ -3645,9 +3871,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" dependencies = [ "bytes", "futures-channel", @@ -3676,7 +3902,7 @@ dependencies = [ "hyper-util", "log", "rustls", - "rustls-native-certs 0.8.0", + "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", "tokio-rustls", @@ -3686,9 +3912,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -3697,7 +3923,7 @@ dependencies = [ "http-body", "hyper", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -3705,36 +3931,36 @@ dependencies = [ [[package]] name = "iai-callgrind" -version = "0.13.4" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bd871e6374d5ca2d9b48dd23b3c7ef63a4201728621f6d75937dfcc66e91809" +checksum = "22275f8051874cd2f05b2aa1e0098d5cbec34df30ff92f1a1e2686a4cefed870" dependencies = [ "bincode", - "derive_more 0.99.18", + "derive_more", "iai-callgrind-macros", "iai-callgrind-runner", ] [[package]] name = "iai-callgrind-macros" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397649417510422ded7033f86132f833cca8c2e5081d0dfbec939b2353da7021" +checksum = "e8e6677dc52bd798b988e62ffd6831bf7eb46e4348cb1c74c1164954ebd0e5a1" dependencies = [ - "derive_more 
0.99.18", + "derive_more", "proc-macro-error2", "proc-macro2", "quote", "serde", "serde_json", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "iai-callgrind-runner" -version = "0.13.4" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3783c337f9e931af702b5d5835ff2a6824bf55e416461a4e042dfb4b8fdbbea" +checksum = "a02dd95fe4949513b45a328b5b18f527ee02e96f3428b48090aa7cf9043ab0b8" dependencies = [ "serde", ] @@ -3877,7 +4103,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -3888,22 +4114,23 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.4.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + "smallvec", + "utf8_iter", ] [[package]] -name = "idna" -version = "0.5.0" +name = "idna_adapter" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "icu_normalizer", + "icu_properties", ] [[package]] @@ -3927,13 +4154,13 @@ dependencies = [ [[package]] name = "impl-trait-for-tuples" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.90", ] [[package]] @@ -3974,16 +4201,22 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.15.2", "serde", ] +[[package]] +name = "indoc" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" + [[package]] name = "infer" version = "0.2.3" @@ -3997,7 +4230,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" dependencies = [ "ahash", - "indexmap 2.6.0", + "indexmap 2.7.0", "is-terminal", "itoa", "log", @@ -4040,12 +4273,16 @@ dependencies = [ [[package]] name = "instability" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b23a0c8dfe501baac4adf6ebbfa6eddf8f0c07f56b058cc1288017e32397846c" +checksum = "b829f37dead9dc39df40c2d3376c179fdfd2ac771f53f55d3c30dc096a3c0c6e" dependencies = [ + "darling", + "indoc", + "pretty_assertions", + "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -4059,9 +4296,9 @@ dependencies = [ [[package]] name = "interprocess" -version = "2.2.1" +version = "2.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2f4e4a06d42fab3e85ab1b419ad32b09eab58b901d40c57935ff92db3287a13" +checksum = "894148491d817cb36b6f778017b8ac46b17408d522dd90f539d677ea938362eb" dependencies = [ "doctest-file", "futures-core", @@ -4087,7 +4324,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.7", + "socket2", "widestring", "windows-sys 0.48.0", "winreg", @@ -4146,9 +4383,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "jni" @@ -4160,7 +4397,7 @@ dependencies = [ "combine", "jni-sys", "log", - "thiserror", + "thiserror 1.0.69", "walkdir", ] @@ -4181,18 +4418,19 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" dependencies = [ + "once_cell", "wasm-bindgen", ] [[package]] name = "jsonrpsee" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02f01f48e04e0d7da72280ab787c9943695699c9b32b99158ece105e8ad0afea" +checksum = "c5c71d8c1a731cc4227c2f698d377e7848ca12c8a48866fc5e6951c43a4db843" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4208,9 +4446,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d80eccbd47a7b9f1e67663fd846928e941cb49c65236e297dd11c9ea3c5e3387" +checksum = "548125b159ba1314104f5bb5f38519e03a41862786aa3925cf349aae9cdd546e" dependencies = [ "base64 0.22.1", "futures-channel", @@ -4223,7 +4461,7 @@ dependencies = [ "rustls-pki-types", "rustls-platform-verifier", "soketto", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-rustls", "tokio-util", @@ -4233,9 +4471,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c2709a32915d816a6e8f625bf72cf74523ebe5d8829f895d6b041b1d3137818" +checksum = "f2882f6f8acb9fdaec7cefc4fd607119a9bd709831df7d7672a1d3b644628280" dependencies = [ "async-trait", "bytes", @@ -4245,13 +4483,13 @@ dependencies = [ "http-body", "http-body-util", "jsonrpsee-types", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "rand 0.8.5", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tracing", @@ -4260,9 +4498,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc54db939002b030e794fbfc9d5a925aa2854889c5a2f0352b0bffa54681707e" +checksum = "b3638bc4617f96675973253b3a45006933bde93c2fd8a6170b33c777cc389e5b" dependencies = [ "async-trait", "base64 0.22.1", @@ -4276,7 +4514,7 @@ dependencies = [ "rustls-platform-verifier", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tower 
0.4.13", "tracing", @@ -4285,22 +4523,22 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a9a4b2eaba8cc928f49c4ccf4fcfa65b690a73997682da99ed08f3393b51f07" +checksum = "c06c01ae0007548e73412c08e2285ffe5d723195bf268bce67b1b77c3bb2a14d" dependencies = [ "heck", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "jsonrpsee-server" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e30110d0f2d7866c8cc6c86483bdab2eb9f4d2f0e20db55518b2bca84651ba8e" +checksum = "82ad8ddc14be1d4290cd68046e7d1d37acd408efed6d3ca08aefcc3ad6da069c" dependencies = [ "futures-util", "http", @@ -4315,7 +4553,7 @@ dependencies = [ "serde", "serde_json", "soketto", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", "tokio-util", @@ -4325,21 +4563,21 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca331cd7b3fe95b33432825c2d4c9f5a43963e207fdc01ae67f9fd80ab0930f" +checksum = "a178c60086f24cc35bb82f57c651d0d25d99c4742b4d335de04e97fa1f08a8a1" dependencies = [ "http", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "jsonrpsee-wasm-client" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c603d97578071dc44d79d3cfaf0775437638fd5adc33c6b622dfe4fa2ec812d" +checksum = "1a01cd500915d24ab28ca17527e23901ef1be6d659a2322451e1045532516c25" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -4348,9 +4586,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.24.6" +version = "0.24.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755ca3da1c67671f1fae01cd1a47f41dfb2233a8f19a643e587ab0a663942044" +checksum = "0fe322e0896d0955a3ebdd5bf813571c53fea29edd713bc315b76620b327e86d" dependencies = [ "http", "jsonrpsee-client-transport", @@ -4453,31 +4691,31 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.159" +version = "0.2.167" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc" [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libp2p-identity" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" +checksum = "257b5621d159b32282eac446bed6670c39c7dc68a200a992d8f056afa0066f6d" dependencies = [ "asn1_der", "bs58", @@ -4487,7 +4725,7 @@ dependencies = [ "multihash", 
"quick-protobuf", "sha2 0.10.8", - "thiserror", + "thiserror 1.0.69", "tracing", "zeroize", ] @@ -4511,7 +4749,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 2.6.0", "libc", - "redox_syscall 0.5.7", + "redox_syscall", ] [[package]] @@ -4575,6 +4813,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" dependencies = [ "linked-hash-map", + "serde", ] [[package]] @@ -4585,9 +4824,9 @@ checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "litemap" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" [[package]] name = "lock_api" @@ -4597,6 +4836,7 @@ checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", + "serde", ] [[package]] @@ -4624,16 +4864,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.0", -] - -[[package]] -name = "lru-cache" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" -dependencies = [ - "linked-hash-map", + "hashbrown 0.15.2", ] [[package]] @@ -4692,9 +4923,9 @@ dependencies = [ [[package]] name = "metrics" -version = "0.23.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884adb57038347dfbaf2d5065887b6cf4312330dc8e94bc30a1a839bd79d3261" +checksum = "7a7deb012b3b2767169ff203fadb4c6b0b82b947512e5eb9e0b78c2e186ad9e3" dependencies = [ "ahash", "portable-atomic", @@ -4709,28 +4940,28 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "metrics-exporter-prometheus" -version = "0.15.3" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f0c8427b39666bf970460908b213ec09b3b350f20c0c2eabcbba51704a08e6" +checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" dependencies = [ "base64 0.22.1", - "indexmap 2.6.0", + "indexmap 2.7.0", "metrics", "metrics-util", "quanta", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "metrics-process" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69e6ced169644e186e060ddc15f3923fdf06862c811a867bb1e5e7c7824f4d0" +checksum = "4a82c8add4382f29a122fa64fff1891453ed0f6b2867d971e7d60cb8dfa322ff" dependencies = [ "libc", "libproc", @@ -4744,15 +4975,16 @@ dependencies = [ [[package]] name = "metrics-util" -version = "0.17.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4259040465c955f9f2f1a4a8a16dc46726169bca0f88e8fb2dbeced487c3e828" +checksum = "15b482df36c13dd1869d73d14d28cd4855fbd6cfc32294bee109908a9f4a4ed7" dependencies = [ "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.14.5", + "hashbrown 0.15.2", + "indexmap 2.7.0", "metrics", - "num_cpus", + "ordered-float", "quanta", "sketches-ddsketch", ] @@ -4772,7 +5004,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", ] 
@@ -4822,11 +5054,10 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ - "hermit-abi 0.3.9", "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", @@ -4835,9 +5066,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" +checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" dependencies = [ "cfg-if", "downcast", @@ -4849,14 +5080,14 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" +checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -4880,6 +5111,26 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "moka" +version = "0.12.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32cf62eb4dd975d2dde76432fb1075c49e3ee2331cf36f1f8fd4b66550d32b6f" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "once_cell", + "parking_lot", + "quanta", + "rustc_version 0.4.1", + "smallvec", + "tagptr", + "thiserror 1.0.69", + "triomphe", + "uuid", +] + [[package]] name = "more-asserts" version = "0.3.1" @@ -4901,7 +5152,7 @@ dependencies = [ "percent-encoding", "serde", "static_assertions", - "unsigned-varint 0.8.0", + "unsigned-varint", "url", ] @@ -4918,12 +5169,12 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.1" +version = "0.19.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" +checksum = "6b430e7953c29dd6a09afc29ff0bb69c6e306329ee6794700aee27b76a1aea8d" dependencies = [ "core2", - "unsigned-varint 0.7.2", + "unsigned-varint", ] [[package]] @@ -4961,6 +5212,7 @@ dependencies = [ "libc", "log", "mio 0.8.11", + "serde", "walkdir", "windows-sys 0.48.0", ] @@ -5103,7 +5355,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -5156,9 +5408,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.4.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ea7162170c6f3cad8f67f4dd7108e3f78349fd553da5b8bebff1e7ef8f38896" +checksum = "78f0daa0d0936d436a21b57571b1e27c5663aa2ab62f6edae5ba5be999f9f93e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5166,17 +5418,17 @@ dependencies = [ "alloy-rlp", "alloy-serde", "arbitrary", - "derive_more 1.0.0", + "derive_more", "serde", "serde_with", - "spin", + "thiserror 2.0.5", ] [[package]] name = "op-alloy-genesis" -version = "0.4.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3d31dfbbd8dd898c7512f8ce7d30103980485416f668566100b0ed0994b958" +checksum = "3eb0964932faa7050b74689f017aca66ffa3e52501080278a81bb0a43836c8dd" dependencies = [ "alloy-consensus", "alloy-eips", @@ 
-5184,44 +5436,66 @@ dependencies = [ "alloy-sol-types", "serde", "serde_repr", + "thiserror 2.0.5", ] [[package]] name = "op-alloy-network" -version = "0.4.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d113b325527ba7da271a8793f1c14bdf7f035ce9e0611e668c36fc6812568c7f" +checksum = "cd9a690fcc404e44c3589dd39cf22895df42f7ef8671a07828b8c376c39be46a" dependencies = [ "alloy-consensus", "alloy-network", "alloy-primitives", "alloy-rpc-types-eth", + "alloy-signer", "op-alloy-consensus", "op-alloy-rpc-types", ] [[package]] name = "op-alloy-protocol" -version = "0.4.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310873e4fbfc41986716c4fb6000a8b49d025d932d2c261af58271c434b05288" +checksum = "6d8c057c1a5bdf72d1f86c470a4d90f2d2ad1b273caa547c04cd6affe45b466d" dependencies = [ + "alloc-no-stdlib", "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-serde", - "derive_more 1.0.0", + "async-trait", + "brotli", + "cfg-if", + "miniz_oxide", "op-alloy-consensus", "op-alloy-genesis", "serde", + "thiserror 2.0.5", + "tracing", + "unsigned-varint", +] + +[[package]] +name = "op-alloy-rpc-jsonrpsee" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a98debc5266443e64e03195cd1a3b6cdbe8d8679e9d8c4b76a3670d24b2e267a" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "jsonrpsee", + "op-alloy-rpc-types", + "op-alloy-rpc-types-engine", ] [[package]] name = "op-alloy-rpc-types" -version = "0.4.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "323c65880e2561aa87f74f8af260fd15b9cc930c448c88a60ae95af86c88c634" +checksum = "73741855ffaa2041b33cb616d7db7180c1149b648c68c23bee9e15501073fb32" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5229,6 +5503,8 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", + "arbitrary", + "derive_more", "op-alloy-consensus", "serde", "serde_json", @@ -5236,21 +5512,27 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.4.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349e7b420f45d1a00216ec4c65fcf3f0057a841bc39732c405c85ae782b94121" +checksum = "ebedc32e24013c8b3faea62d091bccbb90f871286fe2238c6f7e2ff29974df8e" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "alloy-serde", - "derive_more 1.0.0", + "derive_more", + "ethereum_ssz", + "op-alloy-consensus", + "op-alloy-genesis", "op-alloy-protocol", "serde", + "snap", + "thiserror 2.0.5", ] [[package]] name = "op-reth" -version = "1.1.0" +version = "1.1.4" dependencies = [ "clap", "reth-cli-util", @@ -5286,6 +5568,15 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +[[package]] +name = "ordered-float" +version = "4.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c65ee1f9701bf938026630b455d5315f490640234259037edb259798b3bcf85e" +dependencies = [ + "num-traits", +] + [[package]] name = "overload" version = "0.1.1" @@ -5320,6 +5611,7 @@ version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ + "arbitrary", "arrayvec", "bitvec", "byte-slice-cast", @@ -5347,17 +5639,6 @@ version = "2.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" -[[package]] -name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", -] - [[package]] name = "parking_lot" version = "0.12.3" @@ -5365,21 +5646,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", - "parking_lot_core 0.9.10", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", + "parking_lot_core", ] [[package]] @@ -5390,7 +5657,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.7", + "redox_syscall", "smallvec", "windows-targets 0.52.6", ] @@ -5429,12 +5696,12 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" +checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror", + "thiserror 2.0.5", "ucd-trie", ] @@ -5478,7 +5745,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -5492,29 +5759,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -5595,9 +5862,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" [[package]] name = "powerfmt" @@ -5607,10 +5874,11 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "pprof" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ef5c97c51bd34c7e742402e216abdeb44d415fbe6ae41d56b114723e953711cb" +checksum = "ebbe2f8898beba44815fdc9e5a4ae9c929e21c5dc29b0c774a15555f7f58d6d0" dependencies = [ + "aligned-vec", "backtrace", "cfg-if", "criterion", @@ -5620,11 +5888,11 @@ dependencies = [ "log", "nix", "once_cell", - "parking_lot 0.12.3", + "parking_lot", "smallvec", "symbolic-demangle", "tempfile", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -5674,12 +5942,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.22" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -5699,7 +5967,7 @@ checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash", "impl-codec", - "uint", + "uint 0.9.5", ] [[package]] @@ -5711,30 +5979,6 @@ dependencies = [ "toml_edit", ] -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - [[package]] name = "proc-macro-error-attr2" version = "2.0.0" @@ -5754,14 +5998,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "proc-macro2" -version = "1.0.87" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -5852,7 +6096,7 @@ checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -5896,48 +6140,52 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" dependencies = [ "bytes", "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "rustls", - "socket2 0.5.7", - "thiserror", + "socket2", + "thiserror 2.0.5", "tokio", "tracing", ] [[package]] name = "quinn-proto" -version = "0.11.8" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", + "getrandom 0.2.15", "rand 0.8.5", "ring", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "rustls", + "rustls-pki-types", "slab", - "thiserror", + "thiserror 2.0.5", "tinyvec", "tracing", + "web-time", ] [[package]] name = "quinn-udp" -version = "0.5.5" +version = "0.5.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ + "cfg_aliases", "libc", "once_cell", - "socket2 0.5.7", + "socket2", "tracing", "windows-sys 0.59.0", ] @@ -6047,7 +6295,7 @@ dependencies = [ "bitflags 2.6.0", "cassowary", "compact_str", - "crossterm 0.28.1", + "crossterm", "instability", "itertools 0.13.0", "lru", @@ -6056,7 +6304,7 @@ dependencies = [ "strum_macros", "unicode-segmentation", "unicode-truncate", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -6094,15 +6342,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3edd4d5d42c92f0a659926464d4cce56b562761267ecf0f469d85b7de384175" -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_syscall" version = "0.5.7" @@ -6120,18 +6359,18 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom 0.2.15", "libredox", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "regex" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.8", + "regex-automata 0.4.9", "regex-syntax 0.8.5", ] @@ -6146,9 +6385,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", @@ -6177,11 +6416,17 @@ dependencies = [ "memchr", ] +[[package]] +name = "relative-path" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" + [[package]] name = "reqwest" -version = "0.12.8" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "base64 0.22.1", "bytes", @@ -6202,13 +6447,13 @@ dependencies = [ "pin-project-lite", "quinn", "rustls", - "rustls-native-certs 0.8.0", + "rustls-native-certs 0.8.1", "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tokio", "tokio-rustls", "tokio-util", @@ -6234,7 +6479,7 @@ dependencies = [ [[package]] name = "reth" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6259,7 +6504,6 @@ dependencies = [ "reth-consensus-common", "reth-db", "reth-db-api", - "reth-discv4", "reth-downloaders", "reth-engine-util", "reth-errors", @@ -6282,6 +6526,7 @@ dependencies = [ "reth-payload-primitives", "reth-payload-validator", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune", "reth-revm", @@ -6305,50 +6550,25 @@ dependencies 
= [ "tracing", ] -[[package]] -name = "reth-auto-seal-consensus" -version = "1.1.0" -dependencies = [ - "alloy-primitives", - "alloy-rpc-types-engine", - "futures-util", - "reth-beacon-consensus", - "reth-chainspec", - "reth-consensus", - "reth-engine-primitives", - "reth-evm", - "reth-execution-errors", - "reth-execution-types", - "reth-network-p2p", - "reth-network-peers", - "reth-optimism-consensus", - "reth-primitives", - "reth-provider", - "reth-revm", - "reth-stages-api", - "reth-tokio-util", - "reth-transaction-pool", - "reth-trie", - "revm-primitives", - "tokio", - "tokio-stream", - "tracing", -] - [[package]] name = "reth-basic-payload-builder" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "futures-core", "futures-util", "metrics", "reth-chainspec", + "reth-evm", "reth-metrics", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-revm", "reth-tasks", @@ -6360,8 +6580,10 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-genesis", "alloy-primitives", "alloy-rpc-types-engine", @@ -6372,6 +6594,7 @@ dependencies = [ "reth-blockchain-tree", "reth-blockchain-tree-api", "reth-chainspec", + "reth-codecs", "reth-config", "reth-consensus", "reth-db", @@ -6388,9 +6611,11 @@ dependencies = [ "reth-network-p2p", "reth-node-types", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-payload-validator", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune", "reth-prune-types", @@ -6403,7 +6628,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "schnellru", - "thiserror", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -6411,7 +6636,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -6435,10 +6660,11 @@ dependencies = [ "reth-node-api", "reth-node-core", "reth-primitives", + "reth-primitives-traits", "reth-rpc-types-compat", "reth-tracing", "serde", - "thiserror", + "thiserror 2.0.5", "tokio", "tower 0.4.13", "tracing", @@ -6446,7 +6672,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6456,7 +6682,7 @@ dependencies = [ "assert_matches", "linked_hash_set", "metrics", - "parking_lot 0.12.3", + "parking_lot", "reth-blockchain-tree-api", "reth-chainspec", "reth-consensus", @@ -6484,29 +6710,31 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "reth-consensus", "reth-execution-errors", "reth-primitives", + "reth-primitives-traits", "reth-storage-errors", - "thiserror", + "thiserror 2.0.5", ] [[package]] name = "reth-chain-state" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-signer", "alloy-signer-local", - "auto_impl", - "derive_more 1.0.0", + "derive_more", "metrics", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "rand 0.8.5", "reth-chainspec", @@ -6514,6 +6742,7 @@ dependencies = [ "reth-execution-types", "reth-metrics", "reth-primitives", + "reth-primitives-traits", "reth-storage-api", "reth-testing-utils", "reth-trie", @@ 
-6525,16 +6754,17 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-chains", + "alloy-consensus", "alloy-eips", "alloy-genesis", "alloy-primitives", "alloy-rlp", "alloy-trie", "auto_impl", - "derive_more 1.0.0", + "derive_more", "once_cell", "reth-ethereum-forks", "reth-network-peers", @@ -6545,7 +6775,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-genesis", "clap", @@ -6558,16 +6788,18 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.1.0" +version = "1.1.4" dependencies = [ "ahash", + "alloy-consensus", "alloy-eips", "alloy-primitives", + "alloy-rlp", "arbitrary", "backon", "clap", "comfy-table", - "crossterm 0.28.1", + "crossterm", "eyre", "fdlimit", "futures", @@ -6581,6 +6813,7 @@ dependencies = [ "reth-cli", "reth-cli-runner", "reth-cli-util", + "reth-codecs", "reth-config", "reth-consensus", "reth-db", @@ -6597,6 +6830,7 @@ dependencies = [ "reth-network", "reth-network-p2p", "reth-network-peers", + "reth-node-api", "reth-node-builder", "reth-node-core", "reth-node-events", @@ -6604,10 +6838,13 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-prune", + "reth-prune-types", "reth-stages", + "reth-stages-types", "reth-static-file", "reth-static-file-types", "reth-trie", + "reth-trie-common", "reth-trie-db", "secp256k1", "serde", @@ -6619,7 +6856,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.1.0" +version = "1.1.4" dependencies = [ "reth-tasks", "tokio", @@ -6628,7 +6865,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6638,20 +6875,20 @@ dependencies = [ "rand 0.8.5", "reth-fs-util", "secp256k1", - "thiserror", + "serde", + "thiserror 2.0.5", "tikv-jemallocator", "tracy-client", ] [[package]] name = "reth-codecs" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-genesis", "alloy-primitives", - "alloy-rlp", "alloy-trie", "arbitrary", "bytes", @@ -6659,27 +6896,28 @@ dependencies = [ "op-alloy-consensus", "proptest", "proptest-arbitrary-interop", - "rand 0.8.5", "reth-codecs-derive", + "rstest", "serde", "serde_json", "test-fuzz", + "visibility", ] [[package]] name = "reth-codecs-derive" -version = "1.1.0" +version = "1.1.4" dependencies = [ "convert_case", "proc-macro2", "quote", "similar-asserts", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "reth-config" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-primitives", "eyre", @@ -6695,39 +6933,44 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "auto_impl", - "derive_more 1.0.0", + "derive_more", "reth-primitives", + "reth-primitives-traits", ] [[package]] name = "reth-consensus-common" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "mockall", "rand 0.8.5", "reth-chainspec", "reth-consensus", "reth-primitives", + "reth-primitives-traits", "reth-storage-api", "revm-primitives", ] [[package]] name = "reth-consensus-debug-client" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-provider", - "alloy-rpc-types", "alloy-rpc-types-engine", + "alloy-rpc-types-eth", "auto_impl", "eyre", "futures", @@ -6743,23 +6986,23 @@ dependencies = [ 
[[package]] name = "reth-db" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-primitives", "arbitrary", "assert_matches", "bytes", "criterion", - "derive_more 1.0.0", + "derive_more", "eyre", "iai-callgrind", "metrics", "page_size", - "parking_lot 0.12.3", + "parking_lot", "paste", "pprof", "proptest", - "rand 0.8.5", "reth-db-api", "reth-fs-util", "reth-libmdbx", @@ -6772,25 +7015,26 @@ dependencies = [ "reth-storage-errors", "reth-tracing", "reth-trie-common", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", "serde_json", "strum", "sysinfo", "tempfile", "test-fuzz", - "thiserror", + "thiserror 2.0.5", ] [[package]] name = "reth-db-api" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-genesis", "alloy-primitives", "arbitrary", "bytes", - "derive_more 1.0.0", + "derive_more", "metrics", "modular-bitfield", "parity-scale-codec", @@ -6805,14 +7049,16 @@ dependencies = [ "reth-stages-types", "reth-storage-errors", "reth-trie-common", + "roaring", "serde", "test-fuzz", ] [[package]] name = "reth-db-common" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-genesis", "alloy-primitives", "boyer-moore-magiclen", @@ -6833,14 +7079,15 @@ dependencies = [ "reth-trie-db", "serde", "serde_json", - "thiserror", + "thiserror 2.0.5", "tracing", ] [[package]] name = "reth-db-models" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-eips", "alloy-primitives", "arbitrary", "bytes", @@ -6848,14 +7095,14 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "reth-codecs", - "reth-primitives", + "reth-primitives-traits", "serde", "test-fuzz", ] [[package]] name = "reth-discv4" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6863,7 +7110,8 @@ dependencies = [ "discv5", "enr", "generic-array", - "parking_lot 0.12.3", + "itertools 0.13.0", + "parking_lot", "rand 0.8.5", "reth-ethereum-forks", "reth-net-banlist", @@ -6873,7 +7121,7 @@ dependencies = [ "schnellru", "secp256k1", "serde", - "thiserror", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -6881,11 +7129,11 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", - "derive_more 1.0.0", + "derive_more", "discv5", "enr", "futures", @@ -6898,22 +7146,23 @@ dependencies = [ "reth-network-peers", "reth-tracing", "secp256k1", - "thiserror", + "thiserror 2.0.5", "tokio", "tracing", ] [[package]] name = "reth-dns-discovery" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-primitives", "alloy-rlp", "data-encoding", "enr", + "hickory-resolver", "linked_hash_set", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "reth-chainspec", "reth-ethereum-forks", @@ -6924,17 +7173,17 @@ dependencies = [ "secp256k1", "serde", "serde_with", - "thiserror", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", - "trust-dns-resolver", ] [[package]] name = "reth-downloaders" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", @@ -6955,13 +7204,14 @@ dependencies = [ "reth-network-p2p", "reth-network-peers", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-storage-api", "reth-tasks", "reth-testing-utils", "reth-tracing", "tempfile", - "thiserror", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -6970,46 +7220,55 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version 
= "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-network", "alloy-primitives", - "alloy-rpc-types", + "alloy-rlp", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", "alloy-signer", "alloy-signer-local", + "derive_more", "eyre", "futures-util", "jsonrpsee", - "jsonrpsee-types", "op-alloy-rpc-types-engine", - "reth", "reth-chainspec", "reth-db", "reth-engine-local", + "reth-network", + "reth-network-api", "reth-network-peers", + "reth-node-api", "reth-node-builder", - "reth-node-ethereum", + "reth-node-core", + "reth-optimism-primitives", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", "reth-provider", - "reth-rpc", + "reth-rpc-api", + "reth-rpc-eth-api", "reth-rpc-layer", - "reth-rpc-types-compat", + "reth-rpc-server-types", "reth-stages-types", + "reth-tasks", "reth-tokio-util", "reth-tracing", "serde_json", "tokio", "tokio-stream", "tracing", + "url", ] [[package]] name = "reth-ecies" -version = "1.1.0" +version = "1.1.4" dependencies = [ "aes", "alloy-primitives", @@ -7029,7 +7288,7 @@ dependencies = [ "secp256k1", "sha2 0.10.8", "sha3", - "thiserror", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -7039,8 +7298,9 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-engine", "eyre", @@ -7054,9 +7314,10 @@ dependencies = [ "reth-engine-tree", "reth-ethereum-engine-primitives", "reth-evm", + "reth-node-types", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", - "reth-payload-validator", "reth-provider", "reth-prune", "reth-rpc-types-compat", @@ -7069,25 +7330,34 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-primitives", + "alloy-rpc-types-engine", + "futures", + "reth-errors", "reth-execution-types", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", + "reth-primitives-traits", "reth-trie", "serde", + "thiserror 2.0.5", + "tokio", ] [[package]] name = "reth-engine-service" -version = "1.1.0" +version = "1.1.4" dependencies = [ "futures", "pin-project", "reth-beacon-consensus", "reth-chainspec", "reth-consensus", + "reth-engine-primitives", "reth-engine-tree", "reth-ethereum-engine-primitives", "reth-evm", @@ -7096,29 +7366,32 @@ dependencies = [ "reth-network-p2p", "reth-node-types", "reth-payload-builder", - "reth-payload-validator", "reth-primitives", "reth-provider", "reth-prune", "reth-stages-api", "reth-tasks", - "thiserror", + "thiserror 2.0.5", "tokio", "tokio-stream", ] [[package]] name = "reth-engine-tree" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-engine", "assert_matches", + "criterion", + "crossbeam-channel", "futures", "metrics", "rand 0.8.5", + "rayon", "reth-beacon-consensus", "reth-blockchain-tree", "reth-blockchain-tree-api", @@ -7134,9 +7407,10 @@ dependencies = [ "reth-metrics", "reth-network-p2p", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", - "reth-payload-validator", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune", "reth-prune-types", @@ -7146,26 +7420,31 @@ dependencies = [ "reth-stages-api", "reth-static-file", "reth-tasks", + "reth-testing-utils", "reth-tracing", "reth-trie", + "reth-trie-db", 
"reth-trie-parallel", - "thiserror", + "reth-trie-sparse", + "revm-primitives", + "thiserror 2.0.5", "tokio", "tracing", ] [[package]] name = "reth-engine-util" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "eyre", "futures", "itertools 0.13.0", "pin-project", - "reth-beacon-consensus", + "reth-consensus-common", "reth-engine-primitives", "reth-errors", "reth-ethereum-forks", @@ -7187,45 +7466,47 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.1.0" +version = "1.1.4" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", "reth-execution-errors", "reth-fs-util", "reth-storage-errors", - "thiserror", + "thiserror 2.0.5", ] [[package]] name = "reth-eth-wire" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-chains", "alloy-eips", "alloy-primitives", "alloy-rlp", "arbitrary", "async-stream", "bytes", - "derive_more 1.0.0", + "derive_more", "futures", "pin-project", "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "reth-chainspec", "reth-codecs", "reth-ecies", "reth-eth-wire-types", + "reth-ethereum-forks", "reth-metrics", "reth-network-peers", "reth-primitives", + "reth-primitives-traits", "reth-tracing", "secp256k1", "serde", "snap", "test-fuzz", - "thiserror", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -7234,7 +7515,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7244,20 +7525,22 @@ dependencies = [ "alloy-rlp", "arbitrary", "bytes", - "derive_more 1.0.0", + "derive_more", "proptest", "proptest-arbitrary-interop", "rand 0.8.5", "reth-chainspec", "reth-codecs-derive", + "reth-ethereum-forks", "reth-primitives", + "reth-primitives-traits", "serde", - "thiserror", + "thiserror 2.0.5", ] [[package]] name = "reth-ethereum-cli" -version = "1.1.0" +version = "1.1.4" dependencies = [ "clap", "eyre", @@ -7268,19 +7551,22 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "reth-chainspec", "reth-consensus", "reth-consensus-common", "reth-primitives", + "reth-primitives-traits", "tracing", ] [[package]] name = "reth-ethereum-engine-primitives" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7290,6 +7576,7 @@ dependencies = [ "reth-chainspec", "reth-engine-primitives", "reth-payload-primitives", + "reth-payload-validator", "reth-primitives", "reth-rpc-types-compat", "serde", @@ -7299,9 +7586,10 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-chains", + "alloy-consensus", "alloy-primitives", "alloy-rlp", "arbitrary", @@ -7311,15 +7599,17 @@ dependencies = [ "once_cell", "proptest", "proptest-derive", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "serde", - "thiserror-no-std", + "thiserror 2.0.5", ] [[package]] name = "reth-ethereum-payload-builder" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "reth-basic-payload-builder", "reth-chain-state", @@ -7329,20 +7619,23 @@ dependencies = [ "reth-evm-ethereum", "reth-execution-types", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", "reth-provider", "reth-revm", "reth-transaction-pool", - "reth-trie", "revm", - "revm-primitives", 
"tracing", ] +[[package]] +name = "reth-ethereum-primitives" +version = "1.1.4" + [[package]] name = "reth-etl" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-primitives", "rayon", @@ -7352,16 +7645,20 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "auto_impl", "futures-util", "metrics", - "parking_lot 0.12.3", + "metrics-util", + "parking_lot", "reth-chainspec", "reth-consensus", + "reth-consensus-common", + "reth-ethereum-forks", "reth-execution-errors", "reth-execution-types", "reth-metrics", @@ -7376,7 +7673,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7390,7 +7687,6 @@ dependencies = [ "reth-evm", "reth-execution-types", "reth-primitives", - "reth-prune-types", "reth-revm", "reth-testing-utils", "revm-primitives", @@ -7400,23 +7696,24 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", - "derive_more 1.0.0", "nybbles", "reth-consensus", "reth-prune-types", "reth-storage-errors", "revm-primitives", + "thiserror 2.0.5", ] [[package]] name = "reth-execution-types" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "arbitrary", @@ -7424,7 +7721,9 @@ dependencies = [ "rand 0.8.5", "reth-execution-errors", "reth-primitives", + "reth-primitives-traits", "reth-trie", + "reth-trie-common", "revm", "serde", "serde_with", @@ -7432,7 +7731,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7442,13 +7741,12 @@ dependencies = [ "futures", "itertools 0.13.0", "metrics", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "reth-blockchain-tree", "reth-chain-state", "reth-chainspec", "reth-config", - "reth-db-api", "reth-db-common", "reth-evm", "reth-evm-ethereum", @@ -7457,7 +7755,6 @@ dependencies = [ "reth-metrics", "reth-node-api", "reth-node-core", - "reth-payload-builder", "reth-primitives", "reth-primitives-traits", "reth-provider", @@ -7477,8 +7774,9 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-eips", "eyre", "futures-util", "rand 0.8.5", @@ -7488,7 +7786,6 @@ dependencies = [ "reth-consensus", "reth-db", "reth-db-common", - "reth-ethereum-engine-primitives", "reth-evm", "reth-execution-types", "reth-exex", @@ -7502,14 +7799,15 @@ dependencies = [ "reth-provider", "reth-tasks", "reth-transaction-pool", + "reth-trie-db", "tempfile", - "thiserror", + "thiserror 2.0.5", "tokio", ] [[package]] name = "reth-exex-types" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7519,23 +7817,25 @@ dependencies = [ "reth-chain-state", "reth-execution-types", "reth-primitives", + "reth-primitives-traits", "serde", "serde_with", ] [[package]] name = "reth-fs-util" -version = "1.1.0" +version = "1.1.4" dependencies = [ "serde", "serde_json", - "thiserror", + "thiserror 2.0.5", ] [[package]] name = "reth-invalid-block-hooks" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-debug", @@ -7547,6 +7847,7 @@ dependencies = [ "reth-engine-primitives", "reth-evm", "reth-primitives", + "reth-primitives-traits", "reth-provider", 
"reth-revm", "reth-rpc-api", @@ -7558,7 +7859,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.1.0" +version = "1.1.4" dependencies = [ "async-trait", "bytes", @@ -7570,7 +7871,7 @@ dependencies = [ "rand 0.8.5", "reth-tracing", "serde_json", - "thiserror", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -7580,28 +7881,28 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.1.0" +version = "1.1.4" dependencies = [ "bitflags 2.6.0", "byteorder", "criterion", "dashmap 6.1.0", - "derive_more 1.0.0", - "indexmap 2.6.0", - "parking_lot 0.12.3", + "derive_more", + "indexmap 2.7.0", + "parking_lot", "pprof", "rand 0.8.5", "rand_xorshift", "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror", + "thiserror 2.0.5", "tracing", ] [[package]] name = "reth-mdbx-sys" -version = "1.1.0" +version = "1.1.4" dependencies = [ "bindgen", "cc", @@ -7609,7 +7910,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.1.0" +version = "1.1.4" dependencies = [ "futures", "metrics", @@ -7620,28 +7921,28 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.1.0" +version = "1.1.4" dependencies = [ "futures-util", "if-addrs", "reqwest", "reth-tracing", "serde_with", - "thiserror", + "thiserror 2.0.5", "tokio", "tracing", ] [[package]] name = "reth-network" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7652,13 +7953,13 @@ dependencies = [ "aquamarine", "auto_impl", "criterion", - "derive_more 1.0.0", + "derive_more", "discv5", "enr", "futures", "itertools 0.13.0", "metrics", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "pprof", "rand 0.8.5", @@ -7669,6 +7970,8 @@ dependencies = [ "reth-dns-discovery", "reth-ecies", "reth-eth-wire", + "reth-eth-wire-types", + "reth-ethereum-forks", "reth-fs-util", "reth-metrics", "reth-net-banlist", @@ -7678,20 +7981,21 @@ dependencies = [ "reth-network-peers", "reth-network-types", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-storage-api", "reth-tasks", "reth-tokio-util", "reth-tracing", "reth-transaction-pool", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "schnellru", "secp256k1", "serde", "serial_test", "smallvec", "tempfile", - "thiserror", + "thiserror 2.0.5", "tokio", "tokio-stream", "tokio-util", @@ -7701,12 +8005,12 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", "auto_impl", - "derive_more 1.0.0", + "derive_more", "enr", "futures", "reth-eth-wire-types", @@ -7716,26 +8020,28 @@ dependencies = [ "reth-network-types", "reth-tokio-util", "serde", - "thiserror", + "thiserror 2.0.5", "tokio", "tokio-stream", ] [[package]] name = "reth-network-p2p" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "auto_impl", - "derive_more 1.0.0", + "derive_more", "futures", - "parking_lot 0.12.3", + "parking_lot", "reth-consensus", "reth-eth-wire-types", "reth-network-peers", "reth-network-types", "reth-primitives", + "reth-primitives-traits", "reth-storage-errors", "tokio", "tracing", @@ -7743,7 +8049,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7752,14 +8058,14 @@ dependencies = [ "secp256k1", "serde_json", "serde_with", - "thiserror", + "thiserror 
2.0.5", "tokio", "url", ] [[package]] name = "reth-network-types" -version = "1.1.0" +version = "1.1.4" dependencies = [ "humantime-serde", "reth-ethereum-forks", @@ -7772,25 +8078,25 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.1.0" +version = "1.1.4" dependencies = [ "anyhow", "bincode", - "derive_more 1.0.0", + "derive_more", "lz4_flex", "memmap2", "rand 0.8.5", "reth-fs-util", "serde", "tempfile", - "thiserror", + "thiserror 2.0.5", "tracing", "zstd", ] [[package]] name = "reth-node-api" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -7801,19 +8107,18 @@ dependencies = [ "reth-network-api", "reth-node-core", "reth-node-types", - "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", - "reth-primitives", "reth-provider", - "reth-rpc-eth-api", "reth-tasks", "reth-transaction-pool", ] [[package]] name = "reth-node-builder" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types", "aquamarine", @@ -7822,7 +8127,6 @@ dependencies = [ "futures", "jsonrpsee", "rayon", - "reth-auto-seal-consensus", "reth-beacon-consensus", "reth-blockchain-tree", "reth-chain-state", @@ -7851,7 +8155,6 @@ dependencies = [ "reth-node-events", "reth-node-metrics", "reth-payload-builder", - "reth-payload-primitives", "reth-payload-validator", "reth-primitives", "reth-provider", @@ -7868,6 +8171,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "reth-transaction-pool", + "revm-primitives", "secp256k1", "tempfile", "tokio", @@ -7877,13 +8181,15 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "clap", "const_format", - "derive_more 1.0.0", + "derive_more", "dirs-next", "eyre", "futures", @@ -7893,18 +8199,18 @@ dependencies = [ "reth-chainspec", "reth-cli-util", "reth-config", - "reth-consensus-common", + "reth-consensus", "reth-db", "reth-discv4", "reth-discv5", + "reth-ethereum-forks", "reth-net-nat", "reth-network", "reth-network-p2p", "reth-network-peers", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", - "reth-rpc-api", - "reth-rpc-eth-api", "reth-rpc-eth-types", "reth-rpc-server-types", "reth-rpc-types-compat", @@ -7917,8 +8223,7 @@ dependencies = [ "serde", "shellexpand", "strum", - "tempfile", - "thiserror", + "thiserror 2.0.5", "tokio", "toml", "tracing", @@ -7927,15 +8232,22 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-contract", + "alloy-eips", "alloy-genesis", "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-beacon", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", + "alloy-signer", + "alloy-sol-types", "eyre", "futures", - "futures-util", - "reth", - "reth-auto-seal-consensus", + "rand 0.8.5", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-chainspec", @@ -7944,6 +8256,7 @@ dependencies = [ "reth-e2e-test-utils", "reth-ethereum-engine-primitives", "reth-ethereum-payload-builder", + "reth-evm", "reth-evm-ethereum", "reth-exex", "reth-network", @@ -7951,13 +8264,16 @@ dependencies = [ "reth-node-builder", "reth-node-core", "reth-payload-builder", + "reth-payload-primitives", "reth-primitives", "reth-provider", "reth-revm", "reth-rpc", + "reth-rpc-eth-api", "reth-tasks", "reth-tracing", "reth-transaction-pool", + "reth-trie-db", "revm", "serde_json", 
"tokio", @@ -7965,45 +8281,43 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "futures", "humantime", "pin-project", "reth-beacon-consensus", - "reth-network", + "reth-engine-primitives", "reth-network-api", - "reth-primitives", "reth-primitives-traits", - "reth-provider", - "reth-prune", + "reth-prune-types", "reth-stages", - "reth-static-file", + "reth-static-file-types", + "reth-storage-api", "tokio", "tracing", ] [[package]] name = "reth-node-metrics" -version = "1.1.0" +version = "1.1.4" dependencies = [ "eyre", "http", - "jsonrpsee", + "jsonrpsee-server", "metrics", "metrics-exporter-prometheus", "metrics-process", "metrics-util", "procfs 0.16.0", "reqwest", - "reth-chainspec", - "reth-db-api", "reth-metrics", - "reth-provider", "reth-tasks", - "socket2 0.5.7", + "socket2", "tikv-jemalloc-ctl", "tokio", "tower 0.4.13", @@ -8013,21 +8327,25 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.1.0" +version = "1.1.4" dependencies = [ "reth-chainspec", "reth-db-api", "reth-engine-primitives", + "reth-primitives-traits", + "reth-trie-db", ] [[package]] name = "reth-optimism-chainspec" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-chains", + "alloy-consensus", + "alloy-eips", "alloy-genesis", "alloy-primitives", - "derive_more 1.0.0", + "derive_more", "once_cell", "op-alloy-rpc-types", "reth-chainspec", @@ -8040,13 +8358,18 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", "clap", + "derive_more", "eyre", "futures-util", + "op-alloy-consensus", + "proptest", "reth-chainspec", "reth-cli", "reth-cli-commands", @@ -8059,6 +8382,7 @@ dependencies = [ "reth-downloaders", "reth-errors", "reth-execution-types", + "reth-fs-util", "reth-network-p2p", "reth-node-builder", "reth-node-core", @@ -8076,6 +8400,7 @@ dependencies = [ "reth-static-file", "reth-static-file-types", "reth-tracing", + "serde", "tempfile", "tokio", "tokio-util", @@ -8084,14 +8409,18 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", + "alloy-trie", "reth-chainspec", "reth-consensus", "reth-consensus-common", "reth-optimism-chainspec", "reth-optimism-forks", + "reth-optimism-primitives", "reth-primitives", "reth-trie-common", "tracing", @@ -8099,15 +8428,17 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-genesis", "alloy-primitives", + "derive_more", "op-alloy-consensus", "reth-chainspec", "reth-consensus", + "reth-consensus-common", "reth-ethereum-forks", "reth-evm", "reth-execution-errors", @@ -8115,18 +8446,19 @@ dependencies = [ "reth-optimism-chainspec", "reth-optimism-consensus", "reth-optimism-forks", + "reth-optimism-primitives", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-revm", "revm", "revm-primitives", - "thiserror", "tracing", ] [[package]] name = "reth-optimism-forks" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8137,67 +8469,69 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-genesis", + 
"alloy-network", "alloy-primitives", "alloy-rpc-types-engine", - "async-trait", + "alloy-signer-local", "clap", "eyre", - "jsonrpsee", - "jsonrpsee-types", + "futures", "op-alloy-consensus", "op-alloy-rpc-types-engine", - "parking_lot 0.12.3", - "reqwest", - "reth", - "reth-auto-seal-consensus", + "parking_lot", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-chainspec", "reth-consensus", "reth-db", - "reth-discv5", "reth-e2e-test-utils", "reth-engine-local", "reth-evm", "reth-network", "reth-node-api", "reth-node-builder", + "reth-node-core", "reth-optimism-chainspec", "reth-optimism-consensus", "reth-optimism-evm", "reth-optimism-forks", + "reth-optimism-node", "reth-optimism-payload-builder", + "reth-optimism-primitives", "reth-optimism-rpc", "reth-payload-builder", + "reth-payload-util", + "reth-payload-validator", "reth-primitives", "reth-provider", "reth-revm", - "reth-rpc", - "reth-rpc-eth-api", - "reth-rpc-eth-types", - "reth-rpc-types-compat", + "reth-rpc-server-types", + "reth-tasks", "reth-tracing", "reth-transaction-pool", + "reth-trie-db", "revm", "serde", "serde_json", - "thiserror", "tokio", - "tracing", ] [[package]] name = "reth-optimism-payload-builder" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", + "alloy-rpc-types-debug", "alloy-rpc-types-engine", + "op-alloy-consensus", "op-alloy-rpc-types-engine", "reth-basic-payload-builder", "reth-chain-state", @@ -8209,45 +8543,61 @@ dependencies = [ "reth-optimism-evm", "reth-optimism-forks", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", + "reth-payload-util", "reth-primitives", "reth-provider", "reth-revm", "reth-rpc-types-compat", "reth-transaction-pool", - "reth-trie", "revm", - "revm-primitives", "sha2 0.10.8", - "thiserror", + "thiserror 2.0.5", "tracing", ] [[package]] name = "reth-optimism-primitives" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", + "alloy-rlp", + "arbitrary", + "bytes", + "derive_more", + "op-alloy-consensus", + "proptest", + "proptest-arbitrary-interop", + "rand 0.8.5", + "reth-codecs", "reth-primitives", "reth-primitives-traits", + "revm-primitives", + "rstest", + "secp256k1", + "serde", ] [[package]] name = "reth-optimism-rpc" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", - "alloy-rpc-types", + "alloy-rpc-types-debug", "alloy-rpc-types-eth", - "derive_more 1.0.0", + "jsonrpsee-core", "jsonrpsee-types", "op-alloy-consensus", "op-alloy-network", + "op-alloy-rpc-jsonrpsee", "op-alloy-rpc-types", - "parking_lot 0.12.3", + "op-alloy-rpc-types-engine", + "parking_lot", "reqwest", "reth-chainspec", "reth-evm", @@ -8258,9 +8608,12 @@ dependencies = [ "reth-optimism-consensus", "reth-optimism-evm", "reth-optimism-forks", + "reth-optimism-payload-builder", + "reth-optimism-primitives", "reth-primitives", "reth-provider", "reth-rpc", + "reth-rpc-api", "reth-rpc-eth-api", "reth-rpc-eth-types", "reth-rpc-server-types", @@ -8268,14 +8621,14 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror", + "thiserror 2.0.5", "tokio", "tracing", ] [[package]] name = "reth-optimism-storage" -version = "1.1.0" +version = "1.1.4" dependencies = [ "reth-codecs", "reth-db-api", @@ -8286,8 +8639,9 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-primitives", 
"alloy-rpc-types", "async-trait", @@ -8296,39 +8650,58 @@ dependencies = [ "reth-chain-state", "reth-ethereum-engine-primitives", "reth-metrics", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", - "reth-provider", "revm", "tokio", "tokio-stream", "tracing", ] +[[package]] +name = "reth-payload-builder-primitives" +version = "1.1.4" +dependencies = [ + "alloy-rpc-types-engine", + "async-trait", + "pin-project", + "reth-payload-primitives", + "tokio", + "tokio-stream", + "tracing", +] + [[package]] name = "reth-payload-primitives" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-eips", "alloy-primitives", - "alloy-rpc-types", - "async-trait", + "alloy-rpc-types-engine", "op-alloy-rpc-types-engine", - "pin-project", "reth-chain-state", "reth-chainspec", "reth-errors", "reth-primitives", - "reth-transaction-pool", + "revm-primitives", "serde", - "thiserror", + "thiserror 2.0.5", "tokio", - "tokio-stream", - "tracing", +] + +[[package]] +name = "reth-payload-util" +version = "1.1.4" +dependencies = [ + "alloy-consensus", + "alloy-primitives", + "reth-primitives", ] [[package]] name = "reth-payload-validator" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-rpc-types", "reth-chainspec", @@ -8338,22 +8711,24 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-genesis", + "alloy-network", "alloy-primitives", "alloy-rlp", "alloy-rpc-types", "alloy-serde", + "alloy-trie", "arbitrary", "assert_matches", "bincode", "bytes", "c-kzg", "criterion", - "derive_more 1.0.0", + "derive_more", "k256", "modular-bitfield", "once_cell", @@ -8367,23 +8742,23 @@ dependencies = [ "reth-chainspec", "reth-codecs", "reth-ethereum-forks", - "reth-optimism-chainspec", "reth-primitives-traits", "reth-static-file-types", "reth-testing-utils", "reth-trie-common", + "reth-zstd-compressors", "revm-primitives", + "rstest", "secp256k1", "serde", "serde_json", "serde_with", "test-fuzz", - "zstd", ] [[package]] name = "reth-primitives-traits" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8391,18 +8766,18 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "arbitrary", + "auto_impl", "bincode", "byteorder", "bytes", - "derive_more 1.0.0", + "derive_more", "modular-bitfield", + "op-alloy-consensus", "proptest", "proptest-arbitrary-interop", "rand 0.8.5", "reth-codecs", - "reth-testing-utils", "revm-primitives", - "roaring", "serde", "serde_json", "serde_with", @@ -8411,7 +8786,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8424,7 +8799,7 @@ dependencies = [ "itertools 0.13.0", "metrics", "notify", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "rayon", "reth-blockchain-tree-api", @@ -8444,6 +8819,7 @@ dependencies = [ "reth-node-types", "reth-optimism-primitives", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-stages-types", "reth-storage-api", @@ -8460,8 +8836,10 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", "assert_matches", "itertools 0.13.0", @@ -8474,6 +8852,7 @@ dependencies = [ "reth-errors", "reth-exex-types", "reth-metrics", + "reth-primitives-traits", "reth-provider", "reth-prune-types", "reth-stages", @@ -8481,21 +8860,21 @@ dependencies = [ "reth-testing-utils", 
"reth-tokio-util", "reth-tracing", - "rustc-hash 2.0.0", - "thiserror", + "rustc-hash 2.1.0", + "thiserror 2.0.5", "tokio", "tracing", ] [[package]] name = "reth-prune-types" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-primitives", "arbitrary", "assert_matches", "bytes", - "derive_more 1.0.0", + "derive_more", "modular-bitfield", "proptest", "proptest-arbitrary-interop", @@ -8503,20 +8882,21 @@ dependencies = [ "serde", "serde_json", "test-fuzz", - "thiserror", + "thiserror 2.0.5", "toml", ] [[package]] name = "reth-revm" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", - "reth-chainspec", - "reth-consensus-common", "reth-ethereum-forks", "reth-execution-errors", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-storage-api", "reth-storage-errors", @@ -8526,7 +8906,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -8537,7 +8917,9 @@ dependencies = [ "alloy-rlp", "alloy-rpc-types", "alloy-rpc-types-admin", + "alloy-rpc-types-beacon", "alloy-rpc-types-debug", + "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-rpc-types-mev", "alloy-rpc-types-trace", @@ -8546,7 +8928,7 @@ dependencies = [ "alloy-signer", "alloy-signer-local", "async-trait", - "derive_more 1.0.0", + "derive_more", "futures", "http", "http-body", @@ -8554,19 +8936,22 @@ dependencies = [ "jsonrpsee", "jsonrpsee-types", "jsonwebtoken", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "rand 0.8.5", "reth-chainspec", + "reth-consensus", "reth-consensus-common", + "reth-engine-primitives", "reth-errors", + "reth-ethereum-consensus", "reth-evm", "reth-evm-ethereum", "reth-network-api", "reth-network-peers", "reth-network-types", - "reth-node-api", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-api", @@ -8578,13 +8963,12 @@ dependencies = [ "reth-tasks", "reth-testing-utils", "reth-transaction-pool", - "reth-trie", "revm", "revm-inspectors", "revm-primitives", "serde", "serde_json", - "thiserror", + "thiserror 2.0.5", "tokio", "tokio-stream", "tower 0.4.13", @@ -8594,7 +8978,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -8613,17 +8997,15 @@ dependencies = [ "jsonrpsee", "reth-engine-primitives", "reth-network-peers", - "reth-primitives", "reth-rpc-eth-api", - "serde_json", ] [[package]] name = "reth-rpc-api-testing-util" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-eips", "alloy-primitives", - "alloy-rpc-types", "alloy-rpc-types-eth", "alloy-rpc-types-trace", "futures", @@ -8639,15 +9021,13 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.1.0" +version = "1.1.4" dependencies = [ - "alloy-network", + "alloy-eips", "alloy-primitives", - "alloy-rpc-types", "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-rpc-types-trace", - "alloy-serde", "clap", "http", "jsonrpsee", @@ -8655,6 +9035,7 @@ dependencies = [ "pin-project", "reth-beacon-consensus", "reth-chainspec", + "reth-consensus", "reth-engine-primitives", "reth-ethereum-engine-primitives", "reth-evm", @@ -8663,7 +9044,6 @@ dependencies = [ "reth-metrics", "reth-network-api", "reth-network-peers", - "reth-node-api", "reth-node-core", "reth-payload-builder", "reth-primitives", @@ -8677,13 +9057,13 @@ dependencies = [ "reth-rpc-server-types", "reth-rpc-types-compat", "reth-tasks", - 
"reth-tokio-util", "reth-tracing", "reth-transaction-pool", "serde", "serde_json", - "thiserror", + "thiserror 2.0.5", "tokio", + "tokio-util", "tower 0.4.13", "tower-http", "tracing", @@ -8691,7 +9071,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8702,6 +9082,7 @@ dependencies = [ "jsonrpsee-core", "jsonrpsee-types", "metrics", + "parking_lot", "reth-beacon-consensus", "reth-chainspec", "reth-engine-primitives", @@ -8709,6 +9090,7 @@ dependencies = [ "reth-evm", "reth-metrics", "reth-payload-builder", + "reth-payload-builder-primitives", "reth-payload-primitives", "reth-primitives", "reth-provider", @@ -8720,14 +9102,14 @@ dependencies = [ "reth-tokio-util", "reth-transaction-pool", "serde", - "thiserror", + "thiserror 2.0.5", "tokio", "tracing", ] [[package]] name = "reth-rpc-eth-api" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -8735,22 +9117,24 @@ dependencies = [ "alloy-json-rpc", "alloy-network", "alloy-primitives", - "alloy-rpc-types", + "alloy-rlp", "alloy-rpc-types-eth", "alloy-rpc-types-mev", + "alloy-serde", "async-trait", "auto_impl", "dyn-clone", "futures", "jsonrpsee", "jsonrpsee-types", - "parking_lot 0.12.3", + "parking_lot", "reth-chainspec", "reth-errors", "reth-evm", - "reth-execution-types", "reth-network-api", + "reth-node-api", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-eth-types", @@ -8758,7 +9142,7 @@ dependencies = [ "reth-rpc-types-compat", "reth-tasks", "reth-transaction-pool", - "reth-trie", + "reth-trie-common", "revm", "revm-inspectors", "revm-primitives", @@ -8768,16 +9152,14 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", - "alloy-rpc-types", "alloy-rpc-types-eth", - "alloy-serde", "alloy-sol-types", - "derive_more 1.0.0", + "derive_more", "futures", "itertools 0.13.0", "jsonrpsee-core", @@ -8787,10 +9169,10 @@ dependencies = [ "reth-chain-state", "reth-chainspec", "reth-errors", - "reth-evm", "reth-execution-types", "reth-metrics", "reth-primitives", + "reth-primitives-traits", "reth-revm", "reth-rpc-server-types", "reth-rpc-types-compat", @@ -8804,7 +9186,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -8812,55 +9194,59 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-rpc-types-engine", "http", + "http-body-util", "jsonrpsee", "jsonrpsee-http-client", "pin-project", "reqwest", "tokio", "tower 0.4.13", + "tower-http", "tracing", ] [[package]] name = "reth-rpc-server-types" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", "jsonrpsee-core", "jsonrpsee-types", "reth-errors", "reth-network-api", - "reth-primitives", "serde", "strum", ] [[package]] name = "reth-rpc-types-compat" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types", "alloy-rpc-types-engine", "alloy-rpc-types-eth", - "alloy-serde", + "jsonrpsee-types", "reth-primitives", - "reth-trie-common", + "reth-primitives-traits", + "serde", "serde_json", ] [[package]] name = "reth-stages" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", + "alloy-eips", 
"alloy-primitives", "alloy-rlp", "assert_matches", @@ -8900,17 +9286,17 @@ dependencies = [ "reth-testing-utils", "reth-trie", "reth-trie-db", - "serde_json", "tempfile", - "thiserror", + "thiserror 2.0.5", "tokio", "tracing", ] [[package]] name = "reth-stages-api" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-eips", "alloy-primitives", "aquamarine", "assert_matches", @@ -8929,7 +9315,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", - "thiserror", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -8937,7 +9323,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-primitives", "arbitrary", @@ -8954,17 +9340,16 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-primitives", "assert_matches", - "parking_lot 0.12.3", + "parking_lot", "rayon", - "reth-chainspec", + "reth-codecs", "reth-db", "reth-db-api", - "reth-nippy-jar", - "reth-node-types", + "reth-primitives-traits", "reth-provider", "reth-prune-types", "reth-stages", @@ -8979,48 +9364,55 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-primitives", "clap", - "derive_more 1.0.0", + "derive_more", "serde", "strum", ] [[package]] name = "reth-storage-api" -version = "1.1.0" +version = "1.1.4" dependencies = [ + "alloy-consensus", "alloy-eips", "alloy-primitives", + "alloy-rpc-types-engine", "auto_impl", "reth-chainspec", + "reth-db", "reth-db-api", "reth-db-models", "reth-execution-types", "reth-primitives", + "reth-primitives-traits", "reth-prune-types", "reth-stages-types", "reth-storage-errors", "reth-trie", + "reth-trie-db", + "revm", ] [[package]] name = "reth-storage-errors" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", - "derive_more 1.0.0", + "derive_more", "reth-fs-util", - "reth-primitives", + "reth-primitives-traits", + "reth-static-file-types", ] [[package]] name = "reth-tasks" -version = "1.1.0" +version = "1.1.4" dependencies = [ "auto_impl", "dyn-clone", @@ -9029,7 +9421,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror", + "thiserror 2.0.5", "tokio", "tracing", "tracing-futures", @@ -9037,7 +9429,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9045,12 +9437,13 @@ dependencies = [ "alloy-primitives", "rand 0.8.5", "reth-primitives", + "reth-primitives-traits", "secp256k1", ] [[package]] name = "reth-tokio-util" -version = "1.1.0" +version = "1.1.4" dependencies = [ "tokio", "tokio-stream", @@ -9059,7 +9452,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.1.0" +version = "1.1.4" dependencies = [ "clap", "eyre", @@ -9073,7 +9466,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9086,7 +9479,7 @@ dependencies = [ "criterion", "futures-util", "metrics", - "parking_lot 0.12.3", + "parking_lot", "paste", "pprof", "proptest", @@ -9098,19 +9491,21 @@ dependencies = [ "reth-execution-types", "reth-fs-util", "reth-metrics", + "reth-payload-util", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-storage-api", "reth-tasks", "reth-tracing", "revm", - "rustc-hash 2.0.0", + "rustc-hash 2.1.0", "schnellru", "serde", "serde_json", 
"smallvec", "tempfile", - "thiserror", + "thiserror 2.0.5", "tokio", "tokio-stream", "tracing", @@ -9118,48 +9513,49 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", + "alloy-eips", "alloy-primitives", "alloy-rlp", + "alloy-trie", "auto_impl", - "bincode", "criterion", - "derive_more 1.0.0", "itertools 0.13.0", "metrics", "proptest", "proptest-arbitrary-interop", "rayon", - "reth-chainspec", "reth-execution-errors", "reth-metrics", "reth-primitives", "reth-stages-types", "reth-storage-errors", "reth-trie-common", + "reth-trie-sparse", "revm", - "serde", "serde_json", - "serde_with", - "tokio", "tracing", "triehash", ] [[package]] name = "reth-trie-common" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-genesis", "alloy-primitives", "alloy-rlp", + "alloy-rpc-types-eth", + "alloy-serde", "alloy-trie", "arbitrary", + "bincode", "bytes", - "derive_more 1.0.0", + "criterion", + "derive_more", "hash-db", "itertools 0.13.0", "nybbles", @@ -9170,31 +9566,28 @@ dependencies = [ "reth-primitives-traits", "revm-primitives", "serde", + "serde_json", + "serde_with", ] [[package]] name = "reth-trie-db" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-consensus", "alloy-primitives", "alloy-rlp", - "auto_impl", - "derive_more 1.0.0", - "itertools 0.13.0", + "derive_more", "metrics", "proptest", "proptest-arbitrary-interop", - "rayon", "reth-chainspec", "reth-db", "reth-db-api", "reth-execution-errors", "reth-metrics", - "reth-node-types", "reth-primitives", "reth-provider", - "reth-stages-types", "reth-storage-errors", "reth-trie", "reth-trie-common", @@ -9202,20 +9595,18 @@ dependencies = [ "serde", "serde_json", "similar-asserts", - "tokio", - "tokio-stream", "tracing", "triehash", ] [[package]] name = "reth-trie-parallel" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", "criterion", - "derive_more 1.0.0", + "derive_more", "itertools 0.13.0", "metrics", "proptest", @@ -9223,42 +9614,54 @@ dependencies = [ "rand 0.8.5", "rayon", "reth-db", - "reth-db-api", "reth-execution-errors", "reth-metrics", "reth-primitives", "reth-provider", "reth-trie", + "reth-trie-common", "reth-trie-db", - "thiserror", + "thiserror 2.0.5", "tokio", "tracing", ] [[package]] name = "reth-trie-sparse" -version = "1.1.0" +version = "1.1.4" dependencies = [ "alloy-primitives", "alloy-rlp", + "arbitrary", "assert_matches", "criterion", "itertools 0.13.0", + "pretty_assertions", "proptest", - "rayon", - "reth-primitives", + "proptest-arbitrary-interop", + "rand 0.8.5", + "reth-execution-errors", + "reth-primitives-traits", + "reth-testing-utils", + "reth-tracing", "reth-trie", "reth-trie-common", "smallvec", - "thiserror", - "tracing", + "thiserror 2.0.5", +] + +[[package]] +name = "reth-zstd-compressors" +version = "1.1.4" +dependencies = [ + "zstd", ] [[package]] name = "revm" -version = "14.0.3" +version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "641702b12847f9ed418d552f4fcabe536d867a2c980e96b6e7e25d7b992f929f" +checksum = "15689a3c6a8d14b647b4666f2e236ef47b5a5133cdfd423f545947986fff7013" dependencies = [ "auto_impl", "cfg-if", @@ -9271,9 +9674,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.8.1" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43c44af0bf801f48d25f7baf25cf72aff4c02d610f83b428175228162fef0246" +checksum = 
"0b7f5f8a2deafb3c76f357bbf9e71b73bddb915c4994bbbe3208fbfbe8fc7f8e" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -9285,14 +9688,14 @@ dependencies = [ "colorchoice", "revm", "serde_json", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "revm-interpreter" -version = "10.0.3" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5e14002afae20b5bf1566f22316122f42f57517000e559c55b25bf7a49cba2" +checksum = "74e3f11d0fed049a4a10f79820c59113a79b38aed4ebec786a79d5c667bfeb51" dependencies = [ "revm-primitives", "serde", @@ -9300,9 +9703,9 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "11.0.3" +version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3198c06247e8d4ad0d1312591edf049b0de4ddffa9fecb625c318fd67db8639b" +checksum = "e381060af24b750069a2b2d2c54bba273d84e8f5f9e8026fc9262298e26cc336" dependencies = [ "aurora-engine-modexp", "blst", @@ -9320,9 +9723,9 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "10.0.0" +version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f1525851a03aff9a9d6a1d018b414d76252d6802ab54695b27093ecd7e7a101" +checksum = "3702f132bb484f4f0d0ca4f6fbde3c82cfd745041abbedd6eda67730e1868ef0" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -9430,9 +9833,9 @@ dependencies = [ [[package]] name = "roaring" -version = "0.10.6" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4b84ba6e838ceb47b41de5194a60244fac43d9fe03b71dbe8c5a201081d6d1" +checksum = "395b0c39c00f9296f3937624c1fa4e0ee44f8c0e4b2c49408179ef381c6c2e6e" dependencies = [ "bytemuck", "byteorder", @@ -9453,6 +9856,36 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" +[[package]] +name = "rstest" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a2c585be59b6b5dd66a9d2084aa1d8bd52fbdb806eafdeffb52791147862035" +dependencies = [ + "futures", + "futures-timer", + "rstest_macros", + "rustc_version 0.4.1", +] + +[[package]] +name = "rstest_macros" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "825ea780781b15345a146be27eaefb05085e337e869bff01b4306a4fd4a9ad5a" +dependencies = [ + "cfg-if", + "glob", + "proc-macro-crate", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version 0.4.1", + "syn 2.0.90", + "unicode-ident", +] + [[package]] name = "ruint" version = "1.12.3" @@ -9498,9 +9931,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" dependencies = [ "rand 0.8.5", ] @@ -9531,9 +9964,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.6.0", "errno", @@ -9544,9 +9977,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.14" +version = 
"0.23.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" +checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" dependencies = [ "log", "once_cell", @@ -9567,20 +10000,19 @@ dependencies = [ "rustls-pemfile", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 2.11.1", ] [[package]] name = "rustls-native-certs" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" dependencies = [ "openssl-probe", - "rustls-pemfile", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 3.0.1", ] [[package]] @@ -9597,6 +10029,9 @@ name = "rustls-pki-types" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +dependencies = [ + "web-time", +] [[package]] name = "rustls-platform-verifier" @@ -9604,7 +10039,7 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "afbb878bdfdf63a336a5e63561b1835e7a8c91524f51621db870169eac84b490" dependencies = [ - "core-foundation", + "core-foundation 0.9.4", "core-foundation-sys", "jni", "log", @@ -9613,7 +10048,7 @@ dependencies = [ "rustls-native-certs 0.7.3", "rustls-platform-verifier-android", "rustls-webpki", - "security-framework", + "security-framework 2.11.1", "security-framework-sys", "webpki-roots", "winapi", @@ -9677,18 +10112,18 @@ dependencies = [ [[package]] name = "scc" -version = "2.2.2" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2c1f7fc6deb21665a9060dfc7d271be784669295a31babdcd4dd2c79ae8cbfb" +checksum = "66b202022bb57c049555430e11fc22fea12909276a80a4c3d368da36ac1d88ed" dependencies = [ "sdd", ] [[package]] name = "schannel" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ "windows-sys 0.59.0", ] @@ -9763,18 +10198,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.6.0", - "core-foundation", + "core-foundation 0.9.4", "core-foundation-sys", "libc", "num-bigint", "security-framework-sys", ] +[[package]] +name = "security-framework" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1415a607e92bec364ea2cf9264646dcce0f91e6d65281bd6f2819cca3bf39c8" +dependencies = [ + "bitflags 2.6.0", + "core-foundation 0.10.0", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + [[package]] name = "security-framework-sys" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" dependencies = [ "core-foundation-sys", "libc", @@ -9800,9 +10248,9 @@ dependencies = [ [[package]] name = "semver-parser" -version = "0.10.2" +version = "0.10.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +checksum = "9900206b54a3527fdc7b8a938bffd94a568bac4f4aa8113b209df75a09c0dec2" dependencies = [ "pest", ] @@ -9821,31 +10269,31 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.210" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "memchr", "ryu", @@ -9860,7 +10308,7 @@ checksum = "c7715380eec75f029a4ef7de39a9200e0a63823176b759d055b613f5a87df6a6" dependencies = [ "percent-encoding", "serde", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -9871,7 +10319,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -9905,7 +10353,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_derive", "serde_json", @@ -9922,30 +10370,30 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "serial_test" -version = "3.1.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" +checksum = "1b258109f244e1d6891bf1053a55d63a5cd4f8f4c30cf9a1280989f80e7a1fa9" dependencies = [ "once_cell", - "parking_lot 0.12.3", + "parking_lot", "scc", "serial_test_derive", ] [[package]] name = "serial_test_derive" -version = "3.1.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" +checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -10044,7 +10492,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" dependencies = [ "libc", - "mio 1.0.2", + "mio 1.0.3", "signal-hook", ] @@ -10072,6 +10520,10 @@ name = "similar" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" +dependencies = [ + "bstr", + "unicode-segmentation", +] [[package]] name = "similar-asserts" @@ -10080,6 +10532,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"cfe85670573cd6f0fa97940f26e7e6601213c3b0555246c24234131f88c5709e" dependencies = [ "console", + "serde", "similar", ] @@ -10091,7 +10544,7 @@ checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ "num-bigint", "num-traits", - "thiserror", + "thiserror 1.0.69", "time", ] @@ -10103,9 +10556,9 @@ checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" [[package]] name = "sketches-ddsketch" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85636c14b73d81f541e525f585c0a2109e6744e1565b5c1668e31c70c10ed65c" +checksum = "c1e9a774a6c28142ac54bb25d25562e6bcf957493a184f15ad4eebccb23e410a" [[package]] name = "slab" @@ -10134,19 +10587,9 @@ checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" [[package]] name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -10154,9 +10597,9 @@ dependencies = [ [[package]] name = "soketto" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" +checksum = "2e859df029d160cb88608f5d7df7fb4753fd20fdfb4de5644f3d8b8440841721" dependencies = [ "base64 0.22.1", "bytes", @@ -10173,9 +10616,6 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -dependencies = [ - "lock_api", -] [[package]] name = "spki" @@ -10236,7 +10676,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -10260,9 +10700,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "symbolic-common" -version = "12.12.0" +version = "12.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "366f1b4c6baf6cfefc234bbd4899535fca0b06c74443039a73f6dfb2fad88d77" +checksum = "e5ba5365997a4e375660bed52f5b42766475d5bc8ceb1bb13fea09c469ea0f49" dependencies = [ "debugid", "memmap2", @@ -10272,9 +10712,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.12.0" +version = "12.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba05ba5b9962ea5617baf556293720a8b2d0a282aa14ee4bf10e22efc7da8c8" +checksum = "beff338b2788519120f38c59ff4bb15174f52a183e547bac3d6072c2c0aa48aa" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -10294,9 +10734,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.79" +version = "2.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ "proc-macro2", "quote", @@ -10305,14 +10745,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.8.8" +version = "0.8.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebfc1bfd06acc78f16d8fd3ef846bc222ee7002468d10a7dce8d703d6eab89a3" +checksum = "da0523f59468a2696391f2a772edc089342aacd53c3caa2ac3264e598edf119b" dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -10323,9 +10763,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sync_wrapper" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" dependencies = [ "futures-core", ] @@ -10338,14 +10778,14 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "sysinfo" -version = "0.31.4" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "355dbe4f8799b304b05e1b0f05fc59b2a18d36645cf169607da45bde2f69a1be" +checksum = "4c33cd241af0f2e9e3b5c32163b873b29956890b5342e6745b917ce9d490f4af" dependencies = [ "core-foundation-sys", "libc", @@ -10354,6 +10794,12 @@ dependencies = [ "windows 0.57.0", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tap" version = "1.0.1" @@ -10362,12 +10808,12 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", - "fastrand 2.1.1", + "fastrand 2.2.0", "once_cell", "rustix", "windows-sys 0.59.0", @@ -10415,7 +10861,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -10439,42 +10885,42 @@ checksum = "a38c90d48152c236a3ab59271da4f4ae63d678c5d7ad6b7714d7cb9760be5e4b" [[package]] name = "thiserror" -version = "1.0.64" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", ] [[package]] -name = "thiserror-impl" -version = "1.0.64" +name = "thiserror" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "643caef17e3128658ff44d85923ef2d28af81bb71e0d67bbfe1d76f19a73e053" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.79", + "thiserror-impl 2.0.5", ] [[package]] -name = "thiserror-impl-no-std" -version = "2.0.2" +name = "thiserror-impl" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e6318948b519ba6dc2b442a6d0b904ebfb8d411a3ad3e07843615a72249758" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.90", ] [[package]] -name = "thiserror-no-std" -version = "2.0.2" +name = 
"thiserror-impl" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3ad459d94dd517257cc96add8a43190ee620011bb6e6cdc82dafd97dfafafea" +checksum = "995d0bbc9995d1f19d28b7215a9352b0fc3cd3a2d2ec95c2cadc485cdedbcdde" dependencies = [ - "thiserror-impl-no-std", + "proc-macro2", + "quote", + "syn 2.0.90", ] [[package]] @@ -10529,9 +10975,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -10553,9 +10999,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", @@ -10607,18 +11053,18 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.40.0" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", "libc", - "mio 1.0.2", - "parking_lot 0.12.3", + "mio 1.0.3", + "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2", "tokio-macros", "windows-sys 0.52.0", ] @@ -10631,25 +11077,24 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "tokio-rustls" -version = "0.26.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ "rustls", - "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -10675,9 +11120,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -10715,7 +11160,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -10759,12 +11204,12 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.5.2" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", - "base64 0.21.7", + "base64 0.22.1", "bitflags 2.6.0", "bytes", "futures-core", @@ -10781,7 +11226,7 @@ dependencies = [ "pin-project-lite", "tokio", "tokio-util", - "tower 0.4.13", + "tower 0.5.1", "tower-layer", "tower-service", "tracing", @@ -10802,9 +11247,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -10819,27 +11264,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ "crossbeam-channel", - "thiserror", + "thiserror 1.0.69", "time", "tracing-subscriber", ] [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -10857,9 +11302,9 @@ dependencies = [ [[package]] name = "tracing-journald" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba316a74e8fc3c3896a850dba2375928a9fa171b085ecddfc7c054d39970f3fd" +checksum = "fc0b4143302cf1022dac868d521e36e8b27691f72c84b3311750d5188ebba657" dependencies = [ "libc", "tracing-core", @@ -10891,9 +11336,9 @@ dependencies = [ [[package]] name = "tracing-serde" -version = "0.1.3" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" dependencies = [ "serde", "tracing-core", @@ -10901,9 +11346,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", @@ -10922,9 +11367,9 @@ dependencies = [ [[package]] name = "tracy-client" -version = "0.17.4" +version = "0.17.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "746b078c6a09ebfd5594609049e07116735c304671eaab06ce749854d23435bc" +checksum = "51e295eae54124872df35720dc3a5b1e827c7deee352b342ec7f7e626d0d0ef3" dependencies = [ "loom", "once_cell", @@ -10934,11 +11379,12 @@ dependencies = [ [[package]] name = "tracy-client-sys" -version = "0.24.1" +version = "0.24.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "68613466112302fdbeabc5fa55f7d57462a0b247d5a6b7d7e09401fb471a144d" +checksum = "3637e734239e12ab152cd269302500bd063f37624ee210cd04b4936ed671f3b1" dependencies = [ "cc", + "windows-targets 0.52.6", ] [[package]] @@ -10952,50 +11398,10 @@ dependencies = [ ] [[package]] -name = "trust-dns-proto" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3119112651c157f4488931a01e586aa459736e9d6046d3bd9105ffb69352d374" -dependencies = [ - "async-trait", - "cfg-if", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.4.0", - "ipnet", - "once_cell", - "rand 0.8.5", - "smallvec", - "thiserror", - "tinyvec", - "tokio", - "tracing", - "url", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.23.2" +name = "triomphe" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a3e6c3aff1718b3c73e395d1f35202ba2ffa847c6a62eea0db8fb4cfe30be6" -dependencies = [ - "cfg-if", - "futures-util", - "ipconfig", - "lru-cache", - "once_cell", - "parking_lot 0.12.3", - "rand 0.8.5", - "resolv-conf", - "smallvec", - "thiserror", - "tokio", - "tracing", - "trust-dns-proto", -] +checksum = "859eb650cfee7434994602c3a68b25d77ad9e68c8a6cd491616ef86661382eb3" [[package]] name = "try-lock" @@ -11019,7 +11425,7 @@ dependencies = [ "rustls", "rustls-pki-types", "sha1", - "thiserror", + "thiserror 1.0.69", "utf-8", ] @@ -11048,40 +11454,34 @@ dependencies = [ ] [[package]] -name = "unarray" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" - -[[package]] -name = "unicase" -version = "2.7.0" +name = "uint" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" dependencies = [ - "version_check", + "byteorder", + "crunchy", + "hex", + "static_assertions", ] [[package]] -name = "unicode-bidi" -version = "0.3.17" +name = "unarray" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] -name = "unicode-ident" -version = "1.0.13" +name = "unicase" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" +checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" [[package]] -name = "unicode-normalization" -version = "0.1.24" +name = "unicode-ident" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" -dependencies = [ - "tinyvec", -] +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-segmentation" @@ -11097,7 +11497,7 @@ checksum = "b3644627a5af5fa321c95b9b235a72fd24cd29c648c2c379431e6628655627bf" dependencies = [ "itertools 0.13.0", "unicode-segmentation", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -11106,6 +11506,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + [[package]] name = "unicode-xid" version = "0.2.6" @@ -11122,12 +11528,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "unsigned-varint" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" - [[package]] name = "unsigned-varint" version = "0.8.0" @@ -11142,12 +11542,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.2" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 0.5.0", + "idna", "percent-encoding", "serde", ] @@ -11178,9 +11578,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" dependencies = [ "getrandom 0.2.15", ] @@ -11211,6 +11611,17 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "visibility" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "wait-timeout" version = "0.2.0" @@ -11259,9 +11670,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" dependencies = [ "cfg-if", "once_cell", @@ -11270,36 +11681,36 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.45" +version = "0.4.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" +checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -11307,28 +11718,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" [[package]] name = "wasm-streams" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e072d4e72f700fb3443d8fe94a39315df013eef1104903cdb0a2abd322bbecd" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" dependencies = [ "futures-util", "js-sys", @@ -11337,11 +11748,35 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmtimer" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0048ad49a55b9deb3953841fa1fc5858f0efbcb7a18868c899a360269fac1b23" +dependencies = [ + "futures", + "js-sys", + "parking_lot", + "pin-utils", + "slab", + "wasm-bindgen", +] + [[package]] name = "web-sys" -version = "0.3.72" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" +checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", @@ -11349,9 +11784,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.6" +version = "0.26.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" +checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" dependencies = [ "rustls-pki-types", ] @@ -11384,7 +11819,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -11455,7 +11890,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -11466,7 +11901,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -11477,7 +11912,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -11488,7 +11923,7 @@ checksum = 
"053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -11722,7 +12157,7 @@ dependencies = [ "pharos", "rustc_version 0.4.1", "send_wrapper 0.6.0", - "thiserror", + "thiserror 1.0.69", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -11745,9 +12180,9 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" dependencies = [ "serde", "stable_deref_trait", @@ -11757,13 +12192,13 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", "synstructure", ] @@ -11785,27 +12220,27 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] name = "zerofrom" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", "synstructure", ] @@ -11826,7 +12261,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] @@ -11848,7 +12283,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.79", + "syn 2.0.90", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index e3ec1c1fb4a..4ee8fac0e74 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [workspace.package] -version = "1.1.0" +version = "1.1.4" edition = "2021" -rust-version = "1.81" +rust-version = "1.82" license = "MIT OR Apache-2.0" homepage = "https://paradigmxyz.github.io/reth" repository = "https://github.com/paradigmxyz/reth" @@ -20,7 +20,6 @@ members = [ "crates/cli/runner/", "crates/cli/util/", "crates/config/", - "crates/consensus/auto-seal/", "crates/consensus/beacon/", "crates/consensus/common/", "crates/consensus/consensus/", @@ -40,6 +39,7 @@ members = [ "crates/ethereum/evm", "crates/ethereum/node", "crates/ethereum/payload/", + "crates/ethereum/primitives/", "crates/etl/", "crates/evm/", "crates/evm/execution-errors", @@ -81,8 +81,10 @@ members = [ "crates/optimism/storage", "crates/payload/basic/", "crates/payload/builder/", + "crates/payload/builder-primitives/", "crates/payload/primitives/", "crates/payload/validator/", + "crates/payload/util/", "crates/primitives-traits/", "crates/primitives/", 
"crates/prune/prune", @@ -116,6 +118,7 @@ members = [ "crates/storage/nippy-jar/", "crates/storage/provider/", "crates/storage/storage-api/", + "crates/storage/zstd-compressors/", "crates/tasks/", "crates/tokio-util/", "crates/tracing/", @@ -145,6 +148,7 @@ members = [ "examples/rpc-db/", "examples/stateful-precompile/", "examples/txpool-tracing/", + "examples/custom-beacon-withdrawals", "testing/ef-tests/", "testing/testing-utils", ] @@ -175,6 +179,7 @@ branches_sharing_code = "warn" clear_with_drain = "warn" cloned_instead_of_copied = "warn" collection_is_never_read = "warn" +dbg_macro = "warn" derive_partial_eq_without_eq = "warn" doc_markdown = "warn" empty_line_after_doc_comments = "warn" @@ -287,11 +292,17 @@ codegen-units = 1 inherits = "release" lto = "fat" +[profile.reproducible] +inherits = "release" +debug = false +panic = "abort" +codegen-units = 1 +overflow-checks = true + [workspace.dependencies] # reth op-reth = { path = "crates/optimism/bin" } reth = { path = "bin/reth" } -reth-auto-seal-consensus = { path = "crates/consensus/auto-seal" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-beacon-consensus = { path = "crates/consensus/beacon" } reth-bench = { path = "bin/reth-bench" } @@ -330,8 +341,9 @@ reth-eth-wire-types = { path = "crates/net/eth-wire-types" } reth-ethereum-cli = { path = "crates/ethereum/cli" } reth-ethereum-consensus = { path = "crates/ethereum/consensus" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } -reth-ethereum-forks = { path = "crates/ethereum-forks" } +reth-ethereum-forks = { path = "crates/ethereum-forks", default-features = false } reth-ethereum-payload-builder = { path = "crates/ethereum/payload" } +reth-ethereum-primitives = { path = "crates/ethereum/primitives", default-features = false } reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm" } reth-evm-ethereum = { path = "crates/ethereum/evm" } @@ -366,14 +378,16 @@ reth-node-types = { path = "crates/node/types" } reth-optimism-chainspec = { path = "crates/optimism/chainspec" } reth-optimism-cli = { path = "crates/optimism/cli" } reth-optimism-consensus = { path = "crates/optimism/consensus" } -reth-optimism-forks = { path = "crates/optimism/hardforks" } +reth-optimism-forks = { path = "crates/optimism/hardforks", default-features = false } reth-optimism-payload-builder = { path = "crates/optimism/payload" } reth-optimism-primitives = { path = "crates/optimism/primitives" } reth-optimism-rpc = { path = "crates/optimism/rpc" } reth-optimism-storage = { path = "crates/optimism/storage" } reth-payload-builder = { path = "crates/payload/builder" } +reth-payload-builder-primitives = { path = "crates/payload/builder-primitives" } reth-payload-primitives = { path = "crates/payload/primitives" } reth-payload-validator = { path = "crates/payload/validator" } +reth-payload-util = { path = "crates/payload/util" } reth-primitives = { path = "crates/primitives", default-features = false, features = [ "std", ] } @@ -408,66 +422,66 @@ reth-trie = { path = "crates/trie/trie" } reth-trie-common = { path = "crates/trie/common" } reth-trie-db = { path = "crates/trie/db" } reth-trie-parallel = { path = "crates/trie/parallel" } +reth-trie-sparse = { path = "crates/trie/sparse" } +reth-zstd-compressors = { path = "crates/storage/zstd-compressors", default-features = false } # revm -revm = { version = "14.0.3", features = [ - "std", -], default-features = false } -revm-inspectors = "0.8.1" -revm-primitives = { version = "10.0.0", features = [ - 
"std", -], default-features = false } +revm = { version = "18.0.0", features = ["std"], default-features = false } +revm-inspectors = "0.12.0" +revm-primitives = { version = "14.0.0", default-features = false } # eth -alloy-chains = "0.1.32" -alloy-dyn-abi = "0.8.0" -alloy-primitives = { version = "0.8.7", default-features = false } -alloy-rlp = "0.3.4" -alloy-sol-types = "0.8.0" +alloy-chains = { version = "0.1.32", default-features = false } +alloy-dyn-abi = "0.8.11" +alloy-primitives = { version = "0.8.11", default-features = false } +alloy-rlp = { version = "0.3.10", default-features = false } +alloy-sol-types = "0.8.11" alloy-trie = { version = "0.7", default-features = false } -alloy-consensus = { version = "0.4.2", default-features = false } -alloy-eips = { version = "0.4.2", default-features = false } -alloy-genesis = { version = "0.4.2", default-features = false } -alloy-json-rpc = { version = "0.4.2", default-features = false } -alloy-network = { version = "0.4.2", default-features = false } -alloy-network-primitives = { version = "0.4.2", default-features = false } -alloy-node-bindings = { version = "0.4.2", default-features = false } -alloy-provider = { version = "0.4.2", features = [ +alloy-consensus = { version = "0.7.3", default-features = false } +alloy-contract = { version = "0.7.3", default-features = false } +alloy-eips = { version = "0.7.3", default-features = false } +alloy-genesis = { version = "0.7.3", default-features = false } +alloy-json-rpc = { version = "0.7.3", default-features = false } +alloy-network = { version = "0.7.3", default-features = false } +alloy-network-primitives = { version = "0.7.3", default-features = false } +alloy-node-bindings = { version = "0.7.3", default-features = false } +alloy-provider = { version = "0.7.3", features = [ "reqwest", ], default-features = false } -alloy-pubsub = { version = "0.4.2", default-features = false } -alloy-rpc-client = { version = "0.4.2", default-features = false } -alloy-rpc-types = { version = "0.4.2", features = [ +alloy-pubsub = { version = "0.7.3", default-features = false } +alloy-rpc-client = { version = "0.7.3", default-features = false } +alloy-rpc-types = { version = "0.7.3", features = [ "eth", ], default-features = false } -alloy-rpc-types-admin = { version = "0.4.2", default-features = false } -alloy-rpc-types-anvil = { version = "0.4.2", default-features = false } -alloy-rpc-types-beacon = { version = "0.4.2", default-features = false } -alloy-rpc-types-debug = { version = "0.4.2", default-features = false } -alloy-rpc-types-engine = { version = "0.4.2", default-features = false } -alloy-rpc-types-eth = { version = "0.4.2", default-features = false } -alloy-rpc-types-mev = { version = "0.4.2", default-features = false } -alloy-rpc-types-trace = { version = "0.4.2", default-features = false } -alloy-rpc-types-txpool = { version = "0.4.2", default-features = false } -alloy-serde = { version = "0.4.2", default-features = false } -alloy-signer = { version = "0.4.2", default-features = false } -alloy-signer-local = { version = "0.4.2", default-features = false } -alloy-transport = { version = "0.4.2" } -alloy-transport-http = { version = "0.4.2", features = [ +alloy-rpc-types-admin = { version = "0.7.3", default-features = false } +alloy-rpc-types-anvil = { version = "0.7.3", default-features = false } +alloy-rpc-types-beacon = { version = "0.7.3", default-features = false } +alloy-rpc-types-debug = { version = "0.7.3", default-features = false } +alloy-rpc-types-engine = { version = "0.7.3", 
default-features = false } +alloy-rpc-types-eth = { version = "0.7.3", default-features = false } +alloy-rpc-types-mev = { version = "0.7.3", default-features = false } +alloy-rpc-types-trace = { version = "0.7.3", default-features = false } +alloy-rpc-types-txpool = { version = "0.7.3", default-features = false } +alloy-serde = { version = "0.7.3", default-features = false } +alloy-signer = { version = "0.7.3", default-features = false } +alloy-signer-local = { version = "0.7.3", default-features = false } +alloy-transport = { version = "0.7.3" } +alloy-transport-http = { version = "0.7.3", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ipc = { version = "0.4.2", default-features = false } -alloy-transport-ws = { version = "0.4.2", default-features = false } +alloy-transport-ipc = { version = "0.7.3", default-features = false } +alloy-transport-ws = { version = "0.7.3", default-features = false } # op -op-alloy-rpc-types = "0.4" -op-alloy-rpc-types-engine = "0.4" -op-alloy-network = "0.4" -op-alloy-consensus = "0.4" +op-alloy-rpc-types = "0.7.3" +op-alloy-rpc-types-engine = "0.7.3" +op-alloy-rpc-jsonrpsee = "0.7.3" +op-alloy-network = "0.7.3" +op-alloy-consensus = "0.7.3" # misc -aquamarine = "0.5" +aquamarine = "0.6" auto_impl = "1" backon = { version = "1.2", default-features = false, features = [ "std-blocking-sleep", @@ -476,12 +490,12 @@ backon = { version = "1.2", default-features = false, features = [ bincode = "1.3" bitflags = "2.4" boyer-moore-magiclen = "0.2.16" -bytes = "1.5" +bytes = { version = "1.5", default-features = false } cfg-if = "1.0" clap = "4" const_format = { version = "0.2.32", features = ["rust_1_64"] } dashmap = "6.0" -derive_more = { version = "1", features = ["full"] } +derive_more = { version = "1", default-features = false, features = ["full"] } dyn-clone = "1.0.17" eyre = "0.6" fdlimit = "0.3.0" @@ -494,7 +508,7 @@ modular-bitfield = "0.11.2" notify = { version = "6.1.1", default-features = false, features = [ "macos_fsevent", ] } -nybbles = "0.2.1" +nybbles = { version = "0.2.1", default-features = false } once_cell = { version = "1.19", default-features = false, features = [ "critical-section", ] } @@ -506,25 +520,25 @@ rustc-hash = { version = "2.0", default-features = false } schnellru = "0.2" serde = { version = "1.0", default-features = false } serde_json = "1.0.94" -serde_with = "3.3.0" +serde_with = { version = "3", default-features = false, features = ["macros"] } sha2 = { version = "0.10", default-features = false } shellexpand = "3.0.0" smallvec = "1" strum = { version = "0.26", default-features = false } syn = "2.0" -thiserror = "1.0" -thiserror-no-std = { version = "2.0.2", default-features = false } +thiserror = { version = "2.0.0", default-features = false } tracing = "0.1.0" tracing-appender = "0.2" -url = "2.3" +url = { version = "2.3", default-features = false } zstd = "0.13" +byteorder = "1" # metrics -metrics = "0.23.0" +metrics = "0.24.0" metrics-derive = "0.1" -metrics-exporter-prometheus = { version = "0.15.0", default-features = false } +metrics-exporter-prometheus = { version = "0.16.0", default-features = false } metrics-process = "2.1.0" -metrics-util = { default-features = false, version = "0.17.0" } +metrics-util = { default-features = false, version = "0.18.0" } # proc-macros proc-macro2 = "1.0" @@ -545,22 +559,26 @@ hyper = "1.3" hyper-util = "0.1.5" pin-project = "1.0.12" reqwest = { version = "0.12", default-features = false } +tracing-futures = "0.2" tower = "0.4" -tower-http = "0.5" 
+tower-http = "0.6" + # p2p -discv5 = "0.7.0" +discv5 = "0.8.0" if-addrs = "0.13" # rpc jsonrpsee = "0.24" jsonrpsee-core = "0.24" +jsonrpsee-server = "0.24" jsonrpsee-http-client = "0.24" jsonrpsee-types = "0.24" # http http = "1.0" http-body = "1.0" +http-body-util = "0.1.2" jsonwebtoken = "9" proptest-arbitrary-interop = "0.1.0" @@ -582,43 +600,50 @@ toml = "0.8" arbitrary = "1.3" assert_matches = "1.5.0" criterion = "0.5" -iai-callgrind = "0.13" -pprof = "0.13" +iai-callgrind = "0.14" +pprof = "0.14" proptest = "1.4" proptest-derive = "0.5" serial_test = { default-features = false, version = "3" } -similar-asserts = { default-features = false, version = "1.5.0" } +similar-asserts = { version = "1.5.0", features = ["serde"] } tempfile = "3.8" test-fuzz = "6" +rstest = "0.23.0" tikv-jemalloc-ctl = "0.6" tikv-jemallocator = "0.6" tracy-client = "0.17.3" -[patch.crates-io] -#alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-admin = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-debug = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-eth = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-mev = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-rpc-types-txpool = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-serde = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-signer-local = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} -#alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "8c499409"} +# [patch.crates-io] +# alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-genesis = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-network-primitives 
= { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-pubsub = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-admin = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-beacon = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-debug = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-eth = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-mev = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-rpc-types-txpool = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-serde = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-signer = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-signer-local = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-transport-http = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-transport-ipc = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } +# alloy-transport-ws = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } + +# op-alloy-consensus = { git = "https://github.com/alloy-rs/op-alloy", rev = "debfc29" } +# op-alloy-network = { git = "https://github.com/alloy-rs/op-alloy", rev = "debfc29" } +# op-alloy-rpc-types = { git = "https://github.com/alloy-rs/op-alloy", rev = "debfc29" } +# op-alloy-rpc-types-engine = { git = "https://github.com/alloy-rs/op-alloy", rev = "debfc29" } diff --git a/Dockerfile.reproducible b/Dockerfile.reproducible new file mode 100644 index 00000000000..12c12dd7c7d --- /dev/null +++ b/Dockerfile.reproducible @@ -0,0 +1,37 @@ +# Use the Rust 1.82 image based on Debian Bullseye +FROM rust:1.82-bullseye@sha256:c42c8ca762560c182ba30edda0e0d71a8604040af2672370559d7e854653c66d AS builder + +# Install specific version of libclang-dev +RUN apt-get update && apt-get install -y libclang-dev=1:11.0-51+nmu5 + +# Clone the repository at the specific branch +RUN git clone https://github.com/paradigmxyz/reth /app +WORKDIR /app + +# Checkout the reproducible-build branch +RUN git checkout reproducible-build + +# Get the latest commit timestamp and set SOURCE_DATE_EPOCH +RUN SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) && \ + echo "SOURCE_DATE_EPOCH=$SOURCE_DATE_EPOCH" >> /etc/environment + +# Set environment variables for reproducibility +ARG RUSTFLAGS="-C target-feature=+crt-static -C link-arg=-Wl,--build-id=none -Clink-arg=-static-libgcc -C metadata='' --remap-path-prefix $(pwd)=." 
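+# Why these flags: `+crt-static` and `-static-libgcc` avoid dynamic C-runtime
+# dependencies, `--build-id=none` keeps the linker from embedding a build-id note,
+# `-C metadata=''` pins the extra data hashed into Rust symbol names, and
+# `--remap-path-prefix` rewrites the absolute build directory to `.` so local
+# paths do not leak into the binary.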
+ENV SOURCE_DATE_EPOCH=$SOURCE_DATE_EPOCH \ + CARGO_INCREMENTAL=0 \ + LC_ALL=C \ + TZ=UTC \ + RUSTFLAGS="${RUSTFLAGS}" + +# Set the default features if not provided +ARG FEATURES="jemalloc asm-keccak" + +# Build the project with the reproducible settings +RUN . /etc/environment && \ + cargo build --bin reth --features "${FEATURES}" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu + +# Create a minimal final image with just the binary +FROM scratch AS binaries + +# Copy the compiled binary from the builder stage +COPY --from=builder /app/target/x86_64-unknown-linux-gnu/reproducible/reth /reth diff --git a/HARDFORK-CHECKLIST.md b/HARDFORK-CHECKLIST.md new file mode 100644 index 00000000000..17c639f0d5e --- /dev/null +++ b/HARDFORK-CHECKLIST.md @@ -0,0 +1,21 @@ +# Non-exhaustive checklist for integrating new changes for an upcoming hard fork/devnet + +## Introducing new EIP types or changes to primitive types + +- Make required changes to primitive data structures on [alloy](https://github.com/alloy-rs/alloy) +- All new EIP data structures/constants/helpers etc. go into the `alloy-eips` crate at first. +- New transaction types go into `alloy-consensus` +- If there are changes to existing data structures, such as `Header` or `Block`, apply them to the types in `alloy-consensus` (e.g. new `request_hashes` field in Prague) + +## Engine API + +- If there are changes to the engine API (e.g. a new `engine_newPayloadVx` and `engine_getPayloadVx` pair) add the new types to the `alloy-rpc-types-engine` crate. +- If there are new parameters to the `engine_newPayloadVx` endpoint, add them to the `ExecutionPayloadSidecar` container type. This type contains all additional parameters that are required to convert an `ExecutionPayload` to an EL block. + +## Reth changes + +### Updates to the engine API + +- Add new endpoints to the `EngineApi` trait and implement them. +- Update the `ExecutionPayload` + `ExecutionPayloadSidecar` to `Block` conversion if there are any additional parameters. +- Update version-specific validation checks in the `EngineValidator` trait. diff --git a/Makefile b/Makefile index 908f1ef24da..b1908d7b109 100644 --- a/Makefile +++ b/Makefile @@ -62,6 +62,16 @@ install-op: ## Build and install the op-reth binary under `~/.cargo/bin`. build: ## Build the reth binary into `target` directory. cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)" +SOURCE_DATE_EPOCH ?= $(shell git log -1 --pretty=%ct) +.PHONY: reproducible +reproducible: ## Build the reth binary into `target` directory with reproducible builds. Only works for x86_64-unknown-linux-gnu currently. + SOURCE_DATE_EPOCH=$(SOURCE_DATE_EPOCH) \ + CARGO_INCREMENTAL=0 \ + LC_ALL=C \ + TZ=UTC \ + RUSTFLAGS="-C target-feature=+crt-static -C link-arg=-Wl,--build-id=none -Clink-arg=-static-libgcc -C metadata='' --remap-path-prefix $$(pwd)=." \ + cargo build --bin reth --features "$(FEATURES)" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu + .PHONY: build-debug build-debug: ## Build the reth binary into `target/debug` directory.
cargo build --bin reth --features "$(FEATURES)" @@ -487,6 +497,7 @@ test: pr: make lint && \ make update-book-cli && \ + cargo docs --document-private-items && \ make test check-features: diff --git a/README.md b/README.md index 7fae4d0b62c..8a6b8ddb42f 100644 --- a/README.md +++ b/README.md @@ -87,7 +87,7 @@ When updating this, also update: - .github/workflows/lint.yml --> -The Minimum Supported Rust Version (MSRV) of this project is [1.81.0](https://blog.rust-lang.org/2024/09/05/Rust-1.81.0.html). +The Minimum Supported Rust Version (MSRV) of this project is [1.82.0](https://blog.rust-lang.org/2024/10/17/Rust-1.82.0.html). See the book for detailed instructions on how to [build from source](https://paradigmxyz.github.io/reth/installation/source.html). diff --git a/SECURITY.md b/SECURITY.md index 5260d529f5a..7521d62e959 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,5 +1,5 @@ # Security Policy -## Reporting a Vulnerability +## Report a Vulnerability -Contact georgios at paradigm.xyz. +Contact [security@ithaca.xyz](mailto:security@ithaca.xyz). diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index e4e40daeca9..0182076130c 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -20,6 +20,7 @@ reth-node-core.workspace = true reth-node-api.workspace = true reth-rpc-types-compat.workspace = true reth-primitives = { workspace = true, features = ["alloy-compat"] } +reth-primitives-traits.workspace = true reth-tracing.workspace = true # alloy @@ -76,9 +77,16 @@ reth-tracing.workspace = true [features] default = ["jemalloc"] -asm-keccak = ["reth-primitives/asm-keccak"] +asm-keccak = [ + "reth-primitives/asm-keccak", + "reth-node-core/asm-keccak", + "alloy-primitives/asm-keccak" +] -jemalloc = ["reth-cli-util/jemalloc"] +jemalloc = [ + "reth-cli-util/jemalloc", + "reth-node-core/jemalloc" +] jemalloc-prof = ["reth-cli-util/jemalloc-prof"] tracy-allocator = ["reth-cli-util/tracy-allocator"] diff --git a/bin/reth-bench/src/authenticated_transport.rs b/bin/reth-bench/src/authenticated_transport.rs index c946d244de9..72c4fd29889 100644 --- a/bin/reth-bench/src/authenticated_transport.rs +++ b/bin/reth-bench/src/authenticated_transport.rs @@ -84,7 +84,8 @@ impl InnerTransport { let (auth, claims) = build_auth(jwt).map_err(|e| AuthenticatedTransportError::InvalidJwt(e.to_string()))?; - let inner = WsConnect { url: url.to_string(), auth: Some(auth) } + let inner = WsConnect::new(url.clone()) + .with_auth(auth) .into_service() .await .map(Self::Ws) diff --git a/bin/reth-bench/src/bench/context.rs b/bin/reth-bench/src/bench/context.rs index 5f8936934c6..59533bc6e97 100644 --- a/bin/reth-bench/src/bench/context.rs +++ b/bin/reth-bench/src/bench/context.rs @@ -74,14 +74,17 @@ impl BenchContext { let first_block = match benchmark_mode { BenchMode::Continuous => { // fetch Latest block - block_provider.get_block_by_number(BlockNumberOrTag::Latest, true).await?.unwrap() + block_provider + .get_block_by_number(BlockNumberOrTag::Latest, true.into()) + .await? + .unwrap() } BenchMode::Range(ref mut range) => { match range.next() { Some(block_number) => { // fetch first block in range block_provider - .get_block_by_number(block_number.into(), true) + .get_block_by_number(block_number.into(), true.into()) .await? 
.unwrap() } diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index a8c18b48a2b..9e573a8957e 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -18,7 +18,7 @@ use clap::Parser; use csv::Writer; use reth_cli_runner::CliContext; use reth_node_core::args::BenchmarkArgs; -use reth_primitives::Block; +use reth_primitives::{Block, BlockExt}; use reth_rpc_types_compat::engine::payload::block_to_payload; use std::time::Instant; use tracing::{debug, info}; @@ -37,23 +37,23 @@ pub struct Command { impl Command { /// Execute `benchmark new-payload-fcu` command pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { - let cloned_args = self.benchmark.clone(); let BenchContext { benchmark_mode, block_provider, auth_provider, mut next_block } = - BenchContext::new(&cloned_args, self.rpc_url).await?; + BenchContext::new(&self.benchmark, self.rpc_url).await?; let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); tokio::task::spawn(async move { while benchmark_mode.contains(next_block) { - let block_res = block_provider.get_block_by_number(next_block.into(), true).await; + let block_res = + block_provider.get_block_by_number(next_block.into(), true.into()).await; let block = block_res.unwrap().unwrap(); let block_hash = block.header.hash; - let block = Block::try_from(block.inner).unwrap().seal(block_hash); + let block = Block::try_from(block).unwrap().seal(block_hash); let head_block_hash = block.hash(); let safe_block_hash = block_provider - .get_block_by_number(block.number.saturating_sub(32).into(), false); + .get_block_by_number(block.number.saturating_sub(32).into(), false.into()); let finalized_block_hash = block_provider - .get_block_by_number(block.number.saturating_sub(64).into(), false); + .get_block_by_number(block.number.saturating_sub(64).into(), false.into()); let (safe, finalized) = tokio::join!(safe_block_hash, finalized_block_hash,); @@ -75,11 +75,11 @@ impl Command { while let Some((block, head, safe, finalized)) = receiver.recv().await { // just put gas used here - let gas_used = block.header.gas_used; + let gas_used = block.gas_used; let block_number = block.header.number; let versioned_hashes: Vec<B256> = - block.blob_versioned_hashes().into_iter().copied().collect(); + block.body.blob_versioned_hashes().into_iter().copied().collect(); let parent_beacon_block_root = block.parent_beacon_block_root; let payload = block_to_payload(block); diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index e6392318a54..0611faabf10 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -16,7 +16,7 @@ use clap::Parser; use csv::Writer; use reth_cli_runner::CliContext; use reth_node_core::args::BenchmarkArgs; -use reth_primitives::Block; +use reth_primitives::{Block, BlockExt}; use reth_rpc_types_compat::engine::payload::block_to_payload; use std::time::Instant; use tracing::{debug, info}; @@ -35,19 +35,19 @@ pub struct Command { impl Command { /// Execute `benchmark new-payload-only` command pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { - let cloned_args = self.benchmark.clone(); // TODO: this could be just a function I guess, but destructuring makes the code slightly // more readable than a 4 element tuple.
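        // BenchContext::new wires up everything the run needs: the benchmark mode
        // (continuous or a fixed block range), an RPC provider to pull blocks from,
        // the JWT-authenticated engine API provider, and the first block to replay.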
let BenchContext { benchmark_mode, block_provider, auth_provider, mut next_block } = - BenchContext::new(&cloned_args, self.rpc_url).await?; + BenchContext::new(&self.benchmark, self.rpc_url).await?; let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); tokio::task::spawn(async move { while benchmark_mode.contains(next_block) { - let block_res = block_provider.get_block_by_number(next_block.into(), true).await; + let block_res = + block_provider.get_block_by_number(next_block.into(), true.into()).await; let block = block_res.unwrap().unwrap(); let block_hash = block.header.hash; - let block = Block::try_from(block.inner).unwrap().seal(block_hash); + let block = Block::try_from(block).unwrap().seal(block_hash); next_block += 1; sender.send(block).await.unwrap(); @@ -60,10 +60,10 @@ impl Command { while let Some(block) = receiver.recv().await { // just put gas used here - let gas_used = block.header.gas_used; + let gas_used = block.gas_used; let versioned_hashes: Vec<B256> = - block.blob_versioned_hashes().into_iter().copied().collect(); + block.body.blob_versioned_hashes().into_iter().copied().collect(); let parent_beacon_block_root = block.parent_beacon_block_root; let payload = block_to_payload(block); diff --git a/bin/reth-bench/src/bench/output.rs b/bin/reth-bench/src/bench/output.rs index 8f68dac4533..56343c6af64 100644 --- a/bin/reth-bench/src/bench/output.rs +++ b/bin/reth-bench/src/bench/output.rs @@ -1,7 +1,7 @@ //! Contains various benchmark output formats, either for logging or for //! serialization to / from files. -use reth_primitives::constants::gas_units::GIGAGAS; +use reth_primitives_traits::constants::GIGAGAS; use serde::{ser::SerializeStruct, Serialize}; use std::time::Duration; diff --git a/bin/reth-bench/src/valid_payload.rs b/bin/reth-bench/src/valid_payload.rs index 6353aea7123..b00f4ddcd64 100644 --- a/bin/reth-bench/src/valid_payload.rs +++ b/bin/reth-bench/src/valid_payload.rs @@ -215,14 +215,6 @@ pub(crate) async fn call_new_payload>( versioned_hashes: Vec<B256>, ) -> TransportResult<EngineApiMessageVersion> { match payload { - ExecutionPayload::V4(_payload) => { todo!("V4 payloads not supported yet"); // auth_provider // .new_payload_v4_wait(payload, versioned_hashes, parent_beacon_block_root, ...)
- // .await?; - // - // Ok(EngineApiMessageVersion::V4) - } ExecutionPayload::V3(payload) => { // We expect the caller let parent_beacon_block_root = parent_beacon_block_root diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 476f9cd5cec..cf9c53261b4 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -19,6 +19,7 @@ reth-ethereum-cli.workspace = true reth-chainspec.workspace = true reth-config.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-fs-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-db-api.workspace = true @@ -58,7 +59,7 @@ reth-trie-db = { workspace = true, features = ["metrics"] } reth-node-api.workspace = true reth-node-core.workspace = true reth-ethereum-payload-builder.workspace = true -reth-node-ethereum.workspace = true +reth-node-ethereum = { workspace = true, features = ["js-tracer"] } reth-node-builder.workspace = true reth-node-events.workspace = true reth-node-metrics.workspace = true @@ -96,22 +97,28 @@ backon.workspace = true similar-asserts.workspace = true [dev-dependencies] -reth-discv4.workspace = true tempfile.workspace = true [features] default = ["jemalloc"] -dev = ["reth-cli-commands/dev"] +dev = ["reth-cli-commands/arbitrary"] -asm-keccak = ["reth-node-core/asm-keccak", "reth-primitives/asm-keccak"] +asm-keccak = [ + "reth-node-core/asm-keccak", + "reth-primitives/asm-keccak", + "alloy-primitives/asm-keccak" +] jemalloc = [ "reth-cli-util/jemalloc", "reth-node-core/jemalloc", "reth-node-metrics/jemalloc", ] -jemalloc-prof = ["reth-cli-util/jemalloc"] +jemalloc-prof = [ + "reth-cli-util/jemalloc", + "reth-cli-util/jemalloc-prof" +] tracy-allocator = ["reth-cli-util/tracy-allocator"] min-error-logs = ["tracing/release_max_level_error"] diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 01f8f73e7b1..192ab670028 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -39,7 +39,7 @@ pub struct Cli, + pub command: Commands, /// The chain this node is running. /// @@ -52,7 +52,7 @@ pub struct Cli, + pub chain: Arc, /// Add a new instance of a node. /// @@ -68,10 +68,11 @@ pub struct Cli, Ext: clap::Args + fmt::Debug> Cl let _guard = self.init_tracing()?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); - // Install the prometheus recorder to be sure to record task - // executor's metrics + // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); let runner = CliRunner::default(); diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 455d8356aff..0e4d3f7188a 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -1,6 +1,6 @@ //! Command for debugging block building. 
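//!
//! In short: decode the supplied transactions, insert them into a local transaction
//! pool, run the Ethereum payload builder on top of the best block's state, then
//! execute the built block and recompute its state root as a sanity check.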
use alloy_consensus::TxEip4844; -use alloy_eips::eip2718::Encodable2718; +use alloy_eips::{eip2718::Encodable2718, eip4844::BlobTransactionSidecar}; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::Decodable; use alloy_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; @@ -15,25 +15,29 @@ use reth_blockchain_tree::{ }; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, FullConsensus}; use reth_errors::RethResult; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_fs_util as fs; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine, PayloadBuilderAttributes}; +use reth_node_api::{BlockTy, EngineApiMessageVersion, PayloadBuilderAttributes}; use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider}; -use reth_payload_builder::database::CachedReads; use reth_primitives::{ - revm_primitives::KzgSettings, BlobTransaction, BlobTransactionSidecar, - PooledTransactionsElement, SealedBlock, SealedBlockWithSenders, Transaction, TransactionSigned, + BlobTransaction, BlockExt, PooledTransactionsElement, SealedBlockFor, SealedBlockWithSenders, + SealedHeader, Transaction, TransactionSigned, }; use reth_provider::{ - providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, - ProviderFactory, StageCheckpointReader, StateProviderFactory, + providers::{BlockchainProvider, ProviderNodeTypes}, + BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, + StageCheckpointReader, StateProviderFactory, +}; +use reth_revm::{ + cached::CachedReads, + database::StateProviderDatabase, + primitives::{EnvKzgSettings, KzgSettings}, }; -use reth_revm::{database::StateProviderDatabase, primitives::EnvKzgSettings}; use reth_stages::StageId; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, BlobStore, EthPooledTransaction, PoolConfig, TransactionOrigin, @@ -83,10 +87,10 @@ impl> Command { /// Fetches the best block from the database. /// /// If the database is empty, returns the genesis block. - fn lookup_best_block>( + fn lookup_best_block>( &self, factory: ProviderFactory, - ) -> RethResult> { + ) -> RethResult>>> { let provider = factory.provider()?; let best_number = @@ -118,13 +122,13 @@ impl> Command { } /// Execute `debug build-block` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { let Environment { provider_factory, ..
} = self.env.init::(AccessRights::RW)?; - let consensus: Arc = + let consensus: Arc = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); @@ -197,7 +201,7 @@ impl> Command { let encoded_length = pooled.encode_2718_len(); // insert the blob into the store - blob_store.insert(transaction.hash, sidecar)?; + blob_store.insert(transaction.hash(), sidecar)?; encoded_length } @@ -220,13 +224,16 @@ impl> Command { suggested_fee_recipient: self.suggested_fee_recipient, // TODO: add support for withdrawals withdrawals: None, + target_blobs_per_block: None, + max_blobs_per_block: None, }; let payload_config = PayloadConfig::new( - Arc::clone(&best_block), + Arc::new(SealedHeader::new(best_block.header().clone(), best_block.hash())), Bytes::default(), reth_payload_builder::EthPayloadBuilderAttributes::try_new( best_block.hash(), payload_attrs, + EngineApiMessageVersion::default() as u8, )?, ); @@ -254,9 +261,10 @@ impl> Command { let senders = block.senders().expect("sender recovery failed"); let block_with_senders = - SealedBlockWithSenders::new(block.clone(), senders).unwrap(); + SealedBlockWithSenders::>::new(block.clone(), senders).unwrap(); - let db = StateProviderDatabase::new(blockchain_db.latest()?); + let state_provider = blockchain_db.latest()?; + let db = StateProviderDatabase::new(&state_provider); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db); @@ -266,7 +274,7 @@ impl> Command { ExecutionOutcome::from((block_execution_output, block.number)); debug!(target: "reth::cli", ?execution_outcome, "Executed block"); - let hashed_post_state = execution_outcome.hash_state_slow(); + let hashed_post_state = state_provider.hashed_post_state(execution_outcome.state()); let (state_root, trie_updates) = StateRoot::overlay_root_with_updates( provider_factory.provider()?.tx_ref(), hashed_post_state.clone(), diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 215afacb583..efe4a2f7c22 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -1,13 +1,14 @@ //! Command for debugging execution. 
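//!
//! In short: fetch headers and bodies from the network, run the staged pipeline up
//! to the requested target in batches, and unwind without committing so the
//! execution stages can be exercised repeatedly.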
use crate::{args::NetworkArgs, utils::get_single_header}; +use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use clap::Parser; -use futures::{stream::select as stream_select, StreamExt}; +use futures::StreamExt; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; @@ -18,14 +19,13 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_exex::ExExManagerHandle; -use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider, NetworkHandle}; +use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_network_p2p::{headers::client::HeadersClient, BlockClient}; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; +use reth_network_p2p::{headers::client::HeadersClient, EthBlockClient}; +use reth_node_api::NodeTypesWithDBAdapter; use reth_node_ethereum::EthExecutorProvider; -use reth_primitives::BlockHashOrNumber; use reth_provider::{ - BlockExecutionWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, + providers::ProviderNodeTypes, ChainSpecProvider, ProviderFactory, StageCheckpointReader, }; use reth_prune::PruneModes; use reth_stages::{ @@ -58,7 +58,7 @@ pub struct Command { } impl> Command { - fn build_pipeline, Client>( + fn build_pipeline + CliNodeTypes, Client>( &self, config: &Config, client: Client, @@ -68,11 +68,11 @@ impl> Command { static_file_producer: StaticFileProducer>, ) -> eyre::Result> where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) - .build(client.clone(), Arc::clone(&consensus)) + .build(client.clone(), consensus.clone().as_header_validator()) .into_task_with(task_executor); let body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) @@ -116,7 +116,7 @@ impl> Command { Ok(pipeline) } - async fn build_network>( + async fn build_network>( &self, config: &Config, task_executor: TaskExecutor, @@ -137,11 +137,14 @@ impl> Command { Ok(network) } - async fn fetch_block_hash( + async fn fetch_block_hash( &self, client: Client, block: BlockNumber, - ) -> eyre::Result { + ) -> eyre::Result + where + Client: HeadersClient, + { info!(target: "reth::cli", ?block, "Fetching block from the network."); loop { match get_single_header(&client, BlockHashOrNumber::Number(block)).await { @@ -157,7 +160,7 @@ impl> Command { } /// Execute `execution-debug` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { @@ -203,17 +206,12 @@ impl> Command { return Ok(()) } - let pipeline_events = pipeline.events(); - let events = stream_select( - network.event_listener().map(Into::into), - pipeline_events.map(Into::into), - ); ctx.task_executor.spawn_critical( "events task", reth_node_events::node::handle_events( Some(Box::new(network)), latest_block_number, - events, + pipeline.events().map(Into::into), ), ); @@ -231,11 +229,7 @@ impl> Command { trace!(target: "reth::cli", from = next_block, to = target_block, tip = ?target_block_hash, ?result, "Pipeline finished"); // Unwind the 
pipeline without committing. - { - provider_factory - .provider_rw()? - .take_block_and_execution_range(next_block..=target_block)?; - } + provider_factory.provider_rw()?.unwind_trie_state_range(next_block..=target_block)?; // Update latest block current_max_block = target_block; diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index 51851c0b0ad..58b86648b90 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -4,11 +4,13 @@ use crate::{ args::NetworkArgs, utils::{get_single_body, get_single_header}, }; +use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::Parser; +use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; @@ -17,13 +19,14 @@ use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; +use reth_node_api::{BlockTy, NodePrimitives}; use reth_node_ethereum::EthExecutorProvider; -use reth_primitives::BlockHashOrNumber; +use reth_primitives::BlockExt; use reth_provider::{ - writer::UnifiedStorageWriter, AccountExtReader, ChainSpecProvider, HashingWriter, - HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, - StageCheckpointReader, StateWriter, StaticFileProviderFactory, StorageReader, + providers::ProviderNodeTypes, AccountExtReader, ChainSpecProvider, DatabaseProviderFactory, + HashedPostStateProvider, HashingWriter, HeaderProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter, StorageLocation, + StorageReader, }; use reth_revm::database::StateProviderDatabase; use reth_stages::StageId; @@ -55,7 +58,16 @@ pub struct Command { } impl> Command { - async fn build_network>( + async fn build_network< + N: ProviderNodeTypes< + ChainSpec = C::ChainSpec, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, + >, + >, + >( &self, config: &Config, task_executor: TaskExecutor, @@ -77,7 +89,7 @@ impl> Command { } /// Execute `debug in-memory-merkle` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { @@ -124,17 +136,16 @@ impl> Command { let client = fetch_client.clone(); let chain = provider_factory.chain_spec(); - let block = (move || get_single_body(client.clone(), Arc::clone(&chain), header.clone())) + let consensus = Arc::new(EthBeaconConsensus::new(chain.clone())); + let block = (move || get_single_body(client.clone(), header.clone(), consensus.clone())) .retry(backoff) .notify( |err, _| warn!(target: "reth::cli", "Error requesting body: {err}. 
Retrying..."), ) .await?; - let db = StateProviderDatabase::new(LatestStateProviderRef::new( - provider.tx_ref(), - provider_factory.static_file_provider(), - )); + let state_provider = LatestStateProviderRef::new(&provider); + let db = StateProviderDatabase::new(&state_provider); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db); @@ -144,7 +155,7 @@ impl> Command { ( &block .clone() - .unseal() + .unseal::>() .with_recovered_senders() .ok_or(BlockValidationError::SenderRecoveryError)?, merkle_block_td + block.difficulty, @@ -156,7 +167,7 @@ impl> Command { // Unpacked `BundleState::state_root_slow` function let (in_memory_state_root, in_memory_updates) = StateRoot::overlay_root_with_updates( provider.tx_ref(), - execution_outcome.hash_state_slow(), + state_provider.hashed_post_state(execution_outcome.state()), )?; if in_memory_state_root == block.state_root { @@ -164,7 +175,7 @@ impl> Command { return Ok(()) } - let provider_rw = provider_factory.provider_rw()?; + let provider_rw = provider_factory.database_provider_rw()?; // Insert block, state and hashes provider_rw.insert_historical_block( @@ -173,8 +184,11 @@ impl> Command { .try_seal_with_senders() .map_err(|_| BlockValidationError::SenderRecoveryError)?, )?; - let mut storage_writer = UnifiedStorageWriter::from_database(&provider_rw.0); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; + provider_rw.write_state( + execution_outcome, + OriginalValuesKnown::No, + StorageLocation::Database, + )?; let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?; let storages = provider_rw.plain_state_storages(storage_lists)?; provider_rw.insert_storage_for_hashing(storages)?; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 8e02a52eaf0..16a1f111272 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -1,11 +1,12 @@ //! Command for debugging merkle trie calculation. 
use crate::{args::NetworkArgs, utils::get_single_header}; +use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; @@ -16,13 +17,12 @@ use reth_evm::execute::{BatchExecutor, BlockExecutorProvider}; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_network_p2p::full_block::FullBlockClient; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithEngine}; +use reth_node_api::{BlockTy, NodePrimitives}; use reth_node_ethereum::EthExecutorProvider; -use reth_primitives::BlockHashOrNumber; use reth_provider::{ - writer::UnifiedStorageWriter, BlockNumReader, BlockWriter, ChainSpecProvider, + providers::ProviderNodeTypes, BlockNumReader, BlockWriter, ChainSpecProvider, DatabaseProviderFactory, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, - ProviderError, ProviderFactory, StateWriter, StaticFileProviderFactory, + ProviderError, ProviderFactory, StateWriter, StorageLocation, }; use reth_revm::database::StateProviderDatabase; use reth_stages::{ @@ -56,7 +56,16 @@ pub struct Command { } impl> Command { - async fn build_network>( + async fn build_network< + N: ProviderNodeTypes< + ChainSpec = C::ChainSpec, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, + >, + >, + >( &self, config: &Config, task_executor: TaskExecutor, @@ -78,7 +87,7 @@ impl> Command { } /// Execute `merkle-debug` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { @@ -145,24 +154,24 @@ impl> Command { for block in blocks.into_iter().rev() { let block_number = block.number; let sealed_block = block - .try_seal_with_senders() + .try_seal_with_senders::>() .map_err(|block| eyre::eyre!("Error sealing block with senders: {block:?}"))?; trace!(target: "reth::cli", block_number, "Executing block"); - provider_rw.insert_block(sealed_block.clone())?; + provider_rw.insert_block(sealed_block.clone(), StorageLocation::Database)?; td += sealed_block.difficulty; let mut executor = executor_provider.batch_executor(StateProviderDatabase::new( - LatestStateProviderRef::new( - provider_rw.tx_ref(), - provider_rw.static_file_provider().clone(), - ), + LatestStateProviderRef::new(&provider_rw), )); executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?; let execution_outcome = executor.finalize(); - let mut storage_writer = UnifiedStorageWriter::from_database(&provider_rw); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + provider_rw.write_state( + execution_outcome, + OriginalValuesKnown::Yes, + StorageLocation::Database, + )?; let checkpoint = Some(StageCheckpoint::new( block_number diff --git a/bin/reth/src/commands/debug_cmd/mod.rs b/bin/reth/src/commands/debug_cmd/mod.rs index 51681e8c59e..65329f41400 100644 --- a/bin/reth/src/commands/debug_cmd/mod.rs +++ b/bin/reth/src/commands/debug_cmd/mod.rs @@ -3,8 +3,8 @@ use clap::{Parser, Subcommand}; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::common::CliNodeTypes; use 
reth_cli_runner::CliContext; -use reth_node_api::NodeTypesWithEngine; use reth_node_ethereum::EthEngineTypes; mod build_block; @@ -37,9 +37,7 @@ pub enum Subcommands { impl> Command { /// Execute `debug` command - pub async fn execute< - N: NodeTypesWithEngine, - >( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index cbffa1f0e07..4b98fc85d0b 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -8,21 +8,22 @@ use reth_blockchain_tree::{ }; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; use reth_db::DatabaseEnv; use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage}; use reth_fs_util as fs; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_node_api::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; +use reth_node_api::{EngineApiMessageVersion, NodePrimitives, NodeTypesWithDBAdapter}; use reth_node_ethereum::{EthEngineTypes, EthEvmConfig, EthExecutorProvider}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::{ - providers::BlockchainProvider, CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, + providers::{BlockchainProvider, ProviderNodeTypes}, + CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, }; use reth_prune::PruneModes; use reth_stages::Pipeline; @@ -54,7 +55,16 @@ pub struct Command { } impl> Command { - async fn build_network>( + async fn build_network< + N: ProviderNodeTypes< + ChainSpec = C::ChainSpec, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, + >, + >, + >( &self, config: &Config, task_executor: TaskExecutor, @@ -76,16 +86,14 @@ impl> Command { } /// Execute `debug replay-engine` command - pub async fn execute< - N: NodeTypesWithEngine, - >( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { let Environment { provider_factory, config, data_dir } = self.env.init::(AccessRights::RW)?; - let consensus: Arc = + let consensus: Arc = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); @@ -166,12 +174,17 @@ impl> Command { debug!(target: "reth::cli", filepath = %filepath.display(), ?message, "Forwarding Engine API message"); match message { StoredEngineApiMessage::ForkchoiceUpdated { state, payload_attrs } => { - let response = - beacon_engine_handle.fork_choice_updated(state, payload_attrs).await?; + let response = beacon_engine_handle + .fork_choice_updated( + state, + payload_attrs, + EngineApiMessageVersion::default(), + ) + .await?; debug!(target: "reth::cli", ?response, "Received for forkchoice updated"); } - StoredEngineApiMessage::NewPayload { payload, cancun_fields } => { - let response = beacon_engine_handle.new_payload(payload, cancun_fields).await?; + StoredEngineApiMessage::NewPayload { payload, sidecar } => { + let response = 
beacon_engine_handle.new_payload(payload, sidecar).await?; debug!(target: "reth::cli", ?response, "Received for new payload"); } }; diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 6b71f48de12..53c592063ec 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -15,7 +15,7 @@ //! - `min-error-logs`: Disables all logs below `error` level. //! - `min-warn-logs`: Disables all logs below `warn` level. //! - `min-info-logs`: Disables all logs below `info` level. This can speed up the node, since fewer -//! calls to the logging component is made. +//! calls to the logging component are made. //! - `min-debug-logs`: Disables all logs below `debug` level. //! - `min-trace-logs`: Disables all logs below `trace` level. diff --git a/bin/reth/src/main.rs b/bin/reth/src/main.rs index f424163a24f..e146912c06f 100644 --- a/bin/reth/src/main.rs +++ b/bin/reth/src/main.rs @@ -33,11 +33,11 @@ pub struct EngineArgs { pub legacy: bool, /// Configure persistence threshold for engine experimental. - #[arg(long = "engine.persistence-threshold", requires = "experimental", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] + #[arg(long = "engine.persistence-threshold", conflicts_with = "legacy", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] pub persistence_threshold: u64, /// Configure the target number of blocks to keep in memory. - #[arg(long = "engine.memory-block-buffer-target", requires = "experimental", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] + #[arg(long = "engine.memory-block-buffer-target", conflicts_with = "legacy", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] pub memory_block_buffer_target: u64, } diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index 9e3b32cc0b3..17a6de4e607 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -81,6 +81,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/db/diff.md b/book/cli/reth/db/diff.md index ea4c29612ff..efb9e7d32e3 100644 --- a/book/cli/reth/db/diff.md +++ b/book/cli/reth/db/diff.md @@ -45,6 +45,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + --table The table name to diff. If not specified, all tables are diffed. 
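The three `--db.*` flags introduced in the two hunks above recur on every command page that follows; they all tune the underlying MDBX environment. As a purely illustrative sketch (the `db stats` subcommand choice and the values are assumptions, not recommendations), they could be combined like this:

```bash
# Illustrative only: cap the database at 8 TB, grow the file in 4 GB
# steps, and time out read transactions after 600 seconds (0 disables
# the timeout entirely).
reth db stats \
  --db.max-size 8TB \
  --db.growth-step 4GB \
  --db.read-transaction-timeout 600
```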
diff --git a/book/cli/reth/debug/build-block.md b/book/cli/reth/debug/build-block.md index 76ddac306ce..7bceb62b940 100644 --- a/book/cli/reth/debug/build-block.md +++ b/book/cli/reth/debug/build-block.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + --trusted-setup-file Overrides the KZG trusted setup by reading from the supplied file diff --git a/book/cli/reth/debug/execution.md b/book/cli/reth/debug/execution.md index 202e1452a8a..b8e1ce05d17 100644 --- a/book/cli/reth/debug/execution.md +++ b/book/cli/reth/debug/execution.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/book/cli/reth/debug/in-memory-merkle.md b/book/cli/reth/debug/in-memory-merkle.md index 534e6d46c69..a183db997e9 100644 --- a/book/cli/reth/debug/in-memory-merkle.md +++ b/book/cli/reth/debug/in-memory-merkle.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/book/cli/reth/debug/merkle.md b/book/cli/reth/debug/merkle.md index 19bc38acceb..d9a72794ef2 100644 --- a/book/cli/reth/debug/merkle.md +++ b/book/cli/reth/debug/merkle.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/book/cli/reth/debug/replay-engine.md b/book/cli/reth/debug/replay-engine.md index 7a14b9cf09d..b7a1266d399 100644 --- a/book/cli/reth/debug/replay-engine.md +++ b/book/cli/reth/debug/replay-engine.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 7bd8a0079ec..82a521ac0ab 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + --no-state Disables stages that require state. 
diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md index cb221634c40..533c0f8f888 100644 --- a/book/cli/reth/init-state.md +++ b/book/cli/reth/init-state.md @@ -69,6 +69,31 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --without-evm + Specifies whether to initialize the state without relying on EVM historical data. + + When enabled, and before inserting the state, it creates a dummy chain up to the last EVM block specified. It then appends the first provided block. + + - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be ignored. + + --header + Header file containing the header in RLP-encoded format. + + --total-difficulty + Total difficulty of the header. + + --header-hash + Hash of the header. + JSONL file with state dump. diff --git a/book/cli/reth/init.md b/book/cli/reth/init.md index cc889e5e35a..ebe2a8386cf 100644 --- a/book/cli/reth/init.md +++ b/book/cli/reth/init.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 34d32209ada..cf05ae66e28 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -245,7 +245,7 @@ RPC: --http.api Rpc Modules to be configured for the HTTP server - [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots] + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots, miner] --http.corsdomain Http Corsdomain to allow request from @@ -269,7 +269,7 @@ RPC: --ws.api Rpc Modules to be configured for the WS server - [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots] + [possible values: admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, flashbots, miner] --ipcdisable Disable the IPC-RPC server @@ -367,6 +367,9 @@ RPC: [default: 25] + --builder.disallow + Path to file containing disallowed addresses, json-encoded list of strings.
Block validation API will reject blocks containing transactions from these addresses + RPC State Cache: --rpc-cache.max-blocks Max number of blocks in cache @@ -378,8 +381,8 @@ RPC State Cache: [default: 2000] - --rpc-cache.max-envs - Max number of bytes for cached env data + --rpc-cache.max-envs + Max number of headers in cache [default: 1000] @@ -499,6 +502,11 @@ TxPool: [default: 1024] + --txpool.max-new-pending-txs-notifications + How many new pending transactions to buffer and send to in progress pending transaction iterators + + [default: 200] + Builder: --builder.extradata Block extra data set by the payload builder @@ -590,6 +598,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Dev testnet: --dev Start the node in dev mode diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index 01253705b23..33639042a1d 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -247,6 +247,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/prune.md b/book/cli/reth/prune.md index e0641256f1c..41684ecd9e0 100644 --- a/book/cli/reth/prune.md +++ b/book/cli/reth/prune.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/recover/storage-tries.md b/book/cli/reth/recover/storage-tries.md index 1f639cb095a..1afe94f55db 100644 --- a/book/cli/reth/recover/storage-tries.md +++ b/book/cli/reth/recover/storage-tries.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index ae21a891830..c22d6be6680 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Possible values: - headers: The headers stage within the pipeline diff --git a/book/cli/reth/stage/dump.md b/book/cli/reth/stage/dump.md index 291d896902d..e3df5bf2df7 100644 --- a/book/cli/reth/stage/dump.md +++ b/book/cli/reth/stage/dump.md @@ -76,6 +76,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Logging: --log.stdout.format The format to use for logs written to 
stdout diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index bfe5ff9d6c6..204efc9685b 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -69,6 +69,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + --metrics Enable Prometheus metrics. diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index d181b3bcade..cb72b9313c0 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -74,6 +74,15 @@ Database: [possible values: true, false] + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + Networking: -d, --disable-discovery Disable the discovery service diff --git a/book/developers/profiling.md b/book/developers/profiling.md index f1fdf520eb2..956bc563303 100644 --- a/book/developers/profiling.md +++ b/book/developers/profiling.md @@ -25,7 +25,7 @@ In this tutorial, we will be reviewing: [Jemalloc](https://jemalloc.net/) is a general-purpose allocator that is used [across the industry in production](https://engineering.fb.com/2011/01/03/core-data/scalable-memory-allocation-using-jemalloc/), well known for its performance benefits, predictability, and profiling capabilities. We've seen significant performance benefits in reth when using jemalloc, but will be primarily focusing on its profiling capabilities. -Jemalloc also provides tools for analyzing and visualizing its the allocation profiles it generates, notably `jeprof`. +Jemalloc also provides tools for analyzing and visualizing the allocation profiles it generates, notably `jeprof`. #### Enabling jemalloc in reth diff --git a/book/installation/installation.md b/book/installation/installation.md index ebf6c8ef3f9..1df122d4d44 100644 --- a/book/installation/installation.md +++ b/book/installation/installation.md @@ -44,13 +44,13 @@ As of April 2024 at block number 19.6M: * Archive Node: At least 2.14TB is required * Full Node: At least 1.13TB is required -NVMe drives are recommended for the best performance, with SSDs being a cheaper alternative. HDDs are the cheapest option, but they will take the longest to sync, and are not recommended. +NVMe-based SSD drives are recommended for the best performance, with SATA SSDs being a cheaper alternative. HDDs are the cheapest option, but they will take the longest to sync, and are not recommended. As of February 2024, syncing an Ethereum mainnet node to block 19.3M on NVMe drives takes about 50 hours, while on a GCP "Persistent SSD" it takes around 5 days. > **Note** > -> It is highly recommended to choose a TLC drive when using NVMe, and not a QLC drive. See [the note](#qlc-and-tlc) above. A list of recommended drives can be found [here]( https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). +> It is highly recommended to choose a TLC drive when using an NVMe drive, and not a QLC drive. See [the note](#qlc-and-tlc) above. A list of recommended drives can be found [here]( https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038).
### CPU diff --git a/book/run/config.md b/book/run/config.md index 10fd40ca763..bb28d855de8 100644 --- a/book/run/config.md +++ b/book/run/config.md @@ -36,7 +36,7 @@ The defaults shipped with Reth try to be relatively reasonable, but may not be o ### `headers` -The headers section controls both the behavior of the header stage, which download historical headers, as well as the primary downloader that fetches headers over P2P. +The headers section controls both the behavior of the header stage, which downloads historical headers, as well as the primary downloader that fetches headers over P2P. ```toml [stages.headers] @@ -65,7 +65,7 @@ commit_threshold = 10000 ### `bodies` -The bodies section controls both the behavior of the bodies stage, which download historical block bodies, as well as the primary downloader that fetches block bodies over P2P. +The bodies section controls both the behavior of the bodies stage, which downloads historical block bodies, as well as the primary downloader that fetches block bodies over P2P. ```toml [stages.bodies] @@ -102,7 +102,7 @@ The sender recovery stage recovers the address of transaction senders using tran ```toml [stages.sender_recovery] -# The amount of transactions to recover senders for before +# The number of transactions to recover senders for before # writing the results to disk. # # Lower thresholds correspond to more frequent disk I/O (writes), diff --git a/book/run/mainnet.md b/book/run/mainnet.md index 4412f51c7bf..c4908971f69 100644 --- a/book/run/mainnet.md +++ b/book/run/mainnet.md @@ -83,4 +83,14 @@ In the meantime, consider setting up [observability](./observability.md) to moni ## Running without a Consensus Layer -We provide a method for running Reth without a Consensus Layer via the `--debug.tip ` parameter. If you provide that to your node, it will simulate sending a `engine_forkChoiceUpdated` message _once_ and will trigger syncing to the provided block hash. This is useful for testing and debugging purposes, but in order to have a node that can keep up with the tip you'll need to run a CL alongside it. At the moment we have no plans of including a Consensus Layer implementation in Reth, and we are open to including light clients other methods of syncing like importing Lighthouse as a library. +We provide a method for running Reth without a Consensus Layer via the `--debug.tip ` parameter. If you provide that to your node, it will simulate sending an `engine_forkchoiceUpdated` message _once_ and will trigger syncing to the provided block hash. This is useful for testing and debugging purposes, but in order to have a node that can keep up with the tip you'll need to run a CL alongside it. At the moment we have no plans of including a Consensus Layer implementation in Reth, and we are open to including light clients and other methods of syncing, such as importing Lighthouse as a library. + +## Running with Etherscan as Block Source + +You can use `--debug.etherscan` to run Reth with a fake consensus client that advances the chain using recent blocks on Etherscan. This requires an Etherscan API key (set via the `ETHERSCAN_API_KEY` environment variable). Optionally, specify a custom API URL with `--debug.etherscan `.
+ +Example: +```bash +export ETHERSCAN_API_KEY=your_api_key_here +reth node --debug.etherscan +``` \ No newline at end of file diff --git a/book/run/private-testnet.md b/book/run/private-testnet.md index 3a987e52c73..28253ca9f01 100644 --- a/book/run/private-testnet.md +++ b/book/run/private-testnet.md @@ -6,7 +6,7 @@ This guide uses [Kurtosis' ethereum-package](https://github.com/ethpandaops/ethe * Go [here](https://docs.kurtosis.com/install/) to install Kurtosis * Go [here](https://docs.docker.com/get-docker/) to install Docker -The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) is a [package](https://docs.kurtosis.com/advanced-concepts/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth various CL clients locally. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. +The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) is a [package](https://docs.kurtosis.com/advanced-concepts/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth and various CL clients locally. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations. To see all possible configurations and flags you can use, including metrics and observability tools (e.g. Grafana, Prometheus, etc), go [here](https://github.com/ethpandaops/ethereum-package#configuration). diff --git a/book/run/pruning.md b/book/run/pruning.md index da3bb07e2cd..25d11b4e46e 100644 --- a/book/run/pruning.md +++ b/book/run/pruning.md @@ -18,7 +18,7 @@ the steps for running Reth as a full node, what caveats to expect and how to con - Full Node – Reth node that has the latest state and historical data for only the last 10064 blocks available for querying in the same way as an archive node. -The node type that was chosen when first [running a node](./run-a-node.md) **can not** be changed after +The node type that was chosen when first [running a node](./run-a-node.md) **cannot** be changed after the initial sync. Turning Archive into Pruned, or Pruned into Full is not supported. ## Modes diff --git a/book/run/sync-op-mainnet.md b/book/run/sync-op-mainnet.md index 2a862314a1d..0e2090acbcb 100644 --- a/book/run/sync-op-mainnet.md +++ b/book/run/sync-op-mainnet.md @@ -1,6 +1,6 @@ # Sync OP Mainnet -To sync OP mainnet, bedrock state needs to be imported as a starting point. There are currently two ways: +To sync OP mainnet, Bedrock state needs to be imported as a starting point. There are currently two ways: * Minimal bootstrap **(recommended)**: only state snapshot at Bedrock block is imported without any OVM historical data. * Full bootstrap **(not recommended)**: state, blocks and receipts are imported. 
*Not recommended for now: [storage consistency issue](https://github.com/paradigmxyz/reth/pull/11099) TL;DR: a sudden crash may break the node diff --git a/book/run/transactions.md b/book/run/transactions.md index 61327b57300..edb3a24d76f 100644 --- a/book/run/transactions.md +++ b/book/run/transactions.md @@ -38,7 +38,7 @@ Alongside the `accessList` parameter and legacy parameters (except `gasPrice`), The base fee is burned, while the priority fee is paid to the miner who includes the transaction, incentivizing miners to include transactions with higher priority fees per gas. -## EIP-4844 Transaction +## EIP-4844 Transactions [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844) transactions (type `0x3`) were introduced in Ethereum's Dencun fork. This provides temporary but significant scaling relief for rollups by allowing them to initially scale to 0.375 MB per slot, with a separate fee market allowing fees to be very low while usage of this system is limited. diff --git a/book/run/troubleshooting.md b/book/run/troubleshooting.md index 7368b6631ab..7b8ec6ba19c 100644 --- a/book/run/troubleshooting.md +++ b/book/run/troubleshooting.md @@ -2,13 +2,45 @@ This page explains how to deal with the most common issues. +- [Troubleshooting](#troubleshooting) + - [Database](#database) + - [Docker](#docker) + - [Error code 13](#error-code-13) + - [Slow database inserts and updates](#slow-database-inserts-and-updates) + - [Compact the database](#compact-the-database) + - [Re-sync from scratch](#re-sync-from-scratch) + - [Database write error](#database-write-error) + - [Concurrent database access error (using containers/Docker)](#concurrent-database-access-error-using-containersdocker) + - [Hardware Performance Testing](#hardware-performance-testing) + - [Disk Speed Testing with IOzone](#disk-speed-testing-with-iozone) + + ## Database +### Docker + +Externally accessing a `datadir` inside a named Docker volume will usually come with folder/file ownership/permissions issues. + +**It is not recommended** to use the path to the named volume, as it will trigger error code 13. Running e.g. `RETH_DB_PATH: /var/lib/docker/volumes/named_volume/_data/eth/db cargo r --examples db-access --path ` is **DISCOURAGED**; use a mounted volume with the right permissions instead. + +### Error code 13 + +`the environment opened in read-only code: 13` + +Externally accessing a database in a read-only folder is not supported, **UNLESS** there is no `mdbx.lck` present and `open_db_read_only` is called with `exclusive`, meaning that no node is syncing concurrently. + +If the error persists, ensure that you have the right `rx` permissions on the `datadir` **and its parent** folders. E.g., the following command should succeed: + +```bash,ignore +stat /full/path/datadir +``` + + ### Slow database inserts and updates If you're: 1. Running behind the tip -2. Have slow canonical commit time according to the `Canonical Commit Latency time` chart on [Grafana dashboard](./observability.md#prometheus--grafana) (more than 2-3 seconds) +2. Have slow canonical commit time according to the `Canonical Commit Latency Time` chart on [Grafana dashboard](./observability.md#prometheus--grafana) (more than 2-3 seconds) 3.
Seeing warnings in your logs such as ```console 2023-11-08T15:17:24.789731Z WARN providers::db: Transaction insertion took too long block_number=18528075 tx_num=2150227643 hash=0xb7de1d6620efbdd3aa8547c47a0ff09a7fd3e48ba3fd2c53ce94c6683ed66e7c elapsed=6.793759034s @@ -48,7 +80,7 @@ equal to the [freshly synced node](../installation/installation.md#hardware-requ mv reth_compact.dat $(reth db path)/mdbx.dat ``` 7. Start Reth -8. Confirm that the values on the `Freelist` chart is near zero and the values on the `Canonical Commit Latency time` chart +8. Confirm that the values on the `Freelist` chart are near zero and the values on the `Canonical Commit Latency Time` chart is less than 1 second. 9. Delete original database ```bash diff --git a/book/sources/Cargo.toml b/book/sources/Cargo.toml index 1529af952b9..b374ad798b5 100644 --- a/book/sources/Cargo.toml +++ b/book/sources/Cargo.toml @@ -1,11 +1,13 @@ [workspace] -members = [ - "exex/hello-world", - "exex/remote", - "exex/tracking-state", -] +members = ["exex/hello-world", "exex/remote", "exex/tracking-state"] # Explicitly set the resolver to version 2, which is the default for packages with edition >= 2021 # https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html resolver = "2" +[patch.'https://github.com/paradigmxyz/reth'] +reth = { path = "../../bin/reth" } +reth-exex = { path = "../../crates/exex/exex" } +reth-node-ethereum = { path = "../../crates/ethereum/node" } +reth-tracing = { path = "../../crates/tracing" } +reth-node-api = { path = "../../crates/node/api" } diff --git a/book/sources/exex/hello-world/Cargo.toml b/book/sources/exex/hello-world/Cargo.toml index e5d32a14054..c466018c667 100644 --- a/book/sources/exex/hello-world/Cargo.toml +++ b/book/sources/exex/hello-world/Cargo.toml @@ -4,10 +4,10 @@ version = "0.1.0" edition = "2021" [dependencies] -reth = { git = "https://github.com/paradigmxyz/reth.git" } # Reth -reth-exex = { git = "https://github.com/paradigmxyz/reth.git" } # Execution Extensions +reth = { git = "https://github.com/paradigmxyz/reth.git" } # Reth +reth-exex = { git = "https://github.com/paradigmxyz/reth.git" } # Execution Extensions reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" } # Ethereum Node implementation -reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } # Logging +reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } # Logging -eyre = "0.6" # Easy error handling +eyre = "0.6" # Easy error handling futures-util = "0.3" # Stream utilities for consuming notifications diff --git a/book/sources/exex/hello-world/src/bin/3.rs b/book/sources/exex/hello-world/src/bin/3.rs index 21bd25a56db..9b429d3eb08 100644 --- a/book/sources/exex/hello-world/src/bin/3.rs +++ b/book/sources/exex/hello-world/src/bin/3.rs @@ -1,10 +1,12 @@ use futures_util::TryStreamExt; -use reth::api::FullNodeComponents; +use reth::{api::FullNodeComponents, builder::NodeTypes, primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; -async fn my_exex(mut ctx: ExExContext) -> eyre::Result<()> { +async fn my_exex>>( + mut ctx: ExExContext, +) -> eyre::Result<()> { while let Some(notification) = ctx.notifications.try_next().await? 
{ match &notification { ExExNotification::ChainCommitted { new } => { diff --git a/book/sources/exex/remote/Cargo.toml b/book/sources/exex/remote/Cargo.toml index 6eeb848cacf..6cca3a841f0 100644 --- a/book/sources/exex/remote/Cargo.toml +++ b/book/sources/exex/remote/Cargo.toml @@ -6,9 +6,11 @@ edition = "2021" [dependencies] # reth reth = { git = "https://github.com/paradigmxyz/reth.git" } -reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = ["serde"] } -reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git"} -reth-node-api = { git = "https://github.com/paradigmxyz/reth.git"} +reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = [ + "serde", +] } +reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" } +reth-node-api = { git = "https://github.com/paradigmxyz/reth.git" } reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } # async @@ -49,4 +51,4 @@ path = "src/exex.rs" [[bin]] name = "consumer" -path = "src/consumer.rs" \ No newline at end of file +path = "src/consumer.rs" diff --git a/book/sources/exex/remote/src/exex.rs b/book/sources/exex/remote/src/exex.rs index 1ae4785db8b..c823d98ded4 100644 --- a/book/sources/exex/remote/src/exex.rs +++ b/book/sources/exex/remote/src/exex.rs @@ -3,6 +3,7 @@ use remote_exex::proto::{ self, remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, }; +use reth::{builder::NodeTypes, primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; @@ -44,7 +45,7 @@ impl RemoteExEx for ExExService { } } -async fn remote_exex( +async fn remote_exex>>( mut ctx: ExExContext, notifications: Arc>, ) -> eyre::Result<()> { diff --git a/book/sources/exex/remote/src/exex_4.rs b/book/sources/exex/remote/src/exex_4.rs index 24c7bf2c2f1..8286c028934 100644 --- a/book/sources/exex/remote/src/exex_4.rs +++ b/book/sources/exex/remote/src/exex_4.rs @@ -3,6 +3,7 @@ use remote_exex::proto::{ self, remote_ex_ex_server::{RemoteExEx, RemoteExExServer}, }; +use reth::{builder::NodeTypes, primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_api::FullNodeComponents; use reth_node_ethereum::EthereumNode; @@ -46,7 +47,7 @@ impl RemoteExEx for ExExService { // ANCHOR: snippet #[allow(dead_code)] -async fn remote_exex( +async fn remote_exex>>( mut ctx: ExExContext, notifications: Arc>, ) -> eyre::Result<()> { diff --git a/book/sources/exex/tracking-state/Cargo.toml b/book/sources/exex/tracking-state/Cargo.toml index 3ce21b0c340..a8e862d0a73 100644 --- a/book/sources/exex/tracking-state/Cargo.toml +++ b/book/sources/exex/tracking-state/Cargo.toml @@ -5,10 +5,12 @@ edition = "2021" [dependencies] reth = { git = "https://github.com/paradigmxyz/reth.git" } -reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = ["serde"] } -reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git"} +reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = [ + "serde", +] } +reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" } reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } -eyre = "0.6" # Easy error handling -futures-util = "0.3" # Stream utilities for consuming notifications +eyre = "0.6" # Easy error handling +futures-util = "0.3" # Stream utilities for consuming notifications alloy-primitives = "0.8.7" diff --git a/book/sources/exex/tracking-state/src/bin/1.rs
b/book/sources/exex/tracking-state/src/bin/1.rs index 0d42e0791a1..b1a8609b727 100644 --- a/book/sources/exex/tracking-state/src/bin/1.rs +++ b/book/sources/exex/tracking-state/src/bin/1.rs @@ -5,7 +5,7 @@ use std::{ }; use futures_util::{FutureExt, TryStreamExt}; -use reth::api::FullNodeComponents; +use reth::{api::FullNodeComponents, builder::NodeTypes, primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent, ExExNotification}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; @@ -14,7 +14,9 @@ struct MyExEx { ctx: ExExContext, } -impl Future for MyExEx { +impl>> Future + for MyExEx +{ type Output = eyre::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/book/sources/exex/tracking-state/src/bin/2.rs b/book/sources/exex/tracking-state/src/bin/2.rs index 9416810668f..7e9aadf8a04 100644 --- a/book/sources/exex/tracking-state/src/bin/2.rs +++ b/book/sources/exex/tracking-state/src/bin/2.rs @@ -6,7 +6,7 @@ use std::{ use alloy_primitives::BlockNumber; use futures_util::{FutureExt, TryStreamExt}; -use reth::api::FullNodeComponents; +use reth::{api::FullNodeComponents, builder::NodeTypes, primitives::EthPrimitives}; use reth_exex::{ExExContext, ExExEvent}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; @@ -25,7 +25,9 @@ impl MyExEx { } } -impl Future for MyExEx { +impl>> Future + for MyExEx +{ type Output = eyre::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/clippy.toml b/clippy.toml index cdfa4bc93a2..ab08b1132c1 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1,3 +1,18 @@ -msrv = "1.81" +msrv = "1.82" too-large-for-stack = 128 -doc-valid-idents = ["P2P", "ExEx", "ExExes", "IPv4", "IPv6", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "WAL", "MessagePack"] +doc-valid-idents = [ + "P2P", + "ExEx", + "ExExes", + "IPv4", + "IPv6", + "KiB", + "MiB", + "GiB", + "TiB", + "PiB", + "EiB", + "WAL", + "MessagePack", +] +allow-dbg-in-tests = true diff --git a/crates/blockchain-tree-api/Cargo.toml b/crates/blockchain-tree-api/Cargo.toml index 552b7276717..83ae378090b 100644 --- a/crates/blockchain-tree-api/Cargo.toml +++ b/crates/blockchain-tree-api/Cargo.toml @@ -14,10 +14,13 @@ workspace = true reth-consensus.workspace = true reth-execution-errors.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-storage-errors.workspace = true # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true +alloy-eips.workspace = true # misc thiserror.workspace = true diff --git a/crates/blockchain-tree-api/src/error.rs b/crates/blockchain-tree-api/src/error.rs index 4dd42c889a3..92866b4d4da 100644 --- a/crates/blockchain-tree-api/src/error.rs +++ b/crates/blockchain-tree-api/src/error.rs @@ -1,11 +1,13 @@ //! Error handling for the blockchain tree +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber}; use reth_consensus::ConsensusError; use reth_execution_errors::{ BlockExecutionError, BlockValidationError, InternalBlockExecutionError, }; -use reth_primitives::SealedBlock; +use reth_primitives::{SealedBlock, SealedBlockFor}; +use reth_primitives_traits::{Block, BlockBody}; pub use reth_storage_errors::provider::ProviderError; /// Various error cases that can occur when a block violates tree assumptions. 
@@ -210,48 +212,48 @@ impl InsertBlockErrorData { } } -struct InsertBlockErrorDataTwo { - block: SealedBlock, +struct InsertBlockErrorDataTwo { + block: SealedBlockFor, kind: InsertBlockErrorKindTwo, } -impl std::fmt::Display for InsertBlockErrorDataTwo { +impl std::fmt::Display for InsertBlockErrorDataTwo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "Failed to insert block (hash={}, number={}, parent_hash={}): {}", self.block.hash(), - self.block.number, - self.block.parent_hash, + self.block.number(), + self.block.parent_hash(), self.kind ) } } -impl std::fmt::Debug for InsertBlockErrorDataTwo { +impl std::fmt::Debug for InsertBlockErrorDataTwo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("InsertBlockError") .field("error", &self.kind) .field("hash", &self.block.hash()) - .field("number", &self.block.number) - .field("parent_hash", &self.block.parent_hash) - .field("num_txs", &self.block.body.transactions.len()) + .field("number", &self.block.number()) + .field("parent_hash", &self.block.parent_hash()) + .field("num_txs", &self.block.body.transactions().len()) .finish_non_exhaustive() } } -impl core::error::Error for InsertBlockErrorDataTwo { +impl core::error::Error for InsertBlockErrorDataTwo { fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { Some(&self.kind) } } -impl InsertBlockErrorDataTwo { - const fn new(block: SealedBlock, kind: InsertBlockErrorKindTwo) -> Self { +impl InsertBlockErrorDataTwo { + const fn new(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Self { Self { block, kind } } - fn boxed(block: SealedBlock, kind: InsertBlockErrorKindTwo) -> Box { + fn boxed(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Box { Box::new(Self::new(block, kind)) } } @@ -259,36 +261,36 @@ impl InsertBlockErrorDataTwo { /// Error thrown when inserting a block failed because the block is considered invalid. 
#[derive(thiserror::Error)] #[error(transparent)] -pub struct InsertBlockErrorTwo { - inner: Box, +pub struct InsertBlockErrorTwo { + inner: Box>, } // === impl InsertBlockErrorTwo === -impl InsertBlockErrorTwo { +impl InsertBlockErrorTwo { /// Create a new `InsertInvalidBlockErrorTwo` - pub fn new(block: SealedBlock, kind: InsertBlockErrorKindTwo) -> Self { + pub fn new(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Self { Self { inner: InsertBlockErrorDataTwo::boxed(block, kind) } } /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn consensus_error(error: ConsensusError, block: SealedBlock) -> Self { + pub fn consensus_error(error: ConsensusError, block: SealedBlockFor) -> Self { Self::new(block, InsertBlockErrorKindTwo::Consensus(error)) } /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn sender_recovery_error(block: SealedBlock) -> Self { + pub fn sender_recovery_error(block: SealedBlockFor) -> Self { Self::new(block, InsertBlockErrorKindTwo::SenderRecovery) } /// Create a new `InsertInvalidBlockError` from an execution error - pub fn execution_error(error: BlockExecutionError, block: SealedBlock) -> Self { + pub fn execution_error(error: BlockExecutionError, block: SealedBlockFor) -> Self { Self::new(block, InsertBlockErrorKindTwo::Execution(error)) } /// Consumes the error and returns the block that resulted in the error #[inline] - pub fn into_block(self) -> SealedBlock { + pub fn into_block(self) -> SealedBlockFor { self.inner.block } @@ -300,19 +302,19 @@ impl InsertBlockErrorTwo { /// Returns the block that resulted in the error #[inline] - pub const fn block(&self) -> &SealedBlock { + pub const fn block(&self) -> &SealedBlockFor { &self.inner.block } /// Consumes the type and returns the block and error kind. 
#[inline] - pub fn split(self) -> (SealedBlock, InsertBlockErrorKindTwo) { + pub fn split(self) -> (SealedBlockFor, InsertBlockErrorKindTwo) { let inner = *self.inner; (inner.block, inner.kind) } } -impl std::fmt::Debug for InsertBlockErrorTwo { +impl std::fmt::Debug for InsertBlockErrorTwo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(&self.inner, f) } diff --git a/crates/blockchain-tree-api/src/lib.rs b/crates/blockchain-tree-api/src/lib.rs index 0a1bf6164e0..7e1d0d714c1 100644 --- a/crates/blockchain-tree-api/src/lib.rs +++ b/crates/blockchain-tree-api/src/lib.rs @@ -9,8 +9,9 @@ use self::error::CanonicalError; use crate::error::InsertBlockError; +use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; -use reth_primitives::{BlockNumHash, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::collections::BTreeMap; diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index cff117c92b0..07ecedf882f 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -31,6 +31,7 @@ reth-consensus.workspace = true reth-node-types.workspace = true # ethereum +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true @@ -57,11 +58,31 @@ reth-consensus = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true reth-revm.workspace = true reth-evm-ethereum.workspace = true +reth-execution-types.workspace = true parking_lot.workspace = true assert_matches.workspace = true alloy-genesis.workspace = true alloy-consensus.workspace = true [features] -test-utils = [] -optimism = ["reth-primitives/optimism", "reth-provider/optimism"] +test-utils = [ + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-network/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-stages-api/test-utils", + "reth-db/test-utils", + "reth-db-api/test-utils", + "reth-provider/test-utils", + "reth-trie-db/test-utils", + "reth-trie/test-utils" ] +optimism = [ + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-execution-types/optimism", + "reth-db/optimism", + "reth-db-api/optimism" ] diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs index 5d4ca2705cb..994ed82cfb9 100644 --- a/crates/blockchain-tree/src/block_buffer.rs +++ b/crates/blockchain-tree/src/block_buffer.rs @@ -1,6 +1,8 @@ use crate::metrics::BlockBufferMetrics; +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber}; use reth_network::cache::LruCache; +use reth_node_types::Block; use reth_primitives::SealedBlockWithSenders; use std::collections::{BTreeMap, HashMap, HashSet}; @@ -16,9 +18,9 @@ use std::collections::{BTreeMap, HashMap, HashSet}; /// Note: Buffer is limited by number of blocks that it can contain and eviction of the block /// is done by least recently used block. #[derive(Debug)] -pub struct BlockBuffer { +pub struct BlockBuffer { /// All blocks in the buffer stored by their block hash. - pub(crate) blocks: HashMap, + pub(crate) blocks: HashMap>, /// Map of any parent block hash (even the ones not currently in the buffer) /// to the buffered children. /// Allows connecting buffered blocks by parent.
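The `BlockBuffer` hunks around this point show the PR's recurring refactor: a struct hard-wired to `SealedBlockWithSenders` becomes generic over a block trait, and direct field access (`block.parent_hash`) becomes trait-method access (`block.parent_hash()`). A minimal self-contained sketch of that pattern, using stand-in types rather than reth's real `Block` trait:

```rust
use std::collections::{HashMap, HashSet};

/// Stand-in for `alloy_primitives::BlockHash`.
type BlockHash = [u8; 32];

/// Stand-in for the `reth_node_types::Block` bound: header data is
/// reached through trait methods instead of concrete struct fields.
trait Block {
    fn hash(&self) -> BlockHash;
    fn parent_hash(&self) -> BlockHash;
    fn number(&self) -> u64;
}

/// Mirrors the shape of `BlockBuffer<B>` after this change.
struct Buffer<B: Block> {
    /// All buffered blocks, keyed by hash.
    blocks: HashMap<BlockHash, B>,
    /// Parent hash -> buffered children, so disconnected blocks can be
    /// reconnected once their parent arrives.
    parent_to_child: HashMap<BlockHash, HashSet<BlockHash>>,
}

impl<B: Block> Buffer<B> {
    fn new() -> Self {
        Self { blocks: HashMap::new(), parent_to_child: HashMap::new() }
    }

    /// Insert a block and index it under its parent.
    fn insert_block(&mut self, block: B) {
        let hash = block.hash();
        self.parent_to_child.entry(block.parent_hash()).or_default().insert(hash);
        self.blocks.insert(hash, block);
    }

    /// Walk parent links down to the lowest buffered ancestor.
    fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&B> {
        let mut current = self.blocks.get(hash)?;
        while let Some(parent) = self.blocks.get(&current.parent_hash()) {
            current = parent;
        }
        Some(current)
    }
}

/// Toy block type, for demonstration only.
struct TestBlock {
    hash: BlockHash,
    parent: BlockHash,
    number: u64,
}

impl Block for TestBlock {
    fn hash(&self) -> BlockHash { self.hash }
    fn parent_hash(&self) -> BlockHash { self.parent }
    fn number(&self) -> u64 { self.number }
}

fn main() {
    let mut buf = Buffer::new();
    buf.insert_block(TestBlock { hash: [1; 32], parent: [0; 32], number: 1 });
    buf.insert_block(TestBlock { hash: [2; 32], parent: [1; 32], number: 2 });
    // Block 2's lowest buffered ancestor is block 1.
    assert_eq!(buf.lowest_ancestor(&[2; 32]).map(|b| b.number()), Some(1));
}
```

The payoff is that buffering and reconnection logic is written once and works for any block type, Ethereum, Optimism, or test blocks, that satisfies the trait.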
@@ -35,7 +37,7 @@ pub struct BlockBuffer { pub(crate) metrics: BlockBufferMetrics, } -impl BlockBuffer { +impl BlockBuffer { /// Create new buffer with max limit of blocks pub fn new(limit: u32) -> Self { Self { @@ -48,37 +50,37 @@ impl BlockBuffer { } /// Return reference to buffered blocks - pub const fn blocks(&self) -> &HashMap { + pub const fn blocks(&self) -> &HashMap> { &self.blocks } /// Return reference to the requested block. - pub fn block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { + pub fn block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { self.blocks.get(hash) } /// Return a reference to the lowest ancestor of the given block in the buffer. - pub fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { + pub fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { let mut current_block = self.blocks.get(hash)?; - while let Some(parent) = self.blocks.get(¤t_block.parent_hash) { + while let Some(parent) = self.blocks.get(¤t_block.parent_hash()) { current_block = parent; } Some(current_block) } /// Insert a correct block inside the buffer. - pub fn insert_block(&mut self, block: SealedBlockWithSenders) { + pub fn insert_block(&mut self, block: SealedBlockWithSenders) { let hash = block.hash(); - self.parent_to_child.entry(block.parent_hash).or_default().insert(hash); - self.earliest_blocks.entry(block.number).or_default().insert(hash); + self.parent_to_child.entry(block.parent_hash()).or_default().insert(hash); + self.earliest_blocks.entry(block.number()).or_default().insert(hash); self.blocks.insert(hash, block); if let (_, Some(evicted_hash)) = self.lru.insert_and_get_evicted(hash) { // evict the block if limit is hit if let Some(evicted_block) = self.remove_block(&evicted_hash) { // evict the block if limit is hit - self.remove_from_parent(evicted_block.parent_hash, &evicted_hash); + self.remove_from_parent(evicted_block.parent_hash(), &evicted_hash); } } self.metrics.blocks.set(self.blocks.len() as f64); @@ -93,7 +95,7 @@ impl BlockBuffer { pub fn remove_block_with_children( &mut self, parent_hash: &BlockHash, - ) -> Vec { + ) -> Vec> { let removed = self .remove_block(parent_hash) .into_iter() @@ -152,16 +154,16 @@ impl BlockBuffer { /// This method will only remove the block if it's present inside `self.blocks`. /// The block might be missing from other collections, the method will only ensure that it has /// been removed. - fn remove_block(&mut self, hash: &BlockHash) -> Option { + fn remove_block(&mut self, hash: &BlockHash) -> Option> { let block = self.blocks.remove(hash)?; - self.remove_from_earliest_blocks(block.number, hash); - self.remove_from_parent(block.parent_hash, hash); + self.remove_from_earliest_blocks(block.number(), hash); + self.remove_from_parent(block.parent_hash(), hash); self.lru.remove(hash); Some(block) } /// Remove all children and their descendants for the given blocks and return them. - fn remove_children(&mut self, parent_hashes: Vec) -> Vec { + fn remove_children(&mut self, parent_hashes: Vec) -> Vec> { // remove all parent child connection and all the child children blocks that are connected // to the discarded parent blocks. 
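The `BlockBuffer` hunks above pair a hash-keyed map with a `parent_to_child` index and evict via an LRU once the buffer is full. A simplified stand-in: FIFO eviction via `VecDeque` replaces the real `reth_network` LRU cache, and `BufferedBlock` is a toy substitute for `SealedBlockWithSenders`:

```rust
use std::collections::{HashMap, HashSet, VecDeque};

type Hash = u64;

struct BufferedBlock {
    hash: Hash,
    parent: Hash,
    number: u64,
}

/// Capacity-bounded buffer indexed by hash, with a parent -> children map
/// so buffered blocks can be connected once an ancestor becomes canonical.
struct BlockBuffer {
    blocks: HashMap<Hash, BufferedBlock>,
    parent_to_child: HashMap<Hash, HashSet<Hash>>,
    // Insertion order; the real implementation uses an LRU cache instead.
    order: VecDeque<Hash>,
    limit: usize,
}

impl BlockBuffer {
    fn new(limit: usize) -> Self {
        Self {
            blocks: HashMap::new(),
            parent_to_child: HashMap::new(),
            order: VecDeque::new(),
            limit,
        }
    }

    fn insert_block(&mut self, block: BufferedBlock) {
        self.parent_to_child.entry(block.parent).or_default().insert(block.hash);
        self.order.push_back(block.hash);
        self.blocks.insert(block.hash, block);
        // Evict the oldest buffered block once the limit is hit.
        if self.blocks.len() > self.limit {
            if let Some(evicted) = self.order.pop_front() {
                if let Some(b) = self.blocks.remove(&evicted) {
                    if let Some(children) = self.parent_to_child.get_mut(&b.parent) {
                        children.remove(&evicted);
                    }
                }
            }
        }
    }

    /// Walks parent links to the lowest buffered ancestor of `hash`.
    fn lowest_ancestor(&self, hash: &Hash) -> Option<&BufferedBlock> {
        let mut current = self.blocks.get(hash)?;
        while let Some(parent) = self.blocks.get(&current.parent) {
            current = parent;
        }
        Some(current)
    }
}

fn main() {
    let mut buf = BlockBuffer::new(2);
    buf.insert_block(BufferedBlock { hash: 2, parent: 1, number: 2 });
    buf.insert_block(BufferedBlock { hash: 3, parent: 2, number: 3 });
    assert_eq!(buf.lowest_ancestor(&3).unwrap().number, 2);
    // Inserting past the limit evicts the oldest buffered block (hash 2).
    buf.insert_block(BufferedBlock { hash: 4, parent: 3, number: 4 });
    assert_eq!(buf.lowest_ancestor(&4).unwrap().number, 3);
}
```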
let mut remove_parent_children = parent_hashes; diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs index 0c48b3b9ce8..7778fb9262c 100644 --- a/crates/blockchain-tree/src/block_indices.rs +++ b/crates/blockchain-tree/src/block_indices.rs @@ -377,8 +377,9 @@ impl BlockIndices { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::Header; use alloy_primitives::B256; - use reth_primitives::{Header, SealedBlock, SealedHeader}; + use reth_primitives::{SealedBlock, SealedHeader}; #[test] fn pending_block_num_hash_returns_none_if_no_fork() { diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 71a58aa5628..e8576de4a71 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1,6 +1,7 @@ //! Implementation of [`BlockchainTree`] use crate::{ + externals::TreeNodeTypes, metrics::{MakeCanonicalAction, MakeCanonicalDurationsRecorder, TreeMetrics}, state::{SidechainId, TreeState}, AppendableChain, BlockIndices, BlockchainTreeConfig, ExecutionData, TreeExternals, @@ -21,10 +22,10 @@ use reth_primitives::{ SealedHeader, StaticFileSegment, }; use reth_provider::{ - providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, BlockWriter, - CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, - ChainSpecProvider, ChainSplit, ChainSplitTarget, DisplayBlocksChain, HeaderProvider, - ProviderError, StaticFileProviderFactory, + BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, + CanonStateNotificationSender, CanonStateNotifications, ChainSpecProvider, ChainSplit, + ChainSplitTarget, DBProvider, DisplayBlocksChain, HashedPostStateProvider, HeaderProvider, + ProviderError, StaticFileProviderFactory, StorageLocation, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; @@ -93,8 +94,8 @@ impl BlockchainTree { impl BlockchainTree where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. /// @@ -113,9 +114,6 @@ where /// is crucial for the correct execution of transactions. /// - `tree_config`: Configuration for the blockchain tree, including any parameters that affect /// its structure or performance. - /// - `prune_modes`: Configuration for pruning old blockchain data. This helps in managing the - /// storage space efficiently. It's important to validate this configuration to ensure it does - /// not lead to unintended data loss. 
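Several hunks in this file swap the `N: ProviderNodeTypes` bound for `N: TreeNodeTypes`, a bundled bound re-exported from `reth_provider` later in this diff. A sketch of how such a bound alias is typically built, via a marker trait with a blanket impl; the component traits here are stand-ins, and the real `TreeNodeTypes` may bundle different bounds:

```rust
use std::fmt::Debug;

// Stand-ins for the real provider/node-type traits.
trait ProviderNodeTypes: Debug {}
trait FullNodePrimitives: Debug {}

/// "Trait alias" bundling several bounds under one name: any type that
/// satisfies the component bounds gets it automatically via the blanket
/// impl below, so call sites can write `N: TreeNodeTypes` instead of
/// repeating the full bound list on every impl block.
trait TreeNodeTypes: ProviderNodeTypes + FullNodePrimitives {}
impl<T: ProviderNodeTypes + FullNodePrimitives> TreeNodeTypes for T {}

#[derive(Debug)]
struct EthNode;
impl ProviderNodeTypes for EthNode {}
impl FullNodePrimitives for EthNode {}

fn build_tree<N: TreeNodeTypes>(node: N) {
    println!("building tree for {node:?}");
}

fn main() {
    build_tree(EthNode);
}
```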
pub fn new( externals: TreeExternals, config: BlockchainTreeConfig, @@ -902,6 +900,7 @@ where // check unconnected block buffer for children of the chains let mut all_chain_blocks = Vec::new(); for chain in self.state.chains.values() { + all_chain_blocks.reserve_exact(chain.blocks().len()); for (&number, block) in chain.blocks() { all_chain_blocks.push(BlockNumHash { number, hash: block.hash() }) } @@ -1216,7 +1215,7 @@ where recorder: &mut MakeCanonicalDurationsRecorder, ) -> Result<(), CanonicalError> { let (blocks, state, chain_trie_updates) = chain.into_inner(); - let hashed_state = state.hash_state_slow(); + let hashed_state = self.externals.provider_factory.hashed_post_state(state.state()); let prefix_sets = hashed_state.construct_prefix_sets().freeze(); let hashed_state_sorted = hashed_state.into_sorted(); @@ -1334,7 +1333,7 @@ where info!(target: "blockchain_tree", "REORG: revert canonical from database by unwinding chain blocks {:?}", revert_range); // read block and execution result from database. and remove traces of block from tables. let blocks_and_execution = provider_rw - .take_block_and_execution_range(revert_range) + .take_block_and_execution_above(revert_until, StorageLocation::Database) .map_err(|e| CanonicalError::CanonicalRevert(e.to_string()))?; provider_rw.commit()?; @@ -1376,9 +1375,10 @@ where #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{TxEip1559, EMPTY_ROOT_HASH}; + use alloy_consensus::{Header, TxEip1559, EMPTY_ROOT_HASH}; + use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip4895::Withdrawals}; use alloy_genesis::{Genesis, GenesisAccount}; - use alloy_primitives::{keccak256, Address, Sealable, B256}; + use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, B256}; use assert_matches::assert_matches; use linked_hash_set::LinkedHashSet; use reth_chainspec::{ChainSpecBuilder, MAINNET, MIN_TRANSACTION_GAS}; @@ -1387,20 +1387,20 @@ mod tests { use reth_db_api::transaction::DbTxMut; use reth_evm::test_utils::MockExecutorProvider; use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_node_types::FullNodePrimitives; use reth_primitives::{ - constants::EIP1559_INITIAL_BASE_FEE, proofs::{calculate_receipt_root, calculate_transaction_root}, - revm_primitives::AccountInfo, - Account, BlockBody, Header, Signature, Transaction, TransactionSigned, - TransactionSignedEcRecovered, Withdrawals, + Account, BlockBody, RecoveredTx, Transaction, TransactionSigned, }; use reth_provider::{ + providers::ProviderNodeTypes, test_utils::{ blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB, }, - ProviderFactory, + ProviderFactory, StorageLocation, }; + use reth_revm::primitives::AccountInfo; use reth_stages_api::StageCheckpoint; use reth_trie::{root::state_root_unhashed, StateRoot}; use std::collections::HashMap; @@ -1423,7 +1423,17 @@ mod tests { TreeExternals::new(provider_factory, consensus, executor_factory) } - fn setup_genesis(factory: &ProviderFactory, mut genesis: SealedBlock) { + fn setup_genesis< + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + BlockBody = reth_primitives::BlockBody, + BlockHeader = reth_primitives::Header, + >, + >, + >( + factory: &ProviderFactory, + mut genesis: SealedBlock, + ) { // insert genesis to db. 
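The `reserve_exact` line added above pre-sizes the vector before the inner loop pushes one entry per block of the chain, avoiding repeated reallocation. The effect in isolation, with toy `(number, hash)` tuples in place of `BlockNumHash`:

```rust
/// Collect (number, hash) pairs from several chains, pre-allocating per
/// chain as the hunk above does with `reserve_exact`.
fn collect_num_hashes(chains: &[Vec<(u64, u64)>]) -> Vec<(u64, u64)> {
    let mut all = Vec::new();
    for chain in chains {
        // We know exactly how many entries this chain contributes
        // before pushing them, so reserve once instead of growing.
        all.reserve_exact(chain.len());
        for &(number, hash) in chain {
            all.push((number, hash));
        }
    }
    all
}

fn main() {
    let chains = vec![vec![(1, 0xa), (2, 0xb)], vec![(3, 0xc)]];
    let all = collect_num_hashes(&chains);
    assert_eq!(all.len(), 3);
    assert!(all.capacity() >= 3);
}
```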
genesis.header.set_block_number(10); @@ -1554,6 +1564,7 @@ mod tests { SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default()) .try_seal_with_senders() .unwrap(), + StorageLocation::Database, ) .unwrap(); let account = Account { balance: initial_signer_balance, ..Default::default() }; @@ -1562,15 +1573,15 @@ mod tests { provider_rw.commit().unwrap(); } - let single_tx_cost = U256::from(EIP1559_INITIAL_BASE_FEE * MIN_TRANSACTION_GAS); - let mock_tx = |nonce: u64| -> TransactionSignedEcRecovered { - TransactionSigned::from_transaction_and_signature( + let single_tx_cost = U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS); + let mock_tx = |nonce: u64| -> RecoveredTx { + TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), nonce, gas_limit: MIN_TRANSACTION_GAS, to: Address::ZERO.into(), - max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, + max_fee_per_gas: INITIAL_BASE_FEE as u128, ..Default::default() }), Signature::test_signature(), @@ -1580,10 +1591,12 @@ mod tests { let mock_block = |number: u64, parent: Option, - body: Vec, + body: Vec, num_of_signer_txs: u64| -> SealedBlockWithSenders { - let transactions_root = calculate_transaction_root(&body); + let signed_body = + body.clone().into_iter().map(|tx| tx.into_signed()).collect::>(); + let transactions_root = calculate_transaction_root(&signed_body); let receipts = body .iter() .enumerate() @@ -1601,13 +1614,13 @@ mod tests { // receipts root computation is different for OP let receipts_root = calculate_receipt_root(&receipts); - let sealed = Header { + let header = Header { number, parent_hash: parent.unwrap_or_default(), gas_used: body.len() as u64 * MIN_TRANSACTION_GAS, gas_limit: chain_spec.max_gas_limit, mix_hash: B256::random(), - base_fee_per_gas: Some(EIP1559_INITIAL_BASE_FEE), + base_fee_per_gas: Some(INITIAL_BASE_FEE), transactions_root, receipts_root, state_root: state_root_unhashed(HashMap::from([( @@ -1623,18 +1636,15 @@ mod tests { ), )])), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); + }; SealedBlockWithSenders::new( SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { - transactions: body.clone().into_iter().map(|tx| tx.into_signed()).collect(), + transactions: signed_body, ommers: Vec::new(), withdrawals: Some(Withdrawals::default()), - requests: None, }, }, body.iter().map(|tx| tx.signer()).collect(), @@ -1875,7 +1885,12 @@ mod tests { ); let provider = tree.externals.provider_factory.provider().unwrap(); - let prefix_sets = exec5.hash_state_slow().construct_prefix_sets().freeze(); + let prefix_sets = tree + .externals + .provider_factory + .hashed_post_state(exec5.state()) + .construct_prefix_sets() + .freeze(); let state_root = StateRoot::from_tx(provider.tx_ref()).with_prefix_sets(prefix_sets).root().unwrap(); assert_eq!(state_root, block5.state_root); diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 393e525d5ae..4002fae1ac9 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -11,18 +11,19 @@ use reth_blockchain_tree_api::{ error::{BlockchainTreeError, InsertBlockErrorKind}, BlockAttachment, BlockValidationKind, }; -use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; +use reth_consensus::{ConsensusError, PostExecutionInput}; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_errors::BlockExecutionError; use 
reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{GotExpected, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ - providers::{BundleStateProvider, ConsistentDbView, ProviderNodeTypes}, - FullExecutionDataProvider, ProviderError, StateRootProvider, TryIntoHistoricalStateProvider, + providers::{BundleStateProvider, ConsistentDbView, TreeNodeTypes}, + DBProvider, FullExecutionDataProvider, HashedPostStateProvider, ProviderError, + StateRootProvider, TryIntoHistoricalStateProvider, }; use reth_revm::database::StateProviderDatabase; -use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; -use reth_trie_parallel::parallel_root::ParallelStateRoot; +use reth_trie::{updates::TrieUpdates, TrieInput}; +use reth_trie_parallel::root::ParallelStateRoot; use std::{ collections::BTreeMap, ops::{Deref, DerefMut}, @@ -75,8 +76,8 @@ impl AppendableChain { block_validation_kind: BlockValidationKind, ) -> Result where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { let execution_outcome = ExecutionOutcome::default(); let empty = BTreeMap::new(); @@ -113,8 +114,8 @@ impl AppendableChain { block_validation_kind: BlockValidationKind, ) -> Result where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { let parent_number = block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; @@ -176,8 +177,8 @@ impl AppendableChain { ) -> Result<(ExecutionOutcome, Option), BlockExecutionError> where EDP: FullExecutionDataProvider, - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { // some checks are done before blocks comes here. externals.consensus.validate_header_against_parent(&block, parent_block)?; @@ -227,14 +228,13 @@ impl AppendableChain { execution_outcome.extend(initial_execution_outcome.clone()); ParallelStateRoot::new( consistent_view, - TrieInput::from_state(execution_outcome.hash_state_slow()), + TrieInput::from_state(provider.hashed_post_state(execution_outcome.state())), ) .incremental_root_with_updates() .map(|(root, updates)| (root, Some(updates))) .map_err(ProviderError::from)? } else { - let hashed_state = - HashedPostState::from_bundle_state(&initial_execution_outcome.state().state); + let hashed_state = provider.hashed_post_state(initial_execution_outcome.state()); let state_root = provider.state_root(hashed_state)?; (state_root, None) }; @@ -283,8 +283,8 @@ impl AppendableChain { block_validation_kind: BlockValidationKind, ) -> Result<(), InsertBlockErrorKind> where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { let parent_block = self.chain.tip(); diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 719852c12ac..9e72008e838 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -1,8 +1,8 @@ //! Blockchain tree externals. 
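The state-root logic in `chain.rs` above picks between `ParallelStateRoot` over a consistent database view, when the chain extends the canonical head, and the provider's serial `state_root` fallback otherwise. A toy illustration of that dispatch: XOR-folding stands in for the actual trie computation, and only the if/else shape mirrors the diff:

```rust
use std::thread;

type Entry = (u64, u64); // toy (hashed address, value) pair

fn serial_root(entries: &[Entry]) -> u64 {
    entries.iter().fold(0, |acc, &(k, v)| acc ^ k.rotate_left(17) ^ v)
}

/// Toy "parallel state root": split the entries and combine partial
/// results on worker threads, standing in for `ParallelStateRoot`
/// computing subtries in parallel. XOR is associative and commutative,
/// so the combined result equals the serial fold.
fn parallel_root(entries: &[Entry]) -> u64 {
    let (left, right) = entries.split_at(entries.len() / 2);
    thread::scope(|s| {
        let l = s.spawn(|| serial_root(left));
        let r = s.spawn(|| serial_root(right));
        l.join().unwrap() ^ r.join().unwrap()
    })
}

fn compute_root(extends_canonical_head: bool, entries: &[Entry]) -> u64 {
    // Mirrors the dispatch in the hunk above: parallel computation over a
    // consistent view when extending the canonical head, serial fallback
    // for sidechains.
    if extends_canonical_head {
        parallel_root(entries)
    } else {
        serial_root(entries)
    }
}

fn main() {
    let entries = vec![(1, 10), (2, 20), (3, 30)];
    assert_eq!(compute_root(true, &entries), compute_root(false, &entries));
}
```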
use alloy_primitives::{BlockHash, BlockNumber}; -use reth_consensus::Consensus; -use reth_db::{static_file::HeaderMask, tables}; +use reth_consensus::FullConsensus; +use reth_db::{static_file::BlockHashMask, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_node_types::NodeTypesWithDB; use reth_primitives::StaticFileSegment; @@ -13,6 +13,8 @@ use reth_provider::{ use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; +pub use reth_provider::providers::{NodeTypesForTree, TreeNodeTypes}; + /// A container for external components. /// /// This is a simple container for external components used throughout the blockchain tree @@ -21,13 +23,12 @@ use std::{collections::BTreeMap, sync::Arc}; /// - A handle to the database /// - A handle to the consensus engine /// - The executor factory to execute blocks with -/// - The chain spec #[derive(Debug)] pub struct TreeExternals { /// The provider factory, used to commit the canonical chain, or unwind it. pub(crate) provider_factory: ProviderFactory, /// The consensus engine. - pub(crate) consensus: Arc, + pub(crate) consensus: Arc, /// The executor factory to execute blocks with. pub(crate) executor_factory: E, } @@ -36,7 +37,7 @@ impl TreeExternals { /// Create new tree externals. pub fn new( provider_factory: ProviderFactory, - consensus: Arc, + consensus: Arc, executor_factory: E, ) -> Self { Self { provider_factory, consensus, executor_factory } @@ -76,7 +77,7 @@ impl TreeExternals { hashes.extend(range.clone().zip(static_file_provider.fetch_range_with_predicate( StaticFileSegment::Headers, range, - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::(number.into()), |_| true, )?)); } diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 925b8f03add..f5d2ad8c6f7 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -6,10 +6,10 @@ use reth_blockchain_tree_api::{ BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, InsertPayloadOk, }; -use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{EthPrimitives, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ BlockchainTreePendingStateProvider, CanonStateNotificationSender, CanonStateNotifications, - CanonStateSubscriptions, FullExecutionDataProvider, + CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, }; use reth_storage_errors::provider::ProviderResult; use std::collections::BTreeMap; @@ -60,6 +60,12 @@ impl BlockchainTreeEngine for NoopBlockchainTree { Ok(()) } + fn update_block_hashes_and_clear_buffered( + &self, + ) -> Result, CanonicalError> { + Ok(BTreeMap::new()) + } + fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> { Ok(()) } @@ -67,12 +73,6 @@ impl BlockchainTreeEngine for NoopBlockchainTree { fn make_canonical(&self, block_hash: BlockHash) -> Result { Err(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }.into()) } - - fn update_block_hashes_and_clear_buffered( - &self, - ) -> Result, CanonicalError> { - Ok(BTreeMap::new()) - } } impl BlockchainTreeViewer for NoopBlockchainTree { @@ -126,6 +126,10 @@ impl BlockchainTreePendingStateProvider for NoopBlockchainTree { } } +impl NodePrimitivesProvider for NoopBlockchainTree { + type Primitives = EthPrimitives; +} + impl CanonStateSubscriptions for NoopBlockchainTree { fn 
subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.canon_state_notification_sender diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 8e6cceccdd1..e668f4e2dac 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -1,5 +1,7 @@ //! Wrapper around `BlockchainTree` that allows for it to be shared. +use crate::externals::TreeNodeTypes; + use super::BlockchainTree; use alloy_eips::BlockNumHash; use alloy_primitives::{BlockHash, BlockNumber}; @@ -13,8 +15,8 @@ use reth_evm::execute::BlockExecutorProvider; use reth_node_types::NodeTypesWithDB; use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ - providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateSubscriptions, - FullExecutionDataProvider, ProviderError, + providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateNotifications, + CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, ProviderError, }; use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; @@ -36,8 +38,8 @@ impl ShareableBlockchainTree { impl BlockchainTreeEngine for ShareableBlockchainTree where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { let mut tree = self.tree.write(); @@ -107,8 +109,8 @@ where impl BlockchainTreeViewer for ShareableBlockchainTree where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { fn header_by_hash(&self, hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?hash, "Returning header by hash"); @@ -170,8 +172,8 @@ where impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, { fn find_pending_state_provider( &self, @@ -183,12 +185,20 @@ where } } -impl CanonStateSubscriptions for ShareableBlockchainTree +impl NodePrimitivesProvider for ShareableBlockchainTree where N: ProviderNodeTypes, E: Send + Sync, { - fn subscribe_to_canonical_state(&self) -> reth_provider::CanonStateNotifications { + type Primitives = N::Primitives; +} + +impl CanonStateSubscriptions for ShareableBlockchainTree +where + N: TreeNodeTypes, + E: Send + Sync, +{ + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { trace!(target: "blockchain_tree", "Registered subscriber for canonical state"); self.tree.read().subscribe_canon_state() } diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index b76db9e6a9c..a8e43240f4f 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -61,6 +61,7 @@ impl TreeState { pub(crate) fn block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> { self.block_with_senders_by_hash(block_hash).map(|block| &block.block) } + /// Returns the block with matching hash from any side-chain. /// /// Caution: This will not return blocks from the canonical chain. 
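`ShareableBlockchainTree` above is a thin `Arc<RwLock<_>>` wrapper: engine methods take the write lock, viewer methods take the read lock, and clones share one tree. A self-contained sketch using `std::sync::RwLock` in place of the `parking_lot` lock reth uses:

```rust
use std::sync::{Arc, RwLock};

#[derive(Default)]
struct Tree {
    blocks: Vec<u64>,
}

/// Cloneable handle; all clones point at the same underlying tree.
#[derive(Clone, Default)]
struct ShareableTree {
    tree: Arc<RwLock<Tree>>,
}

impl ShareableTree {
    fn insert_block(&self, number: u64) {
        // Engine-facing mutation: exclusive access.
        self.tree.write().unwrap().blocks.push(number);
    }

    fn contains(&self, number: u64) -> bool {
        // Viewer-facing query: shared access, many readers at once.
        self.tree.read().unwrap().blocks.contains(&number)
    }
}

fn main() {
    let tree = ShareableTree::default();
    let handle = tree.clone();
    handle.insert_block(7);
    assert!(tree.contains(7));
}
```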
@@ -128,3 +129,302 @@ impl From for SidechainId { Self(value) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::canonical_chain::CanonicalChain; + use alloy_primitives::B256; + use reth_execution_types::Chain; + use reth_provider::ExecutionOutcome; + + #[test] + fn test_tree_state_initialization() { + // Set up some dummy data for initialization + let last_finalized_block_number = 10u64; + let last_canonical_hashes = vec![(9u64, B256::random()), (10u64, B256::random())]; + let buffer_limit = 5; + + // Initialize the tree state + let tree_state = TreeState::new( + last_finalized_block_number, + last_canonical_hashes.clone(), + buffer_limit, + ); + + // Verify the tree state after initialization + assert_eq!(tree_state.block_chain_id_generator, 0); + assert_eq!(tree_state.block_indices().last_finalized_block(), last_finalized_block_number); + assert_eq!( + *tree_state.block_indices.canonical_chain().inner(), + *CanonicalChain::new(last_canonical_hashes.into_iter().collect()).inner() + ); + assert!(tree_state.chains.is_empty()); + assert!(tree_state.buffered_blocks.lru.is_empty()); + } + + #[test] + fn test_tree_state_next_id() { + // Initialize the tree state + let mut tree_state = TreeState::new(0, vec![], 5); + + // Generate a few sidechain IDs + let first_id = tree_state.next_id(); + let second_id = tree_state.next_id(); + + // Verify the generated sidechain IDs and the updated generator state + assert_eq!(first_id, SidechainId(0)); + assert_eq!(second_id, SidechainId(1)); + assert_eq!(tree_state.block_chain_id_generator, 2); + } + + #[test] + fn test_tree_state_insert_chain() { + // Initialize tree state + let mut tree_state = TreeState::new(0, vec![], 5); + + // Create a chain with two blocks + let block: SealedBlockWithSenders = Default::default(); + let block1_hash = B256::random(); + let block2_hash = B256::random(); + + let mut block1 = block.clone(); + let mut block2 = block; + + block1.block.header.set_hash(block1_hash); + block1.block.header.set_block_number(9); + block2.block.header.set_hash(block2_hash); + block2.block.header.set_block_number(10); + + let chain = AppendableChain::new(Chain::new( + [block1, block2], + Default::default(), + Default::default(), + )); + + // Insert the chain into the TreeState + let chain_id = tree_state.insert_chain(chain).unwrap(); + + // Verify the chain ID and that it was added to the chains collection + assert_eq!(chain_id, SidechainId(0)); + assert!(tree_state.chains.contains_key(&chain_id)); + + // Ensure that the block indices are updated + assert_eq!( + tree_state.block_indices.get_side_chain_id(&block1_hash).unwrap(), + SidechainId(0) + ); + assert_eq!( + tree_state.block_indices.get_side_chain_id(&block2_hash).unwrap(), + SidechainId(0) + ); + + // Ensure that the block chain ID generator was updated + assert_eq!(tree_state.block_chain_id_generator, 1); + + // Create an empty chain + let chain_empty = AppendableChain::new(Chain::default()); + + // Insert the empty chain into the tree state + let chain_id = tree_state.insert_chain(chain_empty); + + // Ensure that the empty chain was not inserted + assert!(chain_id.is_none()); + + // Nothing should have changed and no new chain should have been added + assert!(tree_state.chains.contains_key(&SidechainId(0))); + assert!(!tree_state.chains.contains_key(&SidechainId(1))); + assert_eq!( + tree_state.block_indices.get_side_chain_id(&block1_hash).unwrap(), + SidechainId(0) + ); + assert_eq!( + tree_state.block_indices.get_side_chain_id(&block2_hash).unwrap(), + SidechainId(0) + ); + 
        assert_eq!(tree_state.block_chain_id_generator, 1);
+    }
+
+    #[test]
+    fn test_block_by_hash_side_chain() {
+        // Initialize a tree state with some dummy data
+        let mut tree_state = TreeState::new(0, vec![], 5);
+
+        // Create two side-chain blocks with random hashes
+        let block1_hash = B256::random();
+        let block2_hash = B256::random();
+
+        let mut block1: SealedBlockWithSenders = Default::default();
+        let mut block2: SealedBlockWithSenders = Default::default();
+
+        block1.block.header.set_hash(block1_hash);
+        block1.block.header.set_block_number(9);
+        block2.block.header.set_hash(block2_hash);
+        block2.block.header.set_block_number(10);
+
+        // Create a chain with these blocks
+        let chain = AppendableChain::new(Chain::new(
+            vec![block1.clone(), block2.clone()],
+            Default::default(),
+            Default::default(),
+        ));
+
+        // Insert the side chain into the TreeState
+        tree_state.insert_chain(chain).unwrap();
+
+        // Retrieve the blocks by their hashes
+        let retrieved_block1 = tree_state.block_by_hash(block1_hash);
+        assert_eq!(*retrieved_block1.unwrap(), block1.block);
+
+        let retrieved_block2 = tree_state.block_by_hash(block2_hash);
+        assert_eq!(*retrieved_block2.unwrap(), block2.block);
+
+        // Test block_by_hash with a random hash that doesn't exist
+        let non_existent_hash = B256::random();
+        let result = tree_state.block_by_hash(non_existent_hash);
+
+        // Ensure that no block is found
+        assert!(result.is_none());
+    }
+
+    #[test]
+    fn test_block_with_senders_by_hash() {
+        // Initialize a tree state with some dummy data
+        let mut tree_state = TreeState::new(0, vec![], 5);
+
+        // Create two side-chain blocks with random hashes
+        let block1_hash = B256::random();
+        let block2_hash = B256::random();
+
+        let mut block1: SealedBlockWithSenders = Default::default();
+        let mut block2: SealedBlockWithSenders = Default::default();
+
+        block1.block.header.set_hash(block1_hash);
+        block1.block.header.set_block_number(9);
+        block2.block.header.set_hash(block2_hash);
+        block2.block.header.set_block_number(10);
+
+        // Create a chain with these blocks
+        let chain = AppendableChain::new(Chain::new(
+            vec![block1.clone(), block2.clone()],
+            Default::default(),
+            Default::default(),
+        ));
+
+        // Insert the side chain into the TreeState
+        tree_state.insert_chain(chain).unwrap();
+
+        // Test to retrieve the blocks with senders by their hashes
+        let retrieved_block1 = tree_state.block_with_senders_by_hash(block1_hash);
+        assert_eq!(*retrieved_block1.unwrap(), block1);
+
+        let retrieved_block2 = tree_state.block_with_senders_by_hash(block2_hash);
+        assert_eq!(*retrieved_block2.unwrap(), block2);
+
+        // Test block_with_senders_by_hash with a random hash that doesn't exist
+        let non_existent_hash = B256::random();
+        let result = tree_state.block_with_senders_by_hash(non_existent_hash);
+
+        // Ensure that no block is found
+        assert!(result.is_none());
+    }
+
+    #[test]
+    fn test_get_buffered_block() {
+        // Initialize a tree state with some dummy data
+        let mut tree_state = TreeState::new(0, vec![], 5);
+
+        // Create a block with a random hash and add it to the buffer
+        let block_hash = B256::random();
+        let mut block: SealedBlockWithSenders = Default::default();
+        block.block.header.set_hash(block_hash);
+
+        // Add the block to the buffered blocks in the TreeState
+        tree_state.buffered_blocks.insert_block(block.clone());
+
+        // Test get_buffered_block to retrieve the block by its hash
+        let retrieved_block = tree_state.get_buffered_block(&block_hash);
+        assert_eq!(*retrieved_block.unwrap(), block);
+
+        // Test
get_buffered_block with a non-existent hash + let non_existent_hash = B256::random(); + let result = tree_state.get_buffered_block(&non_existent_hash); + + // Ensure that no block is found + assert!(result.is_none()); + } + + #[test] + fn test_lowest_buffered_ancestor() { + // Initialize a tree state with some dummy data + let mut tree_state = TreeState::new(0, vec![], 5); + + // Create blocks with random hashes and set up parent-child relationships + let ancestor_hash = B256::random(); + let descendant_hash = B256::random(); + + let mut ancestor_block: SealedBlockWithSenders = Default::default(); + let mut descendant_block: SealedBlockWithSenders = Default::default(); + + ancestor_block.block.header.set_hash(ancestor_hash); + descendant_block.block.header.set_hash(descendant_hash); + descendant_block.block.header.set_parent_hash(ancestor_hash); + + // Insert the blocks into the buffer + tree_state.buffered_blocks.insert_block(ancestor_block.clone()); + tree_state.buffered_blocks.insert_block(descendant_block.clone()); + + // Test lowest_buffered_ancestor for the descendant block + let lowest_ancestor = tree_state.lowest_buffered_ancestor(&descendant_hash); + assert!(lowest_ancestor.is_some()); + assert_eq!(lowest_ancestor.unwrap().block.header.hash(), ancestor_hash); + + // Test lowest_buffered_ancestor with a non-existent hash + let non_existent_hash = B256::random(); + let result = tree_state.lowest_buffered_ancestor(&non_existent_hash); + + // Ensure that no ancestor is found + assert!(result.is_none()); + } + + #[test] + fn test_receipts_by_block_hash() { + // Initialize a tree state with some dummy data + let mut tree_state = TreeState::new(0, vec![], 5); + + // Create a block with a random hash and receipts + let block_hash = B256::random(); + let receipt1 = Receipt::default(); + let receipt2 = Receipt::default(); + + let mut block: SealedBlockWithSenders = Default::default(); + block.block.header.set_hash(block_hash); + + let receipts = vec![receipt1, receipt2]; + + // Create a chain with the block and its receipts + let chain = AppendableChain::new(Chain::new( + vec![block.clone()], + ExecutionOutcome { receipts: receipts.clone().into(), ..Default::default() }, + Default::default(), + )); + + // Insert the chain into the TreeState + tree_state.insert_chain(chain).unwrap(); + + // Test receipts_by_block_hash for the inserted block + let retrieved_receipts = tree_state.receipts_by_block_hash(block_hash); + assert!(retrieved_receipts.is_some()); + + // Check if the correct receipts are returned + let receipts_ref: Vec<&Receipt> = receipts.iter().collect(); + assert_eq!(retrieved_receipts.unwrap(), receipts_ref); + + // Test receipts_by_block_hash with a non-existent block hash + let non_existent_hash = B256::random(); + let result = tree_state.receipts_by_block_hash(non_existent_hash); + + // Ensure that no receipts are found + assert!(result.is_none()); + } +} diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index c9691bec411..2b06bd93707 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -18,22 +18,24 @@ reth-errors.workspace = true reth-execution-types.workspace = true reth-metrics.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-storage-api.workspace = true reth-trie.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true +revm.workspace = true # async -tokio = { workspace = true, features = ["sync", "macros", 
"rt-multi-thread"] } +tokio = { workspace = true, default-features = false, features = ["sync", "macros"] } tokio-stream = { workspace = true, features = ["sync"] } # tracing tracing.workspace = true # misc -auto_impl.workspace = true derive_more.workspace = true metrics.workspace = true parking_lot.workspace = true @@ -42,9 +44,7 @@ pin-project.workspace = true # optional deps for test-utils alloy-signer = { workspace = true, optional = true } alloy-signer-local = { workspace = true, optional = true } -alloy-consensus = { workspace = true, optional = true } rand = { workspace = true, optional = true } -revm = { workspace = true, optional = true } [dev-dependencies] reth-testing-utils.workspace = true @@ -52,13 +52,15 @@ alloy-signer.workspace = true alloy-signer-local.workspace = true alloy-consensus.workspace = true rand.workspace = true -revm.workspace = true [features] test-utils = [ - "alloy-signer", - "alloy-signer-local", - "alloy-consensus", - "rand", - "revm" + "alloy-signer", + "alloy-signer-local", + "rand", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-trie/test-utils", + "revm/test-utils", ] diff --git a/crates/chain-state/src/chain_info.rs b/crates/chain-state/src/chain_info.rs index 3c75544ac46..1b8575005c4 100644 --- a/crates/chain-state/src/chain_info.rs +++ b/crates/chain-state/src/chain_info.rs @@ -1,8 +1,9 @@ +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use alloy_primitives::BlockNumber; use parking_lot::RwLock; use reth_chainspec::ChainInfo; -use reth_primitives::SealedHeader; +use reth_primitives::{NodePrimitives, SealedHeader}; use std::{ sync::{ atomic::{AtomicU64, Ordering}, @@ -14,17 +15,21 @@ use tokio::sync::watch; /// Tracks the chain info: canonical head, safe block, finalized block. #[derive(Debug, Clone)] -pub struct ChainInfoTracker { - inner: Arc, +pub struct ChainInfoTracker { + inner: Arc>, } -impl ChainInfoTracker { +impl ChainInfoTracker +where + N: NodePrimitives, + N::BlockHeader: BlockHeader, +{ /// Create a new chain info container for the given canonical head and finalized header if it /// exists. pub fn new( - head: SealedHeader, - finalized: Option, - safe: Option, + head: SealedHeader, + finalized: Option>, + safe: Option>, ) -> Self { let (finalized_block, _) = watch::channel(finalized); let (safe_block, _) = watch::channel(safe); @@ -33,7 +38,7 @@ impl ChainInfoTracker { inner: Arc::new(ChainInfoInner { last_forkchoice_update: RwLock::new(None), last_transition_configuration_exchange: RwLock::new(None), - canonical_head_number: AtomicU64::new(head.number), + canonical_head_number: AtomicU64::new(head.number()), canonical_head: RwLock::new(head), safe_block, finalized_block, @@ -44,7 +49,7 @@ impl ChainInfoTracker { /// Returns the [`ChainInfo`] for the canonical head. pub fn chain_info(&self) -> ChainInfo { let inner = self.inner.canonical_head.read(); - ChainInfo { best_hash: inner.hash(), best_number: inner.number } + ChainInfo { best_hash: inner.hash(), best_number: inner.number() } } /// Update the timestamp when we received a forkchoice update. @@ -68,17 +73,17 @@ impl ChainInfoTracker { } /// Returns the canonical head of the chain. - pub fn get_canonical_head(&self) -> SealedHeader { + pub fn get_canonical_head(&self) -> SealedHeader { self.inner.canonical_head.read().clone() } /// Returns the safe header of the chain. 
- pub fn get_safe_header(&self) -> Option { + pub fn get_safe_header(&self) -> Option> { self.inner.safe_block.borrow().clone() } /// Returns the finalized header of the chain. - pub fn get_finalized_header(&self) -> Option { + pub fn get_finalized_header(&self) -> Option> { self.inner.finalized_block.borrow().clone() } @@ -104,8 +109,8 @@ impl ChainInfoTracker { } /// Sets the canonical head of the chain. - pub fn set_canonical_head(&self, header: SealedHeader) { - let number = header.number; + pub fn set_canonical_head(&self, header: SealedHeader) { + let number = header.number(); *self.inner.canonical_head.write() = header; // also update the atomic number. @@ -113,7 +118,7 @@ impl ChainInfoTracker { } /// Sets the safe header of the chain. - pub fn set_safe(&self, header: SealedHeader) { + pub fn set_safe(&self, header: SealedHeader) { self.inner.safe_block.send_if_modified(|current_header| { if current_header.as_ref().map(SealedHeader::hash) != Some(header.hash()) { let _ = current_header.replace(header); @@ -125,7 +130,7 @@ impl ChainInfoTracker { } /// Sets the finalized header of the chain. - pub fn set_finalized(&self, header: SealedHeader) { + pub fn set_finalized(&self, header: SealedHeader) { self.inner.finalized_block.send_if_modified(|current_header| { if current_header.as_ref().map(SealedHeader::hash) != Some(header.hash()) { let _ = current_header.replace(header); @@ -137,19 +142,21 @@ impl ChainInfoTracker { } /// Subscribe to the finalized block. - pub fn subscribe_finalized_block(&self) -> watch::Receiver> { + pub fn subscribe_finalized_block( + &self, + ) -> watch::Receiver>> { self.inner.finalized_block.subscribe() } /// Subscribe to the safe block. - pub fn subscribe_safe_block(&self) -> watch::Receiver> { + pub fn subscribe_safe_block(&self) -> watch::Receiver>> { self.inner.safe_block.subscribe() } } /// Container type for all chain info fields #[derive(Debug)] -struct ChainInfoInner { +struct ChainInfoInner { /// Timestamp when we received the last fork choice update. /// /// This is mainly used to track if we're connected to a beacon node. @@ -161,16 +168,17 @@ struct ChainInfoInner { /// Tracks the number of the `canonical_head`. canonical_head_number: AtomicU64, /// The canonical head of the chain. - canonical_head: RwLock, + canonical_head: RwLock>, /// The block that the beacon node considers safe. - safe_block: watch::Sender>, + safe_block: watch::Sender>>, /// The block that the beacon node considers finalized. 
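The tracker keeps the safe and finalized headers in `tokio::sync::watch` channels and updates them via `send_if_modified`, so subscribers are only woken when the hash actually changes. A runnable sketch of that update rule, assuming a `tokio` dependency with the `sync` feature as in the `Cargo.toml` hunk above; `SealedHeader` here is a toy struct, not the reth type:

```rust
use tokio::sync::watch;

struct SealedHeader {
    number: u64,
    hash: u64,
}

fn main() {
    // One watch channel per tracked head; any number of tasks can subscribe.
    let (safe_tx, safe_rx) = watch::channel::<Option<SealedHeader>>(None);

    let set_safe = |header: SealedHeader| {
        // Mirrors `send_if_modified` in the hunk above: only replace the
        // value (and notify subscribers) when the hash actually changed.
        safe_tx.send_if_modified(|current| {
            if current.as_ref().map(|h| h.hash) != Some(header.hash) {
                *current = Some(header);
                true
            } else {
                false
            }
        });
    };

    set_safe(SealedHeader { number: 10, hash: 0xab });
    assert_eq!(safe_rx.borrow().as_ref().unwrap().number, 10);

    // Re-sending the same hash does not wake subscribers.
    let mut rx = safe_rx.clone();
    rx.borrow_and_update();
    set_safe(SealedHeader { number: 10, hash: 0xab });
    assert!(!rx.has_changed().unwrap());
}
```

No async runtime is needed here: `watch` senders and receivers are fully usable synchronously, which is also why the `Cargo.toml` hunk can drop tokio's default features.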
- finalized_block: watch::Sender>, + finalized_block: watch::Sender>>, } #[cfg(test)] mod tests { use super::*; + use reth_primitives::EthPrimitives; use reth_testing_utils::{generators, generators::random_header}; #[test] @@ -180,7 +188,8 @@ mod tests { let header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the header - let tracker = ChainInfoTracker::new(header.clone(), None, None); + let tracker: ChainInfoTracker = + ChainInfoTracker::new(header.clone(), None, None); // Fetch the chain information from the tracker let chain_info = tracker.chain_info(); @@ -197,7 +206,7 @@ mod tests { let header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the header - let tracker = ChainInfoTracker::new(header, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header, None, None); // Assert that there has been no forkchoice update yet (the timestamp is None) assert!(tracker.last_forkchoice_update_received_at().is_none()); @@ -216,7 +225,7 @@ mod tests { let header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the header - let tracker = ChainInfoTracker::new(header, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header, None, None); // Assert that there has been no transition configuration exchange yet (the timestamp is // None) @@ -239,7 +248,7 @@ mod tests { let header2 = random_header(&mut rng, 20, None); // Create a new chain info tracker with the first header - let tracker = ChainInfoTracker::new(header1, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header1, None, None); // Set the second header as the canonical head of the tracker tracker.set_canonical_head(header2.clone()); @@ -260,7 +269,7 @@ mod tests { let header2 = random_header(&mut rng, 20, None); // Create a new chain info tracker with the first header (header1) - let tracker = ChainInfoTracker::new(header1, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header1, None, None); // Call the set_safe method with the second header (header2) tracker.set_safe(header2.clone()); @@ -306,7 +315,7 @@ mod tests { let header3 = random_header(&mut rng, 30, None); // Create a new chain info tracker with the first header - let tracker = ChainInfoTracker::new(header1, None, None); + let tracker: ChainInfoTracker = ChainInfoTracker::new(header1, None, None); // Initial state: finalize header should be None assert!(tracker.get_finalized_header().is_none()); @@ -343,7 +352,7 @@ mod tests { let finalized_header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the finalized header - let tracker = + let tracker: ChainInfoTracker = ChainInfoTracker::new(finalized_header.clone(), Some(finalized_header.clone()), None); // Assert that the BlockNumHash returned matches the finalized header @@ -357,7 +366,8 @@ mod tests { let safe_header = random_header(&mut rng, 10, None); // Create a new chain info tracker with the safe header - let tracker = ChainInfoTracker::new(safe_header.clone(), None, None); + let tracker: ChainInfoTracker = + ChainInfoTracker::new(safe_header.clone(), None, None); tracker.set_safe(safe_header.clone()); // Assert that the BlockNumHash returned matches the safe header diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index f157da5ff45..670c340db4b 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -4,16 +4,18 @@ use crate::{ CanonStateNotification, 
CanonStateNotificationSender, CanonStateNotifications, ChainInfoTracker, MemoryOverlayStateProvider, }; -use alloy_eips::BlockNumHash; +use alloy_consensus::BlockHeader; +use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber, BlockNumHash}; use alloy_primitives::{map::HashMap, Address, TxHash, B256}; use parking_lot::RwLock; use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::{ - BlockWithSenders, Header, Receipt, Receipts, SealedBlock, SealedBlockWithSenders, SealedHeader, - TransactionMeta, TransactionSigned, + BlockWithSenders, EthPrimitives, NodePrimitives, Receipts, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, TransactionMeta, }; +use reth_primitives_traits::{Block, BlockBody as _, SignedTransaction}; use reth_storage_api::StateProviderBox; use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::{collections::BTreeMap, sync::Arc, time::Instant}; @@ -49,22 +51,22 @@ pub(crate) struct InMemoryStateMetrics { /// This holds, because only lookup by number functions need to acquire the numbers lock first to /// get the block hash. #[derive(Debug, Default)] -pub(crate) struct InMemoryState { +pub(crate) struct InMemoryState { /// All canonical blocks that are not on disk yet. - blocks: RwLock>>, + blocks: RwLock>>>, /// Mapping of block numbers to block hashes. numbers: RwLock>, /// The pending block that has not yet been made canonical. - pending: watch::Sender>, + pending: watch::Sender>>, /// Metrics for the in-memory state. metrics: InMemoryStateMetrics, } -impl InMemoryState { +impl InMemoryState { pub(crate) fn new( - blocks: HashMap>, + blocks: HashMap>>, numbers: BTreeMap, - pending: Option, + pending: Option>, ) -> Self { let (pending, _) = watch::channel(pending); let this = Self { @@ -94,12 +96,12 @@ impl InMemoryState { } /// Returns the state for a given block hash. - pub(crate) fn state_by_hash(&self, hash: B256) -> Option> { + pub(crate) fn state_by_hash(&self, hash: B256) -> Option>> { self.blocks.read().get(&hash).cloned() } /// Returns the state for a given block number. - pub(crate) fn state_by_number(&self, number: u64) -> Option> { + pub(crate) fn state_by_number(&self, number: u64) -> Option>> { let hash = self.hash_by_number(number)?; self.state_by_hash(hash) } @@ -110,14 +112,14 @@ impl InMemoryState { } /// Returns the current chain head state. - pub(crate) fn head_state(&self) -> Option> { + pub(crate) fn head_state(&self) -> Option>> { let hash = *self.numbers.read().last_key_value()?.1; self.state_by_hash(hash) } /// Returns the pending state corresponding to the current head plus one, /// from the payload received in newPayload that does not have a FCU yet. - pub(crate) fn pending_state(&self) -> Option { + pub(crate) fn pending_state(&self) -> Option> { self.pending.borrow().clone() } @@ -130,17 +132,17 @@ impl InMemoryState { /// Inner type to provide in memory state. It includes a chain tracker to be /// advanced internally by the tree. #[derive(Debug)] -pub(crate) struct CanonicalInMemoryStateInner { +pub(crate) struct CanonicalInMemoryStateInner { /// Tracks certain chain information, such as the canonical head, safe head, and finalized /// head. - pub(crate) chain_info_tracker: ChainInfoTracker, + pub(crate) chain_info_tracker: ChainInfoTracker, /// Tracks blocks at the tip of the chain that have not been persisted to disk yet. 
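`InMemoryState` above splits its indexes: the hash-keyed map owns the block states while the number index stores only hashes, so lookups by number go number, then hash, then state, and the two locks are always taken in the same order. A simplified sketch with toy `u64` hashes and `std::sync::RwLock`:

```rust
use std::collections::{BTreeMap, HashMap};
use std::sync::{Arc, RwLock};

type Hash = u64;

struct BlockState {
    number: u64,
    hash: Hash,
}

/// Simplified shape of the in-memory state: `blocks` owns the states,
/// `numbers` maps heights to hashes.
#[derive(Default)]
struct InMemoryState {
    blocks: RwLock<HashMap<Hash, Arc<BlockState>>>,
    numbers: RwLock<BTreeMap<u64, Hash>>,
}

impl InMemoryState {
    fn insert(&self, state: BlockState) {
        // Consistent lock order (numbers first, then blocks) avoids deadlock.
        let mut numbers = self.numbers.write().unwrap();
        let mut blocks = self.blocks.write().unwrap();
        numbers.insert(state.number, state.hash);
        blocks.insert(state.hash, Arc::new(state));
    }

    fn state_by_hash(&self, hash: Hash) -> Option<Arc<BlockState>> {
        self.blocks.read().unwrap().get(&hash).cloned()
    }

    fn state_by_number(&self, number: u64) -> Option<Arc<BlockState>> {
        let hash = *self.numbers.read().unwrap().get(&number)?;
        self.state_by_hash(hash)
    }

    /// The head is simply the highest entry in the number index.
    fn head_state(&self) -> Option<Arc<BlockState>> {
        let hash = *self.numbers.read().unwrap().last_key_value()?.1;
        self.state_by_hash(hash)
    }
}

fn main() {
    let state = InMemoryState::default();
    state.insert(BlockState { number: 5, hash: 0x5 });
    state.insert(BlockState { number: 6, hash: 0x6 });
    assert_eq!(state.head_state().unwrap().number, 6);
    assert_eq!(state.state_by_number(5).unwrap().hash, 0x5);
}
```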
- pub(crate) in_memory_state: InMemoryState, + pub(crate) in_memory_state: InMemoryState, /// A broadcast stream that emits events when the canonical chain is updated. - pub(crate) canon_state_notification_sender: CanonStateNotificationSender, + pub(crate) canon_state_notification_sender: CanonStateNotificationSender, } -impl CanonicalInMemoryStateInner { +impl CanonicalInMemoryStateInner { /// Clears all entries in the in memory state. fn clear(&self) { { @@ -157,23 +159,26 @@ impl CanonicalInMemoryStateInner { } } +type PendingBlockAndReceipts = + (SealedBlockFor<::Block>, Vec>); + /// This type is responsible for providing the blocks, receipts, and state for /// all canonical blocks not on disk yet and keeps track of the block range that /// is in memory. #[derive(Debug, Clone)] -pub struct CanonicalInMemoryState { - pub(crate) inner: Arc, +pub struct CanonicalInMemoryState { + pub(crate) inner: Arc>, } -impl CanonicalInMemoryState { +impl CanonicalInMemoryState { /// Create a new in-memory state with the given blocks, numbers, pending state, and optional /// finalized header. pub fn new( - blocks: HashMap>, + blocks: HashMap>>, numbers: BTreeMap, - pending: Option, - finalized: Option, - safe: Option, + pending: Option>, + finalized: Option>, + safe: Option>, ) -> Self { let in_memory_state = InMemoryState::new(blocks, numbers, pending); let header = in_memory_state @@ -200,9 +205,9 @@ impl CanonicalInMemoryState { /// Create a new in memory state with the given local head and finalized header /// if it exists. pub fn with_head( - head: SealedHeader, - finalized: Option, - safe: Option, + head: SealedHeader, + finalized: Option>, + safe: Option>, ) -> Self { let chain_info_tracker = ChainInfoTracker::new(head, finalized, safe); let in_memory_state = InMemoryState::default(); @@ -223,7 +228,7 @@ impl CanonicalInMemoryState { } /// Returns the header corresponding to the given hash. - pub fn header_by_hash(&self, hash: B256) -> Option { + pub fn header_by_hash(&self, hash: B256) -> Option> { self.state_by_hash(hash).map(|block| block.block_ref().block.header.clone()) } @@ -235,9 +240,9 @@ impl CanonicalInMemoryState { /// Updates the pending block with the given block. /// /// Note: This assumes that the parent block of the pending block is canonical. - pub fn set_pending_block(&self, pending: ExecutedBlock) { + pub fn set_pending_block(&self, pending: ExecutedBlock) { // fetch the state of the pending block's parent block - let parent = self.state_by_hash(pending.block().parent_hash); + let parent = self.state_by_hash(pending.block().parent_hash()); let pending = BlockState::with_parent(pending, parent); self.inner.in_memory_state.pending.send_modify(|p| { p.replace(pending); @@ -251,7 +256,7 @@ impl CanonicalInMemoryState { /// them to their parent blocks. 
fn update_blocks(&self, new_blocks: I, reorged: I) where - I: IntoIterator, + I: IntoIterator>, { { // acquire locks, starting with the numbers lock @@ -261,15 +266,15 @@ impl CanonicalInMemoryState { // we first remove the blocks from the reorged chain for block in reorged { let hash = block.block().hash(); - let number = block.block().number; + let number = block.block().number(); blocks.remove(&hash); numbers.remove(&number); } // insert the new blocks for block in new_blocks { - let parent = blocks.get(&block.block().parent_hash).cloned(); - let block_state = BlockState::with_parent(block.clone(), parent); + let parent = blocks.get(&block.block().parent_hash()).cloned(); + let block_state = BlockState::with_parent(block, parent); let hash = block_state.hash(); let number = block_state.number(); @@ -287,7 +292,7 @@ impl CanonicalInMemoryState { } /// Update the in memory state with the given chain update. - pub fn update_chain(&self, new_chain: NewCanonicalChain) { + pub fn update_chain(&self, new_chain: NewCanonicalChain) { match new_chain { NewCanonicalChain::Commit { new } => { self.update_blocks(new, vec![]); @@ -328,17 +333,17 @@ impl CanonicalInMemoryState { // height) let mut old_blocks = blocks .drain() - .filter(|(_, b)| b.block_ref().block().number > persisted_height) + .filter(|(_, b)| b.block_ref().block().number() > persisted_height) .map(|(_, b)| b.block.clone()) .collect::>(); // sort the blocks by number so we can insert them back in natural order (low -> high) - old_blocks.sort_unstable_by_key(|block| block.block().number); + old_blocks.sort_unstable_by_key(|block| block.block().number()); // re-insert the blocks in natural order and connect them to their parent blocks for block in old_blocks { - let parent = blocks.get(&block.block().parent_hash).cloned(); - let block_state = BlockState::with_parent(block.clone(), parent); + let parent = blocks.get(&block.block().parent_hash()).cloned(); + let block_state = BlockState::with_parent(block, parent); let hash = block_state.hash(); let number = block_state.number(); @@ -350,7 +355,7 @@ impl CanonicalInMemoryState { // also shift the pending state if it exists self.inner.in_memory_state.pending.send_modify(|p| { if let Some(p) = p.as_mut() { - p.parent = blocks.get(&p.block_ref().block.parent_hash).cloned(); + p.parent = blocks.get(&p.block_ref().block.parent_hash()).cloned(); } }); } @@ -358,22 +363,22 @@ impl CanonicalInMemoryState { } /// Returns in memory state corresponding the given hash. - pub fn state_by_hash(&self, hash: B256) -> Option> { + pub fn state_by_hash(&self, hash: B256) -> Option>> { self.inner.in_memory_state.state_by_hash(hash) } /// Returns in memory state corresponding the block number. - pub fn state_by_number(&self, number: u64) -> Option> { + pub fn state_by_number(&self, number: u64) -> Option>> { self.inner.in_memory_state.state_by_number(number) } /// Returns the in memory head state. - pub fn head_state(&self) -> Option> { + pub fn head_state(&self) -> Option>> { self.inner.in_memory_state.head_state() } /// Returns the in memory pending state. - pub fn pending_state(&self) -> Option { + pub fn pending_state(&self) -> Option> { self.inner.in_memory_state.pending_state() } @@ -426,81 +431,86 @@ impl CanonicalInMemoryState { } /// Canonical head setter. - pub fn set_canonical_head(&self, header: SealedHeader) { + pub fn set_canonical_head(&self, header: SealedHeader) { self.inner.chain_info_tracker.set_canonical_head(header); } /// Safe head setter. 
- pub fn set_safe(&self, header: SealedHeader) { + pub fn set_safe(&self, header: SealedHeader) { self.inner.chain_info_tracker.set_safe(header); } /// Finalized head setter. - pub fn set_finalized(&self, header: SealedHeader) { + pub fn set_finalized(&self, header: SealedHeader) { self.inner.chain_info_tracker.set_finalized(header); } /// Canonical head getter. - pub fn get_canonical_head(&self) -> SealedHeader { + pub fn get_canonical_head(&self) -> SealedHeader { self.inner.chain_info_tracker.get_canonical_head() } /// Finalized header getter. - pub fn get_finalized_header(&self) -> Option { + pub fn get_finalized_header(&self) -> Option> { self.inner.chain_info_tracker.get_finalized_header() } /// Safe header getter. - pub fn get_safe_header(&self) -> Option { + pub fn get_safe_header(&self) -> Option> { self.inner.chain_info_tracker.get_safe_header() } /// Returns the `SealedHeader` corresponding to the pending state. - pub fn pending_sealed_header(&self) -> Option { + pub fn pending_sealed_header(&self) -> Option> { self.pending_state().map(|h| h.block_ref().block().header.clone()) } /// Returns the `Header` corresponding to the pending state. - pub fn pending_header(&self) -> Option
{ + pub fn pending_header(&self) -> Option { self.pending_sealed_header().map(|sealed_header| sealed_header.unseal()) } /// Returns the `SealedBlock` corresponding to the pending state. - pub fn pending_block(&self) -> Option { + pub fn pending_block(&self) -> Option> { self.pending_state().map(|block_state| block_state.block_ref().block().clone()) } /// Returns the `SealedBlockWithSenders` corresponding to the pending state. - pub fn pending_block_with_senders(&self) -> Option { + pub fn pending_block_with_senders(&self) -> Option> + where + N::SignedTx: SignedTransaction, + { self.pending_state() .and_then(|block_state| block_state.block_ref().block().clone().seal_with_senders()) } /// Returns a tuple with the `SealedBlock` corresponding to the pending /// state and a vector of its `Receipt`s. - pub fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { + pub fn pending_block_and_receipts(&self) -> Option> { self.pending_state().map(|block_state| { (block_state.block_ref().block().clone(), block_state.executed_block_receipts()) }) } /// Subscribe to new blocks events. - pub fn subscribe_canon_state(&self) -> CanonStateNotifications { + pub fn subscribe_canon_state(&self) -> CanonStateNotifications { self.inner.canon_state_notification_sender.subscribe() } /// Subscribe to new safe block events. - pub fn subscribe_safe_block(&self) -> watch::Receiver> { + pub fn subscribe_safe_block(&self) -> watch::Receiver>> { self.inner.chain_info_tracker.subscribe_safe_block() } /// Subscribe to new finalized block events. - pub fn subscribe_finalized_block(&self) -> watch::Receiver> { + pub fn subscribe_finalized_block( + &self, + ) -> watch::Receiver>> { self.inner.chain_info_tracker.subscribe_finalized_block() } /// Attempts to send a new [`CanonStateNotification`] to all active Receiver handles. - pub fn notify_canon_state(&self, event: CanonStateNotification) { + pub fn notify_canon_state(&self, event: CanonStateNotification) { self.inner.canon_state_notification_sender.send(event).ok(); } @@ -512,9 +522,9 @@ impl CanonicalInMemoryState { &self, hash: B256, historical: StateProviderBox, - ) -> MemoryOverlayStateProvider { + ) -> MemoryOverlayStateProvider { let in_memory = if let Some(state) = self.state_by_hash(hash) { - state.chain().into_iter().map(|block_state| block_state.block()).collect() + state.chain().map(|block_state| block_state.block()).collect() } else { Vec::new() }; @@ -526,15 +536,23 @@ impl CanonicalInMemoryState { /// oldest (highest to lowest). /// /// This iterator contains a snapshot of the in-memory state at the time of the call. - pub fn canonical_chain(&self) -> impl Iterator> { + pub fn canonical_chain(&self) -> impl Iterator>> { self.inner.in_memory_state.head_state().into_iter().flat_map(|head| head.iter()) } /// Returns a `TransactionSigned` for the given `TxHash` if found. 
- pub fn transaction_by_hash(&self, hash: TxHash) -> Option { + pub fn transaction_by_hash(&self, hash: TxHash) -> Option + where + N::SignedTx: Encodable2718, + { for block_state in self.canonical_chain() { - if let Some(tx) = - block_state.block_ref().block().body.transactions().find(|tx| tx.hash() == hash) + if let Some(tx) = block_state + .block_ref() + .block() + .body + .transactions() + .iter() + .find(|tx| tx.trie_hash() == hash) { return Some(tx.clone()) } @@ -547,24 +565,28 @@ impl CanonicalInMemoryState { pub fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> Option<(TransactionSigned, TransactionMeta)> { + ) -> Option<(N::SignedTx, TransactionMeta)> + where + N::SignedTx: Encodable2718, + { for block_state in self.canonical_chain() { if let Some((index, tx)) = block_state .block_ref() .block() .body .transactions() + .iter() .enumerate() - .find(|(_, tx)| tx.hash() == tx_hash) + .find(|(_, tx)| tx.trie_hash() == tx_hash) { let meta = TransactionMeta { tx_hash, index: index as u64, block_hash: block_state.hash(), - block_number: block_state.block_ref().block.number, - base_fee: block_state.block_ref().block.header.base_fee_per_gas, - timestamp: block_state.block_ref().block.timestamp, - excess_blob_gas: block_state.block_ref().block.excess_blob_gas, + block_number: block_state.block_ref().block.number(), + base_fee: block_state.block_ref().block.header.base_fee_per_gas(), + timestamp: block_state.block_ref().block.timestamp(), + excess_blob_gas: block_state.block_ref().block.excess_blob_gas(), }; return Some((tx.clone(), meta)) } @@ -576,22 +598,22 @@ impl CanonicalInMemoryState { /// State after applying the given block, this block is part of the canonical chain that partially /// stored in memory and can be traced back to a canonical block on disk. #[derive(Debug, PartialEq, Eq, Clone)] -pub struct BlockState { +pub struct BlockState { /// The executed block that determines the state after this block has been executed. - block: ExecutedBlock, + block: ExecutedBlock, /// The block's parent block if it exists. - parent: Option>, + parent: Option>>, } #[allow(dead_code)] -impl BlockState { +impl BlockState { /// [`BlockState`] constructor. - pub const fn new(block: ExecutedBlock) -> Self { + pub const fn new(block: ExecutedBlock) -> Self { Self { block, parent: None } } /// [`BlockState`] constructor with parent. - pub const fn with_parent(block: ExecutedBlock, parent: Option>) -> Self { + pub const fn with_parent(block: ExecutedBlock, parent: Option>) -> Self { Self { block, parent } } @@ -605,24 +627,25 @@ impl BlockState { } /// Returns the executed block that determines the state. - pub fn block(&self) -> ExecutedBlock { + pub fn block(&self) -> ExecutedBlock { self.block.clone() } /// Returns a reference to the executed block that determines the state. - pub const fn block_ref(&self) -> &ExecutedBlock { + pub const fn block_ref(&self) -> &ExecutedBlock { &self.block } /// Returns the block with senders for the state. - pub fn block_with_senders(&self) -> BlockWithSenders { + pub fn block_with_senders(&self) -> BlockWithSenders { let block = self.block.block().clone(); let senders = self.block.senders().clone(); - BlockWithSenders { block: block.unseal(), senders } + let (header, body) = block.split(); + BlockWithSenders::new_unchecked(N::Block::new(header.unseal(), body), senders) } /// Returns the sealed block with senders for the state. 
- pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { + pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { let block = self.block.block().clone(); let senders = self.block.senders().clone(); SealedBlockWithSenders { block, senders } @@ -635,17 +658,17 @@ impl BlockState { /// Returns the block number of executed block that determines the state. pub fn number(&self) -> u64 { - self.block.block().number + self.block.block().number() } /// Returns the state root after applying the executed block that determines /// the state. pub fn state_root(&self) -> B256 { - self.block.block().header.state_root + self.block.block().header.state_root() } /// Returns the `Receipts` of executed block that determines the state. - pub fn receipts(&self) -> &Receipts { + pub fn receipts(&self) -> &Receipts { &self.block.execution_outcome().receipts } @@ -653,7 +676,7 @@ impl BlockState { /// We assume that the `Receipts` in the executed block `ExecutionOutcome` /// has only one element corresponding to the executed block associated to /// the state. - pub fn executed_block_receipts(&self) -> Vec { + pub fn executed_block_receipts(&self) -> Vec { let receipts = self.receipts(); debug_assert!( @@ -692,10 +715,8 @@ impl BlockState { /// Returns a vector of `BlockStates` representing the entire in memory chain. /// The block state order in the output vector is newest to oldest (highest to lowest), /// including self as the first element. - pub fn chain(&self) -> Vec<&Self> { - let mut chain = vec![self]; - self.append_parent_chain(&mut chain); - chain + pub fn chain(&self) -> impl Iterator { + std::iter::successors(Some(self), |state| state.parent.as_deref()) } /// Appends the parent chain of this [`BlockState`] to the given vector. @@ -714,34 +735,91 @@ impl BlockState { /// /// This merges the state of all blocks that are part of the chain that the this block is /// the head of. This includes all blocks that connect back to the canonical block on disk. - pub fn state_provider(&self, historical: StateProviderBox) -> MemoryOverlayStateProvider { - let in_memory = self.chain().into_iter().map(|block_state| block_state.block()).collect(); + pub fn state_provider(&self, historical: StateProviderBox) -> MemoryOverlayStateProvider { + let in_memory = self.chain().map(|block_state| block_state.block()).collect(); MemoryOverlayStateProvider::new(historical, in_memory) } + + /// Tries to find a block by [`BlockHashOrNumber`] in the chain ending at this block. + pub fn block_on_chain(&self, hash_or_num: BlockHashOrNumber) -> Option<&Self> { + self.chain().find(|block| match hash_or_num { + BlockHashOrNumber::Hash(hash) => block.hash() == hash, + BlockHashOrNumber::Number(number) => block.number() == number, + }) + } + + /// Tries to find a transaction by [`TxHash`] in the chain ending at this block. + pub fn transaction_on_chain(&self, hash: TxHash) -> Option + where + N::SignedTx: Encodable2718, + { + self.chain().find_map(|block_state| { + block_state + .block_ref() + .block() + .body + .transactions() + .iter() + .find(|tx| tx.trie_hash() == hash) + .cloned() + }) + } + + /// Tries to find a transaction with meta by [`TxHash`] in the chain ending at this block. 
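The `chain()` rewrite above replaces an eagerly collected `Vec` with a lazy walk up the parent links. A self-contained sketch of the same `std::iter::successors` idiom, using a hypothetical `Node` in place of `BlockState`:

// Hypothetical stand-in for `BlockState` with its optional parent link.
struct Node {
    number: u64,
    parent: Option<Box<Node>>,
}

impl Node {
    // Newest to oldest, including `self`, without allocating a `Vec`.
    fn chain(&self) -> impl Iterator<Item = &Self> {
        std::iter::successors(Some(self), |node| node.parent.as_deref())
    }
}

fn main() {
    let genesis = Node { number: 1, parent: None };
    let middle = Node { number: 2, parent: Some(Box::new(genesis)) };
    let tip = Node { number: 3, parent: Some(Box::new(middle)) };

    let numbers: Vec<u64> = tip.chain().map(|node| node.number).collect();
    assert_eq!(numbers, [3, 2, 1]);
}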
+ pub fn transaction_meta_on_chain( + &self, + tx_hash: TxHash, + ) -> Option<(N::SignedTx, TransactionMeta)> + where + N::SignedTx: Encodable2718, + { + self.chain().find_map(|block_state| { + block_state + .block_ref() + .block() + .body + .transactions() + .iter() + .enumerate() + .find(|(_, tx)| tx.trie_hash() == tx_hash) + .map(|(index, tx)| { + let meta = TransactionMeta { + tx_hash, + index: index as u64, + block_hash: block_state.hash(), + block_number: block_state.block_ref().block.number(), + base_fee: block_state.block_ref().block.header.base_fee_per_gas(), + timestamp: block_state.block_ref().block.timestamp(), + excess_blob_gas: block_state.block_ref().block.excess_blob_gas(), + }; + (tx.clone(), meta) + }) + }) + } } /// Represents an executed block stored in-memory. #[derive(Clone, Debug, PartialEq, Eq, Default)] -pub struct ExecutedBlock { +pub struct ExecutedBlock { /// Sealed block the rest of fields refer to. - pub block: Arc, + pub block: Arc>, /// Block's senders. pub senders: Arc>, /// Block's execution outcome. - pub execution_output: Arc, - /// Block's hashedst state. + pub execution_output: Arc>, + /// Block's hashed state. pub hashed_state: Arc, /// Trie updates that result of applying the block. pub trie: Arc, } -impl ExecutedBlock { +impl ExecutedBlock { /// [`ExecutedBlock`] constructor. pub const fn new( - block: Arc, + block: Arc>, senders: Arc>, - execution_output: Arc, + execution_output: Arc>, hashed_state: Arc, trie: Arc, ) -> Self { @@ -749,7 +827,7 @@ impl ExecutedBlock { } /// Returns a reference to the executed block. - pub fn block(&self) -> &SealedBlock { + pub fn block(&self) -> &SealedBlockFor { &self.block } @@ -761,12 +839,12 @@ impl ExecutedBlock { /// Returns a [`SealedBlockWithSenders`] /// /// Note: this clones the block and senders. - pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { + pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { SealedBlockWithSenders { block: (*self.block).clone(), senders: (*self.senders).clone() } } /// Returns a reference to the block's execution outcome - pub fn execution_outcome(&self) -> &ExecutionOutcome { + pub fn execution_outcome(&self) -> &ExecutionOutcome { &self.execution_output } @@ -783,23 +861,23 @@ impl ExecutedBlock { /// Non-empty chain of blocks. #[derive(Debug)] -pub enum NewCanonicalChain { +pub enum NewCanonicalChain { /// A simple append to the current canonical head Commit { /// all blocks that lead back to the canonical head - new: Vec, + new: Vec>, }, /// A reorged chain consists of two chains that trace back to a shared ancestor block at which /// point they diverge. Reorg { /// All blocks of the _new_ chain - new: Vec, + new: Vec>, /// All blocks of the _old_ chain - old: Vec, + old: Vec>, }, } -impl NewCanonicalChain { +impl> NewCanonicalChain { /// Returns the length of the new chain. pub fn new_block_count(&self) -> usize { match self { @@ -816,7 +894,7 @@ impl NewCanonicalChain { } /// Converts the new chain into a notification that will be emitted to listeners - pub fn to_chain_notification(&self) -> CanonStateNotification { + pub fn to_chain_notification(&self) -> CanonStateNotification { match self { Self::Commit { new } => { let new = Arc::new(new.iter().fold(Chain::default(), |mut chain, exec| { @@ -852,7 +930,7 @@ impl NewCanonicalChain { /// /// Returns the new tip for [`Self::Reorg`] and [`Self::Commit`] variants which commit at least /// 1 new block. 
- pub fn tip(&self) -> &SealedBlock { + pub fn tip(&self) -> &SealedBlockFor { match self { Self::Commit { new } | Self::Reorg { new, .. } => { new.last().expect("non empty blocks").block() @@ -865,18 +943,21 @@ impl NewCanonicalChain { mod tests { use super::*; use crate::test_utils::TestBlockBuilder; + use alloy_eips::eip7685::Requests; use alloy_primitives::{map::HashSet, BlockNumber, Bytes, StorageKey, StorageValue}; use rand::Rng; use reth_errors::ProviderResult; - use reth_primitives::{Account, Bytecode, Receipt, Requests}; + use reth_primitives::{Account, Bytecode, EthPrimitives, Receipt}; use reth_storage_api::{ - AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, - StorageRootProvider, + AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, + StateRootProvider, StorageRootProvider, + }; + use reth_trie::{ + AccountProof, HashedStorage, MultiProof, StorageMultiProof, StorageProof, TrieInput, }; - use reth_trie::{AccountProof, HashedStorage, MultiProof, StorageProof, TrieInput}; fn create_mock_state( - test_block_builder: &mut TestBlockBuilder, + test_block_builder: &mut TestBlockBuilder, block_number: u64, parent_hash: B256, ) -> BlockState { @@ -886,7 +967,7 @@ mod tests { } fn create_mock_state_chain( - test_block_builder: &mut TestBlockBuilder, + test_block_builder: &mut TestBlockBuilder, num_blocks: u64, ) -> Vec { let mut chain = Vec::with_capacity(num_blocks as usize); @@ -966,6 +1047,12 @@ mod tests { } } + impl HashedPostStateProvider for MockStateProvider { + fn hashed_post_state(&self, _bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::default() + } + } + impl StorageRootProvider for MockStateProvider { fn storage_root( &self, @@ -983,6 +1070,15 @@ mod tests { ) -> ProviderResult { Ok(StorageProof::new(slot)) } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(StorageMultiProof::empty()) + } } impl StateProofProvider for MockStateProvider { @@ -1016,7 +1112,7 @@ mod tests { fn test_in_memory_state_impl_state_by_hash() { let mut state_by_hash = HashMap::default(); let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let state = Arc::new(create_mock_state(&mut test_block_builder, number, B256::random())); state_by_hash.insert(state.hash(), state.clone()); @@ -1032,7 +1128,7 @@ mod tests { let mut hash_by_number = BTreeMap::new(); let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let state = Arc::new(create_mock_state(&mut test_block_builder, number, B256::random())); let hash = state.hash(); @@ -1049,7 +1145,7 @@ mod tests { fn test_in_memory_state_impl_head_state() { let mut state_by_hash = HashMap::default(); let mut hash_by_number = BTreeMap::new(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let state1 = Arc::new(create_mock_state(&mut test_block_builder, 1, B256::random())); let hash1 = state1.hash(); let state2 = Arc::new(create_mock_state(&mut test_block_builder, 2, hash1)); @@ -1069,7 +1165,7 @@ mod tests { #[test] fn test_in_memory_state_impl_pending_state() { let pending_number = rand::thread_rng().gen::(); - let mut test_block_builder 
= TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let pending_state = create_mock_state(&mut test_block_builder, pending_number, B256::random()); let pending_hash = pending_state.hash(); @@ -1086,7 +1182,8 @@ mod tests { #[test] fn test_in_memory_state_impl_no_pending_state() { - let in_memory_state = InMemoryState::new(HashMap::default(), BTreeMap::new(), None); + let in_memory_state: InMemoryState = + InMemoryState::new(HashMap::default(), BTreeMap::new(), None); assert_eq!(in_memory_state.pending_state(), None); } @@ -1094,7 +1191,7 @@ mod tests { #[test] fn test_state_new() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1105,7 +1202,7 @@ mod tests { #[test] fn test_state_block() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1116,7 +1213,7 @@ mod tests { #[test] fn test_state_hash() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1127,7 +1224,7 @@ mod tests { #[test] fn test_state_number() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block); @@ -1138,7 +1235,7 @@ mod tests { #[test] fn test_state_state_root() { let number = rand::thread_rng().gen::(); - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_number(number, B256::random()); let state = BlockState::new(block.clone()); @@ -1149,7 +1246,7 @@ mod tests { #[test] fn test_state_receipts() { let receipts = Receipts { receipt_vec: vec![vec![Some(Receipt::default())]] }; - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block = test_block_builder.get_executed_block_with_receipts(receipts.clone(), B256::random()); @@ -1160,8 +1257,8 @@ mod tests { #[test] fn test_in_memory_state_chain_update() { - let state = CanonicalInMemoryState::empty(); - let mut test_block_builder = TestBlockBuilder::default(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block1 = test_block_builder.get_executed_block_with_number(0, B256::random()); let block2 = test_block_builder.get_executed_block_with_number(0, B256::random()); let chain = NewCanonicalChain::Commit { new: vec![block1.clone()] }; @@ -1185,8 +1282,8 @@ mod tests { #[test] fn test_in_memory_state_set_pending_block() { - let state = CanonicalInMemoryState::empty(); - 
let mut test_block_builder = TestBlockBuilder::default(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); // First random block let block1 = test_block_builder.get_executed_block_with_number(0, B256::random()); @@ -1237,7 +1334,7 @@ mod tests { #[test] fn test_canonical_in_memory_state_state_provider() { - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block1 = test_block_builder.get_executed_block_with_number(1, B256::random()); let block2 = test_block_builder.get_executed_block_with_number(2, block1.block().hash()); let block3 = test_block_builder.get_executed_block_with_number(3, block2.block().hash()); @@ -1284,14 +1381,15 @@ mod tests { #[test] fn test_canonical_in_memory_state_canonical_chain_empty() { - let state = CanonicalInMemoryState::empty(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); let chain: Vec<_> = state.canonical_chain().collect(); assert!(chain.is_empty()); } #[test] fn test_canonical_in_memory_state_canonical_chain_single_block() { - let block = TestBlockBuilder::default().get_executed_block_with_number(1, B256::random()); + let block = TestBlockBuilder::::default() + .get_executed_block_with_number(1, B256::random()); let hash = block.block().hash(); let mut blocks = HashMap::default(); blocks.insert(hash, Arc::new(BlockState::new(block))); @@ -1310,7 +1408,7 @@ mod tests { fn test_canonical_in_memory_state_canonical_chain_multiple_blocks() { let mut parent_hash = B256::random(); let mut block_builder = TestBlockBuilder::default(); - let state = CanonicalInMemoryState::empty(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); for i in 1..=3 { let block = block_builder.get_executed_block_with_number(i, parent_hash); @@ -1332,7 +1430,7 @@ mod tests { fn test_canonical_in_memory_state_canonical_chain_with_pending_block() { let mut parent_hash = B256::random(); let mut block_builder = TestBlockBuilder::default(); - let state = CanonicalInMemoryState::empty(); + let state: CanonicalInMemoryState = CanonicalInMemoryState::empty(); for i in 1..=2 { let block = block_builder.get_executed_block_with_number(i, parent_hash); @@ -1352,7 +1450,7 @@ mod tests { #[test] fn test_block_state_parent_blocks() { - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let chain = create_mock_state_chain(&mut test_block_builder, 4); let parents = chain[3].parent_state_chain(); @@ -1373,7 +1471,7 @@ mod tests { #[test] fn test_block_state_single_block_state_chain() { let single_block_number = 1; - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let single_block = create_mock_state(&mut test_block_builder, single_block_number, B256::random()); let single_block_hash = single_block.block().block.hash(); @@ -1381,7 +1479,7 @@ mod tests { let parents = single_block.parent_state_chain(); assert_eq!(parents.len(), 0); - let block_state_chain = single_block.chain(); + let block_state_chain = single_block.chain().collect::>(); assert_eq!(block_state_chain.len(), 1); assert_eq!(block_state_chain[0].block().block.number, single_block_number); assert_eq!(block_state_chain[0].block().block.hash(), single_block_hash); @@ -1389,21 +1487,21 @@ mod tests { #[test] fn test_block_state_chain() { - let 
mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let chain = create_mock_state_chain(&mut test_block_builder, 3); - let block_state_chain = chain[2].chain(); + let block_state_chain = chain[2].chain().collect::>(); assert_eq!(block_state_chain.len(), 3); assert_eq!(block_state_chain[0].block().block.number, 3); assert_eq!(block_state_chain[1].block().block.number, 2); assert_eq!(block_state_chain[2].block().block.number, 1); - let block_state_chain = chain[1].chain(); + let block_state_chain = chain[1].chain().collect::>(); assert_eq!(block_state_chain.len(), 2); assert_eq!(block_state_chain[0].block().block.number, 2); assert_eq!(block_state_chain[1].block().block.number, 1); - let block_state_chain = chain[0].chain(); + let block_state_chain = chain[0].chain().collect::>(); assert_eq!(block_state_chain.len(), 1); assert_eq!(block_state_chain[0].block().block.number, 1); } @@ -1411,7 +1509,7 @@ mod tests { #[test] fn test_to_chain_notification() { // Generate 4 blocks - let mut test_block_builder = TestBlockBuilder::default(); + let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block0 = test_block_builder.get_executed_block_with_number(0, B256::random()); let block1 = test_block_builder.get_executed_block_with_number(1, block0.block.hash()); let block1a = test_block_builder.get_executed_block_with_number(1, block0.block.hash()); diff --git a/crates/chain-state/src/lib.rs b/crates/chain-state/src/lib.rs index 50a10311107..519469d67f6 100644 --- a/crates/chain-state/src/lib.rs +++ b/crates/chain-state/src/lib.rs @@ -22,8 +22,11 @@ pub use notifications::{ }; mod memory_overlay; -pub use memory_overlay::MemoryOverlayStateProvider; +pub use memory_overlay::{MemoryOverlayStateProvider, MemoryOverlayStateProviderRef}; #[cfg(any(test, feature = "test-utils"))] /// Common test helpers pub mod test_utils; + +// todo: remove when generic data prim integration complete +pub use reth_primitives::EthPrimitives; diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index eb125dad115..21bc30b07cf 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -1,219 +1,261 @@ use super::ExecutedBlock; +use alloy_consensus::BlockHeader; use alloy_primitives::{ keccak256, map::{HashMap, HashSet}, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, }; use reth_errors::ProviderResult; -use reth_primitives::{Account, Bytecode}; +use reth_primitives::{Account, Bytecode, NodePrimitives}; use reth_storage_api::{ - AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateProviderBox, + AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, TrieInput, }; +use revm::db::BundleState; use std::sync::OnceLock; +/// A state provider that stores references to in-memory blocks along with their state as well as a +/// reference of the historical state provider for fallback lookups. +#[allow(missing_debug_implementations)] +pub struct MemoryOverlayStateProviderRef<'a, N: NodePrimitives = reth_primitives::EthPrimitives> { + /// Historical state provider for state lookups that are not found in in-memory blocks. 
+ pub(crate) historical: Box, + /// The collection of executed parent blocks. Expected order is newest to oldest. + pub(crate) in_memory: Vec>, + /// Lazy-loaded in-memory trie data. + pub(crate) trie_state: OnceLock, +} + /// A state provider that stores references to in-memory blocks along with their state as well as /// the historical state provider for fallback lookups. #[allow(missing_debug_implementations)] -pub struct MemoryOverlayStateProvider { +pub struct MemoryOverlayStateProvider { /// Historical state provider for state lookups that are not found in in-memory blocks. pub(crate) historical: Box, /// The collection of executed parent blocks. Expected order is newest to oldest. - pub(crate) in_memory: Vec, + pub(crate) in_memory: Vec>, /// Lazy-loaded in-memory trie data. pub(crate) trie_state: OnceLock, } -impl MemoryOverlayStateProvider { - /// Create new memory overlay state provider. - /// - /// ## Arguments - /// - /// - `in_memory` - the collection of executed ancestor blocks in reverse. - /// - `historical` - a historical state provider for the latest ancestor block stored in the - /// database. - pub fn new(historical: Box, in_memory: Vec) -> Self { - Self { historical, in_memory, trie_state: OnceLock::new() } - } - - /// Turn this state provider into a [`StateProviderBox`] - pub fn boxed(self) -> StateProviderBox { - Box::new(self) - } - - /// Return lazy-loaded trie state aggregated from in-memory blocks. - fn trie_state(&self) -> &MemoryOverlayTrieState { - self.trie_state.get_or_init(|| { - let mut trie_state = MemoryOverlayTrieState::default(); - for block in self.in_memory.iter().rev() { - trie_state.state.extend_ref(block.hashed_state.as_ref()); - trie_state.nodes.extend_ref(block.trie.as_ref()); - } - trie_state - }) - } -} +macro_rules! impl_state_provider { + ([$($tokens:tt)*],$type:ty, $historical_type:ty) => { + impl $($tokens)* $type { + /// Create new memory overlay state provider. + /// + /// ## Arguments + /// + /// - `in_memory` - the collection of executed ancestor blocks in reverse. + /// - `historical` - a historical state provider for the latest ancestor block stored in the + /// database. + pub fn new(historical: $historical_type, in_memory: Vec>) -> Self { + Self { historical, in_memory, trie_state: OnceLock::new() } + } + + /// Turn this state provider into a state provider + pub fn boxed(self) -> $historical_type { + Box::new(self) + } -impl BlockHashReader for MemoryOverlayStateProvider { - fn block_hash(&self, number: BlockNumber) -> ProviderResult> { - for block in &self.in_memory { - if block.block.number == number { - return Ok(Some(block.block.hash())) + /// Return lazy-loaded trie state aggregated from in-memory blocks. 
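The `impl_state_provider!` macro introduced above exists so one body can implement the same provider traits for both the owned provider and the lifetime-parameterized `Ref` variant. A miniature, self-contained sketch of that macro shape, with all names hypothetical:

trait Lookup {
    fn get(&self, key: u64) -> Option<u64>;
}

struct Owned {
    data: Vec<(u64, u64)>,
}

struct Borrowed<'a> {
    data: &'a [(u64, u64)],
}

// One macro body, two impls: the `[$($tokens:tt)*]` slot carries the
// (possibly empty) generics of the implementing type.
macro_rules! impl_lookup {
    ([$($tokens:tt)*], $type:ty) => {
        impl $($tokens)* Lookup for $type {
            fn get(&self, key: u64) -> Option<u64> {
                self.data.iter().find(|(k, _)| *k == key).map(|(_, v)| *v)
            }
        }
    };
}

impl_lookup!([], Owned);
impl_lookup!([<'a>], Borrowed<'a>);

fn main() {
    let owned = Owned { data: vec![(1, 10)] };
    let borrowed = Borrowed { data: &owned.data };
    assert_eq!(owned.get(1), borrowed.get(1));
}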
+ fn trie_state(&self) -> &MemoryOverlayTrieState { + self.trie_state.get_or_init(|| { + let mut trie_state = MemoryOverlayTrieState::default(); + for block in self.in_memory.iter().rev() { + trie_state.state.extend_ref(block.hashed_state.as_ref()); + trie_state.nodes.extend_ref(block.trie.as_ref()); + } + trie_state + }) } } - self.historical.block_hash(number) - } - - fn canonical_hashes_range( - &self, - start: BlockNumber, - end: BlockNumber, - ) -> ProviderResult> { - let range = start..end; - let mut earliest_block_number = None; - let mut in_memory_hashes = Vec::new(); - for block in &self.in_memory { - if range.contains(&block.block.number) { - in_memory_hashes.insert(0, block.block.hash()); - earliest_block_number = Some(block.block.number); + impl $($tokens)* BlockHashReader for $type { + fn block_hash(&self, number: BlockNumber) -> ProviderResult> { + for block in &self.in_memory { + if block.block.number() == number { + return Ok(Some(block.block.hash())) + } + } + + self.historical.block_hash(number) + } + + fn canonical_hashes_range( + &self, + start: BlockNumber, + end: BlockNumber, + ) -> ProviderResult> { + let range = start..end; + let mut earliest_block_number = None; + let mut in_memory_hashes = Vec::new(); + for block in &self.in_memory { + if range.contains(&block.block.number()) { + in_memory_hashes.insert(0, block.block.hash()); + earliest_block_number = Some(block.block.number()); + } + } + + let mut hashes = + self.historical.canonical_hashes_range(start, earliest_block_number.unwrap_or(end))?; + hashes.append(&mut in_memory_hashes); + Ok(hashes) } } - let mut hashes = - self.historical.canonical_hashes_range(start, earliest_block_number.unwrap_or(end))?; - hashes.append(&mut in_memory_hashes); - Ok(hashes) - } -} + impl $($tokens)* AccountReader for $type { + fn basic_account(&self, address: Address) -> ProviderResult> { + for block in &self.in_memory { + if let Some(account) = block.execution_output.account(&address) { + return Ok(account) + } + } -impl AccountReader for MemoryOverlayStateProvider { - fn basic_account(&self, address: Address) -> ProviderResult> { - for block in &self.in_memory { - if let Some(account) = block.execution_output.account(&address) { - return Ok(account) + self.historical.basic_account(address) } } - self.historical.basic_account(address) - } -} + impl $($tokens)* StateRootProvider for $type { + fn state_root(&self, state: HashedPostState) -> ProviderResult { + self.state_root_from_nodes(TrieInput::from_state(state)) + } -impl StateRootProvider for MemoryOverlayStateProvider { - fn state_root(&self, state: HashedPostState) -> ProviderResult { - self.state_root_from_nodes(TrieInput::from_state(state)) - } - - fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult { - let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.state_root_from_nodes(input) - } - - fn state_root_with_updates( - &self, - state: HashedPostState, - ) -> ProviderResult<(B256, TrieUpdates)> { - self.state_root_from_nodes_with_updates(TrieInput::from_state(state)) - } - - fn state_root_from_nodes_with_updates( - &self, - mut input: TrieInput, - ) -> ProviderResult<(B256, TrieUpdates)> { - let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.state_root_from_nodes_with_updates(input) - } -} + fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult { + let MemoryOverlayTrieState { nodes, 
state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.state_root_from_nodes(input) + } -impl StorageRootProvider for MemoryOverlayStateProvider { - // TODO: Currently this does not reuse available in-memory trie nodes. - fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult { - let state = &self.trie_state().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_root(address, hashed_storage) - } - - // TODO: Currently this does not reuse available in-memory trie nodes. - fn storage_proof( - &self, - address: Address, - slot: B256, - storage: HashedStorage, - ) -> ProviderResult { - let state = &self.trie_state().state; - let mut hashed_storage = - state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); - hashed_storage.extend(&storage); - self.historical.storage_proof(address, slot, hashed_storage) - } -} + fn state_root_with_updates( + &self, + state: HashedPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + self.state_root_from_nodes_with_updates(TrieInput::from_state(state)) + } -impl StateProofProvider for MemoryOverlayStateProvider { - fn proof( - &self, - mut input: TrieInput, - address: Address, - slots: &[B256], - ) -> ProviderResult { - let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.proof(input, address, slots) - } - - fn multiproof( - &self, - mut input: TrieInput, - targets: HashMap>, - ) -> ProviderResult { - let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.multiproof(input, targets) - } - - fn witness( - &self, - mut input: TrieInput, - target: HashedPostState, - ) -> ProviderResult> { - let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); - input.prepend_cached(nodes, state); - self.historical.witness(input, target) - } -} + fn state_root_from_nodes_with_updates( + &self, + mut input: TrieInput, + ) -> ProviderResult<(B256, TrieUpdates)> { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.state_root_from_nodes_with_updates(input) + } + } + + impl $($tokens)* StorageRootProvider for $type { + // TODO: Currently this does not reuse available in-memory trie nodes. + fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult { + let state = &self.trie_state().state; + let mut hashed_storage = + state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + hashed_storage.extend(&storage); + self.historical.storage_root(address, hashed_storage) + } -impl StateProvider for MemoryOverlayStateProvider { - fn storage( - &self, - address: Address, - storage_key: StorageKey, - ) -> ProviderResult> { - for block in &self.in_memory { - if let Some(value) = block.execution_output.storage(&address, storage_key.into()) { - return Ok(Some(value)) + // TODO: Currently this does not reuse available in-memory trie nodes. 
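The `trie_state()` calls above rely on a lazily built aggregate: it is computed once, on first use, from the in-memory blocks in oldest-to-newest order, then cached for every later lookup. A self-contained sketch of that `OnceLock` pattern with stand-in data:

use std::sync::OnceLock;

// Each inner `Vec` plays the role of one block's hashed state.
struct Overlay {
    in_memory: Vec<Vec<u64>>,       // newest block first, as in the code above
    aggregated: OnceLock<Vec<u64>>, // plays the role of the cached trie state
}

impl Overlay {
    fn aggregated(&self) -> &[u64] {
        self.aggregated.get_or_init(|| {
            // `.rev()` applies oldest first so newer blocks extend older ones.
            self.in_memory.iter().rev().flatten().copied().collect()
        })
    }
}

fn main() {
    let overlay = Overlay {
        in_memory: vec![vec![3], vec![2], vec![1]],
        aggregated: OnceLock::new(),
    };
    // Built exactly once, on first call; later calls return the cached value.
    assert_eq!(overlay.aggregated(), &[1, 2, 3][..]);
}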
+ fn storage_proof( + &self, + address: Address, + slot: B256, + storage: HashedStorage, + ) -> ProviderResult { + let state = &self.trie_state().state; + let mut hashed_storage = + state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + hashed_storage.extend(&storage); + self.historical.storage_proof(address, slot, hashed_storage) + } + + // TODO: Currently this does not reuse available in-memory trie nodes. + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + storage: HashedStorage, + ) -> ProviderResult { + let state = &self.trie_state().state; + let mut hashed_storage = + state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + hashed_storage.extend(&storage); + self.historical.storage_multiproof(address, slots, hashed_storage) } } - self.historical.storage(address, storage_key) - } + impl $($tokens)* StateProofProvider for $type { + fn proof( + &self, + mut input: TrieInput, + address: Address, + slots: &[B256], + ) -> ProviderResult { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.proof(input, address, slots) + } + + fn multiproof( + &self, + mut input: TrieInput, + targets: HashMap>, + ) -> ProviderResult { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.multiproof(input, targets) + } - fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { - for block in &self.in_memory { - if let Some(contract) = block.execution_output.bytecode(&code_hash) { - return Ok(Some(contract)) + fn witness( + &self, + mut input: TrieInput, + target: HashedPostState, + ) -> ProviderResult> { + let MemoryOverlayTrieState { nodes, state } = self.trie_state().clone(); + input.prepend_cached(nodes, state); + self.historical.witness(input, target) } } - self.historical.bytecode_by_hash(code_hash) - } + impl $($tokens)* HashedPostStateProvider for $type { + fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState { + self.historical.hashed_post_state(bundle_state) + } + } + + impl $($tokens)* StateProvider for $type { + fn storage( + &self, + address: Address, + storage_key: StorageKey, + ) -> ProviderResult> { + for block in &self.in_memory { + if let Some(value) = block.execution_output.storage(&address, storage_key.into()) { + return Ok(Some(value)) + } + } + + self.historical.storage(address, storage_key) + } + + fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { + for block in &self.in_memory { + if let Some(contract) = block.execution_output.bytecode(&code_hash) { + return Ok(Some(contract)) + } + } + + self.historical.bytecode_by_hash(code_hash) + } + } + }; } +impl_state_provider!([], MemoryOverlayStateProvider, Box); +impl_state_provider!([<'a, N: NodePrimitives>], MemoryOverlayStateProviderRef<'a, N>, Box); + /// The collection of data necessary for trie-related operations for [`MemoryOverlayStateProvider`]. #[derive(Clone, Default, Debug)] pub(crate) struct MemoryOverlayTrieState { diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index fc717314b3f..498528813d6 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -1,9 +1,10 @@ //! Canonical chain state notification trait and types. 
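The overlay provider impls above all share one read discipline: scan the in-memory blocks newest-first and fall back to the historical source only on a miss. A self-contained sketch of that lookup order, with plain maps as stand-ins for execution outputs and the historical provider:

use std::collections::HashMap;

struct OverlayReader {
    in_memory: Vec<HashMap<u64, u64>>, // newest block first
    historical: HashMap<u64, u64>,
}

impl OverlayReader {
    fn storage(&self, key: u64) -> Option<u64> {
        // First writer wins: the newest in-memory block shadows older ones,
        // and anything in memory shadows the historical source.
        for block in &self.in_memory {
            if let Some(value) = block.get(&key) {
                return Some(*value);
            }
        }
        self.historical.get(&key).copied()
    }
}

fn main() {
    let reader = OverlayReader {
        in_memory: vec![HashMap::from([(1, 30)]), HashMap::from([(1, 20), (2, 5)])],
        historical: HashMap::from([(1, 10), (3, 7)]),
    };
    assert_eq!(reader.storage(1), Some(30)); // newest in-memory block
    assert_eq!(reader.storage(2), Some(5));  // older in-memory block
    assert_eq!(reader.storage(3), Some(7));  // historical fallback
}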
-use auto_impl::auto_impl; +use alloy_eips::eip2718::Encodable2718; use derive_more::{Deref, DerefMut}; use reth_execution_types::{BlockReceipts, Chain}; -use reth_primitives::{SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; +use reth_storage_api::NodePrimitivesProvider; use std::{ pin::Pin, sync::Arc, @@ -17,37 +18,48 @@ use tokio_stream::{ use tracing::debug; /// Type alias for a receiver that receives [`CanonStateNotification`] -pub type CanonStateNotifications = broadcast::Receiver; +pub type CanonStateNotifications = + broadcast::Receiver>; /// Type alias for a sender that sends [`CanonStateNotification`] -pub type CanonStateNotificationSender = broadcast::Sender; +pub type CanonStateNotificationSender = + broadcast::Sender>; /// A type that allows to register chain related event subscriptions. -#[auto_impl(&, Arc)] -pub trait CanonStateSubscriptions: Send + Sync { +pub trait CanonStateSubscriptions: NodePrimitivesProvider + Send + Sync { /// Get notified when a new canonical chain was imported. /// /// A canonical chain be one or more blocks, a reorg or a revert. - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications; + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications; /// Convenience method to get a stream of [`CanonStateNotification`]. - fn canonical_state_stream(&self) -> CanonStateNotificationStream { + fn canonical_state_stream(&self) -> CanonStateNotificationStream { CanonStateNotificationStream { st: BroadcastStream::new(self.subscribe_to_canonical_state()), } } } +impl CanonStateSubscriptions for &T { + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { + (*self).subscribe_to_canonical_state() + } + + fn canonical_state_stream(&self) -> CanonStateNotificationStream { + (*self).canonical_state_stream() + } +} + /// A Stream of [`CanonStateNotification`]. #[derive(Debug)] #[pin_project::pin_project] -pub struct CanonStateNotificationStream { +pub struct CanonStateNotificationStream { #[pin] - st: BroadcastStream, + st: BroadcastStream>, } -impl Stream for CanonStateNotificationStream { - type Item = CanonStateNotification; +impl Stream for CanonStateNotificationStream { + type Item = CanonStateNotification; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { loop { @@ -68,11 +80,11 @@ impl Stream for CanonStateNotificationStream { /// The notification contains at least one [`Chain`] with the imported segment. If some blocks were /// reverted (e.g. during a reorg), the old chain is also returned. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum CanonStateNotification { +pub enum CanonStateNotification { /// The canonical chain was extended. Commit { /// The newly added chain segment. - new: Arc, + new: Arc>, }, /// A chain segment was reverted or reorged. /// @@ -82,18 +94,18 @@ pub enum CanonStateNotification { /// chain segment. Reorg { /// The chain segment that was reverted. - old: Arc, + old: Arc>, /// The chain segment that was added on top of the canonical chain, minus the reverted /// blocks. /// /// In the case of a revert, not a reorg, this chain segment is empty. - new: Arc, + new: Arc>, }, } -impl CanonStateNotification { +impl CanonStateNotification { /// Get the chain segment that was reverted, if any. - pub fn reverted(&self) -> Option> { + pub fn reverted(&self) -> Option>> { match self { Self::Commit { .. } => None, Self::Reorg { old, .. 
} => Some(old.clone()), @@ -101,7 +113,7 @@ impl CanonStateNotification { } /// Get the newly imported chain segment, if any. - pub fn committed(&self) -> Arc { + pub fn committed(&self) -> Arc> { match self { Self::Commit { new } | Self::Reorg { new, .. } => new.clone(), } @@ -111,7 +123,7 @@ impl CanonStateNotification { /// /// Returns the new tip for [`Self::Reorg`] and [`Self::Commit`] variants which commit at least /// 1 new block. - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &SealedBlockWithSenders { match self { Self::Commit { new } | Self::Reorg { new, .. } => new.tip(), } @@ -122,7 +134,10 @@ impl CanonStateNotification { /// /// The boolean in the tuple (2nd element) denotes whether the receipt was from the reverted /// chain segment. - pub fn block_receipts(&self) -> Vec<(BlockReceipts, bool)> { + pub fn block_receipts(&self) -> Vec<(BlockReceipts, bool)> + where + N::SignedTx: Encodable2718, + { let mut receipts = Vec::new(); // get old receipts @@ -140,24 +155,29 @@ impl CanonStateNotification { /// Wrapper around a broadcast receiver that receives fork choice notifications. #[derive(Debug, Deref, DerefMut)] -pub struct ForkChoiceNotifications(pub watch::Receiver>); +pub struct ForkChoiceNotifications( + pub watch::Receiver>>, +); /// A trait that allows to register to fork choice related events /// and get notified when a new fork choice is available. pub trait ForkChoiceSubscriptions: Send + Sync { + /// Block Header type. + type Header: Clone + Send + Sync + 'static; + /// Get notified when a new safe block of the chain is selected. - fn subscribe_safe_block(&self) -> ForkChoiceNotifications; + fn subscribe_safe_block(&self) -> ForkChoiceNotifications; /// Get notified when a new finalized block of the chain is selected. - fn subscribe_finalized_block(&self) -> ForkChoiceNotifications; + fn subscribe_finalized_block(&self) -> ForkChoiceNotifications; /// Convenience method to get a stream of the new safe blocks of the chain. - fn safe_block_stream(&self) -> ForkChoiceStream { + fn safe_block_stream(&self) -> ForkChoiceStream> { ForkChoiceStream::new(self.subscribe_safe_block().0) } /// Convenience method to get a stream of the new finalized blocks of the chain. 
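The fork-choice subscriptions above hand out `watch` receivers, so a late subscriber observes only the latest published header rather than a backlog of events. A minimal sketch of that shape with a hypothetical `Header` stand-in (assumes tokio with the `rt` and `macros` features):

use tokio::sync::watch;

// Hypothetical stand-in for a sealed header.
#[derive(Clone, Debug, PartialEq)]
struct Header {
    number: u64,
}

#[tokio::main]
async fn main() {
    // `None` until the first fork-choice update names a safe/finalized block.
    let (tx, mut rx) = watch::channel::<Option<Header>>(None);

    tx.send(Some(Header { number: 100 })).unwrap();

    // `changed()` resolves because a value newer than the one last seen by
    // this receiver has been published; only the latest value is retained.
    rx.changed().await.unwrap();
    assert_eq!(rx.borrow().as_ref().map(|h| h.number), Some(100));
}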
- fn finalized_block_stream(&self) -> ForkChoiceStream { + fn finalized_block_stream(&self) -> ForkChoiceStream> { ForkChoiceStream::new(self.subscribe_finalized_block().0) } } @@ -190,3 +210,245 @@ impl Stream for ForkChoiceStream { } } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{b256, B256}; + use reth_execution_types::ExecutionOutcome; + use reth_primitives::{Receipt, Receipts, TransactionSigned, TxType}; + + #[test] + fn test_commit_notification() { + let block: SealedBlockWithSenders = Default::default(); + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + + let mut block1 = block.clone(); + block1.set_block_number(1); + block1.set_hash(block1_hash); + + let mut block2 = block; + block2.set_block_number(2); + block2.set_hash(block2_hash); + + let chain: Arc = Arc::new(Chain::new( + vec![block1.clone(), block2.clone()], + ExecutionOutcome::default(), + None, + )); + + // Create a commit notification + let notification = CanonStateNotification::Commit { new: chain.clone() }; + + // Test that `committed` returns the correct chain + assert_eq!(notification.committed(), chain); + + // Test that `reverted` returns None for `Commit` + assert!(notification.reverted().is_none()); + + // Test that `tip` returns the correct block + assert_eq!(*notification.tip(), block2); + } + + #[test] + fn test_reorg_notification() { + let block: SealedBlockWithSenders = Default::default(); + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + let block3_hash = B256::new([0x03; 32]); + + let mut block1 = block.clone(); + block1.set_block_number(1); + block1.set_hash(block1_hash); + + let mut block2 = block.clone(); + block2.set_block_number(2); + block2.set_hash(block2_hash); + + let mut block3 = block; + block3.set_block_number(3); + block3.set_hash(block3_hash); + + let old_chain: Arc = + Arc::new(Chain::new(vec![block1.clone()], ExecutionOutcome::default(), None)); + let new_chain = Arc::new(Chain::new( + vec![block2.clone(), block3.clone()], + ExecutionOutcome::default(), + None, + )); + + // Create a reorg notification + let notification = + CanonStateNotification::Reorg { old: old_chain.clone(), new: new_chain.clone() }; + + // Test that `reverted` returns the old chain + assert_eq!(notification.reverted(), Some(old_chain)); + + // Test that `committed` returns the new chain + assert_eq!(notification.committed(), new_chain); + + // Test that `tip` returns the tip of the new chain (last block in the new chain) + assert_eq!(*notification.tip(), block3); + } + + #[test] + fn test_block_receipts_commit() { + // Create a default block instance for use in block definitions. + let block: SealedBlockWithSenders = Default::default(); + + // Define unique hashes for two blocks to differentiate them in the chain. + let block1_hash = B256::new([0x01; 32]); + let block2_hash = B256::new([0x02; 32]); + + // Create a default transaction to include in block1's transactions. + let tx = TransactionSigned::default(); + + // Create a clone of the default block and customize it to act as block1. + let mut block1 = block.clone(); + block1.set_block_number(1); + block1.set_hash(block1_hash); + // Add the transaction to block1's transactions. + block1.block.body.transactions.push(tx); + + // Clone the default block and customize it to act as block2. + let mut block2 = block; + block2.set_block_number(2); + block2.set_hash(block2_hash); + + // Create a receipt for the transaction in block1. 
+ #[allow(clippy::needless_update)] + let receipt1 = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 12345, + logs: vec![], + success: true, + ..Default::default() + }; + + // Wrap the receipt in a `Receipts` structure, as expected in the `ExecutionOutcome`. + let receipts = Receipts { receipt_vec: vec![vec![Some(receipt1.clone())]] }; + + // Define an `ExecutionOutcome` with the created receipts. + let execution_outcome = ExecutionOutcome { receipts, ..Default::default() }; + + // Create a new chain segment with `block1` and `block2` and the execution outcome. + let new_chain: Arc = + Arc::new(Chain::new(vec![block1.clone(), block2.clone()], execution_outcome, None)); + + // Create a commit notification containing the new chain segment. + let notification = CanonStateNotification::Commit { new: new_chain }; + + // Call `block_receipts` on the commit notification to retrieve block receipts. + let block_receipts = notification.block_receipts(); + + // Assert that only one receipt entry exists in the `block_receipts` list. + assert_eq!(block_receipts.len(), 1); + + // Verify that the first entry matches block1's hash and transaction receipt. + assert_eq!( + block_receipts[0].0, + BlockReceipts { + block: block1.num_hash(), + tx_receipts: vec![( + // Transaction hash of a Transaction::default() + b256!("20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), + receipt1 + )] + } + ); + + // Assert that the receipt is from the committed segment (not reverted). + assert!(!block_receipts[0].1); + } + + #[test] + fn test_block_receipts_reorg() { + // Define block1 for the old chain segment, which will be reverted. + let mut old_block1: SealedBlockWithSenders = Default::default(); + old_block1.set_block_number(1); + old_block1.set_hash(B256::new([0x01; 32])); + old_block1.block.body.transactions.push(TransactionSigned::default()); + + // Create a receipt for a transaction in the reverted block. + #[allow(clippy::needless_update)] + let old_receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 54321, + logs: vec![], + success: false, + ..Default::default() + }; + let old_receipts = Receipts { receipt_vec: vec![vec![Some(old_receipt.clone())]] }; + + let old_execution_outcome = + ExecutionOutcome { receipts: old_receipts, ..Default::default() }; + + // Create an old chain segment to be reverted, containing `old_block1`. + let old_chain: Arc = + Arc::new(Chain::new(vec![old_block1.clone()], old_execution_outcome, None)); + + // Define block2 for the new chain segment, which will be committed. + let mut new_block1: SealedBlockWithSenders = Default::default(); + new_block1.set_block_number(2); + new_block1.set_hash(B256::new([0x02; 32])); + new_block1.block.body.transactions.push(TransactionSigned::default()); + + // Create a receipt for a transaction in the new committed block. + #[allow(clippy::needless_update)] + let new_receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 12345, + logs: vec![], + success: true, + ..Default::default() + }; + let new_receipts = Receipts { receipt_vec: vec![vec![Some(new_receipt.clone())]] }; + + let new_execution_outcome = + ExecutionOutcome { receipts: new_receipts, ..Default::default() }; + + // Create a new chain segment to be committed, containing `new_block1`. + let new_chain = Arc::new(Chain::new(vec![new_block1.clone()], new_execution_outcome, None)); + + // Create a reorg notification with both reverted (old) and committed (new) chain segments. 
+ let notification = CanonStateNotification::Reorg { old: old_chain, new: new_chain }; + + // Retrieve receipts from both old (reverted) and new (committed) segments. + let block_receipts = notification.block_receipts(); + + // Assert there are two receipt entries, one from each chain segment. + assert_eq!(block_receipts.len(), 2); + + // Verify that the first entry matches old_block1 and its receipt from the reverted segment. + assert_eq!( + block_receipts[0].0, + BlockReceipts { + block: old_block1.num_hash(), + tx_receipts: vec![( + // Transaction hash of a Transaction::default() + b256!("20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), + old_receipt + )] + } + ); + // Confirm this is from the reverted segment. + assert!(block_receipts[0].1); + + // Verify that the second entry matches new_block1 and its receipt from the committed + // segment. + assert_eq!( + block_receipts[1].0, + BlockReceipts { + block: new_block1.num_hash(), + tx_receipts: vec![( + // Transaction hash of a Transaction::default() + b256!("20b5378c6fe992c118b557d2f8e8bbe0b7567f6fe5483a8f0f1c51e93a9d91ab"), + new_receipt + )] + } + ); + // Confirm this is from the committed segment. + assert!(!block_receipts[1].1); + } +} diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index a820bb5cf01..1cd9f2df96b 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -1,20 +1,23 @@ +use core::marker::PhantomData; + use crate::{ in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, }; -use alloy_consensus::{Transaction as _, TxEip1559, EMPTY_ROOT_HASH}; -use alloy_primitives::{Address, BlockNumber, Sealable, B256, U256}; +use alloy_consensus::{Header, Transaction as _, TxEip1559, EMPTY_ROOT_HASH}; +use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip7685::Requests}; +use alloy_primitives::{Address, BlockNumber, B256, U256}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use rand::{thread_rng, Rng}; use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ - constants::EIP1559_INITIAL_BASE_FEE, proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, - BlockBody, Header, Receipt, Receipts, Requests, SealedBlock, SealedBlockWithSenders, - SealedHeader, Transaction, TransactionSigned, TransactionSignedEcRecovered, + BlockBody, EthPrimitives, NodePrimitives, Receipt, Receipts, RecoveredTx, SealedBlock, + SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, }; +use reth_storage_api::NodePrimitivesProvider; use reth_trie::{root::state_root_unhashed, updates::TrieUpdates, HashedPostState}; use revm::{db::BundleState, primitives::AccountInfo}; use std::{ @@ -27,7 +30,7 @@ use tokio::sync::broadcast::{self, Sender}; /// Functionality to build blocks for tests and help with assertions about /// their execution. #[derive(Debug)] -pub struct TestBlockBuilder { +pub struct TestBlockBuilder { /// The account that signs all the block's transactions. pub signer: Address, /// Private key for signing. 
@@ -40,9 +43,10 @@ pub struct TestBlockBuilder { pub signer_build_account_info: AccountInfo, /// Chain spec of the blocks generated by this builder pub chain_spec: ChainSpec, + _prims: PhantomData, } -impl Default for TestBlockBuilder { +impl Default for TestBlockBuilder { fn default() -> Self { let initial_account_info = AccountInfo::from_balance(U256::from(10).pow(U256::from(18))); let signer_pk = PrivateKeySigner::random(); @@ -53,6 +57,7 @@ impl Default for TestBlockBuilder { signer_pk, signer_execute_account_info: initial_account_info.clone(), signer_build_account_info: initial_account_info, + _prims: PhantomData, } } } @@ -74,7 +79,7 @@ impl TestBlockBuilder { /// Gas cost of a single transaction generated by the block builder. pub fn single_tx_cost() -> U256 { - U256::from(EIP1559_INITIAL_BASE_FEE * MIN_TRANSACTION_GAS) + U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS) } /// Generates a random [`SealedBlockWithSenders`]. @@ -85,26 +90,25 @@ impl TestBlockBuilder { ) -> SealedBlockWithSenders { let mut rng = thread_rng(); - let mock_tx = |nonce: u64| -> TransactionSignedEcRecovered { + let mock_tx = |nonce: u64| -> RecoveredTx { let tx = Transaction::Eip1559(TxEip1559 { chain_id: self.chain_spec.chain.id(), nonce, gas_limit: MIN_TRANSACTION_GAS, to: Address::random().into(), - max_fee_per_gas: EIP1559_INITIAL_BASE_FEE as u128, + max_fee_per_gas: INITIAL_BASE_FEE as u128, max_priority_fee_per_gas: 1, ..Default::default() }); let signature_hash = tx.signature_hash(); let signature = self.signer_pk.sign_hash_sync(&signature_hash).unwrap(); - TransactionSigned::from_transaction_and_signature(tx, signature) - .with_signer(self.signer) + TransactionSigned::new_unhashed(tx, signature).with_signer(self.signer) }; let num_txs = rng.gen_range(0..5); let signer_balance_decrease = Self::single_tx_cost() * U256::from(num_txs); - let transactions: Vec = (0..num_txs) + let transactions: Vec = (0..num_txs) .map(|_| { let tx = mock_tx(self.signer_build_account_info.nonce); self.signer_build_account_info.nonce += 1; @@ -135,8 +139,10 @@ impl TestBlockBuilder { gas_used: transactions.len() as u64 * MIN_TRANSACTION_GAS, gas_limit: self.chain_spec.max_gas_limit, mix_hash: B256::random(), - base_fee_per_gas: Some(EIP1559_INITIAL_BASE_FEE), - transactions_root: calculate_transaction_root(&transactions), + base_fee_per_gas: Some(INITIAL_BASE_FEE), + transactions_root: calculate_transaction_root( + &transactions.clone().into_iter().map(|tx| tx.into_signed()).collect::>(), + ), receipts_root: calculate_receipt_root(&receipts), beneficiary: Address::random(), state_root: state_root_unhashed(HashMap::from([( @@ -160,16 +166,12 @@ impl TestBlockBuilder { ..Default::default() }; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - let block = SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { transactions: transactions.into_iter().map(|tx| tx.into_signed()).collect(), ommers: Vec::new(), withdrawals: Some(vec![].into()), - requests: None, }, }; @@ -293,8 +295,8 @@ impl TestBlockBuilder { } /// A test `ChainEventSubscriptions` #[derive(Clone, Debug, Default)] -pub struct TestCanonStateSubscriptions { - canon_notif_tx: Arc>>>, +pub struct TestCanonStateSubscriptions { + canon_notif_tx: Arc>>>>, } impl TestCanonStateSubscriptions { @@ -313,6 +315,10 @@ impl TestCanonStateSubscriptions { } } +impl NodePrimitivesProvider for TestCanonStateSubscriptions { + type Primitives = EthPrimitives; +} + impl CanonStateSubscriptions for 
TestCanonStateSubscriptions { /// Sets up a broadcast channel with a buffer size of 100. fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index b44a606b65b..0e56cf2d3d9 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -22,7 +22,7 @@ alloy-chains = { workspace = true, features = ["serde", "rlp"] } alloy-eips = { workspace = true, features = ["serde"] } alloy-genesis.workspace = true alloy-primitives = { workspace = true, features = ["rand", "rlp"] } -alloy-trie.workspace = true +alloy-consensus.workspace = true # misc auto_impl.workspace = true @@ -40,11 +40,30 @@ alloy-genesis.workspace = true [features] default = ["std"] std = [ - "alloy-chains/std", - "alloy-eips/std", - "alloy-genesis/std", - "alloy-primitives/std", - "alloy-trie/std", + "alloy-chains/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "alloy-trie/std", + "reth-primitives-traits/std", + "alloy-consensus/std", + "once_cell/std", + "alloy-rlp/std", + "reth-ethereum-forks/std", + "derive_more/std", + "reth-network-peers/std" +] +arbitrary = [ + "alloy-chains/arbitrary", + "reth-ethereum-forks/arbitrary", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "alloy-trie/arbitrary" +] +test-utils = [ + "reth-primitives-traits/test-utils", + "reth-trie-common/test-utils" ] -arbitrary = ["alloy-chains/arbitrary"] -test-utils = [] diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index f7061ff18fe..348051bef9c 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -1,12 +1,12 @@ use crate::{ChainSpec, DepositContract}; -use alloc::vec::Vec; +use alloc::{boxed::Box, vec::Vec}; use alloy_chains::Chain; +use alloy_consensus::Header; use alloy_eips::eip1559::BaseFeeParams; use alloy_genesis::Genesis; use alloy_primitives::B256; use core::fmt::{Debug, Display}; use reth_network_peers::NodeRecord; -use reth_primitives_traits::Header; /// Trait representing type configuring a chain spec. #[auto_impl::auto_impl(&, Arc)] @@ -14,6 +14,9 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { // todo: make chain spec type generic over hardfork //type Hardfork: Clone + Copy + 'static; + /// The header type of the network. + type Header; + /// Returns the [`Chain`] object this spec targets. fn chain(&self) -> Chain; @@ -38,10 +41,10 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { fn prune_delete_limit(&self) -> usize; /// Returns a string representation of the hardforks. - fn display_hardforks(&self) -> impl Display; + fn display_hardforks(&self) -> Box; /// The genesis header. - fn genesis_header(&self) -> &Header; + fn genesis_header(&self) -> &Self::Header; /// The genesis block specification. fn genesis(&self) -> &Genesis; @@ -56,9 +59,16 @@ pub trait EthChainSpec: Send + Sync + Unpin + Debug { fn is_optimism(&self) -> bool { self.chain().is_optimism() } + + /// Returns `true` if this chain contains Ethereum configuration. 
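Two design points in the trait change above: `display_hardforks` now returns `Box<dyn Display>` instead of `impl Display`, which keeps the trait usable as a trait object, and the new associated `Header` type lets each network choose its own header. A self-contained sketch with hypothetical, trimmed-down names:

use std::fmt::Display;

// Hypothetical stand-in for the chain-spec trait.
trait ChainSpecLike {
    type Header;
    fn display_hardforks(&self) -> Box<dyn Display>;
    fn genesis_header(&self) -> &Self::Header;
}

struct EthHeader {
    number: u64,
}

struct Spec {
    genesis: EthHeader,
}

impl ChainSpecLike for Spec {
    type Header = EthHeader;

    fn display_hardforks(&self) -> Box<dyn Display> {
        // A real implementation would format the configured forks.
        Box::new("Frontier ..= Prague")
    }

    fn genesis_header(&self) -> &Self::Header {
        &self.genesis
    }
}

fn main() {
    let spec = Spec { genesis: EthHeader { number: 0 } };
    // This coercion is the point: a method returning `impl Display` would
    // make the trait unusable as a trait object.
    let spec: &dyn ChainSpecLike<Header = EthHeader> = &spec;
    println!("{}", spec.display_hardforks());
    assert_eq!(spec.genesis_header().number, 0);
}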
+ fn is_ethereum(&self) -> bool { + self.chain().is_ethereum() + } } impl EthChainSpec for ChainSpec { + type Header = Header; + fn chain(&self) -> Chain { self.chain } @@ -83,11 +93,11 @@ impl EthChainSpec for ChainSpec { self.prune_delete_limit } - fn display_hardforks(&self) -> impl Display { - self.display_hardforks() + fn display_hardforks(&self) -> Box { + Box::new(Self::display_hardforks(self)) } - fn genesis_header(&self) -> &Header { + fn genesis_header(&self) -> &Self::Header { self.genesis_header() } @@ -104,6 +114,6 @@ impl EthChainSpec for ChainSpec { } fn is_optimism(&self) -> bool { - self.chain.is_optimism() + false } } diff --git a/crates/chainspec/src/constants.rs b/crates/chainspec/src/constants.rs index 2e22b2299a4..3f46fb6b746 100644 --- a/crates/chainspec/src/constants.rs +++ b/crates/chainspec/src/constants.rs @@ -1,11 +1,12 @@ use crate::spec::DepositContract; -use alloy_primitives::{address, b256}; +use alloy_eips::eip6110::MAINNET_DEPOSIT_CONTRACT_ADDRESS; +use alloy_primitives::b256; /// Gas per transaction not creating a contract. pub const MIN_TRANSACTION_GAS: u64 = 21_000u64; /// Deposit contract address: `0x00000000219ab540356cbb839cbe05303d7705fa` pub(crate) const MAINNET_DEPOSIT_CONTRACT: DepositContract = DepositContract::new( - address!("00000000219ab540356cbb839cbe05303d7705fa"), + MAINNET_DEPOSIT_CONTRACT_ADDRESS, 11052984, b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"), ); diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index a8bae966b58..1f8ebd45f45 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -2,11 +2,22 @@ pub use alloy_eips::eip1559::BaseFeeParams; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_chains::{Chain, NamedChain}; +use alloy_consensus::constants::EMPTY_WITHDRAWALS; +use alloy_eips::{ + eip1559::INITIAL_BASE_FEE, eip6110::MAINNET_DEPOSIT_CONTRACT_ADDRESS, + eip7685::EMPTY_REQUESTS_HASH, +}; use alloy_genesis::Genesis; use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; -use alloy_trie::EMPTY_ROOT_HASH; use derive_more::From; +use alloy_consensus::{ + constants::{ + DEV_GENESIS_HASH, HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, + }, + Header, +}; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use reth_ethereum_forks::{ ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition, ForkFilter, ForkFilterKey, ForkHash, ForkId, Hardfork, Hardforks, Head, DEV_HARDFORKS, @@ -15,13 +26,7 @@ use reth_network_peers::{ base_nodes, base_testnet_nodes, holesky_nodes, mainnet_nodes, op_nodes, op_testnet_nodes, sepolia_nodes, NodeRecord, }; -use reth_primitives_traits::{ - constants::{ - DEV_GENESIS_HASH, EIP1559_INITIAL_BASE_FEE, EMPTY_WITHDRAWALS, ETHEREUM_BLOCK_GAS_LIMIT, - HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, - }, - Header, SealedHeader, -}; +use reth_primitives_traits::SealedHeader; use reth_trie_common::root::state_root_ref_unhashed; use crate::{constants::MAINNET_DEPOSIT_CONTRACT, once_cell_set, EthChainSpec, LazyLock, OnceLock}; @@ -42,7 +47,7 @@ pub static MAINNET: LazyLock> = LazyLock::new(|| { hardforks: EthereumHardfork::mainnet().into(), // https://etherscan.io/tx/0xe75fb554e433e03763a1560646ee22dcb74e5274b34c5ad644e7c0f619a7e1d0 deposit_contract: Some(DepositContract::new( - address!("00000000219ab540356cbb839cbe05303d7705fa"), + MAINNET_DEPOSIT_CONTRACT_ADDRESS, 11052984, b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"), 
)), @@ -282,8 +287,9 @@ impl ChainSpec { };
// If Prague is activated at genesis we set the requests hash to the empty requests hash.
- let requests_root =
- self.is_prague_active_at_timestamp(self.genesis.timestamp).then_some(EMPTY_ROOT_HASH);
+ let requests_hash = self
+ .is_prague_active_at_timestamp(self.genesis.timestamp)
+ .then_some(EMPTY_REQUESTS_HASH);
Header { gas_limit: self.genesis.gas_limit, @@ -299,7 +305,7 @@ impl ChainSpec { parent_beacon_block_root, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: excess_blob_gas.map(Into::into),
- requests_root,
+ requests_hash,
..Default::default() } } @@ -313,7 +319,7 @@ impl ChainSpec { pub fn initial_base_fee(&self) -> Option { // If the base fee is set in the genesis block, we use that instead of the default. let genesis_base_fee =
- self.genesis.base_fee_per_gas.map(|fee| fee as u64).unwrap_or(EIP1559_INITIAL_BASE_FEE);
+ self.genesis.base_fee_per_gas.map(|fee| fee as u64).unwrap_or(INITIAL_BASE_FEE);
// If London is activated at genesis, we set the initial base fee as per EIP-1559. self.hardforks.fork(EthereumHardfork::London).active_at_block(0).then_some(genesis_base_fee) @@ -511,34 +517,36 @@ impl ChainSpec { } }
- /// An internal helper function that returns the block number of the last block-based
- /// fork that occurs before any existing TTD (merge)/timestamp based forks.
+ /// This internal helper function retrieves the block number of the last block-based fork
+ /// that occurs before:
+ /// - Any existing Terminal Total Difficulty (TTD) or
+ /// - Timestamp-based forks in the current [`ChainSpec`].
+ ///
+ /// The function operates by examining the configured hard forks in the chain. It iterates
+ /// through the fork conditions and identifies the most recent block-based fork that
+ /// precedes any TTD or timestamp-based conditions.
///
- /// Note: this returns None if the `ChainSpec` is not configured with a TTD/Timestamp fork.
+ /// If there are no block-based forks found before these conditions, or if the [`ChainSpec`]
+ /// is not configured with a TTD or timestamp fork, this function will return `None`.
pub(crate) fn last_block_fork_before_merge_or_timestamp(&self) -> Option { let mut hardforks_iter = self.hardforks.forks_iter().peekable(); while let Some((_, curr_cond)) = hardforks_iter.next() { if let Some((_, next_cond)) = hardforks_iter.peek() {
- // peek and find the first occurrence of ForkCondition::TTD (merge) , or in
- // custom ChainSpecs, the first occurrence of
- // ForkCondition::Timestamp. If curr_cond is ForkCondition::Block at
- // this point, which it should be in most "normal" ChainSpecs,
- // return its block_num
+ // Match against the `next_cond` to see if it represents:
+ // - A TTD (merge)
+ // - A timestamp-based fork
match next_cond {
- ForkCondition::TTD { fork_block, .. } => {
- // handle Sepolia merge netsplit case
- if fork_block.is_some() {
- return *fork_block
- }
- // ensure curr_cond is indeed ForkCondition::Block and return block_num
- if let ForkCondition::Block(block_num) = curr_cond {
- return Some(block_num)
- }
- }
- ForkCondition::Timestamp(_) => {
- // ensure curr_cond is indeed ForkCondition::Block and return block_num
+ // If the next fork is TTD and specifies a fork block, return that block
+ // number
+ ForkCondition::TTD { fork_block: Some(block), .. } => return Some(*block),
+
+ // If the next fork is TTD without a specific block or is timestamp-based,
+ // return the block number of the current condition if it is block-based.
+ ForkCondition::TTD { ..
} | ForkCondition::Timestamp(_) => { + // Check if `curr_cond` is a block-based fork and return its block number if + // true. if let ForkCondition::Block(block_num) = curr_cond { - return Some(block_num) + return Some(block_num); } } ForkCondition::Block(_) | ForkCondition::Never => continue, @@ -614,6 +622,7 @@ impl From for ChainSpec { (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time), (EthereumHardfork::Prague.boxed(), genesis.config.prague_time), + (EthereumHardfork::Osaka.boxed(), genesis.config.osaka_time), ]; let mut time_hardforks = time_hardfork_opts @@ -861,6 +870,13 @@ impl ChainSpecBuilder { self } + /// Enable Osaka at genesis. + pub fn osaka_activated(mut self) -> Self { + self = self.prague_activated(); + self.hardforks.insert(EthereumHardfork::Osaka, ForkCondition::Timestamp(0)); + self + } + /// Build the resulting [`ChainSpec`]. /// /// # Panics @@ -938,6 +954,7 @@ mod tests { use alloy_chains::Chain; use alloy_genesis::{ChainConfig, GenesisAccount}; use alloy_primitives::{b256, hex}; + use alloy_trie::EMPTY_ROOT_HASH; use reth_ethereum_forks::{ForkCondition, ForkHash, ForkId, Head}; use reth_trie_common::TrieAccount; diff --git a/crates/cli/cli/src/lib.rs b/crates/cli/cli/src/lib.rs index f7bf716ea37..e2c55057a48 100644 --- a/crates/cli/cli/src/lib.rs +++ b/crates/cli/cli/src/lib.rs @@ -15,6 +15,7 @@ use std::{borrow::Cow, ffi::OsString}; /// The chainspec module defines the different chainspecs that can be used by the node. pub mod chainspec; +use crate::chainspec::ChainSpecParser; /// Reth based node cli. /// @@ -23,6 +24,9 @@ pub mod chainspec; /// It provides commonly used functionality for running commands and information about the CL, such /// as the name and version. pub trait RethCli: Sized { + /// The associated `ChainSpecParser` type + type ChainSpecParser: ChainSpecParser; + /// The name of the implementation, eg. `reth`, `op-reth`, etc. 
fn name(&self) -> Cow<'static, str>; diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index e307859dfd8..2220efda5c6 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -17,6 +17,7 @@ reth-cli.workspace = true reth-ethereum-cli.workspace = true reth-cli-runner.workspace = true reth-cli-util.workspace = true +reth-codecs.workspace = true reth-config.workspace = true reth-consensus.workspace = true reth-db = { workspace = true, features = ["mdbx"] } @@ -31,6 +32,7 @@ reth-fs-util.workspace = true reth-network = { workspace = true, features = ["serde"] } reth-network-p2p.workspace = true reth-network-peers = { workspace = true, features = ["secp256k1"] } +reth-node-api.workspace = true reth-node-builder.workspace = true reth-node-core.workspace = true reth-node-events.workspace = true @@ -38,15 +40,20 @@ reth-node-metrics.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true +reth-prune-types = { workspace = true, optional = true } reth-stages.workspace = true +reth-stages-types = { workspace = true, optional = true } reth-static-file-types = { workspace = true, features = ["clap"] } reth-static-file.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-trie-db = { workspace = true, features = ["metrics"] } +reth-trie-common = { workspace = true, optional = true } # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-rlp.workspace = true +alloy-consensus.workspace = true itertools.workspace = true futures.workspace = true @@ -88,10 +95,24 @@ reth-discv4.workspace = true [features] default = [] -dev = [ +arbitrary = [ "dep:proptest", "dep:arbitrary", "dep:proptest-arbitrary-interop", "reth-primitives/arbitrary", "reth-db-api/arbitrary", + "reth-eth-wire/arbitrary", + "reth-db/arbitrary", + "reth-chainspec/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "reth-codecs/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", + "reth-trie-common/test-utils", + "reth-codecs/arbitrary", + "reth-prune-types?/arbitrary", + "reth-stages-types?/arbitrary", + "reth-trie-common?/arbitrary", + "alloy-consensus/arbitrary", ] diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 956a63a5aa0..e206715fc01 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -2,10 +2,10 @@ use alloy_primitives::B256; use clap::Parser; -use reth_beacon_consensus::EthBeaconConsensus; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::EthChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_config::{config::EtlConfig, Config}; +use reth_consensus::noop::NoopConsensus; use reth_db::{init_db, open_db_read_only, DatabaseEnv}; use reth_db_common::init::init_genesis; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; @@ -15,7 +15,11 @@ use reth_node_core::{ args::{DatabaseArgs, DatadirArgs}, dirs::{ChainPath, DataDirPath}, }; -use reth_provider::{providers::StaticFileProvider, ProviderFactory, StaticFileProviderFactory}; +use reth_primitives::EthPrimitives; +use reth_provider::{ + providers::{NodeTypesForProvider, StaticFileProvider}, + ProviderFactory, StaticFileProviderFactory, +}; use reth_stages::{sets::DefaultStages, Pipeline, PipelineTarget}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; @@ -50,13 +54,13 @@ pub struct EnvironmentArgs { pub 
db: DatabaseArgs, }
-impl> EnvironmentArgs {
+impl EnvironmentArgs {
/// Initializes environment according to [`AccessRights`] and returns an instance of /// [`Environment`].
- pub fn init>(
- &self,
- access: AccessRights,
- ) -> eyre::Result> {
+ pub fn init(&self, access: AccessRights) -> eyre::Result>
+ where
+ C: ChainSpecParser,
+ {
let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain()); let db_path = data_dir.db(); let sf_path = data_dir.static_files(); @@ -105,13 +109,16 @@ impl> Environmen /// If it's a read-write environment and an issue is found, it will attempt to heal (including a /// pipeline unwind). Otherwise, it will print out a warning, advising the user to restart the /// node to heal.
- fn create_provider_factory>(
+ fn create_provider_factory(
&self, config: &Config, db: Arc,
- static_file_provider: StaticFileProvider,
- ) -> eyre::Result>>> {
- let has_receipt_pruning = config.prune.as_ref().map_or(false, |a| a.has_receipts_pruning());
+ static_file_provider: StaticFileProvider,
+ ) -> eyre::Result>>>
+ where
+ C: ChainSpecParser,
+ {
+ let has_receipt_pruning = config.prune.as_ref().is_some_and(|a| a.has_receipts_pruning());
let prune_modes = config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(); let factory = ProviderFactory::>>::new( @@ -126,7 +133,7 @@ impl> Environmen .static_file_provider() .check_consistency(&factory.provider()?, has_receipt_pruning)? {
- if factory.db_ref().is_read_only() {
+ if factory.db_ref().is_read_only()? {
warn!(target: "reth::cli", ?unwind_target, "Inconsistent storage. Restart node to heal."); return Ok(factory) } @@ -144,10 +151,10 @@ impl> Environmen .add_stages(DefaultStages::new( factory.clone(), tip_rx,
- Arc::new(EthBeaconConsensus::new(self.chain.clone())),
+ Arc::new(NoopConsensus::default()),
NoopHeaderDownloader::default(), NoopBodiesDownloader::default(),
- NoopBlockExecutorProvider::default(),
+ NoopBlockExecutorProvider::::default(),
config.stages.clone(), prune_modes.clone(), )) @@ -188,3 +195,14 @@ impl AccessRights { matches!(self, Self::RW) } }
+
+/// Helper trait with a common set of requirements for the
+/// [`NodeTypes`](reth_node_builder::NodeTypes) in CLI.
+pub trait CliNodeTypes: + NodeTypesWithEngine + NodeTypesForProvider +{ +} +impl CliNodeTypes for N where + N: NodeTypesWithEngine + NodeTypesForProvider +{ +} diff --git a/crates/cli/commands/src/db/checksum.rs b/crates/cli/commands/src/db/checksum.rs index 60ec09c9606..76d92962f72 100644 --- a/crates/cli/commands/src/db/checksum.rs +++ b/crates/cli/commands/src/db/checksum.rs @@ -1,12 +1,15 @@ -use crate::db::get::{maybe_json_value_parser, table_key}; +use crate::{ + common::CliNodeTypes, + db::get::{maybe_json_value_parser, table_key}, +}; use ahash::RandomState; use clap::Parser; use reth_chainspec::EthereumHardforks; use reth_db::{DatabaseEnv, RawKey, RawTable, RawValue, TableViewer, Tables}; use reth_db_api::{cursor::DbCursorRO, table::Table, transaction::DbTx}; use reth_db_common::DbTool; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; -use reth_provider::providers::ProviderNodeTypes; +use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; +use reth_provider::{providers::ProviderNodeTypes, DBProvider}; use std::{ hash::{BuildHasher, Hasher}, sync::Arc, @@ -36,7 +39,7 @@ pub struct Command { impl Command { /// Execute `db checksum` command - pub fn execute>( + pub fn execute>( self, tool: &DbTool>>, ) -> eyre::Result<()> { @@ -79,17 +82,17 @@ impl TableViewer<(u64, Duration)> for ChecksumViewer<'_, N let mut cursor = tx.cursor_read::>()?; let walker = match (self.start_key.as_deref(), self.end_key.as_deref()) { (Some(start), Some(end)) => { - let start_key = table_key::(start).map(RawKey::::new)?; - let end_key = table_key::(end).map(RawKey::::new)?; + let start_key = table_key::(start).map(RawKey::new)?; + let end_key = table_key::(end).map(RawKey::new)?; cursor.walk_range(start_key..=end_key)? } (None, Some(end)) => { - let end_key = table_key::(end).map(RawKey::::new)?; + let end_key = table_key::(end).map(RawKey::new)?; cursor.walk_range(..=end_key)? } (Some(start), None) => { - let start_key = table_key::(start).map(RawKey::::new)?; + let start_key = table_key::(start).map(RawKey::new)?; cursor.walk_range(start_key..)? 
} (None, None) => cursor.walk_range(..)?, diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 4006d1660aa..13b7b70347e 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -1,13 +1,16 @@ +use alloy_consensus::Header; use alloy_primitives::{hex, BlockHash}; use clap::Parser; use reth_db::{ - static_file::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask, ReceiptMask, TransactionMask}, + static_file::{ + ColumnSelectorOne, ColumnSelectorTwo, HeaderWithHashMask, ReceiptMask, TransactionMask, + }, tables, RawKey, RawTable, Receipts, TableViewer, Transactions, }; use reth_db_api::table::{Decompress, DupSort, Table}; use reth_db_common::DbTool; +use reth_node_api::{ReceiptTy, TxTy}; use reth_node_builder::NodeTypesWithDB; -use reth_primitives::Header; use reth_provider::{providers::ProviderNodeTypes, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; use tracing::error; @@ -61,16 +64,14 @@ impl Command { Subcommand::StaticFile { segment, key, raw } => { let (key, mask): (u64, _) = match segment { StaticFileSegment::Headers => { - (table_key::(&key)?, >::MASK) + (table_key::(&key)?, >::MASK) + } + StaticFileSegment::Transactions => { + (table_key::(&key)?, >>::MASK) + } + StaticFileSegment::Receipts => { + (table_key::(&key)?, >>::MASK) } - StaticFileSegment::Transactions => ( - table_key::(&key)?, - ::Value>>::MASK, - ), - StaticFileSegment::Receipts => ( - table_key::(&key)?, - ::Value>>::MASK, - ), }; let content = tool.provider_factory.static_file_provider().find_static_file( @@ -128,12 +129,12 @@ impl Command { /// Get an instance of key for given table pub(crate) fn table_key(key: &str) -> Result { - serde_json::from_str::(key).map_err(|e| eyre::eyre!(e)) + serde_json::from_str(key).map_err(|e| eyre::eyre!(e)) } /// Get an instance of subkey for given dupsort table fn table_subkey(subkey: Option<&str>) -> Result { - serde_json::from_str::(subkey.unwrap_or_default()).map_err(|e| eyre::eyre!(e)) + serde_json::from_str(subkey.unwrap_or_default()).map_err(|e| eyre::eyre!(e)) } struct GetValueViewer<'a, N: NodeTypesWithDB> { diff --git a/crates/cli/commands/src/db/mod.rs b/crates/cli/commands/src/db/mod.rs index e1a9a90bacc..e80b51160e2 100644 --- a/crates/cli/commands/src/db/mod.rs +++ b/crates/cli/commands/src/db/mod.rs @@ -1,10 +1,9 @@ -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::{Parser, Subcommand}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_db::version::{get_db_version, DatabaseVersionError, DB_VERSION}; use reth_db_common::DbTool; -use reth_node_builder::NodeTypesWithEngine; use std::io::{self, Write}; mod checksum; @@ -65,9 +64,7 @@ macro_rules! 
db_ro_exec { impl> Command { /// Execute `db` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { let data_dir = self.env.datadir.clone().resolve_datadir(self.env.chain.chain()); let db_path = data_dir.db(); let static_files_path = data_dir.static_files(); diff --git a/crates/cli/commands/src/db/stats.rs b/crates/cli/commands/src/db/stats.rs index ac36b866b07..71ea995800f 100644 --- a/crates/cli/commands/src/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -1,4 +1,4 @@ -use crate::db::checksum::ChecksumViewer; +use crate::{common::CliNodeTypes, db::checksum::ChecksumViewer}; use clap::Parser; use comfy_table::{Cell, Row, Table as ComfyTable}; use eyre::WrapErr; @@ -9,7 +9,7 @@ use reth_db::{mdbx, static_file::iter_static_files, DatabaseEnv, TableViewer, Ta use reth_db_api::database::Database; use reth_db_common::DbTool; use reth_fs_util as fs; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter, NodeTypesWithEngine}; +use reth_node_builder::{NodePrimitives, NodeTypesWithDB, NodeTypesWithDBAdapter}; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::providers::{ProviderNodeTypes, StaticFileProvider}; use reth_static_file_types::SegmentRangeInclusive; @@ -38,7 +38,7 @@ pub struct Command { impl Command { /// Execute `db stats` command - pub fn execute>( + pub fn execute>( self, data_dir: ChainPath, tool: &DbTool>>, @@ -49,7 +49,7 @@ impl Command { println!("\n"); } - let static_files_stats_table = self.static_files_stats_table(data_dir)?; + let static_files_stats_table = self.static_files_stats_table::(data_dir)?; println!("{static_files_stats_table}"); println!("\n"); @@ -143,7 +143,7 @@ impl Command { Ok(table) } - fn static_files_stats_table( + fn static_files_stats_table( &self, data_dir: ChainPath, ) -> eyre::Result { @@ -173,7 +173,8 @@ impl Command { } let static_files = iter_static_files(data_dir.static_files())?; - let static_file_provider = StaticFileProvider::read_only(data_dir.static_files(), false)?; + let static_file_provider = + StaticFileProvider::::read_only(data_dir.static_files(), false)?; let mut total_data_size = 0; let mut total_index_size = 0; diff --git a/crates/cli/commands/src/db/tui.rs b/crates/cli/commands/src/db/tui.rs index 240ca376970..1a9fae7f891 100644 --- a/crates/cli/commands/src/db/tui.rs +++ b/crates/cli/commands/src/db/tui.rs @@ -365,7 +365,7 @@ where .map(|(i, k)| { ListItem::new(format!("[{:0>width$}]: {k:?}", i + app.skip, width = key_length)) }) - .collect::>>(); + .collect::>(); let key_list = List::new(formatted_keys) .block(Block::default().borders(Borders::ALL).title(format!( diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index 6b750d32a3d..dc99ae7f98d 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -1,5 +1,5 @@ //! Command that initializes the node by importing a chain from a file. 
-use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_primitives::B256; use clap::Parser; use futures::{Stream, StreamExt}; @@ -20,7 +20,6 @@ use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; -use reth_node_builder::NodeTypesWithEngine; use reth_node_core::version::SHORT_VERSION; use reth_node_events::node::NodeEvent; use reth_provider::{ @@ -60,8 +59,8 @@ impl> ImportComm /// Execute `import` command pub async fn execute(self, executor: F) -> eyre::Result<()> where - N: NodeTypesWithEngine, - E: BlockExecutorProvider, + N: CliNodeTypes, + E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); @@ -168,9 +167,9 @@ pub fn build_import_pipeline( executor: E, ) -> eyre::Result<(Pipeline, impl Stream)> where - N: ProviderNodeTypes, + N: ProviderNodeTypes + CliNodeTypes, C: Consensus + 'static, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { if !file_client.has_canonical_blocks() { eyre::bail!("unable to import non canonical blocks"); @@ -203,10 +202,11 @@ where let max_block = file_client.max_block().unwrap_or(0); - let pipeline = Pipeline::::builder() + let pipeline = Pipeline::builder() .with_tip_sender(tip_tx) // we want to sync all blocks the file client provides or 0 if empty .with_max_block(max_block) + .with_fail_on_unwind(true) .add_stages( DefaultStages::new( provider_factory.clone(), diff --git a/crates/cli/commands/src/init_cmd.rs b/crates/cli/commands/src/init_cmd.rs index 5fde9ac0d0b..83f471d629d 100644 --- a/crates/cli/commands/src/init_cmd.rs +++ b/crates/cli/commands/src/init_cmd.rs @@ -1,10 +1,9 @@ //! Command that initializes the node from a genesis file. -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; -use reth_node_builder::NodeTypesWithEngine; use reth_provider::BlockHashReader; use tracing::info; @@ -17,9 +16,7 @@ pub struct InitCommand { impl> InitCommand { /// Execute the `init` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { info!(target: "reth::cli", "reth init starting"); let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; diff --git a/crates/cli/commands/src/init_state.rs b/crates/cli/commands/src/init_state.rs deleted file mode 100644 index 16e99f8fe97..00000000000 --- a/crates/cli/commands/src/init_state.rs +++ /dev/null @@ -1,79 +0,0 @@ -//! Command that initializes the node from a genesis file. - -use crate::common::{AccessRights, Environment, EnvironmentArgs}; -use alloy_primitives::B256; -use clap::Parser; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_cli::chainspec::ChainSpecParser; -use reth_config::config::EtlConfig; -use reth_db_common::init::init_from_state_dump; -use reth_node_builder::NodeTypesWithEngine; -use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; - -use std::{fs::File, io::BufReader, path::PathBuf}; -use tracing::info; - -/// Initializes the database with the genesis block. -#[derive(Debug, Parser)] -pub struct InitStateCommand { - #[command(flatten)] - pub env: EnvironmentArgs, - - /// JSONL file with state dump. 
- ///
- /// Must contain accounts in following format, additional account fields are ignored. Must
- /// also contain { "root": \ } as first line.
- /// {
- /// "balance": "\",
- /// "nonce": \,
- /// "code": "\",
- /// "storage": {
- /// "\": "\",
- /// ..
- /// },
- /// "address": "\",
- /// }
- ///
- /// Allows init at a non-genesis block. Caution! Blocks must be manually imported up until
- /// and including the non-genesis block to init chain at. See 'import' command.
- #[arg(value_name = "STATE_DUMP_FILE", verbatim_doc_comment)]
- pub state: PathBuf,
-}
-
-impl> InitStateCommand {
- /// Execute the `init` command
- pub async fn execute>(
- self,
- ) -> eyre::Result<()> {
- info!(target: "reth::cli", "Reth init-state starting");
-
- let Environment { config, provider_factory, .. } = self.env.init::(AccessRights::RW)?;
-
- info!(target: "reth::cli", "Initiating state dump");
-
- let hash = init_at_state(self.state, provider_factory, config.stages.etl)?;
-
- info!(target: "reth::cli", hash = ?hash, "Genesis block written");
- Ok(())
- }
-}
-
-/// Initialize chain with state at specific block, from a file with state dump.
-pub fn init_at_state(
- state_dump_path: PathBuf,
- factory: ProviderFactory,
- etl_config: EtlConfig,
-) -> eyre::Result {
- info!(target: "reth::cli",
- path=?state_dump_path,
- "Opening state dump");
-
- let file = File::open(state_dump_path)?;
- let reader = BufReader::new(file);
-
- let provider_rw = factory.provider_rw()?;
- let hash = init_from_state_dump(reader, &provider_rw.0, etl_config)?;
- provider_rw.commit()?;
-
- Ok(hash)
-}
diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs new file mode 100644 index 00000000000..bdade252a66 --- /dev/null +++ b/crates/cli/commands/src/init_state/mod.rs @@ -0,0 +1,127 @@
+//! Command that initializes the node from a genesis file.
+
+use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
+use alloy_primitives::{B256, U256};
+use clap::Parser;
+use reth_chainspec::{EthChainSpec, EthereumHardforks};
+use reth_cli::chainspec::ChainSpecParser;
+use reth_db_common::init::init_from_state_dump;
+use reth_primitives::SealedHeader;
+use reth_provider::{
+ BlockNumReader, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter,
+};
+
+use std::{io::BufReader, path::PathBuf, str::FromStr};
+use tracing::info;
+
+pub mod without_evm;
+
+/// Initializes the database with the genesis block.
+#[derive(Debug, Parser)]
+pub struct InitStateCommand {
+ #[command(flatten)]
+ pub env: EnvironmentArgs,
+
+ /// JSONL file with state dump.
+ ///
+ /// Must contain accounts in the following format, additional account fields are ignored. Must
+ /// also contain { "root": \ } as first line.
+ /// {
+ /// "balance": "\",
+ /// "nonce": \,
+ /// "code": "\",
+ /// "storage": {
+ /// "\": "\",
+ /// ..
+ /// },
+ /// "address": "\",
+ /// }
+ ///
+ /// Allows init at a non-genesis block. Caution! Blocks must be manually imported up until
+ /// and including the non-genesis block to init chain at. See 'import' command.
+ #[arg(value_name = "STATE_DUMP_FILE", verbatim_doc_comment)]
+ pub state: PathBuf,
+
+ /// Specifies whether to initialize the state without relying on EVM historical data.
+ ///
+ /// When enabled, and before inserting the state, it creates a dummy chain up to the last EVM
+ /// block specified. It then appends the first provided block.
+ ///
+ /// - **Note**: **Do not** import receipts and blocks beforehand, or this will fail or be
+ /// ignored.
+ #[arg(long, default_value = "false")]
+ pub without_evm: bool,
+
+ /// Header file containing the header in an RLP encoded format.
+ #[arg(long, value_name = "HEADER_FILE", verbatim_doc_comment)]
+ pub header: Option,
+
+ /// Total difficulty of the header.
+ #[arg(long, value_name = "TOTAL_DIFFICULTY", verbatim_doc_comment)]
+ pub total_difficulty: Option,
+
+ /// Hash of the header.
+ #[arg(long, value_name = "HEADER_HASH", verbatim_doc_comment)]
+ pub header_hash: Option,
+}
+
+impl> InitStateCommand {
+ /// Execute the `init-state` command
+ pub async fn execute>(self) -> eyre::Result<()> {
+ info!(target: "reth::cli", "Reth init-state starting");
+
+ let Environment { config, provider_factory, .. } = self.env.init::(AccessRights::RW)?;
+
+ let static_file_provider = provider_factory.static_file_provider();
+ let provider_rw = provider_factory.database_provider_rw()?;
+
+ if self.without_evm {
+ // ensure header, total difficulty and header hash are provided
+ let header = self.header.ok_or_else(|| eyre::eyre!("Header file must be provided"))?;
+ let header = without_evm::read_header_from_file(header)?;
+
+ let header_hash =
+ self.header_hash.ok_or_else(|| eyre::eyre!("Header hash must be provided"))?;
+ let header_hash = B256::from_str(&header_hash)?;
+
+ let total_difficulty = self
+ .total_difficulty
+ .ok_or_else(|| eyre::eyre!("Total difficulty must be provided"))?;
+ let total_difficulty = U256::from_str(&total_difficulty)?;
+
+ let last_block_number = provider_rw.last_block_number()?;
+
+ if last_block_number == 0 {
+ without_evm::setup_without_evm(
+ &provider_rw,
+ SealedHeader::new(header, header_hash),
+ total_difficulty,
+ )?;
+
+ // SAFETY: it's safe to commit static files, since in the event of a crash, they
+ // will be unwound according to database checkpoints.
+ //
+ // Necessary to commit, so the header is accessible to provider_rw and
+ // init_from_state_dump
+ static_file_provider.commit()?;
+ } else if last_block_number > 0 && last_block_number < header.number {
+ return Err(eyre::eyre!(
+ "Data directory should be empty when calling init-state with --without-evm."
+ )); + } + } + + info!(target: "reth::cli", "Initiating state dump"); + + let reader = BufReader::new(reth_fs_util::open(self.state)?); + + let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?; + + provider_rw.commit()?; + + info!(target: "reth::cli", hash = ?hash, "Genesis block written"); + Ok(()) + } +} diff --git a/crates/optimism/cli/src/commands/init_state/bedrock.rs b/crates/cli/commands/src/init_state/without_evm.rs similarity index 54% rename from crates/optimism/cli/src/commands/init_state/bedrock.rs rename to crates/cli/commands/src/init_state/without_evm.rs index efff065e505..f8f72709a7e 100644 --- a/crates/optimism/cli/src/commands/init_state/bedrock.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -1,34 +1,53 @@ use alloy_primitives::{BlockNumber, B256, U256}; -use reth_optimism_primitives::bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD}; -use reth_primitives::{ - BlockBody, Header, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, -}; +use alloy_rlp::Decodable; + +use alloy_consensus::{BlockHeader, Header}; +use reth_codecs::Compact; +use reth_node_builder::NodePrimitives; +use reth_primitives::{SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment}; use reth_provider::{ - providers::StaticFileProvider, BlockWriter, StageCheckpointWriter, StaticFileWriter, + providers::StaticFileProvider, BlockWriter, StageCheckpointWriter, StaticFileProviderFactory, + StaticFileWriter, StorageLocation, }; use reth_stages::{StageCheckpoint, StageId}; + +use std::{fs::File, io::Read, path::PathBuf}; use tracing::info; -/// Creates a dummy chain (with no transactions) up to the last OVM block and appends the -/// first valid Bedrock block. -pub(crate) fn setup_op_mainnet_without_ovm( +/// Reads the header RLP from a file and returns the Header. +pub(crate) fn read_header_from_file(path: PathBuf) -> Result { + let mut file = File::open(path)?; + let mut buf = Vec::new(); + file.read_to_end(&mut buf)?; + + let header = Header::decode(&mut &buf[..])?; + Ok(header) +} + +/// Creates a dummy chain (with no transactions) up to the last EVM block and appends the +/// first valid block. 
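(Editorial aside, not part of the patch: the new `--header` input is consumed by `read_header_from_file` below as raw RLP bytes. A minimal sketch of producing such a file, assuming only that `alloy_consensus::Header` implements `alloy_rlp::Encodable`; the header values and the output path are placeholders.)

use alloy_consensus::Header;
use alloy_rlp::Encodable;

fn write_header_file() -> std::io::Result<()> {
    // Placeholder contents; the real file must hold the header of the first
    // block to be imported with `--without-evm`.
    let header = Header { number: 1_000_000, ..Default::default() };
    let mut buf = Vec::new();
    header.encode(&mut buf);
    std::fs::write("header.rlp", &buf)
}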
+pub fn setup_without_evm( provider_rw: &Provider, - static_file_provider: &StaticFileProvider, + header: SealedHeader<::BlockHeader>, + total_difficulty: U256, ) -> Result<(), eyre::Error> where - Provider: StageCheckpointWriter + BlockWriter, + Provider: StaticFileProviderFactory> + + StageCheckpointWriter + + BlockWriter::Block>, { - info!(target: "reth::cli", "Setting up dummy OVM chain before importing state."); + info!(target: "reth::cli", "Setting up dummy EVM chain before importing state."); - // Write OVM dummy data up to `BEDROCK_HEADER - 1` block - append_dummy_chain(static_file_provider, BEDROCK_HEADER.number - 1)?; + let static_file_provider = provider_rw.static_file_provider(); + // Write EVM dummy data up to `header - 1` block + append_dummy_chain(&static_file_provider, header.number() - 1)?; - info!(target: "reth::cli", "Appending Bedrock block."); + info!(target: "reth::cli", "Appending first valid block."); - append_bedrock_block(provider_rw, static_file_provider)?; + append_first_block(provider_rw, &header, total_difficulty)?; for stage in StageId::ALL { - provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(BEDROCK_HEADER.number))?; + provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(header.number()))?; } info!(target: "reth::cli", "Set up finished."); @@ -36,38 +55,36 @@ where Ok(()) } -/// Appends the first bedrock block. +/// Appends the first block. /// /// By appending it, static file writer also verifies that all segments are at the same /// height. -fn append_bedrock_block( - provider_rw: impl BlockWriter, - sf_provider: &StaticFileProvider, -) -> Result<(), eyre::Error> { +fn append_first_block( + provider_rw: &Provider, + header: &SealedHeader<::BlockHeader>, + total_difficulty: U256, +) -> Result<(), eyre::Error> +where + Provider: BlockWriter::Block> + + StaticFileProviderFactory>, +{ provider_rw.insert_block( - SealedBlockWithSenders::new( - SealedBlock::new( - SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), - BlockBody::default(), - ), - vec![], - ) - .expect("no senders or txes"), + SealedBlockWithSenders::new(SealedBlock::new(header.clone(), Default::default()), vec![]) + .expect("no senders or txes"), + StorageLocation::Database, )?; + let sf_provider = provider_rw.static_file_provider(); + sf_provider.latest_writer(StaticFileSegment::Headers)?.append_header( - &BEDROCK_HEADER, - BEDROCK_HEADER_TTD, - &BEDROCK_HEADER_HASH, + header, + total_difficulty, + &header.hash(), )?; - sf_provider - .latest_writer(StaticFileSegment::Receipts)? - .increment_block(BEDROCK_HEADER.number)?; + sf_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(header.number())?; - sf_provider - .latest_writer(StaticFileSegment::Transactions)? - .increment_block(BEDROCK_HEADER.number)?; + sf_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(header.number())?; Ok(()) } @@ -77,8 +94,8 @@ fn append_bedrock_block( /// * Headers: It will push an empty block. /// * Transactions: It will not push any tx, only increments the end block range. /// * Receipts: It will not push any receipt, only increments the end block range. 
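(Editorial aside, not part of the patch: the bullets above are the contract of `append_dummy_chain`, whose body continues past this hunk and streams the header writes through an mpsc channel. A sequential sketch of the same contract, reusing the writer calls visible in `append_first_block`; the zeroed difficulty and hash placeholders are assumptions.)

fn append_dummy_chain_sketch<N: NodePrimitives<BlockHeader = Header>>(
    sf_provider: &StaticFileProvider<N>,
    target_height: BlockNumber,
) -> Result<(), eyre::Error> {
    // Headers: push one empty header per block up to the target height.
    let mut headers = sf_provider.latest_writer(StaticFileSegment::Headers)?;
    for number in 1..=target_height {
        headers.append_header(&Header { number, ..Default::default() }, U256::ZERO, &B256::ZERO)?;
    }
    // Transactions and receipts: push nothing, only advance the block range.
    let mut transactions = sf_provider.latest_writer(StaticFileSegment::Transactions)?;
    let mut receipts = sf_provider.latest_writer(StaticFileSegment::Receipts)?;
    for number in 1..=target_height {
        transactions.increment_block(number)?;
        receipts.increment_block(number)?;
    }
    Ok(())
}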
-fn append_dummy_chain( - sf_provider: &StaticFileProvider, +fn append_dummy_chain>( + sf_provider: &StaticFileProvider, target_height: BlockNumber, ) -> Result<(), eyre::Error> { let (tx, rx) = std::sync::mpsc::channel(); diff --git a/crates/cli/commands/src/lib.rs b/crates/cli/commands/src/lib.rs index 33a38ddbc01..166ea438fb9 100644 --- a/crates/cli/commands/src/lib.rs +++ b/crates/cli/commands/src/lib.rs @@ -20,7 +20,7 @@ pub mod p2p; pub mod prune; pub mod recover; pub mod stage; -#[cfg(feature = "dev")] +#[cfg(feature = "arbitrary")] pub mod test_vectors; pub use node::NodeCommand; diff --git a/crates/cli/commands/src/prune.rs b/crates/cli/commands/src/prune.rs index 7dbb66fc2fa..a5b186bc138 100644 --- a/crates/cli/commands/src/prune.rs +++ b/crates/cli/commands/src/prune.rs @@ -1,9 +1,8 @@ //! Command that runs pruning without any limits. -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; -use reth_node_builder::NodeTypesWithEngine; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; use tracing::info; @@ -17,9 +16,7 @@ pub struct PruneCommand { impl> PruneCommand { /// Execute the `prune` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { let Environment { config, provider_factory, .. } = self.env.init::(AccessRights::RW)?; let prune_config = config.prune.unwrap_or_default(); @@ -27,7 +24,8 @@ impl> PruneComma info!(target: "reth::cli", "Copying data from database to static files..."); let static_file_producer = StaticFileProducer::new(provider_factory.clone(), prune_config.segments.clone()); - let lowest_static_file_height = static_file_producer.lock().copy_to_static_files()?.min(); + let lowest_static_file_height = + static_file_producer.lock().copy_to_static_files()?.min_block_num(); info!(target: "reth::cli", ?lowest_static_file_height, "Copied data from database to static files"); // Delete data which has been copied to static files. diff --git a/crates/cli/commands/src/recover/mod.rs b/crates/cli/commands/src/recover/mod.rs index 3216449e49b..a2d94360227 100644 --- a/crates/cli/commands/src/recover/mod.rs +++ b/crates/cli/commands/src/recover/mod.rs @@ -1,10 +1,10 @@ //! `reth recover` command. 
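(Editorial aside on the `prune` hunk above, before the `recover` diff continues: the command is a strict two-phase operation. A compressed sketch of the ordering, with the pruner construction elided because it falls outside the hunk.)

// Phase 1: copy prunable data from MDBX into static files; `min_block_num()`
// is the lowest height now fully backed by static files.
let producer = StaticFileProducer::new(provider_factory.clone(), prune_config.segments.clone());
let lowest_static_file_height = producer.lock().copy_to_static_files()?.min_block_num();
// Phase 2: only after the copy is it safe to delete those rows from the
// database (done by the pruner built from `PrunerBuilder`, not shown here).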
+use crate::common::CliNodeTypes; use clap::{Parser, Subcommand}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; -use reth_node_builder::NodeTypesWithEngine; mod storage_tries; @@ -24,7 +24,7 @@ pub enum Subcommands { impl> Command { /// Execute `recover` command - pub async fn execute>( + pub async fn execute>( self, ctx: CliContext, ) -> eyre::Result<()> { diff --git a/crates/cli/commands/src/recover/storage_tries.rs b/crates/cli/commands/src/recover/storage_tries.rs index 794058fac1d..f879c393c6b 100644 --- a/crates/cli/commands/src/recover/storage_tries.rs +++ b/crates/cli/commands/src/recover/storage_tries.rs @@ -1,4 +1,4 @@ -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; @@ -8,7 +8,6 @@ use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRW}, transaction::DbTx, }; -use reth_node_builder::NodeTypesWithEngine; use reth_provider::{BlockNumReader, HeaderProvider, ProviderError}; use reth_trie::StateRoot; use reth_trie_db::DatabaseStateRoot; @@ -23,7 +22,7 @@ pub struct Command { impl> Command { /// Execute `storage-tries` recovery command - pub async fn execute>( + pub async fn execute>( self, _ctx: CliContext, ) -> eyre::Result<()> { diff --git a/crates/cli/commands/src/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs index 9e0396404b3..b93ab1a3c40 100644 --- a/crates/cli/commands/src/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -1,8 +1,8 @@ //! Database debugging tool -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use itertools::Itertools; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::EthChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_db::{mdbx::tx::Tx, static_file::iter_static_files, tables, DatabaseError}; use reth_db_api::transaction::{DbTx, DbTxMut}; @@ -10,9 +10,10 @@ use reth_db_common::{ init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}, DbTool, }; -use reth_node_builder::NodeTypesWithEngine; use reth_node_core::args::StageEnum; -use reth_provider::{writer::UnifiedStorageWriter, StaticFileProviderFactory}; +use reth_provider::{ + writer::UnifiedStorageWriter, DatabaseProviderFactory, StaticFileProviderFactory, +}; use reth_prune::PruneSegment; use reth_stages::StageId; use reth_static_file_types::StaticFileSegment; @@ -26,15 +27,14 @@ pub struct Command { stage: StageEnum, } -impl> Command { +impl Command { /// Execute `db` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute(self) -> eyre::Result<()> + where + C: ChainSpecParser, + { let Environment { provider_factory, .. 
} = self.env.init::(AccessRights::RW)?; - let static_file_provider = provider_factory.static_file_provider(); - let tool = DbTool::new(provider_factory)?; let static_file_segment = match self.stage { @@ -60,7 +60,7 @@ impl> Command } } - let provider_rw = tool.provider_factory.provider_rw()?; + let provider_rw = tool.provider_factory.database_provider_rw()?; let tx = provider_rw.tx_ref(); match self.stage { @@ -71,7 +71,7 @@ impl> Command tx.clear::()?; reset_stage_checkpoint(tx, StageId::Headers)?; - insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; + insert_genesis_header(&provider_rw, &self.env.chain)?; } StageEnum::Bodies => { tx.clear::()?; @@ -81,10 +81,9 @@ impl> Command tx.clear::()?; tx.clear::()?; tx.clear::()?; - tx.clear::()?; reset_stage_checkpoint(tx, StageId::Bodies)?; - insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; + insert_genesis_header(&provider_rw, &self.env.chain)?; } StageEnum::Senders => { tx.clear::()?; @@ -105,7 +104,7 @@ impl> Command reset_stage_checkpoint(tx, StageId::Execution)?; let alloc = &self.env.chain.genesis().alloc; - insert_genesis_state(&provider_rw.0, alloc.iter())?; + insert_genesis_state(&provider_rw, alloc.iter())?; } StageEnum::AccountHashing => { tx.clear::()?; @@ -143,20 +142,20 @@ impl> Command reset_stage_checkpoint(tx, StageId::IndexAccountHistory)?; reset_stage_checkpoint(tx, StageId::IndexStorageHistory)?; - insert_genesis_history(&provider_rw.0, self.env.chain.genesis().alloc.iter())?; + insert_genesis_history(&provider_rw, self.env.chain.genesis().alloc.iter())?; } StageEnum::TxLookup => { tx.clear::()?; reset_prune_checkpoint(tx, PruneSegment::TransactionLookup)?; reset_stage_checkpoint(tx, StageId::TransactionLookup)?; - insert_genesis_header(&provider_rw.0, &static_file_provider, &self.env.chain)?; + insert_genesis_header(&provider_rw, &self.env.chain)?; } } tx.put::(StageId::Finish.to_string(), Default::default())?; - UnifiedStorageWriter::commit_unwind(provider_rw, static_file_provider)?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; Ok(()) } diff --git a/crates/cli/commands/src/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs index 709fc59190d..1460c6bb6f6 100644 --- a/crates/cli/commands/src/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -7,7 +7,8 @@ use reth_db_api::{ }; use reth_db_common::DbTool; use reth_evm::{execute::BlockExecutorProvider, noop::NoopBlockExecutorProvider}; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithDBAdapter}; +use reth_node_api::NodePrimitives; +use reth_node_builder::NodeTypesWithDB; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, @@ -25,8 +26,15 @@ pub(crate) async fn dump_execution_stage( executor: E, ) -> eyre::Result<()> where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: ProviderNodeTypes< + DB = Arc, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, + >, + >, + E: BlockExecutorProvider, { let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; @@ -36,7 +44,7 @@ where if should_run { dry_run( - ProviderFactory::>>::new( + ProviderFactory::::new( Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, @@ -131,7 +139,9 @@ fn import_tables_with_range( /// Dry-run an unwind to FROM block, so we can get the 
`PlainStorageState` and /// `PlainAccountState` safely. There might be some state dependency from an address /// which hasn't been changed in the given range. -fn unwind_and_copy( +fn unwind_and_copy< + N: ProviderNodeTypes>, +>( db_tool: &DbTool, from: u64, tip_block_number: u64, @@ -139,7 +149,8 @@ fn unwind_and_copy( ) -> eyre::Result<()> { let provider = db_tool.provider_factory.database_provider_rw()?; - let mut exec_stage = ExecutionStage::new_with_executor(NoopBlockExecutorProvider::default()); + let mut exec_stage = + ExecutionStage::new_with_executor(NoopBlockExecutorProvider::::default()); exec_stage.unwind( &provider, @@ -168,8 +179,14 @@ fn dry_run( executor: E, ) -> eyre::Result<()> where - N: ProviderNodeTypes, - E: BlockExecutorProvider, + N: ProviderNodeTypes< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, + >, + >, + E: BlockExecutorProvider, { info!(target: "reth::cli", "Executing stage. [dry-run]"); diff --git a/crates/cli/commands/src/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs index 738dcabafa7..97452cee892 100644 --- a/crates/cli/commands/src/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -6,7 +6,6 @@ use eyre::Result; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; -use reth_node_builder::NodeTypesWithDBAdapter; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, @@ -15,7 +14,7 @@ use reth_provider::{ use reth_stages::{stages::AccountHashingStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; -pub(crate) async fn dump_hashing_account_stage( +pub(crate) async fn dump_hashing_account_stage>>( db_tool: &DbTool, from: BlockNumber, to: BlockNumber, @@ -37,7 +36,7 @@ pub(crate) async fn dump_hashing_account_stage( if should_run { dry_run( - ProviderFactory::>>::new( + ProviderFactory::::new( Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, diff --git a/crates/cli/commands/src/stage/dump/hashing_storage.rs b/crates/cli/commands/src/stage/dump/hashing_storage.rs index 204c087a234..06b064bc02f 100644 --- a/crates/cli/commands/src/stage/dump/hashing_storage.rs +++ b/crates/cli/commands/src/stage/dump/hashing_storage.rs @@ -5,7 +5,6 @@ use eyre::Result; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; -use reth_node_builder::NodeTypesWithDBAdapter; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, @@ -14,7 +13,7 @@ use reth_provider::{ use reth_stages::{stages::StorageHashingStage, Stage, StageCheckpoint, UnwindInput}; use tracing::info; -pub(crate) async fn dump_hashing_storage_stage( +pub(crate) async fn dump_hashing_storage_stage>>( db_tool: &DbTool, from: u64, to: u64, @@ -27,7 +26,7 @@ pub(crate) async fn dump_hashing_storage_stage( if should_run { dry_run( - ProviderFactory::>>::new( + ProviderFactory::::new( Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, diff --git a/crates/cli/commands/src/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs index f7e9e2fc1af..f0dbb1a1faf 100644 --- a/crates/cli/commands/src/stage/dump/merkle.rs +++ 
b/crates/cli/commands/src/stage/dump/merkle.rs @@ -9,7 +9,7 @@ use reth_db_api::{database::Database, table::TableImporter}; use reth_db_common::DbTool; use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; -use reth_node_builder::NodeTypesWithDBAdapter; +use reth_node_api::NodePrimitives; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{ providers::{ProviderNodeTypes, StaticFileProvider}, @@ -25,13 +25,23 @@ use reth_stages::{ }; use tracing::info; -pub(crate) async fn dump_merkle_stage( +pub(crate) async fn dump_merkle_stage( db_tool: &DbTool, from: BlockNumber, to: BlockNumber, output_datadir: ChainPath, should_run: bool, -) -> Result<()> { +) -> Result<()> +where + N: ProviderNodeTypes< + DB = Arc, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, + >, + >, +{ let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; output_db.update(|tx| { @@ -54,7 +64,7 @@ pub(crate) async fn dump_merkle_stage( if should_run { dry_run( - ProviderFactory::>>::new( + ProviderFactory::::new( Arc::new(output_db), db_tool.chain(), StaticFileProvider::read_write(output_datadir.static_files())?, @@ -68,7 +78,15 @@ pub(crate) async fn dump_merkle_stage( } /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. -fn unwind_and_copy( +fn unwind_and_copy< + N: ProviderNodeTypes< + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + BlockHeader = reth_primitives::Header, + >, + >, +>( db_tool: &DbTool, range: (u64, u64), tip_block_number: u64, @@ -94,7 +112,7 @@ fn unwind_and_copy( // Bring Plainstate to TO (hashing stage execution requires it) let mut exec_stage = ExecutionStage::new( - NoopBlockExecutorProvider::default(), // Not necessary for unwinding. + NoopBlockExecutorProvider::::default(), // Not necessary for unwinding. ExecutionStageThresholds { max_blocks: Some(u64::MAX), max_changes: None, @@ -146,11 +164,10 @@ fn unwind_and_copy( } /// Try to re-execute the stage straight away -fn dry_run( - output_provider_factory: ProviderFactory, - to: u64, - from: u64, -) -> eyre::Result<()> { +fn dry_run(output_provider_factory: ProviderFactory, to: u64, from: u64) -> eyre::Result<()> +where + N: ProviderNodeTypes>, +{ info!(target: "reth::cli", "Executing stage."); let provider = output_provider_factory.database_provider_rw()?; diff --git a/crates/cli/commands/src/stage/dump/mod.rs b/crates/cli/commands/src/stage/dump/mod.rs index 6fd2f23aa0e..9cc0f54dd33 100644 --- a/crates/cli/commands/src/stage/dump/mod.rs +++ b/crates/cli/commands/src/stage/dump/mod.rs @@ -1,5 +1,5 @@ //! 
Database debugging tool -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; @@ -10,7 +10,7 @@ use reth_db_api::{ }; use reth_db_common::DbTool; use reth_evm::execute::BlockExecutorProvider; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; +use reth_node_builder::NodeTypesWithDB; use reth_node_core::{ args::DatadirArgs, dirs::{DataDirPath, PlatformPath}, @@ -92,8 +92,8 @@ impl> Command /// Execute `dump-stage` command pub async fn execute(self, executor: F) -> eyre::Result<()> where - N: NodeTypesWithEngine, - E: BlockExecutorProvider, + N: CliNodeTypes, + E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { let Environment { provider_factory, .. } = self.env.init::(AccessRights::RO)?; diff --git a/crates/cli/commands/src/stage/mod.rs b/crates/cli/commands/src/stage/mod.rs index 562bd73a28d..91ab458daf6 100644 --- a/crates/cli/commands/src/stage/mod.rs +++ b/crates/cli/commands/src/stage/mod.rs @@ -2,12 +2,12 @@ use std::sync::Arc; +use crate::common::CliNodeTypes; use clap::{Parser, Subcommand}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_evm::execute::BlockExecutorProvider; -use reth_node_builder::NodeTypesWithEngine; pub mod drop; pub mod dump; @@ -43,8 +43,8 @@ impl> Command /// Execute `stage` command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where - N: NodeTypesWithEngine, - E: BlockExecutorProvider, + N: CliNodeTypes, + E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { match self.command { diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index 23d6f6f28ac..88a5fa6204e 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -2,7 +2,7 @@ //! //! Stage debugging tool -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_eips::BlockHashOrNumber; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; @@ -11,6 +11,7 @@ use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::config::{HashingConfig, SenderRecoveryConfig, TransactionLookupConfig}; +use reth_db_api::database_metrics::DatabaseMetrics; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, @@ -19,7 +20,6 @@ use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_network::BlockDownloaderProvider; use reth_network_p2p::HeadersClient; -use reth_node_builder::NodeTypesWithEngine; use reth_node_core::{ args::{NetworkArgs, StageEnum}, version::{ @@ -106,8 +106,8 @@ impl> Command /// Execute `stage` command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where - N: NodeTypesWithEngine, - E: BlockExecutorProvider, + N: CliNodeTypes, + E: BlockExecutorProvider, F: FnOnce(Arc) -> E, { // Raise the fd limit of the process. 
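(Editorial aside, not part of the patch: the commands above were made generic over `N: CliNodeTypes` plus an executor factory `F: FnOnce(Arc<ChainSpec>) -> E`, so each CLI can inject its own block executor. A hypothetical call site under those bounds; `EthereumNode` and `EthExecutorProvider::ethereum` are the Ethereum-flavored implementations elsewhere in the tree and are assumed here.)

// The closure receives the parsed chain spec and returns the executor the
// command should run blocks with.
command.execute::<EthereumNode, _, _>(ctx, EthExecutorProvider::ethereum).await?;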
@@ -133,10 +133,20 @@ impl> Command }, ChainSpecInfo { name: provider_factory.chain_spec().chain().to_string() }, ctx.task_executor, - Hooks::new( - provider_factory.db_ref().clone(), - provider_factory.static_file_provider(), - ), + Hooks::builder() + .with_hook({ + let db = provider_factory.db_ref().clone(); + move || db.report_metrics() + }) + .with_hook({ + let sfp = provider_factory.static_file_provider(); + move || { + if let Err(error) = sfp.report_metrics() { + error!(%error, "Failed to report metrics from static file provider"); + } + } + }) + .build(), ); MetricServer::new(config).serve().await?; @@ -329,10 +339,7 @@ impl> Command } if self.commit { - UnifiedStorageWriter::commit_unwind( - provider_rw, - provider_factory.static_file_provider(), - )?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; provider_rw = provider_factory.database_provider_rw()?; } } @@ -355,7 +362,7 @@ impl> Command provider_rw.save_stage_checkpoint(exec_stage.id(), checkpoint)?; } if self.commit { - UnifiedStorageWriter::commit(provider_rw, provider_factory.static_file_provider())?; + UnifiedStorageWriter::commit(provider_rw)?; provider_rw = provider_factory.database_provider_rw()?; } diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index a5c9956c95b..cc5d719d270 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -1,8 +1,8 @@ //! Unwinding a certain block range -use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, B256}; +use alloy_primitives::B256; use clap::{Parser, Subcommand}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -13,11 +13,11 @@ use reth_db::DatabaseEnv; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::args::NetworkArgs; use reth_provider::{ providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, ChainSpecProvider, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, + StorageLocation, }; use reth_prune::PruneModes; use reth_stages::{ @@ -26,7 +26,7 @@ use reth_stages::{ ExecutionStageThresholds, Pipeline, StageSet, }; use reth_static_file::StaticFileProducer; -use std::{ops::RangeInclusive, sync::Arc}; +use std::sync::Arc; use tokio::sync::watch; use tracing::info; @@ -50,21 +50,16 @@ pub struct Command { impl> Command { /// Execute `db stage unwind` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { let Environment { provider_factory, config, .. 
} = self.env.init::(AccessRights::RW)?;
- let range = self.command.unwind_range(provider_factory.clone())?;
- if *range.start() == 0 {
- eyre::bail!("Cannot unwind genesis block")
- }
+ let target = self.command.unwind_target(provider_factory.clone())?;
let highest_static_file_block = provider_factory .static_file_provider() .get_highest_static_files()
- .max()
- .filter(|highest_static_file_block| highest_static_file_block >= range.start());
+ .max_block_num()
+ .filter(|highest_static_file_block| *highest_static_file_block > target);
// Execute a pipeline unwind if the unwind target overlaps the existing static // files. If that's the case, then copy all available data from MDBX to static files, and @@ -78,9 +73,9 @@ impl> Command } if let Some(highest_static_file_block) = highest_static_file_block {
- info!(target: "reth::cli", ?range, ?highest_static_file_block, "Executing a pipeline unwind.");
+ info!(target: "reth::cli", ?target, ?highest_static_file_block, "Executing a pipeline unwind.");
} else {
- info!(target: "reth::cli", ?range, "Executing a pipeline unwind.");
+ info!(target: "reth::cli", ?target, "Executing a pipeline unwind.");
} // This will build an offline-only pipeline if the `offline` flag is enabled @@ -89,34 +84,30 @@ impl> Command // Move all applicable data from database to static files. pipeline.move_to_static_files()?;
- pipeline.unwind((*range.start()).saturating_sub(1), None)?;
+ pipeline.unwind(target, None)?;
} else {
- info!(target: "reth::cli", ?range, "Executing a database unwind.");
+ info!(target: "reth::cli", ?target, "Executing a database unwind.");
let provider = provider_factory.provider_rw()?;
- let _ = provider
- .take_block_and_execution_range(range.clone())
+ provider
+ .remove_block_and_execution_above(target, StorageLocation::Both)
.map_err(|err| eyre::eyre!("Transaction error on unwind: {err}"))?; // update finalized block if needed let last_saved_finalized_block_number = provider.last_finalized_block_number()?;
- let range_min =
- range.clone().min().ok_or(eyre::eyre!("Could not fetch lower range end"))?;
- if last_saved_finalized_block_number.is_none() ||
- Some(range_min) < last_saved_finalized_block_number
- {
- provider.save_finalized_block_number(BlockNumber::from(range_min))?;
+ if last_saved_finalized_block_number.is_none_or(|f| f > target) {
+ provider.save_finalized_block_number(target)?;
} provider.commit()?; }
- info!(target: "reth::cli", range=?range.clone(), count=range.count(), "Unwound blocks");
+ info!(target: "reth::cli", ?target, "Unwound blocks");
Ok(()) }
- fn build_pipeline>(
+ fn build_pipeline + CliNodeTypes>(
self, config: Config, provider_factory: ProviderFactory, @@ -129,7 +120,7 @@ impl> Command let (tip_tx, tip_rx) = watch::channel(B256::ZERO); // Unwinding does not require a valid executor
- let executor = NoopBlockExecutorProvider::default();
+ let executor = NoopBlockExecutorProvider::::default();
let builder = if self.offline { Pipeline::::builder().add_stages( @@ -186,13 +177,11 @@ enum Subcommands { } impl Subcommands {
- /// Returns the block range to unwind.
- ///
- /// This returns an inclusive range: [target..=latest]
- fn unwind_range>>(
+ /// Returns the block to unwind to. The returned block will stay in the database.
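(Editorial aside, not part of the patch: a worked example of the new target semantics, ahead of the function below. Previously `num-blocks` produced the inclusive removal range [target+1..=latest]; now it returns the highest block that is kept.)

// With the chain at block 1_000, `reth stage unwind num-blocks 100` yields
// target = 1_000 - 100 = 900: block 900 stays in the database, blocks
// 901..=1_000 are removed.
let (last, amount) = (1_000u64, 100u64);
assert_eq!(last.saturating_sub(amount), 900);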
+    fn unwind_target>>(
         &self,
         factory: ProviderFactory,
-    ) -> eyre::Result<RangeInclusive<BlockNumber>> {
+    ) -> eyre::Result<u64> {
         let provider = factory.provider()?;
         let last = provider.last_block_number()?;
         let target = match self {
@@ -203,11 +192,11 @@ impl Subcommands {
                 BlockHashOrNumber::Number(num) => *num,
             },
             Self::NumBlocks { amount } => last.saturating_sub(*amount),
-        } + 1;
+        };
         if target > last {
             eyre::bail!("Target block number is higher than the latest block number")
         }
-        Ok(target..=last)
+        Ok(target)
     }
 }
diff --git a/crates/cli/commands/src/test_vectors/compact.rs b/crates/cli/commands/src/test_vectors/compact.rs
new file mode 100644
index 00000000000..c321e35be73
--- /dev/null
+++ b/crates/cli/commands/src/test_vectors/compact.rs
@@ -0,0 +1,282 @@
+use alloy_eips::eip4895::Withdrawals;
+use alloy_primitives::{hex, private::getrandom::getrandom, PrimitiveSignature, TxKind};
+use arbitrary::Arbitrary;
+use eyre::{Context, Result};
+use proptest::{
+    prelude::{ProptestConfig, RngCore},
+    test_runner::{TestRng, TestRunner},
+};
+use reth_codecs::alloy::{
+    authorization_list::Authorization,
+    genesis_account::GenesisAccount,
+    header::{Header, HeaderExt},
+    transaction::{
+        eip1559::TxEip1559, eip2930::TxEip2930, eip4844::TxEip4844, eip7702::TxEip7702,
+        legacy::TxLegacy,
+    },
+    withdrawal::Withdrawal,
+};
+use reth_db::{
+    models::{AccountBeforeTx, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals},
+    ClientVersion,
+};
+use reth_fs_util as fs;
+use reth_primitives::{
+    Account, Log, LogData, Receipt, StorageEntry, Transaction, TransactionSigned, TxType,
+};
+use reth_prune_types::{PruneCheckpoint, PruneMode};
+use reth_stages_types::{
+    AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint,
+    HeadersCheckpoint, IndexHistoryCheckpoint, StageCheckpoint, StageUnitCheckpoint,
+    StorageHashingCheckpoint,
+};
+use reth_trie::{hash_builder::HashBuilderValue, TrieMask};
+use reth_trie_common::{hash_builder::HashBuilderState, StoredNibbles, StoredNibblesSubKey};
+use std::{fs::File, io::BufReader};
+
+pub const VECTORS_FOLDER: &str = "testdata/micro/compact";
+pub const VECTOR_SIZE: usize = 100;
+
+#[macro_export]
+macro_rules! compact_types {
+    (regular: [$($regular_ty:ident),*], identifier: [$($id_ty:ident),*]) => {
+        pub const GENERATE_VECTORS: &[fn(&mut TestRunner) -> eyre::Result<()>] = &[
+            $(
+                generate_vector::<$regular_ty> as fn(&mut TestRunner) -> eyre::Result<()>,
+            )*
+            $(
+                generate_vector::<$id_ty> as fn(&mut TestRunner) -> eyre::Result<()>,
+            )*
+        ];
+
+        pub const READ_VECTORS: &[fn() -> eyre::Result<()>] = &[
+            $(
+                read_vector::<$regular_ty> as fn() -> eyre::Result<()>,
+            )*
+            $(
+                read_vector::<$id_ty> as fn() -> eyre::Result<()>,
+            )*
+        ];
+
+        pub static IDENTIFIER_TYPE: std::sync::LazyLock<std::collections::HashSet<String>> =
+            std::sync::LazyLock::new(|| {
+                let mut map = std::collections::HashSet::new();
+                $(
+                    map.insert(type_name::<$id_ty>());
+                )*
+                map
+            });
+    };
+}
+
+// The type that **actually** implements `Compact` should go here. If it's an alloy type, import
+// the auxiliary type from reth_codecs::alloy instead.
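The registry below enumerates every type whose `Compact` encoding gets pinned by a vector. The property each vector locks in is a byte-exact encode/decode/re-encode round trip. As a minimal, self-contained illustration of that property, here is a sketch using a hypothetical `MiniAccount` stand-in; the hand-rolled methods only mirror the shape of the `to_compact`/`from_compact` calls used in this file, they are not the real `reth_codecs::Compact` trait:

```rust
// Hypothetical stand-in for a `Compact` implementor; method shapes follow the
// `obj.to_compact(&mut buf)` / `T::from_compact(&buf, len)` calls in compact.rs.
#[derive(Debug, PartialEq)]
struct MiniAccount {
    nonce: u64,
    balance: u128,
}

impl MiniAccount {
    /// Appends a compact encoding to `buf`, returning the number of bytes written.
    fn to_compact(&self, buf: &mut Vec<u8>) -> usize {
        let start = buf.len();
        buf.extend_from_slice(&self.nonce.to_le_bytes());
        buf.extend_from_slice(&self.balance.to_le_bytes());
        buf.len() - start
    }

    /// Decodes a value from `buf`, returning it together with the unconsumed tail.
    fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) {
        let nonce = u64::from_le_bytes(buf[..8].try_into().unwrap());
        let balance = u128::from_le_bytes(buf[8..24].try_into().unwrap());
        (Self { nonce, balance }, &buf[24..])
    }
}

fn main() {
    let original = MiniAccount { nonce: 7, balance: 1_000 };

    // Encode, decode, then re-encode: the vectors in `testdata/micro/compact`
    // pin down exactly this stability property for every registered type.
    let mut bytes = Vec::new();
    let len = original.to_compact(&mut bytes);
    let (decoded, rest) = MiniAccount::from_compact(&bytes, len);
    assert!(rest.is_empty());
    assert_eq!(decoded, original);

    let mut reencoded = Vec::new();
    decoded.to_compact(&mut reencoded);
    assert_eq!(reencoded, bytes);
    println!("round-trip ok: {len} bytes");
}
```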
+compact_types!(
+    regular: [
+        // reth-primitives
+        Account,
+        Receipt,
+        // reth_codecs::alloy
+        Authorization,
+        GenesisAccount,
+        Header,
+        HeaderExt,
+        Withdrawal,
+        Withdrawals,
+        TxEip2930,
+        TxEip1559,
+        TxEip4844,
+        TxEip7702,
+        TxLegacy,
+        HashBuilderValue,
+        LogData,
+        Log,
+        // BranchNodeCompact, // todo requires arbitrary
+        TrieMask,
+        // TxDeposit, TODO(joshie): optimism
+        // reth_prune_types
+        PruneCheckpoint,
+        PruneMode,
+        // reth_stages_types
+        AccountHashingCheckpoint,
+        StorageHashingCheckpoint,
+        ExecutionCheckpoint,
+        HeadersCheckpoint,
+        IndexHistoryCheckpoint,
+        EntitiesCheckpoint,
+        CheckpointBlockRange,
+        StageCheckpoint,
+        StageUnitCheckpoint,
+        // reth_db_api
+        StoredBlockOmmers,
+        StoredBlockBodyIndices,
+        StoredBlockWithdrawals,
+        // Manual implementations
+        TransactionSigned,
+        // Bytecode, // todo revm arbitrary
+        StorageEntry,
+        // MerkleCheckpoint, // todo storedsubnode -> branchnodecompact arbitrary
+        AccountBeforeTx,
+        ClientVersion,
+        StoredNibbles,
+        StoredNibblesSubKey,
+        // StorageTrieEntry, // todo branchnodecompact arbitrary
+        // StoredSubNode, // todo branchnodecompact arbitrary
+        HashBuilderState
+    ],
+    // These types require an extra identifier which is usually stored elsewhere (e.g. in the
+    // parent type).
+    identifier: [
+        PrimitiveSignature,
+        Transaction,
+        TxType,
+        TxKind
+    ]
+);
+
+/// Generates test vectors for all registered types and writes them to files.
+pub fn generate_vectors() -> Result<()> {
+    generate_vectors_with(GENERATE_VECTORS)
+}
+
+pub fn read_vectors() -> Result<()> {
+    read_vectors_with(READ_VECTORS)
+}
+
+/// Generates test vectors using the given generator functions and writes them to files.
+pub fn generate_vectors_with(gen: &[fn(&mut TestRunner) -> eyre::Result<()>]) -> Result<()> {
+    // Prepare random seed for test (same method as used by proptest)
+    let mut seed = [0u8; 32];
+    getrandom(&mut seed)?;
+    println!("Seed for compact test vectors: {:?}", hex::encode_prefixed(seed));
+
+    // Start the runner with the seed
+    let config = ProptestConfig::default();
+    let rng = TestRng::from_seed(config.rng_algorithm, &seed);
+    let mut runner = TestRunner::new_with_rng(config, rng);
+
+    fs::create_dir_all(VECTORS_FOLDER)?;
+
+    for generate_fn in gen {
+        generate_fn(&mut runner)?;
+    }
+
+    Ok(())
+}
+
+/// Reads multiple vectors of different types, verifying their correctness by decoding and
+/// re-encoding.
+pub fn read_vectors_with(read: &[fn() -> eyre::Result<()>]) -> Result<()> {
+    fs::create_dir_all(VECTORS_FOLDER)?;
+    let mut errors = None;
+
+    for read_fn in read {
+        if let Err(err) = read_fn() {
+            errors.get_or_insert_with(Vec::new).push(err);
+        }
+    }
+
+    if let Some(err_list) = errors {
+        for error in err_list {
+            eprintln!("{:?}", error);
+        }
+        return Err(eyre::eyre!(
+            "If there are missing types, make sure to run `reth test-vectors compact --write` first.\n
+            If this happened during CI, ignore it if it's a newly proposed type that the `main` branch does not have."
+        ))
+    }
+
+    Ok(())
+}
+
+/// Generates test vectors for a specific type `T`.
+pub fn generate_vector<T>(runner: &mut TestRunner) -> Result<()>
+where
+    T: for<'a> Arbitrary<'a> + reth_codecs::Compact,
+{
+    let type_name = type_name::<T>();
+    print!("{}", &type_name);
+
+    let mut bytes = std::iter::repeat(0u8).take(256).collect::<Vec<u8>>();
+    let mut compact_buffer = vec![];
+
+    let mut values = Vec::with_capacity(VECTOR_SIZE);
+    for _ in 0..VECTOR_SIZE {
+        runner.rng().fill_bytes(&mut bytes);
+        compact_buffer.clear();
+
+        // Sometimes type `T` might require extra arbitrary data, so we retry it a few times.
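One convention worth making explicit before the generation loop: for the `identifier:` group registered above, `to_compact` returns a type identifier rather than a length, which `generate_vector` appends as a trailing byte and `read_vector` pops back off before decoding. A minimal, self-contained sketch of that framing, with hypothetical `frame`/`unframe` helpers rather than the module's own API:

```rust
// Hypothetical helpers mirroring the trailing-identifier framing used by
// `generate_vector` and `read_vector` for identifier types such as `TxType`.
fn frame(encoded: &[u8], identifier: u8) -> Vec<u8> {
    let mut framed = encoded.to_vec();
    framed.push(identifier); // `to_compact`'s return value, stored as the last byte
    framed
}

fn unframe(mut framed: Vec<u8>) -> (Vec<u8>, usize) {
    // The reader pops the identifier and passes it where the length would otherwise go.
    let identifier = framed.pop().expect("framed vector is never empty") as usize;
    (framed, identifier)
}

fn main() {
    let encoded = vec![0xde, 0xad, 0xbe, 0xef];
    let framed = frame(&encoded, 2); // e.g. an EIP-1559 tx-type identifier

    let (payload, identifier) = unframe(framed);
    assert_eq!(payload, encoded);
    assert_eq!(identifier, 2);
    println!("identifier {identifier} recovered, payload intact");
}
```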
+        let mut tries = 0;
+        let obj = loop {
+            match T::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) {
+                Ok(obj) => break obj,
+                Err(err) => {
+                    if tries < 5 && matches!(err, arbitrary::Error::NotEnoughData) {
+                        tries += 1;
+                        bytes.extend(std::iter::repeat(0u8).take(256));
+                    } else {
+                        return Err(err)?
+                    }
+                }
+            }
+        };
+        let res = obj.to_compact(&mut compact_buffer);
+
+        if IDENTIFIER_TYPE.contains(&type_name) {
+            compact_buffer.push(res as u8);
+        }
+
+        values.push(hex::encode(&compact_buffer));
+    }
+
+    serde_json::to_writer(
+        std::io::BufWriter::new(
+            std::fs::File::create(format!("{VECTORS_FOLDER}/{}.json", &type_name)).unwrap(),
+        ),
+        &values,
+    )?;
+
+    println!(" ✅");
+
+    Ok(())
+}
+
+/// Reads a vector of type `T` from a file and compares each item with its reconstructed version
+/// using `T::from_compact`.
+pub fn read_vector<T>() -> Result<()>
+where
+    T: reth_codecs::Compact,
+{
+    let type_name = type_name::<T>();
+    print!("{}", &type_name);
+
+    // Read the file where the vectors are stored
+    let file_path = format!("{VECTORS_FOLDER}/{}.json", &type_name);
+    let file =
+        File::open(&file_path).wrap_err_with(|| format!("Failed to open vector {type_name}."))?;
+    let reader = BufReader::new(file);
+
+    let stored_values: Vec<String> = serde_json::from_reader(reader)?;
+    let mut buffer = vec![];
+
+    for hex_str in stored_values {
+        let mut compact_bytes = hex::decode(hex_str)?;
+        let mut identifier = None;
+        buffer.clear();
+
+        if IDENTIFIER_TYPE.contains(&type_name) {
+            identifier = compact_bytes.pop().map(|b| b as usize);
+        }
+        let len_or_identifier = identifier.unwrap_or(compact_bytes.len());
+
+        let (reconstructed, _) = T::from_compact(&compact_bytes, len_or_identifier);
+        reconstructed.to_compact(&mut buffer);
+        assert_eq!(buffer, compact_bytes);
+    }
+
+    println!(" ✅");
+
+    Ok(())
+}
+
+pub fn type_name<T>() -> String {
+    std::any::type_name::<T>().split("::").last().unwrap_or(std::any::type_name::<T>()).to_string()
+}
diff --git a/crates/cli/commands/src/test_vectors/mod.rs b/crates/cli/commands/src/test_vectors/mod.rs
index 999c0bc9132..001d0c2e862 100644
--- a/crates/cli/commands/src/test_vectors/mod.rs
+++ b/crates/cli/commands/src/test_vectors/mod.rs
@@ -2,7 +2,8 @@
 
 use clap::{Parser, Subcommand};
 
-mod tables;
+pub mod compact;
+pub mod tables;
 
 /// Generate test-vectors for different data types.
 #[derive(Debug, Parser)]
@@ -19,6 +20,22 @@ pub enum Subcommands {
         /// List of table names. Case-sensitive.
         names: Vec<String>,
     },
+    /// Randomly generate test vectors for each `Compact` type using the `--write` flag.
+    ///
+    /// The generated vectors are serialized in both `json` and `Compact` formats and saved to a
+    /// file.
+    ///
+    /// Use the `--read` flag to read and validate the previously generated vectors from file.
+    #[group(multiple = false, required = true)]
+    Compact {
+        /// Write test vectors to a file.
+        #[arg(long)]
+        write: bool,
+
+        /// Read test vectors from a file.
+        #[arg(long)]
+        read: bool,
+    },
 }
 
 impl Command {
@@ -28,6 +45,13 @@ impl Command {
             Subcommands::Tables { names } => {
                 tables::generate_vectors(names)?;
             }
+            Subcommands::Compact { write, .. } => {
+                if write {
+                    compact::generate_vectors()?;
+                } else {
+                    compact::read_vectors()?;
+                }
+            }
         }
         Ok(())
     }
diff --git a/crates/cli/commands/src/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs
index 112685251d0..f845d2a6613 100644
--- a/crates/cli/commands/src/test_vectors/tables.rs
+++ b/crates/cli/commands/src/test_vectors/tables.rs
@@ -1,4 +1,5 @@
-use alloy_primitives::private::getrandom::getrandom;
+use alloy_consensus::Header;
+use alloy_primitives::{hex, private::getrandom::getrandom};
 use arbitrary::Arbitrary;
 use eyre::Result;
 use proptest::{
@@ -10,6 +11,7 @@
 use proptest_arbitrary_interop::arb;
 use reth_db::tables;
 use reth_db_api::table::{DupSort, Table, TableRow};
 use reth_fs_util as fs;
+use reth_primitives::TransactionSigned;
 use std::collections::HashSet;
 use tracing::error;
 
@@ -17,11 +19,11 @@ const VECTORS_FOLDER: &str = "testdata/micro/db";
 const PER_TABLE: usize = 1000;
 
 /// Generates test vectors for specified `tables`. If list is empty, then generate for all tables.
-pub(crate) fn generate_vectors(mut tables: Vec<String>) -> Result<()> {
+pub fn generate_vectors(mut tables: Vec<String>) -> Result<()> {
     // Prepare random seed for test (same method as used by proptest)
     let mut seed = [0u8; 32];
     getrandom(&mut seed)?;
-    println!("Seed for test vectors: {:?}", seed);
+    println!("Seed for table test vectors: {:?}", hex::encode_prefixed(seed));
 
     // Start the runner with the seed
     let config = ProptestConfig::default();
@@ -31,16 +33,16 @@ pub fn generate_vectors(mut tables: Vec<String>) -> Result<()> {
     fs::create_dir_all(VECTORS_FOLDER)?;
 
     macro_rules! generate_vector {
-        ($table_type:ident, $per_table:expr, TABLE) => {
-            generate_table_vector::<tables::$table_type>(&mut runner, $per_table)?;
+        ($table_type:ident$(<$($generic:ident),+>)?, $per_table:expr, TABLE) => {
+            generate_table_vector::<tables::$table_type$(<$($generic),+>)?>(&mut runner, $per_table)?;
         };
-        ($table_type:ident, $per_table:expr, DUPSORT) => {
-            generate_dupsort_vector::<tables::$table_type>(&mut runner, $per_table)?;
+        ($table_type:ident$(<$($generic:ident),+>)?, $per_table:expr, DUPSORT) => {
+            generate_dupsort_vector::<tables::$table_type$(<$($generic),+>)?>(&mut runner, $per_table)?;
         };
     }
 
     macro_rules! generate {
-        ([$(($table_type:ident, $per_table:expr, $table_or_dup:tt)),*]) => {
+        ([$(($table_type:ident$(<$($generic:ident),+>)?, $per_table:expr, $table_or_dup:tt)),*]) => {
             let all_tables = vec![$(stringify!($table_type).to_string(),)*];
 
             if tables.is_empty() {
@@ -51,9 +53,9 @@
                 match table.as_str() {
                     $(
                         stringify!($table_type) => {
-                            println!("Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type::NAME);
+                            println!("Generating test vectors for {} <{}>.", stringify!($table_or_dup), tables::$table_type$(::<$($generic),+>)?::NAME);
 
-                            generate_vector!($table_type, $per_table, $table_or_dup);
+                            generate_vector!($table_type$(<$($generic),+>)?, $per_table, $table_or_dup);
                         },
                     )*
                     _ => {
@@ -68,11 +70,11 @@
         (CanonicalHeaders, PER_TABLE, TABLE),
         (HeaderTerminalDifficulties, PER_TABLE, TABLE),
         (HeaderNumbers, PER_TABLE, TABLE),
-        (Headers, PER_TABLE, TABLE),
+        (Headers<Header>, PER_TABLE, TABLE),
         (BlockBodyIndices, PER_TABLE, TABLE),
-        (BlockOmmers, 100, TABLE),
+        (BlockOmmers<Header>, 100, TABLE),
         (TransactionHashNumbers, PER_TABLE, TABLE),
-        (Transactions, 100, TABLE),
+        (Transactions<TransactionSigned>, 100, TABLE),
         (PlainStorageState, PER_TABLE, DUPSORT),
         (PlainAccountState, PER_TABLE, TABLE)
     ]);
diff --git a/crates/cli/util/Cargo.toml b/crates/cli/util/Cargo.toml
index d96a882a672..70515f83b4b 100644
--- a/crates/cli/util/Cargo.toml
+++ b/crates/cli/util/Cargo.toml
@@ -24,6 +24,7 @@ eyre.workspace = true
 rand.workspace = true
 secp256k1 = { workspace = true, features = ["rand"] }
 thiserror.workspace = true
+serde.workspace = true
 
 tracy-client = { workspace = true, optional = true, features = ["demangle"] }
diff --git a/crates/cli/util/src/load_secret_key.rs b/crates/cli/util/src/load_secret_key.rs
index 25da0e06676..8b3bee09c8c 100644
--- a/crates/cli/util/src/load_secret_key.rs
+++ b/crates/cli/util/src/load_secret_key.rs
@@ -41,10 +41,7 @@ pub fn get_secret_key(secret_key_path: &Path) -> Result<SecretKey, SecretKeyError>
             let contents = fs::read_to_string(secret_key_path)?;
-            Ok(contents
-                .as_str()
-                .parse::<SecretKey>()
-                .map_err(SecretKeyError::SecretKeyDecodeError)?)
+            Ok(contents.as_str().parse().map_err(SecretKeyError::SecretKeyDecodeError)?)
         }
         Ok(false) => {
             if let Some(dir) = secret_key_path.parent() {
diff --git a/crates/cli/util/src/parsers.rs b/crates/cli/util/src/parsers.rs
index 202744a4bb7..fb27e1420c0 100644
--- a/crates/cli/util/src/parsers.rs
+++ b/crates/cli/util/src/parsers.rs
@@ -1,7 +1,9 @@
 use alloy_eips::BlockHashOrNumber;
 use alloy_primitives::B256;
+use reth_fs_util::FsPathError;
 use std::{
     net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs},
+    path::Path,
     str::FromStr,
     time::Duration,
 };
@@ -21,11 +23,11 @@ pub fn parse_duration_from_secs_or_ms(
     arg: &str,
 ) -> eyre::Result<Duration, std::num::ParseIntError> {
     if arg.ends_with("ms") {
-        arg.trim_end_matches("ms").parse::<u64>().map(Duration::from_millis)
+        arg.trim_end_matches("ms").parse().map(Duration::from_millis)
     } else if arg.ends_with('s') {
-        arg.trim_end_matches('s').parse::<u64>().map(Duration::from_secs)
+        arg.trim_end_matches('s').parse().map(Duration::from_secs)
    } else {
-        arg.parse::<u64>().map(Duration::from_secs)
+        arg.parse().map(Duration::from_secs)
     }
 }
 
@@ -73,7 +75,7 @@ pub fn parse_socket_address(value: &str) -> eyre::Result
-    if let Ok(port) = value.parse::<u16>() {
+    if let Ok(port) = value.parse() {
         return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port))
     }
     value
@@ -82,6 +84,11 @@ pub fn parse_socket_address(value: &str) -> eyre::Result
+
+pub fn read_json_file<T: serde::de::DeserializeOwned>(path: &str) -> Result<T, FsPathError> {
+    reth_fs_util::read_json_file(Path::new(path))
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml
deleted file mode 100644
index b4b28123033..00000000000
--- a/crates/consensus/auto-seal/Cargo.toml
+++ /dev/null
@@ -1,48 +0,0 @@
-[package]
-name = "reth-auto-seal-consensus"
-version.workspace = true
-edition.workspace = true
-rust-version.workspace = true
-license.workspace = true
-homepage.workspace = true
-repository.workspace = true
-description = "A consensus impl for local testing purposes"
-
-[lints]
-workspace = true
-
-[dependencies]
-# reth
-reth-chainspec.workspace = true
-reth-beacon-consensus.workspace = true
-reth-primitives.workspace = true
-reth-execution-errors.workspace = true
-reth-execution-types.workspace = true
-reth-network-p2p.workspace = true
-reth-provider.workspace = true
-reth-stages-api.workspace = true
-reth-revm.workspace = true
-reth-transaction-pool.workspace = true
-reth-evm.workspace = true
-reth-engine-primitives.workspace = true
-reth-consensus.workspace = true
-reth-network-peers.workspace = true
-reth-tokio-util.workspace = true -reth-trie.workspace = true - -# ethereum -alloy-primitives.workspace = true -revm-primitives.workspace = true -alloy-rpc-types-engine.workspace = true - -# optimism -reth-optimism-consensus = { workspace = true, optional = true } - -# async -futures-util.workspace = true -tokio = { workspace = true, features = ["sync", "time"] } -tokio-stream.workspace = true -tracing.workspace = true - -[features] -optimism = ["reth-provider/optimism", "reth-optimism-consensus"] diff --git a/crates/consensus/auto-seal/src/client.rs b/crates/consensus/auto-seal/src/client.rs deleted file mode 100644 index f9b80f10bb5..00000000000 --- a/crates/consensus/auto-seal/src/client.rs +++ /dev/null @@ -1,131 +0,0 @@ -//! This includes download client implementations for auto sealing miners. - -use crate::Storage; -use alloy_primitives::B256; -use reth_network_p2p::{ - bodies::client::{BodiesClient, BodiesFut}, - download::DownloadClient, - headers::client::{HeadersClient, HeadersDirection, HeadersFut, HeadersRequest}, - priority::Priority, -}; -use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, BlockHashOrNumber, Header}; -use std::fmt::Debug; -use tracing::{trace, warn}; - -/// A download client that polls the miner for transactions and assembles blocks to be returned in -/// the download process. -/// -/// When polled, the miner will assemble blocks when miners produce ready transactions and store the -/// blocks in memory. -#[derive(Debug, Clone)] -pub struct AutoSealClient { - storage: Storage, -} - -impl AutoSealClient { - pub(crate) const fn new(storage: Storage) -> Self { - Self { storage } - } - - async fn fetch_headers(&self, request: HeadersRequest) -> Vec
{ - trace!(target: "consensus::auto", ?request, "received headers request"); - - let storage = self.storage.read().await; - let HeadersRequest { start, limit, direction } = request; - let mut headers = Vec::new(); - - let mut block: BlockHashOrNumber = match start { - BlockHashOrNumber::Hash(start) => start.into(), - BlockHashOrNumber::Number(num) => { - if let Some(hash) = storage.block_hash(num) { - hash.into() - } else { - warn!(target: "consensus::auto", num, "no matching block found"); - return headers - } - } - }; - - for _ in 0..limit { - // fetch from storage - if let Some(header) = storage.header_by_hash_or_number(block) { - match direction { - HeadersDirection::Falling => block = header.parent_hash.into(), - HeadersDirection::Rising => { - let next = header.number + 1; - block = next.into() - } - } - headers.push(header); - } else { - break - } - } - - trace!(target: "consensus::auto", ?headers, "returning headers"); - - headers - } - - async fn fetch_bodies(&self, hashes: Vec) -> Vec { - trace!(target: "consensus::auto", ?hashes, "received bodies request"); - let storage = self.storage.read().await; - let mut bodies = Vec::new(); - for hash in hashes { - if let Some(body) = storage.bodies.get(&hash).cloned() { - bodies.push(body); - } else { - break - } - } - - trace!(target: "consensus::auto", ?bodies, "returning bodies"); - - bodies - } -} - -impl HeadersClient for AutoSealClient { - type Output = HeadersFut; - - fn get_headers_with_priority( - &self, - request: HeadersRequest, - _priority: Priority, - ) -> Self::Output { - let this = self.clone(); - Box::pin(async move { - let headers = this.fetch_headers(request).await; - Ok(WithPeerId::new(PeerId::random(), headers)) - }) - } -} - -impl BodiesClient for AutoSealClient { - type Output = BodiesFut; - - fn get_block_bodies_with_priority( - &self, - hashes: Vec, - _priority: Priority, - ) -> Self::Output { - let this = self.clone(); - Box::pin(async move { - let bodies = this.fetch_bodies(hashes).await; - Ok(WithPeerId::new(PeerId::random(), bodies)) - }) - } -} - -impl DownloadClient for AutoSealClient { - fn report_bad_message(&self, _peer_id: PeerId) { - warn!("Reported a bad message on a miner, we should never produce bad blocks"); - // noop - } - - fn num_connected_peers(&self) -> usize { - // no such thing as connected peers when we are mining ourselves - 1 - } -} diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs deleted file mode 100644 index 261227f1074..00000000000 --- a/crates/consensus/auto-seal/src/lib.rs +++ /dev/null @@ -1,691 +0,0 @@ -//! A [Consensus] implementation for local testing purposes -//! that automatically seals blocks. -//! -//! The Mining task polls a [`MiningMode`], and will return a list of transactions that are ready to -//! be mined. -//! -//! These downloaders poll the miner, assemble the block, and return transactions that are ready to -//! be mined. 
- -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; -use reth_beacon_consensus::BeaconEngineMessage; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; -use reth_engine_primitives::EngineTypes; -use reth_execution_errors::{ - BlockExecutionError, BlockValidationError, InternalBlockExecutionError, -}; -use reth_execution_types::ExecutionOutcome; -use reth_primitives::{ - proofs, Block, BlockBody, BlockHashOrNumber, BlockWithSenders, Header, Requests, SealedBlock, - SealedHeader, TransactionSigned, Withdrawals, -}; -use reth_provider::{BlockReaderIdExt, StateProviderFactory, StateRootProvider}; -use reth_revm::database::StateProviderDatabase; -use reth_transaction_pool::TransactionPool; -use reth_trie::HashedPostState; -use revm_primitives::calc_excess_blob_gas; -use std::{ - collections::HashMap, - fmt::Debug, - sync::Arc, - time::{SystemTime, UNIX_EPOCH}, -}; -use tokio::sync::{mpsc::UnboundedSender, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use tracing::trace; - -mod client; -mod mode; -mod task; - -pub use crate::client::AutoSealClient; -pub use mode::{FixedBlockTimeMiner, MiningMode, ReadyTransactionMiner}; -use reth_evm::execute::{BlockExecutorProvider, Executor}; -pub use task::MiningTask; - -/// A consensus implementation intended for local development and testing purposes. -#[derive(Debug, Clone)] -#[allow(dead_code)] -pub struct AutoSealConsensus { - /// Configuration - chain_spec: Arc, -} - -impl AutoSealConsensus { - /// Create a new instance of [`AutoSealConsensus`] - pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } - } -} - -impl Consensus for AutoSealConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_header_against_parent( - &self, - _header: &SealedHeader, - _parent: &SealedHeader, - ) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_header_with_total_difficulty( - &self, - _header: &Header, - _total_difficulty: U256, - ) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { - Ok(()) - } - - fn validate_block_post_execution( - &self, - _block: &BlockWithSenders, - _input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - Ok(()) - } -} - -/// Builder type for configuring the setup -#[derive(Debug)] -pub struct AutoSealBuilder { - client: Client, - consensus: AutoSealConsensus, - pool: Pool, - mode: MiningMode, - storage: Storage, - to_engine: UnboundedSender>, - evm_config: EvmConfig, -} - -// === impl AutoSealBuilder === - -impl - AutoSealBuilder -where - Client: BlockReaderIdExt, - Pool: TransactionPool, - Engine: EngineTypes, - ChainSpec: EthChainSpec, -{ - /// Creates a new builder instance to configure all parts. 
- pub fn new( - chain_spec: Arc, - client: Client, - pool: Pool, - to_engine: UnboundedSender>, - mode: MiningMode, - evm_config: EvmConfig, - ) -> Self { - let latest_header = client.latest_header().ok().flatten().unwrap_or_else(|| { - SealedHeader::new(chain_spec.genesis_header().clone(), chain_spec.genesis_hash()) - }); - - Self { - storage: Storage::new(latest_header), - client, - consensus: AutoSealConsensus::new(chain_spec), - pool, - mode, - to_engine, - evm_config, - } - } - - /// Sets the [`MiningMode`] it operates in, default is [`MiningMode::Auto`] - pub fn mode(mut self, mode: MiningMode) -> Self { - self.mode = mode; - self - } - - /// Consumes the type and returns all components - #[track_caller] - pub fn build( - self, - ) -> ( - AutoSealConsensus, - AutoSealClient, - MiningTask, - ) { - let Self { client, consensus, pool, mode, storage, to_engine, evm_config } = self; - let auto_client = AutoSealClient::new(storage.clone()); - let task = MiningTask::new( - Arc::clone(&consensus.chain_spec), - mode, - to_engine, - storage, - client, - pool, - evm_config, - ); - (consensus, auto_client, task) - } -} - -/// In memory storage -#[derive(Debug, Clone, Default)] -pub(crate) struct Storage { - inner: Arc>, -} - -// == impl Storage === - -impl Storage { - /// Initializes the [Storage] with the given best block. This should be initialized with the - /// highest block in the chain, if there is a chain already stored on-disk. - fn new(best_block: SealedHeader) -> Self { - let (header, best_hash) = best_block.split(); - let mut storage = StorageInner { - best_hash, - total_difficulty: header.difficulty, - best_block: header.number, - ..Default::default() - }; - storage.headers.insert(header.number, header); - storage.bodies.insert(best_hash, BlockBody::default()); - Self { inner: Arc::new(RwLock::new(storage)) } - } - - /// Returns the write lock of the storage - pub(crate) async fn write(&self) -> RwLockWriteGuard<'_, StorageInner> { - self.inner.write().await - } - - /// Returns the read lock of the storage - pub(crate) async fn read(&self) -> RwLockReadGuard<'_, StorageInner> { - self.inner.read().await - } -} - -/// In-memory storage for the chain the auto seal engine is building. -#[derive(Default, Debug)] -pub(crate) struct StorageInner { - /// Headers buffered for download. - pub(crate) headers: HashMap, - /// A mapping between block hash and number. - pub(crate) hash_to_number: HashMap, - /// Bodies buffered for download. - pub(crate) bodies: HashMap, - /// Tracks best block - pub(crate) best_block: u64, - /// Tracks hash of best block - pub(crate) best_hash: B256, - /// The total difficulty of the chain until this block - pub(crate) total_difficulty: U256, -} - -// === impl StorageInner === - -impl StorageInner { - /// Returns the block hash for the given block number if it exists. - pub(crate) fn block_hash(&self, num: u64) -> Option { - self.hash_to_number.iter().find_map(|(k, v)| num.eq(v).then_some(*k)) - } - - /// Returns the matching header if it exists. - pub(crate) fn header_by_hash_or_number( - &self, - hash_or_num: BlockHashOrNumber, - ) -> Option
{ - let num = match hash_or_num { - BlockHashOrNumber::Hash(hash) => self.hash_to_number.get(&hash).copied()?, - BlockHashOrNumber::Number(num) => num, - }; - self.headers.get(&num).cloned() - } - - /// Inserts a new header+body pair - pub(crate) fn insert_new_block(&mut self, mut header: Header, body: BlockBody) { - header.number = self.best_block + 1; - header.parent_hash = self.best_hash; - - self.best_hash = header.hash_slow(); - self.best_block = header.number; - self.total_difficulty += header.difficulty; - - trace!(target: "consensus::auto", num=self.best_block, hash=?self.best_hash, "inserting new block"); - self.headers.insert(header.number, header); - self.bodies.insert(self.best_hash, body); - self.hash_to_number.insert(self.best_hash, self.best_block); - } - - /// Fills in pre-execution header fields based on the current best block and given - /// transactions. - pub(crate) fn build_header_template( - &self, - timestamp: u64, - transactions: &[TransactionSigned], - ommers: &[Header], - withdrawals: Option<&Withdrawals>, - requests: Option<&Requests>, - chain_spec: &ChainSpec, - ) -> Header - where - ChainSpec: EthChainSpec + EthereumHardforks, - { - // check previous block for base fee - let base_fee_per_gas = self.headers.get(&self.best_block).and_then(|parent| { - parent.next_block_base_fee(chain_spec.base_fee_params_at_timestamp(timestamp)) - }); - - let blob_gas_used = chain_spec.is_cancun_active_at_timestamp(timestamp).then(|| { - transactions - .iter() - .filter_map(|tx| tx.transaction.as_eip4844()) - .map(|blob_tx| blob_tx.blob_gas()) - .sum::() - }); - - let mut header = Header { - parent_hash: self.best_hash, - ommers_hash: proofs::calculate_ommers_root(ommers), - transactions_root: proofs::calculate_transaction_root(transactions), - withdrawals_root: withdrawals.map(|w| proofs::calculate_withdrawals_root(w)), - difficulty: U256::from(2), - number: self.best_block + 1, - gas_limit: chain_spec.max_gas_limit(), - timestamp, - base_fee_per_gas, - blob_gas_used, - requests_root: requests.map(|r| proofs::calculate_requests_root(&r.0)), - ..Default::default() - }; - - if chain_spec.is_cancun_active_at_timestamp(timestamp) { - let parent = self.headers.get(&self.best_block); - header.parent_beacon_block_root = - parent.and_then(|parent| parent.parent_beacon_block_root); - header.blob_gas_used = Some(0); - - let (parent_excess_blob_gas, parent_blob_gas_used) = match parent { - Some(parent) if chain_spec.is_cancun_active_at_timestamp(parent.timestamp) => ( - parent.excess_blob_gas.unwrap_or_default(), - parent.blob_gas_used.unwrap_or_default(), - ), - _ => (0, 0), - }; - header.excess_blob_gas = - Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) - } - - header - } - - /// Builds and executes a new block with the given transactions, on the provided executor. - /// - /// This returns the header of the executed block, as well as the poststate from execution. - #[allow(clippy::too_many_arguments)] - pub(crate) fn build_and_execute( - &mut self, - transactions: Vec, - ommers: Vec
, - provider: &Provider, - chain_spec: Arc, - executor: &Executor, - ) -> Result<(SealedHeader, ExecutionOutcome), BlockExecutionError> - where - Executor: BlockExecutorProvider, - Provider: StateProviderFactory, - ChainSpec: EthChainSpec + EthereumHardforks, - { - let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); - - // if shanghai is active, include empty withdrawals - let withdrawals = - chain_spec.is_shanghai_active_at_timestamp(timestamp).then_some(Withdrawals::default()); - // if prague is active, include empty requests - let requests = - chain_spec.is_prague_active_at_timestamp(timestamp).then_some(Requests::default()); - - let header = self.build_header_template( - timestamp, - &transactions, - &ommers, - withdrawals.as_ref(), - requests.as_ref(), - &chain_spec, - ); - - let block = Block { - header, - body: BlockBody { - transactions, - ommers: ommers.clone(), - withdrawals: withdrawals.clone(), - requests: requests.clone(), - }, - } - .with_recovered_senders() - .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?; - - trace!(target: "consensus::auto", transactions=?&block.body, "executing transactions"); - - let mut db = StateProviderDatabase::new( - provider.latest().map_err(InternalBlockExecutionError::LatestBlock)?, - ); - - // execute the block - let block_execution_output = - executor.executor(&mut db).execute((&block, U256::ZERO).into())?; - let gas_used = block_execution_output.gas_used; - let execution_outcome = ExecutionOutcome::from((block_execution_output, block.number)); - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); - - // todo(onbjerg): we should not pass requests around as this is building a block, which - // means we need to extract the requests from the execution output and compute the requests - // root here - - let Block { mut header, body, .. 
} = block.block; - let body = BlockBody { transactions: body.transactions, ommers, withdrawals, requests }; - - trace!(target: "consensus::auto", ?execution_outcome, ?header, ?body, "executed block, calculating state root and completing header"); - - // now we need to update certain header fields with the results of the execution - header.state_root = db.state_root(hashed_state)?; - header.gas_used = gas_used; - - let receipts = execution_outcome.receipts_by_block(header.number); - - // update logs bloom - let receipts_with_bloom = - receipts.iter().map(|r| r.as_ref().unwrap().bloom_slow()).collect::>(); - header.logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | *r); - - // update receipts root - header.receipts_root = { - #[cfg(feature = "optimism")] - let receipts_root = execution_outcome - .generic_receipts_root_slow(header.number, |receipts| { - reth_optimism_consensus::calculate_receipt_root_no_memo_optimism( - receipts, - &chain_spec, - header.timestamp, - ) - }) - .expect("Receipts is present"); - - #[cfg(not(feature = "optimism"))] - let receipts_root = - execution_outcome.receipts_root_slow(header.number).expect("Receipts is present"); - - receipts_root - }; - trace!(target: "consensus::auto", root=?header.state_root, ?body, "calculated root"); - - // finally insert into storage - self.insert_new_block(header.clone(), body); - - // set new header with hash that should have been updated by insert_new_block - let new_header = SealedHeader::new(header, self.best_hash); - - Ok((new_header, execution_outcome)) - } -} - -#[cfg(test)] -mod tests { - use reth_chainspec::{ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition}; - use reth_primitives::Transaction; - - use super::*; - - #[test] - fn test_block_hash() { - let mut storage = StorageInner::default(); - - // Define two block hashes and their corresponding block numbers. - let block_hash_1: BlockHash = B256::random(); - let block_number_1: BlockNumber = 1; - let block_hash_2: BlockHash = B256::random(); - let block_number_2: BlockNumber = 2; - - // Insert the block number and hash pairs into the `hash_to_number` map. - storage.hash_to_number.insert(block_hash_1, block_number_1); - storage.hash_to_number.insert(block_hash_2, block_number_2); - - // Verify that `block_hash` returns the correct block hash for the given block number. - assert_eq!(storage.block_hash(block_number_1), Some(block_hash_1)); - assert_eq!(storage.block_hash(block_number_2), Some(block_hash_2)); - - // Test that `block_hash` returns `None` for a non-existent block number. - let block_number_3: BlockNumber = 3; - assert_eq!(storage.block_hash(block_number_3), None); - } - - #[test] - fn test_header_by_hash_or_number() { - let mut storage = StorageInner::default(); - - // Define block numbers, headers, and hashes. - let block_number_1: u64 = 1; - let block_number_2: u64 = 2; - let header_1 = Header { number: block_number_1, ..Default::default() }; - let header_2 = Header { number: block_number_2, ..Default::default() }; - let block_hash_1: BlockHash = B256::random(); - let block_hash_2: BlockHash = B256::random(); - - // Insert headers and hash-to-number mappings. - storage.headers.insert(block_number_1, header_1.clone()); - storage.headers.insert(block_number_2, header_2.clone()); - storage.hash_to_number.insert(block_hash_1, block_number_1); - storage.hash_to_number.insert(block_hash_2, block_number_2); - - // Test header retrieval by block number. 
- assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Number(block_number_1)), - Some(header_1.clone()) - ); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Number(block_number_2)), - Some(header_2.clone()) - ); - - // Test header retrieval by block hash. - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Hash(block_hash_1)), - Some(header_1) - ); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Hash(block_hash_2)), - Some(header_2) - ); - - // Test non-existent block number and hash. - assert_eq!(storage.header_by_hash_or_number(BlockHashOrNumber::Number(999)), None); - let non_existent_hash: BlockHash = B256::random(); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Hash(non_existent_hash)), - None - ); - } - - #[test] - fn test_insert_new_block() { - let mut storage = StorageInner::default(); - - // Define headers and block bodies. - let header_1 = Header { difficulty: U256::from(100), ..Default::default() }; - let body_1 = BlockBody::default(); - let header_2 = Header { difficulty: U256::from(200), ..Default::default() }; - let body_2 = BlockBody::default(); - - // Insert the first block. - storage.insert_new_block(header_1.clone(), body_1.clone()); - let best_block_1 = storage.best_block; - let best_hash_1 = storage.best_hash; - - // Verify the block was inserted correctly. - assert_eq!( - storage.headers.get(&best_block_1), - Some(&Header { number: 1, ..header_1.clone() }) - ); - assert_eq!(storage.bodies.get(&best_hash_1), Some(&body_1)); - assert_eq!(storage.hash_to_number.get(&best_hash_1), Some(&best_block_1)); - - // Insert the second block. - storage.insert_new_block(header_2.clone(), body_2.clone()); - let best_block_2 = storage.best_block; - let best_hash_2 = storage.best_hash; - - // Verify the second block was inserted correctly. - assert_eq!( - storage.headers.get(&best_block_2), - Some(&Header { - number: 2, - parent_hash: Header { number: 1, ..header_1 }.hash_slow(), - ..header_2 - }) - ); - assert_eq!(storage.bodies.get(&best_hash_2), Some(&body_2)); - assert_eq!(storage.hash_to_number.get(&best_hash_2), Some(&best_block_2)); - - // Check that the total difficulty was updated. 
- assert_eq!(storage.total_difficulty, header_1.difficulty + header_2.difficulty); - } - - #[test] - fn test_build_basic_header_template() { - let mut storage = StorageInner::default(); - let chain_spec = ChainSpec::default(); - - let best_block_number = 1; - let best_block_hash = B256::random(); - let timestamp = 1_600_000_000; - - // Set up best block information - storage.best_block = best_block_number; - storage.best_hash = best_block_hash; - - // Build header template - let header = storage.build_header_template( - timestamp, - &[], // no transactions - &[], // no ommers - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify basic fields - assert_eq!(header.parent_hash, best_block_hash); - assert_eq!(header.number, best_block_number + 1); - assert_eq!(header.timestamp, timestamp); - assert_eq!(header.gas_limit, chain_spec.max_gas_limit); - } - - #[test] - fn test_ommers_and_transactions_roots() { - let storage = StorageInner::default(); - let chain_spec = ChainSpec::default(); - let timestamp = 1_600_000_000; - - // Setup ommers and transactions - let ommers = vec![Header::default()]; - let transactions = vec![TransactionSigned::default()]; - - // Build header template - let header = storage.build_header_template( - timestamp, - &transactions, - &ommers, - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify ommers and transactions roots - assert_eq!(header.ommers_hash, proofs::calculate_ommers_root(&ommers)); - assert_eq!(header.transactions_root, proofs::calculate_transaction_root(&transactions)); - } - - // Test base fee calculation from the parent block - #[test] - fn test_base_fee_calculation() { - let mut storage = StorageInner::default(); - let chain_spec = ChainSpec::default(); - let timestamp = 1_600_000_000; - - // Set up the parent header with base fee - let base_fee = Some(100); - let parent_header = Header { base_fee_per_gas: base_fee, ..Default::default() }; - storage.headers.insert(storage.best_block, parent_header); - - // Build header template - let header = storage.build_header_template( - timestamp, - &[], // no transactions - &[], // no ommers - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify base fee is correctly propagated - assert_eq!(header.base_fee_per_gas, base_fee); - } - - // Test blob gas and excess blob gas calculation when Cancun is active - #[test] - fn test_blob_gas_calculation_cancun() { - let storage = StorageInner::default(); - let chain_spec = ChainSpec { - hardforks: ChainHardforks::new(vec![( - EthereumHardfork::Cancun.boxed(), - ForkCondition::Timestamp(25), - )]), - ..Default::default() - }; - let timestamp = 26; - - // Set up a transaction with blob gas - let blob_tx = TransactionSigned { - transaction: Transaction::Eip4844(Default::default()), - ..Default::default() - }; - let transactions = vec![blob_tx]; - - // Build header template - let header = storage.build_header_template( - timestamp, - &transactions, - &[], // no ommers - None, // no withdrawals - None, // no requests - &chain_spec, - ); - - // Verify that the header has the correct fields including blob gas - assert_eq!( - header, - Header { - parent_hash: B256::ZERO, - ommers_hash: proofs::calculate_ommers_root(&[]), - transactions_root: proofs::calculate_transaction_root(&transactions), - withdrawals_root: None, - difficulty: U256::from(2), - number: 1, - gas_limit: chain_spec.max_gas_limit, - timestamp, - base_fee_per_gas: None, - blob_gas_used: Some(0), - requests_root: None, - 
excess_blob_gas: Some(0), - ..Default::default() - } - ); - } -} diff --git a/crates/consensus/auto-seal/src/mode.rs b/crates/consensus/auto-seal/src/mode.rs deleted file mode 100644 index 82750c8e47b..00000000000 --- a/crates/consensus/auto-seal/src/mode.rs +++ /dev/null @@ -1,166 +0,0 @@ -//! The mode the auto seal miner is operating in. - -use alloy_primitives::TxHash; -use futures_util::{stream::Fuse, StreamExt}; -use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; -use std::{ - fmt, - pin::Pin, - sync::Arc, - task::{Context, Poll}, - time::Duration, -}; -use tokio::{sync::mpsc::Receiver, time::Interval}; -use tokio_stream::{wrappers::ReceiverStream, Stream}; - -/// Mode of operations for the `Miner` -#[derive(Debug)] -pub enum MiningMode { - /// A miner that does nothing - None, - /// A miner that listens for new transactions that are ready. - /// - /// Either one transaction will be mined per block, or any number of transactions will be - /// allowed - Auto(ReadyTransactionMiner), - /// A miner that constructs a new block every `interval` tick - FixedBlockTime(FixedBlockTimeMiner), -} - -// === impl MiningMode === - -impl MiningMode { - /// Creates a new instant mining mode that listens for new transactions and tries to build - /// non-empty blocks as soon as transactions arrive. - pub fn instant(max_transactions: usize, listener: Receiver) -> Self { - Self::Auto(ReadyTransactionMiner { - max_transactions, - has_pending_txs: None, - rx: ReceiverStream::new(listener).fuse(), - }) - } - - /// Creates a new interval miner that builds a block ever `duration`. - pub fn interval(duration: Duration) -> Self { - Self::FixedBlockTime(FixedBlockTimeMiner::new(duration)) - } - - /// polls the Pool and returns those transactions that should be put in a block, if any. - pub(crate) fn poll( - &mut self, - pool: &Pool, - cx: &mut Context<'_>, - ) -> Poll::Transaction>>>> - where - Pool: TransactionPool, - { - match self { - Self::None => Poll::Pending, - Self::Auto(miner) => miner.poll(pool, cx), - Self::FixedBlockTime(miner) => miner.poll(pool, cx), - } - } -} - -impl fmt::Display for MiningMode { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let kind = match self { - Self::None => "None", - Self::Auto(_) => "Auto", - Self::FixedBlockTime(_) => "FixedBlockTime", - }; - write!(f, "{kind}") - } -} - -/// A miner that's supposed to create a new block every `interval`, mining all transactions that are -/// ready at that time. 
-/// -/// The default blocktime is set to 6 seconds -#[derive(Debug)] -pub struct FixedBlockTimeMiner { - /// The interval this fixed block time miner operates with - interval: Interval, -} - -// === impl FixedBlockTimeMiner === - -impl FixedBlockTimeMiner { - /// Creates a new instance with an interval of `duration` - pub(crate) fn new(duration: Duration) -> Self { - let start = tokio::time::Instant::now() + duration; - Self { interval: tokio::time::interval_at(start, duration) } - } - - fn poll( - &mut self, - pool: &Pool, - cx: &mut Context<'_>, - ) -> Poll::Transaction>>>> - where - Pool: TransactionPool, - { - if self.interval.poll_tick(cx).is_ready() { - // drain the pool - return Poll::Ready(pool.best_transactions().collect()) - } - Poll::Pending - } -} - -impl Default for FixedBlockTimeMiner { - fn default() -> Self { - Self::new(Duration::from_secs(6)) - } -} - -/// A miner that Listens for new ready transactions -pub struct ReadyTransactionMiner { - /// how many transactions to mine per block - max_transactions: usize, - /// stores whether there are pending transactions (if known) - has_pending_txs: Option, - /// Receives hashes of transactions that are ready - rx: Fuse>, -} - -// === impl ReadyTransactionMiner === - -impl ReadyTransactionMiner { - fn poll( - &mut self, - pool: &Pool, - cx: &mut Context<'_>, - ) -> Poll::Transaction>>>> - where - Pool: TransactionPool, - { - // drain the notification stream - while let Poll::Ready(Some(_hash)) = Pin::new(&mut self.rx).poll_next(cx) { - self.has_pending_txs = Some(true); - } - - if self.has_pending_txs == Some(false) { - return Poll::Pending - } - - let transactions = pool.best_transactions().take(self.max_transactions).collect::>(); - - // there are pending transactions if we didn't drain the pool - self.has_pending_txs = Some(transactions.len() >= self.max_transactions); - - if transactions.is_empty() { - return Poll::Pending - } - - Poll::Ready(transactions) - } -} - -impl fmt::Debug for ReadyTransactionMiner { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ReadyTransactionMiner") - .field("max_transactions", &self.max_transactions) - .finish_non_exhaustive() - } -} diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs deleted file mode 100644 index cb0586d4440..00000000000 --- a/crates/consensus/auto-seal/src/task.rs +++ /dev/null @@ -1,220 +0,0 @@ -use crate::{mode::MiningMode, Storage}; -use alloy_rpc_types_engine::ForkchoiceState; -use futures_util::{future::BoxFuture, FutureExt}; -use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_engine_primitives::EngineTypes; -use reth_evm::execute::BlockExecutorProvider; -use reth_provider::{CanonChainTracker, StateProviderFactory}; -use reth_stages_api::PipelineEvent; -use reth_tokio_util::EventStream; -use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; -use std::{ - collections::VecDeque, - future::Future, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; -use tokio::sync::{mpsc::UnboundedSender, oneshot}; -use tracing::{debug, error, warn}; - -/// A Future that listens for new ready transactions and puts new blocks into storage -pub struct MiningTask { - /// The configured chain spec - chain_spec: Arc, - /// The client used to interact with the state - client: Client, - /// The active miner - miner: MiningMode, - /// Single active future that inserts a new block into `storage` - insert_task: Option>>>, - /// 
Shared storage to insert new blocks - storage: Storage, - /// Pool where transactions are stored - pool: Pool, - /// backlog of sets of transactions ready to be mined - queued: VecDeque::Transaction>>>>, - // TODO: ideally this would just be a sender of hashes - to_engine: UnboundedSender>, - /// The pipeline events to listen on - pipe_line_events: Option>, - /// The type used for block execution - block_executor: Executor, -} - -// === impl MiningTask === - -impl - MiningTask -{ - /// Creates a new instance of the task - #[allow(clippy::too_many_arguments)] - pub(crate) fn new( - chain_spec: Arc, - miner: MiningMode, - to_engine: UnboundedSender>, - storage: Storage, - client: Client, - pool: Pool, - block_executor: Executor, - ) -> Self { - Self { - chain_spec, - client, - miner, - insert_task: None, - storage, - pool, - to_engine, - queued: Default::default(), - pipe_line_events: None, - block_executor, - } - } - - /// Sets the pipeline events to listen on. - pub fn set_pipeline_events(&mut self, events: EventStream) { - self.pipe_line_events = Some(events); - } -} - -impl Future - for MiningTask -where - Client: StateProviderFactory + CanonChainTracker + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, - Engine: EngineTypes, - Executor: BlockExecutorProvider, - ChainSpec: EthChainSpec + EthereumHardforks + 'static, -{ - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); - - // this drives block production and - loop { - if let Poll::Ready(transactions) = this.miner.poll(&this.pool, cx) { - // miner returned a set of transaction that we feed to the producer - this.queued.push_back(transactions); - } - - if this.insert_task.is_none() { - if this.queued.is_empty() { - // nothing to insert - break - } - - // ready to queue in new insert task - let storage = this.storage.clone(); - let transactions = this.queued.pop_front().expect("not empty"); - - let to_engine = this.to_engine.clone(); - let client = this.client.clone(); - let chain_spec = Arc::clone(&this.chain_spec); - let events = this.pipe_line_events.take(); - let executor = this.block_executor.clone(); - - // Create the mining future that creates a block, notifies the engine that drives - // the pipeline - this.insert_task = Some(Box::pin(async move { - let mut storage = storage.write().await; - - let transactions: Vec<_> = transactions - .into_iter() - .map(|tx| { - let recovered = tx.to_recovered_transaction(); - recovered.into_signed() - }) - .collect(); - let ommers = vec![]; - - match storage.build_and_execute( - transactions.clone(), - ommers.clone(), - &client, - chain_spec, - &executor, - ) { - Ok((new_header, _bundle_state)) => { - let state = ForkchoiceState { - head_block_hash: new_header.hash(), - finalized_block_hash: new_header.hash(), - safe_block_hash: new_header.hash(), - }; - drop(storage); - - // TODO: make this a future - // await the fcu call rx for SYNCING, then wait for a VALID response - loop { - // send the new update to the engine, this will trigger the engine - // to download and execute the block we just inserted - let (tx, rx) = oneshot::channel(); - let _ = to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { - state, - payload_attrs: None, - tx, - }); - debug!(target: "consensus::auto", ?state, "Sent fork choice update"); - - match rx.await.unwrap() { - Ok(fcu_response) => { - match fcu_response.forkchoice_status() { - ForkchoiceStatus::Valid => break, - ForkchoiceStatus::Invalid => { - error!(target: "consensus::auto", 
?fcu_response, "Forkchoice update returned invalid response"); - return None - } - ForkchoiceStatus::Syncing => { - debug!(target: "consensus::auto", ?fcu_response, "Forkchoice update returned SYNCING, waiting for VALID"); - // wait for the next fork choice update - continue - } - } - } - Err(err) => { - error!(target: "consensus::auto", %err, "Autoseal fork choice update failed"); - return None - } - } - } - - // update canon chain for rpc - client.set_canonical_head(new_header.clone()); - client.set_safe(new_header.clone()); - client.set_finalized(new_header.clone()); - } - Err(err) => { - warn!(target: "consensus::auto", %err, "failed to execute block") - } - } - - events - })); - } - - if let Some(mut fut) = this.insert_task.take() { - match fut.poll_unpin(cx) { - Poll::Ready(events) => { - this.pipe_line_events = events; - } - Poll::Pending => { - this.insert_task = Some(fut); - break - } - } - } - } - - Poll::Pending - } -} - -impl - std::fmt::Debug for MiningTask -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("MiningTask").finish_non_exhaustive() - } -} diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index f1366812608..b937eb2b468 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -14,12 +14,16 @@ workspace = true # reth reth-ethereum-consensus.workspace = true reth-blockchain-tree-api.workspace = true +reth-codecs.workspace = true +reth-db-api.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-stages-api.workspace = true reth-errors.workspace = true reth-provider.workspace = true reth-tasks.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-payload-validator.workspace = true reth-prune.workspace = true @@ -32,7 +36,9 @@ reth-chainspec = { workspace = true, optional = true } # ethereum alloy-primitives.workspace = true -alloy-rpc-types-engine.workspace = true +alloy-rpc-types-engine = { workspace = true, features = ["std"] } +alloy-eips.workspace = true +alloy-consensus.workspace = true # async tokio = { workspace = true, features = ["sync"] } @@ -72,13 +78,17 @@ reth-exex-types.workspace = true reth-prune-types.workspace = true reth-chainspec.workspace = true alloy-genesis.workspace = true - assert_matches.workspace = true [features] optimism = [ + "reth-blockchain-tree/optimism", + "reth-codecs/op", "reth-chainspec", + "reth-db-api/optimism", + "reth-db/optimism", + "reth-downloaders/optimism", "reth-primitives/optimism", "reth-provider/optimism", - "reth-blockchain-tree/optimism", + "reth-downloaders/optimism", ] diff --git a/crates/consensus/beacon/src/engine/error.rs b/crates/consensus/beacon/src/engine/error.rs index 5fc6df2b884..2092ea49f77 100644 --- a/crates/consensus/beacon/src/engine/error.rs +++ b/crates/consensus/beacon/src/engine/error.rs @@ -77,24 +77,3 @@ impl From for BeaconForkChoiceUpdateError { Self::internal(e) } } - -/// Represents all error cases when handling a new payload. -/// -/// This represents all possible error cases that must be returned as JSON RCP errors back to the -/// beacon node. -#[derive(Debug, thiserror::Error)] -pub enum BeaconOnNewPayloadError { - /// Thrown when the engine task is unavailable/stopped. - #[error("beacon consensus engine task stopped")] - EngineUnavailable, - /// An internal error occurred, not necessarily related to the payload. 
- #[error(transparent)] - Internal(Box), -} - -impl BeaconOnNewPayloadError { - /// Create a new internal error. - pub fn internal(e: E) -> Self { - Self::Internal(Box::new(e)) - } -} diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/consensus/beacon/src/engine/event.rs index 975085a32f3..acf056b3ff4 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/consensus/beacon/src/engine/event.rs @@ -1,7 +1,8 @@ -use crate::engine::forkchoice::ForkchoiceStatus; +use alloy_consensus::BlockHeader; use alloy_primitives::B256; use alloy_rpc_types_engine::ForkchoiceState; -use reth_primitives::{SealedBlock, SealedHeader}; +use reth_engine_primitives::ForkchoiceStatus; +use reth_primitives::{EthPrimitives, NodePrimitives, SealedBlockFor, SealedHeader}; use std::{ fmt::{Display, Formatter, Result}, sync::Arc, @@ -10,23 +11,23 @@ use std::{ /// Events emitted by [`crate::BeaconConsensusEngine`]. #[derive(Clone, Debug)] -pub enum BeaconConsensusEngineEvent { +pub enum BeaconConsensusEngineEvent { /// The fork choice state was updated, and the current fork choice status ForkchoiceUpdated(ForkchoiceState, ForkchoiceStatus), /// A block was added to the fork chain. - ForkBlockAdded(Arc, Duration), + ForkBlockAdded(Arc>, Duration), /// A block was added to the canonical chain, and the elapsed time validating the block - CanonicalBlockAdded(Arc, Duration), + CanonicalBlockAdded(Arc>, Duration), /// A canonical chain was committed, and the elapsed time committing the data - CanonicalChainCommitted(Box, Duration), + CanonicalChainCommitted(Box>, Duration), /// The consensus engine is involved in live sync, and has specific progress LiveSyncProgress(ConsensusEngineLiveSyncProgress), } -impl BeaconConsensusEngineEvent { +impl BeaconConsensusEngineEvent { /// Returns the canonical header if the event is a /// [`BeaconConsensusEngineEvent::CanonicalChainCommitted`]. - pub const fn canonical_header(&self) -> Option<&SealedHeader> { + pub const fn canonical_header(&self) -> Option<&SealedHeader> { match self { Self::CanonicalChainCommitted(header, _) => Some(header), _ => None, @@ -34,7 +35,10 @@ impl BeaconConsensusEngineEvent { } } -impl Display for BeaconConsensusEngineEvent { +impl Display for BeaconConsensusEngineEvent +where + N: NodePrimitives, +{ fn fmt(&self, f: &mut Formatter<'_>) -> Result { match self { Self::ForkchoiceUpdated(state, status) => { diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/consensus/beacon/src/engine/forkchoice.rs deleted file mode 100644 index 7e49714ba37..00000000000 --- a/crates/consensus/beacon/src/engine/forkchoice.rs +++ /dev/null @@ -1,224 +0,0 @@ -use alloy_primitives::B256; -use alloy_rpc_types_engine::{ForkchoiceState, PayloadStatusEnum}; - -/// The struct that keeps track of the received forkchoice state and their status. -#[derive(Debug, Clone, Default)] -pub struct ForkchoiceStateTracker { - /// The latest forkchoice state that we received. - /// - /// Caution: this can be invalid. - latest: Option, - - /// Tracks the latest forkchoice state that we received to which we need to sync. - last_syncing: Option, - /// The latest valid forkchoice state that we received and processed as valid. - last_valid: Option, -} - -impl ForkchoiceStateTracker { - /// Sets the latest forkchoice state that we received. - /// - /// If the status is `VALID`, we also update the last valid forkchoice state and set the - /// `sync_target` to `None`, since we're now fully synced. 
- pub fn set_latest(&mut self, state: ForkchoiceState, status: ForkchoiceStatus) { - if status.is_valid() { - self.set_valid(state); - } else if status.is_syncing() { - self.last_syncing = Some(state); - } - - let received = ReceivedForkchoiceState { state, status }; - self.latest = Some(received); - } - - fn set_valid(&mut self, state: ForkchoiceState) { - // we no longer need to sync to this state. - self.last_syncing = None; - - self.last_valid = Some(state); - } - - /// Returns the [`ForkchoiceStatus`] of the latest received FCU. - /// - /// Caution: this can be invalid. - pub(crate) fn latest_status(&self) -> Option { - self.latest.as_ref().map(|s| s.status) - } - - /// Returns whether the latest received FCU is valid: [`ForkchoiceStatus::Valid`] - #[allow(dead_code)] - pub(crate) fn is_latest_valid(&self) -> bool { - self.latest_status().map(|s| s.is_valid()).unwrap_or(false) - } - - /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Syncing`] - #[allow(dead_code)] - pub(crate) fn is_latest_syncing(&self) -> bool { - self.latest_status().map(|s| s.is_syncing()).unwrap_or(false) - } - - /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Invalid`] - #[allow(dead_code)] - pub(crate) fn is_latest_invalid(&self) -> bool { - self.latest_status().map(|s| s.is_invalid()).unwrap_or(false) - } - - /// Returns the last valid head hash. - #[allow(dead_code)] - pub(crate) fn last_valid_head(&self) -> Option { - self.last_valid.as_ref().map(|s| s.head_block_hash) - } - - /// Returns the head hash of the latest received FCU to which we need to sync. - #[allow(dead_code)] - pub(crate) fn sync_target(&self) -> Option { - self.last_syncing.as_ref().map(|s| s.head_block_hash) - } - - /// Returns the latest received `ForkchoiceState`. - /// - /// Caution: this can be invalid. - pub const fn latest_state(&self) -> Option { - self.last_valid - } - - /// Returns the last valid `ForkchoiceState`. - pub const fn last_valid_state(&self) -> Option { - self.last_valid - } - - /// Returns the last valid finalized hash. - /// - /// This will return [`None`], if either there is no valid finalized forkchoice state, or the - /// finalized hash for the latest valid forkchoice state is zero. - #[inline] - pub fn last_valid_finalized(&self) -> Option { - self.last_valid.and_then(|state| { - // if the hash is zero then we should act like there is no finalized hash - if state.finalized_block_hash.is_zero() { - None - } else { - Some(state.finalized_block_hash) - } - }) - } - - /// Returns the last received `ForkchoiceState` to which we need to sync. - pub const fn sync_target_state(&self) -> Option { - self.last_syncing - } - - /// Returns the sync target finalized hash. - /// - /// This will return [`None`], if either there is no sync target forkchoice state, or the - /// finalized hash for the sync target forkchoice state is zero. - #[inline] - pub fn sync_target_finalized(&self) -> Option { - self.last_syncing.and_then(|state| { - // if the hash is zero then we should act like there is no finalized hash - if state.finalized_block_hash.is_zero() { - None - } else { - Some(state.finalized_block_hash) - } - }) - } - - /// Returns true if no forkchoice state has been received yet. - pub const fn is_empty(&self) -> bool { - self.latest.is_none() - } -} - -/// Represents a forkchoice update and tracks the status we assigned to it. 
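// A minimal, self-contained sketch of the `set_latest` rule implemented
// above (local stand-in types, not the reth ones): a VALID update becomes
// `last_valid` and clears the pending sync target, a SYNCING update records
// the state as the new sync target, and every update becomes `latest`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct FcuState(u64); // stand-in for a `ForkchoiceState`

#[derive(Clone, Copy)]
enum FcuStatus {
    Valid,
    Invalid,
    Syncing,
}

#[derive(Default)]
struct TrackerSketch {
    latest: Option<FcuState>,
    last_valid: Option<FcuState>,
    last_syncing: Option<FcuState>,
}

impl TrackerSketch {
    fn set_latest(&mut self, state: FcuState, status: FcuStatus) {
        match status {
            FcuStatus::Valid => {
                self.last_valid = Some(state);
                self.last_syncing = None; // fully synced, drop the target
            }
            FcuStatus::Syncing => self.last_syncing = Some(state),
            FcuStatus::Invalid => {}
        }
        self.latest = Some(state);
    }
}

fn main() {
    let mut t = TrackerSketch::default();
    t.set_latest(FcuState(1), FcuStatus::Syncing);
    assert_eq!(t.last_syncing, Some(FcuState(1)));
    t.set_latest(FcuState(2), FcuStatus::Valid);
    assert!(t.last_syncing.is_none()); // cleared by the VALID update
}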
-#[derive(Debug, Clone)] -#[allow(dead_code)] -pub(crate) struct ReceivedForkchoiceState { - state: ForkchoiceState, - status: ForkchoiceStatus, -} - -/// A simplified representation of [`PayloadStatusEnum`] specifically for FCU. -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum ForkchoiceStatus { - /// The forkchoice state is valid. - Valid, - /// The forkchoice state is invalid. - Invalid, - /// The forkchoice state is unknown. - Syncing, -} - -impl ForkchoiceStatus { - /// Returns `true` if the forkchoice state is [`ForkchoiceStatus::Valid`]. - pub const fn is_valid(&self) -> bool { - matches!(self, Self::Valid) - } - - /// Returns `true` if the forkchoice state is [`ForkchoiceStatus::Invalid`]. - pub const fn is_invalid(&self) -> bool { - matches!(self, Self::Invalid) - } - - /// Returns `true` if the forkchoice state is [`ForkchoiceStatus::Syncing`]. - pub const fn is_syncing(&self) -> bool { - matches!(self, Self::Syncing) - } - - /// Converts the general purpose [`PayloadStatusEnum`] into a [`ForkchoiceStatus`]. - pub(crate) const fn from_payload_status(status: &PayloadStatusEnum) -> Self { - match status { - PayloadStatusEnum::Valid | PayloadStatusEnum::Accepted => { - // `Accepted` is only returned on `newPayload`. It would be a valid state here. - Self::Valid - } - PayloadStatusEnum::Invalid { .. } => Self::Invalid, - PayloadStatusEnum::Syncing => Self::Syncing, - } - } -} - -impl From for ForkchoiceStatus { - fn from(status: PayloadStatusEnum) -> Self { - Self::from_payload_status(&status) - } -} - -/// A helper type to check represent hashes of a [`ForkchoiceState`] -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum ForkchoiceStateHash { - /// Head hash of the [`ForkchoiceState`]. - Head(B256), - /// Safe hash of the [`ForkchoiceState`]. - Safe(B256), - /// Finalized hash of the [`ForkchoiceState`]. - Finalized(B256), -} - -impl ForkchoiceStateHash { - /// Tries to find a matching hash in the given [`ForkchoiceState`]. - pub(crate) fn find(state: &ForkchoiceState, hash: B256) -> Option { - if state.head_block_hash == hash { - Some(Self::Head(hash)) - } else if state.safe_block_hash == hash { - Some(Self::Safe(hash)) - } else if state.finalized_block_hash == hash { - Some(Self::Finalized(hash)) - } else { - None - } - } - - /// Returns true if this is the head hash of the [`ForkchoiceState`] - pub(crate) const fn is_head(&self) -> bool { - matches!(self, Self::Head(_)) - } -} - -impl AsRef for ForkchoiceStateHash { - fn as_ref(&self) -> &B256 { - match self { - Self::Head(h) | Self::Safe(h) | Self::Finalized(h) => h, - } - } -} diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index 65b7c38df91..339f2fb067f 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -1,14 +1,14 @@ //! 
`BeaconConsensusEngine` external API -use crate::{ - engine::message::OnForkChoiceUpdated, BeaconConsensusEngineEvent, BeaconEngineMessage, - BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, -}; +use crate::{BeaconConsensusEngineEvent, BeaconForkChoiceUpdateError}; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, + ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; use futures::TryFutureExt; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{ + BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, + OnForkChoiceUpdated, +}; use reth_errors::RethResult; use reth_tokio_util::{EventSender, EventStream}; use tokio::sync::{mpsc::UnboundedSender, oneshot}; @@ -46,10 +46,10 @@ where pub async fn new_payload( &self, payload: ExecutionPayload, - cancun_fields: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result { let (tx, rx) = oneshot::channel(); - let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }); + let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, sidecar, tx }); rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)? } @@ -60,9 +60,10 @@ where &self, state: ForkchoiceState, payload_attrs: Option, + version: EngineApiMessageVersion, ) -> Result { Ok(self - .send_fork_choice_updated(state, payload_attrs) + .send_fork_choice_updated(state, payload_attrs, version) .map_err(|_| BeaconForkChoiceUpdateError::EngineUnavailable) .await?? .await?) @@ -74,12 +75,14 @@ where &self, state: ForkchoiceState, payload_attrs: Option, + version: EngineApiMessageVersion, ) -> oneshot::Receiver> { let (tx, rx) = oneshot::channel(); let _ = self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx, + version, }); rx } diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 89231ed5582..b4b38239a03 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -6,8 +6,10 @@ use crate::{ }; use alloy_primitives::BlockNumber; use futures::FutureExt; +use reth_codecs::Compact; +use reth_db_api::table::Value; use reth_errors::RethResult; -use reth_primitives::static_file::HighestStaticFiles; +use reth_primitives::{static_file::HighestStaticFiles, NodePrimitives}; use reth_provider::{ BlockReader, ChainStateBlockReader, DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory, @@ -33,7 +35,11 @@ impl StaticFileHook where Provider: StaticFileProviderFactory + DatabaseProviderFactory< - Provider: StageCheckpointReader + BlockReader + ChainStateBlockReader, + Provider: StaticFileProviderFactory< + Primitives: NodePrimitives, + > + StageCheckpointReader + + BlockReader + + ChainStateBlockReader, > + 'static, { /// Create a new instance @@ -145,7 +151,11 @@ impl EngineHook for StaticFileHook where Provider: StaticFileProviderFactory + DatabaseProviderFactory< - Provider: StageCheckpointReader + BlockReader + ChainStateBlockReader, + Provider: StaticFileProviderFactory< + Primitives: NodePrimitives, + > + StageCheckpointReader + + BlockReader + + ChainStateBlockReader, > + 'static, { fn name(&self) -> &'static str { diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index 8a1c95d73ce..2e2bc37a27e 100644 --- 
a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -1,11 +1,11 @@ +use alloy_eips::eip1898::BlockWithParent; use alloy_primitives::B256; use reth_metrics::{ metrics::{Counter, Gauge}, Metrics, }; -use reth_primitives::{Header, SealedHeader}; use schnellru::{ByLength, LruMap}; -use std::sync::Arc; +use std::fmt::Debug; use tracing::warn; /// The max hit counter for invalid headers in the cache before it is forcefully evicted. @@ -29,7 +29,7 @@ impl InvalidHeaderCache { Self { headers: LruMap::new(ByLength::new(max_length)), metrics: Default::default() } } - fn insert_entry(&mut self, hash: B256, header: Arc<Header>) { + fn insert_entry(&mut self, hash: B256, header: BlockWithParent) { self.headers.insert(hash, HeaderEntry { header, hit_count: 0 }); } @@ -37,7 +37,7 @@ impl InvalidHeaderCache { /// /// If this is called, the hit count for the entry is incremented. /// If the hit count exceeds the threshold, the entry is evicted and `None` is returned. - pub fn get(&mut self, hash: &B256) -> Option<Arc<Header>> { + pub fn get(&mut self, hash: &B256) -> Option<BlockWithParent> { { let entry = self.headers.get(hash)?; entry.hit_count += 1; @@ -55,7 +55,7 @@ pub fn insert_with_invalid_ancestor( &mut self, header_hash: B256, - invalid_ancestor: Arc<Header>, + invalid_ancestor: BlockWithParent, ) { if self.get(&header_hash).is_none() { warn!(target: "consensus::engine", hash=?header_hash, ?invalid_ancestor, "Bad block with existing invalid ancestor"); @@ -68,12 +68,10 @@ } /// Inserts an invalid ancestor into the map. - pub fn insert(&mut self, invalid_ancestor: SealedHeader) { - if self.get(&invalid_ancestor.hash()).is_none() { - let hash = invalid_ancestor.hash(); - let header = invalid_ancestor.unseal(); - warn!(target: "consensus::engine", ?hash, ?header, "Bad block with hash"); - self.insert_entry(hash, Arc::new(header)); + pub fn insert(&mut self, invalid_ancestor: BlockWithParent) { + if self.get(&invalid_ancestor.block.hash).is_none() { + warn!(target: "consensus::engine", ?invalid_ancestor, "Bad block with hash"); + self.insert_entry(invalid_ancestor.block.hash, invalid_ancestor); // update metrics self.metrics.unique_inserts.increment(1); @@ -85,8 +83,8 @@ struct HeaderEntry { /// Keeps track how many times this header has been hit. hit_count: u8, - /// The actually header entry - header: Arc<Header>
, + /// The actual header entry + header: BlockWithParent, } /// Metrics for the invalid headers cache. @@ -106,15 +104,15 @@ struct InvalidHeaderCacheMetrics { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::Sealable; + use alloy_consensus::Header; + use reth_primitives::SealedHeader; #[test] fn test_hit_eviction() { let mut cache = InvalidHeaderCache::new(10); - let sealed = Header::default().seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); - cache.insert(header.clone()); + let header = Header::default(); + let header = SealedHeader::seal(header); + cache.insert(header.block_with_parent()); assert_eq!(cache.headers.get(&header.hash()).unwrap().hit_count, 0); for hit in 1..INVALID_HEADER_HIT_EVICTION_THRESHOLD { diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index ccea982bfbd..c41f9283db8 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1,6 +1,8 @@ +use alloy_consensus::{BlockHeader, Header}; +use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash}; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, + ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; use futures::{stream::BoxStream, Future, StreamExt}; @@ -9,19 +11,22 @@ use reth_blockchain_tree_api::{ error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, }; -use reth_engine_primitives::{EngineTypes, PayloadTypes}; +use reth_engine_primitives::{ + BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, + ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus, OnForkChoiceUpdated, + PayloadTypes, +}; use reth_errors::{BlockValidationError, ProviderResult, RethError, RethResult}; use reth_network_p2p::{ sync::{NetworkSyncUpdater, SyncState}, - BlockClient, + EthBlockClient, }; -use reth_node_types::NodeTypesWithEngine; +use reth_node_types::{Block, BlockTy, HeaderTy, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_primitives::{PayloadAttributes, PayloadBuilder, PayloadBuilderAttributes}; +use reth_payload_builder_primitives::PayloadBuilder; +use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{ - constants::EPOCH_SLOTS, BlockNumHash, Head, Header, SealedBlock, SealedHeader, -}; +use reth_primitives::{EthPrimitives, Head, SealedBlock, SealedHeader}; use reth_provider::{ providers::ProviderNodeTypes, BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, StageCheckpointReader, @@ -42,14 +47,8 @@ use tokio::sync::{ use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; -mod message; -pub use message::{BeaconEngineMessage, OnForkChoiceUpdated}; - mod error; -pub use error::{ - BeaconConsensusEngineError, BeaconEngineResult, BeaconForkChoiceUpdateError, - BeaconOnNewPayloadError, -}; +pub use error::{BeaconConsensusEngineError, BeaconEngineResult, BeaconForkChoiceUpdateError}; mod invalid_headers; pub use invalid_headers::InvalidHeaderCache; @@ -60,9 +59,6 @@ pub use event::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; mod 
handle; pub use handle::BeaconConsensusEngineHandle; -mod forkchoice; -pub use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus}; - mod metrics; use metrics::EngineMetrics; @@ -88,9 +84,15 @@ const MAX_INVALID_HEADERS: u32 = 512u32; pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; /// Helper trait expressing requirements for node types to be used in engine. -pub trait EngineNodeTypes: ProviderNodeTypes + NodeTypesWithEngine {} +pub trait EngineNodeTypes: + ProviderNodeTypes<Primitives = EthPrimitives> + NodeTypesWithEngine +{ +} -impl<T> EngineNodeTypes for T where T: ProviderNodeTypes + NodeTypesWithEngine {} +impl<T> EngineNodeTypes for T where + T: ProviderNodeTypes<Primitives = EthPrimitives> + NodeTypesWithEngine +{ +} /// Represents a pending forkchoice update. /// @@ -175,7 +177,7 @@ type PendingForkchoiceUpdate = pub struct BeaconConsensusEngine<N, BT, Client> where N: EngineNodeTypes, - Client: BlockClient, + Client: EthBlockClient, BT: BlockchainTreeEngine + BlockReader + BlockIdReader @@ -232,13 +234,13 @@ impl<N, BT, Client> BeaconConsensusEngine<N, BT, Client> where N: EngineNodeTypes, BT: BlockchainTreeEngine - + BlockReader + + BlockReader<Block = BlockTy<N>, Header = HeaderTy<N>> + BlockIdReader - + CanonChainTracker + + CanonChainTracker<Header = HeaderTy<N>
> + StageCheckpointReader + ChainSpecProvider + 'static, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Create a new instance of the [`BeaconConsensusEngine`]. #[allow(clippy::too_many_arguments)] @@ -429,7 +431,12 @@ where } else if let Some(attrs) = attrs { // the CL requested to build a new payload on top of this new VALID head let head = outcome.into_header().unseal(); - self.process_payload_attributes(attrs, head, state) + self.process_payload_attributes( + attrs, + head, + state, + EngineApiMessageVersion::default(), + ) } else { OnForkChoiceUpdated::valid(PayloadStatus::new( PayloadStatusEnum::Valid, @@ -753,14 +760,14 @@ where // iterate over ancestors in the invalid cache // until we encounter the first valid ancestor let mut current_hash = parent_hash; - let mut current_header = self.invalid_headers.get(¤t_hash); - while let Some(header) = current_header { - current_hash = header.parent_hash; - current_header = self.invalid_headers.get(¤t_hash); + let mut current_block = self.invalid_headers.get(¤t_hash); + while let Some(block) = current_block { + current_hash = block.parent; + current_block = self.invalid_headers.get(¤t_hash); // If current_header is None, then the current_hash does not have an invalid // ancestor in the cache, check its presence in blockchain tree - if current_header.is_none() && + if current_block.is_none() && self.blockchain.find_block_by_hash(current_hash, BlockSource::Any)?.is_some() { return Ok(Some(current_hash)) @@ -799,13 +806,13 @@ where head: B256, ) -> ProviderResult> { // check if the check hash was previously marked as invalid - let Some(header) = self.invalid_headers.get(&check) else { return Ok(None) }; + let Some(block) = self.invalid_headers.get(&check) else { return Ok(None) }; // populate the latest valid hash field - let status = self.prepare_invalid_response(header.parent_hash)?; + let status = self.prepare_invalid_response(block.parent)?; // insert the head block into the invalid header cache - self.invalid_headers.insert_with_invalid_ancestor(head, header); + self.invalid_headers.insert_with_invalid_ancestor(head, block); Ok(Some(status)) } @@ -814,10 +821,10 @@ where /// to a forkchoice update. fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult> { // check if the head was previously marked as invalid - let Some(header) = self.invalid_headers.get(&head) else { return Ok(None) }; + let Some(block) = self.invalid_headers.get(&head) else { return Ok(None) }; // populate the latest valid hash field - Ok(Some(self.prepare_invalid_response(header.parent_hash)?)) + Ok(Some(self.prepare_invalid_response(block.parent)?)) } /// Record latency metrics for one call to make a block canonical @@ -945,7 +952,7 @@ where .blockchain .find_block_by_hash(safe_block_hash, BlockSource::Any)? .ok_or(ProviderError::UnknownBlockHash(safe_block_hash))?; - self.blockchain.set_safe(SealedHeader::new(safe.header, safe_block_hash)); + self.blockchain.set_safe(SealedHeader::new(safe.split().0, safe_block_hash)); } Ok(()) } @@ -965,9 +972,9 @@ where .blockchain .find_block_by_hash(finalized_block_hash, BlockSource::Any)? 
.ok_or(ProviderError::UnknownBlockHash(finalized_block_hash))?; - self.blockchain.finalize_block(finalized.number)?; + self.blockchain.finalize_block(finalized.header().number())?; self.blockchain - .set_finalized(SealedHeader::new(finalized.header, finalized_block_hash)); + .set_finalized(SealedHeader::new(finalized.split().0, finalized_block_hash)); } Ok(()) } @@ -1080,11 +1087,11 @@ where /// /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and /// returns an error if an internal error occurred. - #[instrument(level = "trace", skip(self, payload, cancun_fields), fields(block_hash = ?payload.block_hash(), block_number = %payload.block_number(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] + #[instrument(level = "trace", skip(self, payload, sidecar), fields(block_hash = ?payload.block_hash(), block_number = %payload.block_number(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] fn on_new_payload( &mut self, payload: ExecutionPayload, - cancun_fields: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result, BeaconOnNewPayloadError> { self.metrics.new_payload_messages.increment(1); @@ -1114,10 +1121,7 @@ where // // This validation **MUST** be instantly run in all cases even during active sync process. let parent_hash = payload.parent_hash(); - let block = match self - .payload_validator - .ensure_well_formed_payload(payload, cancun_fields.into()) - { + let block = match self.payload_validator.ensure_well_formed_payload(payload, sidecar) { Ok(block) => block, Err(error) => { error!(target: "consensus::engine", %error, "Invalid payload"); @@ -1164,6 +1168,7 @@ where attrs: ::PayloadAttributes, head: Header, state: ForkchoiceState, + version: EngineApiMessageVersion, ) -> OnForkChoiceUpdated { // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp // of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held @@ -1181,6 +1186,7 @@ where match <::PayloadBuilderAttributes as PayloadBuilderAttributes>::try_new( state.head_block_hash, attrs, + version as u8 ) { Ok(attributes) => { // send the payload to the builder and return the receiver for the pending payload @@ -1448,7 +1454,7 @@ where fn on_pipeline_outcome(&mut self, ctrl: ControlFlow) -> RethResult<()> { // Pipeline unwound, memorize the invalid block and wait for CL for next sync target. if let ControlFlow::Unwind { bad_block, .. } = ctrl { - warn!(target: "consensus::engine", invalid_hash=?bad_block.hash(), invalid_number=?bad_block.number, "Bad block detected in unwind"); + warn!(target: "consensus::engine", invalid_num_hash=?bad_block.block, "Bad block detected in unwind"); // update the `invalid_headers` cache with the new invalid header self.invalid_headers.insert(*bad_block); return Ok(()) @@ -1667,7 +1673,7 @@ where self.latest_valid_hash_for_invalid_payload(block.parent_hash)? 
}; // keep track of the invalid header - self.invalid_headers.insert(block.header); + self.invalid_headers.insert(block.header.block_with_parent()); PayloadStatus::new( PayloadStatusEnum::Invalid { validation_error: error.to_string() }, latest_valid_hash, @@ -1776,7 +1782,7 @@ let (block, err) = err.split(); warn!(target: "consensus::engine", invalid_number=?block.number, invalid_hash=?block.hash(), %err, "Marking block as invalid"); - self.invalid_headers.insert(block.header); + self.invalid_headers.insert(block.header.block_with_parent()); } } } @@ -1796,11 +1802,11 @@ impl<N, BT, Client> Future for BeaconConsensusEngine<N, BT, Client> where N: EngineNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, BT: BlockchainTreeEngine - + BlockReader + + BlockReader<Block = BlockTy<N>, Header = HeaderTy<N>> + BlockIdReader - + CanonChainTracker + + CanonChainTracker<Header = HeaderTy<N>
> + StageCheckpointReader + ChainSpecProvider + Unpin @@ -1859,11 +1865,16 @@ where // sensitive, hence they are polled first. if let Poll::Ready(Some(msg)) = this.engine_message_stream.poll_next_unpin(cx) { match msg { - BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { + BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version: _version, + } => { this.on_forkchoice_updated(state, payload_attrs, tx); } - BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { - match this.on_new_payload(payload, cancun_fields) { + BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { + match this.on_new_payload(payload, sidecar) { Ok(Either::Right(block)) => { this.set_blockchain_tree_action( BlockchainTreeAction::InsertNewPayload { block, tx }, @@ -1986,7 +1997,9 @@ mod tests { use alloy_rpc_types_engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; use assert_matches::assert_matches; use reth_chainspec::{ChainSpecBuilder, MAINNET}; - use reth_provider::{BlockWriter, ProviderFactory}; + use reth_node_types::FullNodePrimitives; + use reth_primitives::BlockExt; + use reth_provider::{BlockWriter, ProviderFactory, StorageLocation}; use reth_rpc_types_compat::engine::payload::block_to_payload_v1; use reth_stages::{ExecOutput, PipelineError, StageError}; use reth_stages_api::StageCheckpoint; @@ -2022,7 +2035,7 @@ mod tests { .await; assert_matches!( res.await, - Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(),PipelineError::Stage(StageError::ChannelClosed)) + Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) ); } @@ -2051,7 +2064,12 @@ mod tests { assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); // consensus engine is still idle because no FCUs were received - let _ = env.send_new_payload(block_to_payload_v1(SealedBlock::default()), None).await; + let _ = env + .send_new_payload( + block_to_payload_v1(SealedBlock::default()), + ExecutionPayloadSidecar::none(), + ) + .await; assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); @@ -2123,7 +2141,7 @@ mod tests { assert_matches!( rx.await, - Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(),PipelineError::Stage(StageError::ChannelClosed)) + Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) ); } @@ -2159,7 +2177,15 @@ mod tests { assert_matches!(rx.await, Ok(Ok(()))); } - fn insert_blocks<'a, N: ProviderNodeTypes>( + fn insert_blocks< + 'a, + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + BlockBody = reth_primitives::BlockBody, + BlockHeader = reth_primitives::Header, + >, + >, + >( provider_factory: ProviderFactory, mut blocks: impl Iterator, ) { @@ -2169,6 +2195,7 @@ mod tests { provider .insert_block( b.clone().try_seal_with_senders().expect("invalid tx signature in block"), + StorageLocation::Database, ) .map(drop) }) @@ -2616,7 +2643,7 @@ mod tests { 0, BlockParams { ommers_count: Some(0), ..Default::default() }, )), - None, + ExecutionPayloadSidecar::none(), ) .await; @@ -2631,7 +2658,7 @@ mod tests { 1, BlockParams { ommers_count: Some(0), ..Default::default() }, )), - None, + ExecutionPayloadSidecar::none(), ) .await; @@ -2709,7 +2736,10 @@ mod tests { // Send new payload let result = env - .send_new_payload_retry_on_syncing(block_to_payload_v1(block2.clone()), None) + .send_new_payload_retry_on_syncing( + block_to_payload_v1(block2.clone()), + 
ExecutionPayloadSidecar::none(), + ) .await .unwrap(); @@ -2844,7 +2874,9 @@ mod tests { 2, BlockParams { parent: Some(parent), ommers_count: Some(0), ..Default::default() }, ); - let res = env.send_new_payload(block_to_payload_v1(block), None).await; + let res = env + .send_new_payload(block_to_payload_v1(block), ExecutionPayloadSidecar::none()) + .await; let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); @@ -2858,7 +2890,7 @@ mod tests { block1.header.set_difficulty( MAINNET.fork(EthereumHardfork::Paris).ttd().unwrap() - U256::from(1), ); - block1 = block1.unseal().seal_slow(); + block1 = block1.unseal::().seal_slow(); let (block2, exec_result2) = data.blocks[1].clone(); let mut block2 = block2.unseal().block; block2.body.withdrawals = None; @@ -2914,7 +2946,10 @@ mod tests { // Send new payload let result = env - .send_new_payload_retry_on_syncing(block_to_payload_v1(block2.clone()), None) + .send_new_payload_retry_on_syncing( + block_to_payload_v1(block2.clone()), + ExecutionPayloadSidecar::none(), + ) .await .unwrap(); diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 9426ca19712..861aeebf1eb 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -4,13 +4,14 @@ use crate::{ engine::metrics::EngineSyncMetrics, BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, EthBeaconConsensus, }; +use alloy_consensus::Header; use alloy_primitives::{BlockNumber, B256}; use futures::FutureExt; use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, - BlockClient, + EthBlockClient, }; -use reth_primitives::SealedBlock; +use reth_primitives::{BlockBody, EthPrimitives, NodePrimitives, SealedBlock}; use reth_provider::providers::ProviderNodeTypes; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; use reth_tasks::TaskSpawner; @@ -34,7 +35,7 @@ use tracing::trace; pub(crate) struct EngineSyncController where N: ProviderNodeTypes, - Client: BlockClient, + Client: EthBlockClient, { /// A downloader that can download full blocks from the network. full_block_client: FullBlockClient, @@ -64,7 +65,7 @@ where impl EngineSyncController where N: ProviderNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Create a new instance pub(crate) fn new( @@ -345,25 +346,33 @@ where /// A wrapper type around [`SealedBlock`] that implements the [Ord] trait by block number. #[derive(Debug, Clone, PartialEq, Eq)] -struct OrderedSealedBlock(SealedBlock); +struct OrderedSealedBlock(SealedBlock); -impl PartialOrd for OrderedSealedBlock { +impl PartialOrd for OrderedSealedBlock +where + H: reth_primitives_traits::BlockHeader + 'static, + B: reth_primitives_traits::BlockBody + 'static, +{ fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedSealedBlock { +impl Ord for OrderedSealedBlock +where + H: reth_primitives_traits::BlockHeader + 'static, + B: reth_primitives_traits::BlockBody + 'static, +{ fn cmp(&self, other: &Self) -> Ordering { - self.0.number.cmp(&other.0.number) + self.0.number().cmp(&other.0.number()) } } /// The event type emitted by the [`EngineSyncController`]. #[derive(Debug)] -pub(crate) enum EngineSyncEvent { +pub(crate) enum EngineSyncEvent { /// A full block has been downloaded from the network. 
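// Sketch of why `OrderedSealedBlock` above implements `Ord` by block number
// (stand-in type, assumed simplification): wrapped in `core::cmp::Reverse`,
// a `BinaryHeap` pops the lowest height first, so a buffer of downloaded
// blocks drains in ascending order regardless of how responses arrived.
use std::{
    cmp::{Ordering, Reverse},
    collections::BinaryHeap,
};

#[derive(Debug, PartialEq, Eq)]
struct ByNumber(u64);

impl PartialOrd for ByNumber {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for ByNumber {
    fn cmp(&self, other: &Self) -> Ordering {
        self.0.cmp(&other.0)
    }
}

fn main() {
    let mut heap = BinaryHeap::new();
    heap.extend([Reverse(ByNumber(3)), Reverse(ByNumber(1)), Reverse(ByNumber(2))]);
    assert_eq!(heap.pop(), Some(Reverse(ByNumber(1)))); // lowest block first
}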
- FetchedFullBlock(SealedBlock), + FetchedFullBlock(SealedBlock), /// Pipeline started syncing /// /// This is none if the pipeline is triggered without a specific target. @@ -410,12 +419,12 @@ impl PipelineState { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::Sealable; + use alloy_consensus::Header; use assert_matches::assert_matches; use futures::poll; use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET}; use reth_network_p2p::{either::Either, test_utils::TestFullBlockClient}; - use reth_primitives::{BlockBody, Header, SealedHeader}; + use reth_primitives::{BlockBody, SealedHeader}; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, ExecutionOutcome, @@ -522,7 +531,7 @@ mod tests { ) -> EngineSyncController> where N: ProviderNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { let client = self .client @@ -599,9 +608,7 @@ mod tests { header.parent_hash = hash; header.number += 1; header.timestamp += 1; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - sealed_header = SealedHeader::new(header, seal); + sealed_header = SealedHeader::seal(header); client.insert(sealed_header.clone(), body.clone()); } } @@ -617,14 +624,12 @@ mod tests { ); let client = TestFullBlockClient::default(); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(7), gas_limit: chain_spec.max_gas_limit, ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); insert_headers_into_client(&client, header, 0..10); // set up a pipeline diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 633ae03d8ad..ae627cae696 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -2,28 +2,31 @@ use crate::{ engine::hooks::PruneHook, hooks::EngineHooks, BeaconConsensusEngine, BeaconConsensusEngineError, BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, - BeaconOnNewPayloadError, EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, + EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, }; -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, + ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; use reth_blockchain_tree::{ config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, }; use reth_chainspec::ChainSpec; use reth_config::config::StageConfig; -use reth_consensus::{test_utils::TestConsensus, Consensus}; +use reth_consensus::{test_utils::TestConsensus, FullConsensus}; use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_engine_primitives::{BeaconOnNewPayloadError, EngineApiMessageVersion}; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm::{either::Either, test_utils::MockExecutorProvider}; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex_types::FinishedExExHeight; -use reth_network_p2p::{sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, BlockClient}; +use reth_network_p2p::{ + sync::NoopSyncStateUpdater, 
test_utils::NoopFullBlockClient, EthBlockClient, +}; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::SealedHeader; use reth_provider::{ @@ -68,9 +71,9 @@ impl TestEnv { pub async fn send_new_payload>( &self, payload: T, - cancun_fields: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result { - self.engine_handle.new_payload(payload.into(), cancun_fields).await + self.engine_handle.new_payload(payload.into(), sidecar).await } /// Sends the `ExecutionPayload` message to the consensus engine and retries if the engine @@ -78,11 +81,11 @@ impl TestEnv { pub async fn send_new_payload_retry_on_syncing>( &self, payload: T, - cancun_fields: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result { let payload: ExecutionPayload = payload.into(); loop { - let result = self.send_new_payload(payload.clone(), cancun_fields.clone()).await?; + let result = self.send_new_payload(payload.clone(), sidecar.clone()).await?; if !result.is_syncing() { return Ok(result) } @@ -93,7 +96,9 @@ impl TestEnv { &self, state: ForkchoiceState, ) -> Result { - self.engine_handle.fork_choice_updated(state, None).await + self.engine_handle + .fork_choice_updated(state, None, EngineApiMessageVersion::default()) + .await } /// Sends the `ForkchoiceUpdated` message to the consensus engine and retries if the engine @@ -103,7 +108,10 @@ impl TestEnv { state: ForkchoiceState, ) -> Result { loop { - let result = self.engine_handle.fork_choice_updated(state, None).await?; + let result = self + .engine_handle + .fork_choice_updated(state, None, EngineApiMessageVersion::default()) + .await?; if !result.is_syncing() { return Ok(result) } @@ -231,7 +239,7 @@ impl TestConsensusEngineBuilder { client: Client, ) -> NetworkedTestConsensusEngineBuilder where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { NetworkedTestConsensusEngineBuilder { base_config: self, client: Some(client) } } @@ -258,7 +266,7 @@ pub struct NetworkedTestConsensusEngineBuilder { impl NetworkedTestConsensusEngineBuilder where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Set the pipeline execution outputs to use for the test consensus engine. 
#[allow(dead_code)] @@ -313,7 +321,7 @@ where client: ClientType, ) -> NetworkedTestConsensusEngineBuilder where - ClientType: BlockClient + 'static, + ClientType: EthBlockClient + 'static, { NetworkedTestConsensusEngineBuilder { base_config: self.base_config, client: Some(client) } } @@ -324,7 +332,7 @@ where let provider_factory = create_test_provider_factory_with_chain_spec(self.base_config.chain_spec.clone()); - let consensus: Arc = match self.base_config.consensus { + let consensus: Arc = match self.base_config.consensus { TestConsensusConfig::Real => { Arc::new(EthBeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) } @@ -362,17 +370,21 @@ where .with_tip_sender(tip_tx), TestPipelineConfig::Real => { let header_downloader = ReverseHeadersDownloaderBuilder::default() - .build(client.clone(), consensus.clone()) + .build(client.clone(), consensus.clone().as_header_validator()) .into_task(); let body_downloader = BodiesDownloaderBuilder::default() - .build(client.clone(), consensus.clone(), provider_factory.clone()) + .build( + client.clone(), + consensus.clone().as_consensus(), + provider_factory.clone(), + ) .into_task(); Pipeline::::builder().add_stages(DefaultStages::new( provider_factory.clone(), tip_rx.clone(), - Arc::clone(&consensus), + consensus.clone().as_consensus(), header_downloader, body_downloader, executor_factory.clone(), @@ -394,9 +406,8 @@ where BlockchainTree::new(externals, BlockchainTreeConfig::new(1, 2, 3, 2)) .expect("failed to create tree"), )); - let sealed = self.base_config.chain_spec.genesis_header().clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - let genesis_block = SealedHeader::new(header, seal); + let header = self.base_config.chain_spec.genesis_header().clone(); + let genesis_block = SealedHeader::seal(header); let blockchain_provider = BlockchainProvider::with_blocks( provider_factory.clone(), @@ -444,7 +455,7 @@ pub fn spawn_consensus_engine( engine: TestBeaconConsensusEngine, ) -> oneshot::Receiver> where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { let (tx, rx) = oneshot::channel(); tokio::spawn(async move { diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 66a92270dba..a9a0c69ae55 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -13,16 +13,19 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true -reth-primitives.workspace = true reth-consensus.workspace = true +reth-primitives.workspace = true # ethereum alloy-primitives.workspace = true revm-primitives.workspace = true +reth-primitives-traits.workspace = true +alloy-consensus.workspace = true +alloy-eips.workspace = true [dev-dependencies] +alloy-consensus.workspace = true reth-storage-api.workspace = true rand.workspace = true mockall = "0.13" -alloy-consensus.workspace = true diff --git a/crates/consensus/common/src/calc.rs b/crates/consensus/common/src/calc.rs index 3f519332fec..e30c5b715f5 100644 --- a/crates/consensus/common/src/calc.rs +++ b/crates/consensus/common/src/calc.rs @@ -1,6 +1,6 @@ +use alloy_consensus::constants::ETH_TO_WEI; use alloy_primitives::{BlockNumber, U256}; use reth_chainspec::{EthereumHardfork, Hardforks}; -use reth_primitives::constants::ETH_TO_WEI; /// Calculates the base block reward. 
/// @@ -57,7 +57,7 @@ pub fn base_block_reward_pre_merge(chain_spec: impl Hardforks, block_number: Blo /// ``` /// # use reth_chainspec::MAINNET; /// # use reth_consensus_common::calc::{base_block_reward, block_reward}; -/// # use reth_primitives::constants::ETH_TO_WEI; +/// # use alloy_consensus::constants::ETH_TO_WEI; /// # use alloy_primitives::U256; /// # /// // This is block 126 on mainnet. diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index df66a00d1df..37b6138e5d4 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,23 +1,23 @@ //! Collection of methods for block validation. -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_consensus::ConsensusError; -use reth_primitives::{ - constants::{ - eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, - MAXIMUM_EXTRA_DATA_SIZE, - }, - EthereumHardfork, GotExpected, Header, SealedBlock, SealedHeader, +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader, EMPTY_OMMER_ROOT_HASH}; +use alloy_eips::{ + calc_next_block_base_fee, + eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, }; +use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; +use reth_consensus::ConsensusError; +use reth_primitives::SealedBlock; +use reth_primitives_traits::{BlockBody, GotExpected, SealedHeader}; use revm_primitives::calc_excess_blob_gas; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. #[inline] -pub const fn validate_header_gas(header: &Header) -> Result<(), ConsensusError> { - if header.gas_used > header.gas_limit { +pub fn validate_header_gas(header: &H) -> Result<(), ConsensusError> { + if header.gas_used() > header.gas_limit() { return Err(ConsensusError::HeaderGasUsedExceedsGasLimit { - gas_used: header.gas_used, - gas_limit: header.gas_limit, + gas_used: header.gas_used(), + gas_limit: header.gas_limit(), }) } Ok(()) @@ -25,12 +25,12 @@ pub const fn validate_header_gas(header: &Header) -> Result<(), ConsensusError> /// Ensure the EIP-1559 base fee is set if the London hardfork is active. 
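// Standalone sketch of the pre-merge reward schedule behind
// `base_block_reward`/`block_reward` (plain `u128` in place of `U256`;
// formula per the yellow paper): the beneficiary earns the era's base
// reward plus one 32nd of it for every included ommer.
const ETH_TO_WEI_SKETCH: u128 = 1_000_000_000_000_000_000;

fn block_reward_sketch(base_reward: u128, ommers: usize) -> u128 {
    base_reward + (base_reward >> 5) * ommers as u128
}

fn main() {
    let base = 5 * ETH_TO_WEI_SKETCH; // Frontier-era base reward of 5 ETH
    // One included ommer adds base/32 on top of the base reward.
    assert_eq!(block_reward_sketch(base, 1), base + base / 32);
}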
#[inline] -pub fn validate_header_base_fee( - header: &Header, +pub fn validate_header_base_fee( + header: &H, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { - if chain_spec.is_fork_active_at_block(EthereumHardfork::London, header.number) && - header.base_fee_per_gas.is_none() + if chain_spec.is_fork_active_at_block(EthereumHardfork::London, header.number()) && + header.base_fee_per_gas().is_none() { return Err(ConsensusError::BaseFeeMissing) } @@ -43,15 +43,16 @@ pub fn validate_header_base_fee( /// /// [EIP-4895]: https://eips.ethereum.org/EIPS/eip-4895 #[inline] -pub fn validate_shanghai_withdrawals(block: &SealedBlock) -> Result<(), ConsensusError> { - let withdrawals = - block.body.withdrawals.as_ref().ok_or(ConsensusError::BodyWithdrawalsMissing)?; - let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); +pub fn validate_shanghai_withdrawals( + block: &SealedBlock, +) -> Result<(), ConsensusError> { + let withdrawals = block.body.withdrawals().ok_or(ConsensusError::BodyWithdrawalsMissing)?; + let withdrawals_root = alloy_consensus::proofs::calculate_withdrawals_root(withdrawals); let header_withdrawals_root = - block.withdrawals_root.as_ref().ok_or(ConsensusError::WithdrawalsRootMissing)?; + block.withdrawals_root().ok_or(ConsensusError::WithdrawalsRootMissing)?; if withdrawals_root != *header_withdrawals_root { return Err(ConsensusError::BodyWithdrawalsRootDiff( - GotExpected { got: withdrawals_root, expected: *header_withdrawals_root }.into(), + GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), )); } Ok(()) @@ -63,11 +64,14 @@ pub fn validate_shanghai_withdrawals(block: &SealedBlock) -> Result<(), Consensu /// /// [EIP-4844]: https://eips.ethereum.org/EIPS/eip-4844 #[inline] -pub fn validate_cancun_gas(block: &SealedBlock) -> Result<(), ConsensusError> { +pub fn validate_cancun_gas( + block: &SealedBlock, +) -> Result<(), ConsensusError> { // Check that the blob gas used in the header matches the sum of the blob gas used by each // blob tx - let header_blob_gas_used = block.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)?; - let total_blob_gas = block.blob_gas_used(); + let header_blob_gas_used = + block.header().blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + let total_blob_gas = block.body.blob_gas_used(); if total_blob_gas != header_blob_gas_used { return Err(ConsensusError::BlobGasUsedDiff(GotExpected { got: header_blob_gas_used, @@ -77,21 +81,49 @@ pub fn validate_cancun_gas(block: &SealedBlock) -> Result<(), ConsensusError> { Ok(()) } -/// Validate that requests root is present if Prague is active. +/// Ensures the block response data matches the header. 
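// The withdrawals check in `validate_body_against_header` further below
// reduces to a three-way `Option` match; a minimal sketch with `u64`
// standing in for the root hash type:
fn check_withdrawals_roots(
    header_root: Option<u64>,
    body_root: Option<u64>,
) -> Result<(), &'static str> {
    match (header_root, body_root) {
        (Some(h), Some(b)) if h == b => Ok(()),
        (Some(_), Some(_)) => Err("withdrawals root mismatch"),
        // both absent: the fork is assumed inactive, nothing to compare
        (None, None) => Ok(()),
        _ => Err("unexpected withdrawals root"),
    }
}

fn main() {
    assert!(check_withdrawals_roots(Some(7), Some(7)).is_ok());
    assert!(check_withdrawals_roots(Some(7), None).is_err());
}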
/// -/// See [EIP-7685]: General purpose execution layer requests -/// -/// [EIP-7685]: https://eips.ethereum.org/EIPS/eip-7685 -#[inline] -pub fn validate_prague_request(block: &SealedBlock) -> Result<(), ConsensusError> { - let requests_root = - block.body.calculate_requests_root().ok_or(ConsensusError::BodyRequestsMissing)?; - let header_requests_root = block.requests_root.ok_or(ConsensusError::RequestsRootMissing)?; - if requests_root != *header_requests_root { - return Err(ConsensusError::BodyRequestsRootDiff( - GotExpected { got: requests_root, expected: header_requests_root }.into(), - )); +/// This ensures the body response items match the header's hashes: +/// - ommer hash +/// - transaction root +/// - withdrawals root +pub fn validate_body_against_header(body: &B, header: &H) -> Result<(), ConsensusError> +where + B: BlockBody, + H: BlockHeader, +{ + let ommers_hash = body.calculate_ommers_root(); + if Some(header.ommers_hash()) != ommers_hash { + return Err(ConsensusError::BodyOmmersHashDiff( + GotExpected { + got: ommers_hash.unwrap_or(EMPTY_OMMER_ROOT_HASH), + expected: header.ommers_hash(), + } + .into(), + )) + } + + let tx_root = body.calculate_tx_root(); + if header.transactions_root() != tx_root { + return Err(ConsensusError::BodyTransactionRootDiff( + GotExpected { got: tx_root, expected: header.transactions_root() }.into(), + )) + } + + match (header.withdrawals_root(), body.calculate_withdrawals_root()) { + (Some(header_withdrawals_root), Some(withdrawals_root)) => { + if withdrawals_root != header_withdrawals_root { + return Err(ConsensusError::BodyWithdrawalsRootDiff( + GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), + )) + } + } + (None, None) => { + // this is ok because we assume the fork is not active in this case + } + _ => return Err(ConsensusError::WithdrawalsRootUnexpected), } + Ok(()) } @@ -101,15 +133,24 @@ pub fn validate_prague_request(block: &SealedBlock) -> Result<(), ConsensusError /// - Compares the transactions root in the block header to the block body /// - Pre-execution transaction validation /// - (Optionally) Compares the receipts root in the block header to the block body -pub fn validate_block_pre_execution( - block: &SealedBlock, +pub fn validate_block_pre_execution( + block: &SealedBlock, chain_spec: &ChainSpec, -) -> Result<(), ConsensusError> { +) -> Result<(), ConsensusError> +where + H: BlockHeader, + B: BlockBody, + ChainSpec: EthereumHardforks, +{ // Check ommers hash let ommers_hash = block.body.calculate_ommers_root(); - if block.header.ommers_hash != ommers_hash { + if Some(block.header.ommers_hash()) != ommers_hash { return Err(ConsensusError::BodyOmmersHashDiff( - GotExpected { got: ommers_hash, expected: block.header.ommers_hash }.into(), + GotExpected { + got: ommers_hash.unwrap_or(EMPTY_OMMER_ROOT_HASH), + expected: block.header.ommers_hash(), + } + .into(), )) } @@ -119,18 +160,14 @@ pub fn validate_block_pre_execution( } // EIP-4895: Beacon chain push withdrawals as operations - if chain_spec.is_shanghai_active_at_timestamp(block.timestamp) { + if chain_spec.is_shanghai_active_at_timestamp(block.timestamp()) { validate_shanghai_withdrawals(block)?; } - if chain_spec.is_cancun_active_at_timestamp(block.timestamp) { + if chain_spec.is_cancun_active_at_timestamp(block.timestamp()) { validate_cancun_gas(block)?; } - if chain_spec.is_prague_active_at_timestamp(block.timestamp) { - validate_prague_request(block)?; - } - Ok(()) } @@ -142,11 +179,11 @@ pub fn validate_block_pre_execution( /// * 
`blob_gas_used` is less than or equal to `MAX_DATA_GAS_PER_BLOCK` /// * `blob_gas_used` is a multiple of `DATA_GAS_PER_BLOB` /// * `excess_blob_gas` is a multiple of `DATA_GAS_PER_BLOB` -pub fn validate_4844_header_standalone(header: &Header) -> Result<(), ConsensusError> { - let blob_gas_used = header.blob_gas_used.ok_or(ConsensusError::BlobGasUsedMissing)?; - let excess_blob_gas = header.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)?; +pub fn validate_4844_header_standalone(header: &H) -> Result<(), ConsensusError> { + let blob_gas_used = header.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + let excess_blob_gas = header.excess_blob_gas().ok_or(ConsensusError::ExcessBlobGasMissing)?; - if header.parent_beacon_block_root.is_none() { + if header.parent_beacon_block_root().is_none() { return Err(ConsensusError::ParentBeaconBlockRootMissing) } @@ -181,8 +218,8 @@ pub fn validate_4844_header_standalone(header: &Header) -> Result<(), ConsensusE /// From yellow paper: extraData: An arbitrary byte array containing data relevant to this block. /// This must be 32 bytes or fewer; formally Hx. #[inline] -pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> { - let extradata_len = header.extra_data.len(); +pub fn validate_header_extradata(header: &H) -> Result<(), ConsensusError> { + let extradata_len = header.extra_data().len(); if extradata_len > MAXIMUM_EXTRA_DATA_SIZE { Err(ConsensusError::ExtraDataExceedsMax { len: extradata_len }) } else { @@ -195,21 +232,21 @@ pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> /// This function ensures that the header block number is sequential and that the hash of the parent /// header matches the parent hash in the header. #[inline] -pub fn validate_against_parent_hash_number( - header: &Header, - parent: &SealedHeader, +pub fn validate_against_parent_hash_number( + header: &H, + parent: &SealedHeader, ) -> Result<(), ConsensusError> { // Parent number is consistent. - if parent.number + 1 != header.number { + if parent.number() + 1 != header.number() { return Err(ConsensusError::ParentBlockNumberMismatch { - parent_block_number: parent.number, - block_number: header.number, + parent_block_number: parent.number(), + block_number: header.number(), }) } - if parent.hash() != header.parent_hash { + if parent.hash() != header.parent_hash() { return Err(ConsensusError::ParentHashMismatch( - GotExpected { got: header.parent_hash, expected: parent.hash() }.into(), + GotExpected { got: header.parent_hash(), expected: parent.hash() }.into(), )) } @@ -218,23 +255,30 @@ pub fn validate_against_parent_hash_number( /// Validates the base fee against the parent and EIP-1559 rules. 
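// Standalone sketch of the EIP-1559 rule that
// `validate_against_parent_eip1559_base_fee` enforces below (mainnet
// parameters hardcoded here; the real code takes them from the chain spec
// via `calc_next_block_base_fee`): the base fee moves by at most 1/8 toward
// balancing the parent's gas usage against the 50% target.
fn next_base_fee_sketch(gas_used: u64, gas_limit: u64, base_fee: u64) -> u64 {
    const ELASTICITY_MULTIPLIER: u64 = 2;
    const BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 8;
    let target = gas_limit / ELASTICITY_MULTIPLIER;
    if gas_used == target {
        return base_fee;
    }
    if gas_used > target {
        let delta = (base_fee as u128 * (gas_used - target) as u128
            / target as u128
            / BASE_FEE_MAX_CHANGE_DENOMINATOR) as u64;
        base_fee + delta.max(1)
    } else {
        let delta = (base_fee as u128 * (target - gas_used) as u128
            / target as u128
            / BASE_FEE_MAX_CHANGE_DENOMINATOR) as u64;
        base_fee.saturating_sub(delta)
    }
}

fn main() {
    // A full block (double the target) raises the base fee by 12.5%.
    assert_eq!(next_base_fee_sketch(30_000_000, 30_000_000, 1_000), 1_125);
}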
#[inline] -pub fn validate_against_parent_eip1559_base_fee( - header: &Header, - parent: &Header, +pub fn validate_against_parent_eip1559_base_fee< + H: BlockHeader, + ChainSpec: EthChainSpec + EthereumHardforks, +>( + header: &H, + parent: &H, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { - if chain_spec.fork(EthereumHardfork::London).active_at_block(header.number) { - let base_fee = header.base_fee_per_gas.ok_or(ConsensusError::BaseFeeMissing)?; + if chain_spec.fork(EthereumHardfork::London).active_at_block(header.number()) { + let base_fee = header.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; let expected_base_fee = - if chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number) { - reth_primitives::constants::EIP1559_INITIAL_BASE_FEE + if chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number()) { + alloy_eips::eip1559::INITIAL_BASE_FEE } else { // This BaseFeeMissing will not happen as previous blocks are checked to have // them. - parent - .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(header.timestamp)) - .ok_or(ConsensusError::BaseFeeMissing)? + let base_fee = parent.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; + calc_next_block_base_fee( + parent.gas_used(), + parent.gas_limit(), + base_fee, + chain_spec.base_fee_params_at_timestamp(header.timestamp()), + ) }; if expected_base_fee != base_fee { return Err(ConsensusError::BaseFeeDiff(GotExpected { @@ -249,14 +293,14 @@ pub fn validate_against_parent_eip1559_base_fee( + header: &H, + parent: &H, ) -> Result<(), ConsensusError> { - if header.timestamp <= parent.timestamp { + if header.timestamp() <= parent.timestamp() { return Err(ConsensusError::TimestampIsInPast { - parent_timestamp: parent.timestamp, - timestamp: header.timestamp, + parent_timestamp: parent.timestamp(), + timestamp: header.timestamp(), }) } Ok(()) @@ -266,9 +310,9 @@ pub const fn validate_against_parent_timestamp( /// ensures that the `blob_gas_used` and `excess_blob_gas` fields exist in the child header, and /// that the `excess_blob_gas` field matches the expected `excess_blob_gas` calculated from the /// parent header fields. -pub fn validate_against_parent_4844( - header: &Header, - parent: &Header, +pub fn validate_against_parent_4844( + header: &H, + parent: &H, ) -> Result<(), ConsensusError> { // From [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#header-extension): // @@ -276,13 +320,13 @@ pub fn validate_against_parent_4844( // > are evaluated as 0. // // This means in the first post-fork block, calc_excess_blob_gas will return 0. 
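// Standalone sketch of the EIP-4844 rule referenced in the comment above
// (Cancun-era constants; the real code calls `calc_excess_blob_gas` from
// `revm_primitives`): excess blob gas accumulates only when the parent
// block used more than the three-blob target, and decays otherwise.
const BLOB_GAS_PER_BLOB: u64 = 131_072; // 2^17
const TARGET_BLOB_GAS_PER_BLOCK: u64 = 3 * BLOB_GAS_PER_BLOB;

fn excess_blob_gas_sketch(parent_excess: u64, parent_used: u64) -> u64 {
    (parent_excess + parent_used).saturating_sub(TARGET_BLOB_GAS_PER_BLOCK)
}

fn main() {
    // First post-fork block: parent fields are treated as 0, so excess is 0.
    assert_eq!(excess_blob_gas_sketch(0, 0), 0);
    // Parent used 6 blobs against a 3-blob target: excess grows by 3 blobs.
    assert_eq!(
        excess_blob_gas_sketch(0, 6 * BLOB_GAS_PER_BLOB),
        3 * BLOB_GAS_PER_BLOB
    );
}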
- let parent_blob_gas_used = parent.blob_gas_used.unwrap_or(0); - let parent_excess_blob_gas = parent.excess_blob_gas.unwrap_or(0); + let parent_blob_gas_used = parent.blob_gas_used().unwrap_or(0); + let parent_excess_blob_gas = parent.excess_blob_gas().unwrap_or(0); - if header.blob_gas_used.is_none() { + if header.blob_gas_used().is_none() { return Err(ConsensusError::BlobGasUsedMissing) } - let excess_blob_gas = header.excess_blob_gas.ok_or(ConsensusError::ExcessBlobGasMissing)?; + let excess_blob_gas = header.excess_blob_gas().ok_or(ConsensusError::ExcessBlobGasMissing)?; let expected_excess_blob_gas = calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used); @@ -300,17 +344,19 @@ pub fn validate_against_parent_4844( #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{TxEip4844, EMPTY_ROOT_HASH}; + use alloy_consensus::{Header, TxEip4844, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; + use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, + }; use alloy_primitives::{ - hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, Parity, Sealable, U256, + hex_literal::hex, Address, BlockHash, BlockNumber, Bytes, PrimitiveSignature as Signature, + U256, }; use mockall::mock; use rand::Rng; use reth_chainspec::ChainSpecBuilder; - use reth_primitives::{ - proofs, Account, BlockBody, BlockHashOrNumber, Signature, Transaction, TransactionSigned, - Withdrawal, Withdrawals, - }; + use reth_primitives::{proofs, Account, BlockBody, Transaction, TransactionSigned}; use reth_storage_api::{ errors::provider::ProviderResult, AccountReader, HeaderProvider, WithdrawalsProvider, }; @@ -356,6 +402,8 @@ mod tests { } impl HeaderProvider for Provider { + type Header = Header; + fn is_known(&self, _block_hash: &BlockHash) -> ProviderResult { Ok(self.is_known) } @@ -429,9 +477,9 @@ mod tests { blob_versioned_hashes: std::iter::repeat_with(|| rng.gen()).take(num_blobs).collect(), }); - let signature = Signature::new(U256::default(), U256::default(), Parity::Parity(true)); + let signature = Signature::new(U256::default(), U256::default(), true); - TransactionSigned::from_transaction_and_signature(request, signature) + TransactionSigned::new_unhashed(request, signature) } /// got test block @@ -441,7 +489,7 @@ mod tests { let header = Header { parent_hash: hex!("859fad46e75d9be177c2584843501f2270c7e5231711e90848290d12d7c6dcdd").into(), - ommers_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").into(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: hex!("4675c7e5baafbffbca748158becba61ef3b0a263").into(), state_root: hex!("8337403406e368b3e40411138f4868f79f6d835825d55fd0c2f6e17b1a3948e9").into(), transactions_root: EMPTY_ROOT_HASH, @@ -460,7 +508,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None, + target_blobs_per_block: None, }; // size: 0x9b5 @@ -474,13 +523,10 @@ mod tests { let ommers = Vec::new(); let transactions = Vec::new(); - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - ( SealedBlock { - header: SealedHeader::new(header, seal), - body: BlockBody { transactions, ommers, withdrawals: None, requests: None }, + header: SealedHeader::seal(header), + body: BlockBody { transactions, ommers, withdrawals: None }, }, parent, ) @@ -498,15 +544,13 @@ mod tests { .collect(), ); - let sealed = Header { + let header = Header { withdrawals_root: Some(proofs::calculate_withdrawals_root(&withdrawals)), ..Default::default() - } - 
.seal_slow(); - let (header, seal) = sealed.into_parts(); + }; SealedBlock { - header: SealedHeader::new(header, seal), + header: SealedHeader::seal(header), body: BlockBody { withdrawals: Some(withdrawals), ..Default::default() }, } }; @@ -537,22 +581,19 @@ mod tests { // create a tx with 10 blobs let transaction = mock_blob_tx(1, 10); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(1337), withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), blob_gas_used: Some(1), transactions_root: proofs::calculate_transaction_root(&[transaction.clone()]), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); let body = BlockBody { transactions: vec![transaction], ommers: vec![], withdrawals: Some(Withdrawals::default()), - requests: None, }; let block = SealedBlock::new(header, body); diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index 660b43865ea..36356a4de36 100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -13,9 +13,12 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true +reth-primitives-traits.workspace = true # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true # misc auto_impl.workspace = true @@ -23,5 +26,16 @@ derive_more.workspace = true [features] default = ["std"] -std = [] -test-utils = [] +std = [ + "reth-primitives/std", + "reth-primitives-traits/std", + "alloy-primitives/std", + "alloy-eips/std", + "alloy-consensus/std", + "reth-primitives-traits/std", + "derive_more/std" +] +test-utils = [ + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils" +] diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 91b93c8a75e..ba1b1321e77 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -11,12 +11,15 @@ extern crate alloc; -use alloc::{fmt::Debug, vec::Vec}; +use alloc::{fmt::Debug, sync::Arc, vec::Vec}; +use alloy_consensus::Header; +use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; use reth_primitives::{ - constants::MINIMUM_GAS_LIMIT, BlockWithSenders, GotExpected, GotExpectedBoxed, Header, - InvalidTransactionError, Receipt, Request, SealedBlock, SealedHeader, + BlockBody, BlockWithSenders, EthPrimitives, GotExpected, GotExpectedBoxed, + InvalidTransactionError, NodePrimitives, Receipt, SealedBlock, SealedHeader, }; +use reth_primitives_traits::constants::MINIMUM_GAS_LIMIT; /// A consensus implementation that does nothing. pub mod noop; @@ -25,29 +28,71 @@ pub mod noop; /// test helpers for mocking consensus pub mod test_utils; -/// Post execution input passed to [`Consensus::validate_block_post_execution`]. +/// Post execution input passed to [`FullConsensus::validate_block_post_execution`]. #[derive(Debug)] -pub struct PostExecutionInput<'a> { +pub struct PostExecutionInput<'a, R = Receipt> { /// Receipts of the block. - pub receipts: &'a [Receipt], + pub receipts: &'a [R], /// EIP-7685 requests of the block. - pub requests: &'a [Request], + pub requests: &'a Requests, } -impl<'a> PostExecutionInput<'a> { +impl<'a, R> PostExecutionInput<'a, R> { /// Creates a new instance of `PostExecutionInput`. 
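The struct above is now generic over the receipt type, so non-Ethereum node primitives can reuse it. A self-contained sketch of that shape, using a stand-in `Requests` newtype rather than the real `alloy_eips::eip7685::Requests`, and a hypothetical OP-style receipt:

```rust
// Stand-in for `alloy_eips::eip7685::Requests` (assumption for illustration only).
struct Requests(Vec<Vec<u8>>);

// Receipts are borrowed as a generic `R` instead of the concrete `Receipt`.
struct PostExecutionInput<'a, R> {
    receipts: &'a [R],
    requests: &'a Requests,
}

impl<'a, R> PostExecutionInput<'a, R> {
    const fn new(receipts: &'a [R], requests: &'a Requests) -> Self {
        Self { receipts, requests }
    }
}

// Any receipt representation now fits, e.g. a hypothetical OP-stack receipt.
struct OpReceipt {
    success: bool,
}

fn main() {
    let receipts = vec![OpReceipt { success: true }];
    let requests = Requests(Vec::new());
    let input = PostExecutionInput::new(&receipts, &requests);
    assert_eq!(input.receipts.len(), 1);
    assert!(input.receipts[0].success);
    assert!(input.requests.0.is_empty());
}
```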
- pub const fn new(receipts: &'a [Receipt], requests: &'a [Request]) -> Self { + pub const fn new(receipts: &'a [R], requests: &'a Requests) -> Self { Self { receipts, requests } } } +/// [`Consensus`] implementation that knows the full node primitives and is able to validate a +/// block's execution outcome. +#[auto_impl::auto_impl(&, Arc)] +pub trait FullConsensus<N: NodePrimitives = EthPrimitives>: + AsConsensus<N::BlockHeader, N::BlockBody> +{ + /// Validate a block considering world state, i.e. things that can not be checked before + /// execution. + /// + /// See the Yellow Paper sections 4.3.2 "Holistic Validity". + /// + /// Note: validating blocks does not include other validations of the Consensus + fn validate_block_post_execution( + &self, + block: &BlockWithSenders<N::Block>, + input: PostExecutionInput<'_, N::Receipt>, + ) -> Result<(), ConsensusError>; +} + /// Consensus is a protocol that chooses canonical chain. #[auto_impl::auto_impl(&, Arc)] -pub trait Consensus: Debug + Send + Sync { +pub trait Consensus<H = Header, B = BlockBody>: AsHeaderValidator<H> { + /// Ensures that body field values match the header. + fn validate_body_against_header( + &self, + body: &B, + header: &SealedHeader<H>, + ) -> Result<(), ConsensusError>; + + /// Validate a block disregarding world state, i.e. things that can be checked before sender + /// recovery and execution. + /// + /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and + /// 11.1 "Ommer Validation". + /// + /// **This should not be called for the genesis block**. + /// + /// Note: validating blocks does not include other validations of the Consensus + fn validate_block_pre_execution(&self, block: &SealedBlock<H, B>) + -> Result<(), ConsensusError>; +} + +/// HeaderValidator is a protocol that validates headers and their relationships. +#[auto_impl::auto_impl(&, Arc)] +pub trait HeaderValidator<H = Header>: Debug + Send + Sync { /// Validate if header is correct and follows consensus specification. /// /// This is called on standalone header to check if all hashes are correct. - fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError>; + fn validate_header(&self, header: &SealedHeader<H>) -> Result<(), ConsensusError>; /// Validate that the header information regarding parent are correct. /// This checks the block number, timestamp, basefee and gas limit increment. @@ -57,11 +102,12 @@ pub trait Consensus: Debug + Send + Sync { /// /// **This should not be called for the genesis block**. /// - /// Note: Validating header against its parent does not include other Consensus validations. + /// Note: Validating header against its parent does not include other HeaderValidator + /// validations. fn validate_header_against_parent( &self, - header: &SealedHeader, - parent: &SealedHeader, + header: &SealedHeader<H>, + parent: &SealedHeader<H>, ) -> Result<(), ConsensusError>; /// Validates the given headers @@ -70,7 +116,13 @@ pub trait Consensus: Debug + Send + Sync { /// on its own and valid against its parent. 
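The `AsHeaderValidator`/`AsConsensus` helpers that follow exist because `Arc<dyn Consensus>` cannot be coerced to `Arc<dyn HeaderValidator>` directly; the conversion has to happen while `Self` is still a concrete, sized type. A minimal standalone sketch of the trick, with toy traits standing in for the real ones:

```rust
use std::sync::Arc;

trait HeaderValidator {
    fn validate(&self) -> bool;
}

trait Consensus: HeaderValidator {}

// The helper performs the `Arc<Self>` -> `Arc<dyn HeaderValidator>` unsized
// coercion inside a blanket impl, where `Self: Sized` still holds.
trait AsHeaderValidator: HeaderValidator {
    fn as_header_validator<'a>(self: Arc<Self>) -> Arc<dyn HeaderValidator + 'a>
    where
        Self: 'a;
}

impl<T: HeaderValidator> AsHeaderValidator for T {
    fn as_header_validator<'a>(self: Arc<Self>) -> Arc<dyn HeaderValidator + 'a>
    where
        Self: 'a,
    {
        self // plain return: Arc<T> coerces to Arc<dyn HeaderValidator>
    }
}

struct Noop;
impl HeaderValidator for Noop {
    fn validate(&self) -> bool {
        true
    }
}
impl Consensus for Noop {}

fn main() {
    let consensus: Arc<Noop> = Arc::new(Noop);
    let validator: Arc<dyn HeaderValidator> = consensus.as_header_validator();
    assert!(validator.validate());
}
```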
/// /// Note: this expects that the headers are in natural order (ascending block number) - fn validate_header_range(&self, headers: &[SealedHeader]) -> Result<(), HeaderConsensusError> { + fn validate_header_range( + &self, + headers: &[SealedHeader], + ) -> Result<(), HeaderConsensusError> + where + H: Clone, + { if let Some((initial_header, remaining_headers)) = headers.split_first() { self.validate_header(initial_header) .map_err(|e| HeaderConsensusError(e, initial_header.clone()))?; @@ -90,35 +142,46 @@ pub trait Consensus: Debug + Send + Sync { /// /// Some consensus engines may want to do additional checks here. /// - /// Note: validating headers with TD does not include other Consensus validation. + /// Note: validating headers with TD does not include other HeaderValidator validation. fn validate_header_with_total_difficulty( &self, - header: &Header, + header: &H, total_difficulty: U256, ) -> Result<(), ConsensusError>; +} - /// Validate a block disregarding world state, i.e. things that can be checked before sender - /// recovery and execution. - /// - /// See the Yellow Paper sections 4.3.2 "Holistic Validity", 4.3.4 "Block Header Validity", and - /// 11.1 "Ommer Validation". - /// - /// **This should not be called for the genesis block**. - /// - /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError>; +/// Helper trait to cast `Arc` to `Arc` +pub trait AsHeaderValidator: HeaderValidator { + /// Converts the [`Arc`] of self to [`Arc`] of [`HeaderValidator`] + fn as_header_validator<'a>(self: Arc) -> Arc + 'a> + where + Self: 'a; +} - /// Validate a block considering world state, i.e. things that can not be checked before - /// execution. - /// - /// See the Yellow Paper sections 4.3.2 "Holistic Validity". - /// - /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError>; +impl, H> AsHeaderValidator for T { + fn as_header_validator<'a>(self: Arc) -> Arc + 'a> + where + Self: 'a, + { + self + } +} + +/// Helper trait to cast `Arc` to `Arc` +pub trait AsConsensus: Consensus { + /// Converts the [`Arc`] of self to [`Arc`] of [`HeaderValidator`] + fn as_consensus<'a>(self: Arc) -> Arc + 'a> + where + Self: 'a; +} + +impl, H, B> AsConsensus for T { + fn as_consensus<'a>(self: Arc) -> Arc + 'a> + where + Self: 'a, + { + self + } } /// Consensus Errors @@ -170,10 +233,10 @@ pub enum ConsensusError { #[display("mismatched block withdrawals root: {_0}")] BodyWithdrawalsRootDiff(GotExpectedBoxed), - /// Error when the requests root in the block is different from the expected requests - /// root. - #[display("mismatched block requests root: {_0}")] - BodyRequestsRootDiff(GotExpectedBoxed), + /// Error when the requests hash in the block is different from the expected requests + /// hash. + #[display("mismatched block requests hash: {_0}")] + BodyRequestsHashDiff(GotExpectedBoxed), /// Error when a block with a specific hash and number is already known. #[display("block with [hash={hash}, number={number}] is already known")] @@ -248,17 +311,17 @@ pub enum ConsensusError { #[display("missing withdrawals root")] WithdrawalsRootMissing, - /// Error when the requests root is missing. - #[display("missing requests root")] - RequestsRootMissing, + /// Error when the requests hash is missing. 
+ #[display("missing requests hash")] + RequestsHashMissing, /// Error when an unexpected withdrawals root is encountered. #[display("unexpected withdrawals root")] WithdrawalsRootUnexpected, - /// Error when an unexpected requests root is encountered. - #[display("unexpected requests root")] - RequestsRootUnexpected, + /// Error when an unexpected requests hash is encountered. + #[display("unexpected requests hash")] + RequestsHashUnexpected, /// Error when withdrawals are missing. #[display("missing withdrawals")] @@ -406,4 +469,4 @@ impl From for ConsensusError { /// `HeaderConsensusError` combines a `ConsensusError` with the `SealedHeader` it relates to. #[derive(derive_more::Display, derive_more::Error, Debug)] #[display("Consensus error: {_0}, Invalid header: {_1:?}")] -pub struct HeaderConsensusError(ConsensusError, SealedHeader); +pub struct HeaderConsensusError(ConsensusError, SealedHeader); diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index 53bdb72afb2..c56e9867a25 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -1,41 +1,56 @@ -use crate::{Consensus, ConsensusError, PostExecutionInput}; +use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput}; use alloy_primitives::U256; -use reth_primitives::{BlockWithSenders, Header, SealedBlock, SealedHeader}; +use reth_primitives::{BlockWithSenders, NodePrimitives, SealedBlock, SealedHeader}; /// A Consensus implementation that does nothing. #[derive(Debug, Copy, Clone, Default)] #[non_exhaustive] pub struct NoopConsensus; -impl Consensus for NoopConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { +impl HeaderValidator for NoopConsensus { + fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { Ok(()) } fn validate_header_against_parent( &self, - _header: &SealedHeader, - _parent: &SealedHeader, + _header: &SealedHeader, + _parent: &SealedHeader, ) -> Result<(), ConsensusError> { Ok(()) } fn validate_header_with_total_difficulty( &self, - _header: &Header, + _header: &H, _total_difficulty: U256, ) -> Result<(), ConsensusError> { Ok(()) } +} - fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { +impl Consensus for NoopConsensus { + fn validate_body_against_header( + &self, + _body: &B, + _header: &SealedHeader, + ) -> Result<(), ConsensusError> { Ok(()) } + fn validate_block_pre_execution( + &self, + _block: &SealedBlock, + ) -> Result<(), ConsensusError> { + Ok(()) + } +} + +impl FullConsensus for NoopConsensus { fn validate_block_post_execution( &self, - _block: &BlockWithSenders, - _input: PostExecutionInput<'_>, + _block: &BlockWithSenders, + _input: PostExecutionInput<'_, N::Receipt>, ) -> Result<(), ConsensusError> { Ok(()) } diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index 43694720917..082c8ca8bb5 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -1,18 +1,25 @@ -use crate::{Consensus, ConsensusError, PostExecutionInput}; +use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput}; use alloy_primitives::U256; use core::sync::atomic::{AtomicBool, Ordering}; -use reth_primitives::{BlockWithSenders, Header, SealedBlock, SealedHeader}; +use reth_primitives::{BlockWithSenders, NodePrimitives, SealedBlock, SealedHeader}; /// Consensus engine 
implementation for testing #[derive(Debug)] pub struct TestConsensus { /// Flag whether the header validation should purposefully fail fail_validation: AtomicBool, + /// Separate flag for setting whether `validate_body_against_header` should fail. It is needed + /// for testing networking logic for which the body failing this check is getting completely + /// rejected while more high-level failures are handled by the sync logic. + fail_body_against_header: AtomicBool, } impl Default for TestConsensus { fn default() -> Self { - Self { fail_validation: AtomicBool::new(false) } + Self { + fail_validation: AtomicBool::new(false), + fail_body_against_header: AtomicBool::new(false), + } } } @@ -24,35 +31,51 @@ impl TestConsensus { /// Update the validation flag. pub fn set_fail_validation(&self, val: bool) { - self.fail_validation.store(val, Ordering::SeqCst) + self.fail_validation.store(val, Ordering::SeqCst); + self.fail_body_against_header.store(val, Ordering::SeqCst); + } + + /// Returns the body validation flag. + pub fn fail_body_against_header(&self) -> bool { + self.fail_body_against_header.load(Ordering::SeqCst) + } + + /// Update the body validation flag. + pub fn set_fail_body_against_header(&self, val: bool) { + self.fail_body_against_header.store(val, Ordering::SeqCst); } } -impl Consensus for TestConsensus { - fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { +impl FullConsensus for TestConsensus { + fn validate_block_post_execution( + &self, + _block: &BlockWithSenders, + _input: PostExecutionInput<'_, N::Receipt>, + ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } } +} - fn validate_header_against_parent( +impl Consensus for TestConsensus { + fn validate_body_against_header( &self, - _header: &SealedHeader, - _parent: &SealedHeader, + _body: &B, + _header: &SealedHeader, ) -> Result<(), ConsensusError> { - if self.fail_validation() { + if self.fail_body_against_header() { Err(ConsensusError::BaseFeeMissing) } else { Ok(()) } } - fn validate_header_with_total_difficulty( + fn validate_block_pre_execution( &self, - _header: &Header, - _total_difficulty: U256, + _block: &SealedBlock, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) @@ -60,8 +83,10 @@ impl Consensus for TestConsensus { Ok(()) } } +} - fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { +impl HeaderValidator for TestConsensus { + fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { @@ -69,10 +94,22 @@ impl Consensus for TestConsensus { } } - fn validate_block_post_execution( + fn validate_header_against_parent( &self, - _block: &BlockWithSenders, - _input: PostExecutionInput<'_>, + _header: &SealedHeader, + _parent: &SealedHeader, + ) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } + + fn validate_header_with_total_difficulty( + &self, + _header: &H, + _total_difficulty: U256, ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) diff --git a/crates/consensus/debug-client/Cargo.toml b/crates/consensus/debug-client/Cargo.toml index c37beef1074..18e7aead306 100644 --- a/crates/consensus/debug-client/Cargo.toml +++ b/crates/consensus/debug-client/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth 
reth-node-api.workspace = true -reth-rpc-api.workspace = true +reth-rpc-api = { workspace = true, features = ["client"] } reth-rpc-builder.workspace = true reth-tracing.workspace = true @@ -21,7 +21,7 @@ reth-tracing.workspace = true alloy-consensus = { workspace = true, features = ["serde"] } alloy-eips.workspace = true alloy-provider = { workspace = true, features = ["ws"] } -alloy-rpc-types.workspace = true +alloy-rpc-types-eth.workspace = true alloy-rpc-types-engine.workspace = true alloy-primitives.workspace = true diff --git a/crates/consensus/debug-client/src/client.rs b/crates/consensus/debug-client/src/client.rs index a6a59a6a380..0e2a50370b8 100644 --- a/crates/consensus/debug-client/src/client.rs +++ b/crates/consensus/debug-client/src/client.rs @@ -1,8 +1,8 @@ -use alloy_consensus::TxEnvelope; +use alloy_consensus::Transaction; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; -use alloy_rpc_types::{Block, BlockTransactions}; use alloy_rpc_types_engine::{ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3}; +use alloy_rpc_types_eth::{Block, BlockTransactions}; use reth_node_api::EngineTypes; use reth_rpc_builder::auth::AuthServerHandle; use reth_tracing::tracing::warn; @@ -184,18 +184,19 @@ pub fn block_to_execution_payload_v3(block: Block) -> ExecutionNewPayload { // https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md#specification let versioned_hashes = transactions .iter() - .flat_map(|tx| tx.blob_versioned_hashes.clone().unwrap_or_default()) + .flat_map(|tx| tx.blob_versioned_hashes().unwrap_or_default()) + .copied() .collect(); let payload: ExecutionPayloadV3 = ExecutionPayloadV3 { payload_inner: ExecutionPayloadV2 { payload_inner: ExecutionPayloadV1 { parent_hash: block.header.parent_hash, - fee_recipient: block.header.miner, + fee_recipient: block.header.beneficiary, state_root: block.header.state_root, receipts_root: block.header.receipts_root, logs_bloom: block.header.logs_bloom, - prev_randao: block.header.mix_hash.unwrap(), + prev_randao: block.header.mix_hash, block_number: block.header.number, gas_limit: block.header.gas_limit, gas_used: block.header.gas_used, @@ -205,15 +206,10 @@ pub fn block_to_execution_payload_v3(block: Block) -> ExecutionNewPayload { block_hash: block.header.hash, transactions: transactions .into_iter() - .map(|tx| { - let envelope: TxEnvelope = tx.try_into().unwrap(); - let mut buffer: Vec = vec![]; - envelope.encode_2718(&mut buffer); - buffer.into() - }) + .map(|tx| tx.inner.encoded_2718().into()) .collect(), }, - withdrawals: block.withdrawals.clone().unwrap_or_default(), + withdrawals: block.withdrawals.clone().unwrap_or_default().into_inner(), }, blob_gas_used: block.header.blob_gas_used.unwrap(), excess_blob_gas: block.header.excess_blob_gas.unwrap(), diff --git a/crates/consensus/debug-client/src/providers/etherscan.rs b/crates/consensus/debug-client/src/providers/etherscan.rs index 59b402f3e78..d3167b6cfab 100644 --- a/crates/consensus/debug-client/src/providers/etherscan.rs +++ b/crates/consensus/debug-client/src/providers/etherscan.rs @@ -1,6 +1,6 @@ use crate::BlockProvider; use alloy_eips::BlockNumberOrTag; -use alloy_rpc_types::Block; +use alloy_rpc_types_eth::Block; use reqwest::Client; use reth_tracing::tracing::warn; use serde::Deserialize; diff --git a/crates/consensus/debug-client/src/providers/rpc.rs b/crates/consensus/debug-client/src/providers/rpc.rs index a8cd15c105a..787515f1a60 100644 --- a/crates/consensus/debug-client/src/providers/rpc.rs +++ 
b/crates/consensus/debug-client/src/providers/rpc.rs @@ -1,7 +1,7 @@ use crate::BlockProvider; use alloy_eips::BlockNumberOrTag; use alloy_provider::{Provider, ProviderBuilder}; -use alloy_rpc_types::{Block, BlockTransactionsKind}; +use alloy_rpc_types_eth::{Block, BlockTransactionsKind}; use futures::StreamExt; use tokio::sync::mpsc::Sender; @@ -30,9 +30,9 @@ impl BlockProvider for RpcBlockProvider { .expect("failed to subscribe on new blocks") .into_stream(); - while let Some(block) = stream.next().await { + while let Some(header) = stream.next().await { let full_block = ws_provider - .get_block_by_hash(block.header.hash, BlockTransactionsKind::Full) + .get_block_by_hash(header.hash, BlockTransactionsKind::Full) .await .expect("failed to get block") .expect("block not found"); @@ -49,7 +49,7 @@ impl BlockProvider for RpcBlockProvider { .await .expect("failed to create WS provider"); let block: Block = ws_provider - .get_block_by_number(BlockNumberOrTag::Number(block_number), true) + .get_block_by_number(BlockNumberOrTag::Number(block_number), true.into()) .await? .ok_or_else(|| eyre::eyre!("block not found by number {}", block_number))?; Ok(block) diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 2742d704054..7cb8516816b 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -11,31 +11,40 @@ repository.workspace = true workspace = true [dependencies] -reth.workspace = true reth-chainspec.workspace = true -reth-engine-local.workspace = true -reth-primitives.workspace = true reth-tracing.workspace = true reth-db = { workspace = true, features = ["test-utils"] } -reth-rpc.workspace = true +reth-network-api.workspace = true reth-rpc-layer.workspace = true +reth-rpc-server-types.workspace = true +reth-rpc-eth-api.workspace = true +reth-rpc-api = { workspace = true, features = ["client"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true +reth-primitives.workspace = true reth-provider.workspace = true +reth-network.workspace = true +reth-node-api.workspace = true +reth-node-core.workspace = true reth-node-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-stages-types.workspace = true reth-network-peers.workspace = true -reth-node-ethereum.workspace = true -reth-rpc-types-compat.workspace = true +reth-engine-local.workspace = true +reth-tasks.workspace = true + +# currently need to enable this for workspace level +reth-optimism-primitives = { workspace = true, features = ["arbitrary"] } # rpc -jsonrpsee-types.workspace = true jsonrpsee.workspace = true +url.workspace = true # ethereum alloy-primitives.workspace = true alloy-eips.workspace = true +alloy-rlp.workspace = true op-alloy-rpc-types-engine.workspace = true futures-util.workspace = true @@ -45,7 +54,9 @@ tokio-stream.workspace = true serde_json.workspace = true alloy-signer.workspace = true alloy-signer-local = { workspace = true, features = ["mnemonic"] } -alloy-rpc-types.workspace = true +alloy-rpc-types-eth.workspace = true +alloy-rpc-types-engine.workspace = true alloy-network.workspace = true alloy-consensus = { workspace = true, features = ["kzg"] } -tracing.workspace = true \ No newline at end of file +tracing.workspace = true +derive_more.workspace = true diff --git a/crates/e2e-test-utils/src/engine_api.rs b/crates/e2e-test-utils/src/engine_api.rs index 1b0ff9b54e7..8c0f03bafd3 100644 --- 
a/crates/e2e-test-utils/src/engine_api.rs +++ b/crates/e2e-test-utils/src/engine_api.rs @@ -1,35 +1,35 @@ use crate::traits::PayloadEnvelopeExt; use alloy_primitives::B256; +use alloy_rpc_types_engine::{ForkchoiceState, PayloadStatusEnum}; use jsonrpsee::{ core::client::ClientT, http_client::{transport::HttpBackend, HttpClient}, }; -use reth::{ - api::{EngineTypes, PayloadBuilderAttributes}, - providers::CanonStateNotificationStream, - rpc::{ - api::EngineApiClient, - types::engine::{ForkchoiceState, PayloadStatusEnum}, - }, -}; +use reth_chainspec::EthereumHardforks; +use reth_node_api::EngineTypes; +use reth_node_builder::BuiltPayload; use reth_payload_builder::PayloadId; +use reth_payload_primitives::PayloadBuilderAttributes; +use reth_provider::CanonStateNotificationStream; +use reth_rpc_api::EngineApiClient; use reth_rpc_layer::AuthClientService; -use std::marker::PhantomData; +use std::{marker::PhantomData, sync::Arc}; /// Helper for engine api operations #[derive(Debug)] -pub struct EngineApiTestContext { +pub struct EngineApiTestContext { + pub chain_spec: Arc, pub canonical_stream: CanonStateNotificationStream, pub engine_api_client: HttpClient>, pub _marker: PhantomData, } -impl EngineApiTestContext { +impl EngineApiTestContext { /// Retrieves a v3 payload from the engine api pub async fn get_payload_v3( &self, payload_id: PayloadId, - ) -> eyre::Result { + ) -> eyre::Result { Ok(EngineApiClient::::get_payload_v3(&self.engine_api_client, payload_id).await?) } @@ -47,24 +47,40 @@ impl EngineApiTestContext { payload: E::BuiltPayload, payload_builder_attributes: E::PayloadBuilderAttributes, expected_status: PayloadStatusEnum, - versioned_hashes: Vec, ) -> eyre::Result where - E::ExecutionPayloadV3: From + PayloadEnvelopeExt, + E::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, + E::ExecutionPayloadEnvelopeV4: From + PayloadEnvelopeExt, { - // setup payload for submission - let envelope_v3: ::ExecutionPayloadV3 = payload.into(); - + let versioned_hashes = + payload.block().blob_versioned_hashes_iter().copied().collect::>(); // submit payload to engine api - let submission = EngineApiClient::::new_payload_v3( - &self.engine_api_client, - envelope_v3.execution_payload(), - versioned_hashes, - payload_builder_attributes.parent_beacon_block_root().unwrap(), - ) - .await?; + let submission = if self + .chain_spec + .is_prague_active_at_timestamp(payload_builder_attributes.timestamp()) + { + let requests = payload.requests().unwrap(); + let envelope: ::ExecutionPayloadEnvelopeV4 = payload.into(); + EngineApiClient::::new_payload_v4( + &self.engine_api_client, + envelope.execution_payload(), + versioned_hashes, + payload_builder_attributes.parent_beacon_block_root().unwrap(), + requests, + ) + .await? + } else { + let envelope: ::ExecutionPayloadEnvelopeV3 = payload.into(); + EngineApiClient::::new_payload_v3( + &self.engine_api_client, + envelope.execution_payload(), + versioned_hashes, + payload_builder_attributes.parent_beacon_block_root().unwrap(), + ) + .await? + }; - assert_eq!(submission.status, expected_status); + assert_eq!(submission.status.as_str(), expected_status.as_str()); Ok(submission.latest_valid_hash.unwrap_or_default()) } diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 48e56910e6c..72d912d6b54 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -1,21 +1,26 @@ //! Utilities for end-to-end tests. 
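Note that `submit_payload` above now collects the blob versioned hashes from the built payload itself instead of taking them as a parameter. For reference, EIP-4844 derives a versioned hash by replacing the first byte of the SHA-256 of the KZG commitment with the version tag `0x01`; a hedged sketch of that derivation, assuming the `sha2` crate:

```rust
use sha2::{Digest, Sha256};

const VERSIONED_HASH_VERSION_KZG: u8 = 0x01;

// versioned_hash = 0x01 ++ sha256(commitment)[1..32], per EIP-4844.
fn kzg_to_versioned_hash(commitment: &[u8; 48]) -> [u8; 32] {
    let mut hash: [u8; 32] = Sha256::digest(commitment).into();
    hash[0] = VERSIONED_HASH_VERSION_KZG;
    hash
}

fn main() {
    let commitment = [0u8; 48]; // placeholder commitment
    assert_eq!(kzg_to_versioned_hash(&commitment)[0], VERSIONED_HASH_VERSION_KZG);
}
```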
-use std::sync::Arc; - use node::NodeTestContext; -use reth::{ - args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}, - builder::{NodeBuilder, NodeConfig, NodeHandle}, - network::PeersHandleProvider, - tasks::TaskManager, -}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::EthChainSpec; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; +use reth_engine_local::LocalPayloadAttributesBuilder; +use reth_network_api::test_utils::PeersHandleProvider; +use reth_node_api::EngineValidator; use reth_node_builder::{ - components::NodeComponentsBuilder, rpc::RethRpcAddOns, FullNodeTypesAdapter, Node, NodeAdapter, - NodeComponents, NodeTypesWithDBAdapter, NodeTypesWithEngine, RethFullAdapter, + components::NodeComponentsBuilder, + rpc::{EngineValidatorAddOn, RethRpcAddOns}, + EngineNodeLauncher, FullNodeTypesAdapter, Node, NodeAdapter, NodeBuilder, NodeComponents, + NodeConfig, NodeHandle, NodeTypesWithDBAdapter, NodeTypesWithEngine, PayloadAttributesBuilder, + PayloadTypes, +}; +use reth_node_core::args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}; +use reth_primitives::EthPrimitives; +use reth_provider::providers::{ + BlockchainProvider, BlockchainProvider2, NodeTypesForProvider, NodeTypesForTree, }; -use reth_provider::providers::BlockchainProvider; +use reth_rpc_server_types::RpcModuleSelection; +use reth_tasks::TaskManager; +use std::sync::Arc; use tracing::{span, Level}; use wallet::Wallet; @@ -47,9 +52,10 @@ pub async fn setup( num_nodes: usize, chain_spec: Arc, is_dev: bool, -) -> eyre::Result<(Vec>, TaskManager, Wallet)> + attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, +) -> eyre::Result<(Vec>, TaskManager, Wallet)> where - N: Default + Node> + NodeTypesWithEngine, + N: Default + Node> + NodeTypesForTree + NodeTypesWithEngine, N::ComponentsBuilder: NodeComponentsBuilder< TmpNodeAdapter, Components: NodeComponents, Network: PeersHandleProvider>, @@ -82,7 +88,103 @@ where .launch() .await?; - let mut node = NodeTestContext::new(node).await?; + let mut node = NodeTestContext::new(node, attributes_generator).await?; + + // Connect each node in a chain. + if let Some(previous_node) = nodes.last_mut() { + previous_node.connect(&mut node).await; + } + + // Connect last node with the first if there are more than two + if idx + 1 == num_nodes && num_nodes > 2 { + if let Some(first_node) = nodes.first_mut() { + node.connect(first_node).await; + } + } + + nodes.push(node); + } + + Ok((nodes, tasks, Wallet::default().with_chain_id(chain_spec.chain().into()))) +} + +/// Creates the initial setup with `num_nodes` started and interconnected. 
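Both `setup` and the `setup_engine` variant below now take the payload attributes generator as an argument. A hedged sketch of a caller, modeled on reth's Ethereum e2e tests; the import paths, `EthereumNode`, and the exact `PayloadAttributes` field set (including the blob-count fields this diff adds elsewhere) are assumptions:

```rust
use alloy_primitives::{Address, B256};
use alloy_rpc_types_engine::PayloadAttributes;
use reth_ethereum_engine_primitives::EthPayloadBuilderAttributes;

// Generator handed to `setup`: builds attributes for a given timestamp.
fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttributes {
    let attributes = PayloadAttributes {
        timestamp,
        prev_randao: B256::ZERO,
        suggested_fee_recipient: Address::ZERO,
        withdrawals: Some(vec![]),
        parent_beacon_block_root: Some(B256::ZERO),
        target_blobs_per_block: None,
        max_blobs_per_block: None,
    };
    // The parent hash is a placeholder for a dev-chain start.
    EthPayloadBuilderAttributes::new(B256::ZERO, attributes)
}

// Hypothetical call site inside a test body:
// let (mut nodes, _tasks, wallet) =
//     setup::<EthereumNode>(2, chain_spec, false, eth_payload_attributes).await?;
```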
+pub async fn setup_engine( + num_nodes: usize, + chain_spec: Arc, + is_dev: bool, + attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, +) -> eyre::Result<( + Vec>>>, + TaskManager, + Wallet, +)> +where + N: Default + + Node>>> + + NodeTypesWithEngine + + NodeTypesForProvider, + N::ComponentsBuilder: NodeComponentsBuilder< + TmpNodeAdapter>>, + Components: NodeComponents< + TmpNodeAdapter>>, + Network: PeersHandleProvider, + >, + >, + N::AddOns: RethRpcAddOns>>> + + EngineValidatorAddOn< + Adapter>>, + Validator: EngineValidator, + >, + LocalPayloadAttributesBuilder: PayloadAttributesBuilder< + <::Engine as PayloadTypes>::PayloadAttributes, + >, +{ + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let network_config = NetworkArgs { + discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, + ..NetworkArgs::default() + }; + + // Create nodes and peer them + let mut nodes: Vec> = Vec::with_capacity(num_nodes); + + for idx in 0..num_nodes { + let node_config = NodeConfig::new(chain_spec.clone()) + .with_network(network_config.clone()) + .with_unused_ports() + .with_rpc( + RpcServerArgs::default() + .with_unused_ports() + .with_http() + .with_http_api(RpcModuleSelection::All), + ) + .set_dev(is_dev); + + let span = span!(Level::INFO, "node", idx); + let _enter = span.enter(); + let node = N::default(); + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node(exec.clone()) + .with_types_and_provider::>() + .with_components(node.components_builder()) + .with_add_ons(node.add_ons()) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + builder.task_executor().clone(), + builder.config().datadir(), + Default::default(), + ); + builder.launch_with(launcher) + }) + .await?; + + let mut node = NodeTestContext::new(node, attributes_generator).await?; + + let genesis = node.block_hash(0); + node.engine_api.update_forkchoice(genesis, genesis).await?; // Connect each node in a chain. 
if let Some(previous_node) = nodes.last_mut() { @@ -104,19 +206,19 @@ where // Type aliases -type TmpDB = Arc>; -type TmpNodeAdapter = FullNodeTypesAdapter< - NodeTypesWithDBAdapter, - BlockchainProvider>, ->; +/// Testing database +pub type TmpDB = Arc>; +type TmpNodeAdapter>> = + FullNodeTypesAdapter, Provider>; /// Type alias for a `NodeAdapter` -pub type Adapter = NodeAdapter< - RethFullAdapter, - <>>::ComponentsBuilder as NodeComponentsBuilder< - RethFullAdapter, +pub type Adapter>> = NodeAdapter< + TmpNodeAdapter, + <>>::ComponentsBuilder as NodeComponentsBuilder< + TmpNodeAdapter, >>::Components, >; /// Type alias for a type of `NodeHelper` -pub type NodeHelperType = NodeTestContext, AO>; +pub type NodeHelperType>> = + NodeTestContext, >>::AddOns>; diff --git a/crates/e2e-test-utils/src/network.rs b/crates/e2e-test-utils/src/network.rs index 3f25915b35b..ce9d0b94612 100644 --- a/crates/e2e-test-utils/src/network.rs +++ b/crates/e2e-test-utils/src/network.rs @@ -1,5 +1,8 @@ use futures_util::StreamExt; -use reth::network::{NetworkEvent, NetworkEventListenerProvider, PeersHandleProvider, PeersInfo}; +use reth_network_api::{ + events::PeerEvent, test_utils::PeersHandleProvider, NetworkEvent, NetworkEventListenerProvider, + PeersInfo, +}; use reth_network_peers::{NodeRecord, PeerId}; use reth_tokio_util::EventStream; use reth_tracing::tracing::info; @@ -26,7 +29,7 @@ where self.network.peers_handle().add_peer(node_record.id, node_record.tcp_addr()); match self.network_events.next().await { - Some(NetworkEvent::PeerAdded(_)) => (), + Some(NetworkEvent::Peer(PeerEvent::PeerAdded(_))) => (), ev => panic!("Expected a peer added event, got: {ev:?}"), } } @@ -40,7 +43,9 @@ where pub async fn next_session_established(&mut self) -> Option { while let Some(ev) = self.network_events.next().await { match ev { - NetworkEvent::SessionEstablished { peer_id, .. } => { + NetworkEvent::ActivePeerSession { info, .. 
} | + NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => { + let peer_id = info.peer_id; info!("Session established with peer: {:?}", peer_id); return Some(peer_id) } diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index c22913ba236..b5dd44841dc 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -1,31 +1,28 @@ -use std::{marker::PhantomData, pin::Pin}; - +use crate::{ + engine_api::EngineApiTestContext, network::NetworkTestContext, payload::PayloadTestContext, + rpc::RpcTestContext, traits::PayloadEnvelopeExt, +}; +use alloy_consensus::BlockHeader; +use alloy_eips::BlockId; use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; -use alloy_rpc_types::BlockNumberOrTag; +use alloy_rpc_types_engine::PayloadStatusEnum; +use alloy_rpc_types_eth::BlockNumberOrTag; use eyre::Ok; use futures_util::Future; -use reth::{ - api::{BuiltPayload, EngineTypes, FullNodeComponents, PayloadBuilderAttributes}, - builder::FullNode, - network::PeersHandleProvider, - providers::{BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader}, - rpc::{ - api::eth::{ - helpers::{EthApiSpec, EthTransactions, TraceExt}, - FullEthApiTypes, - }, - types::engine::PayloadStatusEnum, - }, -}; use reth_chainspec::EthereumHardforks; -use reth_node_builder::{rpc::RethRpcAddOns, NodeTypesWithEngine}; +use reth_network_api::test_utils::PeersHandleProvider; +use reth_node_api::{Block, EngineTypes, FullNodeComponents}; +use reth_node_builder::{rpc::RethRpcAddOns, FullNode, NodeTypes, NodeTypesWithEngine}; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; +use reth_primitives::EthPrimitives; +use reth_provider::{ + BlockReader, BlockReaderIdExt, CanonStateSubscriptions, StageCheckpointReader, +}; +use reth_rpc_eth_api::helpers::{EthApiSpec, EthTransactions, TraceExt}; use reth_stages_types::StageId; +use std::{marker::PhantomData, pin::Pin}; use tokio_stream::StreamExt; - -use crate::{ - engine_api::EngineApiTestContext, network::NetworkTestContext, payload::PayloadTestContext, - rpc::RpcTestContext, traits::PayloadEnvelopeExt, -}; +use url::Url; /// An helper struct to handle node actions #[allow(missing_debug_implementations)] @@ -41,7 +38,10 @@ where /// Context for testing network functionalities. pub network: NetworkTestContext, /// Context for testing the Engine API. - pub engine_api: EngineApiTestContext<::Engine>, + pub engine_api: EngineApiTestContext< + ::Engine, + ::ChainSpec, + >, /// Context for testing RPC features. 
pub rpc: RpcTestContext, } @@ -50,19 +50,27 @@ impl NodeTestContext where Engine: EngineTypes, Node: FullNodeComponents, - Node::Types: NodeTypesWithEngine, + Node::Types: NodeTypesWithEngine< + ChainSpec: EthereumHardforks, + Engine = Engine, + Primitives = EthPrimitives, + >, Node::Network: PeersHandleProvider, AddOns: RethRpcAddOns, { /// Creates a new test node - pub async fn new(node: FullNode) -> eyre::Result { + pub async fn new( + node: FullNode, + attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes + 'static, + ) -> eyre::Result { let builder = node.payload_builder.clone(); Ok(Self { inner: node.clone(), - payload: PayloadTestContext::new(builder).await?, + payload: PayloadTestContext::new(builder, attributes_generator).await?, network: NetworkTestContext::new(node.network.clone()), engine_api: EngineApiTestContext { + chain_spec: node.chain_spec(), engine_api_client: node.auth_server_handle().http_client(), canonical_stream: node.provider.canonical_state_stream(), _marker: PhantomData::, @@ -85,17 +93,19 @@ where &mut self, length: u64, tx_generator: impl Fn(u64) -> Pin>>, - attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes + Copy, ) -> eyre::Result> where - Engine::ExecutionPayloadV3: From + PayloadEnvelopeExt, - AddOns::EthApi: EthApiSpec + EthTransactions + TraceExt + FullEthApiTypes, + Engine::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, + Engine::ExecutionPayloadEnvelopeV4: From + PayloadEnvelopeExt, + AddOns::EthApi: EthApiSpec> + + EthTransactions + + TraceExt, { let mut chain = Vec::with_capacity(length as usize); for i in 0..length { let raw_tx = tx_generator(i).await; let tx_hash = self.rpc.inject_tx(raw_tx).await?; - let (payload, eth_attr) = self.advance_block(vec![], attributes_generator).await?; + let (payload, eth_attr) = self.advance_block().await?; let block_hash = payload.block().hash(); let block_number = payload.block().number; self.assert_new_block(tx_hash, block_hash, block_number).await?; @@ -110,14 +120,13 @@ where /// It triggers the resolve payload via engine api and expects the built payload event. pub async fn new_payload( &mut self, - attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes, ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> where - ::ExecutionPayloadV3: + ::ExecutionPayloadEnvelopeV3: From + PayloadEnvelopeExt, { // trigger new payload building draining the pool - let eth_attr = self.payload.new_payload(attributes_generator).await.unwrap(); + let eth_attr = self.payload.new_payload().await.unwrap(); // first event is the payload attributes self.payload.expect_attr_event(eth_attr.clone()).await?; // wait for the payload builder to have finished building @@ -128,30 +137,39 @@ where Ok((self.payload.expect_built_payload().await?, eth_attr)) } + /// Triggers payload building job and submits it to the engine. 
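The generator passed to `NodeTestContext::new` ends up stored in `PayloadTestContext` (see the `payload.rs` hunk below) as a boxed closure, so `new_payload` and `advance_block` no longer thread it through every call. A minimal standalone sketch of that pattern, with `String` standing in for the real attributes type:

```rust
struct PayloadTestContext {
    timestamp: u64,
    // Injected once at construction instead of being passed per call.
    attributes_generator: Box<dyn Fn(u64) -> String>,
}

impl PayloadTestContext {
    fn new(attributes_generator: impl Fn(u64) -> String + 'static) -> Self {
        // Cancun timestamp, as in the real helper.
        Self { timestamp: 1_710_338_135, attributes_generator: Box::new(attributes_generator) }
    }

    fn new_payload(&mut self) -> String {
        self.timestamp += 1;
        (self.attributes_generator)(self.timestamp)
    }
}

fn main() {
    let mut ctx = PayloadTestContext::new(|ts| format!("attrs@{ts}"));
    assert_eq!(ctx.new_payload(), "attrs@1710338136");
}
```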
+ pub async fn build_and_submit_payload( + &mut self, + ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> + where + ::ExecutionPayloadEnvelopeV3: + From + PayloadEnvelopeExt, + ::ExecutionPayloadEnvelopeV4: + From + PayloadEnvelopeExt, + { + let (payload, eth_attr) = self.new_payload().await?; + + self.engine_api + .submit_payload(payload.clone(), eth_attr.clone(), PayloadStatusEnum::Valid) + .await?; + + Ok((payload, eth_attr)) + } + /// Advances the node forward one block pub async fn advance_block( &mut self, - versioned_hashes: Vec, - attributes_generator: impl Fn(u64) -> Engine::PayloadBuilderAttributes, ) -> eyre::Result<(Engine::BuiltPayload, Engine::PayloadBuilderAttributes)> where - ::ExecutionPayloadV3: + ::ExecutionPayloadEnvelopeV3: + From + PayloadEnvelopeExt, + ::ExecutionPayloadEnvelopeV4: From + PayloadEnvelopeExt, { - let (payload, eth_attr) = self.new_payload(attributes_generator).await?; - - let block_hash = self - .engine_api - .submit_payload( - payload.clone(), - eth_attr.clone(), - PayloadStatusEnum::Valid, - versioned_hashes, - ) - .await?; + let (payload, eth_attr) = self.build_and_submit_payload().await?; // trigger forkchoice update via engine api to commit the block to the blockchain - self.engine_api.update_forkchoice(block_hash, block_hash).await?; + self.engine_api.update_forkchoice(payload.block().hash(), payload.block().hash()).await?; Ok((payload, eth_attr)) } @@ -179,7 +197,7 @@ where if check { if let Some(latest_block) = self.inner.provider.block_by_number(number)? { - assert_eq!(latest_block.hash_slow(), expected_block_hash); + assert_eq!(latest_block.header().hash_slow(), expected_block_hash); break } assert!( @@ -217,7 +235,7 @@ where // get head block from notifications stream and verify the tx has been pushed to the // pool is actually present in the canonical block let head = self.engine_api.canonical_stream.next().await.unwrap(); - let tx = head.tip().transactions().next(); + let tx = head.tip().transactions().first(); assert_eq!(tx.unwrap().hash().as_slice(), tip_tx_hash.as_slice()); loop { @@ -226,14 +244,55 @@ where if let Some(latest_block) = self.inner.provider.block_by_number_or_tag(BlockNumberOrTag::Latest)? { - if latest_block.number == block_number { + if latest_block.header().number() == block_number { // make sure the block hash we submitted via FCU engine api is the new latest // block using an RPC call - assert_eq!(latest_block.hash_slow(), block_hash); + assert_eq!(latest_block.header().hash_slow(), block_hash); break } } } Ok(()) } + + /// Gets block hash by number. + pub fn block_hash(&self, number: u64) -> BlockHash { + self.inner + .provider + .sealed_header_by_number_or_tag(BlockNumberOrTag::Number(number)) + .unwrap() + .unwrap() + .hash() + } + + /// Sends FCU and waits for the node to sync to the given block. + pub async fn sync_to(&self, block: BlockHash) -> eyre::Result<()> { + self.engine_api.update_forkchoice(block, block).await?; + + let start = std::time::Instant::now(); + + while self + .inner + .provider + .sealed_header_by_id(BlockId::Number(BlockNumberOrTag::Latest))? + .is_none_or(|h| h.hash() != block) + { + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + assert!(start.elapsed() <= std::time::Duration::from_secs(10), "timed out"); + } + + // Hack to make sure that all components have time to process canonical state update. + // Otherwise, this might result in e.g "nonce too low" errors when advancing chain further, + // making tests flaky. 
+ tokio::time::sleep(std::time::Duration::from_millis(1000)).await; + + Ok(()) + } + + /// Returns the RPC URL. + pub fn rpc_url(&self) -> Url { + let addr = self.inner.rpc_server_handle().http_local_addr().unwrap(); + format!("http://{}", addr).parse().unwrap() + } } diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 1f9a89307b6..45889a171c1 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -1,44 +1,51 @@ use futures_util::StreamExt; -use reth::api::{BuiltPayload, EngineTypes, PayloadBuilderAttributes}; use reth_payload_builder::{PayloadBuilderHandle, PayloadId}; -use reth_payload_primitives::{Events, PayloadBuilder}; +use reth_payload_builder_primitives::{Events, PayloadBuilder}; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadTypes}; use tokio_stream::wrappers::BroadcastStream; /// Helper for payload operations -#[derive(Debug)] -pub struct PayloadTestContext { - pub payload_event_stream: BroadcastStream>, - payload_builder: PayloadBuilderHandle, +#[derive(derive_more::Debug)] +pub struct PayloadTestContext { + pub payload_event_stream: BroadcastStream>, + payload_builder: PayloadBuilderHandle, pub timestamp: u64, + #[debug(skip)] + attributes_generator: Box T::PayloadBuilderAttributes>, } -impl PayloadTestContext { +impl PayloadTestContext { /// Creates a new payload helper - pub async fn new(payload_builder: PayloadBuilderHandle) -> eyre::Result { + pub async fn new( + payload_builder: PayloadBuilderHandle, + attributes_generator: impl Fn(u64) -> T::PayloadBuilderAttributes + 'static, + ) -> eyre::Result { let payload_events = payload_builder.subscribe().await?; let payload_event_stream = payload_events.into_stream(); // Cancun timestamp - Ok(Self { payload_event_stream, payload_builder, timestamp: 1710338135 }) + Ok(Self { + payload_event_stream, + payload_builder, + timestamp: 1710338135, + attributes_generator: Box::new(attributes_generator), + }) } /// Creates a new payload job from static attributes - pub async fn new_payload( - &mut self, - attributes_generator: impl Fn(u64) -> E::PayloadBuilderAttributes, - ) -> eyre::Result { + pub async fn new_payload(&mut self) -> eyre::Result { self.timestamp += 1; - let attributes: E::PayloadBuilderAttributes = attributes_generator(self.timestamp); - self.payload_builder.new_payload(attributes.clone()).await.unwrap(); + let attributes = (self.attributes_generator)(self.timestamp); + self.payload_builder.send_new_payload(attributes.clone()).await.unwrap()?; Ok(attributes) } /// Asserts that the next event is a payload attributes event pub async fn expect_attr_event( &mut self, - attrs: E::PayloadBuilderAttributes, + attrs: T::PayloadBuilderAttributes, ) -> eyre::Result<()> { let first_event = self.payload_event_stream.next().await.unwrap()?; - if let reth::payload::Events::Attributes(attr) = first_event { + if let Events::Attributes(attr) = first_event { assert_eq!(attrs.timestamp(), attr.timestamp()); } else { panic!("Expect first event as payload attributes.") @@ -59,9 +66,9 @@ impl PayloadTestContext { } /// Expects the next event to be a built payload event or panics - pub async fn expect_built_payload(&mut self) -> eyre::Result { + pub async fn expect_built_payload(&mut self) -> eyre::Result { let second_event = self.payload_event_stream.next().await.unwrap()?; - if let reth::payload::Events::BuiltPayload(payload) = second_event { + if let Events::BuiltPayload(payload) = second_event { Ok(payload) } else { 
panic!("Expect a built payload event."); diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index b8cbe4d77ad..cdc72a29538 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -1,15 +1,15 @@ use alloy_consensus::TxEnvelope; use alloy_network::eip2718::Decodable2718; use alloy_primitives::{Bytes, B256}; -use reth::{ - builder::{rpc::RpcRegistry, FullNodeComponents}, - rpc::api::{ - eth::helpers::{EthApiSpec, EthTransactions, TraceExt}, - DebugApiServer, - }, -}; use reth_chainspec::EthereumHardforks; -use reth_node_builder::{EthApiTypes, NodeTypes}; +use reth_node_api::{FullNodeComponents, NodePrimitives}; +use reth_node_builder::{rpc::RpcRegistry, NodeTypes}; +use reth_provider::BlockReader; +use reth_rpc_api::DebugApiServer; +use reth_rpc_eth_api::{ + helpers::{EthApiSpec, EthTransactions, TraceExt}, + EthApiTypes, +}; #[allow(missing_debug_implementations)] pub struct RpcTestContext { @@ -18,8 +18,18 @@ pub struct RpcTestContext { impl RpcTestContext where - Node: FullNodeComponents>, - EthApi: EthApiSpec + EthTransactions + TraceExt, + Node: FullNodeComponents< + Types: NodeTypes< + ChainSpec: EthereumHardforks, + Primitives: NodePrimitives< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + >, + >, + >, + EthApi: EthApiSpec> + + EthTransactions + + TraceExt, { /// Injects a raw transaction into the node tx pool via RPC server pub async fn inject_tx(&self, raw_tx: Bytes) -> Result { diff --git a/crates/e2e-test-utils/src/traits.rs b/crates/e2e-test-utils/src/traits.rs index 6786492140b..6d9bf14dbc1 100644 --- a/crates/e2e-test-utils/src/traits.rs +++ b/crates/e2e-test-utils/src/traits.rs @@ -1,5 +1,7 @@ -use op_alloy_rpc_types_engine::OpExecutionPayloadEnvelopeV3; -use reth::rpc::types::engine::{ExecutionPayloadEnvelopeV3, ExecutionPayloadV3}; +use alloy_rpc_types_engine::{ + ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, ExecutionPayloadV3, +}; +use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; /// The execution payload envelope type. 
pub trait PayloadEnvelopeExt: Send + Sync + std::fmt::Debug { @@ -13,8 +15,20 @@ impl PayloadEnvelopeExt for OpExecutionPayloadEnvelopeV3 { } } +impl PayloadEnvelopeExt for OpExecutionPayloadEnvelopeV4 { + fn execution_payload(&self) -> ExecutionPayloadV3 { + self.execution_payload.clone() + } +} + impl PayloadEnvelopeExt for ExecutionPayloadEnvelopeV3 { fn execution_payload(&self) -> ExecutionPayloadV3 { self.execution_payload.clone() } } + +impl PayloadEnvelopeExt for ExecutionPayloadEnvelopeV4 { + fn execution_payload(&self) -> ExecutionPayloadV3 { + self.execution_payload.clone() + } +} diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index 04960304442..d24c5579313 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -4,7 +4,7 @@ use alloy_network::{ eip2718::Encodable2718, Ethereum, EthereumWallet, TransactionBuilder, TransactionBuilder4844, }; use alloy_primitives::{hex, Address, Bytes, TxKind, B256, U256}; -use alloy_rpc_types::{Authorization, TransactionInput, TransactionRequest}; +use alloy_rpc_types_eth::{Authorization, TransactionInput, TransactionRequest}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; use eyre::Ok; @@ -56,8 +56,7 @@ impl TransactionTestContext { delegate_to: Address, wallet: PrivateKeySigner, ) -> TxEnvelope { - let authorization = - Authorization { chain_id: U256::from(chain_id), address: delegate_to, nonce: 0 }; + let authorization = Authorization { chain_id, address: delegate_to, nonce: 0 }; let signature = wallet .sign_hash_sync(&authorization.signature_hash()) .expect("could not sign authorization"); diff --git a/crates/engine/invalid-block-hooks/Cargo.toml b/crates/engine/invalid-block-hooks/Cargo.toml index b33b8c00a1c..a7b0153d0d4 100644 --- a/crates/engine/invalid-block-hooks/Cargo.toml +++ b/crates/engine/invalid-block-hooks/Cargo.toml @@ -16,16 +16,18 @@ reth-chainspec.workspace = true reth-engine-primitives.workspace = true reth-evm.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-revm = { workspace = true, features = ["serde"] } reth-rpc-api = { workspace = true, features = ["client"] } reth-tracing.workspace = true -reth-trie = { workspace = true, features = ["serde"] } +reth-trie.workspace = true # alloy alloy-primitives.workspace = true alloy-rlp.workspace = true alloy-rpc-types-debug.workspace = true +alloy-consensus.workspace = true # async futures.workspace = true diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index bb227e30419..632428d6b64 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -1,25 +1,25 @@ -use std::{collections::HashMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; - +use alloy_consensus::BlockHeader; use alloy_primitives::{keccak256, B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use eyre::OptionExt; use pretty_assertions::Comparison; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_engine_primitives::InvalidBlockHook; -use reth_evm::{system_calls::SystemCaller, ConfigureEvm}; -use reth_primitives::{Header, Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_evm::{ + state_change::post_block_balance_increments, system_calls::SystemCaller, ConfigureEvm, +}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; +use 
reth_primitives_traits::SignedTransaction; use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ - database::StateProviderDatabase, - db::states::bundle_state::BundleRetention, - primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg}, - state_change::post_block_balance_increments, - DatabaseCommit, StateBuilder, + database::StateProviderDatabase, db::states::bundle_state::BundleRetention, + primitives::EnvWithHandlerCfg, DatabaseCommit, StateBuilder, }; use reth_rpc_api::DebugApiClient; use reth_tracing::tracing::warn; -use reth_trie::{updates::TrieUpdates, HashedPostState, HashedStorage}; +use reth_trie::{updates::TrieUpdates, HashedStorage}; use serde::Serialize; +use std::{collections::HashMap, fmt::Debug, fs::File, io::Write, path::PathBuf}; /// Generates a witness for the given block and saves it to a file. #[derive(Debug)] @@ -54,15 +54,18 @@ where + Send + Sync + 'static, - EvmConfig: ConfigureEvm
, { - fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, - ) -> eyre::Result<()> { + ) -> eyre::Result<()> + where + N: NodePrimitives, + EvmConfig: ConfigureEvm
, + { // TODO(alexey): unify with `DebugApi::debug_execution_witness` // Setup database. @@ -74,9 +77,7 @@ where .build(); // Setup environment for the execution. - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, block.header(), U256::MAX); + let (cfg, block_env) = self.evm_config.cfg_and_block_env(block.header(), U256::MAX); // Setup EVM let mut evm = self.evm_config.evm_with_env( @@ -88,7 +89,7 @@ where SystemCaller::new(self.evm_config.clone(), self.provider.chain_spec()); // Apply pre-block system contract calls. - system_caller.apply_pre_execution_changes(&block.clone().unseal(), &mut evm)?; + system_caller.apply_pre_execution_changes(&block.clone().unseal().block, &mut evm)?; // Re-execute all of the transactions in the block to load all touched accounts into // the cache DB. @@ -108,7 +109,7 @@ where // NOTE: This is not mut because we are not doing the DAO irregular state change here let balance_increments = post_block_balance_increments( self.provider.chain_spec().as_ref(), - &block.block.clone().unseal(), + &block.clone().unseal().block, U256::MAX, ); @@ -128,7 +129,7 @@ where // // Note: We grab *all* accounts in the cache here, as the `BundleState` prunes // referenced accounts + storage slots. - let mut hashed_state = HashedPostState::from_bundle_state(&bundle_state.state); + let mut hashed_state = db.database.hashed_post_state(&bundle_state); for (address, account) in db.cache.accounts { let hashed_address = keccak256(address); hashed_state @@ -162,27 +163,27 @@ where let response = ExecutionWitness { state: HashMap::from_iter(state), codes: Default::default(), - keys: Some(state_preimages), + keys: state_preimages, }; let re_executed_witness_path = self.save_file( - format!("{}_{}.witness.re_executed.json", block.number, block.hash()), + format!("{}_{}.witness.re_executed.json", block.number(), block.hash()), &response, )?; if let Some(healthy_node_client) = &self.healthy_node_client { // Compare the witness against the healthy node. let healthy_node_witness = futures::executor::block_on(async move { - DebugApiClient::debug_execution_witness(healthy_node_client, block.number.into()) + DebugApiClient::debug_execution_witness(healthy_node_client, block.number().into()) .await })?; let healthy_path = self.save_file( - format!("{}_{}.witness.healthy.json", block.number, block.hash()), + format!("{}_{}.witness.healthy.json", block.number(), block.hash()), &healthy_node_witness, )?; // If the witnesses are different, write the diff to the output directory. 
if response != healthy_node_witness { - let filename = format!("{}_{}.witness.diff", block.number, block.hash()); + let filename = format!("{}_{}.witness.diff", block.number(), block.hash()); let diff_path = self.save_diff(filename, &response, &healthy_node_witness)?; warn!( target: "engine::invalid_block_hooks::witness", @@ -212,15 +213,15 @@ where if bundle_state != output.state { let original_path = self.save_file( - format!("{}_{}.bundle_state.original.json", block.number, block.hash()), + format!("{}_{}.bundle_state.original.json", block.number(), block.hash()), &output.state, )?; let re_executed_path = self.save_file( - format!("{}_{}.bundle_state.re_executed.json", block.number, block.hash()), + format!("{}_{}.bundle_state.re_executed.json", block.number(), block.hash()), &bundle_state, )?; - let filename = format!("{}_{}.bundle_state.diff", block.number, block.hash()); + let filename = format!("{}_{}.bundle_state.diff", block.number(), block.hash()); let diff_path = self.save_diff(filename, &bundle_state, &output.state)?; warn!( @@ -238,26 +239,27 @@ where state_provider.state_root_with_updates(hashed_state)?; if let Some((original_updates, original_root)) = trie_updates { if re_executed_root != original_root { - let filename = format!("{}_{}.state_root.diff", block.number, block.hash()); + let filename = format!("{}_{}.state_root.diff", block.number(), block.hash()); let diff_path = self.save_diff(filename, &re_executed_root, &original_root)?; warn!(target: "engine::invalid_block_hooks::witness", ?original_root, ?re_executed_root, diff_path = %diff_path.display(), "State root mismatch after re-execution"); } // If the re-executed state root does not match the _header_ state root, also log that. - if re_executed_root != block.state_root { - let filename = format!("{}_{}.header_state_root.diff", block.number, block.hash()); - let diff_path = self.save_diff(filename, &re_executed_root, &block.state_root)?; - warn!(target: "engine::invalid_block_hooks::witness", header_state_root=?block.state_root, ?re_executed_root, diff_path = %diff_path.display(), "Re-executed state root does not match block state root"); + if re_executed_root != block.state_root() { + let filename = + format!("{}_{}.header_state_root.diff", block.number(), block.hash()); + let diff_path = self.save_diff(filename, &re_executed_root, &block.state_root())?; + warn!(target: "engine::invalid_block_hooks::witness", header_state_root=?block.state_root(), ?re_executed_root, diff_path = %diff_path.display(), "Re-executed state root does not match block state root"); } if &trie_output != original_updates { // Trie updates are too big to diff, so we just save the original and re-executed let original_path = self.save_file( - format!("{}_{}.trie_updates.original.json", block.number, block.hash()), + format!("{}_{}.trie_updates.original.json", block.number(), block.hash()), original_updates, )?; let re_executed_path = self.save_file( - format!("{}_{}.trie_updates.re_executed.json", block.number, block.hash()), + format!("{}_{}.trie_updates.re_executed.json", block.number(), block.hash()), &trie_output, )?; warn!( @@ -294,23 +296,24 @@ where } } -impl InvalidBlockHook for InvalidBlockWitnessHook +impl InvalidBlockHook for InvalidBlockWitnessHook where + N: NodePrimitives, P: StateProviderFactory + ChainSpecProvider + Send + Sync + 'static, - EvmConfig: ConfigureEvm
<Header = Header>, +    EvmConfig: ConfigureEvm<Header = N::BlockHeader>
, { fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ) { - if let Err(err) = self.on_invalid_block(parent_header, block, output, trie_updates) { + if let Err(err) = self.on_invalid_block::(parent_header, block, output, trie_updates) { warn!(target: "engine::invalid_block_hooks::witness", %err, "Failed to invoke hook"); } } diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml index f22ab1f8d56..b3ad169e318 100644 --- a/crates/engine/local/Cargo.toml +++ b/crates/engine/local/Cargo.toml @@ -16,11 +16,12 @@ reth-consensus.workspace = true reth-engine-primitives.workspace = true reth-engine-service.workspace = true reth-engine-tree.workspace = true +reth-node-types.workspace = true reth-evm.workspace = true reth-ethereum-engine-primitives.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true -reth-payload-validator.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-rpc-types-compat.workspace = true @@ -28,6 +29,7 @@ reth-transaction-pool.workspace = true reth-stages-api.workspace = true # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true @@ -46,4 +48,8 @@ op-alloy-rpc-types-engine = { workspace = true, optional = true } workspace = true [features] -optimism = ["op-alloy-rpc-types-engine"] +optimism = [ + "op-alloy-rpc-types-engine", + "reth-beacon-consensus/optimism", + "reth-provider/optimism", +] diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index f20d70b1489..29418c0b714 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -1,16 +1,15 @@ //! Contains the implementation of the mining mode for the local engine. 
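Before the miner changes, note the pattern in the hook impl just above: the trait method is infallible, so the fallible inner call is downgraded to a warning instead of propagating into engine processing. A hedged sketch of that shape (illustrative function names):

```rust
use tracing::warn;

/// Fallible inner routine; stands in for the `on_invalid_block` that returns
/// an `eyre::Result<()>`.
fn try_handle(block_number: u64) -> eyre::Result<()> {
    let _ = block_number;
    Ok(())
}

/// Infallible trait-facing wrapper: errors are logged, never returned, so a
/// broken hook cannot disturb engine message handling.
fn handle(block_number: u64) {
    if let Err(err) = try_handle(block_number) {
        warn!(target: "engine::invalid_block_hooks::witness", %err, "Failed to invoke hook");
    }
}
```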
+use alloy_consensus::BlockHeader; use alloy_primitives::{TxHash, B256}; -use alloy_rpc_types_engine::{CancunPayloadFields, ForkchoiceState}; +use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar, ForkchoiceState}; use eyre::OptionExt; use futures_util::{stream::Fuse, StreamExt}; -use reth_beacon_consensus::BeaconEngineMessage; use reth_chainspec::EthereumHardforks; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{BeaconEngineMessage, EngineApiMessageVersion, EngineTypes}; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_primitives::{ - BuiltPayload, PayloadAttributesBuilder, PayloadBuilder, PayloadTypes, -}; +use reth_payload_builder_primitives::PayloadBuilder; +use reth_payload_primitives::{BuiltPayload, PayloadAttributesBuilder, PayloadKind, PayloadTypes}; use reth_provider::{BlockReader, ChainSpecProvider}; use reth_rpc_types_compat::engine::payload::block_to_payload; use reth_transaction_pool::TransactionPool; @@ -116,7 +115,7 @@ where to_engine, mode, payload_builder, - last_timestamp: latest_header.timestamp, + last_timestamp: latest_header.timestamp(), last_block_hashes: vec![latest_header.hash()], }; @@ -167,6 +166,7 @@ where state: self.forkchoice_state(), payload_attrs: None, tx, + version: EngineApiMessageVersion::default(), })?; let res = rx.await??; @@ -193,6 +193,7 @@ where state: self.forkchoice_state(), payload_attrs: Some(self.payload_attributes_builder.build(timestamp)), tx, + version: EngineApiMessageVersion::default(), })?; let res = rx.await??.await?; @@ -202,27 +203,30 @@ where let payload_id = res.payload_id.ok_or_eyre("No payload id")?; - // wait for some time to let the payload be built - tokio::time::sleep(Duration::from_millis(200)).await; - - let Some(Ok(payload)) = self.payload_builder.best_payload(payload_id).await else { + let Some(Ok(payload)) = + self.payload_builder.resolve_kind(payload_id, PayloadKind::WaitForPending).await + else { eyre::bail!("No payload") }; let block = payload.block(); - let cancun_fields = - self.provider.chain_spec().is_cancun_active_at_timestamp(block.timestamp).then(|| { - CancunPayloadFields { - parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), - versioned_hashes: block.blob_versioned_hashes().into_iter().copied().collect(), - } + let cancun_fields = self + .provider + .chain_spec() + .is_cancun_active_at_timestamp(block.timestamp) + .then(|| CancunPayloadFields { + parent_beacon_block_root: block.parent_beacon_block_root.unwrap(), + versioned_hashes: block.body.blob_versioned_hashes().into_iter().copied().collect(), }); let (tx, rx) = oneshot::channel(); self.to_engine.send(BeaconEngineMessage::NewPayload { payload: block_to_payload(payload.block().clone()), - cancun_fields, + // todo: prague support + sidecar: cancun_fields + .map(ExecutionPayloadSidecar::v3) + .unwrap_or_else(ExecutionPayloadSidecar::none), tx, })?; diff --git a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs index 5111360d5bf..6355a2a00af 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -39,6 +39,8 @@ where .chain_spec .is_cancun_active_at_timestamp(timestamp) .then(B256::random), + target_blobs_per_block: None, + max_blobs_per_block: None, } } } diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 93a9cf11ecc..57fdc0c254e 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -16,9 +16,10 @@ use std::{ use crate::miner::{LocalMiner, 
MiningMode}; use futures_util::{Stream, StreamExt}; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineNodeTypes}; +use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; +use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; use reth_engine_service::service::EngineMessageStream; use reth_engine_tree::{ chain::{ChainEvent, HandlerEvent}, @@ -30,9 +31,9 @@ use reth_engine_tree::{ tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, }; use reth_evm::execute::BlockExecutorProvider; +use reth_node_types::BlockTy; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{PayloadAttributesBuilder, PayloadTypes}; -use reth_payload_validator::ExecutionPayloadValidator; use reth_provider::{providers::BlockchainProvider2, ChainSpecProvider, ProviderFactory}; use reth_prune::PrunerWithFactory; use reth_stages_api::MetricEventsSender; @@ -51,7 +52,7 @@ where /// Processes requests. /// /// This type is responsible for processing incoming requests. - handler: EngineApiRequestHandler>, + handler: EngineApiRequestHandler, N::Primitives>, /// Receiver for incoming requests (from the engine API endpoint) that need to be processed. incoming_requests: EngineMessageStream, } @@ -62,15 +63,16 @@ where { /// Constructor for [`LocalEngineService`]. #[allow(clippy::too_many_arguments)] - pub fn new( - consensus: Arc, - executor_factory: impl BlockExecutorProvider, + pub fn new( + consensus: Arc, + executor_factory: impl BlockExecutorProvider, provider: ProviderFactory, blockchain_db: BlockchainProvider2, pruner: PrunerWithFactory>, payload_builder: PayloadBuilderHandle, + payload_validator: V, tree_config: TreeConfig, - invalid_block_hook: Box, + invalid_block_hook: Box>, sync_metrics_tx: MetricEventsSender, to_engine: UnboundedSender>, from_engine: EngineMessageStream, @@ -79,18 +81,17 @@ where ) -> Self where B: PayloadAttributesBuilder<::PayloadAttributes>, + V: EngineValidator>, { let chain_spec = provider.chain_spec(); let engine_kind = if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum }; let persistence_handle = - PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); - let payload_validator = ExecutionPayloadValidator::new(chain_spec); - + PersistenceHandle::::spawn_service(provider, pruner, sync_metrics_tx); let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); - let (to_tree_tx, from_tree) = EngineApiTreeHandler::spawn_new( + let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( blockchain_db.clone(), executor_factory, consensus, diff --git a/crates/engine/primitives/Cargo.toml b/crates/engine/primitives/Cargo.toml index 008af450332..2da1be9c928 100644 --- a/crates/engine/primitives/Cargo.toml +++ b/crates/engine/primitives/Cargo.toml @@ -14,11 +14,21 @@ workspace = true # reth reth-execution-types.workspace = true reth-payload-primitives.workspace = true +reth-payload-builder-primitives.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-trie.workspace = true +reth-errors.workspace = true # alloy alloy-primitives.workspace = true +alloy-consensus.workspace = true +alloy-rpc-types-engine.workspace = true + +# async +tokio = { workspace = true, features = ["sync"] } +futures.workspace = true # misc serde.workspace = true +thiserror.workspace = true diff --git 
a/crates/engine/primitives/src/error.rs b/crates/engine/primitives/src/error.rs
new file mode 100644
index 00000000000..b7deb607bcf
--- /dev/null
+++ b/crates/engine/primitives/src/error.rs
@@ -0,0 +1,20 @@
+/// Represents all error cases when handling a new payload.
+///
+/// This represents all possible error cases that must be returned as JSON RPC errors back to the
+/// beacon node.
+#[derive(Debug, thiserror::Error)]
+pub enum BeaconOnNewPayloadError {
+    /// Thrown when the engine task is unavailable/stopped.
+    #[error("beacon consensus engine task stopped")]
+    EngineUnavailable,
+    /// An internal error occurred, not necessarily related to the payload.
+    #[error(transparent)]
+    Internal(Box<dyn core::error::Error + Send + Sync>),
+}
+
+impl BeaconOnNewPayloadError {
+    /// Create a new internal error.
+    pub fn internal<E: core::error::Error + Send + Sync + 'static>(e: E) -> Self {
+        Self::Internal(Box::new(e))
+    }
+}
diff --git a/crates/engine/primitives/src/forkchoice.rs b/crates/engine/primitives/src/forkchoice.rs
new file mode 100644
index 00000000000..9d680d5a124
--- /dev/null
+++ b/crates/engine/primitives/src/forkchoice.rs
@@ -0,0 +1,441 @@
+use alloy_primitives::B256;
+use alloy_rpc_types_engine::{ForkchoiceState, PayloadStatusEnum};
+
+/// The struct that keeps track of the received forkchoice states and their statuses.
+#[derive(Debug, Clone, Default)]
+pub struct ForkchoiceStateTracker {
+    /// The latest forkchoice state that we received.
+    ///
+    /// Caution: this can be invalid.
+    latest: Option<ReceivedForkchoiceState>,
+    /// Tracks the latest forkchoice state that we received to which we need to sync.
+    last_syncing: Option<ForkchoiceState>,
+    /// The latest valid forkchoice state that we received and processed as valid.
+    last_valid: Option<ForkchoiceState>,
+}
+
+impl ForkchoiceStateTracker {
+    /// Sets the latest forkchoice state that we received.
+    ///
+    /// If the status is `VALID`, we also update the last valid forkchoice state and set the
+    /// `sync_target` to `None`, since we're now fully synced.
+    pub fn set_latest(&mut self, state: ForkchoiceState, status: ForkchoiceStatus) {
+        if status.is_valid() {
+            self.set_valid(state);
+        } else if status.is_syncing() {
+            self.last_syncing = Some(state);
+        }
+
+        let received = ReceivedForkchoiceState { state, status };
+        self.latest = Some(received);
+    }
+
+    fn set_valid(&mut self, state: ForkchoiceState) {
+        // we no longer need to sync to this state.
+        self.last_syncing = None;
+
+        self.last_valid = Some(state);
+    }
+
+    /// Returns the [`ForkchoiceStatus`] of the latest received FCU.
+    ///
+    /// Caution: this can be invalid.
+    pub(crate) fn latest_status(&self) -> Option<ForkchoiceStatus> {
+        self.latest.as_ref().map(|s| s.status)
+    }
+
+    /// Returns whether the latest received FCU is valid: [`ForkchoiceStatus::Valid`]
+    #[allow(dead_code)]
+    pub(crate) fn is_latest_valid(&self) -> bool {
+        self.latest_status().is_some_and(|s| s.is_valid())
+    }
+
+    /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Syncing`]
+    #[allow(dead_code)]
+    pub(crate) fn is_latest_syncing(&self) -> bool {
+        self.latest_status().is_some_and(|s| s.is_syncing())
+    }
+
+    /// Returns whether the latest received FCU is invalid: [`ForkchoiceStatus::Invalid`]
+    #[allow(dead_code)]
+    pub fn is_latest_invalid(&self) -> bool {
+        self.latest_status().is_some_and(|s| s.is_invalid())
+    }
+
+    /// Returns the last valid head hash.
+    #[allow(dead_code)]
+    pub fn last_valid_head(&self) -> Option<B256> {
+        self.last_valid.as_ref().map(|s| s.head_block_hash)
+    }
+
+    /// Returns the head hash of the latest received FCU to which we need to sync.
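The tracker's update semantics can be exercised directly; a short usage sketch (beyond the unit tests below), assuming the types defined in this file:

```rust
use alloy_primitives::B256;
use alloy_rpc_types_engine::ForkchoiceState;

fn main() {
    let mut tracker = ForkchoiceStateTracker::default();
    let state = ForkchoiceState {
        head_block_hash: B256::repeat_byte(1),
        safe_block_hash: B256::repeat_byte(2),
        finalized_block_hash: B256::repeat_byte(3),
    };

    // A SYNCING update records the state as the sync target.
    tracker.set_latest(state, ForkchoiceStatus::Syncing);
    assert_eq!(tracker.sync_target_state(), Some(state));

    // A later VALID update clears the sync target and becomes the last valid state.
    tracker.set_latest(state, ForkchoiceStatus::Valid);
    assert!(tracker.sync_target_state().is_none());
    assert_eq!(tracker.last_valid_state(), Some(state));
}
```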
+    #[allow(dead_code)]
+    pub(crate) fn sync_target(&self) -> Option<B256> {
+        self.last_syncing.as_ref().map(|s| s.head_block_hash)
+    }
+
+    /// Returns the latest received [`ForkchoiceState`].
+    ///
+    /// Caution: this can be invalid.
+    pub const fn latest_state(&self) -> Option<ForkchoiceState> {
+        match &self.latest {
+            Some(received) => Some(received.state),
+            None => None,
+        }
+    }
+
+    /// Returns the last valid [`ForkchoiceState`].
+    pub const fn last_valid_state(&self) -> Option<ForkchoiceState> {
+        self.last_valid
+    }
+
+    /// Returns the last valid finalized hash.
+    ///
+    /// This will return [`None`]:
+    /// - If either there is no valid finalized forkchoice state,
+    /// - Or the finalized hash for the latest valid forkchoice state is zero.
+    #[inline]
+    pub fn last_valid_finalized(&self) -> Option<B256> {
+        self.last_valid
+            .filter(|state| !state.finalized_block_hash.is_zero())
+            .map(|state| state.finalized_block_hash)
+    }
+
+    /// Returns the last received `ForkchoiceState` to which we need to sync.
+    pub const fn sync_target_state(&self) -> Option<ForkchoiceState> {
+        self.last_syncing
+    }
+
+    /// Returns the sync target finalized hash.
+    ///
+    /// This will return [`None`]:
+    /// - If either there is no sync target forkchoice state,
+    /// - Or the finalized hash for the sync target forkchoice state is zero.
+    #[inline]
+    pub fn sync_target_finalized(&self) -> Option<B256> {
+        self.last_syncing
+            .filter(|state| !state.finalized_block_hash.is_zero())
+            .map(|state| state.finalized_block_hash)
+    }
+
+    /// Returns true if no forkchoice state has been received yet.
+    pub const fn is_empty(&self) -> bool {
+        self.latest.is_none()
+    }
+}
+
+/// Represents a forkchoice update and tracks the status we assigned to it.
+#[derive(Debug, Clone)]
+#[allow(dead_code)]
+pub(crate) struct ReceivedForkchoiceState {
+    state: ForkchoiceState,
+    status: ForkchoiceStatus,
+}
+
+/// A simplified representation of [`PayloadStatusEnum`] specifically for FCU.
+#[derive(Debug, Clone, Copy, Eq, PartialEq)]
+pub enum ForkchoiceStatus {
+    /// The forkchoice state is valid.
+    Valid,
+    /// The forkchoice state is invalid.
+    Invalid,
+    /// The forkchoice state is unknown.
+    Syncing,
+}
+
+impl ForkchoiceStatus {
+    /// Returns `true` if the forkchoice state is [`ForkchoiceStatus::Valid`].
+    pub const fn is_valid(&self) -> bool {
+        matches!(self, Self::Valid)
+    }
+
+    /// Returns `true` if the forkchoice state is [`ForkchoiceStatus::Invalid`].
+    pub const fn is_invalid(&self) -> bool {
+        matches!(self, Self::Invalid)
+    }
+
+    /// Returns `true` if the forkchoice state is [`ForkchoiceStatus::Syncing`].
+    pub const fn is_syncing(&self) -> bool {
+        matches!(self, Self::Syncing)
+    }
+
+    /// Converts the general purpose [`PayloadStatusEnum`] into a [`ForkchoiceStatus`].
+    pub(crate) const fn from_payload_status(status: &PayloadStatusEnum) -> Self {
+        match status {
+            PayloadStatusEnum::Valid | PayloadStatusEnum::Accepted => {
+                // `Accepted` is only returned on `newPayload`. It would be a valid state here.
+                Self::Valid
+            }
+            PayloadStatusEnum::Invalid { .. } => Self::Invalid,
+            PayloadStatusEnum::Syncing => Self::Syncing,
+        }
+    }
+}
+
+impl From<PayloadStatusEnum> for ForkchoiceStatus {
+    fn from(status: PayloadStatusEnum) -> Self {
+        Self::from_payload_status(&status)
+    }
+}
+
+/// A helper type to represent the hashes of a [`ForkchoiceState`]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum ForkchoiceStateHash {
+    /// Head hash of the [`ForkchoiceState`].
+    Head(B256),
+    /// Safe hash of the [`ForkchoiceState`].
+    Safe(B256),
+    /// Finalized hash of the [`ForkchoiceState`].
+ Finalized(B256), +} + +impl ForkchoiceStateHash { + /// Tries to find a matching hash in the given [`ForkchoiceState`]. + pub fn find(state: &ForkchoiceState, hash: B256) -> Option { + if state.head_block_hash == hash { + Some(Self::Head(hash)) + } else if state.safe_block_hash == hash { + Some(Self::Safe(hash)) + } else if state.finalized_block_hash == hash { + Some(Self::Finalized(hash)) + } else { + None + } + } + + /// Returns true if this is the head hash of the [`ForkchoiceState`] + pub const fn is_head(&self) -> bool { + matches!(self, Self::Head(_)) + } +} + +impl AsRef for ForkchoiceStateHash { + fn as_ref(&self) -> &B256 { + match self { + Self::Head(h) | Self::Safe(h) | Self::Finalized(h) => h, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_forkchoice_state_tracker_set_latest_valid() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Latest state is None + assert!(tracker.latest_status().is_none()); + + // Create a valid ForkchoiceState + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[3; 32]), + }; + let status = ForkchoiceStatus::Valid; + + tracker.set_latest(state, status); + + // Assert that the latest state is set + assert!(tracker.latest.is_some()); + assert_eq!(tracker.latest.as_ref().unwrap().state, state); + + // Assert that last valid state is updated + assert!(tracker.last_valid.is_some()); + assert_eq!(tracker.last_valid.as_ref().unwrap(), &state); + + // Assert that last syncing state is None + assert!(tracker.last_syncing.is_none()); + + // Test when there is a latest status and it is valid + assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Valid)); + } + + #[test] + fn test_forkchoice_state_tracker_set_latest_syncing() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Create a syncing ForkchoiceState + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[0; 32]), // Zero to simulate not finalized + }; + let status = ForkchoiceStatus::Syncing; + + tracker.set_latest(state, status); + + // Assert that the latest state is set + assert!(tracker.latest.is_some()); + assert_eq!(tracker.latest.as_ref().unwrap().state, state); + + // Assert that last valid state is None since the status is syncing + assert!(tracker.last_valid.is_none()); + + // Assert that last syncing state is updated + assert!(tracker.last_syncing.is_some()); + assert_eq!(tracker.last_syncing.as_ref().unwrap(), &state); + + // Test when there is a latest status and it is syncing + assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Syncing)); + } + + #[test] + fn test_forkchoice_state_tracker_set_latest_invalid() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Create an invalid ForkchoiceState + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[3; 32]), + }; + let status = ForkchoiceStatus::Invalid; + + tracker.set_latest(state, status); + + // Assert that the latest state is set + assert!(tracker.latest.is_some()); + assert_eq!(tracker.latest.as_ref().unwrap().state, state); + + // Assert that last valid state is None since the status is invalid + assert!(tracker.last_valid.is_none()); + + // Assert that last syncing state is None since the status is invalid + 
assert!(tracker.last_syncing.is_none()); + + // Test when there is a latest status and it is invalid + assert_eq!(tracker.latest_status(), Some(ForkchoiceStatus::Invalid)); + } + + #[test] + fn test_forkchoice_state_tracker_sync_target() { + let mut tracker = ForkchoiceStateTracker::default(); + + // Test when there is no last syncing state (should return None) + assert!(tracker.sync_target().is_none()); + + // Set a last syncing forkchoice state + let state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[3; 32]), + }; + tracker.last_syncing = Some(state); + + // Test when the last syncing state is set (should return the head block hash) + assert_eq!(tracker.sync_target(), Some(B256::from_slice(&[1; 32]))); + } + + #[test] + fn test_forkchoice_state_tracker_last_valid_finalized() { + let mut tracker = ForkchoiceStateTracker::default(); + + // No valid finalized state (should return None) + assert!(tracker.last_valid_finalized().is_none()); + + // Valid finalized state, but finalized hash is zero (should return None) + let zero_finalized_state = ForkchoiceState { + head_block_hash: B256::ZERO, + safe_block_hash: B256::ZERO, + finalized_block_hash: B256::ZERO, // Zero finalized hash + }; + tracker.last_valid = Some(zero_finalized_state); + assert!(tracker.last_valid_finalized().is_none()); + + // Valid finalized state with non-zero finalized hash (should return finalized hash) + let valid_finalized_state = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[123; 32]), // Non-zero finalized hash + }; + tracker.last_valid = Some(valid_finalized_state); + assert_eq!(tracker.last_valid_finalized(), Some(B256::from_slice(&[123; 32]))); + + // Reset the last valid state to None + tracker.last_valid = None; + assert!(tracker.last_valid_finalized().is_none()); + } + + #[test] + fn test_forkchoice_state_tracker_sync_target_finalized() { + let mut tracker = ForkchoiceStateTracker::default(); + + // No sync target state (should return None) + assert!(tracker.sync_target_finalized().is_none()); + + // Sync target state with finalized hash as zero (should return None) + let zero_finalized_sync_target = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::ZERO, // Zero finalized hash + }; + tracker.last_syncing = Some(zero_finalized_sync_target); + assert!(tracker.sync_target_finalized().is_none()); + + // Sync target state with non-zero finalized hash (should return the hash) + let valid_sync_target = ForkchoiceState { + head_block_hash: B256::from_slice(&[1; 32]), + safe_block_hash: B256::from_slice(&[2; 32]), + finalized_block_hash: B256::from_slice(&[22; 32]), // Non-zero finalized hash + }; + tracker.last_syncing = Some(valid_sync_target); + assert_eq!(tracker.sync_target_finalized(), Some(B256::from_slice(&[22; 32]))); + + // Reset the last sync target state to None + tracker.last_syncing = None; + assert!(tracker.sync_target_finalized().is_none()); + } + + #[test] + fn test_forkchoice_state_tracker_is_empty() { + let mut forkchoice = ForkchoiceStateTracker::default(); + + // Initially, no forkchoice state has been received, so it should be empty. + assert!(forkchoice.is_empty()); + + // After setting a forkchoice state, it should no longer be empty. 
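One nuance worth calling out from `from_payload_status` above: `Accepted` can only arise from `newPayload`, so for FCU tracking it is folded into `Valid`. A tiny check of the conversion, using the `From` impl defined earlier:

```rust
use alloy_rpc_types_engine::PayloadStatusEnum;

fn main() {
    // `Accepted` is a newPayload-only status; for forkchoice tracking it
    // collapses into `Valid`.
    assert_eq!(ForkchoiceStatus::from(PayloadStatusEnum::Accepted), ForkchoiceStatus::Valid);
    assert_eq!(ForkchoiceStatus::from(PayloadStatusEnum::Syncing), ForkchoiceStatus::Syncing);
}
```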
+ forkchoice.set_latest(ForkchoiceState::default(), ForkchoiceStatus::Valid); + assert!(!forkchoice.is_empty()); + + // Reset the forkchoice latest, it should be empty again. + forkchoice.latest = None; + assert!(forkchoice.is_empty()); + } + + #[test] + fn test_forkchoice_state_hash_find() { + // Define example hashes + let head_hash = B256::random(); + let safe_hash = B256::random(); + let finalized_hash = B256::random(); + let non_matching_hash = B256::random(); + + // Create a ForkchoiceState with specific hashes + let state = ForkchoiceState { + head_block_hash: head_hash, + safe_block_hash: safe_hash, + finalized_block_hash: finalized_hash, + }; + + // Test finding the head hash + assert_eq!( + ForkchoiceStateHash::find(&state, head_hash), + Some(ForkchoiceStateHash::Head(head_hash)) + ); + + // Test finding the safe hash + assert_eq!( + ForkchoiceStateHash::find(&state, safe_hash), + Some(ForkchoiceStateHash::Safe(safe_hash)) + ); + + // Test finding the finalized hash + assert_eq!( + ForkchoiceStateHash::find(&state, finalized_hash), + Some(ForkchoiceStateHash::Finalized(finalized_hash)) + ); + + // Test with a hash that doesn't match any of the hashes in ForkchoiceState + assert_eq!(ForkchoiceStateHash::find(&state, non_matching_hash), None); + } +} diff --git a/crates/engine/primitives/src/invalid_block_hook.rs b/crates/engine/primitives/src/invalid_block_hook.rs index 13c606511dd..cfd127ae6f4 100644 --- a/crates/engine/primitives/src/invalid_block_hook.rs +++ b/crates/engine/primitives/src/invalid_block_hook.rs @@ -1,35 +1,36 @@ use alloy_primitives::B256; use reth_execution_types::BlockExecutionOutput; -use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; use reth_trie::updates::TrieUpdates; /// An invalid block hook. -pub trait InvalidBlockHook: Send + Sync { +pub trait InvalidBlockHook: Send + Sync { /// Invoked when an invalid block is encountered. 
fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ); } -impl InvalidBlockHook for F +impl InvalidBlockHook for F where + N: NodePrimitives, F: Fn( - &SealedHeader, - &SealedBlockWithSenders, - &BlockExecutionOutput, + &SealedHeader, + &SealedBlockWithSenders, + &BlockExecutionOutput, Option<(&TrieUpdates, B256)>, ) + Send + Sync, { fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ) { self(parent_header, block, output, trie_updates) diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 2cf1366eb01..2bd642cfa20 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -8,6 +8,20 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +mod error; + +use core::fmt; + +use alloy_consensus::BlockHeader; +use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; +pub use error::BeaconOnNewPayloadError; + +mod forkchoice; +pub use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus}; + +mod message; +pub use message::{BeaconEngineMessage, OnForkChoiceUpdated}; + mod invalid_block_hook; pub use invalid_block_hook::InvalidBlockHook; @@ -15,6 +29,9 @@ pub use reth_payload_primitives::{ BuiltPayload, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes, PayloadTypes, }; +use reth_payload_primitives::{InvalidPayloadAttributesError, PayloadAttributes}; +use reth_primitives::SealedBlockFor; +use reth_primitives_traits::Block; use serde::{de::DeserializeOwned, ser::Serialize}; /// This type defines the versioned types of the engine API. @@ -23,26 +40,70 @@ use serde::{de::DeserializeOwned, ser::Serialize}; /// payload job. Hence this trait is also [`PayloadTypes`]. pub trait EngineTypes: PayloadTypes< - BuiltPayload: TryInto - + TryInto - + TryInto - + TryInto, + BuiltPayload: TryInto + + TryInto + + TryInto + + TryInto, > + DeserializeOwned + Serialize + 'static { - /// Execution Payload V1 type. - type ExecutionPayloadV1: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; - /// Execution Payload V2 type. - type ExecutionPayloadV2: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; - /// Execution Payload V3 type. - type ExecutionPayloadV3: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; - /// Execution Payload V4 type. - type ExecutionPayloadV4: DeserializeOwned + Serialize + Clone + Unpin + Send + Sync + 'static; + /// Execution Payload V1 envelope type. + type ExecutionPayloadEnvelopeV1: DeserializeOwned + + Serialize + + Clone + + Unpin + + Send + + Sync + + 'static; + /// Execution Payload V2 envelope type. + type ExecutionPayloadEnvelopeV2: DeserializeOwned + + Serialize + + Clone + + Unpin + + Send + + Sync + + 'static; + /// Execution Payload V3 envelope type. + type ExecutionPayloadEnvelopeV3: DeserializeOwned + + Serialize + + Clone + + Unpin + + Send + + Sync + + 'static; + /// Execution Payload V4 envelope type. 
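The associated types are renamed to make clear they are the `engine_getPayloadVX` response envelopes rather than raw payloads. For Ethereum these would plausibly bind to alloy's envelope types (a sketch of the mapping, not the verbatim `EthEngineTypes` impl):

```rust
use alloy_rpc_types_engine::{
    ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4,
    ExecutionPayloadV1,
};

// V1 has no wrapper envelope; V3 adds the blobs bundle and
// `should_override_builder`; V4 additionally carries execution requests.
type EnvelopeV1 = ExecutionPayloadV1;
type EnvelopeV2 = ExecutionPayloadEnvelopeV2;
type EnvelopeV3 = ExecutionPayloadEnvelopeV3;
type EnvelopeV4 = ExecutionPayloadEnvelopeV4;
```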
+ type ExecutionPayloadEnvelopeV4: DeserializeOwned + + Serialize + + Clone + + Unpin + + Send + + Sync + + 'static; } -/// Type that validates the payloads sent to the engine. -pub trait EngineValidator: Clone + Send + Sync + Unpin + 'static { +/// Type that validates an [`ExecutionPayload`]. +pub trait PayloadValidator: fmt::Debug + Send + Sync + Unpin + 'static { + /// The block type used by the engine. + type Block: Block; + + /// Ensures that the given payload does not violate any consensus rules that concern the block's + /// layout. + /// + /// This function must convert the payload into the executable block and pre-validate its + /// fields. + /// + /// Implementers should ensure that the checks are done in the order that conforms with the + /// engine-API specification. + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError>; +} + +/// Type that validates the payloads processed by the engine. +pub trait EngineValidator: PayloadValidator { /// Validates the presence or exclusion of fork-specific fields based on the payload attributes /// and the message version. fn validate_version_specific_fields( @@ -57,4 +118,24 @@ pub trait EngineValidator: Clone + Send + Sync + Unpin + 'st version: EngineApiMessageVersion, attributes: &::PayloadAttributes, ) -> Result<(), EngineObjectValidationError>; + + /// Validates the payload attributes with respect to the header. + /// + /// By default, this enforces that the payload attributes timestamp is greater than the + /// timestamp according to: + /// > 7. Client software MUST ensure that payloadAttributes.timestamp is greater than + /// > timestamp + /// > of a block referenced by forkchoiceState.headBlockHash. + /// + /// See also [engine api spec](https://github.com/ethereum/execution-apis/tree/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine) + fn validate_payload_attributes_against_header( + &self, + attr: &::PayloadAttributes, + header: &::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + if attr.timestamp() <= header.timestamp() { + return Err(InvalidPayloadAttributesError::InvalidTimestamp); + } + Ok(()) + } } diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/engine/primitives/src/message.rs similarity index 93% rename from crates/consensus/beacon/src/engine/message.rs rename to crates/engine/primitives/src/message.rs index fdaad0cc4b0..d8a4c1322ad 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/engine/primitives/src/message.rs @@ -1,12 +1,11 @@ -use crate::engine::{error::BeaconOnNewPayloadError, forkchoice::ForkchoiceStatus}; +use crate::{BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, ForkchoiceStatus}; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkChoiceUpdateResult, ForkchoiceState, + ExecutionPayload, ExecutionPayloadSidecar, ForkChoiceUpdateResult, ForkchoiceState, ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, }; use futures::{future::Either, FutureExt}; -use reth_engine_primitives::EngineTypes; use reth_errors::RethResult; -use reth_payload_primitives::PayloadBuilderError; +use reth_payload_builder_primitives::PayloadBuilderError; use std::{ fmt::Display, future::Future, @@ -144,8 +143,9 @@ pub enum BeaconEngineMessage { NewPayload { /// The execution payload received by Engine API. payload: ExecutionPayload, - /// The cancun-related newPayload fields, if any. 
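The default `validate_payload_attributes_against_header` above boils down to a strict timestamp comparison; distilled to plain integers, without the trait plumbing:

```rust
/// Engine API forkchoiceUpdated rule 7: payload attributes must move time
/// strictly forward relative to the head block referenced by the FCU.
const fn attributes_timestamp_ok(attr_timestamp: u64, head_timestamp: u64) -> bool {
    attr_timestamp > head_timestamp
}

fn main() {
    assert!(attributes_timestamp_ok(13, 12));
    assert!(!attributes_timestamp_ok(12, 12)); // equal timestamps are rejected
}
```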
- cancun_fields: Option, + /// The execution payload sidecar with additional version-specific fields received by + /// engine API. + sidecar: ExecutionPayloadSidecar, /// The sender for returning payload status result. tx: oneshot::Sender>, }, @@ -155,6 +155,8 @@ pub enum BeaconEngineMessage { state: ForkchoiceState, /// The payload attributes for block building. payload_attrs: Option, + /// The Engine API Version. + version: EngineApiMessageVersion, /// The sender for returning forkchoice updated result. tx: oneshot::Sender>, }, diff --git a/crates/engine/service/Cargo.toml b/crates/engine/service/Cargo.toml index c6098bfe667..326bc06b5e3 100644 --- a/crates/engine/service/Cargo.toml +++ b/crates/engine/service/Cargo.toml @@ -18,13 +18,14 @@ reth-engine-tree.workspace = true reth-evm.workspace = true reth-network-p2p.workspace = true reth-payload-builder.workspace = true -reth-payload-validator.workspace = true +reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-stages-api.workspace = true reth-tasks.workspace = true reth-node-types.workspace = true reth-chainspec.workspace = true +reth-engine-primitives.workspace = true # async futures.workspace = true diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 026476a8260..5dfe4184257 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -1,8 +1,9 @@ use futures::{Stream, StreamExt}; use pin_project::pin_project; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineNodeTypes}; +use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; -use reth_consensus::Consensus; +use reth_consensus::FullConsensus; +use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; use reth_engine_tree::{ backfill::PipelineSync, download::BasicBlockDownloader, @@ -15,10 +16,10 @@ pub use reth_engine_tree::{ engine::EngineApiEvent, }; use reth_evm::execute::BlockExecutorProvider; -use reth_network_p2p::BlockClient; -use reth_node_types::NodeTypesWithEngine; +use reth_network_p2p::EthBlockClient; +use reth_node_types::{BlockTy, NodeTypes, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::EthPrimitives; use reth_provider::{providers::BlockchainProvider2, ProviderFactory}; use reth_prune::PrunerWithFactory; use reth_stages_api::{MetricEventsSender, Pipeline}; @@ -36,7 +37,10 @@ pub type EngineMessageStream = Pin = ChainOrchestrator< EngineHandler< - EngineApiRequestHandler::Engine>>, + EngineApiRequestHandler< + EngineApiRequest<::Engine, ::Primitives>, + ::Primitives, + >, EngineMessageStream<::Engine>, BasicBlockDownloader, >, @@ -49,7 +53,7 @@ type EngineServiceType = ChainOrchestrator< pub struct EngineService where N: EngineNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { orchestrator: EngineServiceType, @@ -59,13 +63,13 @@ where impl EngineService where N: EngineNodeTypes, - Client: BlockClient + 'static, - E: BlockExecutorProvider + 'static, + Client: EthBlockClient + 'static, + E: BlockExecutorProvider + 'static, { /// Constructor for `EngineService`. 
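Each `BeaconEngineMessage` variant carries a oneshot responder, and FCU messages now also carry the explicit Engine API version. A hedged sketch of issuing one (assuming `T: EngineTypes`, the message types introduced in this diff, and `eyre` for error plumbing as in the miner):

```rust
use alloy_rpc_types_engine::ForkchoiceState;
use tokio::sync::{mpsc::UnboundedSender, oneshot};

async fn send_fcu<T: EngineTypes>(
    to_engine: &UnboundedSender<BeaconEngineMessage<T>>,
    state: ForkchoiceState,
) -> eyre::Result<()> {
    let (tx, rx) = oneshot::channel();
    to_engine.send(BeaconEngineMessage::ForkchoiceUpdated {
        state,
        payload_attrs: None,
        version: EngineApiMessageVersion::default(),
        tx,
    })?;
    // The engine resolves the oneshot with the forkchoice update outcome.
    let _outcome = rx.await??;
    Ok(())
}
```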
#[allow(clippy::too_many_arguments)] - pub fn new( - consensus: Arc, + pub fn new( + consensus: Arc, executor_factory: E, chain_spec: Arc, client: Client, @@ -76,22 +80,25 @@ where blockchain_db: BlockchainProvider2, pruner: PrunerWithFactory>, payload_builder: PayloadBuilderHandle, + payload_validator: V, tree_config: TreeConfig, - invalid_block_hook: Box, + invalid_block_hook: Box>, sync_metrics_tx: MetricEventsSender, - ) -> Self { + ) -> Self + where + V: EngineValidator>, + { let engine_kind = if chain_spec.is_optimism() { EngineApiKind::OpStack } else { EngineApiKind::Ethereum }; - let downloader = BasicBlockDownloader::new(client, consensus.clone()); + let downloader = BasicBlockDownloader::new(client, consensus.clone().as_consensus()); let persistence_handle = - PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx); - let payload_validator = ExecutionPayloadValidator::new(chain_spec); + PersistenceHandle::::spawn_service(provider, pruner, sync_metrics_tx); let canonical_in_memory_state = blockchain_db.canonical_in_memory_state(); - let (to_tree_tx, from_tree) = EngineApiTreeHandler::spawn_new( + let (to_tree_tx, from_tree) = EngineApiTreeHandler::::spawn_new( blockchain_db, executor_factory, consensus, @@ -124,7 +131,7 @@ where impl Stream for EngineService where N: EngineNodeTypes, - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, E: BlockExecutorProvider + 'static, { type Item = ChainEvent; @@ -145,13 +152,16 @@ mod tests { use super::*; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{ChainSpecBuilder, MAINNET}; + use reth_engine_primitives::BeaconEngineMessage; use reth_engine_tree::{test_utils::TestPipelineBuilder, tree::NoopInvalidBlockHook}; - use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_exex_types::FinishedExExHeight; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives::SealedHeader; - use reth_provider::test_utils::create_test_provider_factory_with_chain_spec; + use reth_provider::{ + providers::BlockchainProvider2, test_utils::create_test_provider_factory_with_chain_spec, + }; use reth_prune::Pruner; use reth_tasks::TokioTaskExecutor; use std::sync::Arc; @@ -182,7 +192,7 @@ mod tests { let blockchain_db = BlockchainProvider2::with_latest(provider_factory.clone(), SealedHeader::default()) .unwrap(); - + let engine_payload_validator = EthereumEngineValidator::new(chain_spec.clone()); let (_tx, rx) = watch::channel(FinishedExExHeight::NoExExs); let pruner = Pruner::new_with_factory(provider_factory.clone(), vec![], 0, 0, None, rx); @@ -200,6 +210,7 @@ mod tests { blockchain_db, pruner, PayloadBuilderHandle::new(tx), + engine_payload_validator, TreeConfig::default(), Box::new(NoopInvalidBlockHook::default()), sync_metrics_tx, diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 91c9cd5422d..f428c8771cb 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -13,42 +13,50 @@ workspace = true [dependencies] # reth reth-beacon-consensus.workspace = true -reth-blockchain-tree.workspace = true reth-blockchain-tree-api.workspace = true +reth-blockchain-tree.workspace = true reth-chain-state.workspace = true +reth-chainspec = { workspace = true, optional = true } reth-consensus.workspace = true -reth-chainspec.workspace = true reth-engine-primitives.workspace = true reth-errors.workspace = true 
reth-evm.workspace = true
reth-network-p2p.workspace = true
+reth-payload-builder-primitives.workspace = true
reth-payload-builder.workspace = true
reth-payload-primitives.workspace = true
-reth-payload-validator.workspace = true
reth-primitives.workspace = true
+reth-primitives-traits.workspace = true
reth-provider.workspace = true
reth-prune.workspace = true
reth-revm.workspace = true
reth-stages-api.workspace = true
reth-tasks.workspace = true
-reth-trie.workspace = true
+reth-trie-db.workspace = true
reth-trie-parallel.workspace = true
+reth-trie-sparse.workspace = true
+reth-trie.workspace = true

# alloy
-alloy-primitives.workspace = true
+alloy-consensus.workspace = true
alloy-eips.workspace = true
+alloy-primitives.workspace = true
+alloy-rlp.workspace = true
alloy-rpc-types-engine.workspace = true

+revm-primitives.workspace = true
+
# common
futures.workspace = true
-tokio = { workspace = true, features = ["macros", "sync"] }
thiserror.workspace = true
+tokio = { workspace = true, features = ["macros", "sync"] }

# metrics
metrics.workspace = true
reth-metrics = { workspace = true, features = ["common"] }

# misc
+rayon.workspace = true
tracing.workspace = true

# optional deps for test-utils
@@ -59,32 +67,58 @@ reth-tracing = { workspace = true, optional = true }

[dev-dependencies]
# reth
-reth-db = { workspace = true, features = ["test-utils"] }
reth-chain-state = { workspace = true, features = ["test-utils"] }
+reth-chainspec.workspace = true
+reth-db = { workspace = true, features = ["test-utils"] }
reth-ethereum-engine-primitives.workspace = true
reth-evm = { workspace = true, features = ["test-utils"] }
reth-exex-types.workspace = true
reth-network-p2p = { workspace = true, features = ["test-utils"] }
-reth-prune.workspace = true
reth-prune-types.workspace = true
+reth-prune.workspace = true
reth-rpc-types-compat.workspace = true
reth-stages = { workspace = true, features = ["test-utils"] }
reth-static-file.workspace = true
+reth-testing-utils.workspace = true
reth-tracing.workspace = true
-reth-chainspec.workspace = true

+# alloy
alloy-rlp.workspace = true

assert_matches.workspace = true
+criterion.workspace = true
+crossbeam-channel = "0.5.13"
rand.workspace = true

+[[bench]]
+name = "channel_perf"
+harness = false
+
+[[bench]]
+name = "state_root_task"
+harness = false
+
[features]
test-utils = [
-    "reth-db/test-utils",
-    "reth-chain-state/test-utils",
-    "reth-network-p2p/test-utils",
-    "reth-prune-types",
-    "reth-stages/test-utils",
-    "reth-static-file",
-    "reth-tracing"
+    "reth-blockchain-tree/test-utils",
+    "reth-chain-state/test-utils",
+    "reth-chainspec/test-utils",
+    "reth-consensus/test-utils",
+    "reth-db/test-utils",
+    "reth-evm/test-utils",
+    "reth-network-p2p/test-utils",
+    "reth-payload-builder/test-utils",
+    "reth-primitives/test-utils",
+    "reth-primitives-traits/test-utils",
+    "reth-provider/test-utils",
+    "reth-prune-types",
+    "reth-prune-types?/test-utils",
+    "reth-revm/test-utils",
+    "reth-stages-api/test-utils",
+    "reth-stages/test-utils",
+    "reth-static-file",
+    "reth-tracing",
+    "reth-trie/test-utils",
+    "reth-trie-db/test-utils",
]
diff --git a/crates/engine/tree/benches/channel_perf.rs b/crates/engine/tree/benches/channel_perf.rs
new file mode 100644
index 00000000000..c1c65e0a68e
--- /dev/null
+++ b/crates/engine/tree/benches/channel_perf.rs
@@ -0,0 +1,132 @@
+//! Benchmark comparing `std::sync::mpsc` and `crossbeam` channels for `StateRootTask`.
+ +#![allow(missing_docs)] + +use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion}; +use revm_primitives::{ + Account, AccountInfo, AccountStatus, Address, EvmState, EvmStorage, EvmStorageSlot, HashMap, + B256, U256, +}; +use std::thread; + +/// Creates a mock state with the specified number of accounts for benchmarking +fn create_bench_state(num_accounts: usize) -> EvmState { + let mut state_changes = HashMap::default(); + + for i in 0..num_accounts { + let storage = + EvmStorage::from_iter([(U256::from(i), EvmStorageSlot::new(U256::from(i + 1)))]); + + let account = Account { + info: AccountInfo { + balance: U256::from(100), + nonce: 10, + code_hash: B256::random(), + code: Default::default(), + }, + storage, + status: AccountStatus::Loaded, + }; + + let address = Address::random(); + state_changes.insert(address, account); + } + + state_changes +} + +/// Simulated `StateRootTask` with `std::sync::mpsc` +struct StdStateRootTask { + rx: std::sync::mpsc::Receiver, +} + +impl StdStateRootTask { + const fn new(rx: std::sync::mpsc::Receiver) -> Self { + Self { rx } + } + + fn run(self) { + while let Ok(state) = self.rx.recv() { + criterion::black_box(state); + } + } +} + +/// Simulated `StateRootTask` with `crossbeam-channel` +struct CrossbeamStateRootTask { + rx: crossbeam_channel::Receiver, +} + +impl CrossbeamStateRootTask { + const fn new(rx: crossbeam_channel::Receiver) -> Self { + Self { rx } + } + + fn run(self) { + while let Ok(state) = self.rx.recv() { + criterion::black_box(state); + } + } +} + +/// Benchmarks the performance of different channel implementations for state streaming +fn bench_state_stream(c: &mut Criterion) { + let mut group = c.benchmark_group("state_stream_channels"); + group.sample_size(10); + + for size in &[1, 10, 100] { + let bench_setup = || { + let states: Vec<_> = (0..100).map(|_| create_bench_state(*size)).collect(); + states + }; + + group.bench_with_input(BenchmarkId::new("std_channel", size), size, |b, _| { + b.iter_batched( + bench_setup, + |states| { + let (tx, rx) = std::sync::mpsc::channel(); + let task = StdStateRootTask::new(rx); + + let processor = thread::spawn(move || { + task.run(); + }); + + for state in states { + tx.send(state).unwrap(); + } + drop(tx); + + processor.join().unwrap(); + }, + BatchSize::LargeInput, + ); + }); + + group.bench_with_input(BenchmarkId::new("crossbeam_channel", size), size, |b, _| { + b.iter_batched( + bench_setup, + |states| { + let (tx, rx) = crossbeam_channel::unbounded(); + let task = CrossbeamStateRootTask::new(rx); + + let processor = thread::spawn(move || { + task.run(); + }); + + for state in states { + tx.send(state).unwrap(); + } + drop(tx); + + processor.join().unwrap(); + }, + BatchSize::LargeInput, + ); + }); + } + + group.finish(); +} + +criterion_group!(benches, bench_state_stream); +criterion_main!(benches); diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs new file mode 100644 index 00000000000..391fd333d12 --- /dev/null +++ b/crates/engine/tree/benches/state_root_task.rs @@ -0,0 +1,166 @@ +//! Benchmark for `StateRootTask` complete workflow, including sending state +//! updates using the incoming messages sender and waiting for the final result. 
+ +#![allow(missing_docs)] + +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; +use reth_engine_tree::tree::root::{StateRootConfig, StateRootTask}; +use reth_evm::system_calls::OnStateHook; +use reth_primitives::{Account as RethAccount, StorageEntry}; +use reth_provider::{ + providers::ConsistentDbView, + test_utils::{create_test_provider_factory, MockNodeTypesWithDB}, + HashingWriter, ProviderFactory, +}; +use reth_testing_utils::generators::{self, Rng}; +use reth_trie::TrieInput; +use revm_primitives::{ + Account as RevmAccount, AccountInfo, AccountStatus, Address, EvmState, EvmStorageSlot, HashMap, + B256, KECCAK_EMPTY, U256, +}; +use std::sync::Arc; + +#[derive(Debug, Clone)] +struct BenchParams { + num_accounts: usize, + updates_per_account: usize, + storage_slots_per_account: usize, +} + +fn create_bench_state_updates(params: &BenchParams) -> Vec { + let mut rng = generators::rng(); + let all_addresses: Vec
= (0..params.num_accounts).map(|_| rng.gen()).collect(); + let mut updates = Vec::new(); + + for _ in 0..params.updates_per_account { + let num_accounts_in_update = rng.gen_range(1..=params.num_accounts); + let mut state_update = EvmState::default(); + + let selected_addresses = &all_addresses[0..num_accounts_in_update]; + + for &address in selected_addresses { + let mut storage = HashMap::default(); + for _ in 0..params.storage_slots_per_account { + let slot = U256::from(rng.gen::()); + storage.insert( + slot, + EvmStorageSlot::new_changed(U256::ZERO, U256::from(rng.gen::())), + ); + } + + let account = RevmAccount { + info: AccountInfo { + balance: U256::from(rng.gen::()), + nonce: rng.gen::(), + code_hash: KECCAK_EMPTY, + code: Some(Default::default()), + }, + storage, + status: AccountStatus::Touched, + }; + + state_update.insert(address, account); + } + + updates.push(state_update); + } + + updates +} + +fn convert_revm_to_reth_account(revm_account: &RevmAccount) -> RethAccount { + RethAccount { + balance: revm_account.info.balance, + nonce: revm_account.info.nonce, + bytecode_hash: if revm_account.info.code_hash == KECCAK_EMPTY { + None + } else { + Some(revm_account.info.code_hash) + }, + } +} + +fn setup_provider( + factory: &ProviderFactory, + state_updates: &[EvmState], +) -> Result<(), Box> { + let provider_rw = factory.provider_rw()?; + + for update in state_updates { + let account_updates = update + .iter() + .map(|(address, account)| (*address, Some(convert_revm_to_reth_account(account)))); + provider_rw.insert_account_for_hashing(account_updates)?; + + let storage_updates = update.iter().map(|(address, account)| { + let storage_entries = account.storage.iter().map(|(slot, value)| StorageEntry { + key: B256::from(*slot), + value: value.present_value, + }); + (*address, storage_entries) + }); + provider_rw.insert_storage_for_hashing(storage_updates)?; + } + + provider_rw.commit()?; + Ok(()) +} + +fn bench_state_root(c: &mut Criterion) { + let mut group = c.benchmark_group("state_root"); + + let scenarios = vec![ + BenchParams { num_accounts: 100, updates_per_account: 5, storage_slots_per_account: 10 }, + BenchParams { num_accounts: 1000, updates_per_account: 10, storage_slots_per_account: 20 }, + ]; + + for params in scenarios { + group.bench_with_input( + BenchmarkId::new( + "state_root_task", + format!( + "accounts_{}_updates_{}_slots_{}", + params.num_accounts, + params.updates_per_account, + params.storage_slots_per_account + ), + ), + ¶ms, + |b, params| { + b.iter_with_setup( + || { + let factory = create_test_provider_factory(); + let state_updates = create_bench_state_updates(params); + setup_provider(&factory, &state_updates).expect("failed to setup provider"); + + let trie_input = Arc::new(TrieInput::from_state(Default::default())); + + let config = StateRootConfig { + consistent_view: ConsistentDbView::new(factory, None), + input: trie_input, + }; + + (config, state_updates) + }, + |(config, state_updates)| { + let task = StateRootTask::new(config); + let mut hook = task.state_hook(); + let handle = task.spawn(); + + for update in state_updates { + hook.on_state(&update) + } + drop(hook); + + black_box(handle.wait_for_result().expect("task failed")); + }, + ) + }, + ); + } + + group.finish(); +} + +criterion_group!(benches, bench_state_root); +criterion_main!(benches); diff --git a/crates/engine/tree/src/backfill.rs b/crates/engine/tree/src/backfill.rs index 78e21a7b5ef..2ed0e758d50 100644 --- a/crates/engine/tree/src/backfill.rs +++ 
b/crates/engine/tree/src/backfill.rs @@ -230,12 +230,13 @@ impl PipelineState { mod tests { use super::*; use crate::test_utils::{insert_headers_into_client, TestPipelineBuilder}; - use alloy_primitives::{BlockNumber, Sealable, B256}; + use alloy_consensus::Header; + use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use futures::poll; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_network_p2p::test_utils::TestFullBlockClient; - use reth_primitives::{Header, SealedHeader}; + use reth_primitives::SealedHeader; use reth_provider::test_utils::MockNodeTypesWithDB; use reth_stages::ExecOutput; use reth_stages_api::StageCheckpoint; @@ -267,14 +268,12 @@ mod tests { let pipeline_sync = PipelineSync::new(pipeline, Box::::default()); let client = TestFullBlockClient::default(); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(7), gas_limit: chain_spec.max_gas_limit, ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); insert_headers_into_client(&client, header, 0..total_blocks); let tip = client.highest_block().expect("there should be blocks here").hash(); diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs index 9ecec70ae36..8a7ea583f0f 100644 --- a/crates/engine/tree/src/download.rs +++ b/crates/engine/tree/src/download.rs @@ -6,12 +6,13 @@ use futures::FutureExt; use reth_consensus::Consensus; use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, - BlockClient, + BlockClient, EthBlockClient, }; use reth_primitives::{SealedBlock, SealedBlockWithSenders}; use std::{ cmp::{Ordering, Reverse}, collections::{binary_heap::PeekMut, BinaryHeap, HashSet, VecDeque}, + fmt::Debug, sync::Arc, task::{Context, Poll}, }; @@ -72,10 +73,13 @@ where impl BasicBlockDownloader where - Client: BlockClient + 'static, + Client: EthBlockClient + 'static, { /// Create a new instance - pub fn new(client: Client, consensus: Arc) -> Self { + pub fn new( + client: Client, + consensus: Arc>, + ) -> Self { Self { full_block_client: FullBlockClient::new(client, consensus), inflight_full_block_requests: Vec::new(), @@ -182,7 +186,7 @@ where impl BlockDownloader for BasicBlockDownloader where - Client: BlockClient + 'static, + Client: EthBlockClient, { /// Handles incoming download actions. 
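As the updated test setups in these hunks show, header sealing is now a one-step constructor instead of the `seal_slow()` + `into_parts()` + `new(..)` dance; a short sketch of the new form:

```rust
use alloy_consensus::Header;
use reth_primitives::SealedHeader;

fn main() {
    // One-step sealing, as the updated tests do.
    let header = Header { base_fee_per_gas: Some(7), ..Default::default() };
    let sealed = SealedHeader::seal(header);

    // The hash is computed once at seal time and reused afterwards.
    let _block_hash = sealed.hash();
}
```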
fn on_action(&mut self, action: DownloadAction) { @@ -305,12 +309,12 @@ impl BlockDownloader for NoopBlockDownloader { mod tests { use super::*; use crate::test_utils::insert_headers_into_client; - use alloy_primitives::Sealable; + use alloy_consensus::Header; use assert_matches::assert_matches; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_network_p2p::test_utils::TestFullBlockClient; - use reth_primitives::{Header, SealedHeader}; + use reth_primitives::SealedHeader; use std::{future::poll_fn, sync::Arc}; struct TestHarness { @@ -329,14 +333,12 @@ mod tests { ); let client = TestFullBlockClient::default(); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(7), gas_limit: chain_spec.max_gas_limit, ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + }; + let header = SealedHeader::seal(header); insert_headers_into_client(&client, header, 0..total_blocks); let consensus = Arc::new(EthBeaconConsensus::new(chain_spec)); diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index c1571ed8217..9fa0a8c1d21 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -7,10 +7,11 @@ use crate::{ }; use alloy_primitives::B256; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; +use reth_beacon_consensus::BeaconConsensusEngineEvent; use reth_chain_state::ExecutedBlock; -use reth_engine_primitives::EngineTypes; -use reth_primitives::SealedBlockWithSenders; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders}; +use reth_primitives_traits::Block; use std::{ collections::HashSet, fmt::Display, @@ -66,7 +67,7 @@ impl EngineHandler { impl ChainHandler for EngineHandler where - T: EngineRequestHandler, + T: EngineRequestHandler, S: Stream + Send + Sync + Unpin + 'static, ::Item: Into, D: BlockDownloader, @@ -113,9 +114,11 @@ where } // advance the downloader - if let Poll::Ready(DownloadOutcome::Blocks(blocks)) = self.downloader.poll(cx) { - // delegate the downloaded blocks to the handler - self.handler.on_event(FromEngine::DownloadedBlocks(blocks)); + if let Poll::Ready(outcome) = self.downloader.poll(cx) { + if let DownloadOutcome::Blocks(blocks) = outcome { + // delegate the downloaded blocks to the handler + self.handler.on_event(FromEngine::DownloadedBlocks(blocks)); + } continue } @@ -137,9 +140,11 @@ pub trait EngineRequestHandler: Send + Sync { type Event: Send; /// The request type this handler can process. type Request; + /// Type of the block sent in [`FromEngine::DownloadedBlocks`] variant. + type Block: Block; /// Informs the handler about an event from the [`EngineHandler`]. - fn on_event(&mut self, event: FromEngine); + fn on_event(&mut self, event: FromEngine); /// Advances the handler. fn poll(&mut self, cx: &mut Context<'_>) -> Poll>; @@ -165,31 +170,32 @@ pub trait EngineRequestHandler: Send + Sync { /// In case required blocks are missing, the handler will request them from the network, by emitting /// a download request upstream. #[derive(Debug)] -pub struct EngineApiRequestHandler { +pub struct EngineApiRequestHandler { /// channel to send messages to the tree to execute the payload. - to_tree: Sender>, + to_tree: Sender>, /// channel to receive messages from the tree. 
- from_tree: UnboundedReceiver, + from_tree: UnboundedReceiver>, } -impl EngineApiRequestHandler { +impl EngineApiRequestHandler { /// Creates a new `EngineApiRequestHandler`. pub const fn new( - to_tree: Sender>, - from_tree: UnboundedReceiver, + to_tree: Sender>, + from_tree: UnboundedReceiver>, ) -> Self { Self { to_tree, from_tree } } } -impl EngineRequestHandler for EngineApiRequestHandler +impl EngineRequestHandler for EngineApiRequestHandler where Request: Send, { - type Event = BeaconConsensusEngineEvent; + type Event = BeaconConsensusEngineEvent; type Request = Request; + type Block = N::Block; - fn on_event(&mut self, event: FromEngine) { + fn on_event(&mut self, event: FromEngine) { // delegate to the tree let _ = self.to_tree.send(event); } @@ -236,14 +242,14 @@ impl EngineApiKind { /// The request variants that the engine API handler can receive. #[derive(Debug)] -pub enum EngineApiRequest { +pub enum EngineApiRequest { /// A request received from the consensus engine. Beacon(BeaconEngineMessage), /// Request to insert an already executed block, e.g. via payload building. - InsertExecutedBlock(ExecutedBlock), + InsertExecutedBlock(ExecutedBlock), } -impl Display for EngineApiRequest { +impl Display for EngineApiRequest { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Beacon(msg) => msg.fmt(f), @@ -254,55 +260,57 @@ impl Display for EngineApiRequest { } } -impl From> for EngineApiRequest { +impl From> for EngineApiRequest { fn from(msg: BeaconEngineMessage) -> Self { Self::Beacon(msg) } } -impl From> for FromEngine> { - fn from(req: EngineApiRequest) -> Self { +impl From> + for FromEngine, N::Block> +{ + fn from(req: EngineApiRequest) -> Self { Self::Request(req) } } /// Events emitted by the engine API handler. #[derive(Debug)] -pub enum EngineApiEvent { +pub enum EngineApiEvent { /// Event from the consensus engine. // TODO(mattsse): find a more appropriate name for this variant, consider phasing it out. - BeaconConsensus(BeaconConsensusEngineEvent), + BeaconConsensus(BeaconConsensusEngineEvent), /// Backfill action is needed. BackfillAction(BackfillAction), /// Block download is needed. Download(DownloadRequest), } -impl EngineApiEvent { +impl EngineApiEvent { /// Returns `true` if the event is a backfill action. pub const fn is_backfill_action(&self) -> bool { matches!(self, Self::BackfillAction(_)) } } -impl From for EngineApiEvent { - fn from(event: BeaconConsensusEngineEvent) -> Self { +impl From> for EngineApiEvent { + fn from(event: BeaconConsensusEngineEvent) -> Self { Self::BeaconConsensus(event) } } /// Events received from the engine. #[derive(Debug)] -pub enum FromEngine { +pub enum FromEngine { /// Event from the top level orchestrator. Event(FromOrchestrator), /// Request from the engine. Request(Req), /// Downloaded blocks from the network. 
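The handler's job is mostly funneling: orchestrator events, engine requests, and downloaded blocks all collapse into one message type sent to the tree task, with send errors deliberately ignored. A simplified stand-in for that shape (not reth's exact generics):

```rust
use std::sync::mpsc;

/// Simplified stand-in for `FromEngine<Req, Block>`.
#[allow(dead_code)]
enum Funnel<Req, Block> {
    Event(String),
    Request(Req),
    DownloadedBlocks(Vec<Block>),
}

/// Forward a request to the tree task; the send result is dropped, matching
/// `let _ = self.to_tree.send(event)` above.
fn forward<Req, Block>(to_tree: &mpsc::Sender<Funnel<Req, Block>>, req: Req) {
    let _ = to_tree.send(Funnel::Request(req));
}
```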
- DownloadedBlocks(Vec), + DownloadedBlocks(Vec>), } -impl Display for FromEngine { +impl Display for FromEngine { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Event(ev) => write!(f, "Event({ev:?})"), @@ -314,7 +322,7 @@ impl Display for FromEngine { } } -impl From for FromEngine { +impl From for FromEngine { fn from(event: FromOrchestrator) -> Self { Self::Event(event) } diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 25c1f0ed703..c7ad4110086 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -1,7 +1,9 @@ use crate::metrics::PersistenceMetrics; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use reth_chain_state::ExecutedBlock; use reth_errors::ProviderError; +use reth_primitives::{EthPrimitives, NodePrimitives}; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, BlockHashReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, StaticFileProviderFactory, @@ -24,11 +26,14 @@ use tracing::{debug, error}; /// This should be spawned in its own thread with [`std::thread::spawn`], since this performs /// blocking I/O operations in an endless loop. #[derive(Debug)] -pub struct PersistenceService { +pub struct PersistenceService +where + N: ProviderNodeTypes, +{ /// The provider factory to use provider: ProviderFactory, /// Incoming requests - incoming: Receiver, + incoming: Receiver>, /// The pruner pruner: PrunerWithFactory>, /// metrics @@ -37,11 +42,14 @@ pub struct PersistenceService { sync_metrics_tx: MetricEventsSender, } -impl PersistenceService { +impl PersistenceService +where + N: ProviderNodeTypes, +{ /// Create a new persistence service pub fn new( provider: ProviderFactory, - incoming: Receiver, + incoming: Receiver>, pruner: PrunerWithFactory>, sync_metrics_tx: MetricEventsSender, ) -> Self { @@ -60,7 +68,10 @@ impl PersistenceService { } } -impl PersistenceService { +impl PersistenceService +where + N: ProviderNodeTypes, +{ /// This is the main loop, that will listen to database events and perform the requested /// database actions pub fn run(mut self) -> Result<(), PersistenceError> { @@ -77,20 +88,22 @@ impl PersistenceService { } PersistenceAction::SaveBlocks(blocks, sender) => { let result = self.on_save_blocks(blocks)?; - if let Some(ref num_hash) = result { + let result_number = result.map(|r| r.number); + + // we ignore the error because the caller may or may not care about the result + let _ = sender.send(result); + + if let Some(block_number) = result_number { // send new sync metrics based on saved blocks let _ = self .sync_metrics_tx - .send(MetricEvent::SyncHeight { height: num_hash.number }); - } - // we ignore the error because the caller may or may not care about the result - let _ = sender.send(result); - } - PersistenceAction::PruneBefore(block_num, sender) => { - let res = self.prune_before(block_num)?; + .send(MetricEvent::SyncHeight { height: block_number }); - // we ignore the error because the caller may or may not care about the result - let _ = sender.send(res); + if self.pruner.is_pruning_needed(block_number) { + // We log `PrunerOutput` inside the `Pruner` + let _ = self.prune_before(block_number)?; + } + } } PersistenceAction::SaveFinalizedBlock(finalized_block) => { let provider = self.provider.database_provider_rw()?; @@ -118,7 +131,7 @@ impl PersistenceService { let new_tip_hash = provider_rw.block_hash(new_tip_num)?; 
UnifiedStorageWriter::from(&provider_rw, &sf_provider).remove_blocks_above(new_tip_num)?; - UnifiedStorageWriter::commit_unwind(provider_rw, sf_provider)?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; debug!(target: "engine::persistence", ?new_tip_num, ?new_tip_hash, "Removed blocks from disk"); self.metrics.remove_blocks_above_duration_seconds.record(start_time.elapsed()); @@ -127,20 +140,21 @@ impl PersistenceService { fn on_save_blocks( &self, - blocks: Vec, + blocks: Vec>, ) -> Result, PersistenceError> { debug!(target: "engine::persistence", first=?blocks.first().map(|b| b.block.num_hash()), last=?blocks.last().map(|b| b.block.num_hash()), "Saving range of blocks"); let start_time = Instant::now(); - let last_block_hash_num = blocks - .last() - .map(|block| BlockNumHash { hash: block.block().hash(), number: block.block().number }); + let last_block_hash_num = blocks.last().map(|block| BlockNumHash { + hash: block.block().hash(), + number: block.block().header().number(), + }); if last_block_hash_num.is_some() { let provider_rw = self.provider.database_provider_rw()?; let static_file_provider = self.provider.static_file_provider(); - UnifiedStorageWriter::from(&provider_rw, &static_file_provider).save_blocks(&blocks)?; - UnifiedStorageWriter::commit(provider_rw, static_file_provider)?; + UnifiedStorageWriter::from(&provider_rw, &static_file_provider).save_blocks(blocks)?; + UnifiedStorageWriter::commit(provider_rw)?; } self.metrics.save_blocks_duration_seconds.record(start_time.elapsed()); Ok(last_block_hash_num) @@ -161,13 +175,13 @@ pub enum PersistenceError { /// A signal to the persistence service that part of the tree state can be persisted. #[derive(Debug)] -pub enum PersistenceAction { +pub enum PersistenceAction { /// The section of tree state that should be persisted. These blocks are expected in order of /// increasing block number. /// /// First, header, transaction, and receipt-related data should be written to static files. /// Then the execution history-related data will be written to the database. - SaveBlocks(Vec, oneshot::Sender>), + SaveBlocks(Vec>, oneshot::Sender>), /// Removes block data above the given block number from the database. /// @@ -175,10 +189,6 @@ pub enum PersistenceAction { /// static files. RemoveBlocksAbove(u64, oneshot::Sender>), - /// Prune associated block data before the given block number, according to already-configured - /// prune modes. - PruneBefore(u64, oneshot::Sender), - /// Update the persisted finalized block on disk SaveFinalizedBlock(u64), @@ -188,28 +198,31 @@ pub enum PersistenceAction { /// A handle to the persistence service #[derive(Debug, Clone)] -pub struct PersistenceHandle { +pub struct PersistenceHandle { /// The channel used to communicate with the persistence service - sender: Sender, + sender: Sender>, } -impl PersistenceHandle { +impl PersistenceHandle { /// Create a new [`PersistenceHandle`] from a [`Sender`]. - pub const fn new(sender: Sender) -> Self { + pub const fn new(sender: Sender>) -> Self { Self { sender } } /// Create a new [`PersistenceHandle`], and spawn the persistence service. 
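    /// A minimal usage sketch (illustrative only; it mirrors the test helper further below —
    /// `provider`, `pruner`, `sync_metrics_tx` and `blocks` are assumed to be built elsewhere):
    ///
    /// ```ignore
    /// let handle = PersistenceHandle::<EthPrimitives>::spawn_service(provider, pruner, sync_metrics_tx);
    /// let (tx, rx) = tokio::sync::oneshot::channel();
    /// handle.save_blocks(blocks, tx)?;
    /// // the saved tip (`Option<BlockNumHash>`) comes back on the oneshot receiver
    /// let saved_tip = rx.await?;
    /// ```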
- pub fn spawn_service( + pub fn spawn_service( provider_factory: ProviderFactory, pruner: PrunerWithFactory>, sync_metrics_tx: MetricEventsSender, - ) -> Self { + ) -> PersistenceHandle + where + N: ProviderNodeTypes, + { // create the initial channels let (db_service_tx, db_service_rx) = std::sync::mpsc::channel(); // construct persistence handle - let persistence_handle = Self::new(db_service_tx); + let persistence_handle = PersistenceHandle::new(db_service_tx); // spawn the persistence service let db_service = @@ -230,8 +243,8 @@ impl PersistenceHandle { /// for creating any channels for the given action. pub fn send_action( &self, - action: PersistenceAction, - ) -> Result<(), SendError> { + action: PersistenceAction, + ) -> Result<(), SendError>> { self.sender.send(action) } @@ -245,9 +258,9 @@ impl PersistenceHandle { /// If there are no blocks to persist, then `None` is sent in the sender. pub fn save_blocks( &self, - blocks: Vec, + blocks: Vec>, tx: oneshot::Sender>, - ) -> Result<(), SendError> { + ) -> Result<(), SendError>> { self.send_action(PersistenceAction::SaveBlocks(blocks, tx)) } @@ -255,7 +268,7 @@ impl PersistenceHandle { pub fn save_finalized_block_number( &self, finalized_block: u64, - ) -> Result<(), SendError> { + ) -> Result<(), SendError>> { self.send_action(PersistenceAction::SaveFinalizedBlock(finalized_block)) } @@ -263,7 +276,7 @@ impl PersistenceHandle { pub fn save_safe_block_number( &self, safe_block: u64, - ) -> Result<(), SendError> { + ) -> Result<(), SendError>> { self.send_action(PersistenceAction::SaveSafeBlock(safe_block)) } @@ -276,21 +289,9 @@ impl PersistenceHandle { &self, block_num: u64, tx: oneshot::Sender>, - ) -> Result<(), SendError> { + ) -> Result<(), SendError>> { self.send_action(PersistenceAction::RemoveBlocksAbove(block_num, tx)) } - - /// Tells the persistence service to remove block data before the given hash, according to the - /// configured prune config. - /// - /// The resulting [`PrunerOutput`] is returned in the receiver end of the sender argument. 
- pub fn prune_before( - &self, - block_num: u64, - tx: oneshot::Sender, - ) -> Result<(), SendError> { - self.send_action(PersistenceAction::PruneBefore(block_num, tx)) - } } #[cfg(test)] @@ -303,7 +304,7 @@ mod tests { use reth_prune::Pruner; use tokio::sync::mpsc::unbounded_channel; - fn default_persistence_handle() -> PersistenceHandle { + fn default_persistence_handle() -> PersistenceHandle { let provider = create_test_provider_factory(); let (_finished_exex_height_tx, finished_exex_height_rx) = @@ -313,7 +314,7 @@ mod tests { Pruner::new_with_factory(provider.clone(), vec![], 5, 0, None, finished_exex_height_rx); let (sync_metrics_tx, _sync_metrics_rx) = unbounded_channel(); - PersistenceHandle::spawn_service(provider, pruner, sync_metrics_tx) + PersistenceHandle::::spawn_service(provider, pruner, sync_metrics_tx) } #[tokio::test] diff --git a/crates/engine/tree/src/test_utils.rs b/crates/engine/tree/src/test_utils.rs index f17766a43ed..c1b534ebf5e 100644 --- a/crates/engine/tree/src/test_utils.rs +++ b/crates/engine/tree/src/test_utils.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{Sealable, B256}; +use alloy_primitives::B256; use reth_chainspec::ChainSpec; use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives::{BlockBody, SealedHeader}; @@ -76,9 +76,7 @@ pub fn insert_headers_into_client( header.parent_hash = hash; header.number += 1; header.timestamp += 1; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - sealed_header = SealedHeader::new(header, seal); + sealed_header = SealedHeader::seal(header); client.insert(sealed_header.clone(), body.clone()); } } diff --git a/crates/engine/tree/src/tree/invalid_block_hook.rs b/crates/engine/tree/src/tree/invalid_block_hook.rs index 98244ed1349..7c7b0631dd2 100644 --- a/crates/engine/tree/src/tree/invalid_block_hook.rs +++ b/crates/engine/tree/src/tree/invalid_block_hook.rs @@ -1,6 +1,6 @@ use alloy_primitives::B256; use reth_engine_primitives::InvalidBlockHook; -use reth_primitives::{Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; use reth_provider::BlockExecutionOutput; use reth_trie::updates::TrieUpdates; @@ -9,32 +9,32 @@ use reth_trie::updates::TrieUpdates; #[non_exhaustive] pub struct NoopInvalidBlockHook; -impl InvalidBlockHook for NoopInvalidBlockHook { +impl InvalidBlockHook for NoopInvalidBlockHook { fn on_invalid_block( &self, - _parent_header: &SealedHeader, - _block: &SealedBlockWithSenders, - _output: &BlockExecutionOutput, + _parent_header: &SealedHeader, + _block: &SealedBlockWithSenders, + _output: &BlockExecutionOutput, _trie_updates: Option<(&TrieUpdates, B256)>, ) { } } /// Multiple [`InvalidBlockHook`]s that are executed in order. 
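/// Composition sketch (illustrative; `witness_hook` is a hypothetical second hook):
///
/// ```ignore
/// let hooks: InvalidBlockHooks<EthPrimitives> =
///     InvalidBlockHooks(vec![Box::new(NoopInvalidBlockHook), Box::new(witness_hook)]);
/// // `on_invalid_block` then fans out to each inner hook, in order.
/// ```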
-pub struct InvalidBlockHooks(pub Vec>); +pub struct InvalidBlockHooks(pub Vec>>); -impl std::fmt::Debug for InvalidBlockHooks { +impl std::fmt::Debug for InvalidBlockHooks { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("InvalidBlockHooks").field("len", &self.0.len()).finish() } } -impl InvalidBlockHook for InvalidBlockHooks { +impl InvalidBlockHook for InvalidBlockHooks { fn on_invalid_block( &self, - parent_header: &SealedHeader, - block: &SealedBlockWithSenders, - output: &BlockExecutionOutput, + parent_header: &SealedHeader, + block: &SealedBlockWithSenders, + output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ) { for hook in &self.0 { diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 3eadbbd522d..234a96a47d0 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1,21 +1,22 @@ use crate::{ backfill::{BackfillAction, BackfillSyncState}, chain::FromOrchestrator, - engine::{DownloadRequest, EngineApiEvent, FromEngine}, + engine::{DownloadRequest, EngineApiEvent, EngineApiKind, EngineApiRequest, FromEngine}, persistence::PersistenceHandle, + tree::metrics::EngineApiMetrics, }; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use alloy_primitives::{ map::{HashMap, HashSet}, BlockNumber, B256, U256, }; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus, PayloadStatusEnum, + ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; use reth_beacon_consensus::{ - BeaconConsensusEngineEvent, BeaconEngineMessage, ForkchoiceStateTracker, InvalidHeaderCache, - OnForkChoiceUpdated, MIN_BLOCKS_FOR_PIPELINE_RUN, + BeaconConsensusEngineEvent, InvalidHeaderCache, MIN_BLOCKS_FOR_PIPELINE_RUN, }; use reth_blockchain_tree::{ error::{InsertBlockErrorKindTwo, InsertBlockErrorTwo, InsertBlockFatalError}, @@ -24,26 +25,31 @@ use reth_blockchain_tree::{ use reth_chain_state::{ CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain, }; -use reth_chainspec::EthereumHardforks; -use reth_consensus::{Consensus, PostExecutionInput}; -use reth_engine_primitives::EngineTypes; +use reth_consensus::{Consensus, FullConsensus, PostExecutionInput}; +use reth_engine_primitives::{ + BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, + EngineValidator, ForkchoiceStateTracker, OnForkChoiceUpdated, +}; use reth_errors::{ConsensusError, ProviderResult}; use reth_evm::execute::BlockExecutorProvider; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_primitives::{PayloadAttributes, PayloadBuilder, PayloadBuilderAttributes}; -use reth_payload_validator::ExecutionPayloadValidator; +use reth_payload_builder_primitives::PayloadBuilder; +use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ - Block, GotExpected, Header, SealedBlock, SealedBlockWithSenders, SealedHeader, + EthPrimitives, GotExpected, NodePrimitives, SealedBlockFor, SealedBlockWithSenders, + SealedHeader, }; +use reth_primitives_traits::Block; use reth_provider::{ providers::ConsistentDbView, BlockReader, DatabaseProviderFactory, ExecutionOutcome, - ProviderError, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, - TransactionVariant, + HashedPostStateProvider, ProviderError, StateCommitmentProvider, StateProviderBox, + StateProviderFactory, StateReader, StateRootProvider, 
TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; -use reth_trie_parallel::parallel_root::{ParallelStateRoot, ParallelStateRootError}; +use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; +use revm_primitives::EvmState; use std::{ cmp::Ordering, collections::{btree_map, hash_map, BTreeMap, VecDeque}, @@ -56,9 +62,8 @@ use std::{ time::Instant, }; use tokio::sync::{ - mpsc::{UnboundedReceiver, UnboundedSender}, - oneshot, - oneshot::error::TryRecvError, + mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, + oneshot::{self, error::TryRecvError}, }; use tracing::*; @@ -66,15 +71,13 @@ pub mod config; mod invalid_block_hook; mod metrics; mod persistence_state; -use crate::{ - engine::{EngineApiKind, EngineApiRequest}, - tree::metrics::EngineApiMetrics, -}; pub use config::TreeConfig; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; pub use reth_engine_primitives::InvalidBlockHook; +pub mod root; + /// Keeps track of the state of the tree. /// /// ## Invariants @@ -82,17 +85,17 @@ pub use reth_engine_primitives::InvalidBlockHook; /// - This only stores blocks that are connected to the canonical chain. /// - All executed blocks are valid and have been executed. #[derive(Debug, Default)] -pub struct TreeState { +pub struct TreeState { /// __All__ unique executed blocks by block hash that are connected to the canonical chain. /// /// This includes blocks of all forks. - blocks_by_hash: HashMap, + blocks_by_hash: HashMap>, /// Executed blocks grouped by their respective block number. /// /// This maps unique block number to all known blocks for that height. /// /// Note: there can be multiple blocks at the same height due to forks. - blocks_by_number: BTreeMap>, + blocks_by_number: BTreeMap>>, /// Map of any parent block hash to its children. parent_to_child: HashMap>, /// Map of hash to trie updates for canonical blocks that are persisted but not finalized. @@ -103,7 +106,7 @@ pub struct TreeState { current_canonical_head: BlockNumHash, } -impl TreeState { +impl TreeState { /// Returns a new, empty tree state that points to the given canonical head. fn new(current_canonical_head: BlockNumHash) -> Self { Self { @@ -121,12 +124,12 @@ impl TreeState { } /// Returns the [`ExecutedBlock`] by hash. - fn executed_block_by_hash(&self, hash: B256) -> Option<&ExecutedBlock> { + fn executed_block_by_hash(&self, hash: B256) -> Option<&ExecutedBlock> { self.blocks_by_hash.get(&hash) } /// Returns the block by hash. - fn block_by_hash(&self, hash: B256) -> Option> { + fn block_by_hash(&self, hash: B256) -> Option>> { self.blocks_by_hash.get(&hash).map(|b| b.block.clone()) } @@ -134,12 +137,12 @@ impl TreeState { /// newest to oldest. And the parent hash of the oldest block that is missing from the buffer. /// /// Returns `None` if the block for the given hash is not found. 
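    /// Worked example (sketch): for an in-memory chain `p -> b1 -> b2` where `p` is already
    /// persisted, the blocks come back newest-first together with the parent hash of the
    /// oldest in-memory block:
    ///
    /// ```ignore
    /// let (oldest_parent, blocks) = tree_state.blocks_by_hash(b2_hash).unwrap();
    /// assert_eq!(oldest_parent, p_hash);
    /// assert_eq!(blocks[0].block().hash(), b2_hash); // newest first
    /// ```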
- fn blocks_by_hash(&self, hash: B256) -> Option<(B256, Vec)> { + fn blocks_by_hash(&self, hash: B256) -> Option<(B256, Vec>)> { let block = self.blocks_by_hash.get(&hash).cloned()?; - let mut parent_hash = block.block().parent_hash; + let mut parent_hash = block.block().parent_hash(); let mut blocks = vec![block]; while let Some(executed) = self.blocks_by_hash.get(&parent_hash) { - parent_hash = executed.block.parent_hash; + parent_hash = executed.block.parent_hash(); blocks.push(executed.clone()); } @@ -147,10 +150,10 @@ impl TreeState { } /// Insert executed block into the state. - fn insert_executed(&mut self, executed: ExecutedBlock) { + fn insert_executed(&mut self, executed: ExecutedBlock) { let hash = executed.block.hash(); - let parent_hash = executed.block.parent_hash; - let block_number = executed.block.number; + let parent_hash = executed.block.parent_hash(); + let block_number = executed.block.number(); if self.blocks_by_hash.contains_key(&hash) { return; @@ -178,11 +181,11 @@ impl TreeState { /// ## Returns /// /// The removed block and the block hashes of its children. - fn remove_by_hash(&mut self, hash: B256) -> Option<(ExecutedBlock, HashSet)> { + fn remove_by_hash(&mut self, hash: B256) -> Option<(ExecutedBlock, HashSet)> { let executed = self.blocks_by_hash.remove(&hash)?; // Remove this block from collection of children of its parent block. - let parent_entry = self.parent_to_child.entry(executed.block.parent_hash); + let parent_entry = self.parent_to_child.entry(executed.block.parent_hash()); if let hash_map::Entry::Occupied(mut entry) = parent_entry { entry.get_mut().remove(&hash); @@ -195,7 +198,7 @@ impl TreeState { let children = self.parent_to_child.remove(&hash).unwrap_or_default(); // Remove this block from `blocks_by_number`. 
- let block_number_entry = self.blocks_by_number.entry(executed.block.number); + let block_number_entry = self.blocks_by_number.entry(executed.block.number()); if let btree_map::Entry::Occupied(mut entry) = block_number_entry { // We have to find the index of the block since it exists in a vec if let Some(index) = entry.get().iter().position(|b| b.block.hash() == hash) { @@ -219,7 +222,7 @@ impl TreeState { } while let Some(executed) = self.blocks_by_hash.get(¤t_block) { - current_block = executed.block.parent_hash; + current_block = executed.block.parent_hash(); if current_block == hash { return true } @@ -247,17 +250,18 @@ impl TreeState { // upper bound let mut current_block = self.current_canonical_head.hash; while let Some(executed) = self.blocks_by_hash.get(¤t_block) { - current_block = executed.block.parent_hash; - if executed.block.number <= upper_bound { + current_block = executed.block.parent_hash(); + if executed.block.number() <= upper_bound { debug!(target: "engine::tree", num_hash=?executed.block.num_hash(), "Attempting to remove block walking back from the head"); if let Some((removed, _)) = self.remove_by_hash(executed.block.hash()) { debug!(target: "engine::tree", num_hash=?removed.block.num_hash(), "Removed block walking back from the head"); // finally, move the trie updates self.persisted_trie_updates - .insert(removed.block.hash(), (removed.block.number, removed.trie)); + .insert(removed.block.hash(), (removed.block.number(), removed.trie)); } } } + debug!(target: "engine::tree", ?upper_bound, ?last_persisted_hash, "Removed canonical blocks from the tree"); } /// Removes all blocks that are below the finalized block, as well as removing non-canonical @@ -382,19 +386,19 @@ impl TreeState { /// /// This type is not shareable. #[derive(Debug)] -pub struct EngineApiTreeState { +pub struct EngineApiTreeState { /// Tracks the state of the blockchain tree. - tree_state: TreeState, + tree_state: TreeState, /// Tracks the forkchoice state updates received by the CL. forkchoice_state_tracker: ForkchoiceStateTracker, /// Buffer of detached blocks. - buffer: BlockBuffer, + buffer: BlockBuffer, /// Tracks the header of invalid payloads that were rejected by the engine because they're /// invalid. invalid_headers: InvalidHeaderCache, } -impl EngineApiTreeState { +impl EngineApiTreeState { fn new( block_buffer_limit: u32, max_invalid_header_cache_length: u32, @@ -463,13 +467,17 @@ pub enum TreeAction { /// /// This type is responsible for processing engine API requests, maintaining the canonical state and /// emitting events. -pub struct EngineApiTreeHandler { +pub struct EngineApiTreeHandler +where + N: NodePrimitives, + T: EngineTypes, +{ provider: P, executor_provider: E, - consensus: Arc, - payload_validator: ExecutionPayloadValidator, + consensus: Arc>, + payload_validator: V, /// Keeps track of internals such as executed and buffered blocks. - state: EngineApiTreeState, + state: EngineApiTreeState, /// The half for sending messages to the engine. /// /// This is kept so that we can queue in messages to ourself that we can process later, for @@ -478,20 +486,20 @@ pub struct EngineApiTreeHandler { /// them one by one so that we can handle incoming engine API in between and don't become /// unresponsive. This can happen during live sync transition where we're trying to close the /// gap (up to 3 epochs of blocks in the worst case). - incoming_tx: Sender>>, + incoming_tx: Sender, N::Block>>, /// Incoming engine API requests. 
- incoming: Receiver>>, + incoming: Receiver, N::Block>>, /// Outgoing events that are emitted to the handler. - outgoing: UnboundedSender, + outgoing: UnboundedSender>, /// Channels to the persistence layer. - persistence: PersistenceHandle, + persistence: PersistenceHandle, /// Tracks the state changes of the persistence task. persistence_state: PersistenceState, /// Flag indicating the state of the node's backfill synchronization process. backfill_sync_state: BackfillSyncState, /// Keeps track of the state of the canonical chain that isn't persisted yet. /// This is intended to be accessed from external sources, such as rpc. - canonical_in_memory_state: CanonicalInMemoryState, + canonical_in_memory_state: CanonicalInMemoryState, /// Handle to the payload builder that will receive payload attributes for valid forkchoice /// updates payload_builder: PayloadBuilderHandle, @@ -500,13 +508,15 @@ pub struct EngineApiTreeHandler { /// Metrics for the engine api. metrics: EngineApiMetrics, /// An invalid block hook. - invalid_block_hook: Box, + invalid_block_hook: Box>, /// The engine API variant of this handler engine_kind: EngineApiKind, } -impl std::fmt::Debug - for EngineApiTreeHandler +impl std::fmt::Debug + for EngineApiTreeHandler +where + N: NodePrimitives, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EngineApiTreeHandler") @@ -529,25 +539,34 @@ impl std::fmt::Debug } } -impl EngineApiTreeHandler +impl EngineApiTreeHandler where - P: DatabaseProviderFactory + BlockReader + StateProviderFactory + StateReader + Clone + 'static, -
<P as DatabaseProviderFactory>::Provider: BlockReader, - E: BlockExecutorProvider, + N: NodePrimitives, + P: DatabaseProviderFactory + + BlockReader + + StateProviderFactory + + StateReader + + StateCommitmentProvider + + HashedPostStateProvider + + Clone + + 'static, + <P as DatabaseProviderFactory>
::Provider: + BlockReader, + E: BlockExecutorProvider, T: EngineTypes, - Spec: Send + Sync + EthereumHardforks + 'static, + V: EngineValidator, { /// Creates a new [`EngineApiTreeHandler`]. - #[allow(clippy::too_many_arguments)] + #[expect(clippy::too_many_arguments)] pub fn new( provider: P, executor_provider: E, - consensus: Arc, - payload_validator: ExecutionPayloadValidator, - outgoing: UnboundedSender, - state: EngineApiTreeState, - canonical_in_memory_state: CanonicalInMemoryState, - persistence: PersistenceHandle, + consensus: Arc>, + payload_validator: V, + outgoing: UnboundedSender>, + state: EngineApiTreeState, + canonical_in_memory_state: CanonicalInMemoryState, + persistence: PersistenceHandle, persistence_state: PersistenceState, payload_builder: PayloadBuilderHandle, config: TreeConfig, @@ -577,7 +596,7 @@ where } /// Sets the invalid block hook. - fn set_invalid_block_hook(&mut self, invalid_block_hook: Box) { + fn set_invalid_block_hook(&mut self, invalid_block_hook: Box>) { self.invalid_block_hook = invalid_block_hook; } @@ -586,19 +605,20 @@ where /// /// Returns the sender through which incoming requests can be sent to the task and the receiver /// end of a [`EngineApiEvent`] unbounded channel to receive events from the engine. - #[allow(clippy::too_many_arguments)] + #[expect(clippy::complexity)] pub fn spawn_new( provider: P, executor_provider: E, - consensus: Arc, - payload_validator: ExecutionPayloadValidator, - persistence: PersistenceHandle, + consensus: Arc>, + payload_validator: V, + persistence: PersistenceHandle, payload_builder: PayloadBuilderHandle, - canonical_in_memory_state: CanonicalInMemoryState, + canonical_in_memory_state: CanonicalInMemoryState, config: TreeConfig, - invalid_block_hook: Box, + invalid_block_hook: Box>, kind: EngineApiKind, - ) -> (Sender>>, UnboundedReceiver) { + ) -> (Sender, N::Block>>, UnboundedReceiver>) + { let best_block_number = provider.best_block_number().unwrap_or(0); let header = provider.sealed_header(best_block_number).ok().flatten().unwrap_or_default(); @@ -608,7 +628,7 @@ where remove_above_state: VecDeque::new(), }; - let (tx, outgoing) = tokio::sync::mpsc::unbounded_channel(); + let (tx, outgoing) = unbounded_channel(); let state = EngineApiTreeState::new( config.block_buffer_limit(), config.max_invalid_header_cache_length(), @@ -636,7 +656,7 @@ where } /// Returns a new [`Sender`] to send messages to this type. - pub fn sender(&self) -> Sender>> { + pub fn sender(&self) -> Sender, N::Block>> { self.incoming_tx.clone() } @@ -676,7 +696,7 @@ where /// block request processing isn't blocked for a long time. fn on_downloaded( &mut self, - mut blocks: Vec, + mut blocks: Vec>, ) -> Result, InsertBlockFatalError> { if blocks.is_empty() { // nothing to execute @@ -720,7 +740,7 @@ where fn on_new_payload( &mut self, payload: ExecutionPayload, - cancun_fields: Option, + sidecar: ExecutionPayloadSidecar, ) -> Result, InsertBlockFatalError> { trace!(target: "engine::tree", "invoked new payload"); self.metrics.engine.new_payload_messages.increment(1); @@ -751,10 +771,7 @@ where // // This validation **MUST** be instantly run in all cases even during active sync process. 
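        // Call-shape note (illustrative, mirroring the tests further below): the payload now
        // arrives with an `ExecutionPayloadSidecar` instead of `Option<CancunPayloadFields>`, e.g.
        //   ExecutionPayloadSidecar::none()  // pre-Cancun payloads
        //   ExecutionPayloadSidecar::v3(CancunPayloadFields { parent_beacon_block_root, versioned_hashes })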
let parent_hash = payload.parent_hash(); - let block = match self - .payload_validator - .ensure_well_formed_payload(payload, cancun_fields.into()) - { + let block = match self.payload_validator.ensure_well_formed_payload(payload, sidecar) { Ok(block) => block, Err(error) => { error!(target: "engine::tree", %error, "Invalid payload"); @@ -778,7 +795,7 @@ where let block_hash = block.hash(); let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_hash); if lowest_buffered_ancestor == block_hash { - lowest_buffered_ancestor = block.parent_hash; + lowest_buffered_ancestor = block.parent_hash(); } // now check the block itself @@ -837,17 +854,17 @@ where /// /// Note: This does not update the tracked state and instead returns the new chain based on the /// given head. - fn on_new_head(&self, new_head: B256) -> ProviderResult> { + fn on_new_head(&self, new_head: B256) -> ProviderResult>> { // get the executed new head block let Some(new_head_block) = self.state.tree_state.blocks_by_hash.get(&new_head) else { return Ok(None) }; - let new_head_number = new_head_block.block.number; + let new_head_number = new_head_block.block.number(); let mut current_canonical_number = self.state.tree_state.current_canonical_head.number; let mut new_chain = vec![new_head_block.clone()]; - let mut current_hash = new_head_block.block.parent_hash; + let mut current_hash = new_head_block.block.parent_hash(); let mut current_number = new_head_number - 1; // Walk back the new chain until we reach a block we know about @@ -856,7 +873,7 @@ where // that are _above_ the current canonical head. while current_number > current_canonical_number { if let Some(block) = self.executed_block_by_hash(current_hash)? { - current_hash = block.block.parent_hash; + current_hash = block.block.parent_hash(); current_number -= 1; new_chain.push(block); } else { @@ -885,7 +902,7 @@ where while current_canonical_number > current_number { if let Some(block) = self.executed_block_by_hash(old_hash)? { old_chain.push(block.clone()); - old_hash = block.block.header.parent_hash; + old_hash = block.block.header.parent_hash(); current_canonical_number -= 1; } else { // This shouldn't happen as we're walking back the canonical chain @@ -901,7 +918,7 @@ where // a common ancestor (fork block) is reached. while old_hash != current_hash { if let Some(block) = self.executed_block_by_hash(old_hash)? { - old_hash = block.block.header.parent_hash; + old_hash = block.block.header.parent_hash(); old_chain.push(block); } else { // This shouldn't happen as we're walking back the canonical chain @@ -910,7 +927,7 @@ where } if let Some(block) = self.executed_block_by_hash(current_hash)? 
{ - current_hash = block.block.parent_hash; + current_hash = block.block.parent_hash(); new_chain.push(block); } else { // This shouldn't happen as we've already walked this path @@ -939,10 +956,10 @@ where return Ok(false) } // We already passed the canonical head - if current_block.number <= canonical_head.number { + if current_block.number() <= canonical_head.number { break } - current_hash = current_block.parent_hash; + current_hash = current_block.parent_hash(); } // verify that the given hash is not already part of canonical chain stored in memory @@ -971,6 +988,7 @@ where &mut self, state: ForkchoiceState, attrs: Option, + version: EngineApiMessageVersion, ) -> ProviderResult> { trace!(target: "engine::tree", ?attrs, "invoked forkchoice update"); self.metrics.engine.forkchoice_updated_messages.increment(1); @@ -1020,7 +1038,7 @@ where // to return an error ProviderError::HeaderNotFound(state.head_block_hash.into()) })?; - let updated = self.process_payload_attributes(attr, &tip, state); + let updated = self.process_payload_attributes(attr, tip.header(), state, version); return Ok(TreeOutcome::new(updated)) } @@ -1040,7 +1058,7 @@ where } if let Some(attr) = attrs { - let updated = self.process_payload_attributes(attr, &tip, state); + let updated = self.process_payload_attributes(attr, &tip, state, version); return Ok(TreeOutcome::new(updated)) } @@ -1049,14 +1067,15 @@ where // 3. check if the head is already part of the canonical chain if let Ok(Some(canonical_header)) = self.find_canonical_header(state.head_block_hash) { - debug!(target: "engine::tree", head = canonical_header.number, "fcu head block is already canonical"); + debug!(target: "engine::tree", head = canonical_header.number(), "fcu head block is already canonical"); // For OpStack the proposers are allowed to reorg their own chain at will, so we need to // always trigger a new payload job if requested. if self.engine_kind.is_opstack() { if let Some(attr) = attrs { - debug!(target: "engine::tree", head = canonical_header.number, "handling payload attributes for canonical head"); - let updated = self.process_payload_attributes(attr, &canonical_header, state); + debug!(target: "engine::tree", head = canonical_header.number(), "handling payload attributes for canonical head"); + let updated = + self.process_payload_attributes(attr, &canonical_header, state, version); return Ok(TreeOutcome::new(updated)) } } @@ -1107,9 +1126,10 @@ where /// received in time. /// /// Returns an error if the engine channel is disconnected. 
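    /// Polling strategy, in a nutshell (sketch of the body below):
    ///
    /// ```ignore
    /// if persistence_in_progress {
    ///     // poll with a timeout so a finished persistence task can be observed
    ///     incoming.recv_timeout(Duration::from_millis(500)) // timeout => Ok(None)
    /// } else {
    ///     incoming.recv() // block until the next message arrives
    /// }
    /// ```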
+ #[expect(clippy::type_complexity)] fn try_recv_engine_message( &self, - ) -> Result>>, RecvError> { + ) -> Result, N::Block>>, RecvError> { if self.persistence_state.in_progress() { // try to receive the next request with a timeout to not block indefinitely match self.incoming.recv_timeout(std::time::Duration::from_millis(500)) { @@ -1143,6 +1163,7 @@ where if blocks_to_persist.is_empty() { debug!(target: "engine::tree", "Returned empty set of blocks to persist"); } else { + debug!(target: "engine::tree", blocks = ?blocks_to_persist.iter().map(|block| block.block.num_hash()).collect::>(), "Persisting blocks"); let (tx, rx) = oneshot::channel(); let _ = self.persistence.save_blocks(blocks_to_persist, tx); self.persistence_state.start(rx); @@ -1171,7 +1192,7 @@ where return Ok(()) }; - trace!(target: "engine::tree", ?last_persisted_block_hash, ?last_persisted_block_number, "Finished persisting, calling finish"); + debug!(target: "engine::tree", ?last_persisted_block_hash, ?last_persisted_block_number, "Finished persisting, calling finish"); self.persistence_state .finish(last_persisted_block_hash, last_persisted_block_number); self.on_new_persisted_block()?; @@ -1186,7 +1207,7 @@ where /// Handles a message from the engine. fn on_engine_message( &mut self, - msg: FromEngine>, + msg: FromEngine, N::Block>, ) -> Result<(), InsertBlockFatalError> { match msg { FromEngine::Event(event) => match event { @@ -1202,13 +1223,28 @@ where match request { EngineApiRequest::InsertExecutedBlock(block) => { debug!(target: "engine::tree", block=?block.block().num_hash(), "inserting already executed block"); + let now = Instant::now(); + let sealed_block = block.block.clone(); self.state.tree_state.insert_executed(block); self.metrics.engine.inserted_already_executed_blocks.increment(1); + + self.emit_event(EngineApiEvent::BeaconConsensus( + BeaconConsensusEngineEvent::CanonicalBlockAdded( + sealed_block, + now.elapsed(), + ), + )); } EngineApiRequest::Beacon(request) => { match request { - BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => { - let mut output = self.on_forkchoice_updated(state, payload_attrs); + BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + } => { + let mut output = + self.on_forkchoice_updated(state, payload_attrs, version); if let Ok(res) = &mut output { // track last received forkchoice state @@ -1236,13 +1272,13 @@ where error!(target: "engine::tree", "Failed to send event: {err:?}"); } } - BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => { - let output = self.on_new_payload(payload, cancun_fields); - if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(|e| { - reth_beacon_consensus::BeaconOnNewPayloadError::Internal( - Box::new(e), - ) - })) { + BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { + let output = self.on_new_payload(payload, sidecar); + if let Err(err) = + tx.send(output.map(|o| o.outcome).map_err(|e| { + BeaconOnNewPayloadError::Internal(Box::new(e)) + })) + { error!(target: "engine::tree", "Failed to send event: {err:?}"); self.metrics .engine @@ -1288,7 +1324,7 @@ where // Pipeline unwound, memorize the invalid block and wait for CL for next sync target. if let ControlFlow::Unwind { bad_block, .. 
} = ctrl { - warn!(target: "engine::tree", invalid_hash=?bad_block.hash(), invalid_number=?bad_block.number, "Bad block detected in unwind"); + warn!(target: "engine::tree", invalid_block=?bad_block, "Bad block detected in unwind"); // update the `invalid_headers` cache with the new invalid header self.state.invalid_headers.insert(*bad_block); return Ok(()) @@ -1326,7 +1362,7 @@ where // update the tracked chain height, after backfill sync both the canonical height and // persisted height are the same self.state.tree_state.set_canonical_head(new_head.num_hash()); - self.persistence_state.finish(new_head.hash(), new_head.number); + self.persistence_state.finish(new_head.hash(), new_head.number()); // update the tracked canonical head self.canonical_in_memory_state.set_canonical_head(new_head); @@ -1347,7 +1383,7 @@ where .state .buffer .block(&sync_target_state.finalized_block_hash) - .map(|block| block.number); + .map(|block| block.number()); // The block number that the backfill finished at - if the progress or newest // finalized is None then we can't check the distance anyways. @@ -1412,7 +1448,7 @@ where } /// Emits an outgoing event to the engine. - fn emit_event(&mut self, event: impl Into) { + fn emit_event(&mut self, event: impl Into>) { let event = event.into(); if event.is_backfill_action() { @@ -1456,7 +1492,7 @@ where /// Returns a batch of consecutive canonical blocks to persist in the range /// `(last_persisted_number .. canonical_head - threshold]` . The expected /// order is oldest -> newest. - fn get_canonical_blocks_to_persist(&self) -> Vec { + fn get_canonical_blocks_to_persist(&self) -> Vec> { let mut blocks_to_persist = Vec::new(); let mut current_hash = self.state.tree_state.canonical_block_hash(); let last_persisted_number = self.persistence_state.last_persisted_block.number; @@ -1468,15 +1504,15 @@ where debug!(target: "engine::tree", ?last_persisted_number, ?canonical_head_number, ?target_number, ?current_hash, "Returning canonical blocks to persist"); while let Some(block) = self.state.tree_state.blocks_by_hash.get(¤t_hash) { - if block.block.number <= last_persisted_number { + if block.block.number() <= last_persisted_number { break; } - if block.block.number <= target_number { + if block.block.number() <= target_number { blocks_to_persist.push(block.clone()); } - current_hash = block.block.parent_hash; + current_hash = block.block.parent_hash(); } // reverse the order so that the oldest block comes first @@ -1509,7 +1545,7 @@ where /// has in memory. /// /// For finalized blocks, this will return `None`. - fn executed_block_by_hash(&self, hash: B256) -> ProviderResult> { + fn executed_block_by_hash(&self, hash: B256) -> ProviderResult>> { trace!(target: "engine::tree", ?hash, "Fetching executed block by hash"); // check memory first let block = self.state.tree_state.executed_block_by_hash(hash).cloned(); @@ -1528,9 +1564,9 @@ where .ok_or_else(|| ProviderError::HeaderNotFound(hash.into()))?; let execution_output = self .provider - .get_state(block.number)? - .ok_or_else(|| ProviderError::StateForNumberNotFound(block.number))?; - let hashed_state = execution_output.hash_state_slow(); + .get_state(block.number())? + .ok_or_else(|| ProviderError::StateForNumberNotFound(block.number()))?; + let hashed_state = self.provider.hashed_post_state(execution_output.state()); Ok(Some(ExecutedBlock { block: Arc::new(block), @@ -1542,7 +1578,10 @@ where } /// Return sealed block from database or in-memory state by hash. 
- fn sealed_header_by_hash(&self, hash: B256) -> ProviderResult> { + fn sealed_header_by_hash( + &self, + hash: B256, + ) -> ProviderResult>> { // check memory first let block = self.state.tree_state.block_by_hash(hash).map(|block| block.as_ref().clone().header); @@ -1555,7 +1594,7 @@ where } /// Return block from database or in-memory state by hash. - fn block_by_hash(&self, hash: B256) -> ProviderResult> { + fn block_by_hash(&self, hash: B256) -> ProviderResult> { // check database first let mut block = self.provider.block_by_hash(hash)?; if block.is_none() { @@ -1583,7 +1622,7 @@ where /// Returns an error if we failed to fetch the state from the database. fn state_provider(&self, hash: B256) -> ProviderResult> { if let Some((historical, blocks)) = self.state.tree_state.blocks_by_hash(hash) { - trace!(target: "engine::tree", %hash, "found canonical state for block in memory"); + debug!(target: "engine::tree", %hash, %historical, "found canonical state for block in memory"); // the block leads back to the canonical chain let historical = self.provider.state_by_block_hash(historical)?; return Ok(Some(Box::new(MemoryOverlayStateProvider::new(historical, blocks)))) @@ -1591,13 +1630,13 @@ where // the hash could belong to an unknown block or a persisted block if let Some(header) = self.provider.header(&hash)? { - trace!(target: "engine::tree", %hash, number = %header.number, "found canonical state for block in database"); + debug!(target: "engine::tree", %hash, number = %header.number(), "found canonical state for block in database"); // the block is known and persisted let historical = self.provider.state_by_block_hash(hash)?; return Ok(Some(historical)) } - trace!(target: "engine::tree", %hash, "no canonical state found for block"); + debug!(target: "engine::tree", %hash, "no canonical state found for block"); Ok(None) } @@ -1612,7 +1651,7 @@ where self.state .buffer .lowest_ancestor(&hash) - .map(|block| block.parent_hash) + .map(|block| block.parent_hash()) .unwrap_or_else(|| hash) } @@ -1638,14 +1677,14 @@ where // iterate over ancestors in the invalid cache // until we encounter the first valid ancestor let mut current_hash = parent_hash; - let mut current_header = self.state.invalid_headers.get(¤t_hash); - while let Some(header) = current_header { - current_hash = header.parent_hash; - current_header = self.state.invalid_headers.get(¤t_hash); + let mut current_block = self.state.invalid_headers.get(¤t_hash); + while let Some(block_with_parent) = current_block { + current_hash = block_with_parent.parent; + current_block = self.state.invalid_headers.get(¤t_hash); // If current_header is None, then the current_hash does not have an invalid // ancestor in the cache, check its presence in blockchain tree - if current_header.is_none() && self.block_by_hash(current_hash)?.is_some() { + if current_block.is_none() && self.block_by_hash(current_hash)?.is_some() { return Ok(Some(current_hash)) } } @@ -1659,7 +1698,7 @@ where // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal // PoW block, which we need to identify by looking at the parent's block difficulty if let Some(parent) = self.block_by_hash(parent_hash)? 
{ - if !parent.is_zero_difficulty() { + if !parent.header().difficulty().is_zero() { parent_hash = B256::ZERO; } } @@ -1695,7 +1734,7 @@ where let Some(header) = self.state.invalid_headers.get(&check) else { return Ok(None) }; // populate the latest valid hash field - let status = self.prepare_invalid_response(header.parent_hash)?; + let status = self.prepare_invalid_response(header.parent)?; // insert the head block into the invalid header cache self.state.invalid_headers.insert_with_invalid_ancestor(head, header); @@ -1709,12 +1748,15 @@ where // check if the head was previously marked as invalid let Some(header) = self.state.invalid_headers.get(&head) else { return Ok(None) }; // populate the latest valid hash field - Ok(Some(self.prepare_invalid_response(header.parent_hash)?)) + Ok(Some(self.prepare_invalid_response(header.parent)?)) } /// Validate if block is correct and satisfies all the consensus rules that concern the header /// and block body itself. - fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> { + fn validate_block( + &self, + block: &SealedBlockWithSenders, + ) -> Result<(), ConsensusError> { if let Err(e) = self.consensus.validate_header_with_total_difficulty(block, U256::MAX) { error!( target: "engine::tree", @@ -1783,8 +1825,8 @@ where /// Returns an error if sender recovery failed or inserting into the buffer failed. fn buffer_block_without_senders( &mut self, - block: SealedBlock, - ) -> Result<(), InsertBlockErrorTwo> { + block: SealedBlockFor, + ) -> Result<(), InsertBlockErrorTwo> { match block.try_seal_with_senders() { Ok(block) => self.buffer_block(block), Err(block) => Err(InsertBlockErrorTwo::sender_recovery_error(block)), @@ -1792,7 +1834,10 @@ where } /// Pre-validates the block and inserts it into the buffer. - fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockErrorTwo> { + fn buffer_block( + &mut self, + block: SealedBlockWithSenders, + ) -> Result<(), InsertBlockErrorTwo> { if let Err(err) = self.validate_block(&block) { return Err(InsertBlockErrorTwo::consensus_error(err, block.block)) } @@ -1846,7 +1891,7 @@ where // if we have buffered the finalized block, we should check how far // we're off exceeds_backfill_threshold = - self.exceeds_backfill_run_threshold(canonical_tip_num, buffered_finalized.number); + self.exceeds_backfill_run_threshold(canonical_tip_num, buffered_finalized.number()); } // If this is invoked after we downloaded a block we can check if this block is the @@ -1911,7 +1956,7 @@ where /// If either of these are true, then this returns the height of the first block. Otherwise, /// this returns [`None`]. This should be used to check whether or not we should be sending a /// remove command to the persistence task. - fn find_disk_reorg(&self, chain_update: &NewCanonicalChain) -> Option { + fn find_disk_reorg(&self, chain_update: &NewCanonicalChain) -> Option { let NewCanonicalChain::Reorg { new, old: _ } = chain_update else { return None }; let BlockNumHash { number: new_num, hash: new_hash } = @@ -1938,7 +1983,7 @@ where /// Invoked when we the canonical chain has been updated. /// /// This is invoked on a valid forkchoice update, or if we can make the target block canonical. 
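    /// The update is one of two shapes (sketch; variants from `reth_chain_state`):
    ///
    /// ```ignore
    /// match &chain_update {
    ///     NewCanonicalChain::Commit { new } => { /* chain simply extended by `new` */ }
    ///     NewCanonicalChain::Reorg { new, old } => { /* `old` unwound, `new` applied */ }
    /// }
    /// ```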
- fn on_canonical_chain_update(&mut self, chain_update: NewCanonicalChain) { + fn on_canonical_chain_update(&mut self, chain_update: NewCanonicalChain) { trace!(target: "engine::tree", new_blocks = %chain_update.new_block_count(), reorged_blocks = %chain_update.reorged_block_count(), "applying new chain update"); let start = Instant::now(); @@ -1971,7 +2016,7 @@ where self.canonical_in_memory_state.set_canonical_head(tip.clone()); // Update metrics based on new tip - self.metrics.tree.canonical_chain_height.set(tip.number as f64); + self.metrics.tree.canonical_chain_height.set(tip.number() as f64); // sends an event to all active listeners about the new canonical chain self.canonical_in_memory_state.notify_canon_state(notification); @@ -1990,10 +2035,10 @@ where } /// This reinserts any blocks in the new chain that do not already exist in the tree - fn reinsert_reorged_blocks(&mut self, new_chain: Vec) { + fn reinsert_reorged_blocks(&mut self, new_chain: Vec>) { for block in new_chain { if self.state.tree_state.executed_block_by_hash(block.block.hash()).is_none() { - trace!(target: "engine::tree", num=?block.block.number, hash=?block.block.hash(), "Reinserting block into tree state"); + trace!(target: "engine::tree", num=?block.block.number(), hash=?block.block.hash(), "Reinserting block into tree state"); self.state.tree_state.insert_executed(block); } } @@ -2046,10 +2091,10 @@ where /// Returns an event with the appropriate action to take, such as: /// - download more missing blocks /// - try to canonicalize the target if the `block` is the tracked target (head) block. - #[instrument(level = "trace", skip_all, fields(block_hash = %block.hash(), block_num = %block.number,), target = "engine::tree")] + #[instrument(level = "trace", skip_all, fields(block_hash = %block.hash(), block_num = %block.number(),), target = "engine::tree")] fn on_downloaded_block( &mut self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, ) -> Result, InsertBlockFatalError> { let block_num_hash = block.num_hash(); let lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_num_hash.hash); @@ -2107,8 +2152,8 @@ where fn insert_block_without_senders( &mut self, - block: SealedBlock, - ) -> Result { + block: SealedBlockFor, + ) -> Result> { match block.try_seal_with_senders() { Ok(block) => self.insert_block(block), Err(block) => Err(InsertBlockErrorTwo::sender_recovery_error(block)), @@ -2117,17 +2162,18 @@ where fn insert_block( &mut self, - block: SealedBlockWithSenders, - ) -> Result { + block: SealedBlockWithSenders, + ) -> Result> { self.insert_block_inner(block.clone()) .map_err(|kind| InsertBlockErrorTwo::new(block.block, kind)) } fn insert_block_inner( &mut self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders, ) -> Result { - debug!(target: "engine::tree", block=?block.num_hash(), "Inserting new block into tree"); + debug!(target: "engine::tree", block=?block.num_hash(), parent = ?block.parent_hash(), state_root = ?block.state_root(), "Inserting new block into tree"); + if self.block_by_hash(block.hash())?.is_some() { return Ok(InsertPayloadOk2::AlreadySeen(BlockStatus2::Valid)) } @@ -2138,14 +2184,14 @@ where // validate block consensus rules self.validate_block(&block)?; - trace!(target: "engine::tree", block=?block.num_hash(), parent=?block.parent_hash, "Fetching block state provider"); - let Some(state_provider) = self.state_provider(block.parent_hash)? 
else { + trace!(target: "engine::tree", block=?block.num_hash(), parent=?block.parent_hash(), "Fetching block state provider"); + let Some(state_provider) = self.state_provider(block.parent_hash())? else { // we don't have the state required to execute this block, buffering it and find the // missing parent block let missing_ancestor = self .state .buffer - .lowest_ancestor(&block.parent_hash) + .lowest_ancestor(&block.parent_hash()) .map(|block| block.parent_num_hash()) .unwrap_or_else(|| block.parent_num_hash()); @@ -2158,9 +2204,9 @@ where }; // now validate against the parent - let parent_block = self.sealed_header_by_hash(block.parent_hash)?.ok_or_else(|| { + let parent_block = self.sealed_header_by_hash(block.parent_hash())?.ok_or_else(|| { InsertBlockErrorKindTwo::Provider(ProviderError::HeaderNotFound( - block.parent_hash.into(), + block.parent_hash().into(), )) })?; if let Err(e) = self.consensus.validate_header_against_parent(&block, &parent_block) { @@ -2171,15 +2217,24 @@ where trace!(target: "engine::tree", block=?block.num_hash(), "Executing block"); let executor = self.executor_provider.executor(StateProviderDatabase::new(&state_provider)); - let block_number = block.number; + let block_number = block.number(); let block_hash = block.hash(); let sealed_block = Arc::new(block.block.clone()); let block = block.unseal(); let exec_time = Instant::now(); - let output = self.metrics.executor.execute_metered(executor, (&block, U256::MAX).into())?; + + // TODO: create StateRootTask with the receiving end of a channel and + // pass the sending end of the channel to the state hook. + let noop_state_hook = |_state: &EvmState| {}; + let output = self.metrics.executor.execute_metered( + executor, + (&block, U256::MAX).into(), + Box::new(noop_state_hook), + )?; trace!(target: "engine::tree", elapsed=?exec_time.elapsed(), ?block_number, "Executed block"); + if let Err(err) = self.consensus.validate_block_post_execution( &block, PostExecutionInput::new(&output.receipts, &output.requests), @@ -2194,12 +2249,14 @@ where return Err(err.into()) } - let hashed_state = HashedPostState::from_bundle_state(&output.state.state); + let hashed_state = self.provider.hashed_post_state(&output.state); - trace!(target: "engine::tree", block=?BlockNumHash::new(block_number, block_hash), "Calculating block state root"); + trace!(target: "engine::tree", block=?sealed_block.num_hash(), "Calculating block state root"); let root_time = Instant::now(); let mut state_root_result = None; + // TODO: switch to calculate state root using `StateRootTask`. + // We attempt to compute state root in parallel if we are currently not persisting anything // to database. This is safe, because the database state cannot change until we // finish parallel computation. 
It is important that nothing is being persisted as @@ -2208,7 +2265,7 @@ where let persistence_in_progress = self.persistence_state.in_progress(); if !persistence_in_progress { state_root_result = match self - .compute_state_root_parallel(block.parent_hash, &hashed_state) + .compute_state_root_parallel(block.header().parent_hash(), &hashed_state) { Ok((state_root, trie_output)) => Some((state_root, trie_output)), Err(ParallelStateRootError::Provider(ProviderError::ConsistentView(error))) => { @@ -2222,11 +2279,11 @@ where let (state_root, trie_output) = if let Some(result) = state_root_result { result } else { - debug!(target: "engine::tree", persistence_in_progress, "Failed to compute state root in parallel"); + debug!(target: "engine::tree", block=?sealed_block.num_hash(), persistence_in_progress, "Failed to compute state root in parallel"); state_provider.state_root_with_updates(hashed_state.clone())? }; - if state_root != block.state_root { + if state_root != block.header().state_root() { // call post-block hook self.invalid_block_hook.on_invalid_block( &parent_block, @@ -2235,16 +2292,16 @@ where Some((&trie_output, state_root)), ); return Err(ConsensusError::BodyStateRootDiff( - GotExpected { got: state_root, expected: block.state_root }.into(), + GotExpected { got: state_root, expected: block.header().state_root() }.into(), ) .into()) } let root_elapsed = root_time.elapsed(); self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); - debug!(target: "engine::tree", ?root_elapsed, ?block_number, "Calculated state root"); + debug!(target: "engine::tree", ?root_elapsed, block=?sealed_block.num_hash(), "Calculated state root"); - let executed = ExecutedBlock { + let executed: ExecutedBlock = ExecutedBlock { block: sealed_block.clone(), senders: Arc::new(block.senders), execution_output: Arc::new(ExecutionOutcome::from((output, block_number))), @@ -2252,7 +2309,7 @@ where trie: Arc::new(trie_output), }; - if self.state.tree_state.canonical_block_hash() == executed.block().parent_hash { + if self.state.tree_state.canonical_block_hash() == executed.block().parent_hash() { debug!(target: "engine::tree", pending = ?executed.block().num_hash() ,"updating pending block"); // if the parent is the canonical head, we can insert the block as the pending block self.canonical_in_memory_state.set_pending_block(executed.clone()); @@ -2287,10 +2344,14 @@ where parent_hash: B256, hashed_state: &HashedPostState, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { + // TODO: when we switch to calculate state root using `StateRootTask` this + // method can be still useful to calculate the required `TrieInput` to + // create the task. let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; let mut input = TrieInput::default(); if let Some((historical, blocks)) = self.state.tree_state.blocks_by_hash(parent_hash) { + debug!(target: "engine::tree", %parent_hash, %historical, "Calculating state root in parallel, parent found in memory"); // Retrieve revert state for historical block. let revert_state = consistent_view.revert_state(historical)?; input.append(revert_state); @@ -2301,6 +2362,7 @@ where } } else { // The block attaches to canonical persisted parent. 
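            // Illustrative continuation (outside this hunk): once `input` has been assembled
            // a few lines below, it would typically feed the parallel computation, roughly
            //   ParallelStateRoot::new(consistent_view, input).incremental_root_with_updates()
            // yielding the `(B256, TrieUpdates)` pair this method returns.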
+ debug!(target: "engine::tree", %parent_hash, "Calculating state root in parallel, parent found in disk"); let revert_state = consistent_view.revert_state(parent_hash)?; input.append(revert_state); } @@ -2318,7 +2380,7 @@ where /// Returns the proper payload status response if the block is invalid. fn on_insert_block_error( &mut self, - error: InsertBlockErrorTwo, + error: InsertBlockErrorTwo, ) -> Result { let (block, error) = error.split(); @@ -2329,16 +2391,16 @@ where // If the error was due to an invalid payload, the payload is added to the // invalid headers cache and `Ok` with [PayloadStatusEnum::Invalid] is // returned. - warn!(target: "engine::tree", invalid_hash=?block.hash(), invalid_number=?block.number, %validation_err, "Invalid block error on new payload"); + warn!(target: "engine::tree", invalid_hash=?block.hash(), invalid_number=?block.number(), %validation_err, "Invalid block error on new payload"); let latest_valid_hash = if validation_err.is_block_pre_merge() { // zero hash must be returned if block is pre-merge Some(B256::ZERO) } else { - self.latest_valid_hash_for_invalid_payload(block.parent_hash)? + self.latest_valid_hash_for_invalid_payload(block.parent_hash())? }; // keep track of the invalid header - self.state.invalid_headers.insert(block.header); + self.state.invalid_headers.insert(block.header.block_with_parent()); Ok(PayloadStatus::new( PayloadStatusEnum::Invalid { validation_error: validation_err.to_string() }, latest_valid_hash, @@ -2346,7 +2408,10 @@ where } /// Attempts to find the header for the given block hash if it is canonical. - pub fn find_canonical_header(&self, hash: B256) -> Result, ProviderError> { + pub fn find_canonical_header( + &self, + hash: B256, + ) -> Result>, ProviderError> { let mut canonical = self.canonical_in_memory_state.header_by_hash(hash); if canonical.is_none() { @@ -2377,7 +2442,7 @@ where { // we're also persisting the finalized block on disk so we can reload it on // restart this is required by optimism which queries the finalized block: - let _ = self.persistence.save_finalized_block_number(finalized.number); + let _ = self.persistence.save_finalized_block_number(finalized.number()); self.canonical_in_memory_state.set_finalized(finalized); } } @@ -2405,7 +2470,7 @@ where if Some(safe.num_hash()) != self.canonical_in_memory_state.get_safe_num_hash() { // we're also persisting the safe block on disk so we can reload it on // restart this is required by optimism which queries the safe block: - let _ = self.persistence.save_safe_block_number(safe.number); + let _ = self.persistence.save_safe_block_number(safe.number()); self.canonical_in_memory_state.set_safe(safe); } } @@ -2480,15 +2545,14 @@ where fn process_payload_attributes( &self, attrs: T::PayloadAttributes, - head: &Header, + head: &N::BlockHeader, state: ForkchoiceState, + version: EngineApiMessageVersion, ) -> OnForkChoiceUpdated { - // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp - // of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held - // client software MUST respond with -38003: `Invalid payload attributes` and MUST NOT - // begin a payload build process. In such an event, the forkchoiceState update MUST NOT - // be rolled back. 
-        if attrs.timestamp() <= head.timestamp {
+        if let Err(err) =
+            self.payload_validator.validate_payload_attributes_against_header(&attrs, head)
+        {
+            warn!(target: "engine::tree", %err, ?head, "Invalid payload attributes");
            return OnForkChoiceUpdated::invalid_payload_attributes()
        }
@@ -2499,6 +2563,7 @@
        match <T::PayloadBuilderAttributes as PayloadBuilderAttributes>::try_new(
            state.head_block_hash,
            attrs,
+            version as u8,
        ) {
            Ok(attributes) => {
                // send the payload to the builder and return the receiver for the pending payload
@@ -2569,14 +2634,18 @@ pub enum AdvancePersistenceError {
mod tests {
    use super::*;
    use crate::persistence::PersistenceAction;
-    use alloy_primitives::{Bytes, Sealable};
+    use alloy_consensus::Header;
+    use alloy_primitives::Bytes;
    use alloy_rlp::Decodable;
+    use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar};
    use assert_matches::assert_matches;
-    use reth_beacon_consensus::{EthBeaconConsensus, ForkchoiceStatus};
+    use reth_beacon_consensus::EthBeaconConsensus;
    use reth_chain_state::{test_utils::TestBlockBuilder, BlockState};
    use reth_chainspec::{ChainSpec, HOLESKY, MAINNET};
-    use reth_ethereum_engine_primitives::EthEngineTypes;
+    use reth_engine_primitives::ForkchoiceStatus;
+    use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator};
    use reth_evm::test_utils::MockExecutorProvider;
+    use reth_primitives::{Block, BlockExt, EthPrimitives};
    use reth_provider::test_utils::MockEthProvider;
    use reth_rpc_types_compat::engine::{block_to_payload_v1, payload::block_to_payload_v3};
    use reth_trie::updates::TrieUpdates;
@@ -2584,7 +2653,6 @@ mod tests {
        str::FromStr,
        sync::mpsc::{channel, Sender},
    };
-    use tokio::sync::mpsc::unbounded_channel;

    /// This is a test channel that allows you to `release` any value that is in the channel.
    ///
@@ -2642,9 +2710,14 @@ mod tests {
    }

    struct TestHarness {
-        tree:
-            EngineApiTreeHandler<MockEthProvider, MockExecutorProvider, EthEngineTypes>,
-        to_tree_tx: Sender<FromEngine<EngineApiRequest<EthEngineTypes>>>,
+        tree: EngineApiTreeHandler<
+            EthPrimitives,
+            MockEthProvider,
+            MockExecutorProvider,
+            EthEngineTypes,
+            EthereumEngineValidator,
+        >,
+        to_tree_tx: Sender<FromEngine<EngineApiRequest<EthEngineTypes, EthPrimitives>, Block>>,
        from_tree_rx: UnboundedReceiver<EngineApiEvent>,
        blocks: Vec<ExecutedBlock>,
        action_rx: Receiver<PersistenceAction>,
@@ -2677,13 +2750,12 @@ mod tests {
        let provider = MockEthProvider::default();
        let executor_provider = MockExecutorProvider::default();

-        let payload_validator = ExecutionPayloadValidator::new(chain_spec.clone());
+        let payload_validator = EthereumEngineValidator::new(chain_spec.clone());

        let (from_tree_tx, from_tree_rx) = unbounded_channel();

-        let sealed = chain_spec.genesis_header().clone().seal_slow();
-        let (header, seal) = sealed.into_parts();
-        let header = SealedHeader::new(header, seal);
+        let header = chain_spec.genesis_header().clone();
+        let header = SealedHeader::seal(header);
        let engine_api_tree_state = EngineApiTreeState::new(10, 10, header.num_hash());
        let canonical_in_memory_state = CanonicalInMemoryState::with_head(header, None, None);
@@ -2780,7 +2852,7 @@ mod tests {
        fn insert_block(
            &mut self,
            block: SealedBlockWithSenders,
-        ) -> Result<InsertPayloadOk2, InsertBlockErrorKindTwo> {
+        ) -> Result<InsertPayloadOk2, InsertBlockErrorKindTwo<Block>> {
            let execution_outcome = self.block_builder.get_execution_outcome(block.clone());
            self.extend_execution_outcome([execution_outcome]);
            self.tree.provider.add_state_root(block.state_root);
@@ -2805,6 +2877,7 @@ mod tests {
                    state: fcu_state,
                    payload_attrs: None,
                    tx,
+                    version: EngineApiMessageVersion::default(),
                }
                .into(),
            ))
@@ -2848,7 +2921,7 @@ mod tests {
            self.tree
                .on_new_payload(
                    payload.into(),
-                    Some(CancunPayloadFields {
+                    ExecutionPayloadSidecar::v3(CancunPayloadFields {
                        parent_beacon_block_root: block.parent_beacon_block_root.unwrap(),
                        versioned_hashes: vec![],
                    }),
@@ -2902,7 +2975,7 @@ mod tests {
                EngineApiEvent::BeaconConsensus(
                    BeaconConsensusEngineEvent::CanonicalBlockAdded(block, _),
                ) => {
-                    assert!(block.hash() == expected_hash);
+                    assert_eq!(block.hash(), expected_hash);
                }
                _ => panic!("Unexpected event: {:#?}", event),
            }
@@ -3094,6 +3167,7 @@ mod tests {
                },
                payload_attrs: None,
                tx,
+                version: EngineApiMessageVersion::default(),
            }
            .into(),
        ))
@@ -3114,7 +3188,10 @@ mod tests {

        let mut test_harness = TestHarness::new(HOLESKY.clone());

-        let outcome = test_harness.tree.on_new_payload(payload.into(), None).unwrap();
+        let outcome = test_harness
+            .tree
+            .on_new_payload(payload.into(), ExecutionPayloadSidecar::none())
+            .unwrap();
        assert!(outcome.outcome.is_syncing());

        // ensure block is buffered
@@ -3158,7 +3235,7 @@ mod tests {
            .on_engine_message(FromEngine::Request(
                BeaconEngineMessage::NewPayload {
                    payload: payload.clone().into(),
-                    cancun_fields: None,
+                    sidecar: ExecutionPayloadSidecar::none(),
                    tx,
                }
                .into(),
diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs
new file mode 100644
index 00000000000..e2ed6aa1470
--- /dev/null
+++ b/crates/engine/tree/src/tree/root.rs
@@ -0,0 +1,994 @@
+//! State root task related functionality.
+
+use alloy_primitives::map::{HashMap, HashSet};
+use reth_evm::system_calls::OnStateHook;
+use reth_provider::{
+    providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory,
+    StateCommitmentProvider,
+};
+use reth_trie::{
+    proof::Proof, updates::TrieUpdates, HashedPostState, HashedStorage, MultiProof, Nibbles,
+    TrieInput,
+};
+use reth_trie_db::DatabaseProof;
+use reth_trie_parallel::root::ParallelStateRootError;
+use reth_trie_sparse::{
+    errors::{SparseStateTrieResult, SparseTrieError},
+    SparseStateTrie,
+};
+use revm_primitives::{keccak256, EvmState, B256};
+use std::{
+    collections::BTreeMap,
+    ops::Deref,
+    sync::{
+        mpsc::{self, channel, Receiver, Sender},
+        Arc,
+    },
+    time::{Duration, Instant},
+};
+use tracing::{debug, error, trace};
+
+/// The level below which the sparse trie hashes are calculated in [`update_sparse_trie`].
+const SPARSE_TRIE_INCREMENTAL_LEVEL: usize = 2;
+
+/// Result of the state root calculation
+pub(crate) type StateRootResult = Result<(B256, TrieUpdates), ParallelStateRootError>;
+
+/// Handle to a spawned state root task.
+#[derive(Debug)]
+#[allow(dead_code)]
+pub struct StateRootHandle {
+    /// Channel for receiving the final result.
+    rx: mpsc::Receiver<StateRootResult>,
+}
+
+#[allow(dead_code)]
+impl StateRootHandle {
+    /// Creates a new handle from a receiver.
+    pub(crate) const fn new(rx: mpsc::Receiver<StateRootResult>) -> Self {
+        Self { rx }
+    }
+
+    /// Waits for the state root calculation to complete.
+    pub fn wait_for_result(self) -> StateRootResult {
+        self.rx.recv().expect("state root task was dropped without sending result")
+    }
+}
+
+/// Common configuration for state root tasks
+#[derive(Debug)]
+pub struct StateRootConfig<Factory> {
+    /// View over the state in the database.
+    pub consistent_view: ConsistentDbView<Factory>,
+    /// Latest trie input.
+    pub input: Arc<TrieInput>,
+}
+
+/// Messages used internally by the state root task
+#[derive(Debug)]
+#[allow(dead_code)]
+pub enum StateRootMessage {
+    /// New state update from transaction execution
+    StateUpdate(EvmState),
+    /// Proof calculation completed for a specific state update
+    ProofCalculated {
+        /// The calculated proof
+        proof: MultiProof,
+        /// The state update that was used to calculate the proof
+        state_update: HashedPostState,
+        /// The index of this proof in the sequence of state updates
+        sequence_number: u64,
+    },
+    /// State root calculation completed
+    RootCalculated {
+        /// The updated sparse trie
+        trie: Box<SparseStateTrie>,
+        /// Time taken to calculate the root
+        elapsed: Duration,
+    },
+    /// Signals state update stream end.
+    FinishedStateUpdates,
+}
+
+/// Handle to track proof calculation ordering
+#[derive(Debug, Default)]
+pub(crate) struct ProofSequencer {
+    /// The next proof sequence number to be produced.
+    next_sequence: u64,
+    /// The next sequence number expected to be delivered.
+    next_to_deliver: u64,
+    /// Buffer for out-of-order proofs and corresponding state updates
+    pending_proofs: BTreeMap<u64, (MultiProof, HashedPostState)>,
+}
+
+impl ProofSequencer {
+    /// Creates a new proof sequencer
+    pub(crate) fn new() -> Self {
+        Self::default()
+    }
+
+    /// Gets the next sequence number and increments the counter
+    pub(crate) fn next_sequence(&mut self) -> u64 {
+        let seq = self.next_sequence;
+        self.next_sequence += 1;
+        seq
+    }
+
+    /// Adds a proof with the corresponding state update and returns all sequential proofs and state
+    /// updates if we have a continuous sequence
+    pub(crate) fn add_proof(
+        &mut self,
+        sequence: u64,
+        proof: MultiProof,
+        state_update: HashedPostState,
+    ) -> Vec<(MultiProof, HashedPostState)> {
+        if sequence >= self.next_to_deliver {
+            self.pending_proofs.insert(sequence, (proof, state_update));
+        }
+
+        // return early if we don't have the next expected proof
+        if !self.pending_proofs.contains_key(&self.next_to_deliver) {
+            return Vec::new()
+        }
+
+        let mut consecutive_proofs = Vec::with_capacity(self.pending_proofs.len());
+        let mut current_sequence = self.next_to_deliver;
+
+        // keep collecting proofs and state updates as long as we have consecutive sequence numbers
+        while let Some((proof, state_update)) = self.pending_proofs.remove(&current_sequence) {
+            consecutive_proofs.push((proof, state_update));
+            current_sequence += 1;
+
+            // if we don't have the next number, stop collecting
+            if !self.pending_proofs.contains_key(&current_sequence) {
+                break;
+            }
+        }
+
+        self.next_to_deliver += consecutive_proofs.len() as u64;
+
+        consecutive_proofs
+    }
+
+    /// Returns true if we still have pending proofs
+    pub(crate) fn has_pending(&self) -> bool {
+        !self.pending_proofs.is_empty()
+    }
+}
+
+/// A wrapper for the sender that signals completion when dropped
+#[allow(dead_code)]
+pub(crate) struct StateHookSender(Sender<StateRootMessage>);
+
+#[allow(dead_code)]
+impl StateHookSender {
+    pub(crate) const fn new(inner: Sender<StateRootMessage>) -> Self {
+        Self(inner)
+    }
+}
+
+impl Deref for StateHookSender {
+    type Target = Sender<StateRootMessage>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl Drop for StateHookSender {
+    fn drop(&mut self) {
+        // Send completion signal when the sender is dropped
+        let _ = self.0.send(StateRootMessage::FinishedStateUpdates);
+    }
+}
+
+fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostState {
+    let mut hashed_state = HashedPostState::default();
+
+    for (address, account) in update {
+        if account.is_touched() {
+            let hashed_address = keccak256(address);
+            trace!(target: "engine::root", ?address, ?hashed_address, "Adding account to state update");
"engine::root", ?address, ?hashed_address, "Adding account to state update"); + + let destroyed = account.is_selfdestructed(); + let info = if destroyed { None } else { Some(account.info.into()) }; + hashed_state.accounts.insert(hashed_address, info); + + let mut changed_storage_iter = account + .storage + .into_iter() + .filter_map(|(slot, value)| { + value.is_changed().then(|| (keccak256(B256::from(slot)), value.present_value)) + }) + .peekable(); + + if destroyed || changed_storage_iter.peek().is_some() { + hashed_state.storages.insert( + hashed_address, + HashedStorage::from_iter(destroyed, changed_storage_iter), + ); + } + } + } + + hashed_state +} + +/// Standalone task that receives a transaction state stream and updates relevant +/// data structures to calculate state root. +/// +/// It is responsible of initializing a blinded sparse trie and subscribe to +/// transaction state stream. As it receives transaction execution results, it +/// fetches the proofs for relevant accounts from the database and reveal them +/// to the tree. +/// Then it updates relevant leaves according to the result of the transaction. +#[derive(Debug)] +pub struct StateRootTask { + /// Task configuration. + config: StateRootConfig, + /// Receiver for state root related messages. + rx: Receiver, + /// Sender for state root related messages. + tx: Sender, + /// Proof targets that have been already fetched. + fetched_proof_targets: HashMap>, + /// Proof sequencing handler. + proof_sequencer: ProofSequencer, + /// The sparse trie used for the state root calculation. If [`None`], then update is in + /// progress. + sparse_trie: Option>, +} + +#[allow(dead_code)] +impl StateRootTask +where + Factory: DatabaseProviderFactory + + StateCommitmentProvider + + Clone + + Send + + Sync + + 'static, +{ + /// Creates a new state root task with the unified message channel + pub fn new(config: StateRootConfig) -> Self { + let (tx, rx) = channel(); + + Self { + config, + rx, + tx, + fetched_proof_targets: Default::default(), + proof_sequencer: ProofSequencer::new(), + sparse_trie: Some(Box::new(SparseStateTrie::default().with_updates(true))), + } + } + + /// Spawns the state root task and returns a handle to await its result. + pub fn spawn(self) -> StateRootHandle { + let (tx, rx) = mpsc::sync_channel(1); + std::thread::Builder::new() + .name("State Root Task".to_string()) + .spawn(move || { + debug!(target: "engine::tree", "Starting state root task"); + let result = self.run(); + let _ = tx.send(result); + }) + .expect("failed to spawn state root thread"); + + StateRootHandle::new(rx) + } + + /// Returns a state hook to be used to send state updates to this task. + pub fn state_hook(&self) -> impl OnStateHook { + let state_hook = StateHookSender::new(self.tx.clone()); + + move |state: &EvmState| { + if let Err(error) = state_hook.send(StateRootMessage::StateUpdate(state.clone())) { + error!(target: "engine::root", ?error, "Failed to send state update"); + } + } + } + + /// Handles state updates. + /// + /// Returns proof targets derived from the state update. 
+    /// Handles state updates.
+    ///
+    /// Returns proof targets derived from the state update.
+    fn on_state_update(
+        view: ConsistentDbView<Factory>,
+        input: Arc<TrieInput>,
+        update: EvmState,
+        fetched_proof_targets: &mut HashMap<B256, HashSet<B256>>,
+        proof_sequence_number: u64,
+        state_root_message_sender: Sender<StateRootMessage>,
+    ) {
+        let hashed_state_update = evm_state_to_hashed_post_state(update);
+
+        let proof_targets = get_proof_targets(&hashed_state_update, fetched_proof_targets);
+        for (address, slots) in &proof_targets {
+            fetched_proof_targets.entry(*address).or_default().extend(slots)
+        }
+
+        // Dispatch proof gathering for this state update
+        rayon::spawn(move || {
+            let provider = match view.provider_ro() {
+                Ok(provider) => provider,
+                Err(error) => {
+                    error!(target: "engine::root", ?error, "Could not get provider");
+                    return;
+                }
+            };
+
+            // TODO: replace with parallel proof
+            let result = Proof::overlay_multiproof(
+                provider.tx_ref(),
+                // TODO(alexey): this clone can be expensive, we should avoid it
+                input.as_ref().clone(),
+                proof_targets,
+            );
+            match result {
+                Ok(proof) => {
+                    let _ = state_root_message_sender.send(StateRootMessage::ProofCalculated {
+                        proof,
+                        state_update: hashed_state_update,
+                        sequence_number: proof_sequence_number,
+                    });
+                }
+                Err(e) => {
+                    error!(target: "engine::root", error = ?e, "Could not calculate multiproof");
+                }
+            }
+        });
+    }
+
+    /// Handler for new proof calculated, aggregates all the existing sequential proofs.
+    fn on_proof(
+        &mut self,
+        sequence_number: u64,
+        proof: MultiProof,
+        state_update: HashedPostState,
+    ) -> Option<(MultiProof, HashedPostState)> {
+        let ready_proofs = self.proof_sequencer.add_proof(sequence_number, proof, state_update);
+
+        if ready_proofs.is_empty() {
+            None
+        } else {
+            // Merge all ready proofs and state updates
+            ready_proofs.into_iter().reduce(|mut acc, (proof, state_update)| {
+                acc.0.extend(proof);
+                acc.1.extend(state_update);
+                acc
+            })
+        }
+    }
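// The ordering problem `on_proof` above solves, reduced to a standalone sketch
// with strings instead of `MultiProof`s: results may arrive out of order, so
// they are buffered in a `BTreeMap` and drained only once the next expected
// sequence number is present (a simplified model of `ProofSequencer::add_proof`).
fn _sequencer_sketch() {
    use std::collections::BTreeMap;

    let mut next_to_deliver = 0u64;
    let mut pending: BTreeMap<u64, &str> = BTreeMap::new();
    let mut delivered = Vec::new();

    for (seq, item) in [(1, "proof-1"), (2, "proof-2"), (0, "proof-0")] {
        pending.insert(seq, item);
        // Drain strictly consecutive entries starting at `next_to_deliver`.
        while let Some(ready) = pending.remove(&next_to_deliver) {
            delivered.push(ready);
            next_to_deliver += 1;
        }
    }

    // Nothing is delivered until `proof-0` closes the gap, then all three drain.
    assert_eq!(delivered, ["proof-0", "proof-1", "proof-2"]);
}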
+    /// Spawns root calculation with the current state and proofs.
+    fn spawn_root_calculation(&mut self, state: HashedPostState, multiproof: MultiProof) {
+        let Some(trie) = self.sparse_trie.take() else { return };
+
+        trace!(
+            target: "engine::root",
+            account_proofs = multiproof.account_subtree.len(),
+            storage_proofs = multiproof.storages.len(),
+            "Spawning root calculation"
+        );
+
+        // TODO(alexey): store proof targets in `ProofSequencer` to avoid recomputing them
+        let targets = get_proof_targets(&state, &HashMap::default());
+
+        let tx = self.tx.clone();
+        rayon::spawn(move || {
+            let result = update_sparse_trie(trie, multiproof, targets, state);
+            match result {
+                Ok((trie, elapsed)) => {
+                    trace!(
+                        target: "engine::root",
+                        ?elapsed,
+                        "Root calculation completed, sending result"
+                    );
+                    let _ = tx.send(StateRootMessage::RootCalculated { trie, elapsed });
+                }
+                Err(e) => {
+                    error!(target: "engine::root", error = ?e, "Could not calculate state root");
+                }
+            }
+        });
+    }
+
+    fn run(mut self) -> StateRootResult {
+        let mut current_state_update = HashedPostState::default();
+        let mut current_multiproof = MultiProof::default();
+        let mut updates_received = 0;
+        let mut proofs_processed = 0;
+        let mut roots_calculated = 0;
+        let mut updates_finished = false;
+
+        loop {
+            match self.rx.recv() {
+                Ok(message) => match message {
+                    StateRootMessage::StateUpdate(update) => {
+                        updates_received += 1;
+                        trace!(
+                            target: "engine::root",
+                            len = update.len(),
+                            total_updates = updates_received,
+                            "Received new state update"
+                        );
+                        Self::on_state_update(
+                            self.config.consistent_view.clone(),
+                            self.config.input.clone(),
+                            update,
+                            &mut self.fetched_proof_targets,
+                            self.proof_sequencer.next_sequence(),
+                            self.tx.clone(),
+                        );
+                    }
+                    StateRootMessage::FinishedStateUpdates => {
+                        updates_finished = true;
+                    }
+                    StateRootMessage::ProofCalculated { proof, state_update, sequence_number } => {
+                        proofs_processed += 1;
+                        trace!(
+                            target: "engine::root",
+                            sequence = sequence_number,
+                            total_proofs = proofs_processed,
+                            "Processing calculated proof"
+                        );
+
+                        trace!(target: "engine::root", ?proof, "Proof calculated");
+
+                        if let Some((combined_proof, combined_state_update)) =
+                            self.on_proof(sequence_number, proof, state_update)
+                        {
+                            if self.sparse_trie.is_none() {
+                                current_multiproof.extend(combined_proof);
+                                current_state_update.extend(combined_state_update);
+                            } else {
+                                self.spawn_root_calculation(combined_state_update, combined_proof);
+                            }
+                        }
+                    }
+                    StateRootMessage::RootCalculated { trie, elapsed } => {
+                        roots_calculated += 1;
+                        trace!(
+                            target: "engine::root",
+                            ?elapsed,
+                            roots_calculated,
+                            proofs = proofs_processed,
+                            updates = updates_received,
+                            "Computed intermediate root"
+                        );
+                        self.sparse_trie = Some(trie);
+
+                        let has_new_proofs = !current_multiproof.account_subtree.is_empty() ||
+                            !current_multiproof.storages.is_empty();
+                        let all_proofs_received = proofs_processed >= updates_received;
+                        let no_pending = !self.proof_sequencer.has_pending();
+
+                        trace!(
+                            target: "engine::root",
+                            has_new_proofs,
+                            all_proofs_received,
+                            no_pending,
+                            "State check"
+                        );
+
+                        // only spawn new calculation if we have accumulated new proofs
+                        if has_new_proofs {
+                            trace!(
+                                target: "engine::root",
+                                account_proofs = current_multiproof.account_subtree.len(),
+                                storage_proofs = current_multiproof.storages.len(),
+                                "Spawning subsequent root calculation"
+                            );
+                            self.spawn_root_calculation(
+                                std::mem::take(&mut current_state_update),
+                                std::mem::take(&mut current_multiproof),
+                            );
+                        } else if all_proofs_received && no_pending && updates_finished {
+                            debug!(
+                                target: "engine::root",
"engine::root", + total_updates = updates_received, + total_proofs = proofs_processed, + roots_calculated, + "All proofs processed, ending calculation" + ); + let mut trie = self + .sparse_trie + .take() + .expect("sparse trie update should not be in progress"); + let root = trie.root().expect("sparse trie should be revealed"); + let trie_updates = trie + .take_trie_updates() + .expect("sparse trie should have updates retention enabled"); + return Ok((root, trie_updates)); + } + } + }, + Err(_) => { + // this means our internal message channel is closed, which shouldn't happen + // in normal operation since we hold both ends + error!( + target: "engine::root", + "Internal message channel closed unexpectedly" + ); + return Err(ParallelStateRootError::Other( + "Internal message channel closed unexpectedly".into(), + )); + } + } + } + } +} + +/// Returns accounts only with those storages that were not already fetched, and +/// if there are no such storages and the account itself was already fetched, the +/// account shouldn't be included. +fn get_proof_targets( + state_update: &HashedPostState, + fetched_proof_targets: &HashMap>, +) -> HashMap> { + let mut targets = HashMap::default(); + + // first collect all new accounts (not previously fetched) + for &hashed_address in state_update.accounts.keys() { + if !fetched_proof_targets.contains_key(&hashed_address) { + targets.insert(hashed_address, HashSet::default()); + } + } + + // then process storage slots for all accounts in the state update + for (hashed_address, storage) in &state_update.storages { + let fetched = fetched_proof_targets.get(hashed_address); + let mut changed_slots = storage + .storage + .keys() + .filter(|slot| !fetched.is_some_and(|f| f.contains(*slot))) + .peekable(); + + if changed_slots.peek().is_some() { + targets.entry(*hashed_address).or_default().extend(changed_slots); + } + } + + targets +} + +/// Updates the sparse trie with the given proofs and state, and returns the updated trie and the +/// time it took. +fn update_sparse_trie( + mut trie: Box, + multiproof: MultiProof, + targets: HashMap>, + state: HashedPostState, +) -> SparseStateTrieResult<(Box, Duration)> { + trace!(target: "engine::root::sparse", "Updating sparse trie"); + let started_at = Instant::now(); + + // Reveal new accounts and storage slots. + trie.reveal_multiproof(targets, multiproof)?; + + // Update storage slots with new values and calculate storage roots. 
+    for (address, storage) in state.storages {
+        trace!(target: "engine::root::sparse", ?address, "Updating storage");
+        let storage_trie = trie.storage_trie_mut(&address).ok_or(SparseTrieError::Blind)?;
+
+        if storage.wiped {
+            trace!(target: "engine::root::sparse", ?address, "Wiping storage");
+            storage_trie.wipe();
+        }
+
+        for (slot, value) in storage.storage {
+            let slot_nibbles = Nibbles::unpack(slot);
+            if value.is_zero() {
+                trace!(target: "engine::root::sparse", ?address, ?slot, "Removing storage slot");
+
+                // TODO: handle blinded node error
+                storage_trie.remove_leaf(&slot_nibbles)?;
+            } else {
+                trace!(target: "engine::root::sparse", ?address, ?slot, "Updating storage slot");
+                storage_trie
+                    .update_leaf(slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec())?;
+            }
+        }
+
+        storage_trie.root();
+    }
+
+    // Update accounts with new values
+    for (address, account) in state.accounts {
+        trace!(target: "engine::root::sparse", ?address, "Updating account");
+        trie.update_account(address, account.unwrap_or_default())?;
+    }
+
+    trie.calculate_below_level(SPARSE_TRIE_INCREMENTAL_LEVEL);
+    let elapsed = started_at.elapsed();
+
+    Ok((trie, elapsed))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use reth_primitives::{Account as RethAccount, StorageEntry};
+    use reth_provider::{
+        providers::ConsistentDbView, test_utils::create_test_provider_factory, HashingWriter,
+    };
+    use reth_testing_utils::generators::{self, Rng};
+    use reth_trie::{test_utils::state_root, TrieInput};
+    use revm_primitives::{
+        Account as RevmAccount, AccountInfo, AccountStatus, Address, EvmState, EvmStorageSlot,
+        HashMap, B256, KECCAK_EMPTY, U256,
+    };
+    use std::sync::Arc;
+
+    fn convert_revm_to_reth_account(revm_account: &RevmAccount) -> RethAccount {
+        RethAccount {
+            balance: revm_account.info.balance,
+            nonce: revm_account.info.nonce,
+            bytecode_hash: if revm_account.info.code_hash == KECCAK_EMPTY {
+                None
+            } else {
+                Some(revm_account.info.code_hash)
+            },
+        }
+    }
+
+    fn create_mock_state_updates(num_accounts: usize, updates_per_account: usize) -> Vec<EvmState> {
+        let mut rng = generators::rng();
+        let all_addresses: Vec<Address> = (0..num_accounts).map(|_| rng.gen()).collect();
+        let mut updates = Vec::new();
+
+        for _ in 0..updates_per_account {
+            let num_accounts_in_update = rng.gen_range(1..=num_accounts);
+            let mut state_update = EvmState::default();
+
+            let selected_addresses = &all_addresses[0..num_accounts_in_update];
+
+            for &address in selected_addresses {
+                let mut storage = HashMap::default();
+                if rng.gen_bool(0.7) {
+                    for _ in 0..rng.gen_range(1..10) {
+                        let slot = U256::from(rng.gen::<u64>());
+                        storage.insert(
+                            slot,
+                            EvmStorageSlot::new_changed(U256::ZERO, U256::from(rng.gen::<u64>())),
+                        );
+                    }
+                }
+
+                let account = RevmAccount {
+                    info: AccountInfo {
+                        balance: U256::from(rng.gen::<u64>()),
+                        nonce: rng.gen::<u64>(),
+                        code_hash: KECCAK_EMPTY,
+                        code: Some(Default::default()),
+                    },
+                    storage,
+                    status: AccountStatus::Touched,
+                };
+
+                state_update.insert(address, account);
+            }
+
+            updates.push(state_update);
+        }
+
+        updates
+    }
+
+    #[test]
+    fn test_state_root_task() {
+        reth_tracing::init_test_tracing();
+
+        let factory = create_test_provider_factory();
+
+        let state_updates = create_mock_state_updates(10, 10);
+        let mut hashed_state = HashedPostState::default();
+        let mut accumulated_state: HashMap<Address, (RethAccount, HashMap<B256, U256>)> =
+            HashMap::default();
+
+        {
+            let provider_rw = factory.provider_rw().expect("failed to get provider");
+
+            for update in &state_updates {
+                let account_updates = update.iter().map(|(address, account)| {
+                    (*address, Some(convert_revm_to_reth_account(account)))
+                });
+                provider_rw
+                    .insert_account_for_hashing(account_updates)
+                    .expect("failed to insert accounts");
+
+                let storage_updates = update.iter().map(|(address, account)| {
+                    let storage_entries = account.storage.iter().map(|(slot, value)| {
+                        StorageEntry { key: B256::from(*slot), value: value.present_value }
+                    });
+                    (*address, storage_entries)
+                });
+                provider_rw
+                    .insert_storage_for_hashing(storage_updates)
+                    .expect("failed to insert storage");
+            }
+            provider_rw.commit().expect("failed to commit changes");
+        }
+
+        for update in &state_updates {
+            hashed_state.extend(evm_state_to_hashed_post_state(update.clone()));
+
+            for (address, account) in update {
+                let storage: HashMap<B256, U256> = account
+                    .storage
+                    .iter()
+                    .map(|(k, v)| (B256::from(*k), v.present_value))
+                    .collect();
+
+                let entry = accumulated_state.entry(*address).or_default();
+                entry.0 = convert_revm_to_reth_account(account);
+                entry.1.extend(storage);
+            }
+        }
+
+        let config = StateRootConfig {
+            consistent_view: ConsistentDbView::new(factory, None),
+            input: Arc::new(TrieInput::from_state(hashed_state)),
+        };
+        let task = StateRootTask::new(config);
+        let mut state_hook = task.state_hook();
+        let handle = task.spawn();
+
+        for update in state_updates {
+            state_hook.on_state(&update);
+        }
+        drop(state_hook);
+
+        let (root_from_task, _) = handle.wait_for_result().expect("task failed");
+        let root_from_base = state_root(accumulated_state);
+
+        assert_eq!(
+            root_from_task, root_from_base,
+            "State root mismatch: task={root_from_task:?}, base={root_from_base:?}"
+        );
+    }
+
+    #[test]
+    fn test_add_proof_in_sequence() {
+        let mut sequencer = ProofSequencer::new();
+        let proof1 = MultiProof::default();
+        let proof2 = MultiProof::default();
+        sequencer.next_sequence = 2;
+
+        let ready = sequencer.add_proof(0, proof1, HashedPostState::default());
+        assert_eq!(ready.len(), 1);
+        assert!(!sequencer.has_pending());
+
+        let ready = sequencer.add_proof(1, proof2, HashedPostState::default());
+        assert_eq!(ready.len(), 1);
+        assert!(!sequencer.has_pending());
+    }
+
+    #[test]
+    fn test_add_proof_out_of_order()
{ + let mut sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof2 = MultiProof::default(); + let proof3 = MultiProof::default(); + sequencer.next_sequence = 3; + + let ready = sequencer.add_proof(2, proof3, HashedPostState::default()); + assert_eq!(ready.len(), 0); + assert!(sequencer.has_pending()); + + let ready = sequencer.add_proof(0, proof1, HashedPostState::default()); + assert_eq!(ready.len(), 1); + assert!(sequencer.has_pending()); + + let ready = sequencer.add_proof(1, proof2, HashedPostState::default()); + assert_eq!(ready.len(), 2); + assert!(!sequencer.has_pending()); + } + + #[test] + fn test_add_proof_with_gaps() { + let mut sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof3 = MultiProof::default(); + sequencer.next_sequence = 3; + + let ready = sequencer.add_proof(0, proof1, HashedPostState::default()); + assert_eq!(ready.len(), 1); + + let ready = sequencer.add_proof(2, proof3, HashedPostState::default()); + assert_eq!(ready.len(), 0); + assert!(sequencer.has_pending()); + } + + #[test] + fn test_add_proof_duplicate_sequence() { + let mut sequencer = ProofSequencer::new(); + let proof1 = MultiProof::default(); + let proof2 = MultiProof::default(); + + let ready = sequencer.add_proof(0, proof1, HashedPostState::default()); + assert_eq!(ready.len(), 1); + + let ready = sequencer.add_proof(0, proof2, HashedPostState::default()); + assert_eq!(ready.len(), 0); + assert!(!sequencer.has_pending()); + } + + #[test] + fn test_add_proof_batch_processing() { + let mut sequencer = ProofSequencer::new(); + let proofs: Vec<_> = (0..5).map(|_| MultiProof::default()).collect(); + sequencer.next_sequence = 5; + + sequencer.add_proof(4, proofs[4].clone(), HashedPostState::default()); + sequencer.add_proof(2, proofs[2].clone(), HashedPostState::default()); + sequencer.add_proof(1, proofs[1].clone(), HashedPostState::default()); + sequencer.add_proof(3, proofs[3].clone(), HashedPostState::default()); + + let ready = sequencer.add_proof(0, proofs[0].clone(), HashedPostState::default()); + assert_eq!(ready.len(), 5); + assert!(!sequencer.has_pending()); + } + + fn create_get_proof_targets_state() -> HashedPostState { + let mut state = HashedPostState::default(); + + let addr1 = B256::random(); + let addr2 = B256::random(); + state.accounts.insert(addr1, Some(Default::default())); + state.accounts.insert(addr2, Some(Default::default())); + + let mut storage = HashedStorage::default(); + let slot1 = B256::random(); + let slot2 = B256::random(); + storage.storage.insert(slot1, U256::ZERO); + storage.storage.insert(slot2, U256::from(1)); + state.storages.insert(addr1, storage); + + state + } + + #[test] + fn test_get_proof_targets_new_account_targets() { + let state = create_get_proof_targets_state(); + let fetched = HashMap::default(); + + let targets = get_proof_targets(&state, &fetched); + + // should return all accounts as targets since nothing was fetched before + assert_eq!(targets.len(), state.accounts.len()); + for addr in state.accounts.keys() { + assert!(targets.contains_key(addr)); + } + } + + #[test] + fn test_get_proof_targets_new_storage_targets() { + let state = create_get_proof_targets_state(); + let fetched = HashMap::default(); + + let targets = get_proof_targets(&state, &fetched); + + // verify storage slots are included for accounts with storage + for (addr, storage) in &state.storages { + assert!(targets.contains_key(addr)); + let target_slots = &targets[addr]; + assert_eq!(target_slots.len(), 
storage.storage.len()); + for slot in storage.storage.keys() { + assert!(target_slots.contains(slot)); + } + } + } + + #[test] + fn test_get_proof_targets_filter_already_fetched_accounts() { + let state = create_get_proof_targets_state(); + let mut fetched = HashMap::default(); + + // select an account that has no storage updates + let fetched_addr = state + .accounts + .keys() + .find(|&&addr| !state.storages.contains_key(&addr)) + .expect("Should have an account without storage"); + + // mark the account as already fetched + fetched.insert(*fetched_addr, HashSet::default()); + + let targets = get_proof_targets(&state, &fetched); + + // should not include the already fetched account since it has no storage updates + assert!(!targets.contains_key(fetched_addr)); + // other accounts should still be included + assert_eq!(targets.len(), state.accounts.len() - 1); + } + + #[test] + fn test_get_proof_targets_filter_already_fetched_storage() { + let state = create_get_proof_targets_state(); + let mut fetched = HashMap::default(); + + // mark one storage slot as already fetched + let (addr, storage) = state.storages.iter().next().unwrap(); + let mut fetched_slots = HashSet::default(); + let fetched_slot = *storage.storage.keys().next().unwrap(); + fetched_slots.insert(fetched_slot); + fetched.insert(*addr, fetched_slots); + + let targets = get_proof_targets(&state, &fetched); + + // should not include the already fetched storage slot + let target_slots = &targets[addr]; + assert!(!target_slots.contains(&fetched_slot)); + assert_eq!(target_slots.len(), storage.storage.len() - 1); + } + + #[test] + fn test_get_proof_targets_empty_state() { + let state = HashedPostState::default(); + let fetched = HashMap::default(); + + let targets = get_proof_targets(&state, &fetched); + + assert!(targets.is_empty()); + } + + #[test] + fn test_get_proof_targets_mixed_fetched_state() { + let mut state = HashedPostState::default(); + let mut fetched = HashMap::default(); + + let addr1 = B256::random(); + let addr2 = B256::random(); + let slot1 = B256::random(); + let slot2 = B256::random(); + + state.accounts.insert(addr1, Some(Default::default())); + state.accounts.insert(addr2, Some(Default::default())); + + let mut storage = HashedStorage::default(); + storage.storage.insert(slot1, U256::ZERO); + storage.storage.insert(slot2, U256::from(1)); + state.storages.insert(addr1, storage); + + let mut fetched_slots = HashSet::default(); + fetched_slots.insert(slot1); + fetched.insert(addr1, fetched_slots); + + let targets = get_proof_targets(&state, &fetched); + + assert!(targets.contains_key(&addr2)); + assert!(!targets[&addr1].contains(&slot1)); + assert!(targets[&addr1].contains(&slot2)); + } + + #[test] + fn test_get_proof_targets_unmodified_account_with_storage() { + let mut state = HashedPostState::default(); + let fetched = HashMap::default(); + + let addr = B256::random(); + let slot1 = B256::random(); + let slot2 = B256::random(); + + // don't add the account to state.accounts (simulating unmodified account) + // but add storage updates for this account + let mut storage = HashedStorage::default(); + storage.storage.insert(slot1, U256::from(1)); + storage.storage.insert(slot2, U256::from(2)); + state.storages.insert(addr, storage); + + assert!(!state.accounts.contains_key(&addr)); + assert!(!fetched.contains_key(&addr)); + + let targets = get_proof_targets(&state, &fetched); + + // verify that we still get the storage slots for the unmodified account + assert!(targets.contains_key(&addr)); + + let target_slots = 
&targets[&addr]; + assert_eq!(target_slots.len(), 2); + assert!(target_slots.contains(&slot1)); + assert!(target_slots.contains(&slot2)); + } +} diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml index c11948b9405..54f9321f239 100644 --- a/crates/engine/util/Cargo.toml +++ b/crates/engine/util/Cargo.toml @@ -14,10 +14,10 @@ workspace = true # reth reth-primitives.workspace = true reth-errors.workspace = true +reth-consensus-common.workspace = true reth-fs-util.workspace = true reth-rpc-types-compat.workspace = true reth-engine-primitives.workspace = true -reth-beacon-consensus.workspace = true reth-payload-validator.workspace = true reth-evm.workspace = true reth-revm.workspace = true @@ -27,6 +27,7 @@ revm-primitives.workspace = true reth-trie.workspace = true # alloy +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true @@ -50,5 +51,7 @@ tracing.workspace = true [features] optimism = [ - "reth-beacon-consensus/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + "revm-primitives/optimism", ] diff --git a/crates/engine/util/src/engine_store.rs b/crates/engine/util/src/engine_store.rs index 1f344519961..efed83159b3 100644 --- a/crates/engine/util/src/engine_store.rs +++ b/crates/engine/util/src/engine_store.rs @@ -1,9 +1,8 @@ //! Stores engine API messages to disk for later inspection and replay. -use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayload, ForkchoiceState}; +use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState}; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::BeaconEngineMessage; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; use reth_fs_util as fs; use serde::{Deserialize, Serialize}; use std::{ @@ -30,8 +29,9 @@ pub enum StoredEngineApiMessage { NewPayload { /// The [`ExecutionPayload`] sent in the persisted call. payload: ExecutionPayload, - /// The Cancun-specific fields sent in the persisted call, if any. - cancun_fields: Option, + /// The execution payload sidecar with additional version-specific fields received by + /// engine API. + sidecar: ExecutionPayloadSidecar, }, } @@ -63,7 +63,12 @@ impl EngineMessageStore { fs::create_dir_all(&self.path)?; // ensure that store path had been created let timestamp = received_at.duration_since(SystemTime::UNIX_EPOCH).unwrap().as_millis(); match msg { - BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx: _tx } => { + BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx: _tx, + version: _version, + } => { let filename = format!("{}-fcu-{}.json", timestamp, state.head_block_hash); fs::write( self.path.join(filename), @@ -73,14 +78,14 @@ impl EngineMessageStore { })?, )?; } - BeaconEngineMessage::NewPayload { payload, cancun_fields, tx: _tx } => { + BeaconEngineMessage::NewPayload { payload, sidecar, tx: _tx } => { let filename = format!("{}-new_payload-{}.json", timestamp, payload.block_hash()); fs::write( self.path.join(filename), serde_json::to_vec( &StoredEngineApiMessage::::NewPayload { payload: payload.clone(), - cancun_fields: cancun_fields.clone(), + sidecar: sidecar.clone(), }, )?, )?; diff --git a/crates/engine/util/src/lib.rs b/crates/engine/util/src/lib.rs index 26dc817fc95..42746c376cf 100644 --- a/crates/engine/util/src/lib.rs +++ b/crates/engine/util/src/lib.rs @@ -1,8 +1,7 @@ //! Collection of various stream utilities for consensus engine. 
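// A std-only sketch of the on-disk convention the message-store logic above
// follows: one JSON file per engine message, prefixed with the receipt time in
// Unix milliseconds so files can be replayed in timestamp order. The payload is
// taken as a pre-rendered string; names here are stand-ins, not the real types.
use std::fs;
use std::path::Path;
use std::time::{SystemTime, UNIX_EPOCH};

fn store_message(dir: &Path, kind: &str, hash: &str, json: &str) -> std::io::Result<()> {
    fs::create_dir_all(dir)?; // ensure the store path exists, as the store does
    let millis = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis();
    fs::write(dir.join(format!("{millis}-{kind}-{hash}.json")), json)
}

fn _store_sketch() {
    // e.g. produces "<millis>-fcu-0xabc.json" under the store directory
    let _ = store_message(Path::new("/tmp/engine-store"), "fcu", "0xabc", r#"{"state":{}}"#);
}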
use futures::Stream;
-use reth_beacon_consensus::BeaconEngineMessage;
-use reth_engine_primitives::EngineTypes;
+use reth_engine_primitives::{BeaconEngineMessage, EngineTypes};
use reth_payload_validator::ExecutionPayloadValidator;
use std::path::PathBuf;
use tokio_util::either::Either;
diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs
index abfa23a57b3..24e14162284 100644
--- a/crates/engine/util/src/reorg.rs
+++ b/crates/engine/util/src/reorg.rs
@@ -1,31 +1,35 @@
//! Stream wrapper that simulates reorgs.

-use alloy_consensus::Transaction;
+use alloy_consensus::{Header, Transaction};
use alloy_primitives::U256;
use alloy_rpc_types_engine::{
-    CancunPayloadFields, ExecutionPayload, ForkchoiceState, PayloadStatus,
+    CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus,
};
use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt};
use itertools::Either;
-use reth_beacon_consensus::{BeaconEngineMessage, BeaconOnNewPayloadError, OnForkChoiceUpdated};
-use reth_engine_primitives::EngineTypes;
+use reth_engine_primitives::{
+    BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes,
+    OnForkChoiceUpdated,
+};
use reth_errors::{BlockExecutionError, BlockValidationError, RethError, RethResult};
use reth_ethereum_forks::EthereumHardforks;
-use reth_evm::{system_calls::SystemCaller, ConfigureEvm};
+use reth_evm::{
+    state_change::post_block_withdrawals_balance_increments, system_calls::SystemCaller,
+    ConfigureEvm,
+};
use reth_payload_validator::ExecutionPayloadValidator;
-use reth_primitives::{proofs, Block, BlockBody, Header, Receipt, Receipts};
+use reth_primitives::{
+    proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, BlockExt, Receipt,
+    Receipts,
+};
use reth_provider::{BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory};
use reth_revm::{
    database::StateProviderDatabase,
    db::{states::bundle_state::BundleRetention, State},
-    state_change::post_block_withdrawals_balance_increments,
    DatabaseCommit,
};
use reth_rpc_types_compat::engine::payload::block_to_payload;
-use reth_trie::HashedPostState;
-use revm_primitives::{
-    calc_excess_blob_gas, BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg,
-};
+use revm_primitives::{calc_excess_blob_gas, EVMError, EnvWithHandlerCfg};
use std::{
    collections::VecDeque,
    future::Future,
@@ -105,8 +109,8 @@ impl Stream for EngineReorg
where
    S: Stream<Item = BeaconEngineMessage<Engine>>,
    Engine: EngineTypes,
-    Provider: BlockReader + StateProviderFactory,
-    Evm: ConfigureEvm<Header = Header>,
+    Provider: BlockReader<Block = reth_primitives::Block> + StateProviderFactory,
+    Evm: ConfigureEvm<Header = Header>
, Spec: EthereumHardforks, { type Item = S::Item; @@ -147,7 +151,7 @@ where let next = ready!(this.stream.poll_next_unpin(cx)); let item = match (next, &this.last_forkchoice_state) { ( - Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }), + Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx }), Some(last_forkchoice_state), ) if this.forkchoice_states_forwarded > this.frequency && // Only enter reorg state if new payload attaches to current head. @@ -162,13 +166,13 @@ where // forkchoice state. We will rely on CL to reorg us back to canonical chain. // TODO: This is an expensive blocking operation, ideally it's spawned as a task // so that the stream could yield the control back. - let (reorg_payload, reorg_cancun_fields) = match create_reorg_head( + let (reorg_payload, reorg_sidecar) = match create_reorg_head( this.provider, this.evm_config, this.payload_validator, *this.depth, payload.clone(), - cancun_fields.clone(), + sidecar.clone(), ) { Ok(result) => result, Err(error) => { @@ -177,7 +181,7 @@ where // the next one return Poll::Ready(Some(BeaconEngineMessage::NewPayload { payload, - cancun_fields, + sidecar, tx, })) } @@ -197,11 +201,11 @@ where let queue = VecDeque::from([ // Current payload - BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }, + BeaconEngineMessage::NewPayload { payload, sidecar, tx }, // Reorg payload BeaconEngineMessage::NewPayload { payload: reorg_payload, - cancun_fields: reorg_cancun_fields, + sidecar: reorg_sidecar, tx: reorg_payload_tx, }, // Reorg forkchoice state @@ -209,18 +213,32 @@ where state: reorg_forkchoice_state, payload_attrs: None, tx: reorg_fcu_tx, + version: EngineApiMessageVersion::default(), }, ]); *this.state = EngineReorgState::Reorg { queue }; continue } - (Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }), _) => { + ( + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }), + _, + ) => { // Record last forkchoice state forwarded to the engine. // We do not care if it's valid since engine should be able to handle // reorgs that rely on invalid forkchoice state. *this.last_forkchoice_state = Some(state); *this.forkchoice_states_forwarded += 1; - Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }) } (item, _) => item, }; @@ -235,18 +253,18 @@ fn create_reorg_head( payload_validator: &ExecutionPayloadValidator, mut depth: usize, next_payload: ExecutionPayload, - next_cancun_fields: Option, -) -> RethResult<(ExecutionPayload, Option)> + next_sidecar: ExecutionPayloadSidecar, +) -> RethResult<(ExecutionPayload, ExecutionPayloadSidecar)> where - Provider: BlockReader + StateProviderFactory, - Evm: ConfigureEvm
<Header = Header>,
+    Provider: BlockReader<Block = reth_primitives::Block> + StateProviderFactory,
+    Evm: ConfigureEvm<Header = Header>
, Spec: EthereumHardforks, { let chain_spec = payload_validator.chain_spec(); // Ensure next payload is valid. let next_block = payload_validator - .ensure_well_formed_payload(next_payload, next_cancun_fields.into()) + .ensure_well_formed_payload(next_payload, next_sidecar) .map_err(RethError::msg)?; // Fetch reorg target block depending on its depth and its parent. @@ -280,14 +298,12 @@ where .build(); // Configure environments - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, &reorg_target.header, U256::MAX); + let (cfg, block_env) = evm_config.cfg_and_block_env(&reorg_target.header, U256::MAX); let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); let mut evm = evm_config.evm_with_env(&mut state, env); // apply eip-4788 pre block contract call - let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec); + let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); system_caller.apply_beacon_root_contract_call( reorg_target.timestamp, @@ -321,7 +337,7 @@ where // Treat error as fatal Err(error) => { return Err(RethError::Execution(BlockExecutionError::Validation( - BlockValidationError::EVM { hash: tx.hash, error: Box::new(error) }, + BlockValidationError::EVM { hash: tx.hash(), error: Box::new(error) }, ))) } }; @@ -359,13 +375,13 @@ where // and 4788 contract call state.merge_transitions(BundleRetention::PlainState); - let outcome = ExecutionOutcome::new( + let outcome: ExecutionOutcome = ExecutionOutcome::new( state.take_bundle(), Receipts::from(vec![receipts]), reorg_target.number, Default::default(), ); - let hashed_state = HashedPostState::from_bundle_state(&outcome.state().state); + let hashed_state = state_provider.hashed_post_state(outcome.state()); let (blob_gas_used, excess_blob_gas) = if chain_spec.is_cancun_active_at_timestamp(reorg_target.timestamp) { @@ -401,26 +417,33 @@ where transactions_root: proofs::calculate_transaction_root(&transactions), receipts_root: outcome.receipts_root_slow(reorg_target.header.number).unwrap(), logs_bloom: outcome.block_logs_bloom(reorg_target.header.number).unwrap(), - requests_root: None, // TODO(prague) gas_used: cumulative_gas_used, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: excess_blob_gas.map(Into::into), state_root: state_provider.state_root(hashed_state)?, + requests_hash: None, // TODO(prague) + target_blobs_per_block: None, // TODO(prague) }, body: BlockBody { transactions, ommers: reorg_target.body.ommers, withdrawals: reorg_target.body.withdrawals, - requests: None, // TODO(prague) }, } .seal_slow(); Ok(( block_to_payload(reorg_block), + // todo(onbjerg): how do we support execution requests? reorg_target .header .parent_beacon_block_root - .map(|root| CancunPayloadFields { parent_beacon_block_root: root, versioned_hashes }), + .map(|root| { + ExecutionPayloadSidecar::v3(CancunPayloadFields { + parent_beacon_block_root: root, + versioned_hashes, + }) + }) + .unwrap_or_else(ExecutionPayloadSidecar::none), )) } diff --git a/crates/engine/util/src/skip_fcu.rs b/crates/engine/util/src/skip_fcu.rs index e110cecedc8..daa39ad572d 100644 --- a/crates/engine/util/src/skip_fcu.rs +++ b/crates/engine/util/src/skip_fcu.rs @@ -1,8 +1,7 @@ //! Stream wrapper that skips specified number of FCUs. 
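// The skip logic of the FCU wrapper below, modeled on a plain `Iterator`
// instead of a `Stream` so it runs standalone: the first `threshold` matching
// items are dropped, the counter resets once a matching item is forwarded, and
// everything else passes through untouched. A sketch, not the real wrapper.
fn _skip_sketch() {
    struct SkipFirst<I> {
        inner: I,
        threshold: usize,
        skipped: usize,
    }

    impl<I: Iterator<Item = u32>> Iterator for SkipFirst<I> {
        type Item = u32;

        fn next(&mut self) -> Option<u32> {
            loop {
                let item = self.inner.next()?;
                let matches = item % 2 == 0; // stand-in for "is an FCU message"
                if matches && self.skipped < self.threshold {
                    self.skipped += 1; // drop it, like the skipped FCUs
                    continue;
                }
                if matches {
                    self.skipped = 0; // forwarded a match: reset, as the wrapper does
                }
                return Some(item);
            }
        }
    }

    let out: Vec<_> = SkipFirst { inner: 1..=8, threshold: 2, skipped: 0 }.collect();
    // 2 and 4 are skipped; 6 is forwarded and resets the counter; 8 starts a new skip run.
    assert_eq!(out, vec![1, 3, 5, 6, 7]);
}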
use futures::{Stream, StreamExt}; -use reth_beacon_consensus::{BeaconEngineMessage, OnForkChoiceUpdated}; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes, OnForkChoiceUpdated}; use std::{ pin::Pin, task::{ready, Context, Poll}, @@ -45,7 +44,12 @@ where loop { let next = ready!(this.stream.poll_next_unpin(cx)); let item = match next { - Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) => { + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }) => { if this.skipped < this.threshold { *this.skipped += 1; tracing::warn!(target: "engine::stream::skip_fcu", ?state, ?payload_attrs, threshold=this.threshold, skipped=this.skipped, "Skipping FCU"); @@ -53,7 +57,12 @@ where continue } *this.skipped = 0; - Some(BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx }) + Some(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }) } next => next, }; diff --git a/crates/engine/util/src/skip_new_payload.rs b/crates/engine/util/src/skip_new_payload.rs index d2450711ecf..ea89bdf6d10 100644 --- a/crates/engine/util/src/skip_new_payload.rs +++ b/crates/engine/util/src/skip_new_payload.rs @@ -2,8 +2,7 @@ use alloy_rpc_types_engine::{PayloadStatus, PayloadStatusEnum}; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::BeaconEngineMessage; -use reth_engine_primitives::EngineTypes; +use reth_engine_primitives::{BeaconEngineMessage, EngineTypes}; use std::{ pin::Pin, task::{ready, Context, Poll}, @@ -41,14 +40,14 @@ where loop { let next = ready!(this.stream.poll_next_unpin(cx)); let item = match next { - Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }) => { + Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx }) => { if this.skipped < this.threshold { *this.skipped += 1; tracing::warn!( target: "engine::stream::skip_new_payload", block_number = payload.block_number(), block_hash = %payload.block_hash(), - ?cancun_fields, + ?sidecar, threshold=this.threshold, skipped=this.skipped, "Skipping new payload" ); @@ -56,7 +55,7 @@ where continue } *this.skipped = 0; - Some(BeaconEngineMessage::NewPayload { payload, cancun_fields, tx }) + Some(BeaconEngineMessage::NewPayload { payload, sidecar, tx }) } next => next, }; diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml index 08a0bc98dbc..1a08498633c 100644 --- a/crates/ethereum-forks/Cargo.toml +++ b/crates/ethereum-forks/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # ethereum alloy-chains.workspace = true -alloy-primitives = { workspace = true, features = ["serde", "rand", "rlp"] } +alloy-primitives = { workspace = true, features = ["serde", "rlp"] } alloy-rlp = { workspace = true, features = ["arrayvec", "derive"] } once_cell.workspace = true @@ -23,7 +23,7 @@ crc = "3" # misc serde = { workspace = true, features = ["derive"], optional = true } -thiserror-no-std = { workspace = true, default-features = false } +thiserror.workspace = true dyn-clone.workspace = true rustc-hash = { workspace = true, optional = true } @@ -35,16 +35,32 @@ auto_impl.workspace = true [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } -proptest.workspace = true +alloy-consensus.workspace = true [features] default = ["std", "serde", "rustc-hash"] -arbitrary = ["dep:arbitrary", "dep:proptest", "dep:proptest-derive"] -serde = ["dep:serde"] +arbitrary = [ + "dep:arbitrary", + "dep:proptest", + "dep:proptest-derive", + 
"alloy-chains/arbitrary", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary" +] +serde = [ + "dep:serde", + "alloy-chains/serde", + "alloy-consensus/serde", + "alloy-primitives/serde" +] std = [ - "alloy-chains/std", - "alloy-primitives/std", - "thiserror-no-std/std", - "rustc-hash/std", + "alloy-chains/std", + "alloy-primitives/std", + "thiserror/std", + "rustc-hash/std", + "alloy-consensus/std", + "once_cell/std", + "serde?/std", + "alloy-rlp/std" ] rustc-hash = ["dep:rustc-hash"] diff --git a/crates/ethereum-forks/src/forkid.rs b/crates/ethereum-forks/src/forkid.rs index 6876d0eb926..ebc9fb10637 100644 --- a/crates/ethereum-forks/src/forkid.rs +++ b/crates/ethereum-forks/src/forkid.rs @@ -176,7 +176,7 @@ impl From for ForkId { } /// Reason for rejecting provided `ForkId`. -#[derive(Clone, Copy, Debug, thiserror_no_std::Error, PartialEq, Eq, Hash)] +#[derive(Clone, Copy, Debug, thiserror::Error, PartialEq, Eq, Hash)] pub enum ValidationError { /// Remote node is outdated and needs a software update. #[error( @@ -446,15 +446,12 @@ impl Cache { #[cfg(test)] mod tests { use super::*; - use alloy_primitives::b256; - - const GENESIS_HASH: B256 = - b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); + use alloy_consensus::constants::MAINNET_GENESIS_HASH; // EIP test vectors. #[test] fn forkhash() { - let mut fork_hash = ForkHash::from(GENESIS_HASH); + let mut fork_hash = ForkHash::from(MAINNET_GENESIS_HASH); assert_eq!(fork_hash.0, hex!("fc64ec04")); fork_hash += 1_150_000u64; @@ -468,7 +465,7 @@ mod tests { fn compatibility_check() { let mut filter = ForkFilter::new( Head { number: 0, ..Default::default() }, - GENESIS_HASH, + MAINNET_GENESIS_HASH, 0, vec![ ForkFilterKey::Block(1_150_000), @@ -727,7 +724,7 @@ mod tests { let mut fork_filter = ForkFilter::new( Head { number: 0, ..Default::default() }, - GENESIS_HASH, + MAINNET_GENESIS_HASH, 0, vec![ForkFilterKey::Block(b1), ForkFilterKey::Block(b2)], ); diff --git a/crates/ethereum-forks/src/hardfork/ethereum.rs b/crates/ethereum-forks/src/hardfork/ethereum.rs index 3d85b54a960..4e13b001786 100644 --- a/crates/ethereum-forks/src/hardfork/ethereum.rs +++ b/crates/ethereum-forks/src/hardfork/ethereum.rs @@ -49,6 +49,8 @@ hardfork!( Cancun, /// Prague: Prague, + /// Osaka: + Osaka, } ); diff --git a/crates/ethereum-forks/src/hardforks/ethereum.rs b/crates/ethereum-forks/src/hardforks/ethereum.rs index 3069367158f..086d2d3b46e 100644 --- a/crates/ethereum-forks/src/hardforks/ethereum.rs +++ b/crates/ethereum-forks/src/hardforks/ethereum.rs @@ -21,6 +21,11 @@ pub trait EthereumHardforks: Hardforks { self.is_fork_active_at_timestamp(EthereumHardfork::Prague, timestamp) } + /// Convenience method to check if [`EthereumHardfork::Osaka`] is active at a given timestamp. + fn is_osaka_active_at_timestamp(&self, timestamp: u64) -> bool { + self.is_fork_active_at_timestamp(EthereumHardfork::Osaka, timestamp) + } + /// Convenience method to check if [`EthereumHardfork::Byzantium`] is active at a given block /// number. 
fn is_byzantium_active_at_block(&self, block_number: u64) -> bool { diff --git a/crates/ethereum/cli/src/chainspec.rs b/crates/ethereum/cli/src/chainspec.rs index cbcce9f69f6..a60d7017942 100644 --- a/crates/ethereum/cli/src/chainspec.rs +++ b/crates/ethereum/cli/src/chainspec.rs @@ -89,7 +89,8 @@ mod tests { "terminalTotalDifficulty": 0, "shanghaiTime": 0, "cancunTime": 0, - "pragueTime": 0 + "pragueTime": 0, + "osakaTime": 0 } }"#; @@ -97,5 +98,6 @@ mod tests { assert!(spec.is_shanghai_active_at_timestamp(0)); assert!(spec.is_cancun_active_at_timestamp(0)); assert!(spec.is_prague_active_at_timestamp(0)); + assert!(spec.is_osaka_active_at_timestamp(0)); } } diff --git a/crates/ethereum/consensus/Cargo.toml b/crates/ethereum/consensus/Cargo.toml index 02d217b63b2..8e6158ff46c 100644 --- a/crates/ethereum/consensus/Cargo.toml +++ b/crates/ethereum/consensus/Cargo.toml @@ -15,9 +15,12 @@ workspace = true reth-chainspec.workspace = true reth-consensus-common.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-consensus.workspace = true # alloy +alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true tracing.workspace = true diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index e74f3498fa5..4d3ba628269 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -8,23 +8,26 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; -use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; +use reth_consensus::{ + Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput, +}; use reth_consensus_common::validation::{ validate_4844_header_standalone, validate_against_parent_4844, validate_against_parent_eip1559_base_fee, validate_against_parent_hash_number, - validate_against_parent_timestamp, validate_block_pre_execution, validate_header_base_fee, - validate_header_extradata, validate_header_gas, + validate_against_parent_timestamp, validate_block_pre_execution, validate_body_against_header, + validate_header_base_fee, validate_header_extradata, validate_header_gas, }; use reth_primitives::{ - constants::MINIMUM_GAS_LIMIT, BlockWithSenders, Header, SealedBlock, SealedHeader, - EMPTY_OMMER_ROOT_HASH, + Block, BlockBody, BlockWithSenders, NodePrimitives, Receipt, SealedBlock, SealedHeader, }; +use reth_primitives_traits::constants::MINIMUM_GAS_LIMIT; use std::{fmt::Debug, sync::Arc, time::SystemTime}; /// The bound divisor of the gas limit, used in update calculations. -const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024; +pub const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024; mod validation; pub use validation::validate_block_post_execution; @@ -32,7 +35,7 @@ pub use validation::validate_block_post_execution; /// Ethereum beacon consensus /// /// This consensus engine does basic checks as outlined in the execution specs. 
-#[derive(Debug)]
+#[derive(Debug, Clone)]
pub struct EthBeaconConsensus<ChainSpec> {
    /// Configuration
    chain_spec: Arc<ChainSpec>,
}
@@ -91,12 +94,47 @@ impl EthBeaconConsensus
    }
}

+impl<ChainSpec, N> FullConsensus<N> for EthBeaconConsensus<ChainSpec>
+where
+    ChainSpec: Send + Sync + EthChainSpec + EthereumHardforks + Debug,
+    N: NodePrimitives<
+        BlockHeader = Header,
+        BlockBody = BlockBody,
+        Block = Block,
+        Receipt = Receipt,
+    >,
+{
+    fn validate_block_post_execution(
+        &self,
+        block: &BlockWithSenders,
+        input: PostExecutionInput<'_>,
+    ) -> Result<(), ConsensusError> {
+        validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests)
+    }
+}
+
 impl<ChainSpec: Send + Sync + EthChainSpec + EthereumHardforks + Debug> Consensus for EthBeaconConsensus<ChainSpec>
+{
+    fn validate_body_against_header(
+        &self,
+        body: &BlockBody,
+        header: &SealedHeader,
+    ) -> Result<(), ConsensusError> {
+        validate_body_against_header(body, header.header())
+    }
+
+    fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> {
+        validate_block_pre_execution(block, &self.chain_spec)
+    }
+}
+
+impl<ChainSpec: Send + Sync + EthChainSpec + EthereumHardforks + Debug> HeaderValidator
+    for EthBeaconConsensus<ChainSpec>
{
    fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> {
-        validate_header_gas(header)?;
-        validate_header_base_fee(header, &self.chain_spec)?;
+        validate_header_gas(header.header())?;
+        validate_header_base_fee(header.header(), &self.chain_spec)?;

        // EIP-4895: Beacon chain push withdrawals as operations
        if self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp) &&
@@ -111,7 +149,7 @@ impl Consensu
        // Ensures that EIP-4844 fields are valid once cancun is active.
        if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) {
-            validate_4844_header_standalone(header)?;
+            validate_4844_header_standalone(header.header())?;
        } else if header.blob_gas_used.is_some() {
            return Err(ConsensusError::BlobGasUsedUnexpected)
        } else if header.excess_blob_gas.is_some() {
@@ -121,11 +159,11 @@ impl Consensu
        }

        if self.chain_spec.is_prague_active_at_timestamp(header.timestamp) {
-            if header.requests_root.is_none() {
-                return Err(ConsensusError::RequestsRootMissing)
+            if header.requests_hash.is_none() {
+                return Err(ConsensusError::RequestsHashMissing)
            }
-        } else if header.requests_root.is_some() {
-            return Err(ConsensusError::RequestsRootUnexpected)
+        } else if header.requests_hash.is_some() {
+            return Err(ConsensusError::RequestsHashUnexpected)
        }

        Ok(())
@@ -136,19 +174,23 @@ impl Consensu
        header: &SealedHeader,
        parent: &SealedHeader,
    ) -> Result<(), ConsensusError> {
-        validate_against_parent_hash_number(header, parent)?;
+        validate_against_parent_hash_number(header.header(), parent)?;

-        validate_against_parent_timestamp(header, parent)?;
+        validate_against_parent_timestamp(header.header(), parent.header())?;

        // TODO Check difficulty increment between parent and self
        // Ice age did increment it by some formula that we need to follow.
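        // The invariant enforced by `validate_against_parent_gas_limit` just
        // below, as a standalone sketch using the `GAS_LIMIT_BOUND_DIVISOR`
        // (1024) exported above: a child block may move its gas limit by
        // strictly less than parent_limit / 1024 in either direction, and never
        // below the protocol minimum. Simplified; the real check also reports
        // which bound was violated.
        fn _gas_limit_sketch() {
            const GAS_LIMIT_BOUND_DIVISOR: u64 = 1024;
            const MINIMUM_GAS_LIMIT: u64 = 5_000;

            fn gas_limit_ok(parent: u64, child: u64) -> bool {
                let bound = parent / GAS_LIMIT_BOUND_DIVISOR;
                child >= MINIMUM_GAS_LIMIT && child.abs_diff(parent) < bound
            }

            // 30_000_000 / 1024 = 29_296, so a 29_000 move is fine and 29_296 is not.
            assert!(gas_limit_ok(30_000_000, 30_029_000));
            assert!(!gas_limit_ok(30_000_000, 30_029_296));
        }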
self.validate_against_parent_gas_limit(header, parent)?; - validate_against_parent_eip1559_base_fee(header, parent, &self.chain_spec)?; + validate_against_parent_eip1559_base_fee( + header.header(), + parent.header(), + &self.chain_spec, + )?; // ensure that the blob gas fields for this block if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - validate_against_parent_4844(header, parent)?; + validate_against_parent_4844(header.header(), parent.header())?; } Ok(()) @@ -211,24 +253,12 @@ impl Consensu Ok(()) } - - fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { - validate_block_pre_execution(block, &self.chain_spec) - } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) - } } #[cfg(test)] mod tests { use super::*; - use alloy_primitives::{Sealable, B256}; + use alloy_primitives::B256; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use reth_primitives::proofs; @@ -313,16 +343,14 @@ mod tests { // that the header is valid let chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build()); - let sealed = Header { + let header = Header { base_fee_per_gas: Some(1337), withdrawals_root: Some(proofs::calculate_withdrawals_root(&[])), ..Default::default() - } - .seal_slow(); - let (header, seal) = sealed.into_parts(); + }; assert_eq!( - EthBeaconConsensus::new(chain_spec).validate_header(&SealedHeader::new(header, seal)), + EthBeaconConsensus::new(chain_spec).validate_header(&SealedHeader::seal(header,)), Ok(()) ); } diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index e510a91ab96..c339c8d25c6 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -1,7 +1,9 @@ +use alloy_consensus::{proofs::calculate_receipt_root, TxReceipt}; +use alloy_eips::eip7685::Requests; use alloy_primitives::{Bloom, B256}; use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; -use reth_primitives::{gas_spent_by_transactions, BlockWithSenders, GotExpected, Receipt, Request}; +use reth_primitives::{gas_spent_by_transactions, BlockWithSenders, GotExpected, Receipt}; /// Validate a block with regard to execution results: /// @@ -11,7 +13,7 @@ pub fn validate_block_post_execution( block: &BlockWithSenders, chain_spec: &ChainSpec, receipts: &[Receipt], - requests: &[Request], + requests: &Requests, ) -> Result<(), ConsensusError> { // Check if gas used matches the value set in header. 
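// [Editor's illustrative sketch] The drift rule that
// `validate_against_parent_gas_limit` enforces: a child's gas limit may differ
// from its parent's by strictly less than parent / GAS_LIMIT_BOUND_DIVISOR and
// must stay at or above the protocol minimum. The constants below mirror the
// ones used in this crate; the minimum of 5000 is mainnet's floor.
const GAS_LIMIT_BOUND_DIVISOR_SKETCH: u64 = 1024;
const MINIMUM_GAS_LIMIT_SKETCH: u64 = 5000;

fn gas_limit_within_bounds(parent: u64, child: u64) -> bool {
    child >= MINIMUM_GAS_LIMIT_SKETCH
        && child.abs_diff(parent) < parent / GAS_LIMIT_BOUND_DIVISOR_SKETCH
}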
let cumulative_gas_used = @@ -36,15 +38,15 @@ pub fn validate_block_post_execution( } } - // Validate that the header requests root matches the calculated requests root + // Validate that the header requests hash matches the calculated requests hash if chain_spec.is_prague_active_at_timestamp(block.timestamp) { - let Some(header_requests_root) = block.header.requests_root else { - return Err(ConsensusError::RequestsRootMissing) + let Some(header_requests_hash) = block.header.requests_hash else { + return Err(ConsensusError::RequestsHashMissing) }; - let requests_root = reth_primitives::proofs::calculate_requests_root(requests); - if requests_root != header_requests_root { - return Err(ConsensusError::BodyRequestsRootDiff( - GotExpected::new(requests_root, header_requests_root).into(), + let requests_hash = requests.requests_hash(); + if requests_hash != header_requests_hash { + return Err(ConsensusError::BodyRequestsHashDiff( + GotExpected::new(requests_hash, header_requests_hash).into(), )) } } @@ -61,10 +63,10 @@ fn verify_receipts( ) -> Result<(), ConsensusError> { // Calculate receipts root. let receipts_with_bloom = receipts.iter().map(Receipt::with_bloom_ref).collect::>(); - let receipts_root = reth_primitives::proofs::calculate_receipt_root_ref(&receipts_with_bloom); + let receipts_root = calculate_receipt_root(&receipts_with_bloom); // Calculate header logs bloom. - let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom()); compare_receipts_root_and_logs_bloom( receipts_root, diff --git a/crates/ethereum/engine-primitives/Cargo.toml b/crates/ethereum/engine-primitives/Cargo.toml index e9bcd425686..f019f6e5f2a 100644 --- a/crates/ethereum/engine-primitives/Cargo.toml +++ b/crates/ethereum/engine-primitives/Cargo.toml @@ -16,6 +16,7 @@ reth-chainspec.workspace = true reth-primitives.workspace = true reth-engine-primitives.workspace = true reth-payload-primitives.workspace = true +reth-payload-validator.workspace = true reth-rpc-types-compat.workspace = true alloy-rlp.workspace = true reth-chain-state.workspace = true diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 034a8c6bffb..59c870f4d28 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -11,17 +11,20 @@ mod payload; use std::sync::Arc; +use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; pub use alloy_rpc_types_engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, ExecutionPayloadV1, PayloadAttributes as EthPayloadAttributes, }; pub use payload::{EthBuiltPayload, EthPayloadBuilderAttributes}; use reth_chainspec::ChainSpec; -use reth_engine_primitives::{EngineTypes, EngineValidator}; +use reth_engine_primitives::{EngineTypes, EngineValidator, PayloadValidator}; use reth_payload_primitives::{ validate_version_specific_fields, EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes, PayloadTypes, }; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::{Block, SealedBlock}; /// The types used in the default mainnet ethereum beacon consensus engine. 
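// [Editor's illustrative sketch] The header logs bloom is simply the bitwise OR
// of every receipt's bloom, which is what the `fold` above computes; the
// 2048-bit bloom is modeled here as a plain 256-byte array.
fn combine_blooms(receipt_blooms: &[[u8; 256]]) -> [u8; 256] {
    receipt_blooms.iter().fold([0u8; 256], |mut acc, bloom| {
        for (a, b) in acc.iter_mut().zip(bloom) {
            *a |= b;
        }
        acc
    })
}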
#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] @@ -43,10 +46,10 @@ where + TryInto + TryInto, { - type ExecutionPayloadV1 = ExecutionPayloadV1; - type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; - type ExecutionPayloadV3 = ExecutionPayloadEnvelopeV3; - type ExecutionPayloadV4 = ExecutionPayloadEnvelopeV4; + type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1; + type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; + type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; + type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; } /// A default payload type for [`EthEngineTypes`] @@ -63,13 +66,31 @@ impl PayloadTypes for EthPayloadTypes { /// Validator for the ethereum engine API. #[derive(Debug, Clone)] pub struct EthereumEngineValidator { - chain_spec: Arc, + inner: ExecutionPayloadValidator, } impl EthereumEngineValidator { /// Instantiates a new validator. pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } + Self { inner: ExecutionPayloadValidator::new(chain_spec) } + } + + /// Returns the chain spec used by the validator. + #[inline] + fn chain_spec(&self) -> &ChainSpec { + self.inner.chain_spec() + } +} + +impl PayloadValidator for EthereumEngineValidator { + type Block = Block; + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result { + self.inner.ensure_well_formed_payload(payload, sidecar) } } @@ -82,7 +103,7 @@ where version: EngineApiMessageVersion, payload_or_attrs: PayloadOrAttributes<'_, EthPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, payload_or_attrs) + validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs) } fn ensure_well_formed_attributes( @@ -90,6 +111,6 @@ where version: EngineApiMessageVersion, attributes: &EthPayloadAttributes, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, attributes.into()) + validate_version_specific_fields(self.chain_spec(), version, attributes.into()) } } diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index ae370fdb9d7..ff07856f1ca 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -1,6 +1,6 @@ //! Contains types required for building a payload. -use alloy_eips::eip4844::BlobTransactionSidecar; +use alloy_eips::{eip4844::BlobTransactionSidecar, eip4895::Withdrawals, eip7685::Requests}; use alloy_primitives::{Address, B256, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ @@ -9,12 +9,11 @@ use alloy_rpc_types_engine::{ }; use reth_chain_state::ExecutedBlock; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; -use reth_primitives::{SealedBlock, Withdrawals}; +use reth_primitives::SealedBlock; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v1, block_to_payload_v3, block_to_payload_v4, - convert_block_to_payload_field_v2, + block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; -use std::convert::Infallible; +use std::{convert::Infallible, sync::Arc}; /// Contains the built payload. /// @@ -26,7 +25,7 @@ pub struct EthBuiltPayload { /// Identifier of the payload pub(crate) id: PayloadId, /// The built block - pub(crate) block: SealedBlock, + pub(crate) block: Arc, /// Block execution data for the payload, if any. 
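// [Editor's illustrative sketch -- all names hypothetical] The shape of the
// `EthereumEngineValidator` change above: wrap the payload validator, expose
// its chain spec through an accessor, and forward validation to the inner type.
struct InnerPayloadValidator {
    chain_id: u64,
}

impl InnerPayloadValidator {
    fn ensure_well_formed(&self, payload: &[u8]) -> Result<(), String> {
        if payload.is_empty() {
            return Err("empty payload".to_string());
        }
        Ok(())
    }
}

struct EngineValidatorSketch {
    inner: InnerPayloadValidator,
}

impl EngineValidatorSketch {
    fn chain_id(&self) -> u64 {
        self.inner.chain_id
    }

    // Pure delegation, mirroring `ensure_well_formed_payload` above.
    fn ensure_well_formed(&self, payload: &[u8]) -> Result<(), String> {
        self.inner.ensure_well_formed(payload)
    }
}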
pub(crate) executed_block: Option, /// The fees of the block @@ -34,19 +33,24 @@ pub struct EthBuiltPayload { /// The blobs, proofs, and commitments in the block. If the block is pre-cancun, this will be /// empty. pub(crate) sidecars: Vec, + /// The requests of the payload + pub(crate) requests: Option, } // === impl BuiltPayload === impl EthBuiltPayload { - /// Initializes the payload with the given initial block. + /// Initializes the payload with the given initial block + /// + /// Caution: This does not set any [`BlobTransactionSidecar`]. pub const fn new( id: PayloadId, - block: SealedBlock, + block: Arc, fees: U256, executed_block: Option, + requests: Option, ) -> Self { - Self { id, block, executed_block, fees, sidecars: Vec::new() } + Self { id, block, executed_block, fees, sidecars: Vec::new(), requests } } /// Returns the identifier of the payload. @@ -55,7 +59,7 @@ impl EthBuiltPayload { } /// Returns the built block(sealed) - pub const fn block(&self) -> &SealedBlock { + pub fn block(&self) -> &SealedBlock { &self.block } @@ -70,9 +74,18 @@ impl EthBuiltPayload { } /// Adds sidecars to the payload. - pub fn extend_sidecars(&mut self, sidecars: Vec) { + pub fn extend_sidecars(&mut self, sidecars: impl IntoIterator) { self.sidecars.extend(sidecars) } + + /// Same as [`Self::extend_sidecars`] but returns the type again. + pub fn with_sidecars( + mut self, + sidecars: impl IntoIterator, + ) -> Self { + self.extend_sidecars(sidecars); + self + } } impl BuiltPayload for EthBuiltPayload { @@ -87,6 +100,10 @@ impl BuiltPayload for EthBuiltPayload { fn executed_block(&self) -> Option { self.executed_block.clone() } + + fn requests(&self) -> Option { + self.requests.clone() + } } impl BuiltPayload for &EthBuiltPayload { @@ -101,12 +118,16 @@ impl BuiltPayload for &EthBuiltPayload { fn executed_block(&self) -> Option { self.executed_block.clone() } + + fn requests(&self) -> Option { + self.requests.clone() + } } // V1 engine_getPayloadV1 response impl From for ExecutionPayloadV1 { fn from(value: EthBuiltPayload) -> Self { - block_to_payload_v1(value.block) + block_to_payload_v1(Arc::unwrap_or_clone(value.block)) } } @@ -115,7 +136,10 @@ impl From for ExecutionPayloadEnvelopeV2 { fn from(value: EthBuiltPayload) -> Self { let EthBuiltPayload { block, fees, .. } = value; - Self { block_value: fees, execution_payload: convert_block_to_payload_field_v2(block) } + Self { + block_value: fees, + execution_payload: convert_block_to_payload_field_v2(Arc::unwrap_or_clone(block)), + } } } @@ -124,7 +148,7 @@ impl From for ExecutionPayloadEnvelopeV3 { let EthBuiltPayload { block, fees, sidecars, .. } = value; Self { - execution_payload: block_to_payload_v3(block), + execution_payload: block_to_payload_v3(Arc::unwrap_or_clone(block)), block_value: fees, // From the engine API spec: // @@ -135,17 +159,17 @@ impl From for ExecutionPayloadEnvelopeV3 { // Spec: // should_override_builder: false, - blobs_bundle: sidecars.into_iter().map(Into::into).collect::>().into(), + blobs_bundle: sidecars.into(), } } } impl From for ExecutionPayloadEnvelopeV4 { fn from(value: EthBuiltPayload) -> Self { - let EthBuiltPayload { block, fees, sidecars, .. } = value; + let EthBuiltPayload { block, fees, sidecars, requests, .. 
} = value; Self { - execution_payload: block_to_payload_v4(block), + execution_payload: block_to_payload_v3(Arc::unwrap_or_clone(block)), block_value: fees, // From the engine API spec: // @@ -157,12 +181,13 @@ impl From for ExecutionPayloadEnvelopeV4 { // should_override_builder: false, blobs_bundle: sidecars.into_iter().map(Into::into).collect::>().into(), + execution_requests: requests.unwrap_or_default().take(), } } } /// Container type for all components required to build a payload. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct EthPayloadBuilderAttributes { /// Id of the payload pub id: PayloadId, @@ -215,7 +240,11 @@ impl PayloadBuilderAttributes for EthPayloadBuilderAttributes { /// Creates a new payload builder for the given parent block and the attributes. /// /// Derives the unique [`PayloadId`] for the given parent and attributes - fn try_new(parent: B256, attributes: PayloadAttributes) -> Result { + fn try_new( + parent: B256, + attributes: PayloadAttributes, + _version: u8, + ) -> Result { Ok(Self::new(parent, attributes)) } @@ -275,10 +304,115 @@ pub(crate) fn payload_id(parent: &B256, attributes: &PayloadAttributes) -> Paylo #[cfg(test)] mod tests { use super::*; + use alloy_eips::eip4895::Withdrawal; + use alloy_primitives::B64; + use std::str::FromStr; #[test] fn attributes_serde() { let attributes = r#"{"timestamp":"0x1235","prevRandao":"0xf343b00e02dc34ec0124241f74f32191be28fb370bb48060f5fa4df99bda774c","suggestedFeeRecipient":"0x0000000000000000000000000000000000000000","withdrawals":null,"parentBeaconBlockRoot":null}"#; let _attributes: PayloadAttributes = serde_json::from_str(attributes).unwrap(); } + + #[test] + fn test_payload_id_basic() { + // Create a parent block and payload attributes + let parent = + B256::from_str("0x3b8fb240d288781d4aac94d3fd16809ee413bc99294a085798a589dae51ddd4a") + .unwrap(); + let attributes = PayloadAttributes { + timestamp: 0x5, + prev_randao: B256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + suggested_fee_recipient: Address::from_str( + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + ) + .unwrap(), + withdrawals: None, + parent_beacon_block_root: None, + target_blobs_per_block: None, + max_blobs_per_block: None, + }; + + // Verify that the generated payload ID matches the expected value + assert_eq!( + payload_id(&parent, &attributes), + PayloadId(B64::from_str("0xa247243752eb10b4").unwrap()) + ); + } + + #[test] + fn test_payload_id_with_withdrawals() { + // Set up the parent and attributes with withdrawals + let parent = + B256::from_str("0x9876543210abcdef9876543210abcdef9876543210abcdef9876543210abcdef") + .unwrap(); + let attributes = PayloadAttributes { + timestamp: 1622553200, + prev_randao: B256::from_slice(&[1; 32]), + suggested_fee_recipient: Address::from_str( + "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b", + ) + .unwrap(), + withdrawals: Some(vec![ + Withdrawal { + index: 1, + validator_index: 123, + address: Address::from([0xAA; 20]), + amount: 10, + }, + Withdrawal { + index: 2, + validator_index: 456, + address: Address::from([0xBB; 20]), + amount: 20, + }, + ]), + parent_beacon_block_root: None, + target_blobs_per_block: None, + max_blobs_per_block: None, + }; + + // Verify that the generated payload ID matches the expected value + assert_eq!( + payload_id(&parent, &attributes), + PayloadId(B64::from_str("0xedddc2f84ba59865").unwrap()) + ); + } + + #[test] + fn test_payload_id_with_parent_beacon_block_root() { + 
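// [Editor's note, illustrative] The envelope conversions above use
// `Arc::unwrap_or_clone`, which moves the block out when the payload holds the
// only reference and clones otherwise -- avoiding an unconditional clone now
// that `EthBuiltPayload` stores an `Arc<SealedBlock>`.
fn take_or_clone<T: Clone>(value: std::sync::Arc<T>) -> T {
    std::sync::Arc::unwrap_or_clone(value)
}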
// Set up the parent and attributes with a parent beacon block root + let parent = + B256::from_str("0x9876543210abcdef9876543210abcdef9876543210abcdef9876543210abcdef") + .unwrap(); + let attributes = PayloadAttributes { + timestamp: 1622553200, + prev_randao: B256::from_str( + "0x123456789abcdef123456789abcdef123456789abcdef123456789abcdef1234", + ) + .unwrap(), + suggested_fee_recipient: Address::from_str( + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + ) + .unwrap(), + withdrawals: None, + parent_beacon_block_root: Some( + B256::from_str( + "0x2222222222222222222222222222222222222222222222222222222222222222", + ) + .unwrap(), + ), + target_blobs_per_block: None, + max_blobs_per_block: None, + }; + + // Verify that the generated payload ID matches the expected value + assert_eq!( + payload_id(&parent, &attributes), + PayloadId(B64::from_str("0x0fc49cd532094cce").unwrap()) + ); + } } diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index 7215efa68c6..4ee07259918 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -18,8 +18,6 @@ reth-evm.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } reth-revm.workspace = true reth-ethereum-consensus.workspace = true -reth-prune-types.workspace = true -reth-execution-types.workspace = true reth-consensus.workspace = true # Ethereum @@ -36,10 +34,22 @@ reth-testing-utils.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-execution-types.workspace = true secp256k1.workspace = true serde_json.workspace = true alloy-genesis.workspace = true [features] default = ["std"] -std = [] +std = [ + "reth-consensus/std", + "reth-primitives/std", + "reth-revm/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "revm-primitives/std", + "secp256k1/std", + "reth-ethereum-forks/std" +] diff --git a/crates/ethereum/evm/src/config.rs b/crates/ethereum/evm/src/config.rs index e5253307b33..9d6b6d8796c 100644 --- a/crates/ethereum/evm/src/config.rs +++ b/crates/ethereum/evm/src/config.rs @@ -11,7 +11,9 @@ pub fn revm_spec_by_timestamp_after_merge( chain_spec: &ChainSpec, timestamp: u64, ) -> revm_primitives::SpecId { - if chain_spec.is_prague_active_at_timestamp(timestamp) { + if chain_spec.is_osaka_active_at_timestamp(timestamp) { + revm_primitives::OSAKA + } else if chain_spec.is_prague_active_at_timestamp(timestamp) { revm_primitives::PRAGUE } else if chain_spec.is_cancun_active_at_timestamp(timestamp) { revm_primitives::CANCUN diff --git a/crates/ethereum/evm/src/eip6110.rs b/crates/ethereum/evm/src/eip6110.rs index e78becd960c..d5700208195 100644 --- a/crates/ethereum/evm/src/eip6110.rs +++ b/crates/ethereum/evm/src/eip6110.rs @@ -1,11 +1,17 @@ //! EIP-6110 deposit requests parsing use alloc::{string::ToString, vec::Vec}; -use alloy_eips::eip6110::{DepositRequest, MAINNET_DEPOSIT_CONTRACT_ADDRESS}; -use alloy_primitives::Log; +use alloy_eips::eip6110::MAINNET_DEPOSIT_CONTRACT_ADDRESS; +use alloy_primitives::{Address, Bytes, Log}; use alloy_sol_types::{sol, SolEvent}; -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthChainSpec}; use reth_evm::execute::BlockValidationError; -use reth_primitives::{Receipt, Request}; +use reth_primitives::Receipt; + +/// The size of a deposit request in bytes. 
While the event fields emit +/// bytestrings, those bytestrings are fixed size. The fields are: 48-byte +/// pubkey, 32-byte withdrawal credentials, 8-byte amount, 96-byte signature, +/// and 8-byte index. +const DEPOSIT_BYTES_SIZE: usize = 48 + 32 + 8 + 96 + 8; sol! { #[allow(missing_docs)] @@ -18,75 +24,85 @@ sol! { ); } -/// Parse [deposit contract](https://etherscan.io/address/0x00000000219ab540356cbb839cbe05303d7705fa) -/// (address is from the passed [`ChainSpec`]) deposits from receipts, and return them as a -/// [vector](Vec) of (requests)[Request]. -pub fn parse_deposits_from_receipts<'a, I>( - chain_spec: &ChainSpec, +/// Accumulate a deposit request from a log. containing a [`DepositEvent`]. +pub fn accumulate_deposit_from_log(log: &Log, out: &mut Vec) { + out.reserve(DEPOSIT_BYTES_SIZE); + out.extend_from_slice(log.pubkey.as_ref()); + out.extend_from_slice(log.withdrawal_credentials.as_ref()); + out.extend_from_slice(log.amount.as_ref()); + out.extend_from_slice(log.signature.as_ref()); + out.extend_from_slice(log.index.as_ref()); +} + +/// Accumulate deposits from an iterator of logs. +pub fn accumulate_deposits_from_logs<'a>( + address: Address, + logs: impl IntoIterator, + out: &mut Vec, +) -> Result<(), BlockValidationError> { + logs.into_iter().filter(|log| log.address == address).try_for_each(|log| { + // We assume that the log is valid because it was emitted by the + // deposit contract. + let decoded_log = + DepositEvent::decode_log(log, false).map_err(|err: alloy_sol_types::Error| { + BlockValidationError::DepositRequestDecode(err.to_string()) + })?; + accumulate_deposit_from_log(&decoded_log, out); + Ok(()) + }) +} + +/// Accumulate deposits from a receipt. Iterates over the logs in the receipt +/// and accumulates the deposit request bytestrings. +pub fn accumulate_deposits_from_receipt( + address: Address, + receipt: &Receipt, + out: &mut Vec, +) -> Result<(), BlockValidationError> { + accumulate_deposits_from_logs(address, &receipt.logs, out) +} + +/// Accumulate deposits from a list of receipts. Iterates over the logs in the +/// receipts and accumulates the deposit request bytestrings. +pub fn accumulate_deposits_from_receipts<'a, I>( + address: Address, receipts: I, -) -> Result, BlockValidationError> + out: &mut Vec, +) -> Result<(), BlockValidationError> where I: IntoIterator, { - let deposit_contract_address = chain_spec - .deposit_contract - .as_ref() - .map_or(MAINNET_DEPOSIT_CONTRACT_ADDRESS, |contract| contract.address); receipts .into_iter() - .flat_map(|receipt| receipt.logs.iter()) - // No need to filter for topic because there's only one event and that's the Deposit event - // in the deposit contract. - .filter(|log| log.address == deposit_contract_address) - .map(|log| { - let decoded_log = DepositEvent::decode_log(log, false)?; - let deposit = parse_deposit_from_log(&decoded_log); - Ok(Request::DepositRequest(deposit)) - }) - .collect::, _>>() - .map_err(|err: alloy_sol_types::Error| { - BlockValidationError::DepositRequestDecode(err.to_string()) - }) + .try_for_each(|receipt| accumulate_deposits_from_receipt(address, receipt, out)) } -fn parse_deposit_from_log(log: &Log) -> DepositRequest { - // SAFETY: These `expect` https://github.com/ethereum/consensus-specs/blob/5f48840f4d768bf0e0a8156a3ed06ec333589007/solidity_deposit_contract/deposit_contract.sol#L107-L110 - // are safe because the `DepositEvent` is the only event in the deposit contract and the length - // checks are done there. 
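// [Editor's illustrative sketch -- hypothetical field types] Accumulating one
// deposit into the flat request buffer, mirroring `accumulate_deposit_from_log`
// above: per EIP-6110 each deposit contributes exactly
// 48 + 32 + 8 + 96 + 8 = 192 bytes, concatenated in event-field order.
struct DepositFieldsSketch {
    pubkey: [u8; 48],
    withdrawal_credentials: [u8; 32],
    amount: [u8; 8], // little-endian u64, as emitted by the deposit contract
    signature: [u8; 96],
    index: [u8; 8], // little-endian u64
}

fn accumulate_deposit(dep: &DepositFieldsSketch, out: &mut Vec<u8>) {
    out.reserve(192);
    out.extend_from_slice(&dep.pubkey);
    out.extend_from_slice(&dep.withdrawal_credentials);
    out.extend_from_slice(&dep.amount);
    out.extend_from_slice(&dep.signature);
    out.extend_from_slice(&dep.index);
}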
- DepositRequest { - pubkey: log - .pubkey - .as_ref() - .try_into() - .expect("pubkey length should be enforced in deposit contract"), - withdrawal_credentials: log - .withdrawal_credentials - .as_ref() - .try_into() - .expect("withdrawal_credentials length should be enforced in deposit contract"), - amount: u64::from_le_bytes( - log.amount - .as_ref() - .try_into() - .expect("amount length should be enforced in deposit contract"), - ), - signature: log - .signature - .as_ref() - .try_into() - .expect("signature length should be enforced in deposit contract"), - index: u64::from_le_bytes( - log.index - .as_ref() - .try_into() - .expect("deposit index length should be enforced in deposit contract"), - ), - } +/// Find deposit logs in a list of receipts, and return the concatenated +/// deposit request bytestring. +/// +/// The address of the deposit contract is taken from the chain spec, and +/// defaults to [`MAINNET_DEPOSIT_CONTRACT_ADDRESS`] if not specified in +/// the chain spec. +pub fn parse_deposits_from_receipts<'a, I>( + chainspec: &ChainSpec, + receipts: I, +) -> Result +where + I: IntoIterator, +{ + let mut out = Vec::new(); + accumulate_deposits_from_receipts( + chainspec.deposit_contract().map(|c| c.address).unwrap_or(MAINNET_DEPOSIT_CONTRACT_ADDRESS), + receipts, + &mut out, + )?; + Ok(out.into()) } #[cfg(test)] mod tests { use super::*; + use alloy_primitives::bytes; use reth_chainspec::MAINNET; use reth_primitives::TxType; @@ -119,9 +135,12 @@ mod tests { }, ]; - let requests = parse_deposits_from_receipts(&MAINNET, &receipts).unwrap(); - assert_eq!(requests.len(), 2); - assert_eq!(requests[0].as_deposit_request().unwrap().amount, 32e9 as u64); - assert_eq!(requests[1].as_deposit_request().unwrap().amount, 32e9 as u64); + let request_data = parse_deposits_from_receipts(&MAINNET, &receipts).unwrap(); + assert_eq!( + request_data, + bytes!( + "998c8086669bf65e24581cda47d8537966e9f5066fc6ffdcba910a1bfb91eae7a4873fcce166a1c4ea217e6b1afd396201000000000000000000000001c340fb72ed14d4eaa71f7633ee9e33b88d4f39004059730700000098ddbffd700c1aac324cfdf0492ff289223661eb26718ce3651ba2469b22f480d56efab432ed91af05a006bde0c1ea68134e0acd8cacca0c13ad1f716db874b44abfcc966368019753174753bca3af2ea84bc569c46f76592a91e97f311eddece474160000000000a1a2ba870a90e889aa594a0cc1c6feffb94c2d8f65646c937f1f456a315ef649533e25a4614d8f4f66ebdb06481b90af0100000000000000000000000a0f04a231efbc29e1db7d086300ff550211c2f60040597307000000ad416d590e1a7f52baff770a12835b68904efad22cc9f8ba531e50cbbd26f32b9c7373cf6538a0577f501e4d3e3e63e208767bcccaae94e1e3720bfb734a286f9c017d17af46536545ccb7ca94d71f295e71f6d25bf978c09ada6f8d3f7ba039e374160000000000" + ) + ); } } diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 9c7748a561f..6cbbb69c906 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -1,157 +1,180 @@ -//! Ethereum block executor. +//! Ethereum block execution strategy. 
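// [Editor's note, illustrative] The deleted parsing code above relied on the
// deposit contract emitting `amount` (and `index`) as 8 little-endian bytes;
// the new flat encoding keeps those bytes verbatim. Decoding one standalone:
fn decode_deposit_amount(le_bytes: [u8; 8]) -> u64 {
    u64::from_le_bytes(le_bytes)
}

#[test]
fn decode_32_eth_in_gwei() {
    let le = 32_000_000_000u64.to_le_bytes();
    assert_eq!(decode_deposit_amount(le), 32e9 as u64); // matches the old assertion
}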
use crate::{ dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, EthEvmConfig, }; -use alloc::{boxed::Box, sync::Arc, vec, vec::Vec}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::Transaction as _; -use alloy_primitives::{BlockNumber, U256}; +use alloy_eips::eip7685::Requests; use core::fmt::Display; -use reth_chainspec::{ChainSpec, EthereumHardforks, MAINNET}; +use reth_chainspec::{ChainSpec, EthereumHardfork, EthereumHardforks, MAINNET}; +use reth_consensus::ConsensusError; use reth_ethereum_consensus::validate_block_post_execution; use reth_evm::{ execute::{ - BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, - BlockExecutorProvider, BlockValidationError, Executor, ProviderError, + balance_increment_state, BasicBlockExecutorProvider, BlockExecutionError, + BlockExecutionStrategy, BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, + ProviderError, }, - system_calls::{NoopHook, OnStateHook, SystemCaller}, - ConfigureEvm, -}; -use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockWithSenders, EthereumHardfork, Header, Receipt, Request}; -use reth_prune_types::PruneModes; -use reth_revm::{ - batch::BlockBatchRecord, - db::{states::bundle_state::BundleRetention, State}, state_change::post_block_balance_increments, - Evm, + system_calls::{OnStateHook, SystemCaller}, + ConfigureEvm, TxEnvOverrides, }; +use reth_primitives::{BlockWithSenders, EthPrimitives, Receipt}; +use reth_revm::db::State; use revm_primitives::{ db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, + EnvWithHandlerCfg, ResultAndState, U256, }; -/// Provides executors to execute regular ethereum blocks +/// Factory for [`EthExecutionStrategy`]. #[derive(Debug, Clone)] -pub struct EthExecutorProvider { +pub struct EthExecutionStrategyFactory { + /// The chainspec chain_spec: Arc, + /// How to create an EVM. evm_config: EvmConfig, } -impl EthExecutorProvider { - /// Creates a new default ethereum executor provider. +impl EthExecutionStrategyFactory { + /// Creates a new default ethereum executor strategy factory. pub fn ethereum(chain_spec: Arc) -> Self { Self::new(chain_spec.clone(), EthEvmConfig::new(chain_spec)) } - /// Returns a new provider for the mainnet. + /// Returns a new factory for the mainnet. pub fn mainnet() -> Self { Self::ethereum(MAINNET.clone()) } } -impl EthExecutorProvider { - /// Creates a new executor provider. +impl EthExecutionStrategyFactory { + /// Creates a new executor strategy factory. pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { Self { chain_spec, evm_config } } } -impl EthExecutorProvider -where - EvmConfig: ConfigureEvm
, -{ - fn eth_executor(&self, db: DB) -> EthBlockExecutor - where - DB: Database>, - { - EthBlockExecutor::new( - self.chain_spec.clone(), - self.evm_config.clone(), - State::builder().with_database(db).with_bundle_update().without_state_clear().build(), - ) - } -} - -impl BlockExecutorProvider for EthExecutorProvider +impl BlockExecutionStrategyFactory for EthExecutionStrategyFactory where - EvmConfig: ConfigureEvm
, + EvmConfig: Clone + + Unpin + + Sync + + Send + + 'static + + ConfigureEvm< + Header = alloy_consensus::Header, + Transaction = reth_primitives::TransactionSigned, + >, { - type Executor + Display>> = - EthBlockExecutor; + type Primitives = EthPrimitives; - type BatchExecutor + Display>> = - EthBatchExecutor; + type Strategy + Display>> = + EthExecutionStrategy; - fn executor(&self, db: DB) -> Self::Executor + fn create_strategy(&self, db: DB) -> Self::Strategy where DB: Database + Display>, { - self.eth_executor(db) + let state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + EthExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) } - - fn batch_executor(&self, db: DB) -> Self::BatchExecutor - where - DB: Database + Display>, - { - let executor = self.eth_executor(db); - EthBatchExecutor { executor, batch_record: BlockBatchRecord::default() } - } -} - -/// Helper type for the output of executing a block. -#[derive(Debug, Clone)] -struct EthExecuteOutput { - receipts: Vec, - requests: Vec, - gas_used: u64, } -/// Helper container type for EVM with chain spec. -#[derive(Debug, Clone)] -struct EthEvmExecutor { +/// Block execution strategy for Ethereum. +#[allow(missing_debug_implementations)] +pub struct EthExecutionStrategy +where + EvmConfig: Clone, +{ /// The chainspec chain_spec: Arc, /// How to create an EVM. evm_config: EvmConfig, + /// Optional overrides for the transactions environment. + tx_env_overrides: Option>, + /// Current state for block execution. + state: State, + /// Utility to call system smart contracts. + system_caller: SystemCaller, } -impl EthEvmExecutor +impl EthExecutionStrategy where - EvmConfig: ConfigureEvm
, + EvmConfig: Clone, { - /// Executes the transactions in the block and returns the receipts of the transactions in the - /// block, the total gas used and the list of EIP-7685 [requests](Request). - /// - /// This applies the pre-execution and post-execution changes that require an [EVM](Evm), and - /// executes the transactions. - /// - /// The optional `state_hook` will be executed with the state changes if present. + /// Creates a new [`EthExecutionStrategy`] + pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { + let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); + Self { state, chain_spec, evm_config, system_caller, tx_env_overrides: None } + } +} + +impl EthExecutionStrategy +where + DB: Database + Display>, + EvmConfig: ConfigureEvm
, +{ + /// Configures a new evm configuration and block environment for the given block. /// - /// # Note + /// # Caution /// - /// It does __not__ apply post-execution changes that do not require an [EVM](Evm), for that see - /// [`EthBlockExecutor::post_execution`]. - fn execute_state_transitions( + /// This does not initialize the tx environment. + fn evm_env_for_block( &self, + header: &alloy_consensus::Header, + total_difficulty: U256, + ) -> EnvWithHandlerCfg { + let (cfg, block_env) = self.evm_config.cfg_and_block_env(header, total_difficulty); + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } +} + +impl BlockExecutionStrategy for EthExecutionStrategy +where + DB: Database + Display>, + EvmConfig: ConfigureEvm< + Header = alloy_consensus::Header, + Transaction = reth_primitives::TransactionSigned, + >, +{ + type DB = DB; + type Error = BlockExecutionError; + + type Primitives = EthPrimitives; + + fn init(&mut self, tx_env_overrides: Box) { + self.tx_env_overrides = Some(tx_env_overrides); + } + + fn apply_pre_execution_changes( + &mut self, block: &BlockWithSenders, - mut evm: Evm<'_, Ext, &mut State>, - state_hook: Option, - ) -> Result - where - DB: Database, - DB::Error: Into + Display, - F: OnStateHook + 'static, - { - let mut system_caller = SystemCaller::new(self.evm_config.clone(), &self.chain_spec); - if let Some(hook) = state_hook { - system_caller.with_state_hook(Some(Box::new(hook) as Box)); - } + total_difficulty: U256, + ) -> Result<(), Self::Error> { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = + (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + self.state.set_state_clear_flag(state_clear_flag); + + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + self.system_caller.apply_pre_execution_changes(&block.block, &mut evm)?; + + Ok(()) + } - system_caller.apply_pre_execution_changes(block, &mut evm)?; + fn execute_transactions( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result, Self::Error> { + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - // execute transactions let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body.transactions.len()); for (sender, transaction) in block.transactions_with_sender() { @@ -168,6 +191,10 @@ where self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); + if let Some(tx_env_overrides) = &mut self.tx_env_overrides { + tx_env_overrides.apply(evm.tx_mut()); + } + // Execute transaction. 
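// [Editor's illustrative sketch -- hypothetical names] The `tx_env_overrides`
// hook applied in the transaction loop above: an optional boxed mutator run
// against each transaction environment just before execution, e.g. to zero the
// gas price for simulation.
struct TxEnvSketch {
    gas_price: u128,
}

trait TxEnvOverrideSketch {
    fn apply(&mut self, env: &mut TxEnvSketch);
}

struct ZeroGasPrice;

impl TxEnvOverrideSketch for ZeroGasPrice {
    fn apply(&mut self, env: &mut TxEnvSketch) {
        env.gas_price = 0;
    }
}

fn prepare_tx(overrides: &mut Option<Box<dyn TxEnvOverrideSketch>>, env: &mut TxEnvSketch) {
    if let Some(ov) = overrides {
        ov.apply(env);
    }
}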
let result_and_state = evm.transact().map_err(move |err| { let new_err = err.map_db_err(|e| e.into()); @@ -177,7 +204,7 @@ where error: Box::new(new_err), } })?; - system_caller.on_state(&result_and_state); + self.system_caller.on_state(&result_and_state.state); let ResultAndState { result, state } = result_and_state; evm.db_mut().commit(state); @@ -199,137 +226,41 @@ where }, ); } - - let requests = if self.chain_spec.is_prague_active_at_timestamp(block.timestamp) { - // Collect all EIP-6110 deposits - let deposit_requests = - crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, &receipts)?; - - let post_execution_requests = system_caller.apply_post_execution_changes(&mut evm)?; - - [deposit_requests, post_execution_requests].concat() - } else { - vec![] - }; - - Ok(EthExecuteOutput { receipts, requests, gas_used: cumulative_gas_used }) - } -} - -/// A basic Ethereum block executor. -/// -/// Expected usage: -/// - Create a new instance of the executor. -/// - Execute the block. -#[derive(Debug)] -pub struct EthBlockExecutor { - /// Chain specific evm config that's used to execute a block. - executor: EthEvmExecutor, - /// The state to use for execution - state: State, -} - -impl EthBlockExecutor { - /// Creates a new Ethereum block executor. - pub const fn new(chain_spec: Arc, evm_config: EvmConfig, state: State) -> Self { - Self { executor: EthEvmExecutor { chain_spec, evm_config }, state } - } - - #[inline] - fn chain_spec(&self) -> &ChainSpec { - &self.executor.chain_spec - } - - /// Returns mutable reference to the state that wraps the underlying database. - #[allow(unused)] - fn state_mut(&mut self) -> &mut State { - &mut self.state - } -} - -impl EthBlockExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - /// Configures a new evm configuration and block environment for the given block. - /// - /// # Caution - /// - /// This does not initialize the tx environment. - fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.executor.evm_config.fill_cfg_and_block_env( - &mut cfg, - &mut block_env, - header, - total_difficulty, - ); - - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + Ok(ExecuteOutput { receipts, gas_used: cumulative_gas_used }) } - /// Convenience method to invoke `execute_without_verification_with_state_hook` setting the - /// state hook as `None`. - fn execute_without_verification( + fn apply_post_execution_changes( &mut self, block: &BlockWithSenders, total_difficulty: U256, - ) -> Result { - self.execute_without_verification_with_state_hook(block, total_difficulty, None::) - } - - /// Execute a single block and apply the state changes to the internal state. - /// - /// Returns the receipts of the transactions in the block, the total gas used and the list of - /// EIP-7685 [requests](Request). - /// - /// Returns an error if execution fails. - fn execute_without_verification_with_state_hook( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - state_hook: Option, - ) -> Result - where - F: OnStateHook + 'static, - { - // 1. prepare state on new block - self.on_new_block(&block.header); - - // 2. configure the evm and execute + receipts: &[Receipt], + ) -> Result { let env = self.evm_env_for_block(&block.header, total_difficulty); - let output = { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - self.executor.execute_state_transitions(block, evm, state_hook) - }?; + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - // 3. apply post execution changes - self.post_execution(block, total_difficulty)?; + let requests = if self.chain_spec.is_prague_active_at_timestamp(block.timestamp) { + // Collect all EIP-6110 deposits + let deposit_requests = + crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, receipts)?; - Ok(output) - } + let mut requests = Requests::default(); - /// Apply settings before a new block is executed. - pub(crate) fn on_new_block(&mut self, header: &Header) { - // Set state clear flag if the block is after the Spurious Dragon hardfork. 
- let state_clear_flag = self.chain_spec().is_spurious_dragon_active_at_block(header.number); - self.state.set_state_clear_flag(state_clear_flag); - } + if !deposit_requests.is_empty() { + requests.push_request(core::iter::once(0).chain(deposit_requests).collect()); + } + + requests.extend(self.system_caller.apply_post_execution_changes(&mut evm)?); + requests + } else { + Requests::default() + }; + drop(evm); - /// Apply post execution state changes that do not require an [EVM](Evm), such as: block - /// rewards, withdrawals, and irregular DAO hardfork state change - pub fn post_execution( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { let mut balance_increments = - post_block_balance_increments(self.chain_spec(), block, total_difficulty); + post_block_balance_increments(&self.chain_spec, &block.block, total_difficulty); // Irregular state change at Ethereum DAO hardfork - if self.chain_spec().fork(EthereumHardfork::Dao).transitions_at_block(block.number) { + if self.chain_spec.fork(EthereumHardfork::Dao).transitions_at_block(block.number) { // drain balances from hardcoded addresses. let drained_balance: u128 = self .state @@ -343,175 +274,83 @@ where } // increment balances self.state - .increment_balances(balance_increments) + .increment_balances(balance_increments.clone()) .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; + // call state hook with changes due to balance increments. + let balance_state = balance_increment_state(&balance_increments, &mut self.state)?; + self.system_caller.on_state(&balance_state); - Ok(()) + Ok(requests) } -} - -impl Executor for EthBlockExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = BlockExecutionOutput; - type Error = BlockExecutionError; - - /// Executes the block and commits the changes to the internal state. - /// - /// Returns the receipts of the transactions in the block. - /// - /// Returns an error if the block could not be executed or failed verification. - fn execute(mut self, input: Self::Input<'_>) -> Result { - let BlockExecutionInput { block, total_difficulty } = input; - let EthExecuteOutput { receipts, requests, gas_used } = - self.execute_without_verification(block, total_difficulty)?; - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - - Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, requests, gas_used }) + fn state_ref(&self) -> &State { + &self.state } - fn execute_with_state_closure( - mut self, - input: Self::Input<'_>, - mut witness: F, - ) -> Result - where - F: FnMut(&State), - { - let BlockExecutionInput { block, total_difficulty } = input; - let EthExecuteOutput { receipts, requests, gas_used } = - self.execute_without_verification(block, total_difficulty)?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - witness(&self.state); - Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, requests, gas_used }) - } - - fn execute_with_state_hook( - mut self, - input: Self::Input<'_>, - state_hook: F, - ) -> Result - where - F: OnStateHook + 'static, - { - let BlockExecutionInput { block, total_difficulty } = input; - let EthExecuteOutput { receipts, requests, gas_used } = self - .execute_without_verification_with_state_hook( - block, - total_difficulty, - Some(state_hook), - )?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, requests, gas_used }) - } -} -/// An executor for a batch of blocks. -/// -/// State changes are tracked until the executor is finalized. -#[derive(Debug)] -pub struct EthBatchExecutor { - /// The executor used to execute single blocks - /// - /// All state changes are committed to the [State]. - executor: EthBlockExecutor, - /// Keeps track of the batch and records receipts based on the configured prune mode - batch_record: BlockBatchRecord, -} - -impl EthBatchExecutor { - /// Returns mutable reference to the state that wraps the underlying database. - #[allow(unused)] fn state_mut(&mut self) -> &mut State { - self.executor.state_mut() + &mut self.state } -} - -impl BatchExecutor for EthBatchExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = ExecutionOutcome; - type Error = BlockExecutionError; - - fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { - let BlockExecutionInput { block, total_difficulty } = input; - - if self.batch_record.first_block().is_none() { - self.batch_record.set_first_block(block.number); - } - - let EthExecuteOutput { receipts, requests, gas_used: _ } = - self.executor.execute_without_verification(block, total_difficulty)?; - - validate_block_post_execution(block, self.executor.chain_spec(), &receipts, &requests)?; - // prepare the state according to the prune mode - let retention = self.batch_record.bundle_retention(block.number); - self.executor.state.merge_transitions(retention); - - // store receipts in the set - self.batch_record.save_receipts(receipts)?; - - // store requests in the set - self.batch_record.save_requests(requests); - - Ok(()) + fn with_state_hook(&mut self, hook: Option>) { + self.system_caller.with_state_hook(hook); } - fn finalize(mut self) -> Self::Output { - ExecutionOutcome::new( - self.executor.state.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - self.batch_record.take_requests(), - ) + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + requests: &Requests, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec.clone(), receipts, requests) } +} - fn set_tip(&mut self, tip: BlockNumber) { - self.batch_record.set_tip(tip); - } +/// Helper type with backwards compatible methods to obtain Ethereum executor +/// providers. +#[derive(Debug)] +pub struct EthExecutorProvider; - fn set_prune_modes(&mut self, prune_modes: PruneModes) { - self.batch_record.set_prune_modes(prune_modes); +impl EthExecutorProvider { + /// Creates a new default ethereum executor provider. + pub fn ethereum( + chain_spec: Arc, + ) -> BasicBlockExecutorProvider { + BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::ethereum(chain_spec)) } - fn size_hint(&self) -> Option { - Some(self.executor.state.bundle_state.size_hint()) + /// Returns a new provider for the mainnet. 
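// [Editor's note, illustrative] EIP-7685 requests are type-prefixed byte
// strings; the `core::iter::once(0).chain(deposit_requests)` call in
// `apply_post_execution_changes` above prepends the deposit request type byte
// (0x00) to the flat deposit data before it is pushed into `Requests`.
fn prefix_request(request_type: u8, data: Vec<u8>) -> Vec<u8> {
    core::iter::once(request_type).chain(data).collect()
}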
+ pub fn mainnet() -> BasicBlockExecutorProvider { + BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::mainnet()) } } #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{TxLegacy, EMPTY_ROOT_HASH}; + use alloy_consensus::{constants::ETH_TO_WEI, Header, TxLegacy}; use alloy_eips::{ eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, + eip4895::Withdrawal, eip7002::{WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, WITHDRAWAL_REQUEST_PREDEPLOY_CODE}, + eip7685::EMPTY_REQUESTS_HASH, }; use alloy_primitives::{b256, fixed_bytes, keccak256, Bytes, TxKind, B256}; use reth_chainspec::{ChainSpecBuilder, ForkCondition}; + use reth_evm::execute::{ + BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider, Executor, + }; + use reth_execution_types::BlockExecutionOutput; use reth_primitives::{ - constants::ETH_TO_WEI, public_key_to_address, Account, Block, BlockBody, Transaction, + public_key_to_address, Account, Block, BlockBody, BlockExt, Transaction, }; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, }; use reth_testing_utils::generators::{self, sign_tx_with_key_pair}; - use revm_primitives::BLOCKHASH_SERVE_WINDOW; + use revm_primitives::{address, EvmState, BLOCKHASH_SERVE_WINDOW}; use secp256k1::{Keypair, Secp256k1}; - use std::collections::HashMap; + use std::{collections::HashMap, sync::mpsc}; fn create_state_provider_with_beacon_root_contract() -> StateProviderTest { let mut db = StateProviderTest::default(); @@ -551,8 +390,13 @@ mod tests { db } - fn executor_provider(chain_spec: Arc) -> EthExecutorProvider { - EthExecutorProvider { evm_config: EthEvmConfig::new(chain_spec.clone()), chain_spec } + fn executor_provider( + chain_spec: Arc, + ) -> BasicBlockExecutorProvider { + let strategy_factory = + EthExecutionStrategyFactory::new(chain_spec.clone(), EthEvmConfig::new(chain_spec)); + + BasicBlockExecutorProvider::new(strategy_factory) } #[test] @@ -571,10 +415,11 @@ mod tests { let provider = executor_provider(chain_spec); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); + // attempt to execute a block without parent beacon block root, expect err - let err = provider - .executor(StateProviderDatabase::new(&db)) - .execute( + let err = executor + .execute_and_verify_one( ( &BlockWithSenders { block: Block { @@ -583,7 +428,6 @@ mod tests { transactions: vec![], ommers: vec![], withdrawals: None, - requests: None, }, }, senders: vec![], @@ -604,24 +448,24 @@ mod tests { // fix header, set a gas limit header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - let mut executor = provider.executor(StateProviderDatabase::new(&db)); - // Now execute a block with the fixed header, ensure that it does not fail executor - .execute_without_verification( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: BlockBody { - transactions: vec![], - ommers: vec![], - withdrawals: None, - requests: None, + .execute_and_verify_one( + ( + &BlockWithSenders { + block: Block { + header: header.clone(), + body: BlockBody { + transactions: vec![], + ommers: vec![], + withdrawals: None, + }, }, + senders: vec![], }, - senders: vec![], - }, - U256::ZERO, + U256::ZERO, + ) + .into(), ) .unwrap(); @@ -635,16 +479,17 @@ mod tests { let parent_beacon_block_root_index = timestamp_index % history_buffer_length + history_buffer_length; - // get timestamp storage and compare - let timestamp_storage = - 
executor.state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap(); + let timestamp_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() + }); assert_eq!(timestamp_storage, U256::from(header.timestamp)); // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor - .state - .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) - .expect("storage value should exist"); + let parent_beacon_block_root_storage = executor.with_state_mut(|state| { + state + .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) + .expect("storage value should exist") + }); assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); } @@ -684,7 +529,6 @@ mod tests { transactions: vec![], ommers: vec![], withdrawals: None, - requests: None, }, }, senders: vec![], @@ -739,7 +583,6 @@ mod tests { transactions: vec![], ommers: vec![], withdrawals: None, - requests: None, }, }, senders: vec![], @@ -753,7 +596,8 @@ mod tests { ); // ensure that the nonce of the system address account has not changed - let nonce = executor.state_mut().basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce; + let nonce = + executor.with_state_mut(|state| state.basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce); assert_eq!(nonce, 0); } @@ -811,11 +655,12 @@ mod tests { // there is no system contract call so there should be NO STORAGE CHANGES // this means we'll check the transition state - let transition_state = executor - .state_mut() - .transition_state - .take() - .expect("the evm should be initialized with bundle updates"); + let transition_state = executor.with_state_mut(|state| { + state + .transition_state + .take() + .expect("the evm should be initialized with bundle updates") + }); // assert that it is the default (empty) transition state assert_eq!(transition_state, TransitionState::default()); @@ -873,17 +718,15 @@ mod tests { timestamp_index % history_buffer_length + history_buffer_length; // get timestamp storage and compare - let timestamp_storage = executor - .state_mut() - .storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)) - .unwrap(); + let timestamp_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() + }); assert_eq!(timestamp_storage, U256::from(header.timestamp)); // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor - .state_mut() - .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) - .unwrap(); + let parent_beacon_block_root_storage = executor.with_state_mut(|state| { + state.storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)).unwrap() + }); assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); } @@ -909,7 +752,6 @@ mod tests { db } - #[test] fn eip_2935_pre_fork() { let db = create_state_provider_with_block_hashes(1); @@ -948,12 +790,11 @@ mod tests { // // we load the account first, because revm expects it to be // loaded - executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap(); - assert!(executor - .state_mut() + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) .unwrap() - .is_zero()); + .is_zero())); } #[test] @@ -992,12 +833,11 @@ mod tests { // // we load the account first, because revm expects it to be // loaded - 
executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap(); - assert!(executor - .state_mut() + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) .unwrap() - .is_zero()); + .is_zero())); } #[test] @@ -1016,7 +856,7 @@ mod tests { parent_hash: B256::random(), timestamp: 1, number: fork_activation_block, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; let provider = executor_provider(chain_spec); @@ -1039,21 +879,20 @@ mod tests { ); // the hash for the ancestor of the fork activation block should be present - assert!(executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()); + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); assert_ne!( - executor - .state_mut() + executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block - 1)) - .unwrap(), + .unwrap()), U256::ZERO ); // the hash of the block itself should not be in storage - assert!(executor - .state_mut() + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block)) .unwrap() - .is_zero()); + .is_zero())); } #[test] @@ -1075,7 +914,7 @@ mod tests { parent_hash: B256::random(), timestamp: 1, number: fork_activation_block, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; @@ -1096,15 +935,15 @@ mod tests { ); // the hash for the ancestor of the fork activation block should be present - assert!(executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()); + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); assert_ne!( - executor - .state_mut() + executor.with_state_mut(|state| state .storage( HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block % BLOCKHASH_SERVE_WINDOW as u64 - 1) ) - .unwrap(), + .unwrap()), U256::ZERO ); } @@ -1121,7 +960,7 @@ mod tests { ); let mut header = chain_spec.genesis_header().clone(); - header.requests_root = Some(EMPTY_ROOT_HASH); + header.requests_hash = Some(EMPTY_REQUESTS_HASH); let header_hash = header.hash_slow(); let provider = executor_provider(chain_spec); @@ -1147,19 +986,18 @@ mod tests { // // we load the account first, because revm expects it to be // loaded - executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap(); - assert!(executor - .state_mut() + executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) .unwrap() - .is_zero()); + .is_zero())); // attempt to execute block 1, this should not fail let header = Header { parent_hash: header_hash, timestamp: 1, number: 1, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; let header_hash = header.hash_slow(); @@ -1180,23 +1018,25 @@ mod tests { ); // the block hash of genesis should now be in storage, but not block 1 - assert!(executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()); + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); assert_ne!( - executor.state_mut().storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap(), + executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) + .unwrap()), U256::ZERO ); - 
assert!(executor - .state_mut() + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::from(1)) .unwrap() - .is_zero()); + .is_zero())); // attempt to execute block 2, this should not fail let header = Header { parent_hash: header_hash, timestamp: 1, number: 2, - requests_root: Some(EMPTY_ROOT_HASH), + requests_hash: Some(EMPTY_REQUESTS_HASH), ..Header::default() }; @@ -1216,20 +1056,24 @@ mod tests { ); // the block hash of genesis and block 1 should now be in storage, but not block 2 - assert!(executor.state_mut().basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some()); + assert!(executor + .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); assert_ne!( - executor.state_mut().storage(HISTORY_STORAGE_ADDRESS, U256::ZERO).unwrap(), + executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) + .unwrap()), U256::ZERO ); assert_ne!( - executor.state_mut().storage(HISTORY_STORAGE_ADDRESS, U256::from(1)).unwrap(), + executor.with_state_mut(|state| state + .storage(HISTORY_STORAGE_ADDRESS, U256::from(1)) + .unwrap()), U256::ZERO ); - assert!(executor - .state_mut() + assert!(executor.with_state_mut(|state| state .storage(HISTORY_STORAGE_ADDRESS, U256::from(2)) .unwrap() - .is_zero()); + .is_zero())); } #[test] @@ -1254,15 +1098,16 @@ mod tests { HashMap::default(), ); - // https://github.com/lightclient/7002asm/blob/e0d68e04d15f25057af7b6d180423d94b6b3bdb3/test/Contract.t.sol.in#L49-L64 + // https://github.com/lightclient/sys-asm/blob/9282bdb9fd64e024e27f60f507486ffb2183cba2/test/Withdrawal.t.sol.in#L36 let validator_public_key = fixed_bytes!("111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); - let withdrawal_amount = fixed_bytes!("2222222222222222"); + let withdrawal_amount = fixed_bytes!("0203040506070809"); let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); assert_eq!(input.len(), 56); let mut header = chain_spec.genesis_header().clone(); header.gas_limit = 1_500_000; - header.gas_used = 134_807; + // measured + header.gas_used = 135_856; header.receipts_root = b256!("b31a3e47b902e9211c4d349af4e4c5604ce388471e79ca008907ae4616bb0ed3"); @@ -1272,10 +1117,10 @@ mod tests { chain_id: Some(chain_spec.chain.id()), nonce: 1, gas_price: header.base_fee_per_gas.unwrap().into(), - gas_limit: 134_807, + gas_limit: header.gas_used, to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), // `MIN_WITHDRAWAL_REQUEST_FEE` - value: U256::from(1), + value: U256::from(2), input, }), ); @@ -1302,11 +1147,9 @@ mod tests { let receipt = receipts.first().unwrap(); assert!(receipt.success); - let request = requests.first().unwrap(); - let withdrawal_request = request.as_withdrawal_request().unwrap(); - assert_eq!(withdrawal_request.source_address, sender_address); - assert_eq!(withdrawal_request.validator_pubkey, validator_public_key); - assert_eq!(withdrawal_request.amount, u64::from_be_bytes(withdrawal_amount.into())); + // There should be exactly one entry with withdrawal requests + assert_eq!(requests.len(), 1); + assert_eq!(requests[0][0], 1); } #[test] @@ -1392,4 +1235,67 @@ mod tests { ), } } + + #[test] + fn test_balance_increment_not_duplicated() { + let chain_spec = Arc::new( + ChainSpecBuilder::from(&*MAINNET) + .shanghai_activated() + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) + .build(), + ); + + let withdrawal_recipient = address!("1000000000000000000000000000000000000000"); + + let mut db = 
 
     #[test]
@@ -1392,4 +1235,67 @@ mod tests {
             ),
         }
     }
+
+    #[test]
+    fn test_balance_increment_not_duplicated() {
+        let chain_spec = Arc::new(
+            ChainSpecBuilder::from(&*MAINNET)
+                .shanghai_activated()
+                .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0))
+                .build(),
+        );
+
+        let withdrawal_recipient = address!("1000000000000000000000000000000000000000");
+
+        let mut db = StateProviderTest::default();
+        let initial_balance = 100;
+        db.insert_account(
+            withdrawal_recipient,
+            Account { balance: U256::from(initial_balance), nonce: 1, bytecode_hash: None },
+            None,
+            HashMap::default(),
+        );
+
+        let withdrawal =
+            Withdrawal { index: 0, validator_index: 0, address: withdrawal_recipient, amount: 1 };
+
+        let header = Header { timestamp: 1, number: 1, ..Header::default() };
+
+        let block = BlockWithSenders {
+            block: Block {
+                header,
+                body: BlockBody {
+                    transactions: vec![],
+                    ommers: vec![],
+                    withdrawals: Some(vec![withdrawal].into()),
+                },
+            },
+            senders: vec![],
+        };
+
+        let provider = executor_provider(chain_spec);
+        let executor = provider.executor(StateProviderDatabase::new(&db));
+
+        let (tx, rx) = mpsc::channel();
+        let tx_clone = tx.clone();
+
+        let _output = executor
+            .execute_with_state_hook((&block, U256::ZERO).into(), move |state: &EvmState| {
+                if let Some(account) = state.get(&withdrawal_recipient) {
+                    let _ = tx_clone.send(account.info.balance);
+                }
+            })
+            .expect("Block execution should succeed");
+
+        drop(tx);
+        let balance_changes: Vec<U256> = rx.try_iter().collect();
+
+        if let Some(final_balance) = balance_changes.last() {
+            let expected_final_balance = U256::from(initial_balance) + U256::from(1_000_000_000); // initial + 1 Gwei in Wei
+            assert_eq!(
+                *final_balance, expected_final_balance,
+                "Final balance should match expected value after withdrawal"
+            );
+        }
+    }
 }
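The new test exercises `execute_with_state_hook`, which runs the supplied callback on each state update the executor commits; the mpsc channel simply records every balance the hook observes for the recipient. The expected value rests on withdrawal amounts being denominated in gwei. A small worked check of that conversion, spelled out with a local constant so the sketch stays self-contained:

    // Withdrawal `amount` fields are gwei; the ledger credit is amount * 1e9 wei.
    const GWEI_TO_WEI: u64 = 1_000_000_000;

    fn withdrawal_increment_wei(amount_gwei: u64) -> u128 {
        u128::from(amount_gwei) * u128::from(GWEI_TO_WEI)
    }

    // `amount: 1` in the withdrawal above => 100 + 1_000_000_000 wei final balance.
    assert_eq!(withdrawal_increment_wei(1), 1_000_000_000);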
diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs
index ed18a24fb19..509b61cb2ec 100644
--- a/crates/ethereum/evm/src/lib.rs
+++ b/crates/ethereum/evm/src/lib.rs
@@ -17,23 +17,24 @@
 extern crate alloc;
 
-use alloc::vec::Vec;
+use core::convert::Infallible;
+
+use alloc::{sync::Arc, vec::Vec};
+use alloy_consensus::Header;
 use alloy_primitives::{Address, Bytes, TxKind, U256};
 use reth_chainspec::{ChainSpec, Head};
 use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes};
-use reth_primitives::{transaction::FillTxEnv, Header, TransactionSigned};
+use reth_primitives::{transaction::FillTxEnv, TransactionSigned};
 use revm_primitives::{
     AnalysisKind, BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, Env, SpecId,
     TxEnv,
 };
-use std::sync::Arc;
 
 mod config;
+use alloy_eips::eip1559::INITIAL_BASE_FEE;
 pub use config::{revm_spec, revm_spec_by_timestamp_after_merge};
 use reth_ethereum_forks::EthereumHardfork;
-use reth_primitives::constants::EIP1559_INITIAL_BASE_FEE;
 
 pub mod execute;
-pub mod strategy;
 
 /// Ethereum DAO hardfork state change data.
 pub mod dao_fork;
@@ -54,13 +55,15 @@ impl EthEvmConfig {
     }
 
     /// Returns the chain spec associated with this configuration.
-    pub fn chain_spec(&self) -> &ChainSpec {
+    pub const fn chain_spec(&self) -> &Arc<ChainSpec> {
         &self.chain_spec
     }
 }
 
 impl ConfigureEvmEnv for EthEvmConfig {
     type Header = Header;
+    type Transaction = TransactionSigned;
+    type Error = Infallible;
 
     fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) {
         transaction.fill_tx_env(tx_env, sender);
@@ -133,7 +136,7 @@
         &self,
         parent: &Self::Header,
         attributes: NextBlockEnvAttributes,
-    ) -> (CfgEnvWithHandlerCfg, BlockEnv) {
+    ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> {
         // configure evm env based on parent block
         let cfg = CfgEnv::default().with_chain_id(self.chain_spec.chain().id());
 
@@ -165,7 +168,7 @@
             gas_limit *= U256::from(elasticity_multiplier);
 
             // set the base fee to the initial base fee from the EIP-1559 spec
-            basefee = Some(EIP1559_INITIAL_BASE_FEE)
+            basefee = Some(INITIAL_BASE_FEE)
         }
 
         let block_env = BlockEnv {
@@ -181,7 +184,7 @@
             blob_excess_gas_and_price,
         };
 
-        (CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec_id), block_env)
+        Ok((CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec_id), block_env))
     }
 }
 
@@ -194,30 +197,22 @@ impl ConfigureEvm for EthEvmConfig {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use alloy_consensus::{constants::KECCAK_EMPTY, Header};
     use alloy_genesis::Genesis;
     use alloy_primitives::{B256, U256};
     use reth_chainspec::{Chain, ChainSpec, MAINNET};
     use reth_evm::execute::ProviderError;
-    use reth_primitives::{
-        revm_primitives::{BlockEnv, CfgEnv, SpecId},
-        Header, KECCAK_EMPTY,
-    };
     use reth_revm::{
         db::{CacheDB, EmptyDBTyped},
         inspectors::NoOpInspector,
+        primitives::{BlockEnv, CfgEnv, SpecId},
         JournaledState,
     };
-    use revm_primitives::{CfgEnvWithHandlerCfg, EnvWithHandlerCfg, HandlerCfg};
+    use revm_primitives::{EnvWithHandlerCfg, HandlerCfg};
     use std::collections::HashSet;
 
     #[test]
     fn test_fill_cfg_and_block_env() {
-        // Create a new configuration environment
-        let mut cfg_env = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST);
-
-        // Create a default block environment
-        let mut block_env = BlockEnv::default();
-
         // Create a default header
         let header = Header::default();
 
@@ -236,12 +231,8 @@ mod tests {
 
         // Use the `EthEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec,
         // Header, and total difficulty
-        EthEvmConfig::new(Arc::new(chain_spec.clone())).fill_cfg_and_block_env(
-            &mut cfg_env,
-            &mut block_env,
-            &header,
-            total_difficulty,
-        );
+        let (cfg_env, _) = EthEvmConfig::new(Arc::new(chain_spec.clone()))
+            .cfg_and_block_env(&header, total_difficulty);
 
         // Assert that the chain ID in the `cfg_env` is correctly set to the chain ID of the
         // ChainSpec
diff --git a/crates/ethereum/evm/src/strategy.rs b/crates/ethereum/evm/src/strategy.rs
deleted file mode 100644
index 7a297be498a..00000000000
--- a/crates/ethereum/evm/src/strategy.rs
+++ /dev/null
@@ -1,1180 +0,0 @@
-//! 
Ethereum block execution strategy, - -use crate::{ - dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, - EthEvmConfig, -}; -use alloc::sync::Arc; -use alloy_consensus::Transaction as _; -use core::fmt::Display; -use reth_chainspec::{ChainSpec, EthereumHardfork, EthereumHardforks, MAINNET}; -use reth_consensus::ConsensusError; -use reth_ethereum_consensus::validate_block_post_execution; -use reth_evm::{ - execute::{ - BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, - BlockValidationError, ProviderError, - }, - system_calls::{OnStateHook, SystemCaller}, - ConfigureEvm, ConfigureEvmEnv, -}; -use reth_primitives::{BlockWithSenders, Header, Receipt, Request}; -use reth_revm::{ - db::{states::bundle_state::BundleRetention, BundleState}, - state_change::post_block_balance_increments, - Database, DatabaseCommit, State, -}; -use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256}; - -/// Factory for [`EthExecutionStrategy`]. -#[derive(Debug, Clone)] -pub struct EthExecutionStrategyFactory { - /// The chainspec - chain_spec: Arc, - /// How to create an EVM. - evm_config: EvmConfig, -} - -impl EthExecutionStrategyFactory { - /// Creates a new default ethereum executor strategy factory. - pub fn ethereum(chain_spec: Arc) -> Self { - Self::new(chain_spec.clone(), EthEvmConfig::new(chain_spec)) - } - - /// Returns a new factory for the mainnet. - pub fn mainnet() -> Self { - Self::ethereum(MAINNET.clone()) - } -} - -impl EthExecutionStrategyFactory { - /// Creates a new executor strategy factory. - pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config } - } -} - -impl BlockExecutionStrategyFactory for EthExecutionStrategyFactory { - type Strategy + Display>> = EthExecutionStrategy; - - fn create_strategy(&self, db: DB) -> Self::Strategy - where - DB: Database + Display>, - { - let state = - State::builder().with_database(db).with_bundle_update().without_state_clear().build(); - EthExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) - } -} - -/// Block execution strategy for Ethereum. -#[allow(missing_debug_implementations)] -pub struct EthExecutionStrategy { - /// The chainspec - chain_spec: Arc, - /// How to create an EVM. - evm_config: EvmConfig, - /// Current state for block execution. - state: State, - /// Utility to call system smart contracts. - system_caller: SystemCaller, -} - -impl EthExecutionStrategy { - /// Creates a new [`EthExecutionStrategy`] - pub fn new(state: State, chain_spec: Arc, evm_config: EthEvmConfig) -> Self { - let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone()); - Self { state, chain_spec, evm_config, system_caller } - } -} - -impl EthExecutionStrategy -where - DB: Database + Display>, - EvmConfig: ConfigureEvm
, -{ - /// Configures a new evm configuration and block environment for the given block. - /// - /// # Caution - /// - /// This does not initialize the tx environment. - fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); - - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) - } -} - -impl BlockExecutionStrategy for EthExecutionStrategy -where - DB: Database + Display>, -{ - type Error = BlockExecutionError; - - fn apply_pre_execution_changes( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), Self::Error> { - // Set state clear flag if the block is after the Spurious Dragon hardfork. - let state_clear_flag = - (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); - self.state.set_state_clear_flag(state_clear_flag); - - let env = self.evm_env_for_block(&block.header, total_difficulty); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - - self.system_caller.apply_pre_execution_changes(block, &mut evm)?; - - Ok(()) - } - - fn execute_transactions( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), Self::Error> { - let env = self.evm_env_for_block(&block.header, total_difficulty); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - - let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.transactions.len()); - for (sender, transaction) in block.transactions_with_sender() { - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = block.header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - - self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); - - // Execute transaction. - let result_and_state = evm.transact().map_err(move |err| { - let new_err = err.map_db_err(|e| e.into()); - // Ensure hash is calculated for error log, if not already done - BlockValidationError::EVM { - hash: transaction.recalculate_hash(), - error: Box::new(new_err), - } - })?; - self.system_caller.on_state(&result_and_state); - let ResultAndState { result, state } = result_and_state; - evm.db_mut().commit(state); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push( - #[allow(clippy::needless_update)] // side-effect of optimism fields - Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. 
- success: result.is_success(), - cumulative_gas_used, - // convert to reth log - logs: result.into_logs(), - ..Default::default() - }, - ); - } - Ok((receipts, cumulative_gas_used)) - } - - fn apply_post_execution_changes( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - receipts: &[Receipt], - ) -> Result, Self::Error> { - let env = self.evm_env_for_block(&block.header, total_difficulty); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - - let requests = if self.chain_spec.is_prague_active_at_timestamp(block.timestamp) { - // Collect all EIP-6110 deposits - let deposit_requests = - crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, receipts)?; - - let post_execution_requests = - self.system_caller.apply_post_execution_changes(&mut evm)?; - - [deposit_requests, post_execution_requests].concat() - } else { - vec![] - }; - drop(evm); - - let mut balance_increments = - post_block_balance_increments(&self.chain_spec, block, total_difficulty); - - // Irregular state change at Ethereum DAO hardfork - if self.chain_spec.fork(EthereumHardfork::Dao).transitions_at_block(block.number) { - // drain balances from hardcoded addresses. - let drained_balance: u128 = self - .state - .drain_balances(DAO_HARDKFORK_ACCOUNTS) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)? - .into_iter() - .sum(); - - // return balance to DAO beneficiary. - *balance_increments.entry(DAO_HARDFORK_BENEFICIARY).or_default() += drained_balance; - } - // increment balances - self.state - .increment_balances(balance_increments) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - - Ok(requests) - } - - fn state_ref(&self) -> &State { - &self.state - } - - fn state_mut(&mut self) -> &mut State { - &mut self.state - } - - fn with_state_hook(&mut self, hook: Option>) { - self.system_caller.with_state_hook(hook); - } - - fn finish(&mut self) -> BundleState { - self.state.merge_transitions(BundleRetention::Reverts); - self.state.take_bundle() - } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - receipts: &[Receipt], - requests: &[Request], - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec.clone(), receipts, requests) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_consensus::{TxLegacy, EMPTY_ROOT_HASH}; - use alloy_eips::{ - eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, - eip4788::{BEACON_ROOTS_ADDRESS, BEACON_ROOTS_CODE, SYSTEM_ADDRESS}, - eip7002::{WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, WITHDRAWAL_REQUEST_PREDEPLOY_CODE}, - }; - use alloy_primitives::{b256, fixed_bytes, keccak256, Bytes, TxKind, B256}; - use reth_chainspec::{ChainSpecBuilder, ForkCondition}; - use reth_evm::execute::{ - BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider, Executor, - }; - use reth_execution_types::BlockExecutionOutput; - use reth_primitives::{ - constants::ETH_TO_WEI, public_key_to_address, Account, Block, BlockBody, Transaction, - }; - use reth_revm::{ - database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, - }; - use reth_testing_utils::generators::{self, sign_tx_with_key_pair}; - use revm_primitives::BLOCKHASH_SERVE_WINDOW; - use secp256k1::{Keypair, Secp256k1}; - use std::collections::HashMap; - - fn create_state_provider_with_beacon_root_contract() -> StateProviderTest { - let mut db = StateProviderTest::default(); - - let beacon_root_contract_account = Account { - balance: U256::ZERO, - bytecode_hash: 
Some(keccak256(BEACON_ROOTS_CODE.clone())), - nonce: 1, - }; - - db.insert_account( - BEACON_ROOTS_ADDRESS, - beacon_root_contract_account, - Some(BEACON_ROOTS_CODE.clone()), - HashMap::default(), - ); - - db - } - - fn create_state_provider_with_withdrawal_requests_contract() -> StateProviderTest { - let mut db = StateProviderTest::default(); - - let withdrawal_requests_contract_account = Account { - nonce: 1, - balance: U256::ZERO, - bytecode_hash: Some(keccak256(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone())), - }; - - db.insert_account( - WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, - withdrawal_requests_contract_account, - Some(WITHDRAWAL_REQUEST_PREDEPLOY_CODE.clone()), - HashMap::default(), - ); - - db - } - - fn executor_provider( - chain_spec: Arc, - ) -> BasicBlockExecutorProvider { - let strategy_factory = - EthExecutionStrategyFactory::new(chain_spec.clone(), EthEvmConfig::new(chain_spec)); - - BasicBlockExecutorProvider::new(strategy_factory) - } - - #[test] - fn eip_4788_non_genesis_call() { - let mut header = - Header { timestamp: 1, number: 1, excess_blob_gas: Some(0), ..Header::default() }; - - let db = create_state_provider_with_beacon_root_contract(); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = executor_provider(chain_spec); - - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // attempt to execute a block without parent beacon block root, expect err - let err = executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: BlockBody { - transactions: vec![], - ommers: vec![], - withdrawals: None, - requests: None, - }, - }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect_err( - "Executing cancun block without parent beacon block root field should fail", - ); - - assert_eq!( - err.as_validation().unwrap().clone(), - BlockValidationError::MissingParentBeaconBlockRoot - ); - - // fix header, set a gas limit - header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - - // Now execute a block with the fixed header, ensure that it does not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { - header: header.clone(), - body: BlockBody { - transactions: vec![], - ommers: vec![], - withdrawals: None, - requests: None, - }, - }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .unwrap(); - - // check the actual storage of the contract - it should be: - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be - // header.timestamp - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH - // // should be parent_beacon_block_root - let history_buffer_length = 8191u64; - let timestamp_index = header.timestamp % history_buffer_length; - let parent_beacon_block_root_index = - timestamp_index % history_buffer_length + history_buffer_length; - - let timestamp_storage = executor.with_state_mut(|state| { - state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() - }); - assert_eq!(timestamp_storage, U256::from(header.timestamp)); - - // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor.with_state_mut(|state| { - state - .storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)) - .expect("storage value should exist") - }); - 
assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); - } - - #[test] - fn eip_4788_no_code_cancun() { - // This test ensures that we "silently fail" when cancun is active and there is no code at - // // BEACON_ROOTS_ADDRESS - let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - excess_blob_gas: Some(0), - ..Header::default() - }; - - let db = StateProviderTest::default(); - - // DON'T deploy the contract at genesis - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = executor_provider(chain_spec); - - // attempt to execute an empty block with parent beacon block root, this should not fail - provider - .batch_executor(StateProviderDatabase::new(&db)) - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { - header, - body: BlockBody { - transactions: vec![], - ommers: vec![], - withdrawals: None, - requests: None, - }, - }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while cancun is active should not fail", - ); - } - - #[test] - fn eip_4788_empty_account_call() { - // This test ensures that we do not increment the nonce of an empty SYSTEM_ADDRESS account - // // during the pre-block call - - let mut db = create_state_provider_with_beacon_root_contract(); - - // insert an empty SYSTEM_ADDRESS - db.insert_account(SYSTEM_ADDRESS, Account::default(), None, HashMap::default()); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = executor_provider(chain_spec); - - // construct the header for block one - let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - excess_blob_gas: Some(0), - ..Header::default() - }; - - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { - header, - body: BlockBody { - transactions: vec![], - ommers: vec![], - withdrawals: None, - requests: None, - }, - }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while cancun is active should not fail", - ); - - // ensure that the nonce of the system address account has not changed - let nonce = - executor.with_state_mut(|state| state.basic(SYSTEM_ADDRESS).unwrap().unwrap().nonce); - assert_eq!(nonce, 0); - } - - #[test] - fn eip_4788_genesis_call() { - let db = create_state_provider_with_beacon_root_contract(); - - // activate cancun at genesis - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)) - .build(), - ); - - let mut header = chain_spec.genesis_header().clone(); - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // attempt to execute the genesis block with non-zero parent beacon block root, expect err - header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); - let _err = executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header: header.clone(), body: 
Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect_err( - "Executing genesis cancun block with non-zero parent beacon block root field - should fail", - ); - - // fix header - header.parent_beacon_block_root = Some(B256::ZERO); - - // now try to process the genesis block again, this time ensuring that a system contract - // call does not occur - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .unwrap(); - - // there is no system contract call so there should be NO STORAGE CHANGES - // this means we'll check the transition state - let transition_state = executor.with_state_mut(|state| { - state - .transition_state - .take() - .expect("the evm should be initialized with bundle updates") - }); - - // assert that it is the default (empty) transition state - assert_eq!(transition_state, TransitionState::default()); - } - - #[test] - fn eip_4788_high_base_fee() { - // This test ensures that if we have a base fee, then we don't return an error when the - // system contract is called, due to the gas price being less than the base fee. - let header = Header { - timestamp: 1, - number: 1, - parent_beacon_block_root: Some(B256::with_last_byte(0x69)), - base_fee_per_gas: Some(u64::MAX), - excess_blob_gas: Some(0), - ..Header::default() - }; - - let db = create_state_provider_with_beacon_root_contract(); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = executor_provider(chain_spec); - - // execute header - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // Now execute a block with the fixed header, ensure that it does not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header: header.clone(), body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .unwrap(); - - // check the actual storage of the contract - it should be: - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH should be - // header.timestamp - // * The storage value at header.timestamp % HISTORY_BUFFER_LENGTH + HISTORY_BUFFER_LENGTH - // // should be parent_beacon_block_root - let history_buffer_length = 8191u64; - let timestamp_index = header.timestamp % history_buffer_length; - let parent_beacon_block_root_index = - timestamp_index % history_buffer_length + history_buffer_length; - - // get timestamp storage and compare - let timestamp_storage = executor.with_state_mut(|state| { - state.storage(BEACON_ROOTS_ADDRESS, U256::from(timestamp_index)).unwrap() - }); - assert_eq!(timestamp_storage, U256::from(header.timestamp)); - - // get parent beacon block root storage and compare - let parent_beacon_block_root_storage = executor.with_state_mut(|state| { - state.storage(BEACON_ROOTS_ADDRESS, U256::from(parent_beacon_block_root_index)).unwrap() - }); - assert_eq!(parent_beacon_block_root_storage, U256::from(0x69)); - } - - /// Create a state provider with blockhashes and the EIP-2935 system contract. 
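The `eip_4788_*` tests above all read back the same two slots of the beacon-roots contract. A minimal sketch of that slot arithmetic as the tests use it, with `HISTORY_BUFFER_LENGTH` being the EIP-4788 ring-buffer size of 8191:

    const HISTORY_BUFFER_LENGTH: u64 = 8191;

    // Returns (slot holding the timestamp, slot holding the parent beacon
    // block root) for a given block timestamp, per the EIP-4788 layout.
    fn beacon_root_slots(timestamp: u64) -> (u64, u64) {
        let timestamp_index = timestamp % HISTORY_BUFFER_LENGTH;
        (timestamp_index, timestamp_index + HISTORY_BUFFER_LENGTH)
    }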
- fn create_state_provider_with_block_hashes(latest_block: u64) -> StateProviderTest { - let mut db = StateProviderTest::default(); - for block_number in 0..=latest_block { - db.insert_block_hash(block_number, keccak256(block_number.to_string())); - } - - let blockhashes_contract_account = Account { - balance: U256::ZERO, - bytecode_hash: Some(keccak256(HISTORY_STORAGE_CODE.clone())), - nonce: 1, - }; - - db.insert_account( - HISTORY_STORAGE_ADDRESS, - blockhashes_contract_account, - Some(HISTORY_STORAGE_CODE.clone()), - HashMap::default(), - ); - - db - } - #[test] - fn eip_2935_pre_fork() { - let db = create_state_provider_with_block_hashes(1); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Never) - .build(), - ); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // construct the header for block one - let header = Header { timestamp: 1, number: 1, ..Header::default() }; - - // attempt to execute an empty block, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // ensure that the block hash was *not* written to storage, since this is before the fork - // was activated - // - // we load the account first, because revm expects it to be - // loaded - executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); - assert!(executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) - .unwrap() - .is_zero())); - } - - #[test] - fn eip_2935_fork_activation_genesis() { - let db = create_state_provider_with_block_hashes(0); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) - .build(), - ); - - let header = chain_spec.genesis_header().clone(); - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // attempt to execute genesis block, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // ensure that the block hash was *not* written to storage, since there are no blocks - // preceding genesis - // - // we load the account first, because revm expects it to be - // loaded - executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); - assert!(executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) - .unwrap() - .is_zero())); - } - - #[test] - fn eip_2935_fork_activation_within_window_bounds() { - let fork_activation_block = (BLOCKHASH_SERVE_WINDOW - 10) as u64; - let db = create_state_provider_with_block_hashes(fork_activation_block); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(1)) - .build(), - ); - - let header = Header { - parent_hash: B256::random(), - timestamp: 1, - number: fork_activation_block, - requests_root: Some(EMPTY_ROOT_HASH), - 
..Header::default() - }; - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // attempt to execute the fork activation block, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // the hash for the ancestor of the fork activation block should be present - assert!(executor - .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); - assert_ne!( - executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block - 1)) - .unwrap()), - U256::ZERO - ); - - // the hash of the block itself should not be in storage - assert!(executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::from(fork_activation_block)) - .unwrap() - .is_zero())); - } - - #[test] - fn eip_2935_fork_activation_outside_window_bounds() { - let fork_activation_block = (BLOCKHASH_SERVE_WINDOW + 256) as u64; - let db = create_state_provider_with_block_hashes(fork_activation_block); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(1)) - .build(), - ); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - let header = Header { - parent_hash: B256::random(), - timestamp: 1, - number: fork_activation_block, - requests_root: Some(EMPTY_ROOT_HASH), - ..Header::default() - }; - - // attempt to execute the fork activation block, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // the hash for the ancestor of the fork activation block should be present - assert!(executor - .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); - assert_ne!( - executor.with_state_mut(|state| state - .storage( - HISTORY_STORAGE_ADDRESS, - U256::from(fork_activation_block % BLOCKHASH_SERVE_WINDOW as u64 - 1) - ) - .unwrap()), - U256::ZERO - ); - } - - #[test] - fn eip_2935_state_transition_inside_fork() { - let db = create_state_provider_with_block_hashes(2); - - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) - .build(), - ); - - let mut header = chain_spec.genesis_header().clone(); - header.requests_root = Some(EMPTY_ROOT_HASH); - let header_hash = header.hash_slow(); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // attempt to execute the genesis block, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // nothing should be written as the genesis has no ancestors - // - // we load the account first, because revm expects it to be - // loaded - 
executor.with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap()); - assert!(executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) - .unwrap() - .is_zero())); - - // attempt to execute block 1, this should not fail - let header = Header { - parent_hash: header_hash, - timestamp: 1, - number: 1, - requests_root: Some(EMPTY_ROOT_HASH), - ..Header::default() - }; - let header_hash = header.hash_slow(); - - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // the block hash of genesis should now be in storage, but not block 1 - assert!(executor - .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); - assert_ne!( - executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) - .unwrap()), - U256::ZERO - ); - assert!(executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::from(1)) - .unwrap() - .is_zero())); - - // attempt to execute block 2, this should not fail - let header = Header { - parent_hash: header_hash, - timestamp: 1, - number: 2, - requests_root: Some(EMPTY_ROOT_HASH), - ..Header::default() - }; - - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { header, body: Default::default() }, - senders: vec![], - }, - U256::ZERO, - ) - .into(), - ) - .expect( - "Executing a block with no transactions while Prague is active should not fail", - ); - - // the block hash of genesis and block 1 should now be in storage, but not block 2 - assert!(executor - .with_state_mut(|state| state.basic(HISTORY_STORAGE_ADDRESS).unwrap().is_some())); - assert_ne!( - executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::ZERO) - .unwrap()), - U256::ZERO - ); - assert_ne!( - executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::from(1)) - .unwrap()), - U256::ZERO - ); - assert!(executor.with_state_mut(|state| state - .storage(HISTORY_STORAGE_ADDRESS, U256::from(2)) - .unwrap() - .is_zero())); - } - - #[test] - fn eip_7002() { - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) - .build(), - ); - - let mut db = create_state_provider_with_withdrawal_requests_contract(); - - let secp = Secp256k1::new(); - let sender_key_pair = Keypair::new(&secp, &mut generators::rng()); - let sender_address = public_key_to_address(sender_key_pair.public_key()); - - db.insert_account( - sender_address, - Account { nonce: 1, balance: U256::from(ETH_TO_WEI), bytecode_hash: None }, - None, - HashMap::default(), - ); - - // https://github.com/lightclient/7002asm/blob/e0d68e04d15f25057af7b6d180423d94b6b3bdb3/test/Contract.t.sol.in#L49-L64 - let validator_public_key = fixed_bytes!("111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); - let withdrawal_amount = fixed_bytes!("2222222222222222"); - let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); - assert_eq!(input.len(), 56); - - let mut header = chain_spec.genesis_header().clone(); - header.gas_limit = 1_500_000; - header.gas_used = 134_807; - header.receipts_root = - b256!("b31a3e47b902e9211c4d349af4e4c5604ce388471e79ca008907ae4616bb0ed3"); - - let tx = sign_tx_with_key_pair( - 
sender_key_pair, - Transaction::Legacy(TxLegacy { - chain_id: Some(chain_spec.chain.id()), - nonce: 1, - gas_price: header.base_fee_per_gas.unwrap().into(), - gas_limit: 134_807, - to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), - // `MIN_WITHDRAWAL_REQUEST_FEE` - value: U256::from(1), - input, - }), - ); - - let provider = executor_provider(chain_spec); - - let executor = provider.executor(StateProviderDatabase::new(&db)); - - let BlockExecutionOutput { receipts, requests, .. } = executor - .execute( - ( - &Block { - header, - body: BlockBody { transactions: vec![tx], ..Default::default() }, - } - .with_recovered_senders() - .unwrap(), - U256::ZERO, - ) - .into(), - ) - .unwrap(); - - let receipt = receipts.first().unwrap(); - assert!(receipt.success); - - let request = requests.first().unwrap(); - let withdrawal_request = request.as_withdrawal_request().unwrap(); - assert_eq!(withdrawal_request.source_address, sender_address); - assert_eq!(withdrawal_request.validator_pubkey, validator_public_key); - assert_eq!(withdrawal_request.amount, u64::from_be_bytes(withdrawal_amount.into())); - } - - #[test] - fn block_gas_limit_error() { - // Create a chain specification with fork conditions set for Prague - let chain_spec = Arc::new( - ChainSpecBuilder::from(&*MAINNET) - .shanghai_activated() - .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) - .build(), - ); - - // Create a state provider with the withdrawal requests contract pre-deployed - let mut db = create_state_provider_with_withdrawal_requests_contract(); - - // Initialize Secp256k1 for key pair generation - let secp = Secp256k1::new(); - // Generate a new key pair for the sender - let sender_key_pair = Keypair::new(&secp, &mut generators::rng()); - // Get the sender's address from the public key - let sender_address = public_key_to_address(sender_key_pair.public_key()); - - // Insert the sender account into the state with a nonce of 1 and a balance of 1 ETH in Wei - db.insert_account( - sender_address, - Account { nonce: 1, balance: U256::from(ETH_TO_WEI), bytecode_hash: None }, - None, - HashMap::default(), - ); - - // Define the validator public key and withdrawal amount as fixed bytes - let validator_public_key = fixed_bytes!("111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"); - let withdrawal_amount = fixed_bytes!("2222222222222222"); - // Concatenate the validator public key and withdrawal amount into a single byte array - let input: Bytes = [&validator_public_key[..], &withdrawal_amount[..]].concat().into(); - // Ensure the input length is 56 bytes - assert_eq!(input.len(), 56); - - // Create a genesis block header with a specified gas limit and gas used - let mut header = chain_spec.genesis_header().clone(); - header.gas_limit = 1_500_000; - header.gas_used = 134_807; - header.receipts_root = - b256!("b31a3e47b902e9211c4d349af4e4c5604ce388471e79ca008907ae4616bb0ed3"); - - // Create a transaction with a gas limit higher than the block gas limit - let tx = sign_tx_with_key_pair( - sender_key_pair, - Transaction::Legacy(TxLegacy { - chain_id: Some(chain_spec.chain.id()), - nonce: 1, - gas_price: header.base_fee_per_gas.unwrap().into(), - gas_limit: 2_500_000, // higher than block gas limit - to: TxKind::Call(WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS), - value: U256::from(1), - input, - }), - ); - - // Create an executor from the state provider - let executor = executor_provider(chain_spec).executor(StateProviderDatabase::new(&db)); - - // Execute the block and 
capture the result - let exec_result = executor.execute( - ( - &Block { header, body: BlockBody { transactions: vec![tx], ..Default::default() } } - .with_recovered_senders() - .unwrap(), - U256::ZERO, - ) - .into(), - ); - - // Check if the execution result is an error and assert the specific error type - match exec_result { - Ok(_) => panic!("Expected block gas limit error"), - Err(err) => assert_eq!( - *err.as_validation().unwrap(), - BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: 2_500_000, - block_available_gas: 1_500_000, - } - ), - } - } -} diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 7a323f91d87..f5fe1dac234 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -21,15 +21,16 @@ reth-tracing.workspace = true reth-provider.workspace = true reth-transaction-pool.workspace = true reth-network.workspace = true +reth-evm.workspace = true reth-evm-ethereum.workspace = true reth-consensus.workspace = true -reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true reth-rpc.workspace = true reth-node-api.workspace = true reth-chainspec.workspace = true reth-primitives.workspace = true reth-revm = { workspace = true, features = ["std"] } +reth-trie-db.workspace = true # revm with required ethereum features revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } @@ -38,21 +39,50 @@ revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } eyre.workspace = true [dev-dependencies] -reth.workspace = true reth-chainspec.workspace = true reth-db.workspace = true reth-exex.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true +reth-payload-primitives.workspace = true reth-e2e-test-utils.workspace = true +reth-rpc-eth-api.workspace = true reth-tasks.workspace = true -futures.workspace = true + alloy-primitives.workspace = true +alloy-consensus.workspace = true +alloy-provider.workspace = true alloy-genesis.workspace = true +alloy-signer.workspace = true +alloy-eips.workspace = true +alloy-sol-types.workspace = true +alloy-contract.workspace = true +alloy-rpc-types-beacon.workspace = true +alloy-rpc-types-engine.workspace = true +alloy-rpc-types-eth.workspace = true + +futures.workspace = true tokio.workspace = true -futures-util.workspace = true serde_json.workspace = true +rand.workspace = true [features] default = [] -test-utils = ["reth-node-builder/test-utils"] +js-tracer = [ + "reth-node-builder/js-tracer" +] +test-utils = [ + "reth-node-builder/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-network/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils", + "reth-evm/test-utils", +] diff --git a/crates/ethereum/node/src/evm.rs b/crates/ethereum/node/src/evm.rs index d710d8d8d45..bcdcaac6bfa 100644 --- a/crates/ethereum/node/src/evm.rs +++ b/crates/ethereum/node/src/evm.rs @@ -1,6 +1,8 @@ //! 
Ethereum EVM support #[doc(inline)] -pub use reth_evm_ethereum::execute::EthExecutorProvider; +pub use reth_evm::execute::BasicBlockExecutorProvider; +#[doc(inline)] +pub use reth_evm_ethereum::execute::{EthExecutionStrategyFactory, EthExecutorProvider}; #[doc(inline)] pub use reth_evm_ethereum::EthEvmConfig; diff --git a/crates/ethereum/node/src/lib.rs b/crates/ethereum/node/src/lib.rs index 37ebc33c22b..8dae6031577 100644 --- a/crates/ethereum/node/src/lib.rs +++ b/crates/ethereum/node/src/lib.rs @@ -1,4 +1,7 @@ //! Standalone crate for ethereum-specific Reth configuration and builder types. +//! +//! # features +//! - `js-tracer`: Enable the `JavaScript` tracer for the `debug_trace` endpoints #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", @@ -14,7 +17,9 @@ use revm as _; pub use reth_ethereum_engine_primitives::EthEngineTypes; pub mod evm; -pub use evm::{EthEvmConfig, EthExecutorProvider}; +pub use evm::{ + BasicBlockExecutorProvider, EthEvmConfig, EthExecutionStrategyFactory, EthExecutorProvider, +}; pub mod node; pub use node::EthereumNode; diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 82f313fbb0b..54707e69b26 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -2,37 +2,42 @@ use std::sync::Arc; -use reth_auto_seal_consensus::AutoSealConsensus; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_ethereum_engine_primitives::{ - EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, EthereumEngineValidator, + EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, +}; +use reth_evm::execute::BasicBlockExecutorProvider; +use reth_evm_ethereum::execute::EthExecutionStrategyFactory; +use reth_network::{NetworkHandle, PeersInfo}; +use reth_node_api::{ + AddOnsContext, ConfigureEvm, FullNodeComponents, HeaderTy, NodeTypesWithDB, TxTy, }; -use reth_evm_ethereum::execute::EthExecutorProvider; -use reth_network::NetworkHandle; -use reth_node_api::{ConfigureEvm, EngineValidator, FullNodeComponents, NodeTypesWithDB}; use reth_node_builder::{ components::{ - ComponentsBuilder, ConsensusBuilder, EngineValidatorBuilder, ExecutorBuilder, - NetworkBuilder, PayloadServiceBuilder, PoolBuilder, + ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, + PayloadServiceBuilder, PoolBuilder, }, node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, - rpc::RpcAddOns, + rpc::{EngineValidatorBuilder, RpcAddOns}, BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::Header; -use reth_provider::CanonStateSubscriptions; +use reth_primitives::{EthPrimitives, PooledTransactionsElement}; +use reth_provider::{CanonStateSubscriptions, EthStorage}; use reth_rpc::EthApi; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ - blobstore::DiskFileBlobStore, EthTransactionPool, TransactionPool, + blobstore::DiskFileBlobStore, EthTransactionPool, PoolTransaction, TransactionPool, TransactionValidationTaskExecutor, }; +use reth_trie_db::MerklePatriciaTrie; use crate::{EthEngineTypes, EthEvmConfig}; +pub use reth_ethereum_engine_primitives::EthereumEngineValidator; + /// Type configuration for a regular Ethereum node. 
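The re-exports above are how downstream code is expected to assemble an executor after this change: the generic `BasicBlockExecutorProvider` is parameterized with the Ethereum strategy factory, exactly as the executor builder hunk further below does. A condensed sketch, assuming a `chain_spec: Arc<ChainSpec>` is already in scope:

    // Wire the strategy factory into the generic provider (mirrors the
    // EthereumExecutorBuilder change in node.rs below).
    let evm_config = EthEvmConfig::new(chain_spec.clone());
    let strategy_factory = EthExecutionStrategyFactory::new(chain_spec, evm_config.clone());
    let executor = BasicBlockExecutorProvider::new(strategy_factory);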
#[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] @@ -47,10 +52,9 @@ impl EthereumNode { EthereumNetworkBuilder, EthereumExecutorBuilder, EthereumConsensusBuilder, - EthereumEngineValidatorBuilder, > where - Node: FullNodeTypes>, + Node: FullNodeTypes>, ::Engine: PayloadTypes< BuiltPayload = EthBuiltPayload, PayloadAttributes = EthPayloadAttributes, @@ -64,13 +68,14 @@ impl EthereumNode { .network(EthereumNetworkBuilder::default()) .executor(EthereumExecutorBuilder::default()) .consensus(EthereumConsensusBuilder::default()) - .engine_validator(EthereumEngineValidatorBuilder::default()) } } impl NodeTypes for EthereumNode { - type Primitives = (); + type Primitives = EthPrimitives; type ChainSpec = ChainSpec; + type StateCommitment = MerklePatriciaTrie; + type Storage = EthStorage; } impl NodeTypesWithEngine for EthereumNode { @@ -86,11 +91,18 @@ pub type EthereumAddOns = RpcAddOns< NetworkHandle, ::Evm, >, + EthereumEngineValidatorBuilder, >; impl Node for EthereumNode where - Types: NodeTypesWithDB + NodeTypesWithEngine, + Types: NodeTypesWithDB + + NodeTypesWithEngine< + Engine = EthEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Storage = EthStorage, + >, N: FullNodeTypes, { type ComponentsBuilder = ComponentsBuilder< @@ -100,7 +112,6 @@ where EthereumNetworkBuilder, EthereumExecutorBuilder, EthereumConsensusBuilder, - EthereumEngineValidatorBuilder, >; type AddOns = EthereumAddOns< @@ -123,11 +134,11 @@ pub struct EthereumExecutorBuilder; impl ExecutorBuilder for EthereumExecutorBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, { type EVM = EthEvmConfig; - type Executor = EthExecutorProvider; + type Executor = BasicBlockExecutorProvider; async fn build_evm( self, @@ -135,7 +146,8 @@ where ) -> eyre::Result<(Self::EVM, Self::Executor)> { let chain_spec = ctx.chain_spec(); let evm_config = EthEvmConfig::new(ctx.chain_spec()); - let executor = EthExecutorProvider::new(chain_spec, evm_config.clone()); + let strategy_factory = EthExecutionStrategyFactory::new(chain_spec, evm_config.clone()); + let executor = BasicBlockExecutorProvider::new(strategy_factory); Ok((evm_config, executor)) } @@ -153,7 +165,7 @@ pub struct EthereumPoolBuilder { impl PoolBuilder for EthereumPoolBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, { type Pool = EthTransactionPool; @@ -229,10 +241,12 @@ impl EthereumPayloadBuilder { pool: Pool, ) -> eyre::Result> where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, - Evm: ConfigureEvm
, - Pool: TransactionPool + Unpin + 'static, + Evm: ConfigureEvm
, Transaction = TxTy>, + Pool: TransactionPool>> + + Unpin + + 'static, Types::Engine: PayloadTypes< BuiltPayload = EthBuiltPayload, PayloadAttributes = EthPayloadAttributes, @@ -267,9 +281,11 @@ impl EthereumPayloadBuilder { impl PayloadServiceBuilder for EthereumPayloadBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, Types::Engine: PayloadTypes< BuiltPayload = EthBuiltPayload, PayloadAttributes = EthPayloadAttributes, @@ -293,8 +309,14 @@ pub struct EthereumNetworkBuilder { impl NetworkBuilder for EthereumNetworkBuilder where - Node: FullNodeTypes>, - Pool: TransactionPool + Unpin + 'static, + Node: FullNodeTypes>, + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus = TxTy, + Pooled = PooledTransactionsElement, + >, + > + Unpin + + 'static, { async fn build_network( self, @@ -303,7 +325,7 @@ where ) -> eyre::Result { let network = ctx.network_builder().await?; let handle = ctx.start_network(network, pool); - + info!(target: "reth::cli", enode=%handle.local_node_record(), "P2P networking initialized"); Ok(handle) } } @@ -316,16 +338,12 @@ pub struct EthereumConsensusBuilder { impl ConsensusBuilder for EthereumConsensusBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { - type Consensus = Arc; + type Consensus = Arc; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { - if ctx.is_dev() { - Ok(Arc::new(AutoSealConsensus::new(ctx.chain_spec()))) - } else { - Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) - } + Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) } } @@ -336,13 +354,16 @@ pub struct EthereumEngineValidatorBuilder; impl EngineValidatorBuilder for EthereumEngineValidatorBuilder where - Types: NodeTypesWithEngine, - Node: FullNodeTypes, - EthereumEngineValidator: EngineValidator, + Types: NodeTypesWithEngine< + ChainSpec = ChainSpec, + Engine = EthEngineTypes, + Primitives = EthPrimitives, + >, + Node: FullNodeComponents, { type Validator = EthereumEngineValidator; - async fn build_validator(self, ctx: &BuilderContext) -> eyre::Result { - Ok(EthereumEngineValidator::new(ctx.chain_spec())) + async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result { + Ok(EthereumEngineValidator::new(ctx.config.chain.clone())) } } diff --git a/crates/ethereum/node/tests/e2e/blobs.rs b/crates/ethereum/node/tests/e2e/blobs.rs index 9390b34f444..11181051450 100644 --- a/crates/ethereum/node/tests/e2e/blobs.rs +++ b/crates/ethereum/node/tests/e2e/blobs.rs @@ -1,21 +1,17 @@ -use std::sync::Arc; - +use crate::utils::eth_payload_attributes; +use alloy_consensus::constants::MAINNET_GENESIS_HASH; use alloy_genesis::Genesis; -use alloy_primitives::b256; -use reth::{ - args::RpcServerArgs, - builder::{NodeBuilder, NodeConfig, NodeHandle}, - rpc::types::engine::PayloadStatusEnum, - tasks::TaskManager, -}; +use alloy_rpc_types_engine::PayloadStatusEnum; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{ node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, }; +use reth_node_builder::{NodeBuilder, NodeHandle}; +use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; +use reth_tasks::TaskManager; use reth_transaction_pool::TransactionPool; - -use crate::utils::eth_payload_attributes; +use std::sync::Arc; #[tokio::test] async fn can_handle_blobs() -> eyre::Result<()> { @@ -41,7 +37,7 @@ async fn 
can_handle_blobs() -> eyre::Result<()> { .launch() .await?; - let mut node = NodeTestContext::new(node).await?; + let mut node = NodeTestContext::new(node, eth_payload_attributes).await?; let wallets = Wallet::new(2).gen(); let blob_wallet = wallets.first().unwrap(); @@ -51,7 +47,7 @@ async fn can_handle_blobs() -> eyre::Result<()> { let raw_tx = TransactionTestContext::transfer_tx_bytes(1, second_wallet.clone()).await; let tx_hash = node.rpc.inject_tx(raw_tx).await?; // build payload with normal tx - let (payload, attributes) = node.new_payload(eth_payload_attributes).await?; + let (payload, attributes) = node.new_payload().await?; // clean the pool node.inner.pool.remove_transactions(vec![tx_hash]); @@ -64,28 +60,24 @@ async fn can_handle_blobs() -> eyre::Result<()> { // fetch it from rpc let envelope = node.rpc.envelope_by_hash(blob_tx_hash).await?; // validate sidecar - let versioned_hashes = TransactionTestContext::validate_sidecar(envelope); + TransactionTestContext::validate_sidecar(envelope); // build a payload - let (blob_payload, blob_attr) = node.new_payload(eth_payload_attributes).await?; + let (blob_payload, blob_attr) = node.new_payload().await?; // submit the blob payload - let blob_block_hash = node - .engine_api - .submit_payload(blob_payload, blob_attr, PayloadStatusEnum::Valid, versioned_hashes.clone()) - .await?; - - let genesis_hash = b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); + let blob_block_hash = + node.engine_api.submit_payload(blob_payload, blob_attr, PayloadStatusEnum::Valid).await?; let (_, _) = tokio::join!( // send fcu with blob hash - node.engine_api.update_forkchoice(genesis_hash, blob_block_hash), + node.engine_api.update_forkchoice(MAINNET_GENESIS_HASH, blob_block_hash), // send fcu with normal hash - node.engine_api.update_forkchoice(genesis_hash, payload.block().hash()) + node.engine_api.update_forkchoice(MAINNET_GENESIS_HASH, payload.block().hash()) ); // submit normal payload - node.engine_api.submit_payload(payload, attributes, PayloadStatusEnum::Valid, vec![]).await?; + node.engine_api.submit_payload(payload, attributes, PayloadStatusEnum::Valid).await?; tokio::time::sleep(std::time::Duration::from_secs(3)).await; diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index cad2fb34e5d..325575998c2 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -1,30 +1,21 @@ -use std::sync::Arc; - +use alloy_eips::eip2718::Encodable2718; use alloy_genesis::Genesis; use alloy_primitives::{b256, hex}; use futures::StreamExt; -use reth::{args::DevArgs, core::rpc::eth::helpers::EthTransactions}; use reth_chainspec::ChainSpec; -use reth_e2e_test_utils::setup; -use reth_node_api::FullNodeComponents; +use reth_node_api::{FullNodeComponents, FullNodePrimitives, NodeTypes}; use reth_node_builder::{ rpc::RethRpcAddOns, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, }; +use reth_node_core::args::DevArgs; use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; use reth_provider::{providers::BlockchainProvider2, CanonStateSubscriptions}; +use reth_rpc_eth_api::helpers::EthTransactions; use reth_tasks::TaskManager; +use std::sync::Arc; #[tokio::test] async fn can_run_dev_node() -> eyre::Result<()> { - reth_tracing::init_test_tracing(); - let (mut nodes, _tasks, _) = setup::(1, custom_chain(), true).await?; - - assert_chain_advances(nodes.pop().unwrap().inner).await; - Ok(()) -} - -#[tokio::test] -async fn can_run_dev_node_new_engine() 
-> eyre::Result<()> { reth_tracing::init_test_tracing(); let tasks = TaskManager::current(); let exec = tasks.executor(); @@ -56,6 +47,7 @@ async fn assert_chain_advances(node: FullNode) where N: FullNodeComponents, AddOns: RethRpcAddOns, + N::Types: NodeTypes, { let mut notifications = node.provider.canonical_state_stream(); @@ -73,8 +65,8 @@ where let head = notifications.next().await.unwrap(); - let tx = head.tip().transactions().next().unwrap(); - assert_eq!(tx.hash(), hash); + let tx = &head.tip().transactions()[0]; + assert_eq!(tx.trie_hash(), hash); println!("mined transaction: {hash}"); } diff --git a/crates/ethereum/node/tests/e2e/eth.rs b/crates/ethereum/node/tests/e2e/eth.rs index 14bfb92d477..a91ccf6e391 100644 --- a/crates/ethereum/node/tests/e2e/eth.rs +++ b/crates/ethereum/node/tests/e2e/eth.rs @@ -1,15 +1,13 @@ use crate::utils::eth_payload_attributes; use alloy_genesis::Genesis; -use reth::{ - args::RpcServerArgs, - builder::{NodeBuilder, NodeConfig, NodeHandle}, - tasks::TaskManager, -}; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::{ node::NodeTestContext, setup, transaction::TransactionTestContext, wallet::Wallet, }; +use reth_node_builder::{NodeBuilder, NodeHandle}; +use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; +use reth_tasks::TaskManager; use std::sync::Arc; #[tokio::test] @@ -26,6 +24,7 @@ async fn can_run_eth_node() -> eyre::Result<()> { .build(), ), false, + eth_payload_attributes, ) .await?; @@ -36,7 +35,7 @@ async fn can_run_eth_node() -> eyre::Result<()> { let tx_hash = node.rpc.inject_tx(raw_tx).await?; // make the node advance - let (payload, _) = node.advance_block(vec![], eth_payload_attributes).await?; + let (payload, _) = node.advance_block().await?; let block_hash = payload.block().hash(); let block_number = payload.block().number; @@ -74,7 +73,7 @@ async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { .node(EthereumNode::default()) .launch() .await?; - let mut node = NodeTestContext::new(node).await?; + let mut node = NodeTestContext::new(node, eth_payload_attributes).await?; // Configure wallet from test mnemonic and create dummy transfer tx let wallet = Wallet::default(); @@ -84,7 +83,7 @@ async fn can_run_eth_node_with_auth_engine_api_over_ipc() -> eyre::Result<()> { let tx_hash = node.rpc.inject_tx(raw_tx).await?; // make the node advance - let (payload, _) = node.advance_block(vec![], eth_payload_attributes).await?; + let (payload, _) = node.advance_block().await?; let block_hash = payload.block().hash(); let block_number = payload.block().number; @@ -120,7 +119,7 @@ async fn test_failed_run_eth_node_with_no_auth_engine_api_over_ipc_opts() -> eyr .launch() .await?; - let node = NodeTestContext::new(node).await?; + let node = NodeTestContext::new(node, eth_payload_attributes).await?; // Ensure that the engine api client is not available let client = node.inner.engine_ipc_client().await; diff --git a/crates/ethereum/node/tests/e2e/main.rs b/crates/ethereum/node/tests/e2e/main.rs index 5dff7be17e1..4ed8ac5fcb6 100644 --- a/crates/ethereum/node/tests/e2e/main.rs +++ b/crates/ethereum/node/tests/e2e/main.rs @@ -4,6 +4,7 @@ mod blobs; mod dev; mod eth; mod p2p; +mod rpc; mod utils; const fn main() {} diff --git a/crates/ethereum/node/tests/e2e/p2p.rs b/crates/ethereum/node/tests/e2e/p2p.rs index a40c1b3f4b4..343521ef8eb 100644 --- a/crates/ethereum/node/tests/e2e/p2p.rs +++ b/crates/ethereum/node/tests/e2e/p2p.rs @@ -1,6 +1,8 @@ -use 
crate::utils::eth_payload_attributes; +use crate::utils::{advance_with_random_transactions, eth_payload_attributes}; +use alloy_provider::{Provider, ProviderBuilder}; +use rand::{rngs::StdRng, Rng, SeedableRng}; use reth_chainspec::{ChainSpecBuilder, MAINNET}; -use reth_e2e_test_utils::{setup, transaction::TransactionTestContext}; +use reth_e2e_test_utils::{setup, setup_engine, transaction::TransactionTestContext}; use reth_node_ethereum::EthereumNode; use std::sync::Arc; @@ -18,6 +20,7 @@ async fn can_sync() -> eyre::Result<()> { .build(), ), false, + eth_payload_attributes, ) .await?; @@ -29,7 +32,7 @@ async fn can_sync() -> eyre::Result<()> { let tx_hash = first_node.rpc.inject_tx(raw_tx).await?; // make the node advance - let (payload, _) = first_node.advance_block(vec![], eth_payload_attributes).await?; + let (payload, _) = first_node.advance_block().await?; let block_hash = payload.block().hash(); let block_number = payload.block().number; @@ -45,3 +48,91 @@ async fn can_sync() -> eyre::Result<()> { Ok(()) } + +#[tokio::test] +async fn e2e_test_send_transactions() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let seed: [u8; 32] = rand::thread_rng().gen(); + let mut rng = StdRng::from_seed(seed); + println!("Seed: {:?}", seed); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .prague_activated() + .build(), + ); + + let (mut nodes, _tasks, _) = + setup_engine::(2, chain_spec.clone(), false, eth_payload_attributes).await?; + let mut node = nodes.pop().unwrap(); + let provider = ProviderBuilder::new().with_recommended_fillers().on_http(node.rpc_url()); + + advance_with_random_transactions(&mut node, 100, &mut rng, true).await?; + + let second_node = nodes.pop().unwrap(); + let second_provider = + ProviderBuilder::new().with_recommended_fillers().on_http(second_node.rpc_url()); + + assert_eq!(second_provider.get_block_number().await?, 0); + + let head = + provider.get_block_by_number(Default::default(), false.into()).await?.unwrap().header.hash; + + second_node.sync_to(head).await?; + + Ok(()) +} + +#[tokio::test] +async fn test_long_reorg() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let seed: [u8; 32] = rand::thread_rng().gen(); + let mut rng = StdRng::from_seed(seed); + println!("Seed: {:?}", seed); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .prague_activated() + .build(), + ); + + let (mut nodes, _tasks, _) = + setup_engine::(2, chain_spec.clone(), false, eth_payload_attributes).await?; + + let mut first_node = nodes.pop().unwrap(); + let mut second_node = nodes.pop().unwrap(); + + let first_provider = ProviderBuilder::new().on_http(first_node.rpc_url()); + + // Advance first node 100 blocks. + advance_with_random_transactions(&mut first_node, 100, &mut rng, false).await?; + + // Sync second node to 20th block. + let head = first_provider.get_block_by_number(20.into(), false.into()).await?.unwrap(); + second_node.sync_to(head.header.hash).await?; + + // Produce a fork chain with blocks 21.60 + second_node.payload.timestamp = head.header.timestamp; + advance_with_random_transactions(&mut second_node, 40, &mut rng, true).await?; + + // Reorg first node from 100th block to new 60th block. 
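(Aside: the randomized e2e tests in this hunk all follow the same reproducibility pattern: generate a fresh seed, print it, then derive every random decision from a seeded `StdRng`, so a failing CI run can be replayed by hard-coding the printed seed. A minimal, self-contained sketch of that pattern using only the `rand` crate; the variable names are illustrative:)

```rust
use rand::{rngs::StdRng, Rng, SeedableRng};

fn main() {
    // Generate a fresh seed and print it before use, so a failing run can be
    // replayed by hard-coding the printed value here.
    let seed: [u8; 32] = rand::thread_rng().gen();
    println!("Seed: {:?}", seed);
    let mut rng = StdRng::from_seed(seed);

    // Every random decision then flows from `rng`, e.g. a per-block tx count.
    let tx_count: usize = rng.gen_range(1..20);
    println!("transactions this block: {tx_count}");
}
```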
+ first_node.sync_to(second_node.block_hash(60)).await?; + + // Advance second node 20 blocks and ensure that first node is able to follow it. + advance_with_random_transactions(&mut second_node, 20, &mut rng, true).await?; + first_node.sync_to(second_node.block_hash(80)).await?; + + // Ensure that it works the other way around too. + advance_with_random_transactions(&mut first_node, 20, &mut rng, true).await?; + second_node.sync_to(first_node.block_hash(100)).await?; + + Ok(()) +} diff --git a/crates/ethereum/node/tests/e2e/rpc.rs b/crates/ethereum/node/tests/e2e/rpc.rs new file mode 100644 index 00000000000..664f447cf25 --- /dev/null +++ b/crates/ethereum/node/tests/e2e/rpc.rs @@ -0,0 +1,268 @@ +use crate::utils::eth_payload_attributes; +use alloy_eips::{calc_next_block_base_fee, eip2718::Encodable2718, eip4844}; +use alloy_primitives::{Address, B256, U256}; +use alloy_provider::{network::EthereumWallet, Provider, ProviderBuilder, SendableTx}; +use alloy_rpc_types_beacon::relay::{ + BidTrace, BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, + SignedBidSubmissionV3, SignedBidSubmissionV4, +}; +use alloy_rpc_types_engine::BlobsBundleV1; +use alloy_rpc_types_eth::TransactionRequest; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_e2e_test_utils::setup_engine; +use reth_node_core::rpc::compat::engine::payload::block_to_payload_v3; +use reth_node_ethereum::EthereumNode; +use reth_payload_primitives::BuiltPayload; +use std::sync::Arc; + +alloy_sol_types::sol! { + #[sol(rpc, bytecode = "6080604052348015600f57600080fd5b5060405160db38038060db833981016040819052602a91607a565b60005b818110156074576040805143602082015290810182905260009060600160408051601f19818403018152919052805160209091012080555080606d816092565b915050602d565b505060b8565b600060208284031215608b57600080fd5b5051919050565b60006001820160b157634e487b7160e01b600052601160045260246000fd5b5060010190565b60168060c56000396000f3fe6080604052600080fdfea164736f6c6343000810000a")] + contract GasWaster { + constructor(uint256 iterations) { + for (uint256 i = 0; i < iterations; i++) { + bytes32 slot = keccak256(abi.encode(block.number, i)); + assembly { + sstore(slot, slot) + } + } + } + } +} + +#[tokio::test] +async fn test_fee_history() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let seed: [u8; 32] = rand::thread_rng().gen(); + let mut rng = StdRng::from_seed(seed); + println!("Seed: {:?}", seed); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = + setup_engine::(1, chain_spec.clone(), false, eth_payload_attributes).await?; + let mut node = nodes.pop().unwrap(); + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(EthereumWallet::new(wallet.gen().swap_remove(0))) + .on_http(node.rpc_url()); + + let fee_history = provider.get_fee_history(10, 0_u64.into(), &[]).await?; + + let genesis_base_fee = chain_spec.initial_base_fee().unwrap() as u128; + let expected_first_base_fee = genesis_base_fee - + genesis_base_fee / chain_spec.base_fee_params_at_block(0).max_change_denominator; + assert_eq!(fee_history.base_fee_per_gas[0], genesis_base_fee); + assert_eq!(fee_history.base_fee_per_gas[1], expected_first_base_fee,); + + // Spend some gas + let builder = GasWaster::deploy_builder(&provider, U256::from(500)).send().await?; + node.advance_block().await?; + 
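(The `expected_first_base_fee` assertion above follows from EIP-1559 base-fee arithmetic: an empty parent block decreases the base fee by one change-denominator fraction. A self-contained sketch of the rule the fee-history test checks, assuming mainnet's elasticity of 2 and denominator of 8:)

```rust
// EIP-1559 next-base-fee rule, using mainnet's elasticity (2) and change
// denominator (8) as assumed constants.
const ELASTICITY: u64 = 2;
const DENOMINATOR: u64 = 8;

fn next_base_fee(gas_used: u64, gas_limit: u64, base_fee: u64) -> u64 {
    let gas_target = gas_limit / ELASTICITY;
    if gas_used > gas_target {
        // Busy block: base fee rises by at least 1 wei.
        base_fee + (base_fee * (gas_used - gas_target) / gas_target / DENOMINATOR).max(1)
    } else if gas_used < gas_target {
        // Quiet block: base fee falls; an empty block drops it by 1/8.
        base_fee - base_fee * (gas_target - gas_used) / gas_target / DENOMINATOR
    } else {
        base_fee
    }
}

fn main() {
    let genesis_base_fee = 1_000_000_000u64;
    // Empty genesis block => the next block's base fee is genesis minus 1/8.
    assert_eq!(next_base_fee(0, 30_000_000, genesis_base_fee), 875_000_000);
}
```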
let receipt = builder.get_receipt().await?; + assert!(receipt.status()); + + let block = provider.get_block_by_number(1.into(), false.into()).await?.unwrap(); + assert_eq!(block.header.gas_used as u128, receipt.gas_used,); + assert_eq!(block.header.base_fee_per_gas.unwrap(), expected_first_base_fee as u64); + + for _ in 0..100 { + let _ = + GasWaster::deploy_builder(&provider, U256::from(rng.gen_range(0..1000))).send().await?; + + node.advance_block().await?; + } + + let latest_block = provider.get_block_number().await?; + + for _ in 0..100 { + let latest_block = rng.gen_range(0..=latest_block); + let block_count = rng.gen_range(1..=(latest_block + 1)); + + let fee_history = provider.get_fee_history(block_count, latest_block.into(), &[]).await?; + + let mut prev_header = provider + .get_block_by_number((latest_block + 1 - block_count).into(), false.into()) + .await? + .unwrap() + .header; + for block in (latest_block + 2 - block_count)..=latest_block { + let expected_base_fee = calc_next_block_base_fee( + prev_header.gas_used, + prev_header.gas_limit, + prev_header.base_fee_per_gas.unwrap(), + chain_spec.base_fee_params_at_block(block), + ); + + let header = + provider.get_block_by_number(block.into(), false.into()).await?.unwrap().header; + + assert_eq!(header.base_fee_per_gas.unwrap(), expected_base_fee as u64); + assert_eq!( + header.base_fee_per_gas.unwrap(), + fee_history.base_fee_per_gas[(block + block_count - 1 - latest_block) as usize] + as u64 + ); + + prev_header = header; + } + } + + Ok(()) +} + +#[tokio::test] +async fn test_flashbots_validate_v3() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .cancun_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = + setup_engine::(1, chain_spec.clone(), false, eth_payload_attributes).await?; + let mut node = nodes.pop().unwrap(); + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(EthereumWallet::new(wallet.gen().swap_remove(0))) + .on_http(node.rpc_url()); + + node.advance(100, |_| { + let provider = provider.clone(); + Box::pin(async move { + let SendableTx::Envelope(tx) = + provider.fill(TransactionRequest::default().to(Address::ZERO)).await.unwrap() + else { + unreachable!() + }; + + tx.encoded_2718().into() + }) + }) + .await?; + + let _ = provider.send_transaction(TransactionRequest::default().to(Address::ZERO)).await?; + let (payload, attrs) = node.new_payload().await?; + + let mut request = BuilderBlockValidationRequestV3 { + request: SignedBidSubmissionV3 { + message: BidTrace { + parent_hash: payload.block().parent_hash, + block_hash: payload.block().hash(), + gas_used: payload.block().gas_used, + gas_limit: payload.block().gas_limit, + ..Default::default() + }, + execution_payload: block_to_payload_v3(payload.block().clone()), + blobs_bundle: BlobsBundleV1::new([]), + signature: Default::default(), + }, + parent_beacon_block_root: attrs.parent_beacon_block_root.unwrap(), + registered_gas_limit: payload.block().gas_limit, + }; + + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,)) + .await + .is_ok()); + + request.registered_gas_limit -= 1; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,)) + .await + .is_err()); + request.registered_gas_limit += 1; + + 
request.request.execution_payload.payload_inner.payload_inner.state_root = B256::ZERO; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV3".into(), (&request,)) + .await + .is_err()); + Ok(()) +} + +#[tokio::test] +async fn test_flashbots_validate_v4() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("../assets/genesis.json")).unwrap()) + .prague_activated() + .build(), + ); + + let (mut nodes, _tasks, wallet) = + setup_engine::(1, chain_spec.clone(), false, eth_payload_attributes).await?; + let mut node = nodes.pop().unwrap(); + let provider = ProviderBuilder::new() + .with_recommended_fillers() + .wallet(EthereumWallet::new(wallet.gen().swap_remove(0))) + .on_http(node.rpc_url()); + + node.advance(100, |_| { + let provider = provider.clone(); + Box::pin(async move { + let SendableTx::Envelope(tx) = + provider.fill(TransactionRequest::default().to(Address::ZERO)).await.unwrap() + else { + unreachable!() + }; + + tx.encoded_2718().into() + }) + }) + .await?; + + let _ = provider.send_transaction(TransactionRequest::default().to(Address::ZERO)).await?; + let (payload, attrs) = node.new_payload().await?; + + let mut request = BuilderBlockValidationRequestV4 { + request: SignedBidSubmissionV4 { + message: BidTrace { + parent_hash: payload.block().parent_hash, + block_hash: payload.block().hash(), + gas_used: payload.block().gas_used, + gas_limit: payload.block().gas_limit, + ..Default::default() + }, + execution_payload: block_to_payload_v3(payload.block().clone()), + blobs_bundle: BlobsBundleV1::new([]), + execution_requests: payload.requests().unwrap_or_default().to_vec(), + target_blobs_per_block: eip4844::TARGET_BLOBS_PER_BLOCK, + signature: Default::default(), + }, + parent_beacon_block_root: attrs.parent_beacon_block_root.unwrap(), + registered_gas_limit: payload.block().gas_limit, + }; + + provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,)) + .await + .expect("request should validate"); + + request.registered_gas_limit -= 1; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,)) + .await + .is_err()); + request.registered_gas_limit += 1; + + request.request.execution_payload.payload_inner.payload_inner.state_root = B256::ZERO; + assert!(provider + .raw_request::<_, ()>("flashbots_validateBuilderSubmissionV4".into(), (&request,)) + .await + .is_err()); + Ok(()) +} diff --git a/crates/ethereum/node/tests/e2e/utils.rs b/crates/ethereum/node/tests/e2e/utils.rs index 6e534f5dc0e..84741a46aa6 100644 --- a/crates/ethereum/node/tests/e2e/utils.rs +++ b/crates/ethereum/node/tests/e2e/utils.rs @@ -1,6 +1,22 @@ -use alloy_primitives::{Address, B256}; -use reth::rpc::types::engine::PayloadAttributes; +use alloy_eips::{BlockId, BlockNumberOrTag}; +use alloy_primitives::{bytes, Address, B256}; +use alloy_provider::{ + network::{ + Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder, TransactionBuilder7702, + }, + Provider, ProviderBuilder, SendableTx, +}; +use alloy_rpc_types_engine::PayloadAttributes; +use alloy_rpc_types_eth::TransactionRequest; +use alloy_signer::SignerSync; +use rand::{seq::SliceRandom, Rng}; +use reth_e2e_test_utils::{wallet::Wallet, NodeHelperType, TmpDB}; +use reth_node_api::NodeTypesWithDBAdapter; +use reth_node_ethereum::EthereumNode; use reth_payload_builder::EthPayloadBuilderAttributes; +use 
reth_primitives::TxType; +use reth_provider::FullProvider; +use revm::primitives::{AccessListItem, Authorization}; /// Helper function to create a new eth payload attributes pub(crate) fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttributes { @@ -10,6 +26,121 @@ pub(crate) fn eth_payload_attributes(timestamp: u64) -> EthPayloadBuilderAttribu suggested_fee_recipient: Address::ZERO, withdrawals: Some(vec![]), parent_beacon_block_root: Some(B256::ZERO), + target_blobs_per_block: None, + max_blobs_per_block: None, }; EthPayloadBuilderAttributes::new(B256::ZERO, attributes) } + +/// Advances node by producing blocks with random transactions. +pub(crate) async fn advance_with_random_transactions( + node: &mut NodeHelperType, + num_blocks: usize, + rng: &mut impl Rng, + finalize: bool, +) -> eyre::Result<()> +where + Provider: FullProvider>, +{ + let provider = ProviderBuilder::new().with_recommended_fillers().on_http(node.rpc_url()); + let signers = Wallet::new(1).with_chain_id(provider.get_chain_id().await?).gen(); + + // simple contract which writes to storage on any call + let dummy_bytecode = bytes!("6080604052348015600f57600080fd5b50602880601d6000396000f3fe4360a09081523360c0526040608081905260e08152902080805500fea164736f6c6343000810000a"); + let mut call_destinations = signers.iter().map(|s| s.address()).collect::>(); + + for _ in 0..num_blocks { + let tx_count = rng.gen_range(1..20); + + let mut pending = vec![]; + for _ in 0..tx_count { + let signer = signers.choose(rng).unwrap(); + let tx_type = TxType::try_from(rng.gen_range(0..=4) as u64).unwrap(); + + let nonce = provider + .get_transaction_count(signer.address()) + .block_id(BlockId::Number(BlockNumberOrTag::Pending)) + .await?; + + let mut tx = + TransactionRequest::default().with_from(signer.address()).with_nonce(nonce); + + let should_create = + rng.gen::() && tx_type != TxType::Eip4844 && tx_type != TxType::Eip7702; + if should_create { + tx = tx.into_create().with_input(dummy_bytecode.clone()); + } else { + tx = tx.with_to(*call_destinations.choose(rng).unwrap()).with_input( + (0..rng.gen_range(0..10000)).map(|_| rng.gen()).collect::>(), + ); + } + + if matches!(tx_type, TxType::Legacy | TxType::Eip2930) { + tx = tx.with_gas_price(provider.get_gas_price().await?); + } + + if rng.gen::() || tx_type == TxType::Eip2930 { + tx = tx.with_access_list( + vec![AccessListItem { + address: *call_destinations.choose(rng).unwrap(), + storage_keys: (0..rng.gen_range(0..100)).map(|_| rng.gen()).collect(), + }] + .into(), + ); + } + + if tx_type == TxType::Eip7702 { + let signer = signers.choose(rng).unwrap(); + let auth = Authorization { + chain_id: provider.get_chain_id().await?, + address: *call_destinations.choose(rng).unwrap(), + nonce: provider + .get_transaction_count(signer.address()) + .block_id(BlockId::Number(BlockNumberOrTag::Pending)) + .await?, + }; + let sig = signer.sign_hash_sync(&auth.signature_hash())?; + tx = tx.with_authorization_list(vec![auth.into_signed(sig)]) + } + + let gas = provider + .estimate_gas(&tx) + .block(BlockId::Number(BlockNumberOrTag::Pending)) + .await + .unwrap_or(1_000_000); + + tx.set_gas_limit(gas); + + let SendableTx::Builder(tx) = provider.fill(tx).await? 
else { unreachable!() }; + let tx = + NetworkWallet::::sign_request(&EthereumWallet::new(signer.clone()), tx) + .await?; + + pending.push(provider.send_tx_envelope(tx).await?); + } + + let (payload, _) = node.build_and_submit_payload().await?; + if finalize { + node.engine_api + .update_forkchoice(payload.block().hash(), payload.block().hash()) + .await?; + } else { + let last_safe = provider + .get_block_by_number(BlockNumberOrTag::Safe, false.into()) + .await? + .unwrap() + .header + .hash; + node.engine_api.update_forkchoice(last_safe, payload.block().hash()).await?; + } + + for pending in pending { + let receipt = pending.get_receipt().await?; + if let Some(address) = receipt.contract_address { + call_destinations.push(address); + } + } + } + + Ok(()) +} diff --git a/crates/ethereum/payload/Cargo.toml b/crates/ethereum/payload/Cargo.toml index f169d58f7e8..b01f4c5bc74 100644 --- a/crates/ethereum/payload/Cargo.toml +++ b/crates/ethereum/payload/Cargo.toml @@ -18,21 +18,22 @@ reth-revm.workspace = true reth-transaction-pool.workspace = true reth-provider.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-execution-types.workspace = true reth-basic-payload-builder.workspace = true reth-evm.workspace = true reth-evm-ethereum.workspace = true reth-errors.workspace = true -reth-trie.workspace = true reth-chain-state.workspace = true reth-chainspec.workspace = true # ethereum revm.workspace = true -revm-primitives.workspace = true # alloy +alloy-eips.workspace = true +alloy-consensus.workspace = true alloy-primitives.workspace = true # misc diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 248aa3486de..f909d3840e2 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -9,10 +9,15 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow(clippy::useless_let_if_seq)] +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; +use alloy_eips::{ + eip4844::MAX_DATA_GAS_PER_BLOCK, eip7002::WITHDRAWAL_REQUEST_TYPE, + eip7251::CONSOLIDATION_REQUEST_TYPE, eip7685::Requests, merge::BEACON_NONCE, +}; use alloy_primitives::U256; use reth_basic_payload_builder::{ commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, - PayloadConfig, WithdrawalsOutcome, + PayloadConfig, }; use reth_chain_state::ExecutedBlock; use reth_chainspec::ChainSpec; @@ -21,28 +26,34 @@ use reth_evm::{system_calls::SystemCaller, ConfigureEvm, NextBlockEnvAttributes} use reth_evm_ethereum::{eip6110::parse_deposits_from_receipts, EthEvmConfig}; use reth_execution_types::ExecutionOutcome; use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes}; -use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ - constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE}, - proofs::{self, calculate_requests_root}, - revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, EthereumHardforks, Header, Receipt, EMPTY_OMMER_ROOT_HASH, + proofs::{self}, + Block, BlockBody, BlockExt, EthereumHardforks, InvalidTransactionError, Receipt, + TransactionSigned, }; use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::database::StateProviderDatabase; use reth_transaction_pool::{ - noop::NoopTransactionPool, BestTransactionsAttributes, 
TransactionPool, + error::InvalidPoolTransactionError, noop::NoopTransactionPool, BestTransactions, + BestTransactionsAttributes, PoolTransaction, TransactionPool, ValidPoolTransaction, }; -use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, State}, - primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState}, + primitives::{ + calc_excess_blob_gas, BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, + InvalidTransaction, ResultAndState, TxEnv, + }, DatabaseCommit, }; -use revm_primitives::calc_excess_blob_gas; use std::sync::Arc; use tracing::{debug, trace, warn}; +type BestTransactionsIter = Box< + dyn BestTransactions::Transaction>>>, +>; + /// Ethereum payload builder #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct EthereumPayloadBuilder { @@ -67,7 +78,7 @@ where &self, config: &PayloadConfig, parent: &Header, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), EvmConfig::Error> { let next_attributes = NextBlockEnvAttributes { timestamp: config.attributes.timestamp(), suggested_fee_recipient: config.attributes.suggested_fee_recipient(), @@ -80,9 +91,9 @@ where // Default implementation of [PayloadBuilder] for unit type impl PayloadBuilder for EthereumPayloadBuilder where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm<Header = Header>
, Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Pool: TransactionPool>, { type Attributes = EthPayloadBuilderAttributes; type BuiltPayload = EthBuiltPayload; @@ -91,8 +102,14 @@ where &self, args: BuildArguments, ) -> Result, PayloadBuilderError> { - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); - default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env) + let (cfg_env, block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; + + let pool = args.pool.clone(); + default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env, |attributes| { + pool.best_transactions_with_attributes(attributes) + }) } fn build_empty_payload( @@ -100,19 +117,27 @@ where client: &Client, config: PayloadConfig, ) -> Result { - let args = BuildArguments { + let args = BuildArguments::new( client, - config, // we use defaults here because for the empty payload we don't need to execute anything - pool: NoopTransactionPool::default(), - cached_reads: Default::default(), - cancel: Default::default(), - best_payload: None, - }; - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); - default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env)? - .into_payload() - .ok_or_else(|| PayloadBuilderError::MissingPayload) + NoopTransactionPool::default(), + Default::default(), + config, + Default::default(), + None, + ); + + let (cfg_env, block_env) = self + .cfg_and_block_env(&args.config, &args.config.parent_header) + .map_err(PayloadBuilderError::other)?; + + let pool = args.pool.clone(); + + default_ethereum_payload(self.evm_config.clone(), args, cfg_env, block_env, |attributes| { + pool.best_transactions_with_attributes(attributes) + })? + .into_payload() + .ok_or_else(|| PayloadBuilderError::MissingPayload) } } @@ -122,27 +147,29 @@ where /// and configuration, this function creates a transaction payload. Returns /// a result indicating success with the payload or an error in case of failure. #[inline] -pub fn default_ethereum_payload( +pub fn default_ethereum_payload( evm_config: EvmConfig, args: BuildArguments, initialized_cfg: CfgEnvWithHandlerCfg, initialized_block_env: BlockEnv, + best_txs: F, ) -> Result, PayloadBuilderError> where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm<Header = Header>
, Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Pool: TransactionPool>, + F: FnOnce(BestTransactionsAttributes) -> BestTransactionsIter, { let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; let chain_spec = client.chain_spec(); - let state_provider = client.state_by_block_hash(config.parent_block.hash())?; + let state_provider = client.state_by_block_hash(config.parent_header.hash())?; let state = StateProviderDatabase::new(state_provider); let mut db = - State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); - let PayloadConfig { parent_block, extra_data, attributes } = config; + State::builder().with_database(cached_reads.as_db_mut(state)).with_bundle_update().build(); + let PayloadConfig { parent_header, extra_data, attributes } = config; - debug!(target: "payload_builder", id=%attributes.id, parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload"); + debug!(target: "payload_builder", id=%attributes.id, parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; let block_gas_limit: u64 = initialized_block_env.gas_limit.to::(); @@ -151,11 +178,10 @@ where let mut executed_txs = Vec::new(); let mut executed_senders = Vec::new(); - let mut best_txs = pool.best_transactions_with_attributes(BestTransactionsAttributes::new( + let mut best_txs = best_txs(BestTransactionsAttributes::new( base_fee, initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), )); - let mut total_fees = U256::ZERO; let block_number = initialized_block_env.number.to::(); @@ -172,7 +198,7 @@ where ) .map_err(|err| { warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), + parent_hash=%parent_header.hash(), %err, "failed to apply beacon root contract call for payload" ); @@ -184,13 +210,20 @@ where &mut db, &initialized_cfg, &initialized_block_env, - parent_block.hash(), + parent_header.hash(), ) .map_err(|err| { - warn!(target: "payload_builder", parent_hash=%parent_block.hash(), %err, "failed to update blockhashes for payload"); + warn!(target: "payload_builder", parent_hash=%parent_header.hash(), %err, "failed to update parent header blockhashes for payload"); PayloadBuilderError::Internal(err.into()) })?; + let env = EnvWithHandlerCfg::new_with_cfg_env( + initialized_cfg.clone(), + initialized_block_env.clone(), + TxEnv::default(), + ); + let mut evm = evm_config.evm_with_env(&mut db, env); + let mut receipts = Vec::new(); while let Some(pool_tx) = best_txs.next() { // ensure we still have capacity for this transaction @@ -198,7 +231,10 @@ where // we can't fit this transaction into the block, so we need to mark it as invalid // which also removes all dependent transaction from the iterator before we can // continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit(pool_tx.gas_limit(), block_gas_limit), + ); continue } @@ -208,7 +244,7 @@ where } // convert tx to a signed transaction - let tx = pool_tx.to_recovered_transaction(); + let tx = pool_tx.to_consensus(); // There's only limited amount of blob space available per block, so we need to check if // the EIP-4844 can still fit in the block @@ -220,19 +256,19 @@ where // the iterator. This is similar to the gas limit condition // for regular transactions above. 
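(The refactor in this hunk threads transaction selection through a caller-supplied closure, `best_txs: F`, instead of having the builder query the pool directly, which lets callers and tests inject a custom ordering; `mark_invalid` now also carries an explicit `InvalidPoolTransactionError` reason. A simplified sketch of that inversion, with toy types standing in for the real pool and attribute types:)

```rust
// Toy stand-ins for the pool transaction and selection attributes.
#[allow(dead_code)]
struct Attributes {
    base_fee: u64,
}

struct Tx {
    gas_limit: u64,
}

// The builder receives `best_txs` as a closure instead of reaching into a
// concrete pool type, so callers control ordering and filtering.
fn build_payload<F, I>(block_gas_limit: u64, best_txs: F) -> Vec<Tx>
where
    F: FnOnce(Attributes) -> I,
    I: Iterator<Item = Tx>,
{
    let mut included = Vec::new();
    let mut gas_used = 0;
    for tx in best_txs(Attributes { base_fee: 7 }) {
        if gas_used + tx.gas_limit > block_gas_limit {
            // The real builder calls `mark_invalid` with a reason here; the
            // sketch just skips the transaction.
            continue;
        }
        gas_used += tx.gas_limit;
        included.push(tx);
    }
    included
}

fn main() {
    let txs = vec![Tx { gas_limit: 21_000 }, Tx { gas_limit: 100_000 }];
    let built = build_payload(30_000, |_attrs| txs.into_iter());
    assert_eq!(built.len(), 1);
}
```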
trace!(target: "payload_builder", tx=?tx.hash, ?sum_blob_gas_used, ?tx_blob_gas, "skipping blob transaction because it would exceed the max data gas per block"); - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::ExceedsGasLimit( + tx_blob_gas, + MAX_DATA_GAS_PER_BLOCK, + ), + ); continue } } - let env = EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - evm_config.tx_env(tx.as_signed(), tx.signer()), - ); - - // Configure the environment for the block. - let mut evm = evm_config.evm_with_env(&mut db, env); + // Configure the environment for the tx. + *evm.tx_mut() = evm_config.tx_env(tx.as_signed(), tx.signer()); let ResultAndState { result, state } = match evm.transact() { Ok(res) => res, @@ -246,7 +282,12 @@ where // if the transaction is invalid, we can skip it and all of its // descendants trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants"); - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid( + &pool_tx, + InvalidPoolTransactionError::Consensus( + InvalidTransactionError::TxTypeNotSupported, + ), + ); } continue @@ -258,10 +299,9 @@ where } } }; - // drop evm so db is released. - drop(evm); + // commit changes - db.commit(state); + evm.db_mut().commit(state); // add to the total blob gas used if the transaction successfully executed if let Some(blob_tx) = tx.transaction.as_eip4844() { @@ -300,6 +340,9 @@ where executed_txs.push(tx.into_signed()); } + // Release db + drop(evm); + // check if we have a better block if !is_better_payload(best_payload.as_ref(), total_fees) { // can skip building the block @@ -307,9 +350,7 @@ where } // calculate the requests and the requests root - let (requests, requests_root) = if chain_spec - .is_prague_active_at_timestamp(attributes.timestamp) - { + let requests = if chain_spec.is_prague_active_at_timestamp(attributes.timestamp) { let deposit_requests = parse_deposits_from_receipts(&chain_spec, receipts.iter().flatten()) .map_err(|err| PayloadBuilderError::Internal(RethError::Execution(err.into())))?; let withdrawal_requests = system_caller @@ -327,23 +368,42 @@ where ) .map_err(|err| PayloadBuilderError::Internal(err.into()))?; - let requests = [deposit_requests, withdrawal_requests, consolidation_requests].concat(); - let requests_root = calculate_requests_root(&requests); - (Some(requests.into()), Some(requests_root)) + let mut requests = Requests::default(); + + if !deposit_requests.is_empty() { + requests.push_request(core::iter::once(0).chain(deposit_requests).collect()); + } + + if !withdrawal_requests.is_empty() { + requests.push_request( + core::iter::once(WITHDRAWAL_REQUEST_TYPE).chain(withdrawal_requests).collect(), + ); + } + + if !consolidation_requests.is_empty() { + requests.push_request( + core::iter::once(CONSOLIDATION_REQUEST_TYPE) + .chain(consolidation_requests) + .collect(), + ); + } + + Some(requests) } else { - (None, None) + None }; - let WithdrawalsOutcome { withdrawals_root, withdrawals } = - commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, attributes.withdrawals)?; + let withdrawals_root = + commit_withdrawals(&mut db, &chain_spec, attributes.timestamp, &attributes.withdrawals)?; // merge all transitions into bundle state, this would apply the withdrawal balance changes // and 4788 contract call db.merge_transitions(BundleRetention::Reverts); + let requests_hash = requests.as_ref().map(|requests| requests.requests_hash()); let execution_outcome = 
ExecutionOutcome::new( db.take_bundle(), - vec![receipts.clone()].into(), + vec![receipts].into(), block_number, vec![requests.clone().unwrap_or_default()], ); @@ -352,12 +412,11 @@ where let logs_bloom = execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); // calculate the state root - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); + let hashed_state = db.database.db.hashed_post_state(execution_outcome.state()); let (state_root, trie_output) = { - let state_provider = db.database.0.inner.borrow_mut(); - state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { + db.database.inner().state_root_with_updates(hashed_state.clone()).inspect_err(|err| { warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), + parent_hash=%parent_header.hash(), %err, "failed to calculate state root for payload" ); @@ -375,13 +434,15 @@ where // only determine cancun fields when active if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp) { // grab the blob sidecars from the executed txs - blob_sidecars = pool.get_all_blobs_exact( - executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash).collect(), - )?; + blob_sidecars = pool + .get_all_blobs_exact( + executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash()).collect(), + ) + .map_err(PayloadBuilderError::other)?; - excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) { - let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); - let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); + excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_header.timestamp) { + let parent_excess_blob_gas = parent_header.excess_blob_gas.unwrap_or_default(); + let parent_blob_gas_used = parent_header.blob_gas_used.unwrap_or_default(); Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) } else { // for the first post-fork block, both parent.blob_gas_used and @@ -393,7 +454,7 @@ where } let header = Header { - parent_hash: parent_block.hash(), + parent_hash: parent_header.hash(), ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: initialized_block_env.coinbase, state_root, @@ -405,7 +466,7 @@ where mix_hash: attributes.prev_randao, nonce: BEACON_NONCE.into(), base_fee_per_gas: Some(base_fee), - number: parent_block.number + 1, + number: parent_header.number + 1, gas_limit: block_gas_limit, difficulty: U256::ZERO, gas_used: cumulative_gas_used, @@ -413,31 +474,37 @@ where parent_beacon_block_root: attributes.parent_beacon_block_root, blob_gas_used: blob_gas_used.map(Into::into), excess_blob_gas: excess_blob_gas.map(Into::into), - requests_root, + requests_hash, + target_blobs_per_block: None, }; + let withdrawals = chain_spec + .is_shanghai_active_at_timestamp(attributes.timestamp) + .then(|| attributes.withdrawals.clone()); + // seal the block let block = Block { header, - body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals, requests }, + body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals }, }; - let sealed_block = block.seal_slow(); - debug!(target: "payload_builder", ?sealed_block, "sealed built block"); + let sealed_block = Arc::new(block.seal_slow()); + debug!(target: "payload_builder", id=%attributes.id, sealed_block_header = ?sealed_block.header, "sealed built block"); // create the executed block data let executed = ExecutedBlock { - block: Arc::new(sealed_block.clone()), + block: 
sealed_block.clone(), senders: Arc::new(executed_senders), execution_output: Arc::new(execution_outcome), hashed_state: Arc::new(hashed_state), trie: Arc::new(trie_output), }; - let mut payload = EthBuiltPayload::new(attributes.id, sealed_block, total_fees, Some(executed)); + let mut payload = + EthBuiltPayload::new(attributes.id, sealed_block, total_fees, Some(executed), requests); // extend the payload with the blob sidecars from the executed txs - payload.extend_sidecars(blob_sidecars); + payload.extend_sidecars(blob_sidecars.into_iter().map(Arc::unwrap_or_clone)); Ok(BuildOutcome::Better { payload, cached_reads }) } diff --git a/crates/ethereum/primitives/Cargo.toml b/crates/ethereum/primitives/Cargo.toml new file mode 100644 index 00000000000..a016d7dd652 --- /dev/null +++ b/crates/ethereum/primitives/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "reth-ethereum-primitives" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Ethereum primitive types" + +[lints] +workspace = true + +[dependencies] + +[features] +default = ["std"] +std = [] \ No newline at end of file diff --git a/crates/ethereum/primitives/src/lib.rs b/crates/ethereum/primitives/src/lib.rs new file mode 100644 index 00000000000..78bb5d75f19 --- /dev/null +++ b/crates/ethereum/primitives/src/lib.rs @@ -0,0 +1,10 @@ +//! Standalone crate for ethereum-specific Reth primitive types. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(not(feature = "std"), no_std)] diff --git a/crates/etl/src/lib.rs b/crates/etl/src/lib.rs index d30f432f9c1..46d41d704d0 100644 --- a/crates/etl/src/lib.rs +++ b/crates/etl/src/lib.rs @@ -281,9 +281,8 @@ impl EtlFile { #[cfg(test)] mod tests { - use alloy_primitives::{TxHash, TxNumber}; - use super::*; + use alloy_primitives::{TxHash, TxNumber}; #[test] fn etl_hashes() { diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index 6081eae420c..fe5505b52bd 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -14,6 +14,7 @@ workspace = true # reth reth-chainspec.workspace = true reth-consensus.workspace = true +reth-consensus-common.workspace = true reth-execution-errors.workspace = true reth-execution-types.workspace = true reth-metrics = { workspace = true, optional = true } @@ -29,6 +30,7 @@ revm-primitives.workspace = true # alloy alloy-primitives.workspace = true alloy-eips.workspace = true +alloy-consensus.workspace = true auto_impl.workspace = true futures-util.workspace = true @@ -37,8 +39,33 @@ parking_lot = { workspace = true, optional = true } [dev-dependencies] parking_lot.workspace = true +reth-ethereum-forks.workspace = true +alloy-consensus.workspace = true +metrics-util = { workspace = true, features = ["debugging"] } [features] default = ["std"] -std = ["dep:metrics", "dep:reth-metrics"] -test-utils = ["dep:parking_lot"] +std = [ + "dep:metrics", + "dep:reth-metrics", + "reth-consensus/std", + "reth-primitives/std", + "reth-primitives-traits/std", + "reth-revm/std", + "alloy-eips/std", + "alloy-primitives/std", + "alloy-consensus/std", + "revm-primitives/std", + "revm/std", + "reth-ethereum-forks/std" 
+] +test-utils = [ + "dep:parking_lot", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-revm/test-utils", + "revm/test-utils", + "reth-prune-types/test-utils" +] diff --git a/crates/evm/execution-errors/Cargo.toml b/crates/evm/execution-errors/Cargo.toml index d4f8534e752..b4b9992a979 100644 --- a/crates/evm/execution-errors/Cargo.toml +++ b/crates/evm/execution-errors/Cargo.toml @@ -22,8 +22,16 @@ alloy-eips.workspace = true revm-primitives.workspace = true nybbles.workspace = true -derive_more.workspace = true +thiserror.workspace = true [features] default = ["std"] -std = ["reth-consensus/std"] +std = [ + "reth-consensus/std", + "alloy-eips/std", + "alloy-primitives/std", + "revm-primitives/std", + "alloy-rlp/std", + "thiserror/std", + "nybbles/std" +] diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs index 4dbbfb7abdc..db7887d1b8d 100644 --- a/crates/evm/execution-errors/src/lib.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -14,20 +14,20 @@ extern crate alloc; use alloc::{boxed::Box, string::String}; use alloy_eips::BlockNumHash; use alloy_primitives::B256; -use derive_more::{Display, From}; use reth_consensus::ConsensusError; use reth_prune_types::PruneSegmentError; use reth_storage_errors::provider::ProviderError; use revm_primitives::EVMError; +use thiserror::Error; pub mod trie; pub use trie::*; /// Transaction validation errors -#[derive(Clone, Debug, Display, Eq, PartialEq)] +#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum BlockValidationError { /// EVM error with transaction hash and message - #[display("EVM reported invalid transaction ({hash}): {error}")] + #[error("EVM reported invalid transaction ({hash}): {error}")] EVM { /// The hash of the transaction hash: B256, @@ -35,16 +35,16 @@ pub enum BlockValidationError { error: Box>, }, /// Error when recovering the sender for a transaction - #[display("failed to recover sender for transaction")] + #[error("failed to recover sender for transaction")] SenderRecoveryError, /// Error when incrementing balance in post execution - #[display("incrementing balance in post execution failed")] + #[error("incrementing balance in post execution failed")] IncrementBalanceFailed, /// Error when the state root does not match the expected value. 
- // #[from(ignore)] - StateRoot(StateRootError), + #[error(transparent)] + StateRoot(#[from] StateRootError), /// Error when transaction gas limit exceeds available block gas - #[display( + #[error( "transaction gas limit {transaction_gas_limit} is more than blocks available gas {block_available_gas}" )] TransactionGasLimitMoreThanAvailableBlockGas { @@ -54,22 +54,22 @@ pub enum BlockValidationError { block_available_gas: u64, }, /// Error for pre-merge block - #[display("block {hash} is pre merge")] + #[error("block {hash} is pre merge")] BlockPreMerge { /// The hash of the block hash: B256, }, /// Error for missing total difficulty - #[display("missing total difficulty for block {hash}")] + #[error("missing total difficulty for block {hash}")] MissingTotalDifficulty { /// The hash of the block hash: B256, }, /// Error for EIP-4788 when parent beacon block root is missing - #[display("EIP-4788 parent beacon block root missing for active Cancun block")] + #[error("EIP-4788 parent beacon block root missing for active Cancun block")] MissingParentBeaconBlockRoot, /// Error for Cancun genesis block when parent beacon block root is not zero - #[display( + #[error( "the parent beacon block root is not zero for Cancun genesis block: {parent_beacon_block_root}" )] CancunGenesisParentBeaconBlockRootNotZero { @@ -79,9 +79,7 @@ pub enum BlockValidationError { /// EVM error during [EIP-4788] beacon root contract call. /// /// [EIP-4788]: https://eips.ethereum.org/EIPS/eip-4788 - #[display( - "failed to apply beacon root contract call at {parent_beacon_block_root}: {message}" - )] + #[error("failed to apply beacon root contract call at {parent_beacon_block_root}: {message}")] BeaconRootContractCall { /// The beacon block root parent_beacon_block_root: Box, @@ -91,7 +89,7 @@ pub enum BlockValidationError { /// EVM error during [EIP-2935] blockhash contract call. /// /// [EIP-2935]: https://eips.ethereum.org/EIPS/eip-2935 - #[display("failed to apply blockhash contract call: {message}")] + #[error("failed to apply blockhash contract call: {message}")] BlockHashContractCall { /// The error message. message: String, @@ -99,7 +97,7 @@ pub enum BlockValidationError { /// EVM error during withdrawal requests contract call [EIP-7002] /// /// [EIP-7002]: https://eips.ethereum.org/EIPS/eip-7002 - #[display("failed to apply withdrawal requests contract call: {message}")] + #[error("failed to apply withdrawal requests contract call: {message}")] WithdrawalRequestsContractCall { /// The error message. message: String, @@ -107,7 +105,7 @@ pub enum BlockValidationError { /// EVM error during consolidation requests contract call [EIP-7251] /// /// [EIP-7251]: https://eips.ethereum.org/EIPS/eip-7251 - #[display("failed to apply consolidation requests contract call: {message}")] + #[error("failed to apply consolidation requests contract call: {message}")] ConsolidationRequestsContractCall { /// The error message. 
message: String, @@ -115,35 +113,22 @@ pub enum BlockValidationError { /// Error when decoding deposit requests from receipts [EIP-6110] /// /// [EIP-6110]: https://eips.ethereum.org/EIPS/eip-6110 - #[display("failed to decode deposit requests from receipts: {_0}")] + #[error("failed to decode deposit requests from receipts: {_0}")] DepositRequestDecode(String), } -impl From for BlockValidationError { - fn from(error: StateRootError) -> Self { - Self::StateRoot(error) - } -} - -impl core::error::Error for BlockValidationError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::EVM { error, .. } => core::error::Error::source(error), - Self::StateRoot(source) => core::error::Error::source(source), - _ => Option::None, - } - } -} - /// `BlockExecutor` Errors -#[derive(Debug, From, Display)] +#[derive(Error, Debug)] pub enum BlockExecutionError { /// Validation error, transparently wrapping [`BlockValidationError`] - Validation(BlockValidationError), + #[error(transparent)] + Validation(#[from] BlockValidationError), /// Consensus error, transparently wrapping [`ConsensusError`] - Consensus(ConsensusError), + #[error(transparent)] + Consensus(#[from] ConsensusError), /// Internal, i.e. non consensus or validation related Block Executor Errors - Internal(InternalBlockExecutionError), + #[error(transparent)] + Internal(#[from] InternalBlockExecutionError), } impl BlockExecutionError { @@ -184,24 +169,14 @@ impl From for BlockExecutionError { } } -impl core::error::Error for BlockExecutionError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Validation(source) => core::error::Error::source(source), - Self::Consensus(source) => core::error::Error::source(source), - Self::Internal(source) => core::error::Error::source(source), - } - } -} - /// Internal (i.e., not validation or consensus related) `BlockExecutor` Errors -#[derive(Display, Debug, From)] +#[derive(Error, Debug)] pub enum InternalBlockExecutionError { /// Pruning error, transparently wrapping [`PruneSegmentError`] - #[from] - Pruning(PruneSegmentError), + #[error(transparent)] + Pruning(#[from] PruneSegmentError), /// Error when appending chain on fork is not possible - #[display( + #[error( "appending chain on fork (other_chain_fork:?) is not possible as the tip is {chain_tip:?}" )] AppendChainDoesntConnect { @@ -211,9 +186,10 @@ pub enum InternalBlockExecutionError { other_chain_fork: Box, }, /// Error when fetching latest block state. - #[from] - LatestBlock(ProviderError), + #[error(transparent)] + LatestBlock(#[from] ProviderError), /// Arbitrary Block Executor Errors + #[error(transparent)] Other(Box), } @@ -233,13 +209,3 @@ impl InternalBlockExecutionError { Self::Other(msg.to_string().into()) } } - -impl core::error::Error for InternalBlockExecutionError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Pruning(source) => core::error::Error::source(source), - Self::LatestBlock(source) => core::error::Error::source(source), - _ => Option::None, - } - } -} diff --git a/crates/evm/execution-errors/src/trie.rs b/crates/evm/execution-errors/src/trie.rs index 9e4b16d8d0c..8d04f97e8ea 100644 --- a/crates/evm/execution-errors/src/trie.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -1,27 +1,20 @@ //! Errors when computing the state root. 
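(The net effect of the `derive_more` → `thiserror` migration above: a single `#[derive(Error)]` now supplies the `Display` impl, the `From` conversions, and the `Error::source` wiring that previously required `derive_more::Display`/`From` plus the hand-written `core::error::Error` impls that this hunk deletes. A minimal sketch with an illustrative enum, not a real reth type:)

```rust
use thiserror::Error;

// One derive supplies Display, `From`, and `Error::source`.
#[derive(Debug, Error)]
enum DemoError {
    #[error("EVM reported invalid transaction ({hash}): {message}")]
    Evm { hash: String, message: String },
    // `transparent` forwards Display and `source()` to the inner error,
    // and `#[from]` generates the `From<std::io::Error>` conversion.
    #[error(transparent)]
    Io(#[from] std::io::Error),
}

fn main() {
    let err: DemoError = std::io::Error::other("disk unplugged").into();
    println!("{err}");
}
```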
-use alloc::string::ToString; -use alloy_primitives::B256; -use derive_more::{Display, From}; +use alloc::{boxed::Box, string::ToString}; +use alloy_primitives::{Bytes, B256}; use nybbles::Nibbles; use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; +use thiserror::Error; /// State root errors. -#[derive(Display, Debug, From, PartialEq, Eq, Clone)] +#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum StateRootError { /// Internal database error. - Database(DatabaseError), + #[error(transparent)] + Database(#[from] DatabaseError), /// Storage root error. - StorageRootError(StorageRootError), -} - -impl core::error::Error for StateRootError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Database(source) => core::error::Error::source(source), - Self::StorageRootError(source) => core::error::Error::source(source), - } - } + #[error(transparent)] + StorageRootError(#[from] StorageRootError), } impl From for DatabaseError { @@ -34,10 +27,11 @@ impl From for DatabaseError { } /// Storage root error. -#[derive(Display, From, PartialEq, Eq, Clone, Debug)] +#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum StorageRootError { /// Internal database error. - Database(DatabaseError), + #[error(transparent)] + Database(#[from] DatabaseError), } impl From for DatabaseError { @@ -48,21 +42,15 @@ impl From for DatabaseError { } } -impl core::error::Error for StorageRootError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Database(source) => core::error::Error::source(source), - } - } -} - /// State proof errors. -#[derive(Display, From, Debug, PartialEq, Eq, Clone)] +#[derive(Error, PartialEq, Eq, Clone, Debug)] pub enum StateProofError { /// Internal database error. - Database(DatabaseError), + #[error(transparent)] + Database(#[from] DatabaseError), /// RLP decoding error. - Rlp(alloy_rlp::Error), + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), } impl From for ProviderError { @@ -74,33 +62,76 @@ impl From for ProviderError { } } -impl core::error::Error for StateProofError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Database(source) => core::error::Error::source(source), - Self::Rlp(source) => core::error::Error::source(source), - } - } +/// Result type with [`SparseStateTrieError`] as error. +pub type SparseStateTrieResult = Result; + +/// Error encountered in `SparseStateTrie`. +#[derive(Error, Debug)] +pub enum SparseStateTrieError { + /// Encountered invalid root node. + #[error("invalid root node at {path:?}: {node:?}")] + InvalidRootNode { + /// Path to first proof node. + path: Nibbles, + /// Encoded first proof node. + node: Bytes, + }, + /// Sparse trie error. + #[error(transparent)] + Sparse(#[from] SparseTrieError), + /// RLP error. + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), +} + +/// Result type with [`SparseTrieError`] as error. +pub type SparseTrieResult = Result; + +/// Error encountered in `SparseTrie`. +#[derive(Error, Debug)] +pub enum SparseTrieError { + /// Sparse trie is still blind. Thrown on attempt to update it. + #[error("sparse trie is blind")] + Blind, + /// Encountered blinded node on update. + #[error("attempted to update blind node at {path:?}: {hash}")] + BlindedNode { + /// Blind node path. + path: Nibbles, + /// Node hash + hash: B256, + }, + /// Encountered unexpected node at path when revealing. 
+ #[error("encountered an invalid node at path {path:?} when revealing: {node:?}")] + Reveal { + /// Path to the node. + path: Nibbles, + /// Node that was at the path when revealing. + node: Box, + }, + /// RLP error. + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), + /// Other. + #[error(transparent)] + Other(#[from] Box), } /// Trie witness errors. -#[derive(Display, From, Debug, PartialEq, Eq, Clone)] +#[derive(Error, Debug)] pub enum TrieWitnessError { /// Error gather proofs. - #[from] - Proof(StateProofError), + #[error(transparent)] + Proof(#[from] StateProofError), /// RLP decoding error. - #[from] - Rlp(alloy_rlp::Error), + #[error(transparent)] + Rlp(#[from] alloy_rlp::Error), + /// Sparse state trie error. + #[error(transparent)] + Sparse(#[from] SparseStateTrieError), /// Missing account. - #[display("missing account {_0}")] + #[error("missing account {_0}")] MissingAccount(B256), - /// Missing target node. - #[display("target node missing from proof {_0:?}")] - MissingTargetNode(Nibbles), - /// Unexpected empty root. - #[display("unexpected empty root: {_0:?}")] - UnexpectedEmptyRoot(Nibbles), } impl From for ProviderError { @@ -108,13 +139,3 @@ impl From for ProviderError { Self::TrieWitnessError(error.to_string()) } } - -impl core::error::Error for TrieWitnessError { - fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { - match self { - Self::Proof(source) => core::error::Error::source(source), - Self::Rlp(source) => core::error::Error::source(source), - _ => Option::None, - } - } -} diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index 9bd6537326b..c0ef2c5a694 100644 --- a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -12,12 +12,15 @@ workspace = true [dependencies] reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-execution-errors.workspace = true +reth-trie-common = { workspace = true, optional = true } reth-trie.workspace = true revm.workspace = true # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true @@ -25,7 +28,6 @@ serde = { workspace = true, optional = true } serde_with = { workspace = true, optional = true } [dev-dependencies] -alloy-eips.workspace = true arbitrary.workspace = true bincode.workspace = true rand.workspace = true @@ -34,6 +36,33 @@ reth-primitives = { workspace = true, features = ["arbitrary", "test-utils"] } [features] default = ["std"] optimism = ["reth-primitives/optimism", "revm/optimism"] -serde = ["dep:serde", "reth-trie/serde", "revm/serde"] -serde-bincode-compat = ["reth-primitives/serde-bincode-compat", "reth-trie/serde-bincode-compat", "serde_with"] -std = [] +serde = [ + "dep:serde", + "rand/serde", + "revm/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "reth-primitives-traits/serde", + "alloy-consensus/serde", + "reth-trie/serde", + "reth-trie-common?/serde" +] +serde-bincode-compat = [ + "serde", + "reth-trie-common/serde-bincode-compat", + "reth-primitives/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", + "serde_with", + "alloy-eips/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat", +] +std = [ + "reth-primitives/std", + "alloy-eips/std", + "alloy-primitives/std", + "revm/std", + "serde?/std", + "reth-primitives-traits/std", + "alloy-consensus/std", + "serde_with?/std" +] diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index d3ed2913ea3..cbdb2296bf6 100644 
--- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -2,14 +2,16 @@ use crate::ExecutionOutcome; use alloc::{borrow::Cow, collections::BTreeMap}; -use alloy_eips::{eip1898::ForkBlock, BlockNumHash}; +use alloy_consensus::BlockHeader; +use alloy_eips::{eip1898::ForkBlock, eip2718::Encodable2718, BlockNumHash}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash}; use core::{fmt, ops::RangeInclusive}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::{ - Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionSigned, - TransactionSignedEcRecovered, + transaction::SignedTransactionIntoRecoveredExt, RecoveredTx, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, }; +use reth_primitives_traits::{Block, BlockBody, NodePrimitives, SignedTransaction}; use reth_trie::updates::TrieUpdates; use revm::db::BundleState; @@ -25,34 +27,34 @@ use revm::db::BundleState; /// A chain of blocks should not be empty. #[derive(Clone, Debug, Default, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct Chain { +pub struct Chain { /// All blocks in this chain. - blocks: BTreeMap, + blocks: BTreeMap>, /// The outcome of block execution for this chain. /// /// This field contains the state of all accounts after the execution of all blocks in this /// chain, ranging from the [`Chain::first`] block to the [`Chain::tip`] block, inclusive. /// /// Additionally, it includes the individual state changes that led to the current state. - execution_outcome: ExecutionOutcome, + execution_outcome: ExecutionOutcome, /// State trie updates after block is added to the chain. /// NOTE: Currently, trie updates are present only for /// single-block chains that extend the canonical chain. trie_updates: Option, } -impl Chain { +impl Chain { /// Create new Chain from blocks and state. /// /// # Warning /// /// A chain of blocks should not be empty. pub fn new( - blocks: impl IntoIterator, - execution_outcome: ExecutionOutcome, + blocks: impl IntoIterator>, + execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { - let blocks = blocks.into_iter().map(|b| (b.number, b)).collect::>(); + let blocks = blocks.into_iter().map(|b| (b.number(), b)).collect::>(); debug_assert!(!blocks.is_empty(), "Chain should have at least one block"); Self { blocks, execution_outcome, trie_updates } @@ -60,25 +62,25 @@ impl Chain { /// Create new Chain from a single block and its state. pub fn from_block( - block: SealedBlockWithSenders, - execution_outcome: ExecutionOutcome, + block: SealedBlockWithSenders, + execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { Self::new([block], execution_outcome, trie_updates) } /// Get the blocks in this chain. - pub const fn blocks(&self) -> &BTreeMap { + pub const fn blocks(&self) -> &BTreeMap> { &self.blocks } /// Consumes the type and only returns the blocks in this chain. - pub fn into_blocks(self) -> BTreeMap { + pub fn into_blocks(self) -> BTreeMap> { self.blocks } /// Returns an iterator over all headers in the block with increasing block numbers. 
- pub fn headers(&self) -> impl Iterator + '_ { + pub fn headers(&self) -> impl Iterator> + '_ { self.blocks.values().map(|block| block.header.clone()) } @@ -93,12 +95,12 @@ impl Chain { } /// Get execution outcome of this chain - pub const fn execution_outcome(&self) -> &ExecutionOutcome { + pub const fn execution_outcome(&self) -> &ExecutionOutcome { &self.execution_outcome } /// Get mutable execution outcome of this chain - pub fn execution_outcome_mut(&mut self) -> &mut ExecutionOutcome { + pub fn execution_outcome_mut(&mut self) -> &mut ExecutionOutcome { &mut self.execution_outcome } @@ -119,12 +121,15 @@ impl Chain { } /// Returns the block with matching hash. - pub fn block(&self, block_hash: BlockHash) -> Option<&SealedBlock> { + pub fn block(&self, block_hash: BlockHash) -> Option<&SealedBlockFor> { self.block_with_senders(block_hash).map(|block| &block.block) } /// Returns the block with matching hash. - pub fn block_with_senders(&self, block_hash: BlockHash) -> Option<&SealedBlockWithSenders> { + pub fn block_with_senders( + &self, + block_hash: BlockHash, + ) -> Option<&SealedBlockWithSenders> { self.blocks.iter().find_map(|(_num, block)| (block.hash() == block_hash).then_some(block)) } @@ -132,8 +137,8 @@ impl Chain { pub fn execution_outcome_at_block( &self, block_number: BlockNumber, - ) -> Option { - if self.tip().number == block_number { + ) -> Option> { + if self.tip().number() == block_number { return Some(self.execution_outcome.clone()) } @@ -149,31 +154,34 @@ impl Chain { /// 1. The blocks contained in the chain. /// 2. The execution outcome representing the final state. /// 3. The optional trie updates. - pub fn into_inner(self) -> (ChainBlocks<'static>, ExecutionOutcome, Option) { + pub fn into_inner( + self, + ) -> (ChainBlocks<'static, N::Block>, ExecutionOutcome, Option) { (ChainBlocks { blocks: Cow::Owned(self.blocks) }, self.execution_outcome, self.trie_updates) } /// Destructure the chain into its inner components: /// 1. A reference to the blocks contained in the chain. /// 2. A reference to the execution outcome representing the final state. - pub const fn inner(&self) -> (ChainBlocks<'_>, &ExecutionOutcome) { + pub const fn inner(&self) -> (ChainBlocks<'_, N::Block>, &ExecutionOutcome) { (ChainBlocks { blocks: Cow::Borrowed(&self.blocks) }, &self.execution_outcome) } /// Returns an iterator over all the receipts of the blocks in the chain. - pub fn block_receipts_iter(&self) -> impl Iterator>> + '_ { + pub fn block_receipts_iter(&self) -> impl Iterator>> + '_ { self.execution_outcome.receipts().iter() } /// Returns an iterator over all blocks in the chain with increasing block number. - pub fn blocks_iter(&self) -> impl Iterator + '_ { + pub fn blocks_iter(&self) -> impl Iterator> + '_ { self.blocks().iter().map(|block| block.1) } /// Returns an iterator over all blocks and their receipts in the chain. pub fn blocks_and_receipts( &self, - ) -> impl Iterator>)> + '_ { + ) -> impl Iterator, &Vec>)> + '_ + { self.blocks_iter().zip(self.block_receipts_iter()) } @@ -181,7 +189,7 @@ impl Chain { #[track_caller] pub fn fork_block(&self) -> ForkBlock { let first = self.first(); - ForkBlock { number: first.number.saturating_sub(1), hash: first.parent_hash } + ForkBlock { number: first.number().saturating_sub(1), hash: first.parent_hash() } } /// Get the first block in this chain. @@ -190,7 +198,7 @@ impl Chain { /// /// If chain doesn't have any blocks. 
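`receipts_with_attachment` above now keys receipts by `tx.trie_hash()` from `alloy_eips::eip2718::Encodable2718` instead of a concrete `hash()` accessor. Assuming the trait's default behavior, the trie hash is the keccak256 of the type-prefixed EIP-2718 envelope; a sketch of that computation (the payload below is hypothetical, `alloy_primitives::keccak256` is real):

```rust
use alloy_primitives::{keccak256, B256};

/// Sketch: the hash that identifies a transaction in the tx/receipt tries is
/// the keccak256 of its EIP-2718 envelope, i.e. `type_byte || payload` for
/// typed transactions and the raw RLP for legacy ones.
fn trie_hash_of(encoded_2718: &[u8]) -> B256 {
    keccak256(encoded_2718)
}

fn main() {
    // Hypothetical envelope: an EIP-1559 (type 0x02) marker plus fake payload.
    let envelope = [&[0x02u8][..], b"example-payload"].concat();
    println!("{}", trie_hash_of(&envelope));
}
```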
#[track_caller] - pub fn first(&self) -> &SealedBlockWithSenders { + pub fn first(&self) -> &SealedBlockWithSenders { self.blocks.first_key_value().expect("Chain should have at least one block").1 } @@ -200,7 +208,7 @@ impl Chain { /// /// If chain doesn't have any blocks. #[track_caller] - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &SealedBlockWithSenders { self.blocks.last_key_value().expect("Chain should have at least one block").1 } @@ -215,11 +223,11 @@ impl Chain { /// /// If chain doesn't have any blocks. pub fn range(&self) -> RangeInclusive { - self.first().number..=self.tip().number + self.first().number()..=self.tip().number() } /// Get all receipts for the given block. - pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { + pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { let num = self.block_number(block_hash)?; self.execution_outcome.receipts_by_block(num).iter().map(Option::as_ref).collect() } @@ -227,15 +235,18 @@ impl Chain { /// Get all receipts with attachment. /// /// Attachment includes block number, block hash, transaction hash and transaction index. - pub fn receipts_with_attachment(&self) -> Vec { - let mut receipt_attach = Vec::new(); + pub fn receipts_with_attachment(&self) -> Vec> + where + N::SignedTx: Encodable2718, + { + let mut receipt_attach = Vec::with_capacity(self.blocks().len()); for ((block_num, block), receipts) in self.blocks().iter().zip(self.execution_outcome.receipts().iter()) { - let mut tx_receipts = Vec::new(); - for (tx, receipt) in block.body.transactions().zip(receipts.iter()) { + let mut tx_receipts = Vec::with_capacity(receipts.len()); + for (tx, receipt) in block.body.transactions().iter().zip(receipts.iter()) { tx_receipts.push(( - tx.hash(), + tx.trie_hash(), receipt.as_ref().expect("receipts have not been pruned").clone(), )); } @@ -249,10 +260,10 @@ impl Chain { /// This method assumes that blocks attachment to the chain has already been validated. pub fn append_block( &mut self, - block: SealedBlockWithSenders, - execution_outcome: ExecutionOutcome, + block: SealedBlockWithSenders, + execution_outcome: ExecutionOutcome, ) { - self.blocks.insert(block.number, block); + self.blocks.insert(block.number(), block); self.execution_outcome.extend(execution_outcome); self.trie_updates.take(); // reset } @@ -300,7 +311,7 @@ impl Chain { /// /// If chain doesn't have any blocks. #[track_caller] - pub fn split(mut self, split_at: ChainSplitTarget) -> ChainSplit { + pub fn split(mut self, split_at: ChainSplitTarget) -> ChainSplit { let chain_tip = *self.blocks.last_entry().expect("chain is never empty").key(); let block_number = match split_at { ChainSplitTarget::Hash(block_hash) => { @@ -372,22 +383,22 @@ impl fmt::Display for DisplayBlocksChain<'_> { /// All blocks in the chain #[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct ChainBlocks<'a> { - blocks: Cow<'a, BTreeMap>, +pub struct ChainBlocks<'a, B: Block> { + blocks: Cow<'a, BTreeMap>>, } -impl ChainBlocks<'_> { +impl>> ChainBlocks<'_, B> { /// Creates a consuming iterator over all blocks in the chain with increasing block number. /// /// Note: this always yields at least one block. #[inline] - pub fn into_blocks(self) -> impl Iterator { + pub fn into_blocks(self) -> impl Iterator> { self.blocks.into_owned().into_values() } /// Creates an iterator over all blocks in the chain with increasing block number. 
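`ChainBlocks` (a bit further down) wraps the block map in a `Cow` so the same accessors back both the borrowing `inner()` and the consuming `into_inner()`. A minimal sketch of that pattern, with `String` standing in for the block type:

```rust
use std::borrow::Cow;
use std::collections::BTreeMap;

/// Mirrors `ChainBlocks<'a, B>`: either borrows the chain's map or owns one.
struct Blocks<'a> {
    blocks: Cow<'a, BTreeMap<u64, String>>,
}

impl Blocks<'_> {
    /// Read-only access works identically for both cases.
    fn tip(&self) -> &String {
        self.blocks.last_key_value().expect("never empty").1
    }

    /// Consuming iteration clones only if the data was merely borrowed.
    fn into_blocks(self) -> impl Iterator<Item = String> {
        self.blocks.into_owned().into_values()
    }
}

fn main() {
    let map = BTreeMap::from([(1, "b1".to_string()), (2, "b2".to_string())]);
    let borrowed = Blocks { blocks: Cow::Borrowed(&map) };
    assert_eq!(borrowed.tip().as_str(), "b2");
    let owned = Blocks { blocks: Cow::Owned(map) };
    assert_eq!(owned.into_blocks().count(), 2);
}
```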
#[inline] - pub fn iter(&self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator)> { self.blocks.iter() } @@ -397,7 +408,7 @@ impl ChainBlocks<'_> { /// /// Chains always have at least one block. #[inline] - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &SealedBlockWithSenders { self.blocks.last_key_value().expect("Chain should have at least one block").1 } @@ -407,44 +418,44 @@ impl ChainBlocks<'_> { /// /// Chains always have at least one block. #[inline] - pub fn first(&self) -> &SealedBlockWithSenders { + pub fn first(&self) -> &SealedBlockWithSenders { self.blocks.first_key_value().expect("Chain should have at least one block").1 } /// Returns an iterator over all transactions in the chain. #[inline] - pub fn transactions(&self) -> impl Iterator + '_ { - self.blocks.values().flat_map(|block| block.body.transactions()) + pub fn transactions(&self) -> impl Iterator::Transaction> + '_ { + self.blocks.values().flat_map(|block| block.body.transactions().iter()) } /// Returns an iterator over all transactions and their senders. #[inline] pub fn transactions_with_sender( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator::Transaction)> + '_ { self.blocks.values().flat_map(|block| block.transactions_with_sender()) } - /// Returns an iterator over all [`TransactionSignedEcRecovered`] in the blocks + /// Returns an iterator over all [`RecoveredTx`] in the blocks /// /// Note: This clones the transactions since it is assumed this is part of a shared [Chain]. #[inline] pub fn transactions_ecrecovered( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator::Transaction>> + '_ { self.transactions_with_sender().map(|(signer, tx)| tx.clone().with_signer(*signer)) } /// Returns an iterator over all transaction hashes in the block #[inline] pub fn transaction_hashes(&self) -> impl Iterator + '_ { - self.blocks.values().flat_map(|block| block.transactions().map(|tx| tx.hash)) + self.blocks.values().flat_map(|block| block.transactions().iter().map(|tx| tx.trie_hash())) } } -impl IntoIterator for ChainBlocks<'_> { - type Item = (BlockNumber, SealedBlockWithSenders); - type IntoIter = std::collections::btree_map::IntoIter; +impl IntoIterator for ChainBlocks<'_, B> { + type Item = (BlockNumber, SealedBlockWithSenders); + type IntoIter = std::collections::btree_map::IntoIter>; fn into_iter(self) -> Self::IntoIter { #[allow(clippy::unnecessary_to_owned)] @@ -453,12 +464,12 @@ impl IntoIterator for ChainBlocks<'_> { } /// Used to hold receipts and their attachment. -#[derive(Default, Clone, Debug)] -pub struct BlockReceipts { +#[derive(Default, Clone, Debug, PartialEq, Eq)] +pub struct BlockReceipts { /// Block identifier pub block: BlockNumHash, /// Transaction identifier and receipt. - pub tx_receipts: Vec<(TxHash, Receipt)>, + pub tx_receipts: Vec<(TxHash, T)>, } /// The target block where the chain should be split. @@ -484,42 +495,42 @@ impl From for ChainSplitTarget { /// Result of a split chain. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum ChainSplit { +pub enum ChainSplit { /// Chain is not split. Pending chain is returned. /// Given block split is higher than last block. /// Or in case of split by hash when hash is unknown. - NoSplitPending(Chain), + NoSplitPending(Chain), /// Chain is not split. Canonical chain is returned. /// Given block split is lower than first block. 
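`Chain::split` (seen above) partitions the ordered block map at a target number into a canonical lower half, which keeps the target block, and a pending upper half. `BTreeMap::split_off` expresses the core of that operation, sketched here; reth's real version also splits the execution outcome and handles the two no-split cases:

```rust
use std::collections::BTreeMap;

fn main() {
    let mut blocks: BTreeMap<u64, &str> =
        BTreeMap::from([(1, "b1"), (2, "b2"), (3, "b3"), (4, "b4")]);

    // Split so that blocks <= 2 are canonical and blocks > 2 stay pending.
    let split_at = 2u64;
    let pending = blocks.split_off(&(split_at + 1)); // keeps keys >= 3
    let canonical = blocks; // retains keys <= 2, including the target

    assert_eq!(canonical.keys().copied().collect::<Vec<_>>(), vec![1, 2]);
    assert_eq!(pending.keys().copied().collect::<Vec<_>>(), vec![3, 4]);
}
```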
- NoSplitCanonical(Chain), + NoSplitCanonical(Chain), /// Chain is split into two: `[canonical]` and `[pending]` /// The target of this chain split [`ChainSplitTarget`] belongs to the `canonical` chain. Split { /// Contains lower block numbers that are considered canonicalized. It ends with /// the [`ChainSplitTarget`] block. The state of this chain is now empty and no longer /// usable. - canonical: Chain, + canonical: Chain, /// Right contains all subsequent blocks __after__ the [`ChainSplitTarget`] that are still /// pending. /// /// The state of the original chain is moved here. - pending: Chain, + pending: Chain, }, } /// Bincode-compatible [`Chain`] serde implementation. -#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +#[cfg(feature = "serde-bincode-compat")] pub(super) mod serde_bincode_compat { - use std::collections::BTreeMap; - + use crate::ExecutionOutcome; use alloc::borrow::Cow; use alloy_primitives::BlockNumber; - use reth_primitives::serde_bincode_compat::SealedBlockWithSenders; - use reth_trie::serde_bincode_compat::updates::TrieUpdates; + use reth_primitives::{ + serde_bincode_compat::SealedBlockWithSenders, EthPrimitives, NodePrimitives, + }; + use reth_trie_common::serde_bincode_compat::updates::TrieUpdates; use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; - - use crate::ExecutionOutcome; + use std::collections::BTreeMap; /// Bincode-compatible [`super::Chain`] serde implementation. /// @@ -537,18 +548,24 @@ pub(super) mod serde_bincode_compat { /// } /// ``` #[derive(Debug, Serialize, Deserialize)] - pub struct Chain<'a> { - blocks: SealedBlocksWithSenders<'a>, - execution_outcome: Cow<'a, ExecutionOutcome>, + pub struct Chain<'a, N = EthPrimitives> + where + N: NodePrimitives, + { + blocks: SealedBlocksWithSenders<'a, N::Block>, + execution_outcome: Cow<'a, ExecutionOutcome>, trie_updates: Option>, } #[derive(Debug)] - struct SealedBlocksWithSenders<'a>( - Cow<'a, BTreeMap>, + struct SealedBlocksWithSenders<'a, B: reth_primitives_traits::Block>( + Cow<'a, BTreeMap>>, ); - impl Serialize for SealedBlocksWithSenders<'_> { + impl Serialize for SealedBlocksWithSenders<'_, B> + where + B: reth_primitives_traits::Block, + { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -563,20 +580,26 @@ pub(super) mod serde_bincode_compat { } } - impl<'de> Deserialize<'de> for SealedBlocksWithSenders<'_> { + impl<'de, B> Deserialize<'de> for SealedBlocksWithSenders<'_, B> + where + B: reth_primitives_traits::Block, + { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { Ok(Self(Cow::Owned( - BTreeMap::>::deserialize(deserializer) + BTreeMap::>::deserialize(deserializer) .map(|blocks| blocks.into_iter().map(|(n, b)| (n, b.into())).collect())?, ))) } } - impl<'a> From<&'a super::Chain> for Chain<'a> { - fn from(value: &'a super::Chain) -> Self { + impl<'a, N> From<&'a super::Chain> for Chain<'a, N> + where + N: NodePrimitives, + { + fn from(value: &'a super::Chain) -> Self { Self { blocks: SealedBlocksWithSenders(Cow::Borrowed(&value.blocks)), execution_outcome: Cow::Borrowed(&value.execution_outcome), @@ -585,8 +608,11 @@ pub(super) mod serde_bincode_compat { } } - impl<'a> From> for super::Chain { - fn from(value: Chain<'a>) -> Self { + impl<'a, N> From> for super::Chain + where + N: NodePrimitives, + { + fn from(value: Chain<'a, N>) -> Self { Self { blocks: value.blocks.0.into_owned(), execution_outcome: value.execution_outcome.into_owned(), @@ -660,7 
+686,7 @@ mod tests { #[test] fn chain_append() { - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); let block1_hash = B256::new([0x01; 32]); let block2_hash = B256::new([0x02; 32]); let block3_hash = B256::new([0x03; 32]); @@ -678,7 +704,7 @@ mod tests { block3.set_parent_hash(block2_hash); - let mut chain1 = + let mut chain1: Chain = Chain { blocks: BTreeMap::from([(1, block1), (2, block2)]), ..Default::default() }; let chain2 = @@ -692,7 +718,7 @@ mod tests { #[test] fn test_number_split() { - let execution_outcome1 = ExecutionOutcome::new( + let execution_outcome1: ExecutionOutcome = ExecutionOutcome::new( BundleState::new( vec![( Address::new([2; 20]), @@ -724,13 +750,13 @@ mod tests { vec![], ); - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); let block1_hash = B256::new([15; 32]); block1.set_block_number(1); block1.set_hash(block1_hash); block1.senders.push(Address::new([4; 20])); - let mut block2 = SealedBlockWithSenders::default(); + let mut block2: SealedBlockWithSenders = Default::default(); let block2_hash = B256::new([16; 32]); block2.set_block_number(2); block2.set_hash(block2_hash); @@ -739,7 +765,8 @@ mod tests { let mut block_state_extended = execution_outcome1; block_state_extended.extend(execution_outcome2); - let chain = Chain::new(vec![block1.clone(), block2.clone()], block_state_extended, None); + let chain: Chain = + Chain::new(vec![block1.clone(), block2.clone()], block_state_extended, None); let (split1_execution_outcome, split2_execution_outcome) = chain.execution_outcome.clone().split_at(2); @@ -793,7 +820,7 @@ mod tests { use reth_primitives::{Receipt, Receipts, TxType}; // Create a default SealedBlockWithSenders object - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); // Define block hashes for block1 and block2 let block1_hash = B256::new([0x01; 32]); @@ -838,7 +865,7 @@ mod tests { // Create a Chain object with a BTreeMap of blocks mapped to their block numbers, // including block1_hash and block2_hash, and the execution_outcome - let chain = Chain { + let chain: Chain = Chain { blocks: BTreeMap::from([(10, block1), (11, block2)]), execution_outcome: execution_outcome.clone(), ..Default::default() diff --git a/crates/evm/execution-types/src/execute.rs b/crates/evm/execution-types/src/execute.rs index 0cf5d705079..ae5ad2c0b7c 100644 --- a/crates/evm/execution-types/src/execute.rs +++ b/crates/evm/execution-types/src/execute.rs @@ -1,5 +1,5 @@ +use alloy_eips::eip7685::Requests; use alloy_primitives::U256; -use reth_primitives::Request; use revm::db::BundleState; /// A helper type for ethereum block inputs that consists of a block and the total difficulty. @@ -33,8 +33,8 @@ pub struct BlockExecutionOutput { pub state: BundleState, /// All the receipts of the transactions in the block. pub receipts: Vec, - /// All the EIP-7685 requests of the transactions in the block. - pub requests: Vec, + /// All the EIP-7685 requests in the block. + pub requests: Requests, /// The total gas used by the block. 
pub gas_used: u64, } diff --git a/crates/evm/execution-types/src/execution_outcome.rs b/crates/evm/execution-types/src/execution_outcome.rs index 08ddf9e4167..830508dc92d 100644 --- a/crates/evm/execution-types/src/execution_outcome.rs +++ b/crates/evm/execution-types/src/execution_outcome.rs @@ -1,7 +1,9 @@ use crate::BlockExecutionOutput; -use alloy_primitives::{Address, BlockNumber, Bloom, Log, B256, U256}; -use reth_primitives::{logs_bloom, Account, Bytecode, Receipt, Receipts, Requests, StorageEntry}; -use reth_trie::HashedPostState; +use alloy_eips::eip7685::Requests; +use alloy_primitives::{logs_bloom, Address, BlockNumber, Bloom, Log, B256, U256}; +use reth_primitives::Receipts; +use reth_primitives_traits::{receipt::ReceiptExt, Account, Bytecode, Receipt, StorageEntry}; +use reth_trie::{HashedPostState, KeyHasher}; use revm::{ db::{states::BundleState, BundleAccount}, primitives::AccountInfo, @@ -32,7 +34,7 @@ impl ChangedAccount { /// blocks, capturing the resulting state, receipts, and requests following the execution. #[derive(Default, Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct ExecutionOutcome { +pub struct ExecutionOutcome { /// Bundle state with reverts. pub bundle: BundleState, /// The collection of receipts. @@ -40,7 +42,7 @@ pub struct ExecutionOutcome { /// The inner vector stores receipts ordered by transaction number. /// /// If receipt is None it means it is pruned. - pub receipts: Receipts, + pub receipts: Receipts, /// First block of bundle state. pub first_block: BlockNumber, /// The collection of EIP-7685 requests. @@ -62,14 +64,14 @@ pub type AccountRevertInit = (Option>, Vec); /// Type used to initialize revms reverts. pub type RevertsInit = HashMap>; -impl ExecutionOutcome { +impl ExecutionOutcome { /// Creates a new `ExecutionOutcome`. /// /// This constructor initializes a new `ExecutionOutcome` instance with the provided /// bundle state, receipts, first block number, and EIP-7685 requests. pub const fn new( bundle: BundleState, - receipts: Receipts, + receipts: Receipts, first_block: BlockNumber, requests: Vec, ) -> Self { @@ -84,7 +86,7 @@ impl ExecutionOutcome { state_init: BundleStateInit, revert_init: RevertsInit, contracts_init: impl IntoIterator, - receipts: Receipts, + receipts: Receipts, first_block: BlockNumber, requests: Vec, ) -> Self { @@ -145,7 +147,7 @@ impl ExecutionOutcome { /// Get account if account is known. pub fn account(&self, address: &Address) -> Option> { - self.bundle.account(address).map(|a| a.info.clone().map(Into::into)) + self.bundle.account(address).map(|a| a.info.as_ref().map(Into::into)) } /// Get storage if value is known. @@ -162,12 +164,12 @@ impl ExecutionOutcome { /// Returns [`HashedPostState`] for this execution outcome. /// See [`HashedPostState::from_bundle_state`] for more info. - pub fn hash_state_slow(&self) -> HashedPostState { - HashedPostState::from_bundle_state(&self.bundle.state) + pub fn hash_state_slow(&self) -> HashedPostState { + HashedPostState::from_bundle_state::(&self.bundle.state) } /// Transform block number to the index of block. - fn block_number_to_index(&self, block_number: BlockNumber) -> Option { + pub fn block_number_to_index(&self, block_number: BlockNumber) -> Option { if self.first_block > block_number { return None } @@ -178,53 +180,29 @@ impl ExecutionOutcome { Some(index as usize) } - /// Returns an iterator over all block logs. 
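In the `execution_outcome.rs` hunks that follow, `ExecutionOutcome` becomes generic over the receipt type `T`, and `block_number_to_index` (now `pub`) maps absolute block numbers onto the per-block receipt vectors relative to `first_block`. A small worked sketch of that index arithmetic:

```rust
/// Sketch of the `block_number_to_index` arithmetic: receipts are stored one
/// vector per block starting at `first_block`, so block `n` lives at index
/// `n - first_block` whenever it falls inside the recorded range.
fn block_number_to_index(first_block: u64, len: usize, block_number: u64) -> Option<usize> {
    if first_block > block_number {
        return None; // before the recorded range
    }
    let index = block_number - first_block;
    if index >= len as u64 {
        return None; // past the recorded range
    }
    Some(index as usize)
}

fn main() {
    // An outcome covering blocks 123..=125 (three receipt vectors).
    assert_eq!(block_number_to_index(123, 3, 123), Some(0));
    assert_eq!(block_number_to_index(123, 3, 125), Some(2));
    assert_eq!(block_number_to_index(123, 3, 122), None);
    assert_eq!(block_number_to_index(123, 3, 126), None);
}
```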
- pub fn logs(&self, block_number: BlockNumber) -> Option> { - let index = self.block_number_to_index(block_number)?; - Some(self.receipts[index].iter().filter_map(|r| Some(r.as_ref()?.logs.iter())).flatten()) - } - - /// Return blocks logs bloom - pub fn block_logs_bloom(&self, block_number: BlockNumber) -> Option { - Some(logs_bloom(self.logs(block_number)?)) - } - - /// Returns the receipt root for all recorded receipts. - /// Note: this function calculated Bloom filters for every receipt and created merkle trees - /// of receipt. This is a expensive operation. - pub fn receipts_root_slow(&self, _block_number: BlockNumber) -> Option { - #[cfg(feature = "optimism")] - panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); - #[cfg(not(feature = "optimism"))] - self.receipts.root_slow( - self.block_number_to_index(_block_number)?, - reth_primitives::proofs::calculate_receipt_root_no_memo, - ) - } - /// Returns the receipt root for all recorded receipts. /// Note: this function calculated Bloom filters for every receipt and created merkle trees /// of receipt. This is a expensive operation. pub fn generic_receipts_root_slow( &self, block_number: BlockNumber, - f: impl FnOnce(&[&Receipt]) -> B256, + f: impl FnOnce(&[&T]) -> B256, ) -> Option { self.receipts.root_slow(self.block_number_to_index(block_number)?, f) } /// Returns reference to receipts. - pub const fn receipts(&self) -> &Receipts { + pub const fn receipts(&self) -> &Receipts { &self.receipts } /// Returns mutable reference to receipts. - pub fn receipts_mut(&mut self) -> &mut Receipts { + pub fn receipts_mut(&mut self) -> &mut Receipts { &mut self.receipts } /// Return all block receipts - pub fn receipts_by_block(&self, block_number: BlockNumber) -> &[Option] { + pub fn receipts_by_block(&self, block_number: BlockNumber) -> &[Option] { let Some(index) = self.block_number_to_index(block_number) else { return &[] }; &self.receipts[index] } @@ -276,7 +254,10 @@ impl ExecutionOutcome { /// # Panics /// /// If the target block number is not included in the state block range. - pub fn split_at(self, at: BlockNumber) -> (Option, Self) { + pub fn split_at(self, at: BlockNumber) -> (Option, Self) + where + T: Clone, + { if at == self.first_block { return (None, self) } @@ -328,7 +309,7 @@ impl ExecutionOutcome { } /// Create a new instance with updated receipts. - pub fn with_receipts(mut self, receipts: Receipts) -> Self { + pub fn with_receipts(mut self, receipts: Receipts) -> Self { self.receipts = receipts; self } @@ -351,13 +332,39 @@ impl ExecutionOutcome { } } -impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { - fn from(value: (BlockExecutionOutput, BlockNumber)) -> Self { +impl> ExecutionOutcome { + /// Returns an iterator over all block logs. + pub fn logs(&self, block_number: BlockNumber) -> Option> { + let index = self.block_number_to_index(block_number)?; + Some(self.receipts[index].iter().filter_map(|r| Some(r.as_ref()?.logs().iter())).flatten()) + } + + /// Returns the block's logs bloom + pub fn block_logs_bloom(&self, block_number: BlockNumber) -> Option { + Some(logs_bloom(self.logs(block_number)?)) + } + + /// Returns the receipt root for all recorded receipts. + /// Note: this function calculates Bloom filters for every receipt and creates merkle trees + /// of receipts. This is an expensive operation.
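With receipts generic, root computation is injected as a closure `f: impl FnOnce(&[&T]) -> B256`, so each network supplies its own encoding while the container only gathers the non-pruned receipts. A toy illustration of the injection pattern, using a fake sum in place of a real merkle root:

```rust
/// Toy version of `generic_receipts_root_slow`: the container stays agnostic
/// about the receipt type and the caller injects the root computation.
fn root_slow<T>(receipts: &[Option<T>], f: impl FnOnce(&[&T]) -> u64) -> u64 {
    // Collect only the receipts that have not been pruned (`Some`).
    let present: Vec<&T> = receipts.iter().filter_map(Option::as_ref).collect();
    f(&present)
}

fn main() {
    let receipts = vec![Some(3u64), None, Some(5u64)];
    // A fake "root" (sum), standing in for a real merkle-root closure.
    let root = root_slow(&receipts, |rs| rs.iter().map(|r| **r).sum());
    assert_eq!(root, 8);
}
```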
+ pub fn receipts_root_slow(&self, _block_number: BlockNumber) -> Option + where + T: ReceiptExt, + { + #[cfg(feature = "optimism")] + panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); + #[cfg(not(feature = "optimism"))] + self.receipts.root_slow(self.block_number_to_index(_block_number)?, T::receipts_root) + } +} + +impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { + fn from(value: (BlockExecutionOutput, BlockNumber)) -> Self { Self { bundle: value.0.state, receipts: Receipts::from(value.0.receipts), first_block: value.1, - requests: vec![Requests::from(value.0.requests)], + requests: vec![value.0.requests], } } } @@ -365,12 +372,17 @@ impl From<(BlockExecutionOutput, BlockNumber)> for ExecutionOutcome { #[cfg(test)] mod tests { use super::*; - use alloy_eips::{eip6110::DepositRequest, eip7002::WithdrawalRequest}; - use alloy_primitives::{Address, FixedBytes, LogData, B256}; - use reth_primitives::{Receipts, Request, Requests, TxType}; - use std::collections::HashMap; + #[cfg(not(feature = "optimism"))] + use alloy_primitives::bytes; + #[cfg(not(feature = "optimism"))] + use alloy_primitives::LogData; + use alloy_primitives::{Address, B256}; + use reth_primitives::Receipts; + #[cfg(not(feature = "optimism"))] + use reth_primitives::TxType; #[test] + #[cfg(not(feature = "optimism"))] fn test_initialisation() { // Create a new BundleState object with initial data let bundle = BundleState::new( @@ -381,41 +393,16 @@ mod tests { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; - // Create a Requests object with a vector of requests, including DepositRequest and - // WithdrawalRequest - let requests = vec![Requests(vec![ - Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([1; 48]), - withdrawal_credentials: B256::from([0; 32]), - amount: 1111, - signature: FixedBytes::<96>::from([2; 96]), - index: 222, - }), - Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([23; 48]), - withdrawal_credentials: B256::from([0; 32]), - amount: 34343, - signature: FixedBytes::<96>::from([43; 96]), - index: 1212, - }), - Request::WithdrawalRequest(WithdrawalRequest { - source_address: Address::from([1; 20]), - validator_pubkey: FixedBytes::<48>::from([10; 48]), - amount: 72, - }), - ])]; + // Create a Requests object with a vector of requests + let requests = vec![Requests::new(vec![bytes!("dead"), bytes!("beef"), bytes!("beebee")])]; // Define the first block number let first_block = 123; @@ -464,18 +451,15 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_block_number_to_index() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -502,18 +486,15 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_get_logs() { // Create a Receipts object with a vector of receipt vectors 
let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -537,18 +518,15 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_receipts_by_block() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; @@ -570,37 +548,30 @@ mod tests { // Assert that the receipts for block number 123 match the expected receipts assert_eq!( receipts_by_block, - vec![&Some(Receipt { + vec![&Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })] ); } #[test] + #[cfg(not(feature = "optimism"))] fn test_receipts_len() { // Create a Receipts object with a vector of receipt vectors let receipts = Receipts { - receipt_vec: vec![vec![Some(Receipt { + receipt_vec: vec![vec![Some(reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![Log::::default()], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), })]], }; // Create an empty Receipts object - let receipts_empty = Receipts { receipt_vec: vec![] }; + let receipts_empty: Receipts = Receipts { receipt_vec: vec![] }; // Define the first block number let first_block = 123; @@ -636,17 +607,14 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_revert_to() { // Create a random receipt object - let receipt = Receipt { + let receipt = reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create a Receipts object with a vector of receipt vectors @@ -657,17 +625,12 @@ mod tests { // Define the first block number let first_block = 123; - // Create a DepositRequest object with specific attributes. - let request = Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([1; 48]), - withdrawal_credentials: B256::from([0; 32]), - amount: 1111, - signature: FixedBytes::<96>::from([2; 96]), - index: 222, - }); + // Create a request. + let request = bytes!("deadbeef"); // Create a vector of Requests containing the request. - let requests = vec![Requests(vec![request]), Requests(vec![request])]; + let requests = + vec![Requests::new(vec![request.clone()]), Requests::new(vec![request.clone()])]; // Create a ExecutionOutcome object with the created bundle, receipts, requests, and // first_block @@ -681,7 +644,7 @@ mod tests { assert_eq!(exec_res.receipts, Receipts { receipt_vec: vec![vec![Some(receipt)]] }); // Assert that the requests are properly cut after reverting to the initial block number. 
- assert_eq!(exec_res.requests, vec![Requests(vec![request])]); + assert_eq!(exec_res.requests, vec![Requests::new(vec![request])]); // Assert that the revert_to method returns false when attempting to revert to a block // number greater than the initial block number. @@ -693,33 +656,24 @@ mod tests { } #[test] + #[cfg(not(feature = "optimism"))] fn test_extend_execution_outcome() { // Create a Receipt object with specific attributes. - let receipt = Receipt { + let receipt = reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create a Receipts object containing the receipt. let receipts = Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }; - // Create a DepositRequest object with specific attributes. - let request = Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([1; 48]), - withdrawal_credentials: B256::from([0; 32]), - amount: 1111, - signature: FixedBytes::<96>::from([2; 96]), - index: 222, - }); + // Create a request. + let request = bytes!("deadbeef"); // Create a vector of Requests containing the request. - let requests = vec![Requests(vec![request])]; + let requests = vec![Requests::new(vec![request.clone()])]; // Define the initial block number. let first_block = 123; @@ -739,24 +693,21 @@ mod tests { receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]] }, - requests: vec![Requests(vec![request]), Requests(vec![request])], + requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])], first_block: 123, } ); } #[test] + #[cfg(not(feature = "optimism"))] fn test_split_at_execution_outcome() { // Create a random receipt object - let receipt = Receipt { + let receipt = reth_primitives::Receipt { tx_type: TxType::Legacy, cumulative_gas_used: 46913, logs: vec![], success: true, - #[cfg(feature = "optimism")] - deposit_nonce: Some(18), - #[cfg(feature = "optimism")] - deposit_receipt_version: Some(34), }; // Create a Receipts object with a vector of receipt vectors @@ -771,18 +722,15 @@ mod tests { // Define the first block number let first_block = 123; - // Create a DepositRequest object with specific attributes. - let request = Request::DepositRequest(DepositRequest { - pubkey: FixedBytes::<48>::from([1; 48]), - withdrawal_credentials: B256::from([0; 32]), - amount: 1111, - signature: FixedBytes::<96>::from([2; 96]), - index: 222, - }); + // Create a request. + let request = bytes!("deadbeef"); // Create a vector of Requests containing the request. 
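The rewritten tests use `alloy_eips::eip7685::Requests`, which models each EIP-7685 request as an opaque, already-encoded byte blob instead of the old typed `Request` enum with decoded deposit and withdrawal structs. A sketch of constructing one, mirroring the tests above (assumes `alloy-eips` and `alloy-primitives` as dependencies):

```rust
use alloy_eips::eip7685::Requests;
use alloy_primitives::bytes;

fn main() {
    // Each entry is an opaque, already-encoded request payload; execution
    // code no longer decodes them into typed deposit/withdrawal structs.
    let requests = Requests::new(vec![bytes!("dead"), bytes!("beef")]);
    println!("{requests:?}");
}
```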
- let requests = - vec![Requests(vec![request]), Requests(vec![request]), Requests(vec![request])]; + let requests = vec![ + Requests::new(vec![request.clone()]), + Requests::new(vec![request.clone()]), + Requests::new(vec![request.clone()]), + ]; // Create a ExecutionOutcome object with the created bundle, receipts, requests, and // first_block @@ -796,7 +744,7 @@ mod tests { let lower_execution_outcome = ExecutionOutcome { bundle: Default::default(), receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }, - requests: vec![Requests(vec![request])], + requests: vec![Requests::new(vec![request.clone()])], first_block, }; @@ -806,7 +754,7 @@ mod tests { receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]], }, - requests: vec![Requests(vec![request]), Requests(vec![request])], + requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])], first_block: 124, }; @@ -863,7 +811,7 @@ mod tests { }, ); - let execution_outcome = ExecutionOutcome { + let execution_outcome: ExecutionOutcome = ExecutionOutcome { bundle: bundle_state, receipts: Receipts::default(), first_block: 0, diff --git a/crates/evm/execution-types/src/lib.rs b/crates/evm/execution-types/src/lib.rs index f98ebfe73a5..fb872cd596e 100644 --- a/crates/evm/execution-types/src/lib.rs +++ b/crates/evm/execution-types/src/lib.rs @@ -26,7 +26,7 @@ pub use execution_outcome::*; /// all fields are serialized. /// /// Read more: -#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] +#[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { pub use super::chain::serde_bincode_compat::*; } diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index 82f84301f03..4faeb1a7203 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -6,10 +6,8 @@ use crate::{ execute::{BatchExecutor, BlockExecutorProvider, Executor}, system_calls::OnStateHook, }; +use alloc::boxed::Box; use alloy_primitives::BlockNumber; -use reth_execution_errors::BlockExecutionError; -use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; -use reth_primitives::{BlockWithSenders, Receipt}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm_primitives::db::Database; @@ -21,8 +19,10 @@ use revm::State; impl BlockExecutorProvider for Either where A: BlockExecutorProvider, - B: BlockExecutorProvider, + B: BlockExecutorProvider, { + type Primitives = A::Primitives; + type Executor + Display>> = Either, B::Executor>; @@ -52,23 +52,20 @@ where impl Executor for Either where - A: for<'a> Executor< - DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = BlockExecutionOutput, - Error = BlockExecutionError, - >, - B: for<'a> Executor< - DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = BlockExecutionOutput, - Error = BlockExecutionError, - >, + A: Executor, + B: for<'a> Executor = A::Input<'a>, Output = A::Output, Error = A::Error>, DB: Database + Display>, { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = BlockExecutionOutput; - type Error = BlockExecutionError; + type Input<'a> = A::Input<'a>; + type Output = A::Output; + type Error = A::Error; + + fn init(&mut self, tx_env_overrides: Box) { + match self { + Self::Left(a) => a.init(tx_env_overrides), + Self::Right(b) => b.init(tx_env_overrides), + } + } fn execute(self, input: Self::Input<'_>) -> Result { match self { @@ -108,23 +105,13 @@ where impl BatchExecutor for 
Either where - A: for<'a> BatchExecutor< - DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = ExecutionOutcome, - Error = BlockExecutionError, - >, - B: for<'a> BatchExecutor< - DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = ExecutionOutcome, - Error = BlockExecutionError, - >, + A: BatchExecutor, + B: for<'a> BatchExecutor = A::Input<'a>, Output = A::Output, Error = A::Error>, DB: Database + Display>, { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = ExecutionOutcome; - type Error = BlockExecutionError; + type Input<'a> = A::Input<'a>; + type Output = A::Output; + type Error = A::Error; fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { match self { diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index f52325b43e8..8c3e0108fcc 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -1,22 +1,31 @@ //! Traits for execution. +use alloy_consensus::BlockHeader; // Re-export execution types pub use reth_execution_errors::{ BlockExecutionError, BlockValidationError, InternalBlockExecutionError, }; pub use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; +use reth_primitives_traits::Block as _; pub use reth_storage_errors::provider::ProviderError; -use crate::system_calls::OnStateHook; +use crate::{system_calls::OnStateHook, TxEnvOverrides}; use alloc::{boxed::Box, vec::Vec}; -use alloy_primitives::BlockNumber; -use core::{fmt::Display, marker::PhantomData}; +use alloy_eips::eip7685::Requests; +use alloy_primitives::{ + map::{DefaultHashBuilder, HashMap}, + Address, BlockNumber, +}; +use core::fmt::Display; use reth_consensus::ConsensusError; -use reth_primitives::{BlockWithSenders, Receipt, Request}; +use reth_primitives::{BlockWithSenders, NodePrimitives, Receipt}; use reth_prune_types::PruneModes; use reth_revm::batch::BlockBatchRecord; -use revm::{db::BundleState, State}; -use revm_primitives::{db::Database, U256}; +use revm::{ + db::{states::bundle_state::BundleRetention, BundleState}, + State, +}; +use revm_primitives::{db::Database, Account, AccountStatus, EvmState, U256}; /// A general purpose executor trait that executes an input (e.g. block) and produces an output /// (e.g. state changes and receipts). @@ -30,6 +39,9 @@ pub trait Executor { /// The error type returned by the executor. type Error; + /// Initialize the executor with the given transaction environment overrides. + fn init(&mut self, _tx_env_overrides: Box) {} + /// Consumes the type and executes the block. /// /// # Note @@ -123,6 +135,9 @@ pub trait BatchExecutor { /// A type that can create a new executor for block execution. pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { + /// Primitive types used by the executor. + type Primitives: NodePrimitives; + /// An executor that can execute a single block given a database. /// /// # Verification @@ -136,16 +151,22 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// the returned state. type Executor + Display>>: for<'a> Executor< DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = BlockExecutionOutput, + Input<'a> = BlockExecutionInput< + 'a, + BlockWithSenders<::Block>, + >, + Output = BlockExecutionOutput<::Receipt>, Error = BlockExecutionError, >; /// An executor that can execute a batch of blocks given a database.
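The `Either` impls above stop restating concrete input/output types and instead pin `B`'s associated types to `A`'s, then delegate arm-by-arm. The same pattern on a toy trait:

```rust
/// Toy executor trait with an associated output, standing in for `Executor`.
trait Exec {
    type Output;
    fn run(self, input: u64) -> Self::Output;
}

enum Either<A, B> {
    Left(A),
    Right(B),
}

/// `B` is constrained to match `A`'s associated types, so the `Either` impl
/// can simply reuse them and delegate per arm; no concrete types restated.
impl<A, B> Exec for Either<A, B>
where
    A: Exec,
    B: Exec<Output = A::Output>,
{
    type Output = A::Output;

    fn run(self, input: u64) -> Self::Output {
        match self {
            Self::Left(a) => a.run(input),
            Self::Right(b) => b.run(input),
        }
    }
}

struct Doubler;
impl Exec for Doubler {
    type Output = u64;
    fn run(self, input: u64) -> u64 {
        input * 2
    }
}

struct Incr;
impl Exec for Incr {
    type Output = u64;
    fn run(self, input: u64) -> u64 {
        input + 1
    }
}

fn main() {
    let e: Either<Doubler, Incr> = Either::Right(Incr);
    assert_eq!(e.run(5), 6);
}
```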
type BatchExecutor + Display>>: for<'a> BatchExecutor< DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, - Output = ExecutionOutcome, + Input<'a> = BlockExecutionInput< + 'a, + BlockWithSenders<::Block>, + >, + Output = ExecutionOutcome<::Receipt>, Error = BlockExecutionError, >; @@ -165,59 +186,86 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { DB: Database + Display>; } +/// Helper type for the output of executing a block. +#[derive(Debug, Clone)] +pub struct ExecuteOutput { + /// Receipts obtained after executing a block. + pub receipts: Vec, + /// Cumulative gas used in the block execution. + pub gas_used: u64, +} + /// Defines the strategy for executing a single block. -pub trait BlockExecutionStrategy { +pub trait BlockExecutionStrategy { + /// Database this strategy operates on. + type DB: Database; + + /// Primitive types used by the strategy. + type Primitives: NodePrimitives; + /// The error type returned by this strategy's methods. type Error: From + core::error::Error; + /// Initialize the strategy with the given transaction environment overrides. + fn init(&mut self, _tx_env_overrides: Box) {} + /// Applies any necessary changes before executing the block's transactions. fn apply_pre_execution_changes( &mut self, - block: &BlockWithSenders, + block: &BlockWithSenders<::Block>, total_difficulty: U256, ) -> Result<(), Self::Error>; /// Executes all transactions in the block. fn execute_transactions( &mut self, - block: &BlockWithSenders, + block: &BlockWithSenders<::Block>, total_difficulty: U256, - ) -> Result<(Vec, u64), Self::Error>; + ) -> Result::Receipt>, Self::Error>; /// Applies any necessary changes after executing the block's transactions. fn apply_post_execution_changes( &mut self, - block: &BlockWithSenders, + block: &BlockWithSenders<::Block>, total_difficulty: U256, - receipts: &[Receipt], - ) -> Result, Self::Error>; + receipts: &[::Receipt], + ) -> Result; /// Returns a reference to the current state. - fn state_ref(&self) -> &State; + fn state_ref(&self) -> &State; /// Returns a mutable reference to the current state. - fn state_mut(&mut self) -> &mut State; + fn state_mut(&mut self) -> &mut State; /// Sets a hook to be called after each state change during execution. - fn with_state_hook(&mut self, hook: Option>); + fn with_state_hook(&mut self, _hook: Option>) {} /// Returns the final bundle state. - fn finish(&mut self) -> BundleState; + fn finish(&mut self) -> BundleState { + self.state_mut().merge_transitions(BundleRetention::Reverts); + self.state_mut().take_bundle() + } /// Validate a block with regard to execution results. fn validate_block_post_execution( &self, - block: &BlockWithSenders, - receipts: &[Receipt], - requests: &[Request], - ) -> Result<(), ConsensusError>; + _block: &BlockWithSenders<::Block>, + _receipts: &[::Receipt], + _requests: &Requests, + ) -> Result<(), ConsensusError> { + Ok(()) + } } /// A strategy factory that can create block execution strategies. pub trait BlockExecutionStrategyFactory: Send + Sync + Clone + Unpin + 'static { + /// Primitive types used by the strategy. + type Primitives: NodePrimitives; + /// Associated strategy type. 
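`ExecuteOutput` above names what was previously an anonymous `(Vec<Receipt>, u64)` tuple, and `BasicBlockExecutor::execute` (further down) drives a `BlockExecutionStrategy` through a fixed sequence: pre-execution changes, transaction execution, post-execution changes, then `finish()`. A toy sketch of that driver shape, with simplified types in place of reth's:

```rust
/// Simplified stand-in for the named output struct.
struct ExecuteOutput {
    receipts: Vec<bool>,
    gas_used: u64,
}

/// Toy strategy trait mirroring the lifecycle hooks.
trait Strategy {
    fn apply_pre_execution_changes(&mut self);
    fn execute_transactions(&mut self) -> ExecuteOutput;
    fn apply_post_execution_changes(&mut self, receipts: &[bool]) -> Vec<u8>;
    fn finish(&mut self) -> String; // stands in for the bundle state
}

/// Mirrors `BasicBlockExecutor::execute`: the executor owns only the
/// sequencing; all block-type-specific logic lives in the strategy.
fn execute<S: Strategy>(mut strategy: S) -> (Vec<bool>, u64, Vec<u8>, String) {
    strategy.apply_pre_execution_changes();
    let ExecuteOutput { receipts, gas_used } = strategy.execute_transactions();
    let requests = strategy.apply_post_execution_changes(&receipts);
    let state = strategy.finish();
    (receipts, gas_used, requests, state)
}

struct Noop;
impl Strategy for Noop {
    fn apply_pre_execution_changes(&mut self) {}
    fn execute_transactions(&mut self) -> ExecuteOutput {
        ExecuteOutput { receipts: vec![true], gas_used: 21_000 }
    }
    fn apply_post_execution_changes(&mut self, _receipts: &[bool]) -> Vec<u8> {
        vec![]
    }
    fn finish(&mut self) -> String {
        "bundle".into()
    }
}

fn main() {
    let (receipts, gas, _requests, state) = execute(Noop);
    assert_eq!((receipts.len(), gas, state.as_str()), (1, 21_000, "bundle"));
}
```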
type Strategy + Display>>: BlockExecutionStrategy< - DB, + DB = DB, + Primitives = Self::Primitives, Error = BlockExecutionError, >; @@ -253,11 +301,13 @@ impl BlockExecutorProvider for BasicBlockExecutorProvider where F: BlockExecutionStrategyFactory, { + type Primitives = F::Primitives; + type Executor + Display>> = - BasicBlockExecutor, DB>; + BasicBlockExecutor>; type BatchExecutor + Display>> = - BasicBatchExecutor, DB>; + BasicBatchExecutor>; fn executor(&self, db: DB) -> Self::Executor where @@ -280,39 +330,38 @@ where /// A generic block executor that uses a [`BlockExecutionStrategy`] to /// execute blocks. #[allow(missing_debug_implementations, dead_code)] -pub struct BasicBlockExecutor -where - S: BlockExecutionStrategy, -{ +pub struct BasicBlockExecutor { /// Block execution strategy. pub(crate) strategy: S, - _phantom: PhantomData, } -impl BasicBlockExecutor -where - S: BlockExecutionStrategy, -{ +impl BasicBlockExecutor { /// Creates a new `BasicBlockExecutor` with the given strategy. pub const fn new(strategy: S) -> Self { - Self { strategy, _phantom: PhantomData } + Self { strategy } } } -impl Executor for BasicBlockExecutor +impl Executor for BasicBlockExecutor where - S: BlockExecutionStrategy, + S: BlockExecutionStrategy, DB: Database + Display>, { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = BlockExecutionOutput; + type Input<'a> = + BlockExecutionInput<'a, BlockWithSenders<::Block>>; + type Output = BlockExecutionOutput<::Receipt>; type Error = S::Error; + fn init(&mut self, env_overrides: Box) { + self.strategy.init(env_overrides); + } + fn execute(mut self, input: Self::Input<'_>) -> Result { let BlockExecutionInput { block, total_difficulty } = input; self.strategy.apply_pre_execution_changes(block, total_difficulty)?; - let (receipts, gas_used) = self.strategy.execute_transactions(block, total_difficulty)?; + let ExecuteOutput { receipts, gas_used } = + self.strategy.execute_transactions(block, total_difficulty)?; let requests = self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; let state = self.strategy.finish(); @@ -331,7 +380,8 @@ where let BlockExecutionInput { block, total_difficulty } = input; self.strategy.apply_pre_execution_changes(block, total_difficulty)?; - let (receipts, gas_used) = self.strategy.execute_transactions(block, total_difficulty)?; + let ExecuteOutput { receipts, gas_used } = + self.strategy.execute_transactions(block, total_difficulty)?; let requests = self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; @@ -355,7 +405,8 @@ where self.strategy.with_state_hook(Some(Box::new(state_hook))); self.strategy.apply_pre_execution_changes(block, total_difficulty)?; - let (receipts, gas_used) = self.strategy.execute_transactions(block, total_difficulty)?; + let ExecuteOutput { receipts, gas_used } = + self.strategy.execute_transactions(block, total_difficulty)?; let requests = self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; @@ -368,52 +419,56 @@ where /// A generic batch executor that uses a [`BlockExecutionStrategy`] to /// execute batches. #[allow(missing_debug_implementations)] -pub struct BasicBatchExecutor +pub struct BasicBatchExecutor where - S: BlockExecutionStrategy, + S: BlockExecutionStrategy, { /// Batch execution strategy. pub(crate) strategy: S, /// Keeps track of batch execution receipts and requests. 
- pub(crate) batch_record: BlockBatchRecord, - _phantom: PhantomData, + pub(crate) batch_record: BlockBatchRecord<::Receipt>, } -impl BasicBatchExecutor +impl BasicBatchExecutor where - S: BlockExecutionStrategy, + S: BlockExecutionStrategy, { /// Creates a new `BasicBatchExecutor` with the given strategy. - pub const fn new(strategy: S, batch_record: BlockBatchRecord) -> Self { - Self { strategy, batch_record, _phantom: PhantomData } + pub const fn new( + strategy: S, + batch_record: BlockBatchRecord<::Receipt>, + ) -> Self { + Self { strategy, batch_record } } } -impl BatchExecutor for BasicBatchExecutor +impl BatchExecutor for BasicBatchExecutor where - S: BlockExecutionStrategy, + S: BlockExecutionStrategy, DB: Database + Display>, { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = ExecutionOutcome; + type Input<'a> = + BlockExecutionInput<'a, BlockWithSenders<::Block>>; + type Output = ExecutionOutcome<::Receipt>; type Error = BlockExecutionError; fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { let BlockExecutionInput { block, total_difficulty } = input; if self.batch_record.first_block().is_none() { - self.batch_record.set_first_block(block.number); + self.batch_record.set_first_block(block.header().number()); } self.strategy.apply_pre_execution_changes(block, total_difficulty)?; - let (receipts, _gas_used) = self.strategy.execute_transactions(block, total_difficulty)?; + let ExecuteOutput { receipts, .. } = + self.strategy.execute_transactions(block, total_difficulty)?; let requests = self.strategy.apply_post_execution_changes(block, total_difficulty, &receipts)?; self.strategy.validate_block_post_execution(block, &receipts, &requests)?; // prepare the state according to the prune mode - let retention = self.batch_record.bundle_retention(block.number); + let retention = self.batch_record.bundle_retention(block.header().number()); self.strategy.state_mut().merge_transitions(retention); // store receipts in the set @@ -447,19 +502,58 @@ where } } +/// Creates an `EvmState` from a map of balance increments and the current state +/// to load accounts from. No balance increment is done in the function. +/// Zero balance increments are ignored and won't create state entries. 
+pub fn balance_increment_state( + balance_increments: &HashMap, + state: &mut State, +) -> Result +where + DB: Database, +{ + let mut load_account = |address: &Address| -> Result<(Address, Account), BlockExecutionError> { + let cache_account = state.load_cache_account(*address).map_err(|_| { + BlockExecutionError::msg("could not load account for balance increment") + })?; + + let account = cache_account.account.as_ref().ok_or_else(|| { + BlockExecutionError::msg("could not load account for balance increment") + })?; + + Ok(( + *address, + Account { + info: account.info.clone(), + storage: Default::default(), + status: AccountStatus::Touched, + }, + )) + }; + + balance_increments + .iter() + .filter(|(_, &balance)| balance != 0) + .map(|(addr, _)| load_account(addr)) + .collect::>() +} + #[cfg(test)] mod tests { use super::*; - use alloy_eips::eip6110::DepositRequest; use alloy_primitives::U256; + use core::marker::PhantomData; use reth_chainspec::{ChainSpec, MAINNET}; + use reth_primitives::EthPrimitives; use revm::db::{CacheDB, EmptyDBTyped}; + use revm_primitives::{address, bytes, AccountInfo, TxEnv, KECCAK_EMPTY}; use std::sync::Arc; #[derive(Clone, Default)] struct TestExecutorProvider; impl BlockExecutorProvider for TestExecutorProvider { + type Primitives = EthPrimitives; type Executor + Display>> = TestExecutor; type BatchExecutor + Display>> = TestExecutor; @@ -544,19 +638,20 @@ mod tests { _chain_spec: Arc, _evm_config: EvmConfig, state: State, - execute_transactions_result: (Vec, u64), - apply_post_execution_changes_result: Vec, + execute_transactions_result: ExecuteOutput, + apply_post_execution_changes_result: Requests, finish_result: BundleState, } #[derive(Clone)] struct TestExecutorStrategyFactory { - execute_transactions_result: (Vec, u64), - apply_post_execution_changes_result: Vec, + execute_transactions_result: ExecuteOutput, + apply_post_execution_changes_result: Requests, finish_result: BundleState, } impl BlockExecutionStrategyFactory for TestExecutorStrategyFactory { + type Primitives = EthPrimitives; type Strategy + Display>> = TestExecutorStrategy; @@ -583,7 +678,12 @@ mod tests { } } - impl BlockExecutionStrategy for TestExecutorStrategy { + impl BlockExecutionStrategy for TestExecutorStrategy + where + DB: Database, + { + type DB = DB; + type Primitives = EthPrimitives; type Error = BlockExecutionError; fn apply_pre_execution_changes( @@ -598,7 +698,7 @@ mod tests { &mut self, _block: &BlockWithSenders, _total_difficulty: U256, - ) -> Result<(Vec, u64), Self::Error> { + ) -> Result, Self::Error> { Ok(self.execute_transactions_result.clone()) } @@ -607,7 +707,7 @@ mod tests { _block: &BlockWithSenders, _total_difficulty: U256, _receipts: &[Receipt], - ) -> Result, Self::Error> { + ) -> Result { Ok(self.apply_post_execution_changes_result.clone()) } @@ -629,7 +729,7 @@ mod tests { &self, _block: &BlockWithSenders, _receipts: &[Receipt], - _requests: &[Request], + _requests: &Requests, ) -> Result<(), ConsensusError> { Ok(()) } @@ -650,9 +750,11 @@ mod tests { fn test_strategy() { let expected_gas_used = 10; let expected_receipts = vec![Receipt::default()]; - let expected_execute_transactions_result = (expected_receipts.clone(), expected_gas_used); - let expected_apply_post_execution_changes_result = - vec![Request::DepositRequest(DepositRequest::default())]; + let expected_execute_transactions_result = ExecuteOutput:: { + receipts: expected_receipts.clone(), + gas_used: expected_gas_used, + }; + let expected_apply_post_execution_changes_result = 
Requests::new(vec![bytes!("deadbeef")]); let expected_finish_result = BundleState::default(); let strategy_factory = TestExecutorStrategyFactory { @@ -673,4 +775,114 @@ mod tests { assert_eq!(block_execution_output.requests, expected_apply_post_execution_changes_result); assert_eq!(block_execution_output.state, expected_finish_result); } + + #[test] + fn test_tx_env_overrider() { + let strategy_factory = TestExecutorStrategyFactory { + execute_transactions_result: ExecuteOutput { + receipts: vec![Receipt::default()], + gas_used: 10, + }, + apply_post_execution_changes_result: Requests::new(vec![bytes!("deadbeef")]), + finish_result: BundleState::default(), + }; + let provider = BasicBlockExecutorProvider::new(strategy_factory); + let db = CacheDB::>::default(); + + // if we want to apply tx env overrides the executor must be mut. + let mut executor = provider.executor(db); + // execute consumes the executor, so we can only call it once. + // let result = executor.execute(BlockExecutionInput::new(&Default::default(), U256::ZERO)); + executor.init(Box::new(|tx_env: &mut TxEnv| { + tx_env.nonce.take(); + })); + let result = executor.execute(BlockExecutionInput::new(&Default::default(), U256::ZERO)); + assert!(result.is_ok()); + } + + fn setup_state_with_account( + addr: Address, + balance: u128, + nonce: u64, + ) -> State>> { + let db = CacheDB::>::default(); + let mut state = State::builder().with_database(db).with_bundle_update().build(); + + let account_info = AccountInfo { + balance: U256::from(balance), + nonce, + code_hash: KECCAK_EMPTY, + code: None, + }; + state.insert_account(addr, account_info); + state + } + + #[test] + fn test_balance_increment_state_zero() { + let addr = address!("1000000000000000000000000000000000000000"); + let mut state = setup_state_with_account(addr, 100, 1); + + let mut increments = HashMap::::default(); + increments.insert(addr, 0); + + let result = balance_increment_state(&increments, &mut state).unwrap(); + assert!(result.is_empty(), "Zero increments should be ignored"); + } + + #[test] + fn test_balance_increment_state_empty_increments_map() { + let mut state = State::builder() + .with_database(CacheDB::>::default()) + .with_bundle_update() + .build(); + + let increments = HashMap::::default(); + let result = balance_increment_state(&increments, &mut state).unwrap(); + assert!(result.is_empty(), "Empty increments map should return empty state"); + } + + #[test] + fn test_balance_increment_state_multiple_valid_increments() { + let addr1 = address!("1000000000000000000000000000000000000000"); + let addr2 = address!("2000000000000000000000000000000000000000"); + + let mut state = setup_state_with_account(addr1, 100, 1); + + let account2 = + AccountInfo { balance: U256::from(200), nonce: 1, code_hash: KECCAK_EMPTY, code: None }; + state.insert_account(addr2, account2); + + let mut increments = HashMap::::default(); + increments.insert(addr1, 50); + increments.insert(addr2, 100); + + let result = balance_increment_state(&increments, &mut state).unwrap(); + + assert_eq!(result.len(), 2); + assert_eq!(result.get(&addr1).unwrap().info.balance, U256::from(100)); + assert_eq!(result.get(&addr2).unwrap().info.balance, U256::from(200)); + } + + #[test] + fn test_balance_increment_state_mixed_zero_and_nonzero_increments() { + let addr1 = address!("1000000000000000000000000000000000000000"); + let addr2 = address!("2000000000000000000000000000000000000000"); + + let mut state = setup_state_with_account(addr1, 100, 1); + + let account2 = + AccountInfo { balance: 
U256::from(200), nonce: 1, code_hash: KECCAK_EMPTY, code: None }; + state.insert_account(addr2, account2); + + let mut increments = HashMap::::default(); + increments.insert(addr1, 0); + increments.insert(addr2, 100); + + let result = balance_increment_state(&increments, &mut state).unwrap(); + + assert_eq!(result.len(), 1, "Only non-zero increments should be included"); + assert!(!result.contains_key(&addr1), "Zero increment account should not be included"); + assert_eq!(result.get(&addr2).unwrap().info.balance, U256::from(200)); + } } diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 66026a07c94..29f6d7c6581 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -18,8 +18,8 @@ extern crate alloc; use crate::builder::RethEvmBuilder; +use alloy_consensus::BlockHeader as _; use alloy_primitives::{Address, Bytes, B256, U256}; -use reth_primitives::TransactionSigned; use reth_primitives_traits::BlockHeader; use revm::{Database, Evm, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv}; @@ -31,8 +31,8 @@ pub mod execute; pub mod metrics; pub mod noop; pub mod provider; +pub mod state_change; pub mod system_calls; - #[cfg(any(test, feature = "test-utils"))] /// test helpers for mocking executor pub mod test_utils; @@ -115,15 +115,21 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { /// The header type used by the EVM. type Header: BlockHeader; - /// Returns a [`TxEnv`] from a [`TransactionSigned`] and [`Address`]. - fn tx_env(&self, transaction: &TransactionSigned, signer: Address) -> TxEnv { + /// The transaction type. + type Transaction; + + /// The error type that is returned by [`Self::next_cfg_and_block_env`]. + type Error: core::error::Error + Send + Sync; + + /// Returns a [`TxEnv`] from a transaction and [`Address`]. + fn tx_env(&self, transaction: &Self::Transaction, signer: Address) -> TxEnv { let mut tx_env = TxEnv::default(); self.fill_tx_env(&mut tx_env, transaction, signer); tx_env } - /// Fill transaction environment from a [`TransactionSigned`] and the given sender address. - fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address); + /// Fill transaction environment from a transaction and the given sender address. + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &Self::Transaction, sender: Address); /// Fill transaction environment with a system contract call. fn fill_tx_env_system_contract_call( @@ -134,9 +140,16 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { data: Bytes, ); + /// Returns a [`CfgEnvWithHandlerCfg`] for the given header. + fn cfg_env(&self, header: &Self::Header, total_difficulty: U256) -> CfgEnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + self.fill_cfg_env(&mut cfg, header, total_difficulty); + cfg + } + /// Fill [`CfgEnvWithHandlerCfg`] fields according to the chain spec and given header. 
diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 66026a07c94..29f6d7c6581 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -18,8 +18,8 @@ extern crate alloc; use crate::builder::RethEvmBuilder; +use alloy_consensus::BlockHeader as _; use alloy_primitives::{Address, Bytes, B256, U256}; -use reth_primitives::TransactionSigned; use reth_primitives_traits::BlockHeader; use revm::{Database, Evm, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv}; @@ -31,8 +31,8 @@ pub mod execute; pub mod metrics; pub mod noop; pub mod provider; +pub mod state_change; pub mod system_calls; - #[cfg(any(test, feature = "test-utils"))] /// test helpers for mocking executor pub mod test_utils; @@ -115,15 +115,21 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { /// The header type used by the EVM. type Header: BlockHeader; - /// Returns a [`TxEnv`] from a [`TransactionSigned`] and [`Address`]. - fn tx_env(&self, transaction: &TransactionSigned, signer: Address) -> TxEnv { + /// The transaction type. + type Transaction; + + /// The error type that is returned by [`Self::next_cfg_and_block_env`]. + type Error: core::error::Error + Send + Sync; + + /// Returns a [`TxEnv`] from a transaction and [`Address`]. + fn tx_env(&self, transaction: &Self::Transaction, signer: Address) -> TxEnv { let mut tx_env = TxEnv::default(); self.fill_tx_env(&mut tx_env, transaction, signer); tx_env } - /// Fill transaction environment from a [`TransactionSigned`] and the given sender address. - fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address); + /// Fill transaction environment from a transaction and the given sender address. + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &Self::Transaction, sender: Address); /// Fill transaction environment with a system contract call. fn fill_tx_env_system_contract_call( &self, @@ -134,9 +140,16 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { data: Bytes, ); + /// Returns a [`CfgEnvWithHandlerCfg`] for the given header. + fn cfg_env(&self, header: &Self::Header, total_difficulty: U256) -> CfgEnvWithHandlerCfg { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + self.fill_cfg_env(&mut cfg, header, total_difficulty); + cfg + } + /// Fill [`CfgEnvWithHandlerCfg`] fields according to the chain spec and given header. /// - /// This must set the corresponding spec id in the handler cfg, based on timestamp or total + /// This __must__ set the corresponding spec id in the handler cfg, based on timestamp or total /// difficulty fn fill_cfg_env( &self, @@ -151,7 +164,7 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { block_env.coinbase = header.beneficiary(); block_env.timestamp = U256::from(header.timestamp()); if after_merge { - block_env.prevrandao = Some(header.mix_hash()); + block_env.prevrandao = header.mix_hash(); block_env.difficulty = U256::ZERO; } else { block_env.difficulty = header.difficulty(); @@ -166,6 +179,18 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { } } + /// Creates a new [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the given header. + fn cfg_and_block_env( + &self, + header: &Self::Header, + total_difficulty: U256, + ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); + let mut block_env = BlockEnv::default(); + self.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); + (cfg, block_env) + } + /// Convenience function to call both [`fill_cfg_env`](ConfigureEvmEnv::fill_cfg_env) and /// [`ConfigureEvmEnv::fill_block_env`]. /// @@ -191,7 +216,7 @@ pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv); + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error>; } /// Represents additional attributes required to configure the next block. @@ -207,3 +232,18 @@ pub struct NextBlockEnvAttributes { /// The randomness value for the next block. pub prev_randao: B256, } + +/// Function hook that allows modifying a transaction environment. +pub trait TxEnvOverrides { + /// Apply the overrides by modifying the given `TxEnv`. + fn apply(&mut self, env: &mut TxEnv); +} + +impl<F> TxEnvOverrides for F +where + F: FnMut(&mut TxEnv), +{ + fn apply(&mut self, env: &mut TxEnv) { + self(env) + } +}
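The new `TxEnvOverrides` hook is just a mutable closure over `TxEnv`, as the blanket impl above shows. A self-contained sketch of the same pattern, with a local stand-in type instead of revm's `TxEnv` (the `nonce` field and the executor wiring are assumptions for illustration):

/// Stand-in for revm's `TxEnv`; only the field we touch here.
#[derive(Default, Debug)]
struct TxEnv {
    nonce: Option<u64>,
}

/// Same shape as the trait added above: a reusable, mutable hook.
trait TxEnvOverrides {
    fn apply(&mut self, env: &mut TxEnv);
}

impl<F: FnMut(&mut TxEnv)> TxEnvOverrides for F {
    fn apply(&mut self, env: &mut TxEnv) {
        self(env)
    }
}

fn main() {
    // Mirrors the `test_tx_env_overrider` test above: drop the nonce check.
    let mut overrides: Box<dyn TxEnvOverrides> = Box::new(|env: &mut TxEnv| {
        env.nonce.take();
    });

    let mut env = TxEnv { nonce: Some(7) };
    overrides.apply(&mut env);
    assert_eq!(env.nonce, None);
}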
diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs index fbb2b858b15..1f21cb4d3a4 100644 --- a/crates/evm/src/metrics.rs +++ b/crates/evm/src/metrics.rs @@ -2,14 +2,40 @@ //! //! Block processing related to syncing should take care to update the metrics by using either //! [`ExecutorMetrics::execute_metered`] or [`ExecutorMetrics::metered_one`]. -use std::time::Instant; - +use crate::{execute::Executor, system_calls::OnStateHook}; +use alloy_consensus::BlockHeader; use metrics::{Counter, Gauge, Histogram}; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput}; use reth_metrics::Metrics; use reth_primitives::BlockWithSenders; +use revm_primitives::EvmState; +use std::time::Instant; + +/// Wrapper struct that combines metrics and state hook +struct MeteredStateHook { + metrics: ExecutorMetrics, + inner_hook: Box<dyn OnStateHook>, +} + +impl OnStateHook for MeteredStateHook { + fn on_state(&mut self, state: &EvmState) { + // Update the metrics for the number of accounts, storage slots and bytecodes loaded + let accounts = state.keys().len(); + let storage_slots = state.values().map(|account| account.storage.len()).sum::<usize>(); + let bytecodes = state + .values() + .filter(|account| !account.info.is_empty_code_hash()) + .collect::<Vec<_>>() + .len(); -use crate::execute::Executor; + self.metrics.accounts_loaded_histogram.record(accounts as f64); + self.metrics.storage_slots_loaded_histogram.record(storage_slots as f64); + self.metrics.bytecodes_loaded_histogram.record(bytecodes as f64); + + // Call the original state hook + self.inner_hook.on_state(state); + } +} /// Executor metrics. // TODO(onbjerg): add sload/sstore @@ -42,9 +68,10 @@ pub struct ExecutorMetrics { } impl ExecutorMetrics { - fn metered<F, R>(&self, block: &BlockWithSenders, f: F) -> R + fn metered<F, R, B>(&self, block: &BlockWithSenders<B>, f: F) -> R where F: FnOnce() -> R, + B: reth_primitives_traits::Block, { // Execute the block and record the elapsed time. let execute_start = Instant::now(); let execution_duration = execute_start.elapsed().as_secs_f64(); // Update gas metrics. - self.gas_processed_total.increment(block.gas_used); - self.gas_per_second.set(block.gas_used as f64 / execution_duration); + self.gas_processed_total.increment(block.header().gas_used()); + self.gas_per_second.set(block.header().gas_used() as f64 / execution_duration); self.execution_histogram.record(execution_duration); self.execution_duration.set(execution_duration); @@ -65,42 +92,33 @@ impl ExecutorMetrics { /// /// Compared to [`Self::metered_one`], this method additionally updates metrics for the number /// of accounts, storage slots and bytecodes loaded and updated. - pub fn execute_metered<'a, E, DB, O, Error>( + /// Execute the given block using the provided [`Executor`] and update metrics for the + /// execution. + pub fn execute_metered<'a, E, DB, O, Error, B>( &self, executor: E, - input: BlockExecutionInput<'a, BlockWithSenders>, + input: BlockExecutionInput<'a, BlockWithSenders<B>>, + state_hook: Box<dyn OnStateHook>, ) -> Result<BlockExecutionOutput<O>, Error> where E: Executor< DB, - Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, + Input<'a> = BlockExecutionInput<'a, BlockWithSenders<B>>, Output = BlockExecutionOutput<O>, Error = Error, >, + B: reth_primitives_traits::Block, { - let output = self.metered(input.block, || { - executor.execute_with_state_closure(input, |state: &revm::db::State<DB>| { - // Update the metrics for the number of accounts, storage slots and bytecodes - // loaded - let accounts = state.cache.accounts.len(); - let storage_slots = state .cache .accounts .values() .filter_map(|account| { account.account.as_ref().map(|account| account.storage.len()) }) .sum::<usize>(); - let bytecodes = state.cache.contracts.len(); - - // Record all state present in the cache state as loaded even though some might have - // been newly created.
- // TODO: Consider spitting these into loaded and newly created. - self.accounts_loaded_histogram.record(accounts as f64); - self.storage_slots_loaded_histogram.record(storage_slots as f64); - self.bytecodes_loaded_histogram.record(bytecodes as f64); - }) - })?; + // clone here is cheap, all the metrics are Option<Arc<_>>. additionally + // they are globally registered so that the data recorded in the hook will + // be accessible. + let wrapper = MeteredStateHook { metrics: self.clone(), inner_hook: state_hook }; + + // Store reference to block for metered + let block = input.block; + + // Use metered to execute and track timing/gas metrics + let output = self.metered(block, || executor.execute_with_state_hook(input, wrapper))?; // Update the metrics for the number of accounts, storage slots and bytecodes updated let accounts = output.state.state.len(); @@ -116,10 +134,177 @@ impl ExecutorMetrics { } /// Execute the given block and update metrics for the execution. - pub fn metered_one<F, R>(&self, input: BlockExecutionInput<'_, BlockWithSenders>, f: F) -> R + pub fn metered_one<F, R, B>( + &self, + input: BlockExecutionInput<'_, BlockWithSenders<B>>, + f: F, + ) -> R where - F: FnOnce(BlockExecutionInput<'_, BlockWithSenders>) -> R, + F: FnOnce(BlockExecutionInput<'_, BlockWithSenders<B>>) -> R, + B: reth_primitives_traits::Block, { self.metered(input.block, || f(input)) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_eips::eip7685::Requests; + use metrics_util::debugging::{DebugValue, DebuggingRecorder, Snapshotter}; + use revm::db::BundleState; + use revm_primitives::{ Account, AccountInfo, AccountStatus, EvmState, EvmStorage, EvmStorageSlot, B256, U256, }; + use std::sync::mpsc; + + /// A mock executor that simulates state changes + struct MockExecutor { + state: EvmState, + } + + impl Executor<()> for MockExecutor { + type Input<'a> + = BlockExecutionInput<'a, BlockWithSenders> + where + Self: 'a; + type Output = BlockExecutionOutput<()>; + type Error = std::convert::Infallible; + + fn execute(self, _input: Self::Input<'_>) -> Result<Self::Output, Self::Error> { + Ok(BlockExecutionOutput { + state: BundleState::default(), + receipts: vec![], + requests: Requests::default(), + gas_used: 0, + }) + } + fn execute_with_state_closure<F>( + self, + _input: Self::Input<'_>, + _state: F, + ) -> Result<Self::Output, Self::Error> + where + F: FnMut(&revm::State<()>), + { + Ok(BlockExecutionOutput { + state: BundleState::default(), + receipts: vec![], + requests: Requests::default(), + gas_used: 0, + }) + } + fn execute_with_state_hook<F>( + self, + _input: Self::Input<'_>, + mut hook: F, + ) -> Result<Self::Output, Self::Error> + where + F: OnStateHook + 'static, + { + // Call hook with our mock state + hook.on_state(&self.state); + + Ok(BlockExecutionOutput { + state: BundleState::default(), + receipts: vec![], + requests: Requests::default(), + gas_used: 0, + }) + } + } + + struct ChannelStateHook { + output: i32, + sender: mpsc::Sender<i32>, + } + + impl OnStateHook for ChannelStateHook { + fn on_state(&mut self, _state: &EvmState) { + let _ = self.sender.send(self.output); + } + } + + fn setup_test_recorder() -> Snapshotter { + let recorder = DebuggingRecorder::new(); + let snapshotter = recorder.snapshotter(); + recorder.install().unwrap(); + snapshotter + } + + #[test] + fn test_executor_metrics_hook_metrics_recorded() { + let snapshotter = setup_test_recorder(); + let metrics = ExecutorMetrics::default(); + + let input = BlockExecutionInput { + block: &BlockWithSenders::default(), + total_difficulty: Default::default(), + }; + + let (tx, _rx) = mpsc::channel(); + let expected_output = 42; + let state_hook = Box::new(ChannelStateHook { sender: tx, output: expected_output }); + + let state = { + let mut state = EvmState::default(); + let storage = + EvmStorage::from_iter([(U256::from(1), EvmStorageSlot::new(U256::from(2)))]); + state.insert( + Default::default(), + Account { + info: AccountInfo { + balance: U256::from(100), + nonce: 10, + code_hash: B256::random(), + code: Default::default(), + }, + storage, + status: AccountStatus::Loaded, + }, + ); + state + }; + let executor = MockExecutor { state }; + let _result = metrics.execute_metered(executor, input, state_hook).unwrap(); + + let snapshot = snapshotter.snapshot().into_vec(); + + for metric in snapshot { + let metric_name = metric.0.key().name(); + if metric_name == "sync.execution.accounts_loaded_histogram" || + metric_name == "sync.execution.storage_slots_loaded_histogram" || + metric_name == "sync.execution.bytecodes_loaded_histogram" + { + if let DebugValue::Histogram(vs) = metric.3 { + assert!( + vs.iter().any(|v| v.into_inner() > 0.0), + "metric {metric_name} not recorded" + ); + } + } + } + } + + #[test] + fn test_executor_metrics_hook_called() { + let metrics = ExecutorMetrics::default(); + + let input = BlockExecutionInput { + block: &BlockWithSenders::default(), + total_difficulty: Default::default(), + }; + + let (tx, rx) = mpsc::channel(); + let expected_output = 42; + let state_hook = Box::new(ChannelStateHook { sender: tx, output: expected_output }); + + let state = EvmState::default(); + + let executor = MockExecutor { state }; + let _result = metrics.execute_metered(executor, input, state_hook).unwrap(); + + let actual_output = rx.try_recv().unwrap(); + assert_eq!(actual_output, expected_output); + } +}
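`MeteredStateHook` is a decorator: it records sizes from each `EvmState` it sees, then forwards to the wrapped hook. A standalone sketch of the pattern with hand-rolled stand-ins (plain counters instead of histograms; the types here are simplifications, not revm's):

use std::collections::HashMap;

// Stand-ins: revm's EvmState maps addresses to accounts with storage.
type Address = [u8; 20];
struct Account { storage: HashMap<u64, u64> }
type EvmState = HashMap<Address, Account>;

trait OnStateHook {
    fn on_state(&mut self, state: &EvmState);
}

impl<F: FnMut(&EvmState)> OnStateHook for F {
    fn on_state(&mut self, state: &EvmState) {
        self(state)
    }
}

/// Decorator in the spirit of `MeteredStateHook`: observe, record, forward.
struct CountingHook<H> {
    accounts_seen: usize,
    slots_seen: usize,
    inner: H,
}

impl<H: OnStateHook> OnStateHook for CountingHook<H> {
    fn on_state(&mut self, state: &EvmState) {
        self.accounts_seen += state.len();
        self.slots_seen += state.values().map(|a| a.storage.len()).sum::<usize>();
        self.inner.on_state(state); // keep the original behavior
    }
}

fn main() {
    let mut hook = CountingHook { accounts_seen: 0, slots_seen: 0, inner: |_: &EvmState| {} };
    let mut state = EvmState::new();
    state.insert([0u8; 20], Account { storage: HashMap::from([(1, 2)]) });
    hook.on_state(&state);
    assert_eq!((hook.accounts_seen, hook.slots_seen), (1, 1));
}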
diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index 4fdc6d367a2..816a4c83564 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -4,7 +4,7 @@ use alloy_primitives::BlockNumber; use core::fmt::Display; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; -use reth_primitives::{BlockWithSenders, Receipt}; +use reth_primitives::{BlockWithSenders, NodePrimitives}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm::State; @@ -20,9 +20,11 @@ const UNAVAILABLE_FOR_NOOP: &str = "execution unavailable for noop"; /// A [`BlockExecutorProvider`] implementation that does nothing. #[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct NoopBlockExecutorProvider; +pub struct NoopBlockExecutorProvider<P>(core::marker::PhantomData<P>); + +impl<P: NodePrimitives> BlockExecutorProvider for NoopBlockExecutorProvider<P>

{ + type Primitives = P; -impl BlockExecutorProvider for NoopBlockExecutorProvider { type Executor + Display>> = Self; type BatchExecutor + Display>> = Self; @@ -31,20 +33,20 @@ impl BlockExecutorProvider for NoopBlockExecutorProvider { where DB: Database + Display>, { - Self + Self::default() } fn batch_executor(&self, _: DB) -> Self::BatchExecutor where DB: Database + Display>, { - Self + Self::default() } } -impl Executor for NoopBlockExecutorProvider { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = BlockExecutionOutput; +impl Executor for NoopBlockExecutorProvider

{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = BlockExecutionOutput; type Error = BlockExecutionError; fn execute(self, _: Self::Input<'_>) -> Result { @@ -74,9 +76,9 @@ impl Executor for NoopBlockExecutorProvider { } } -impl BatchExecutor for NoopBlockExecutorProvider { - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = ExecutionOutcome; +impl BatchExecutor for NoopBlockExecutorProvider

{ + type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; + type Output = ExecutionOutcome; type Error = BlockExecutionError; fn execute_and_verify_one(&mut self, _: Self::Input<'_>) -> Result<(), Self::Error> { diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index 8db828ec4a0..6ef4cefbb48 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -1,8 +1,8 @@ //! Provider trait for populating the EVM environment. use crate::ConfigureEvmEnv; -use alloy_eips::eip1898::BlockHashOrNumber; -use reth_primitives::Header; +use alloy_consensus::Header; +use alloy_eips::BlockHashOrNumber; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; @@ -12,28 +12,16 @@ use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; /// This type is mainly used to provide required data to configure the EVM environment that is /// usually stored on disk. #[auto_impl::auto_impl(&, Arc)] -pub trait EvmEnvProvider: Send + Sync { - /// Fills the [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the given - /// [BlockHashOrNumber]. - fn fill_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv

; - +pub trait EvmEnvProvider: Send + Sync { /// Fills the default [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the - /// given [Header]. + /// given block header. fn env_with_header( &self, - header: &Header, + header: &H, evm_config: EvmConfig, ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> where - EvmConfig: ConfigureEvmEnv
, + EvmConfig: ConfigureEvmEnv<Header = H>, { let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); let mut block_env = BlockEnv::default(); @@ -42,16 +30,16 @@ pub trait EvmEnvProvider: Send + Sync { } /// Fills the [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the given - /// [Header]. + /// block header. fn fill_env_with_header<EvmConfig>( &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, - header: &Header, + header: &H, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv<Header = Header>
; + EvmConfig: ConfigureEvmEnv<Header = H>; /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given /// [BlockHashOrNumber]. @@ -62,15 +50,15 @@ pub trait EvmEnvProvider: Send + Sync { evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv<Header = Header>
; + EvmConfig: ConfigureEvmEnv<Header = H>; - /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given [Header]. + /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given block header. fn fill_cfg_env_with_header<EvmConfig>( &self, cfg: &mut CfgEnvWithHandlerCfg, - header: &Header, + header: &H, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv<Header = Header>
; + EvmConfig: ConfigureEvmEnv<Header = H>; }
diff --git a/crates/revm/src/state_change.rs b/crates/evm/src/state_change.rs similarity index 89% rename from crates/revm/src/state_change.rs rename to crates/evm/src/state_change.rs index afe92561bcd..5104c466399 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/evm/src/state_change.rs @@ -1,40 +1,55 @@ +//! State changes that are not related to transactions. + +use alloy_consensus::BlockHeader; +use alloy_eips::eip4895::Withdrawal; use alloy_primitives::{map::HashMap, Address, U256}; use reth_chainspec::EthereumHardforks; use reth_consensus_common::calc; -use reth_primitives::{Block, Withdrawal, Withdrawals}; +use reth_primitives_traits::BlockBody; /// Collect all balance changes at the end of the block. /// /// Balance changes might include the block reward, uncle rewards, withdrawals, or irregular /// state changes (DAO fork). #[inline] -pub fn post_block_balance_increments<ChainSpec: EthereumHardforks>( +pub fn post_block_balance_increments<ChainSpec, Block>( chain_spec: &ChainSpec, block: &Block, total_difficulty: U256, -) -> HashMap<Address, u128> { +) -> HashMap<Address, u128> +where + ChainSpec: EthereumHardforks, + Block: reth_primitives_traits::Block, +{ let mut balance_increments = HashMap::default(); // Add block rewards if they are enabled. - if let Some(base_block_reward) = - calc::base_block_reward(chain_spec, block.number, block.difficulty, total_difficulty) - { + if let Some(base_block_reward) = calc::base_block_reward( + chain_spec, + block.header().number(), + block.header().difficulty(), + total_difficulty, + ) { // Ommer rewards - for ommer in &block.body.ommers { - *balance_increments.entry(ommer.beneficiary).or_default() += - calc::ommer_reward(base_block_reward, block.number, ommer.number); + if let Some(ommers) = block.body().ommers() { + for ommer in ommers { + *balance_increments.entry(ommer.beneficiary()).or_default() += + calc::ommer_reward(base_block_reward, block.header().number(), ommer.number()); + } } // Full block reward - *balance_increments.entry(block.beneficiary).or_default() += - calc::block_reward(base_block_reward, block.body.ommers.len()); + *balance_increments.entry(block.header().beneficiary()).or_default() += calc::block_reward( + base_block_reward, + block.body().ommers().map(|s| s.len()).unwrap_or(0), + ); } // process withdrawals insert_post_block_withdrawals_balance_increments( chain_spec, - block.timestamp, - block.body.withdrawals.as_ref().map(Withdrawals::as_ref), + block.header().timestamp(), + block.body().withdrawals().as_ref().map(|w| w.as_slice()), &mut balance_increments, ); @@ -89,9 +104,9 @@ pub fn insert_post_block_withdrawals_balance_increments( where DB: Database, DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm<Header = Header>, + EvmConfig: ConfigureEvm, { if !chain_spec.is_prague_active_at_timestamp(block_timestamp) { return Ok(None) } diff --git a/crates/evm/src/system_calls/eip4788.rs b/crates/evm/src/system_calls/eip4788.rs index bc535809680..bfd5797214e 100644 --- a/crates/evm/src/system_calls/eip4788.rs +++ b/crates/evm/src/system_calls/eip4788.rs @@ -6,7 +6,6 @@ use alloy_eips::eip4788::BEACON_ROOTS_ADDRESS; use alloy_primitives::B256; use reth_chainspec::EthereumHardforks; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::Header; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::ResultAndState; @@ -31,7 +30,7 @@ pub(crate) fn transact_beacon_root_contract_call<EvmConfig, EXT, DB, Spec>( where DB: Database, DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm<Header = Header>
, + EvmConfig: ConfigureEvm, Spec: EthereumHardforks, { if !chain_spec.is_cancun_active_at_timestamp(block_timestamp) { diff --git a/crates/evm/src/system_calls/eip7002.rs b/crates/evm/src/system_calls/eip7002.rs index 9af944e42a5..d3c6d84903e 100644 --- a/crates/evm/src/system_calls/eip7002.rs +++ b/crates/evm/src/system_calls/eip7002.rs @@ -1,10 +1,9 @@ //! [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) system call implementation. use crate::ConfigureEvm; -use alloc::{boxed::Box, format, string::ToString, vec::Vec}; -use alloy_eips::eip7002::{WithdrawalRequest, WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS}; -use alloy_primitives::{bytes::Buf, Address, Bytes, FixedBytes}; +use alloc::{boxed::Box, format}; +use alloy_eips::eip7002::WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS; +use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{Header, Request}; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::{ExecutionResult, ResultAndState}; @@ -21,7 +20,7 @@ pub(crate) fn transact_withdrawal_requests_contract_call<EvmConfig, EXT, DB>( where DB: Database, DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm<Header = Header>
, + EvmConfig: ConfigureEvm, { // get previous env let previous_env = Box::new(evm.context.env().clone()); @@ -62,52 +61,23 @@ where Ok(res) } -/// Parses the withdrawal requests from the execution output. +/// Calls the withdrawal requests system contract, and returns the requests from the execution +/// output. #[inline] -pub(crate) fn post_commit(result: ExecutionResult) -> Result<Vec<Request>, BlockExecutionError> { - let mut data = match result { +pub(crate) fn post_commit(result: ExecutionResult) -> Result<Bytes, BlockExecutionError> { + match result { ExecutionResult::Success { output, .. } => Ok(output.into_data()), ExecutionResult::Revert { output, .. } => { Err(BlockValidationError::WithdrawalRequestsContractCall { message: format!("execution reverted: {output}"), - }) + } + .into()) } ExecutionResult::Halt { reason, .. } => { Err(BlockValidationError::WithdrawalRequestsContractCall { message: format!("execution halted: {reason:?}"), - }) - } - }?; - - // Withdrawals are encoded as a series of withdrawal requests, each with the following - // format: - // - // +------+--------+--------+ - // | addr | pubkey | amount | - // +------+--------+--------+ - // 20 48 8 - - const WITHDRAWAL_REQUEST_SIZE: usize = 20 + 48 + 8; - let mut withdrawal_requests = Vec::with_capacity(data.len() / WITHDRAWAL_REQUEST_SIZE); - while data.has_remaining() { - if data.remaining() < WITHDRAWAL_REQUEST_SIZE { - return Err(BlockValidationError::WithdrawalRequestsContractCall { - message: "invalid withdrawal request length".to_string(), } .into()) } - - let mut source_address = Address::ZERO; - data.copy_to_slice(source_address.as_mut_slice()); - - let mut validator_pubkey = FixedBytes::<48>::ZERO; - data.copy_to_slice(validator_pubkey.as_mut_slice()); - - let amount = data.get_u64(); - - withdrawal_requests - .push(WithdrawalRequest { source_address, validator_pubkey, amount }.into()); } - - Ok(withdrawal_requests) }
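The removed parser documented the EIP-7002 wire format: each request is 20 bytes of source address, 48 bytes of validator pubkey, and an 8-byte big-endian amount, packed back to back. Since `post_commit` now returns the raw bytes, a consumer that still wants structured requests could decode them roughly like this (a hedged sketch with plain arrays, not reth's or alloy's types):

/// 20-byte address | 48-byte pubkey | 8-byte big-endian amount = 76 bytes.
const WITHDRAWAL_REQUEST_SIZE: usize = 20 + 48 + 8;

struct WithdrawalRequest {
    source_address: [u8; 20],
    validator_pubkey: [u8; 48],
    amount: u64,
}

fn decode_withdrawal_requests(data: &[u8]) -> Result<Vec<WithdrawalRequest>, &'static str> {
    if data.len() % WITHDRAWAL_REQUEST_SIZE != 0 {
        return Err("invalid withdrawal request length");
    }
    Ok(data
        .chunks_exact(WITHDRAWAL_REQUEST_SIZE)
        .map(|chunk| WithdrawalRequest {
            source_address: chunk[..20].try_into().unwrap(),
            validator_pubkey: chunk[20..68].try_into().unwrap(),
            amount: u64::from_be_bytes(chunk[68..76].try_into().unwrap()),
        })
        .collect())
}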
diff --git a/crates/evm/src/system_calls/eip7251.rs b/crates/evm/src/system_calls/eip7251.rs index f09d4be81af..28ae0160cdf 100644 --- a/crates/evm/src/system_calls/eip7251.rs +++ b/crates/evm/src/system_calls/eip7251.rs @@ -1,10 +1,9 @@ //! [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) system call implementation. use crate::ConfigureEvm; -use alloc::{boxed::Box, format, string::ToString, vec::Vec}; -use alloy_eips::eip7251::{ConsolidationRequest, CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS}; -use alloy_primitives::{bytes::Buf, Address, Bytes, FixedBytes}; +use alloc::{boxed::Box, format}; +use alloy_eips::eip7251::CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS; +use alloy_primitives::Bytes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{Header, Request}; use revm::{interpreter::Host, Database, Evm}; use revm_primitives::{ExecutionResult, ResultAndState}; @@ -22,7 +21,7 @@ pub(crate) fn transact_consolidation_requests_contract_call<EvmConfig, EXT, DB>( where DB: Database, DB::Error: core::fmt::Display, - EvmConfig: ConfigureEvm<Header = Header>, + EvmConfig: ConfigureEvm, { // get previous env let previous_env = Box::new(evm.context.env().clone()); @@ -64,56 +63,23 @@ where Ok(res) } -/// Parses the consolidation requests from the execution output. +/// Calls the consolidation requests system contract, and returns the requests from the execution +/// output. #[inline] -pub(crate) fn post_commit(result: ExecutionResult) -> Result<Vec<Request>, BlockExecutionError> { - let mut data = match result { +pub(crate) fn post_commit(result: ExecutionResult) -> Result<Bytes, BlockExecutionError> { + match result { ExecutionResult::Success { output, .. } => Ok(output.into_data()), ExecutionResult::Revert { output, .. } => { Err(BlockValidationError::ConsolidationRequestsContractCall { message: format!("execution reverted: {output}"), - }) + } + .into()) } ExecutionResult::Halt { reason, .. } => { Err(BlockValidationError::ConsolidationRequestsContractCall { message: format!("execution halted: {reason:?}"), - }) - } - }?; - - // Consolidations are encoded as a series of consolidation requests, each with the following - // format: - // - // +------+--------+---------------+ - // | addr | pubkey | target pubkey | - // +------+--------+---------------+ - // 20 48 48 - - const CONSOLIDATION_REQUEST_SIZE: usize = 20 + 48 + 48; - let mut consolidation_requests = Vec::with_capacity(data.len() / CONSOLIDATION_REQUEST_SIZE); - while data.has_remaining() { - if data.remaining() < CONSOLIDATION_REQUEST_SIZE { - return Err(BlockValidationError::ConsolidationRequestsContractCall { - message: "invalid consolidation request length".to_string(), } .into()) } - - let mut source_address = Address::ZERO; - data.copy_to_slice(source_address.as_mut_slice()); - - let mut source_pubkey = FixedBytes::<48>::ZERO; - data.copy_to_slice(source_pubkey.as_mut_slice()); - - let mut target_pubkey = FixedBytes::<48>::ZERO; - data.copy_to_slice(target_pubkey.as_mut_slice()); - - consolidation_requests.push(Request::ConsolidationRequest(ConsolidationRequest { - source_address, - source_pubkey, - target_pubkey, - })); } - - Ok(consolidation_requests) }
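Both `post_commit` functions now hand back the raw contract output as `Bytes`; the per-type parsing is gone. Downstream, the `SystemCaller` changes below wrap those bytes into EIP-7685 `Requests` by prepending the one-byte request type. A hedged sketch of that framing with plain `Vec<u8>` in place of `Bytes` (type byte values per the referenced EIPs):

// EIP-7685 request-type bytes.
const WITHDRAWAL_REQUEST_TYPE: u8 = 0x01;
const CONSOLIDATION_REQUEST_TYPE: u8 = 0x02;

/// Frame raw system-call output as a typed EIP-7685 request:
/// `request = request_type || request_data`. Empty outputs are skipped.
fn push_request(requests: &mut Vec<Vec<u8>>, request_type: u8, data: Vec<u8>) {
    if !data.is_empty() {
        requests.push(core::iter::once(request_type).chain(data).collect());
    }
}

fn main() {
    let mut requests = Vec::new();
    push_request(&mut requests, WITHDRAWAL_REQUEST_TYPE, vec![0xaa; 76]);
    push_request(&mut requests, CONSOLIDATION_REQUEST_TYPE, Vec::new()); // skipped
    assert_eq!(requests.len(), 1);
    assert_eq!(requests[0][0], WITHDRAWAL_REQUEST_TYPE);
}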
diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index 5dc3f35bd3a..8af72094be4 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -1,13 +1,17 @@ //! System contract call functions. use crate::ConfigureEvm; -use alloc::{boxed::Box, vec::Vec}; +use alloc::{boxed::Box, sync::Arc}; +use alloy_consensus::BlockHeader; +use alloy_eips::{ eip7002::WITHDRAWAL_REQUEST_TYPE, eip7251::CONSOLIDATION_REQUEST_TYPE, eip7685::Requests, }; +use alloy_primitives::Bytes; use core::fmt::Display; use reth_chainspec::EthereumHardforks; use reth_execution_errors::BlockExecutionError; -use reth_primitives::{Block, Header, Request}; use revm::{Database, DatabaseCommit, Evm}; -use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, B256}; +use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, B256}; mod eip2935; mod eip4788; @@ -16,15 +20,15 @@ mod eip7251; /// A hook that is called after each state change. pub trait OnStateHook { - /// Invoked with the result and state after each system call. - fn on_state(&mut self, state: &ResultAndState); + /// Invoked with the state after each system call. + fn on_state(&mut self, state: &EvmState); } impl<F> OnStateHook for F where - F: FnMut(&ResultAndState), + F: FnMut(&EvmState), { - fn on_state(&mut self, state: &ResultAndState) { + fn on_state(&mut self, state: &EvmState) { self(state) } } @@ -35,7 +39,7 @@ where pub struct NoopHook; impl OnStateHook for NoopHook { - fn on_state(&mut self, _state: &ResultAndState) {} + fn on_state(&mut self, _state: &EvmState) {} } /// An ephemeral helper type for executing system calls. @@ -44,7 +48,7 @@ impl OnStateHook for NoopHook { #[allow(missing_debug_implementations)] pub struct SystemCaller<EvmConfig, Chainspec> { evm_config: EvmConfig, - chain_spec: Chainspec, + chain_spec: Arc<Chainspec>, /// Optional hook to be called after each state change. hook: Option<Box<dyn OnStateHook>>, } @@ -52,7 +56,7 @@ pub struct SystemCaller<EvmConfig, Chainspec> { impl<EvmConfig, Chainspec> SystemCaller<EvmConfig, Chainspec> { /// Create a new system caller with the given EVM config, database, and chain spec, and creates /// the EVM with the given initialized config and block environment. - pub const fn new(evm_config: EvmConfig, chain_spec: Chainspec) -> Self { + pub const fn new(evm_config: EvmConfig, chain_spec: Arc<Chainspec>) -> Self { Self { evm_config, chain_spec, hook: None } } @@ -86,11 +90,11 @@ where impl<EvmConfig, Chainspec> SystemCaller<EvmConfig, Chainspec> where - EvmConfig: ConfigureEvm<Header = Header>
, + EvmConfig: ConfigureEvm, Chainspec: EthereumHardforks, { /// Apply pre execution changes. - pub fn apply_pre_execution_changes<Ext, DB>( + pub fn apply_pre_execution_changes<Block, Ext, DB>( &mut self, block: &Block, evm: &mut Evm<'_, Ext, DB>, ) where DB: Database + DatabaseCommit, DB::Error: Display, + Block: reth_primitives_traits::Block<Header = EvmConfig::Header>
, { self.apply_blockhashes_contract_call( - block.timestamp, - block.number, - block.parent_hash, + block.header().timestamp(), + block.header().number(), + block.header().parent_hash(), evm, )?; self.apply_beacon_root_contract_call( - block.timestamp, - block.number, - block.parent_beacon_block_root, + block.header().timestamp(), + block.header().number(), + block.header().parent_beacon_block_root(), evm, )?; @@ -119,17 +124,32 @@ where pub fn apply_post_execution_changes<Ext, DB>( &mut self, evm: &mut Evm<'_, Ext, DB>, - ) -> Result<Vec<Request>, BlockExecutionError> + ) -> Result<Requests, BlockExecutionError> where DB: Database + DatabaseCommit, DB::Error: Display, { + let mut requests = Requests::default(); + // Collect all EIP-7685 requests let withdrawal_requests = self.apply_withdrawal_requests_contract_call(evm)?; + if !withdrawal_requests.is_empty() { + requests.push_request( + core::iter::once(WITHDRAWAL_REQUEST_TYPE).chain(withdrawal_requests).collect(), + ); + } // Collect all EIP-7251 requests let consolidation_requests = self.apply_consolidation_requests_contract_call(evm)?; - Ok([withdrawal_requests, consolidation_requests].concat()) + if !consolidation_requests.is_empty() { + requests.push_request( + core::iter::once(CONSOLIDATION_REQUEST_TYPE) + .chain(consolidation_requests) + .collect(), + ); + } + + Ok(requests) } /// Applies the pre-block call to the EIP-2935 blockhashes contract. @@ -168,7 +188,7 @@ where DB::Error: Display, { let result_and_state = eip2935::transact_blockhashes_contract_call( - &self.evm_config.clone(), + &self.evm_config, &self.chain_spec, timestamp, block_number, @@ -178,7 +198,7 @@ where if let Some(res) = result_and_state { if let Some(ref mut hook) = self.hook { - hook.on_state(&res); + hook.on_state(&res.state); } evm.context.evm.db.commit(res.state); } @@ -223,7 +243,7 @@ where DB::Error: Display, { let result_and_state = eip4788::transact_beacon_root_contract_call( - &self.evm_config.clone(), + &self.evm_config, &self.chain_spec, timestamp, block_number, @@ -233,7 +253,7 @@ where if let Some(res) = result_and_state { if let Some(ref mut hook) = self.hook { - hook.on_state(&res); + hook.on_state(&res.state); } evm.context.evm.db.commit(res.state); } @@ -247,7 +267,7 @@ where db: &mut DB, initialized_cfg: &CfgEnvWithHandlerCfg, initialized_block_env: &BlockEnv, - ) -> Result<Vec<Request>, BlockExecutionError> + ) -> Result<Bytes, BlockExecutionError> where DB: Database + DatabaseCommit, DB::Error: Display, @@ -263,7 +283,7 @@ where pub fn apply_withdrawal_requests_contract_call<Ext, DB>( &mut self, evm: &mut Evm<'_, Ext, DB>, - ) -> Result<Vec<Request>, BlockExecutionError> + ) -> Result<Bytes, BlockExecutionError> where DB: Database + DatabaseCommit, DB::Error: Display, @@ -272,7 +292,7 @@ where eip7002::transact_withdrawal_requests_contract_call(&self.evm_config.clone(), evm)?; if let Some(ref mut hook) = self.hook { - hook.on_state(&result_and_state); + hook.on_state(&result_and_state.state); } evm.context.evm.db.commit(result_and_state.state); @@ -285,7 +305,7 @@ where db: &mut DB, initialized_cfg: &CfgEnvWithHandlerCfg, initialized_block_env: &BlockEnv, - ) -> Result<Vec<Request>, BlockExecutionError> + ) -> Result<Bytes, BlockExecutionError> where DB: Database + DatabaseCommit, DB::Error: Display, @@ -301,7 +321,7 @@ where pub fn apply_consolidation_requests_contract_call<Ext, DB>( &mut self, evm: &mut Evm<'_, Ext, DB>, - ) -> Result<Vec<Request>, BlockExecutionError> + ) -> Result<Bytes, BlockExecutionError> where DB: Database + DatabaseCommit, DB::Error: Display, @@ -310,7 +330,7 @@ where eip7251::transact_consolidation_requests_contract_call(&self.evm_config.clone(), evm)?; if let Some(ref mut hook) = self.hook { - hook.on_state(&result_and_state);
+ hook.on_state(&result_and_state.state); } evm.context.evm.db.commit(result_and_state.state); @@ -318,7 +338,7 @@ where } /// Delegate to stored `OnStateHook`, noop if hook is `None`. - pub fn on_state(&mut self, state: &ResultAndState) { + pub fn on_state(&mut self, state: &EvmState) { if let Some(ref mut hook) = &mut self.hook { hook.on_state(state); } diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index 261b36420b4..22ba4a316e2 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -7,11 +7,12 @@ use crate::{ }, system_calls::OnStateHook, }; +use alloy_eips::eip7685::Requests; use alloy_primitives::BlockNumber; use parking_lot::Mutex; use reth_execution_errors::BlockExecutionError; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockWithSenders, Receipt, Receipts}; +use reth_primitives::{BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, Receipts}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm::State; @@ -32,6 +33,8 @@ impl MockExecutorProvider { } impl BlockExecutorProvider for MockExecutorProvider { + type Primitives = EthPrimitives; + type Executor<DB: Database<Error: Into<ProviderError> + Display>> = Self; type BatchExecutor<DB: Database<Error: Into<ProviderError> + Display>> = Self; @@ -62,7 +65,10 @@ impl<DB> Executor<DB> for MockExecutorProvider { Ok(BlockExecutionOutput { state: bundle, receipts: receipts.into_iter().flatten().flatten().collect(), - requests: requests.into_iter().flatten().collect(), + requests: requests.into_iter().fold(Requests::default(), |mut reqs, req| { + reqs.extend(req); + reqs + }), gas_used: 0, }) } @@ -112,14 +118,14 @@ impl<DB> BatchExecutor<DB> for MockExecutorProvider { } } -impl<S, DB> BasicBlockExecutor<S, DB> +impl<S> BasicBlockExecutor<S> where - S: BlockExecutionStrategy<DB>, + S: BlockExecutionStrategy, { /// Provides safe read access to the state pub fn with_state<F, R>(&self, f: F) -> R where - F: FnOnce(&State<DB>) -> R, + F: FnOnce(&State<S::DB>) -> R, { f(self.strategy.state_ref()) } @@ -127,20 +133,20 @@ where /// Provides safe write access to the state pub fn with_state_mut<F, R>(&mut self, f: F) -> R where - F: FnOnce(&mut State<DB>) -> R, + F: FnOnce(&mut State<S::DB>) -> R, { f(self.strategy.state_mut()) } } -impl<S, DB> BasicBatchExecutor<S, DB> +impl<S> BasicBatchExecutor<S> where - S: BlockExecutionStrategy<DB>, + S: BlockExecutionStrategy, { /// Provides safe read access to the state pub fn with_state<F, R>(&self, f: F) -> R where - F: FnOnce(&State<S::DB>) -> R, + F: FnOnce(&State<S::DB>) -> R, { f(self.strategy.state_ref()) } @@ -148,13 +154,13 @@ where /// Provides safe write access to the state pub fn with_state_mut<F, R>(&mut self, f: F) -> R where - F: FnOnce(&mut State<DB>) -> R, + F: FnOnce(&mut State<S::DB>) -> R, { f(self.strategy.state_mut()) } /// Accessor for batch executor receipts.
- pub const fn receipts(&self) -> &Receipts { + pub const fn receipts(&self) -> &Receipts<<S::Primitives as NodePrimitives>::Receipt> { self.batch_record.receipts() } } diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 6a3815e4045..b70fb921599 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -17,12 +17,14 @@ reth-chain-state.workspace = true reth-chainspec.workspace = true reth-config.workspace = true reth-evm.workspace = true -reth-exex-types = { workspace = true, features = ["serde", "serde-bincode-compat"] } +reth-exex-types = { workspace = true, features = [ + "serde", + "serde-bincode-compat", +] } reth-fs-util.workspace = true reth-metrics.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true -reth-payload-builder.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } reth-primitives-traits.workspace = true reth-provider.workspace = true @@ -33,6 +35,7 @@ reth-tasks.workspace = true reth-tracing.workspace = true # alloy +alloy-consensus.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true @@ -51,7 +54,6 @@ tracing.workspace = true [dev-dependencies] reth-blockchain-tree.workspace = true -reth-db-api.workspace = true reth-db-common.workspace = true reth-evm-ethereum.workspace = true reth-node-api.workspace = true @@ -68,4 +70,15 @@ tempfile.workspace = true [features] default = [] -serde = ["reth-provider/serde", "reth-exex-types/serde"] +serde = [ + "reth-provider/serde", + "reth-exex-types/serde", + "reth-revm/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "parking_lot/serde", + "rand/serde", + "secp256k1/serde", + "reth-primitives-traits/serde", +] diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 77a7b50477b..5888368e3c2 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -4,12 +4,14 @@ use std::{ time::{Duration, Instant}, }; +use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use reth_evm::execute::{ BatchExecutor, BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor, }; -use reth_primitives::{Block, BlockWithSenders, Receipt}; -use reth_primitives_traits::format_gas_throughput; +use reth_node_api::{Block as _, BlockBody as _, NodePrimitives}; +use reth_primitives::{BlockExt, BlockWithSenders, Receipt}; +use reth_primitives_traits::{format_gas_throughput, SignedTransaction}; use reth_provider::{ BlockReader, Chain, HeaderProvider, ProviderError, StateProviderFactory, TransactionVariant, }; @@ -23,7 +25,8 @@ pub(super) type BackfillJobResult<T> = Result<T, BlockExecutionError>; /// Backfill job started for a specific range. /// /// It implements [`Iterator`] that executes blocks in batches according to the provided thresholds -/// and yields [`Chain`] +/// and yields [`Chain`]. In other words, this iterator can yield multiple items for the given range +/// depending on the configured thresholds.
#[derive(Debug)] pub struct BackfillJob<E, P> { pub(crate) executor: E, @@ -36,10 +39,10 @@ pub struct BackfillJob<E, P> { impl<E, P> Iterator for BackfillJob<E, P> where - E: BlockExecutorProvider, - P: HeaderProvider + BlockReader + StateProviderFactory, + E: BlockExecutorProvider<Primitives: NodePrimitives<Block = P::Block>>, + P: HeaderProvider + BlockReader + StateProviderFactory, { - type Item = BackfillJobResult<Chain>; + type Item = BackfillJobResult<Chain<E::Primitives>>; fn next(&mut self) -> Option<Self::Item> { if self.range.is_empty() { @@ -52,8 +55,8 @@ where impl<E, P> BackfillJob<E, P> where - E: BlockExecutorProvider, - P: BlockReader + HeaderProvider + StateProviderFactory, + E: BlockExecutorProvider<Primitives: NodePrimitives<Block = P::Block>>, + P: BlockReader + HeaderProvider + StateProviderFactory, { /// Converts the backfill job into a single block backfill job. pub fn into_single_blocks(self) -> SingleBlockBackfillJob<E, P> { } /// Converts the backfill job into a stream. - pub fn into_stream(self) -> StreamBackfillJob<E, P, Chain> { + pub fn into_stream(self) -> StreamBackfillJob<E, P, Chain<E::Primitives>> { self.into() } - fn execute_range(&mut self) -> BackfillJobResult<Chain> { + fn execute_range(&mut self) -> BackfillJobResult<Chain<E::Primitives>> { debug!( target: "exex::backfill", range = ?self.range, @@ -100,10 +103,10 @@ where fetch_block_duration += fetch_block_start.elapsed(); - cumulative_gas += block.gas_used; + cumulative_gas += block.gas_used(); // Configure the executor to use the current state. - trace!(target: "exex::backfill", number = block_number, txs = block.body.transactions.len(), "Executing block"); + trace!(target: "exex::backfill", number = block_number, txs = block.body.transactions().len(), "Executing block"); // Execute the block let execute_start = Instant::now(); @@ -111,8 +114,7 @@ where // Unseal the block for execution let (block, senders) = block.into_components(); let (unsealed_header, hash) = block.header.split(); - let block = - Block { header: unsealed_header, body: block.body }.with_senders_unchecked(senders); + let block = P::Block::new(unsealed_header, block.body).with_senders_unchecked(senders); executor.execute_and_verify_one((&block, td).into())?; execution_duration += execute_start.elapsed(); @@ -134,7 +136,7 @@ where } } - let last_block_number = blocks.last().expect("blocks should not be empty").number; + let last_block_number = blocks.last().expect("blocks should not be empty").number(); debug!( target: "exex::backfill", range = ?*self.range.start()..=last_block_number, @@ -164,10 +166,13 @@ pub struct SingleBlockBackfillJob<E, P> { impl<E, P> Iterator for SingleBlockBackfillJob<E, P> where - E: BlockExecutorProvider, + E: BlockExecutorProvider<Primitives: NodePrimitives<Block = P::Block>>, P: HeaderProvider + BlockReader + StateProviderFactory, { - type Item = BackfillJobResult<(BlockWithSenders, BlockExecutionOutput<Receipt>)>; + type Item = BackfillJobResult<( + BlockWithSenders<P::Block>, + BlockExecutionOutput<<E::Primitives as NodePrimitives>::Receipt>, + )>; fn next(&mut self) -> Option<Self::Item> { self.range.next().map(|block_number| self.execute_block(block_number)) @@ -176,7 +181,7 @@ where impl<E, P> SingleBlockBackfillJob<E, P> where - E: BlockExecutorProvider, + E: BlockExecutorProvider<Primitives: NodePrimitives<Block = P::Block>>, P: HeaderProvider + BlockReader + StateProviderFactory, { /// Converts the single block backfill job into a stream. @@ -186,10 +191,14 @@ where self.into() } + #[expect(clippy::type_complexity)] pub(crate) fn execute_block( &self, block_number: u64, - ) -> BackfillJobResult<(BlockWithSenders, BlockExecutionOutput<Receipt>)> { + ) -> BackfillJobResult<( + BlockWithSenders<P::Block>, + BlockExecutionOutput<<E::Primitives as NodePrimitives>::Receipt>, + )> { let td = self .provider .header_td_by_number(block_number)? @@ -206,7 +215,7 @@ where self.provider.history_by_block_number(block_number.saturating_sub(1))?, )); - trace!(target: "exex::backfill", number = block_number, txs = block_with_senders.block.body.transactions.len(), "Executing block"); + trace!(target: "exex::backfill", number = block_number, txs = block_with_senders.block.body().transactions().len(), "Executing block"); let block_execution_output = executor.execute((&block_with_senders, td).into())?;
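The iterator shape matters here: one requested range can come back as several `Chain`s, each capped by the configured thresholds. A standalone sketch of that batching behavior (plain ranges instead of executed chains; the threshold field is an illustrative stand-in for `ExecutionStageThresholds`):

/// One range in, many batches out, each batch capped by a threshold.
struct BatchedRange {
    next: u64,
    end: u64, // inclusive
    max_blocks: u64,
}

impl Iterator for BatchedRange {
    type Item = std::ops::RangeInclusive<u64>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.next > self.end {
            return None;
        }
        let batch_end = self.end.min(self.next + self.max_blocks - 1);
        let batch = self.next..=batch_end;
        self.next = batch_end + 1;
        Some(batch)
    }
}

fn main() {
    // A 10-block range with a 4-block threshold yields three "chains".
    let batches: Vec<_> = BatchedRange { next: 1, end: 10, max_blocks: 4 }.collect();
    assert_eq!(batches, vec![1..=4, 5..=8, 9..=10]);
}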
diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index c55b8651daf..d88ca87e7ac 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -11,7 +11,8 @@ use futures::{ StreamExt, }; use reth_evm::execute::{BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider}; -use reth_primitives::{BlockWithSenders, Receipt}; +use reth_node_api::NodePrimitives; +use reth_primitives::{BlockWithSenders, EthPrimitives}; use reth_provider::{BlockReader, Chain, HeaderProvider, StateProviderFactory}; use reth_prune_types::PruneModes; use reth_stages_api::ExecutionStageThresholds; @@ -38,8 +39,11 @@ struct BackfillTaskOutput { /// Ordered queue of [`JoinHandle`]s that yield [`BackfillTaskOutput`]s. type BackfillTasks<T> = FuturesOrdered<JoinHandle<BackfillTaskOutput<T>>>; -type SingleBlockStreamItem = (BlockWithSenders, BlockExecutionOutput<Receipt>); -type BatchBlockStreamItem = Chain; +type SingleBlockStreamItem<N = EthPrimitives> = ( + BlockWithSenders<<N as NodePrimitives>::Block>, + BlockExecutionOutput<<N as NodePrimitives>::Receipt>, +); +type BatchBlockStreamItem<N = EthPrimitives> = Chain<N>; /// Stream for processing backfill jobs asynchronously. /// @@ -73,15 +77,24 @@ where self } - /// Spawns a new task calling the [`BackfillTaskIterator::next`] method and pushes it to the - /// [`BackfillTasks`] queue. - fn push_task(&mut self, mut job: BackfillTaskIterator<T>) { + /// Spawns a new task calling the [`BackfillTaskIterator::next`] method and pushes it to the end + /// of the [`BackfillTasks`] queue. + fn push_back(&mut self, mut job: BackfillTaskIterator<T>) { self.tasks.push_back(tokio::task::spawn_blocking(move || BackfillTaskOutput { result: job.next(), job, })); } + /// Spawns a new task calling the [`BackfillTaskIterator::next`] method and pushes it to the + /// front of the [`BackfillTasks`] queue. + fn push_front(&mut self, mut job: BackfillTaskIterator<T>) { + self.tasks.push_front(tokio::task::spawn_blocking(move || BackfillTaskOutput { + result: job.next(), + job, + })); + } + /// Polls the next task in the [`BackfillTasks`] queue until it returns a non-empty result. fn poll_next_task(&mut self, cx: &mut Context<'_>) -> Poll<Option<BackfillJobResult<T>>> { while let Some(res) = ready!(self.tasks.poll_next_unpin(cx)) { @@ -89,8 +102,9 @@ where if let BackfillTaskOutput { result: Some(job_result), job } = task_result { // If the task returned a non-empty result, a new task advancing the job is created - // and pushed to the front of the queue. - self.push_task(job); + // and pushed to the __front__ of the queue, so that the next item of this job is + // returned next. + self.push_front(job); return Poll::Ready(Some(job_result)) };
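The `push_front`/`push_back` split exists to keep results ordered: a job that just produced an item but is not yet exhausted must be polled again before any job queued behind it. A minimal sketch of that scheduling rule, with a `VecDeque` standing in for `FuturesOrdered`:

use std::collections::VecDeque;

// Each "job" yields several items; the queue must emit all items of job A
// before any item of job B, even though jobs are re-queued between polls.
fn drain_in_order(mut jobs: VecDeque<Vec<u32>>) -> Vec<u32> {
    let mut out = Vec::new();
    while let Some(mut job) = jobs.pop_front() {
        if !job.is_empty() {
            out.push(job.remove(0));
            if !job.is_empty() {
                // Re-queue at the *front* so this job's next item comes next.
                jobs.push_front(job);
            }
        }
    }
    out
}

fn main() {
    let jobs = VecDeque::from([vec![1, 2, 3], vec![10, 20]]);
    assert_eq!(drain_in_order(jobs), vec![1, 2, 3, 10, 20]);
}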
+ self.push_front(job); return Poll::Ready(Some(job_result)) }; @@ -100,12 +114,12 @@ where } } -impl Stream for StreamBackfillJob +impl Stream for StreamBackfillJob> where - E: BlockExecutorProvider + Clone + Send + 'static, + E: BlockExecutorProvider> + Clone + Send + 'static, P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + Unpin + 'static, { - type Item = BackfillJobResult; + type Item = BackfillJobResult>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -126,19 +140,19 @@ where range: block_number..=block_number, stream_parallelism: this.parallelism, }) as BackfillTaskIterator<_>; - this.push_task(job); + this.push_back(job); } this.poll_next_task(cx) } } -impl Stream for StreamBackfillJob +impl Stream for StreamBackfillJob> where - E: BlockExecutorProvider + Clone + Send + 'static, + E: BlockExecutorProvider> + Clone + Send + 'static, P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + Unpin + 'static, { - type Item = BackfillJobResult; + type Item = BackfillJobResult>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -166,7 +180,7 @@ where range, stream_parallelism: this.parallelism, }) as BackfillTaskIterator<_>; - this.push_task(job); + this.push_back(job); } this.poll_next_task(cx) @@ -188,7 +202,10 @@ impl From> for StreamBackfillJob From> for StreamBackfillJob { +impl From> for StreamBackfillJob> +where + E: BlockExecutorProvider, +{ fn from(job: BackfillJob) -> Self { let batch_size = job.thresholds.max_blocks.map_or(DEFAULT_BATCH_SIZE, |max| max as usize); Self { diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 1c793975c75..6d93314e22b 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use alloy_consensus::TxEip2930; +use alloy_consensus::{constants::ETH_TO_WEI, Header, TxEip2930}; use alloy_genesis::{Genesis, GenesisAccount}; use alloy_primitives::{b256, Address, TxKind, U256}; use eyre::OptionExt; @@ -9,13 +9,13 @@ use reth_evm::execute::{ BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, }; use reth_evm_ethereum::execute::EthExecutorProvider; +use reth_node_api::FullNodePrimitives; use reth_primitives::{ - constants::ETH_TO_WEI, Block, BlockBody, BlockWithSenders, Header, Receipt, Requests, - SealedBlockWithSenders, Transaction, + Block, BlockBody, BlockExt, BlockWithSenders, Receipt, SealedBlockWithSenders, Transaction, }; use reth_provider::{ providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef, - ProviderFactory, StaticFileProviderFactory, + ProviderFactory, }; use reth_revm::database::StateProviderDatabase; use reth_testing_utils::generators::sign_tx_with_key_pair; @@ -29,7 +29,7 @@ pub(crate) fn to_execution_outcome( bundle: block_execution_output.state.clone(), receipts: block_execution_output.receipts.clone().into(), first_block: block_number, - requests: vec![Requests(block_execution_output.requests.clone())], + requests: vec![block_execution_output.requests.clone()], } } @@ -58,16 +58,19 @@ pub(crate) fn execute_block_and_commit_to_database( block: &BlockWithSenders, ) -> eyre::Result> where - N: ProviderNodeTypes, + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, + >, { let 
provider = provider_factory.provider()?; // Execute the block to produce a block execution output let mut block_execution_output = EthExecutorProvider::ethereum(chain_spec) - .executor(StateProviderDatabase::new(LatestStateProviderRef::new( - provider.tx_ref(), - provider.static_file_provider(), - ))) + .executor(StateProviderDatabase::new(LatestStateProviderRef::new(&provider))) .execute(BlockExecutionInput { block, total_difficulty: U256::ZERO })?; block_execution_output.state.reverts.sort(); @@ -165,7 +168,13 @@ pub(crate) fn blocks_and_execution_outputs<N>( key_pair: Keypair, ) -> eyre::Result<Vec<(SealedBlockWithSenders, BlockExecutionOutput<Receipt>)>> where - N: ProviderNodeTypes, + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + Block = reth_primitives::Block, + BlockBody = reth_primitives::BlockBody, + Receipt = reth_primitives::Receipt, + >, + >, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; @@ -187,15 +196,15 @@ pub(crate) fn blocks_and_execution_outcome<N>( ) -> eyre::Result<(Vec<SealedBlockWithSenders>, ExecutionOutcome)> where N: ProviderNodeTypes, + N::Primitives: + FullNodePrimitives<Block = reth_primitives::Block, Receipt = reth_primitives::Receipt>, { let (block1, block2) = blocks(chain_spec.clone(), key_pair)?; let provider = provider_factory.provider()?; - let executor = - EthExecutorProvider::ethereum(chain_spec).batch_executor(StateProviderDatabase::new( - LatestStateProviderRef::new(provider.tx_ref(), provider.static_file_provider()), - )); + let executor = EthExecutorProvider::ethereum(chain_spec) + .batch_executor(StateProviderDatabase::new(LatestStateProviderRef::new(&provider))); let mut execution_outcome = executor.execute_and_verify_batch(vec![ (&block1, U256::ZERO).into(), diff --git a/crates/exex/exex/src/context.rs b/crates/exex/exex/src/context.rs index 9af12e260a7..f536ed515f9 100644 --- a/crates/exex/exex/src/context.rs +++ b/crates/exex/exex/src/context.rs @@ -1,14 +1,13 @@ -use std::fmt::Debug; - +use crate::{ExExContextDyn, ExExEvent, ExExNotifications, ExExNotificationsStream}; use reth_exex_types::ExExHead; -use reth_node_api::{FullNodeComponents, NodeTypes, NodeTypesWithEngine}; +use reth_node_api::{FullNodeComponents, NodePrimitives, NodeTypes}; use reth_node_core::node_config::NodeConfig; use reth_primitives::Head; +use reth_provider::BlockReader; use reth_tasks::TaskExecutor; +use std::fmt::Debug; use tokio::sync::mpsc::UnboundedSender; -use crate::{ExExEvent, ExExNotifications}; - /// Captures the context that an `ExEx` has access to. pub struct ExExContext<Node: FullNodeComponents> { /// The current head of the blockchain at launch. @@ -55,7 +54,24 @@ where } } -impl<Node: FullNodeComponents> ExExContext<Node> { +impl<Node> ExExContext<Node> +where + Node: FullNodeComponents, + Node::Provider: Debug + BlockReader, + Node::Executor: Debug, + Node::Types: NodeTypes<Primitives: NodePrimitives>, +{ + /// Returns dynamic version of the context + pub fn into_dyn(self) -> ExExContextDyn<<Node::Types as NodeTypes>::Primitives> { + ExExContextDyn::from(self) + } +} + +impl<Node> ExExContext<Node> +where + Node: FullNodeComponents, + Node::Types: NodeTypes<Primitives: NodePrimitives>, +{ /// Returns the transaction pool of the node. pub fn pool(&self) -> &Node::Pool { self.components.pool() } @@ -82,10 +98,7 @@ impl ExExContext { } /// Returns the handle to the payload builder service.
- pub fn payload_builder( - &self, - ) -> &reth_payload_builder::PayloadBuilderHandle<<Node::Types as NodeTypesWithEngine>::Engine> - { + pub fn payload_builder(&self) -> &Node::PayloadBuilder { self.components.payload_builder() } @@ -106,3 +119,38 @@ impl ExExContext { self.notifications.set_with_head(head); } } + +#[cfg(test)] +mod tests { + use reth_exex_types::ExExHead; + use reth_node_api::FullNodeComponents; + use reth_provider::BlockReader; + + use crate::ExExContext; + + /// <https://github.com/paradigmxyz/reth/issues/12054> + #[test] + const fn issue_12054() { + #[allow(dead_code)] + struct ExEx<Node: FullNodeComponents> { + ctx: ExExContext<Node>, + } + + impl<Node: FullNodeComponents> ExEx<Node> + where + Node::Provider: BlockReader, + { + async fn _test_bounds(mut self) -> eyre::Result<()> { + self.ctx.pool(); + self.ctx.block_executor(); + self.ctx.provider(); + self.ctx.network(); + self.ctx.payload_builder(); + self.ctx.task_executor(); + self.ctx.set_notifications_without_head(); + self.ctx.set_notifications_with_head(ExExHead { block: Default::default() }); + Ok(()) + } + } + } +}
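`ExExContextDyn` (new file below) trades the `Node` type parameter for boxed trait objects, so an ExEx can be compiled against one concrete context type; `into_dyn` performs the erasure. A standalone sketch of the same generic-to-dyn conversion pattern (all names here are illustrative, not reth's):

trait NotificationsStream {
    fn next_id(&mut self) -> Option<u64>;
}

// Generic context, parameterized like `ExExContext<Node>`.
struct Context<S: NotificationsStream> {
    notifications: S,
}

// Erased context, like `ExExContextDyn`: one concrete type for all nodes.
struct ContextDyn {
    notifications: Box<dyn NotificationsStream>,
}

impl<S: NotificationsStream + 'static> Context<S> {
    fn into_dyn(self) -> ContextDyn {
        ContextDyn { notifications: Box::new(self.notifications) }
    }
}

struct Counter(u64);
impl NotificationsStream for Counter {
    fn next_id(&mut self) -> Option<u64> {
        self.0 += 1;
        Some(self.0)
    }
}

fn main() {
    let mut ctx = Context { notifications: Counter(0) }.into_dyn();
    assert_eq!(ctx.notifications.next_id(), Some(1));
}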
diff --git a/crates/exex/exex/src/dyn_context.rs b/crates/exex/exex/src/dyn_context.rs new file mode 100644 index 00000000000..8bda75cac45 --- /dev/null +++ b/crates/exex/exex/src/dyn_context.rs @@ -0,0 +1,73 @@ +//! Mirrored version of [`ExExContext`](`crate::ExExContext`) +//! without generic abstraction over [Node](`reth_node_api::FullNodeComponents`) + +use std::fmt::Debug; + +use reth_chainspec::{EthChainSpec, Head}; +use reth_node_api::{FullNodeComponents, HeaderTy, NodePrimitives, NodeTypes}; +use reth_node_core::node_config::NodeConfig; +use reth_primitives::EthPrimitives; +use reth_provider::BlockReader; +use tokio::sync::mpsc; + +use crate::{ExExContext, ExExEvent, ExExNotificationsStream}; + +// TODO(0xurb) - add `node` after abstractions +/// Captures the context that an `ExEx` has access to. +pub struct ExExContextDyn<N: NodePrimitives = EthPrimitives> { + /// The current head of the blockchain at launch. + pub head: Head, + /// The config of the node + pub config: NodeConfig<Box<dyn EthChainSpec<Header = N::BlockHeader> + 'static>>, + /// The loaded node config + pub reth_config: reth_config::Config, + /// Channel used to send [`ExExEvent`]s to the rest of the node. + /// + /// # Important + /// + /// The exex should emit a `FinishedHeight` whenever a processed block is safe to prune. + /// Additionally, the exex can pre-emptively emit a `FinishedHeight` event to specify what + /// blocks to receive notifications for. + pub events: mpsc::UnboundedSender<ExExEvent>, + /// Channel to receive [`ExExNotification`](crate::ExExNotification)s. + /// + /// # Important + /// + /// Once an [`ExExNotification`](crate::ExExNotification) is sent over the channel, it is + /// considered delivered by the node. + pub notifications: Box<dyn ExExNotificationsStream<N>>, +} + +impl<N: NodePrimitives> Debug for ExExContextDyn<N> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ExExContext") + .field("head", &self.head) + .field("config", &self.config) + .field("reth_config", &self.reth_config) + .field("events", &self.events) + .field("notifications", &"...") + .finish() + } +} + +impl<Node> From<ExExContext<Node>> for ExExContextDyn<<Node::Types as NodeTypes>::Primitives> +where + Node: FullNodeComponents<Types: NodeTypes<Primitives: NodePrimitives>>, + Node::Provider: Debug + BlockReader, + Node::Executor: Debug, +{ + fn from(ctx: ExExContext<Node>) -> Self { + let config = ctx.config.map_chainspec(|chainspec| { + Box::new(chainspec) as Box<dyn EthChainSpec<Header = HeaderTy<Node::Types>>> + }); + let notifications = Box::new(ctx.notifications) as Box<_>; + + Self { + head: ctx.head, + config, + reth_config: ctx.reth_config, + events: ctx.events, + notifications, + } + } +} diff --git a/crates/exex/exex/src/event.rs b/crates/exex/exex/src/event.rs index 1215ea2a502..bbd79addc9e 100644 --- a/crates/exex/exex/src/event.rs +++ b/crates/exex/exex/src/event.rs @@ -1,4 +1,4 @@ -use reth_primitives::BlockNumHash; +use alloy_eips::BlockNumHash; /// Events emitted by an `ExEx`. #[derive(Debug, Clone, Copy, PartialEq, Eq)] diff --git a/crates/exex/exex/src/lib.rs b/crates/exex/exex/src/lib.rs index edc9e40d449..ce6641ff673 100644 --- a/crates/exex/exex/src/lib.rs +++ b/crates/exex/exex/src/lib.rs @@ -40,6 +40,9 @@ pub use backfill::*; mod context; pub use context::*; +mod dyn_context; +pub use dyn_context::*; + mod event; pub use event::*; diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index 8c1518f3090..16a93052614 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -1,15 +1,19 @@ use crate::{ wal::Wal, ExExEvent, ExExNotification, ExExNotifications, FinishedExExHeight, WalHandle, }; +use alloy_consensus::BlockHeader; +use alloy_eips::BlockNumHash; use futures::StreamExt; use itertools::Itertools; use metrics::Gauge; use reth_chain_state::ForkChoiceStream; use reth_chainspec::Head; +use reth_evm::execute::BlockExecutorProvider; use reth_metrics::{metrics::Counter, Metrics}; -use reth_primitives::{BlockNumHash, SealedHeader}; +use reth_node_api::NodePrimitives; +use reth_primitives::{EthPrimitives, SealedHeader}; use reth_provider::HeaderProvider; -use reth_tracing::tracing::debug; +use reth_tracing::tracing::{debug, warn}; use std::{ collections::VecDeque, fmt::Debug, @@ -34,6 +38,12 @@ use tokio_util::sync::{PollSendError, PollSender, ReusableBoxFuture}; /// or 17 minutes of 1-second blocks. pub const DEFAULT_EXEX_MANAGER_CAPACITY: usize = 1024; +/// The maximum number of blocks allowed in the WAL before emitting a warning. +/// +/// This constant defines the threshold for the Write-Ahead Log (WAL) size. If the number of blocks +/// in the WAL exceeds this limit, a warning is logged to indicate potential issues. +pub const WAL_BLOCKS_WARNING: usize = 128; + /// The source of the notification. /// /// This distinguishment is needed to not commit any pipeline notificatations to [WAL](`Wal`), @@ -62,13 +72,13 @@ struct ExExMetrics { /// [`ExExHandle::new`] should be given to the `ExEx`, while the handle itself should be given to /// the manager in [`ExExManager::new`]. #[derive(Debug)] -pub struct ExExHandle { +pub struct ExExHandle<N: NodePrimitives = EthPrimitives> { /// The execution extension's ID. id: String, /// Metrics for an `ExEx`. metrics: ExExMetrics, /// Channel to send [`ExExNotification`]s to the `ExEx`. - sender: PollSender<ExExNotification>, + sender: PollSender<ExExNotification<N>>, /// Channel to receive [`ExExEvent`]s from the `ExEx`.
receiver: UnboundedReceiver, /// The ID of the next notification to send to this `ExEx`. @@ -79,17 +89,17 @@ pub struct ExExHandle { finished_height: Option, } -impl ExExHandle { +impl ExExHandle { /// Create a new handle for the given `ExEx`. /// /// Returns the handle, as well as a [`UnboundedSender`] for [`ExExEvent`]s and a /// [`mpsc::Receiver`] for [`ExExNotification`]s that should be given to the `ExEx`. - pub fn new( + pub fn new>( id: String, node_head: Head, provider: P, executor: E, - wal_handle: WalHandle, + wal_handle: WalHandle, ) -> (Self, UnboundedSender, ExExNotifications) { let (notification_tx, notification_rx) = mpsc::channel(1); let (event_tx, event_rx) = mpsc::unbounded_channel(); @@ -117,21 +127,21 @@ impl ExExHandle { fn send( &mut self, cx: &mut Context<'_>, - (notification_id, notification): &(usize, ExExNotification), - ) -> Poll>> { + (notification_id, notification): &(usize, ExExNotification), + ) -> Poll>>> { if let Some(finished_height) = self.finished_height { match notification { ExExNotification::ChainCommitted { new } => { // Skip the chain commit notification if the finished height of the ExEx is // higher than or equal to the tip of the new notification. // I.e., the ExEx has already processed the notification. - if finished_height.number >= new.tip().number { + if finished_height.number >= new.tip().number() { debug!( target: "exex::manager", exex_id = %self.id, %notification_id, ?finished_height, - new_tip = %new.tip().number, + new_tip = %new.tip().number(), "Skipping notification" ); @@ -201,15 +211,15 @@ pub struct ExExManagerMetrics { /// - Error handling /// - Monitoring #[derive(Debug)] -pub struct ExExManager
<P> { +pub struct ExExManager<P, N: NodePrimitives> { /// Provider for querying headers. provider: P, /// Handles to communicate with the `ExEx`'s. - exex_handles: Vec<ExExHandle>, + exex_handles: Vec<ExExHandle<N>>, /// [`ExExNotification`] channel from the [`ExExManagerHandle`]s. - handle_rx: UnboundedReceiver<(ExExNotificationSource, ExExNotification)>, + handle_rx: UnboundedReceiver<(ExExNotificationSource, ExExNotification<N>)>, /// The minimum notification ID currently present in the buffer. min_id: usize, @@ -219,7 +229,7 @@ pub struct ExExManager<P>
<P> { /// /// The first element of the tuple is a monotonically increasing ID unique to the notification /// (the second element of the tuple). - buffer: VecDeque<(usize, ExExNotification)>, + buffer: VecDeque<(usize, ExExNotification<N>)>, /// Max size of the internal state notifications buffer. max_capacity: usize, /// Current state notifications buffer capacity. @@ -234,17 +244,20 @@ pub struct ExExManager<P>
{ finished_height: watch::Sender<FinishedExExHeight>, /// Write-Ahead Log for the [`ExExNotification`]s. - wal: Wal, + wal: Wal<N>, /// A stream of finalized headers. - finalized_header_stream: ForkChoiceStream<SealedHeader>, + finalized_header_stream: ForkChoiceStream<SealedHeader<N::BlockHeader>>, /// A handle to the `ExEx` manager. - handle: ExExManagerHandle, + handle: ExExManagerHandle<N>, /// Metrics for the `ExEx` manager. metrics: ExExManagerMetrics, } -impl<P> ExExManager<P>
{ +impl<P, N> ExExManager<P, N> +where + N: NodePrimitives, +{ /// Create a new [`ExExManager`]. /// /// You must provide an [`ExExHandle`] for each `ExEx` and the maximum capacity of the @@ -254,10 +267,10 @@ impl<P> ExExManager<P>
{ /// notifications over [`ExExManagerHandle`]s until there is capacity again. pub fn new( provider: P, - handles: Vec<ExExHandle>, + handles: Vec<ExExHandle<N>>, max_capacity: usize, - wal: Wal, - finalized_header_stream: ForkChoiceStream<SealedHeader>, + wal: Wal<N>, + finalized_header_stream: ForkChoiceStream<SealedHeader<N::BlockHeader>>, ) -> Self { let num_exexs = handles.len(); @@ -307,7 +320,7 @@ impl<P> ExExManager<P>
{ } /// Returns the handle to the manager. - pub fn handle(&self) -> ExExManagerHandle { + pub fn handle(&self) -> ExExManagerHandle<N> { self.handle.clone() } @@ -326,22 +339,23 @@ impl<P> ExExManager<P>
{ /// Pushes a new notification into the manager's internal buffer, assigning the notification a /// unique ID. - fn push_notification(&mut self, notification: ExExNotification) { + fn push_notification(&mut self, notification: ExExNotification<N>) { let next_id = self.next_id; self.buffer.push_back((next_id, notification)); self.next_id += 1; } } -impl<P> ExExManager<P>
+impl<P, N> ExExManager<P, N> where P: HeaderProvider, + N: NodePrimitives, { /// Finalizes the WAL according to the passed finalized header. /// /// This function checks if all ExExes are on the canonical chain and finalizes the WAL if /// necessary. - fn finalize_wal(&self, finalized_header: SealedHeader) -> eyre::Result<()> { + fn finalize_wal(&self, finalized_header: SealedHeader<N::BlockHeader>) -> eyre::Result<()> { debug!(target: "exex::manager", header = ?finalized_header.num_hash(), "Received finalized header"); // Check if all ExExes are on the canonical chain @@ -376,6 +390,13 @@ where .unwrap(); self.wal.finalize(lowest_finished_height)?; + if self.wal.num_blocks() > WAL_BLOCKS_WARNING { + warn!( + target: "exex::manager", + blocks = ?self.wal.num_blocks(), + "WAL contains too many blocks and is not getting cleared. That will lead to increased disk space usage. Check that you emit the FinishedHeight event from your ExExes." + ); + } } else { let unfinalized_exexes = exex_finished_heights .into_iter() @@ -399,9 +420,10 @@ where } } -impl<P> Future for ExExManager<P>
+impl Future for ExExManager where P: HeaderProvider + Unpin + 'static, + N: NodePrimitives, { type Output = eyre::Result<()>; @@ -442,8 +464,9 @@ where // Drain handle notifications while this.buffer.len() < this.max_capacity { if let Poll::Ready(Some((source, notification))) = this.handle_rx.poll_recv(cx) { - let committed_tip = notification.committed_chain().map(|chain| chain.tip().number); - let reverted_tip = notification.reverted_chain().map(|chain| chain.tip().number); + let committed_tip = + notification.committed_chain().map(|chain| chain.tip().number()); + let reverted_tip = notification.reverted_chain().map(|chain| chain.tip().number()); debug!(target: "exex::manager", ?committed_tip, ?reverted_tip, "Received new notification"); // Commit to WAL only notifications from blockchain tree. Pipeline notifications @@ -510,9 +533,9 @@ where /// A handle to communicate with the [`ExExManager`]. #[derive(Debug)] -pub struct ExExManagerHandle { +pub struct ExExManagerHandle { /// Channel to send notifications to the `ExEx` manager. - exex_tx: UnboundedSender<(ExExNotificationSource, ExExNotification)>, + exex_tx: UnboundedSender<(ExExNotificationSource, ExExNotification)>, /// The number of `ExEx`'s running on the node. num_exexs: usize, /// A watch channel denoting whether the manager is ready for new notifications or not. @@ -530,7 +553,7 @@ pub struct ExExManagerHandle { finished_height: watch::Receiver, } -impl ExExManagerHandle { +impl ExExManagerHandle { /// Creates an empty manager handle. /// /// Use this if there is no manager present. @@ -557,8 +580,8 @@ impl ExExManagerHandle { pub fn send( &self, source: ExExNotificationSource, - notification: ExExNotification, - ) -> Result<(), SendError<(ExExNotificationSource, ExExNotification)>> { + notification: ExExNotification, + ) -> Result<(), SendError<(ExExNotificationSource, ExExNotification)>> { self.exex_tx.send((source, notification)) } @@ -569,8 +592,8 @@ impl ExExManagerHandle { pub async fn send_async( &mut self, source: ExExNotificationSource, - notification: ExExNotification, - ) -> Result<(), SendError<(ExExNotificationSource, ExExNotification)>> { + notification: ExExNotification, + ) -> Result<(), SendError<(ExExNotificationSource, ExExNotification)>> { self.ready().await; self.exex_tx.send((source, notification)) } @@ -619,7 +642,7 @@ async fn make_wait_future(mut rx: watch::Receiver) -> watch::Receiver Clone for ExExManagerHandle { fn clone(&self) -> Self { Self { exex_tx: self.exex_tx.clone(), @@ -639,11 +662,12 @@ mod tests { use futures::{StreamExt, TryStreamExt}; use rand::Rng; use reth_db_common::init::init_genesis; + use reth_evm::test_utils::MockExecutorProvider; use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives::SealedBlockWithSenders; use reth_provider::{ providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockReader, - BlockWriter, Chain, DatabaseProviderFactory, TransactionVariant, + BlockWriter, Chain, DatabaseProviderFactory, StorageLocation, TransactionVariant, }; use reth_testing_utils::generators::{self, random_block, BlockParams}; @@ -659,8 +683,13 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (mut exex_handle, event_tx, mut _notification_rx) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let (mut exex_handle, event_tx, mut _notification_rx) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + 
wal.handle(), + ); // Send an event and check that it's delivered correctly let event = ExExEvent::FinishedHeight(BlockNumHash::new(42, B256::random())); @@ -674,8 +703,13 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (exex_handle_1, _, _) = - ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle_1, _, _) = ExExHandle::new( + "test_exex_1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); assert!(!ExExManager::new((), vec![], 0, wal.clone(), empty_finalized_header_stream()) .handle @@ -691,8 +725,13 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (exex_handle_1, _, _) = - ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle_1, _, _) = ExExHandle::new( + "test_exex_1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); assert!(!ExExManager::new((), vec![], 0, wal.clone(), empty_finalized_header_stream()) .handle @@ -714,15 +753,20 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (exex_handle, _, _) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle, _, _) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Create a mock ExExManager and add the exex_handle to it let mut exex_manager = ExExManager::new((), vec![exex_handle], 10, wal, empty_finalized_header_stream()); // Define the notification for testing - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); @@ -740,7 +784,7 @@ mod tests { assert_eq!(exex_manager.next_id, 1); // Push another notification - let mut block2 = SealedBlockWithSenders::default(); + let mut block2: SealedBlockWithSenders = Default::default(); block2.block.header.set_hash(B256::new([0x02; 32])); block2.block.header.set_block_number(20); @@ -764,8 +808,13 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (exex_handle, _, _) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle, _, _) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Create a mock ExExManager and add the exex_handle to it let max_capacity = 5; @@ -778,7 +827,7 @@ mod tests { ); // Push some notifications to fill part of the buffer - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); @@ -810,8 +859,13 @@ mod tests { let provider_factory = create_test_provider_factory(); - let (exex_handle, event_tx, mut _notification_rx) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle, event_tx, mut _notification_rx) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Check initial block height assert!(exex_handle.finished_height.is_none()); @@ -860,10 +914,20 @@ mod tests { let 
provider_factory = create_test_provider_factory(); // Create two `ExExHandle` instances - let (exex_handle1, event_tx1, _) = - ExExHandle::new("test_exex1".to_string(), Head::default(), (), (), wal.handle()); - let (exex_handle2, event_tx2, _) = - ExExHandle::new("test_exex2".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle1, event_tx1, _) = ExExHandle::new( + "test_exex1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); + let (exex_handle2, event_tx2, _) = ExExHandle::new( + "test_exex2".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); let block1 = BlockNumHash::new(42, B256::random()); let block2 = BlockNumHash::new(10, B256::random()); @@ -907,10 +971,20 @@ mod tests { let provider_factory = create_test_provider_factory(); // Create two `ExExHandle` instances - let (exex_handle1, event_tx1, _) = - ExExHandle::new("test_exex1".to_string(), Head::default(), (), (), wal.handle()); - let (exex_handle2, event_tx2, _) = - ExExHandle::new("test_exex2".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle1, event_tx1, _) = ExExHandle::new( + "test_exex1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); + let (exex_handle2, event_tx2, _) = ExExHandle::new( + "test_exex2".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Assert that the initial block height is `None` for the first `ExExHandle`. assert!(exex_handle1.finished_height.is_none()); @@ -960,8 +1034,13 @@ mod tests { let provider_factory = create_test_provider_factory(); - let (exex_handle_1, _, _) = - ExExHandle::new("test_exex_1".to_string(), Head::default(), (), (), wal.handle()); + let (exex_handle_1, _, _) = ExExHandle::new( + "test_exex_1".to_string(), + Head::default(), + (), + MockExecutorProvider::default(), + wal.handle(), + ); // Create an ExExManager with a small max capacity let max_capacity = 2; @@ -1037,11 +1116,11 @@ mod tests { assert_eq!(exex_handle.next_notification_id, 0); // Setup two blocks for the chain commit notification - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); - let mut block2 = SealedBlockWithSenders::default(); + let mut block2: SealedBlockWithSenders = Default::default(); block2.block.header.set_hash(B256::new([0x02; 32])); block2.block.header.set_block_number(11); @@ -1090,7 +1169,7 @@ mod tests { // Set finished_height to a value higher than the block tip exex_handle.finished_height = Some(BlockNumHash::new(15, B256::random())); - let mut block1 = SealedBlockWithSenders::default(); + let mut block1: SealedBlockWithSenders = Default::default(); block1.block.header.set_hash(B256::new([0x01; 32])); block1.block.header.set_block_number(10); @@ -1221,10 +1300,10 @@ mod tests { genesis_block.number + 1, BlockParams { parent: Some(genesis_hash), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .unwrap(); let provider_rw = provider_factory.database_provider_rw().unwrap(); - provider_rw.insert_block(block.clone()).unwrap(); + provider_rw.insert_block(block.clone(), StorageLocation::Database).unwrap(); provider_rw.commit().unwrap(); let provider = BlockchainProvider2::new(provider_factory).unwrap(); diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 
d0c94d34f64..05892e2f90d 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -1,8 +1,11 @@ use crate::{BackfillJobFactory, ExExNotification, StreamBackfillJob, WalHandle}; +use alloy_consensus::BlockHeader; use futures::{Stream, StreamExt}; use reth_chainspec::Head; use reth_evm::execute::BlockExecutorProvider; use reth_exex_types::ExExHead; +use reth_node_api::NodePrimitives; +use reth_primitives::EthPrimitives; use reth_provider::{BlockReader, Chain, HeaderProvider, StateProviderFactory}; use reth_tracing::tracing::debug; use std::{ @@ -17,12 +20,54 @@ use tokio::sync::mpsc::Receiver; /// stream is configured with a head via [`ExExNotifications::set_with_head`] or /// [`ExExNotifications::with_head`], it will run backfill jobs to catch up to the node head. #[derive(Debug)] -pub struct ExExNotifications { +pub struct ExExNotifications +where + E: BlockExecutorProvider, +{ inner: ExExNotificationsInner, } +/// A trait, that represents a stream of [`ExExNotification`]s. The stream will emit notifications +/// for all blocks. If the stream is configured with a head via [`ExExNotifications::set_with_head`] +/// or [`ExExNotifications::with_head`], it will run backfill jobs to catch up to the node head. +pub trait ExExNotificationsStream: + Stream>> + Unpin +{ + /// Sets [`ExExNotificationsStream`] to a stream of [`ExExNotification`]s without a head. + /// + /// It's a no-op if the stream has already been configured without a head. + /// + /// See the documentation of [`ExExNotificationsWithoutHead`] for more details. + fn set_without_head(&mut self); + + /// Sets [`ExExNotificationsStream`] to a stream of [`ExExNotification`]s with the provided + /// head. + /// + /// It's a no-op if the stream has already been configured with a head. + /// + /// See the documentation of [`ExExNotificationsWithHead`] for more details. + fn set_with_head(&mut self, exex_head: ExExHead); + + /// Returns a new [`ExExNotificationsStream`] without a head. + /// + /// See the documentation of [`ExExNotificationsWithoutHead`] for more details. + fn without_head(self) -> Self + where + Self: Sized; + + /// Returns a new [`ExExNotificationsStream`] with the provided head. + /// + /// See the documentation of [`ExExNotificationsWithHead`] for more details. + fn with_head(self, exex_head: ExExHead) -> Self + where + Self: Sized; +} + #[derive(Debug)] -enum ExExNotificationsInner { +enum ExExNotificationsInner +where + E: BlockExecutorProvider, +{ /// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. WithoutHead(ExExNotificationsWithoutHead), /// A stream of [`ExExNotification`]s. The stream will only emit notifications for blocks that @@ -33,14 +78,17 @@ enum ExExNotificationsInner { Invalid, } -impl ExExNotifications { +impl ExExNotifications +where + E: BlockExecutorProvider, +{ /// Creates a new stream of [`ExExNotifications`] without a head. pub const fn new( node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, ) -> Self { Self { inner: ExExNotificationsInner::WithoutHead(ExExNotificationsWithoutHead::new( @@ -52,13 +100,17 @@ impl ExExNotifications { )), } } +} - /// Sets [`ExExNotifications`] to a stream of [`ExExNotification`]s without a head. - /// - /// It's a no-op if the stream has already been configured without a head. - /// - /// See the documentation of [`ExExNotificationsWithoutHead`] for more details. 
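The `ExExNotificationsStream` trait introduced here is object-safe except for the consuming `without_head`/`with_head` constructors, which is what lets `ExExContextDyn` store a `Box<dyn ExExNotificationsStream>`. A sketch of driving such a type-erased stream, assuming the trait's default `EthPrimitives` type parameter (`process` is a placeholder):

    use futures::TryStreamExt;
    use reth_exex::{ExExNotification, ExExNotificationsStream};
    use reth_exex_types::ExExHead;

    async fn drive(
        mut notifications: Box<dyn ExExNotificationsStream>,
        head: ExExHead,
    ) -> eyre::Result<()> {
        // Configure the stream to backfill from `head` before emitting live notifications.
        notifications.set_with_head(head);
        while let Some(notification) = notifications.try_next().await? {
            process(&notification);
        }
        Ok(())
    }

    fn process(_notification: &ExExNotification) {}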
- pub fn set_without_head(&mut self) { +impl ExExNotificationsStream for ExExNotifications +where + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider> + + Clone + + Unpin + + 'static, +{ + fn set_without_head(&mut self) { let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid); self.inner = ExExNotificationsInner::WithoutHead(match current { ExExNotificationsInner::WithoutHead(notifications) => notifications, @@ -73,20 +125,7 @@ impl ExExNotifications { }); } - /// Returns a new [`ExExNotifications`] without a head. - /// - /// See the documentation of [`ExExNotificationsWithoutHead`] for more details. - pub fn without_head(mut self) -> Self { - self.set_without_head(); - self - } - - /// Sets [`ExExNotifications`] to a stream of [`ExExNotification`]s with the provided head. - /// - /// It's a no-op if the stream has already been configured with a head. - /// - /// See the documentation of [`ExExNotificationsWithHead`] for more details. - pub fn set_with_head(&mut self, exex_head: ExExHead) { + fn set_with_head(&mut self, exex_head: ExExHead) { let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid); self.inner = ExExNotificationsInner::WithHead(match current { ExExNotificationsInner::WithoutHead(notifications) => { @@ -104,10 +143,12 @@ impl ExExNotifications { }); } - /// Returns a new [`ExExNotifications`] with the provided head. - /// - /// See the documentation of [`ExExNotificationsWithHead`] for more details. - pub fn with_head(mut self, exex_head: ExExHead) -> Self { + fn without_head(mut self) -> Self { + self.set_without_head(); + self + } + + fn with_head(mut self, exex_head: ExExHead) -> Self { self.set_with_head(exex_head); self } @@ -116,9 +157,12 @@ impl ExExNotifications { impl Stream for ExExNotifications where P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider + Clone + Unpin + 'static, + E: BlockExecutorProvider> + + Clone + + Unpin + + 'static, { - type Item = eyre::Result; + type Item = eyre::Result>; fn poll_next( self: std::pin::Pin<&mut Self>, @@ -135,15 +179,21 @@ where } /// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. -pub struct ExExNotificationsWithoutHead { +pub struct ExExNotificationsWithoutHead +where + E: BlockExecutorProvider, +{ node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, } -impl Debug for ExExNotificationsWithoutHead { +impl Debug for ExExNotificationsWithoutHead +where + E: Debug + BlockExecutorProvider, +{ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ExExNotifications") .field("provider", &self.provider) @@ -153,14 +203,17 @@ impl Debug for ExExNotificationsWithoutHead { } } -impl ExExNotificationsWithoutHead { +impl ExExNotificationsWithoutHead +where + E: BlockExecutorProvider, +{ /// Creates a new instance of [`ExExNotificationsWithoutHead`]. 
const fn new( node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, ) -> Self { Self { node_head, provider, executor, notifications, wal_handle } } @@ -178,8 +231,11 @@ impl ExExNotificationsWithoutHead { } } -impl Stream for ExExNotificationsWithoutHead { - type Item = ExExNotification; +impl Stream for ExExNotificationsWithoutHead +where + E: Unpin + BlockExecutorProvider, +{ + type Item = ExExNotification; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.get_mut().notifications.poll_recv(cx) @@ -195,12 +251,15 @@ impl Stream for ExExNotificationsWithoutHead { /// `exex_head.number` of 10 indicates that the ExEx has processed up to block 10, and is ready to /// process block 11. #[derive(Debug)] -pub struct ExExNotificationsWithHead { +pub struct ExExNotificationsWithHead +where + E: BlockExecutorProvider, +{ node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, exex_head: ExExHead, /// If true, then we need to check if the ExEx head is on the canonical chain and if not, /// revert its head. @@ -209,17 +268,20 @@ pub struct ExExNotificationsWithHead { /// the missing blocks. pending_check_backfill: bool, /// The backfill job to run before consuming any notifications. - backfill_job: Option>, + backfill_job: Option>>, } -impl ExExNotificationsWithHead { +impl ExExNotificationsWithHead +where + E: BlockExecutorProvider, +{ /// Creates a new [`ExExNotificationsWithHead`]. const fn new( node_head: Head, provider: P, executor: E, - notifications: Receiver, - wal_handle: WalHandle, + notifications: Receiver>, + wal_handle: WalHandle, exex_head: ExExHead, ) -> Self { Self { @@ -239,14 +301,17 @@ impl ExExNotificationsWithHead { impl ExExNotificationsWithHead where P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider + Clone + Unpin + 'static, + E: BlockExecutorProvider> + + Clone + + Unpin + + 'static, { /// Checks if the ExEx head is on the canonical chain. /// /// If the head block is not found in the database or it's ahead of the node head, it means /// we're not on the canonical chain and we need to revert the notification with the ExEx /// head block. - fn check_canonical(&mut self) -> eyre::Result> { + fn check_canonical(&mut self) -> eyre::Result>> { if self.provider.is_known(&self.exex_head.block.hash)? && self.exex_head.block.number <= self.node_head.number { @@ -270,7 +335,7 @@ where // Update the head block hash to the parent hash of the first committed block. 
let committed_chain = notification.committed_chain().unwrap(); let new_exex_head = - (committed_chain.first().parent_hash, committed_chain.first().number - 1).into(); + (committed_chain.first().parent_hash(), committed_chain.first().number() - 1).into(); debug!(target: "exex::notifications", old_exex_head = ?self.exex_head.block, new_exex_head = ?new_exex_head, "ExEx head updated"); self.exex_head.block = new_exex_head; @@ -316,9 +381,12 @@ where impl Stream for ExExNotificationsWithHead where P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider + Clone + Unpin + 'static, + E: BlockExecutorProvider> + + Clone + + Unpin + + 'static, { - type Item = eyre::Result; + type Item = eyre::Result>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -358,7 +426,7 @@ where this.exex_head.block = committed_chain.tip().num_hash(); } else if let Some(reverted_chain) = notification.reverted_chain() { let first_block = reverted_chain.first(); - this.exex_head.block = (first_block.parent_hash, first_block.number - 1).into(); + this.exex_head.block = (first_block.parent_hash(), first_block.number() - 1).into(); } Poll::Ready(Some(Ok(notification))) @@ -376,10 +444,10 @@ mod tests { use futures::StreamExt; use reth_db_common::init::init_genesis; use reth_evm_ethereum::execute::EthExecutorProvider; - use reth_primitives::Block; + use reth_primitives::{Block, BlockExt}; use reth_provider::{ providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockWriter, - Chain, DatabaseProviderFactory, + Chain, DatabaseProviderFactory, StorageLocation, }; use reth_testing_utils::generators::{self, random_block, BlockParams}; use tokio::sync::mpsc; @@ -407,6 +475,7 @@ mod tests { let provider_rw = provider_factory.provider_rw()?; provider_rw.insert_block( node_head_block.clone().seal_with_senders().ok_or_eyre("failed to recover senders")?, + StorageLocation::Database, )?; provider_rw.commit()?; @@ -542,7 +611,7 @@ mod tests { genesis_block.number + 1, BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .ok_or_eyre("failed to recover senders")?; let node_head = Head { number: node_head_block.number, @@ -550,7 +619,7 @@ mod tests { ..Default::default() }; let provider_rw = provider.database_provider_rw()?; - provider_rw.insert_block(node_head_block)?; + provider_rw.insert_block(node_head_block, StorageLocation::Database)?; provider_rw.commit()?; let node_head_notification = ExExNotification::ChainCommitted { new: Arc::new( diff --git a/crates/exex/exex/src/wal/cache.rs b/crates/exex/exex/src/wal/cache.rs index 882b65e1589..b5e0f2034e8 100644 --- a/crates/exex/exex/src/wal/cache.rs +++ b/crates/exex/exex/src/wal/cache.rs @@ -3,9 +3,11 @@ use std::{ collections::{BinaryHeap, HashSet}, }; +use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; use alloy_primitives::{map::FbHashMap, BlockNumber, B256}; use reth_exex_types::ExExNotification; +use reth_node_api::NodePrimitives; /// The block cache of the WAL. /// @@ -35,6 +37,11 @@ impl BlockCache { self.notification_max_blocks.is_empty() } + /// Returns the number of blocks in the cache. + pub(super) fn num_blocks(&self) -> usize { + self.committed_blocks.len() + } + /// Removes all files from the cache that has notifications with a tip block less than or equal /// to the given block number. 
/// @@ -86,16 +93,16 @@ impl BlockCache { } /// Inserts the blocks from the notification into the cache with the given file ID. - pub(super) fn insert_notification_blocks_with_file_id( + pub(super) fn insert_notification_blocks_with_file_id( &mut self, file_id: u32, - notification: &ExExNotification, + notification: &ExExNotification, ) { let reverted_chain = notification.reverted_chain(); let committed_chain = notification.committed_chain(); let max_block = - reverted_chain.iter().chain(&committed_chain).map(|chain| chain.tip().number).max(); + reverted_chain.iter().chain(&committed_chain).map(|chain| chain.tip().number()).max(); if let Some(max_block) = max_block { self.notification_max_blocks.push(Reverse((max_block, file_id))); } @@ -103,13 +110,13 @@ impl BlockCache { if let Some(committed_chain) = &committed_chain { for block in committed_chain.blocks().values() { let cached_block = CachedBlock { - block: (block.number, block.hash()).into(), - parent_hash: block.parent_hash, + block: (block.number(), block.hash()).into(), + parent_hash: block.parent_hash(), }; self.committed_blocks.insert(block.hash(), (file_id, cached_block)); } - self.highest_committed_block_height = Some(committed_chain.tip().number); + self.highest_committed_block_height = Some(committed_chain.tip().number()); } } diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index 00b0ea919ef..fb6be6e8c85 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -3,6 +3,8 @@ mod cache; pub use cache::BlockCache; mod storage; +use reth_node_api::NodePrimitives; +use reth_primitives::EthPrimitives; pub use storage::Storage; mod metrics; use metrics::Metrics; @@ -32,23 +34,26 @@ use reth_tracing::tracing::{debug, instrument}; /// 2. When the chain is finalized, call [`Wal::finalize`] to prevent the infinite growth of the /// WAL. #[derive(Debug, Clone)] -pub struct Wal { - inner: Arc, +pub struct Wal { + inner: Arc>, } -impl Wal { +impl Wal +where + N: NodePrimitives, +{ /// Creates a new instance of [`Wal`]. pub fn new(directory: impl AsRef) -> eyre::Result { Ok(Self { inner: Arc::new(WalInner::new(directory)?) }) } /// Returns a read-only handle to the WAL. - pub fn handle(&self) -> WalHandle { + pub fn handle(&self) -> WalHandle { WalHandle { wal: self.inner.clone() } } /// Commits the notification to WAL. - pub fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { + pub fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { self.inner.commit(notification) } @@ -63,23 +68,31 @@ impl Wal { /// Returns an iterator over all notifications in the WAL. pub fn iter_notifications( &self, - ) -> eyre::Result> + '_>> { + ) -> eyre::Result>> + '_>> { self.inner.iter_notifications() } + + /// Returns the number of blocks in the WAL. + pub fn num_blocks(&self) -> usize { + self.inner.block_cache().num_blocks() + } } /// Inner type for the WAL. #[derive(Debug)] -struct WalInner { +struct WalInner { next_file_id: AtomicU32, /// The underlying WAL storage backed by a file. - storage: Storage, + storage: Storage, /// WAL block cache. See [`cache::BlockCache`] docs for more details. 
block_cache: RwLock, metrics: Metrics, } -impl WalInner { +impl WalInner +where + N: NodePrimitives, +{ fn new(directory: impl AsRef) -> eyre::Result { let mut wal = Self { next_file_id: AtomicU32::new(0), @@ -132,7 +145,7 @@ impl WalInner { reverted_block_range = ?notification.reverted_chain().as_ref().map(|chain| chain.range()), committed_block_range = ?notification.committed_chain().as_ref().map(|chain| chain.range()) ))] - fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { + fn commit(&self, notification: &ExExNotification) -> eyre::Result<()> { let mut block_cache = self.block_cache.write(); let file_id = self.next_file_id.fetch_add(1, Ordering::Relaxed); @@ -182,7 +195,7 @@ impl WalInner { /// Returns an iterator over all notifications in the WAL. fn iter_notifications( &self, - ) -> eyre::Result> + '_>> { + ) -> eyre::Result>> + '_>> { let Some(range) = self.storage.files_range()? else { return Ok(Box::new(std::iter::empty())) }; @@ -193,16 +206,19 @@ impl WalInner { /// A read-only handle to the WAL that can be shared. #[derive(Debug)] -pub struct WalHandle { - wal: Arc, +pub struct WalHandle { + wal: Arc>, } -impl WalHandle { +impl WalHandle +where + N: NodePrimitives, +{ /// Returns the notification for the given committed block hash if it exists. pub fn get_committed_notification_by_block_hash( &self, block_hash: &B256, - ) -> eyre::Result> { + ) -> eyre::Result>> { let Some(file_id) = self.wal.block_cache().get_file_id_by_committed_block_hash(block_hash) else { return Ok(None) @@ -231,13 +247,13 @@ mod tests { use crate::wal::{cache::CachedBlock, Wal}; fn read_notifications(wal: &Wal) -> eyre::Result> { - let Some(files_range) = wal.inner.storage.files_range()? else { return Ok(Vec::new()) }; - - wal.inner - .storage - .iter_notifications(files_range) - .map(|entry| Ok(entry?.2)) - .collect::>() + wal.inner.storage.files_range()?.map_or(Ok(Vec::new()), |range| { + wal.inner + .storage + .iter_notifications(range) + .map(|entry| entry.map(|(_, _, n)| n)) + .collect() + }) } fn sort_committed_blocks( @@ -263,21 +279,25 @@ mod tests { // Create 4 canonical blocks and one reorged block with number 2 let blocks = random_block_range(&mut rng, 0..=3, BlockRangeParams::default()) .into_iter() - .map(|block| block.seal_with_senders().ok_or_eyre("failed to recover senders")) + .map(|block| { + block + .seal_with_senders::() + .ok_or_eyre("failed to recover senders") + }) .collect::>>()?; let block_1_reorged = random_block( &mut rng, 1, BlockParams { parent: Some(blocks[0].hash()), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .ok_or_eyre("failed to recover senders")?; let block_2_reorged = random_block( &mut rng, 2, BlockParams { parent: Some(blocks[1].hash()), ..Default::default() }, ) - .seal_with_senders() + .seal_with_senders::() .ok_or_eyre("failed to recover senders")?; // Create notifications for the above blocks. diff --git a/crates/exex/exex/src/wal/storage.rs b/crates/exex/exex/src/wal/storage.rs index aaa4398fd0b..699d88ba2a7 100644 --- a/crates/exex/exex/src/wal/storage.rs +++ b/crates/exex/exex/src/wal/storage.rs @@ -6,6 +6,8 @@ use std::{ use eyre::OptionExt; use reth_exex_types::ExExNotification; +use reth_node_api::NodePrimitives; +use reth_primitives::EthPrimitives; use reth_tracing::tracing::debug; use tracing::instrument; @@ -16,18 +18,22 @@ static FILE_EXTENSION: &str = "wal"; /// Each notification is represented by a single file that contains a MessagePack-encoded /// notification. 
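Together with the manager's `WAL_BLOCKS_WARNING` check above, the new `num_blocks` accessor makes WAL growth observable from outside the manager. A sketch of the same threshold check against a standalone WAL, assuming `WAL_BLOCKS_WARNING` is re-exported alongside the manager types and that `Wal` defaults to `EthPrimitives`:

    use reth_exex::{Wal, WAL_BLOCKS_WARNING};

    fn wal_needs_attention(wal: &Wal) -> bool {
        // `num_blocks` reports the committed blocks currently tracked by the WAL's block
        // cache; a growing number means ExExes are not emitting `FinishedHeight` events.
        wal.num_blocks() > WAL_BLOCKS_WARNING
    }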
#[derive(Debug, Clone)] -pub struct Storage { +pub struct Storage { /// The path to the WAL file. path: PathBuf, + _pd: std::marker::PhantomData, } -impl Storage { +impl Storage +where + N: NodePrimitives, +{ /// Creates a new instance of [`Storage`] backed by the file at the given path and creates /// it doesn't exist. pub(super) fn new(path: impl AsRef) -> eyre::Result { reth_fs_util::create_dir_all(&path)?; - Ok(Self { path: path.as_ref().to_path_buf() }) + Ok(Self { path: path.as_ref().to_path_buf(), _pd: std::marker::PhantomData }) } fn file_path(&self, id: u32) -> PathBuf { @@ -110,7 +116,7 @@ impl Storage { pub(super) fn iter_notifications( &self, range: RangeInclusive, - ) -> impl Iterator> + '_ { + ) -> impl Iterator)>> + '_ { range.map(move |id| { let (notification, size) = self.read_notification(id)?.ok_or_eyre("notification {id} not found")?; @@ -124,7 +130,7 @@ impl Storage { pub(super) fn read_notification( &self, file_id: u32, - ) -> eyre::Result> { + ) -> eyre::Result, u64)>> { let file_path = self.file_path(file_id); debug!(target: "exex::wal::storage", ?file_path, "Reading notification from WAL"); @@ -136,7 +142,7 @@ impl Storage { let size = file.metadata()?.len(); // Deserialize using the bincode- and msgpack-compatible serde wrapper - let notification: reth_exex_types::serde_bincode_compat::ExExNotification<'_> = + let notification: reth_exex_types::serde_bincode_compat::ExExNotification<'_, N> = rmp_serde::decode::from_read(&mut file).map_err(|err| { eyre::eyre!("failed to decode notification from {file_path:?}: {err:?}") })?; @@ -153,14 +159,14 @@ impl Storage { pub(super) fn write_notification( &self, file_id: u32, - notification: &ExExNotification, + notification: &ExExNotification, ) -> eyre::Result { let file_path = self.file_path(file_id); debug!(target: "exex::wal::storage", ?file_path, "Writing notification to WAL"); // Serialize using the bincode- and msgpack-compatible serde wrapper let notification = - reth_exex_types::serde_bincode_compat::ExExNotification::from(notification); + reth_exex_types::serde_bincode_compat::ExExNotification::::from(notification); reth_fs_util::atomic_write_file(&file_path, |file| { rmp_serde::encode::write(file, ¬ification) @@ -186,7 +192,7 @@ mod tests { let mut rng = generators::rng(); let temp_dir = tempfile::tempdir()?; - let storage = Storage::new(&temp_dir)?; + let storage: Storage = Storage::new(&temp_dir)?; let old_block = random_block(&mut rng, 0, Default::default()) .seal_with_senders() @@ -215,7 +221,7 @@ mod tests { #[test] fn test_files_range() -> eyre::Result<()> { let temp_dir = tempfile::tempdir()?; - let storage = Storage::new(&temp_dir)?; + let storage: Storage = Storage::new(&temp_dir)?; // Create WAL files File::create(storage.file_path(1))?; diff --git a/crates/exex/test-utils/Cargo.toml b/crates/exex/test-utils/Cargo.toml index 8488cdb8b73..6e5af981b31 100644 --- a/crates/exex/test-utils/Cargo.toml +++ b/crates/exex/test-utils/Cargo.toml @@ -31,7 +31,10 @@ reth-primitives.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-tasks.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } -reth-ethereum-engine-primitives.workspace = true +reth-trie-db.workspace = true + +## alloy +alloy-eips.workspace = true ## async futures-util.workspace = true diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 9e17013c4a5..939bf21c022 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ 
-15,6 +15,7 @@ use std::{ task::Poll, }; +use alloy_eips::BlockNumHash; use futures_util::FutureExt; use reth_blockchain_tree::noop::NoopBlockchainTree; use reth_chainspec::{ChainSpec, MAINNET}; @@ -24,13 +25,13 @@ use reth_db::{ DatabaseEnv, }; use reth_db_common::init::init_genesis; -use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_evm::test_utils::MockExecutorProvider; use reth_execution_types::Chain; use reth_exex::{ExExContext, ExExEvent, ExExNotification, ExExNotifications, Wal}; use reth_network::{config::SecretKey, NetworkConfigBuilder, NetworkManager}; use reth_node_api::{ - FullNodeTypes, FullNodeTypesAdapter, NodeTypes, NodeTypesWithDBAdapter, NodeTypesWithEngine, + FullNodeTypes, FullNodeTypesAdapter, NodePrimitives, NodeTypes, NodeTypesWithDBAdapter, + NodeTypesWithEngine, }; use reth_node_builder::{ components::{ @@ -41,17 +42,14 @@ use reth_node_builder::{ }; use reth_node_core::node_config::NodeConfig; use reth_node_ethereum::{ - node::{ - EthereumAddOns, EthereumEngineValidatorBuilder, EthereumNetworkBuilder, - EthereumPayloadBuilder, - }, + node::{EthereumAddOns, EthereumNetworkBuilder, EthereumPayloadBuilder}, EthEngineTypes, EthEvmConfig, }; use reth_payload_builder::noop::NoopPayloadBuilderService; -use reth_primitives::{BlockNumHash, Head, SealedBlockWithSenders}; +use reth_primitives::{BlockExt, EthPrimitives, Head, SealedBlockWithSenders, TransactionSigned}; use reth_provider::{ providers::{BlockchainProvider, StaticFileProvider}, - BlockReader, ProviderFactory, + BlockReader, EthStorage, ProviderFactory, }; use reth_tasks::TaskManager; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; @@ -67,7 +65,7 @@ pub struct TestPoolBuilder; impl PoolBuilder for TestPoolBuilder where - Node: FullNodeTypes, + Node: FullNodeTypes>>, { type Pool = TestPool; @@ -83,7 +81,7 @@ pub struct TestExecutorBuilder; impl ExecutorBuilder for TestExecutorBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type EVM = EthEvmConfig; type Executor = MockExecutorProvider; @@ -121,8 +119,10 @@ where pub struct TestNode; impl NodeTypes for TestNode { - type Primitives = (); + type Primitives = EthPrimitives; type ChainSpec = ChainSpec; + type StateCommitment = reth_trie_db::MerklePatriciaTrie; + type Storage = EthStorage; } impl NodeTypesWithEngine for TestNode { @@ -131,7 +131,14 @@ impl NodeTypesWithEngine for TestNode { impl Node for TestNode where - N: FullNodeTypes>, + N: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = EthEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Storage = EthStorage, + >, + >, { type ComponentsBuilder = ComponentsBuilder< N, @@ -140,7 +147,6 @@ where EthereumNetworkBuilder, TestExecutorBuilder, TestConsensusBuilder, - EthereumEngineValidatorBuilder, >; type AddOns = EthereumAddOns< NodeAdapter>::Components>, @@ -154,7 +160,6 @@ where .network(EthereumNetworkBuilder::default()) .executor(TestExecutorBuilder::default()) .consensus(TestConsensusBuilder::default()) - .engine_validator(EthereumEngineValidatorBuilder::default()) } fn add_ons(&self) -> Self::AddOns { @@ -261,7 +266,7 @@ pub async fn test_exex_context_with_chain_spec( let (static_dir, _) = create_test_static_files_dir(); let db = create_test_rw_db(); - let provider_factory = ProviderFactory::new( + let provider_factory = ProviderFactory::>::new( db, chain_spec.clone(), StaticFileProvider::read_write(static_dir.into_path()).expect("static file provider"), @@ -274,19 +279,18 @@ pub async fn test_exex_context_with_chain_spec( let 
network_manager = NetworkManager::new( NetworkConfigBuilder::new(SecretKey::new(&mut rand::thread_rng())) .with_unused_discovery_port() + .with_unused_listener_port() .build(provider_factory.clone()), ) .await?; let network = network_manager.handle().clone(); - - let (_, payload_builder) = NoopPayloadBuilderService::::new(); - let tasks = TaskManager::current(); let task_executor = tasks.executor(); + tasks.executor().spawn(network_manager); - let engine_validator = EthereumEngineValidator::new(chain_spec.clone()); + let (_, payload_builder) = NoopPayloadBuilderService::::new(); - let components = NodeAdapter::, _>, _> { + let components = NodeAdapter::, _> { components: Components { transaction_pool, evm_config, @@ -294,7 +298,6 @@ pub async fn test_exex_context_with_chain_spec( consensus, network, payload_builder, - engine_validator, }, task_executor, provider, @@ -304,7 +307,7 @@ pub async fn test_exex_context_with_chain_spec( .block_by_hash(genesis_hash)? .ok_or_else(|| eyre::eyre!("genesis block not found"))? .seal_slow() - .seal_with_senders() + .seal_with_senders::() .ok_or_else(|| eyre::eyre!("failed to recover senders"))?; let head = Head { diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index a146cbc2273..b7e659d80a8 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -15,6 +15,8 @@ workspace = true # reth reth-chain-state.workspace = true reth-execution-types.workspace = true +reth-primitives = { workspace = true, optional = true } +reth-primitives-traits.workspace = true # reth alloy-primitives.workspace = true @@ -33,5 +35,18 @@ rand.workspace = true [features] default = [] -serde = ["dep:serde", "reth-execution-types/serde"] -serde-bincode-compat = ["reth-execution-types/serde-bincode-compat", "serde_with"] +serde = [ + "dep:serde", + "reth-execution-types/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "rand/serde", + "reth-primitives-traits/serde", +] +serde-bincode-compat = [ + "reth-execution-types/serde-bincode-compat", + "serde_with", + "reth-primitives/serde-bincode-compat", + "alloy-eips/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", +] diff --git a/crates/exex/types/src/notification.rs b/crates/exex/types/src/notification.rs index 61d42a3319b..19e47c0a1da 100644 --- a/crates/exex/types/src/notification.rs +++ b/crates/exex/types/src/notification.rs @@ -2,34 +2,35 @@ use std::sync::Arc; use reth_chain_state::CanonStateNotification; use reth_execution_types::Chain; +use reth_primitives_traits::NodePrimitives; /// Notifications sent to an `ExEx`. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub enum ExExNotification { +pub enum ExExNotification { /// Chain got committed without a reorg, and only the new chain is returned. ChainCommitted { /// The new chain after commit. - new: Arc, + new: Arc>, }, /// Chain got reorged, and both the old and the new chains are returned. ChainReorged { /// The old chain before reorg. - old: Arc, + old: Arc>, /// The new chain after reorg. - new: Arc, + new: Arc>, }, /// Chain got reverted, and only the old chain is returned. ChainReverted { /// The old chain before reversion. - old: Arc, + old: Arc>, }, } -impl ExExNotification { +impl ExExNotification { /// Returns the committed chain from the [`Self::ChainCommitted`] and [`Self::ChainReorged`] /// variants, if any. 
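With the primitives parameter added here, downstream helpers either spell out `ExExNotification<N>` or rely on the `EthPrimitives` default. A sketch of pulling tips out of a notification, mirroring the `BlockHeader`/`number()` style this diff migrates to:

    use alloy_consensus::BlockHeader;
    use reth_exex_types::ExExNotification;

    fn committed_tip(notification: &ExExNotification) -> Option<u64> {
        notification.committed_chain().map(|chain| chain.tip().number())
    }

    fn reverted_tip(notification: &ExExNotification) -> Option<u64> {
        notification.reverted_chain().map(|chain| chain.tip().number())
    }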
- pub fn committed_chain(&self) -> Option> { + pub fn committed_chain(&self) -> Option>> { match self { Self::ChainCommitted { new } | Self::ChainReorged { old: _, new } => Some(new.clone()), Self::ChainReverted { .. } => None, @@ -38,7 +39,7 @@ impl ExExNotification { /// Returns the reverted chain from the [`Self::ChainReorged`] and [`Self::ChainReverted`] /// variants, if any. - pub fn reverted_chain(&self) -> Option> { + pub fn reverted_chain(&self) -> Option>> { match self { Self::ChainReorged { old, new: _ } | Self::ChainReverted { old } => Some(old.clone()), Self::ChainCommitted { .. } => None, @@ -60,8 +61,8 @@ impl ExExNotification { } } -impl From for ExExNotification { - fn from(notification: CanonStateNotification) -> Self { +impl From> for ExExNotification
<N> { + fn from(notification: CanonStateNotification<N>
) -> Self { match notification { CanonStateNotification::Commit { new } => Self::ChainCommitted { new }, CanonStateNotification::Reorg { old, new } => Self::ChainReorged { old, new }, @@ -72,11 +73,11 @@ impl From for ExExNotification { /// Bincode-compatible [`ExExNotification`] serde implementation. #[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] pub(super) mod serde_bincode_compat { - use std::sync::Arc; - use reth_execution_types::serde_bincode_compat::Chain; + use reth_primitives::{EthPrimitives, NodePrimitives}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; + use std::sync::Arc; /// Bincode-compatible [`super::ExExNotification`] serde implementation. /// @@ -95,14 +96,21 @@ pub(super) mod serde_bincode_compat { /// ``` #[derive(Debug, Serialize, Deserialize)] #[allow(missing_docs)] - pub enum ExExNotification<'a> { - ChainCommitted { new: Chain<'a> }, - ChainReorged { old: Chain<'a>, new: Chain<'a> }, - ChainReverted { old: Chain<'a> }, + #[serde(bound = "")] + pub enum ExExNotification<'a, N = EthPrimitives> + where + N: NodePrimitives, + { + ChainCommitted { new: Chain<'a, N> }, + ChainReorged { old: Chain<'a, N>, new: Chain<'a, N> }, + ChainReverted { old: Chain<'a, N> }, } - impl<'a> From<&'a super::ExExNotification> for ExExNotification<'a> { - fn from(value: &'a super::ExExNotification) -> Self { + impl<'a, N> From<&'a super::ExExNotification> for ExExNotification<'a, N> + where + N: NodePrimitives, + { + fn from(value: &'a super::ExExNotification) -> Self { match value { super::ExExNotification::ChainCommitted { new } => { ExExNotification::ChainCommitted { new: Chain::from(new.as_ref()) } @@ -120,8 +128,11 @@ pub(super) mod serde_bincode_compat { } } - impl<'a> From> for super::ExExNotification { - fn from(value: ExExNotification<'a>) -> Self { + impl<'a, N> From> for super::ExExNotification + where + N: NodePrimitives, + { + fn from(value: ExExNotification<'a, N>) -> Self { match value { ExExNotification::ChainCommitted { new } => { Self::ChainCommitted { new: Arc::new(new.into()) } @@ -159,16 +170,14 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { - use std::sync::Arc; - + use super::super::{serde_bincode_compat, ExExNotification}; use arbitrary::Arbitrary; use rand::Rng; use reth_execution_types::Chain; use reth_primitives::SealedBlockWithSenders; use serde::{Deserialize, Serialize}; use serde_with::serde_as; - - use super::super::{serde_bincode_compat, ExExNotification}; + use std::sync::Arc; #[test] fn test_exex_notification_bincode_roundtrip() { diff --git a/crates/fs-util/src/lib.rs b/crates/fs-util/src/lib.rs index d242ecc98e2..c1aa4900e03 100644 --- a/crates/fs-util/src/lib.rs +++ b/crates/fs-util/src/lib.rs @@ -210,6 +210,12 @@ impl FsPathError { } } +/// Wrapper for [`File::open`]. 
+pub fn open(path: impl AsRef) -> Result { + let path = path.as_ref(); + File::open(path).map_err(|err| FsPathError::open(err, path)) +} + /// Wrapper for `std::fs::read_to_string` pub fn read_to_string(path: impl AsRef) -> Result { let path = path.as_ref(); diff --git a/crates/net/discv4/Cargo.toml b/crates/net/discv4/Cargo.toml index fde652ef397..70946c6dce8 100644 --- a/crates/net/discv4/Cargo.toml +++ b/crates/net/discv4/Cargo.toml @@ -19,7 +19,7 @@ reth-net-nat.workspace = true reth-network-peers = { workspace = true, features = ["secp256k1"] } # ethereum -alloy-primitives.workspace = true +alloy-primitives = { workspace = true, features = ["rand"] } alloy-rlp = { workspace = true, features = ["derive"] } discv5.workspace = true secp256k1 = { workspace = true, features = [ @@ -42,14 +42,25 @@ parking_lot.workspace = true rand = { workspace = true, optional = true } generic-array.workspace = true serde = { workspace = true, optional = true } +itertools.workspace = true [dev-dependencies] assert_matches.workspace = true rand.workspace = true -tokio = { workspace = true, features = ["macros"] } +tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } reth-tracing.workspace = true [features] default = ["serde"] -serde = ["dep:serde"] +serde = [ + "dep:serde", + "alloy-primitives/serde", + "discv5/serde", + "enr/serde", + "generic-array/serde", + "parking_lot/serde", + "rand?/serde", + "secp256k1/serde", + "reth-ethereum-forks/serde" +] test-utils = ["dep:rand"] diff --git a/crates/net/discv4/src/config.rs b/crates/net/discv4/src/config.rs index 4fae31f585a..38467304db2 100644 --- a/crates/net/discv4/src/config.rs +++ b/crates/net/discv4/src/config.rs @@ -8,8 +8,6 @@ use alloy_rlp::Encodable; use reth_net_banlist::BanList; use reth_net_nat::{NatResolver, ResolveNatInterval}; use reth_network_peers::NodeRecord; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; use std::{ collections::{HashMap, HashSet}, time::Duration, @@ -17,7 +15,7 @@ use std::{ /// Configuration parameters that define the performance of the discovery network. #[derive(Clone, Debug)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Discv4Config { /// Whether to enable the incoming packet filter. Default: false. pub enable_packet_filter: bool, @@ -25,7 +23,7 @@ pub struct Discv4Config { pub udp_egress_message_buffer: usize, /// Size of the channel buffer for incoming messages. pub udp_ingress_message_buffer: usize, - /// The number of allowed failures for `FindNode` requests. Default: 5. + /// The number of allowed consecutive failures for `FindNode` requests. Default: 5. pub max_find_node_failures: u8, /// The interval to use when checking for expired nodes that need to be re-pinged. Default: /// 10min. 
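Since `max_find_node_failures` now counts consecutive failures and drives the re-ping logic below, tuning it is a one-field change; the config fields are public, so a sketch needs no builder:

    use reth_discv4::Discv4Config;

    fn stricter_discovery() -> Discv4Config {
        let mut config = Discv4Config::default();
        // Re-ping (and eventually evict) a peer after fewer consecutive failed
        // FindNode requests.
        config.max_find_node_failures = 3;
        config
    }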
@@ -118,7 +116,7 @@ impl Default for Discv4Config { // Every outgoing request will eventually lead to an incoming response udp_ingress_message_buffer: 1024, max_find_node_failures: 5, - ping_interval: Duration::from_secs(60 * 10), + ping_interval: Duration::from_secs(10), // Unified expiration and timeout durations, mirrors geth's `expiration` duration ping_expiration: Duration::from_secs(20), bond_expiration: Duration::from_secs(60 * 60), @@ -144,7 +142,7 @@ impl Default for Discv4Config { /// Builder type for [`Discv4Config`] #[derive(Clone, Debug, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Discv4ConfigBuilder { config: Discv4Config, } diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 7c14eac9b65..9ffe8451f0e 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -38,6 +38,7 @@ use discv5::{ ConnectionDirection, ConnectionState, }; use enr::Enr; +use itertools::Itertools; use parking_lot::Mutex; use proto::{EnrRequest, EnrResponse}; use reth_ethereum_forks::ForkId; @@ -743,7 +744,8 @@ impl Discv4Service { trace!(target: "discv4", ?target, "Starting lookup"); let target_key = kad_key(target); - // Start a lookup context with the 16 (MAX_NODES_PER_BUCKET) closest nodes + // Start a lookup context with the 16 (MAX_NODES_PER_BUCKET) closest nodes to which we have + // a valid endpoint proof let ctx = LookupContext::new( target_key.clone(), self.kbuckets @@ -772,7 +774,10 @@ impl Discv4Service { trace!(target: "discv4", ?target, num = closest.len(), "Start lookup closest nodes"); for node in closest { - self.find_node(&node, ctx.clone()); + // here we still want to check against previous request failures and if necessary + // re-establish a new endpoint proof because it can be the case that the other node lost + // our entry and no longer has an endpoint proof on their end + self.find_node_checked(&node, ctx.clone()); } } @@ -788,6 +793,22 @@ impl Discv4Service { self.pending_find_nodes.insert(node.id, FindNodeRequest::new(ctx)); } + /// Sends a new `FindNode` packet to the node with `target` as the lookup target but checks + /// whether we should send a new ping first to renew the endpoint proof by checking the + /// previously failed findNode requests. It could be that the node is no longer reachable or + /// lost our entry. + fn find_node_checked(&mut self, node: &NodeRecord, ctx: LookupContext) { + let max_failures = self.config.max_find_node_failures; + let needs_ping = self + .on_entry(node.id, |entry| entry.exceeds_find_node_failures(max_failures)) + .unwrap_or(true); + if needs_ping { + self.try_ping(*node, PingReason::Lookup(*node, ctx)) + } else { + self.find_node(node, ctx) + } + } + /// Notifies all listeners. /// /// Removes all listeners that are closed. @@ -829,6 +850,24 @@ impl Discv4Service { /// table. Returns `true` if the node was in the table and `false` otherwise. 
pub fn remove_node(&mut self, node_id: PeerId) -> bool { let key = kad_key(node_id); + self.remove_key(node_id, key) + } + + /// Removes a `node_id` from the routing table but only if there are enough other nodes in the + /// bucket (bucket must be at least half full) + /// + /// Returns `true` if the node was removed + pub fn soft_remove_node(&mut self, node_id: PeerId) -> bool { + let key = kad_key(node_id); + let Some(bucket) = self.kbuckets.get_bucket(&key) else { return false }; + if bucket.num_entries() < MAX_NODES_PER_BUCKET / 2 { + // skip half empty bucket + return false + } + self.remove_key(node_id, key) + } + + fn remove_key(&mut self, node_id: PeerId, key: discv5::Key) -> bool { let removed = self.kbuckets.remove(&key); if removed { trace!(target: "discv4", ?node_id, "removed node"); @@ -842,7 +881,7 @@ impl Discv4Service { self.kbuckets.buckets_iter().fold(0, |count, bucket| count + bucket.num_connected()) } - /// Check if the peer has a bond + /// Check if the peer has an active bond. fn has_bond(&self, remote_id: PeerId, remote_ip: IpAddr) -> bool { if let Some(timestamp) = self.received_pongs.last_pong(remote_id, remote_ip) { if timestamp.elapsed() < self.config.bond_expiration { @@ -852,7 +891,22 @@ impl Discv4Service { false } - /// Update the entry on RE-ping + /// Applies a closure on the pending or present [`NodeEntry`]. + fn on_entry(&mut self, peer_id: PeerId, f: F) -> Option + where + F: FnOnce(&NodeEntry) -> R, + { + let key = kad_key(peer_id); + match self.kbuckets.entry(&key) { + BucketEntry::Present(entry, _) => Some(f(entry.value())), + BucketEntry::Pending(mut entry, _) => Some(f(entry.value())), + _ => None, + } + } + + /// Update the entry on RE-ping. + /// + /// Invoked when we received the Pong to our [`PingReason::RePing`] ping. /// /// On re-ping we check for a changed `enr_seq` if eip868 is enabled and when it changed we sent /// a followup request to retrieve the updated ENR @@ -909,7 +963,7 @@ impl Discv4Service { match self.kbuckets.entry(&key) { kbucket::Entry::Present(mut entry, old_status) => { // endpoint is now proven - entry.value_mut().has_endpoint_proof = true; + entry.value_mut().establish_proof(); entry.value_mut().update_with_enr(last_enr_seq); if !old_status.is_connected() { @@ -925,7 +979,7 @@ impl Discv4Service { } kbucket::Entry::Pending(mut entry, mut status) => { // endpoint is now proven - entry.value().has_endpoint_proof = true; + entry.value().establish_proof(); entry.value().update_with_enr(last_enr_seq); if !status.is_connected() { @@ -1028,11 +1082,23 @@ impl Discv4Service { let old_enr = match self.kbuckets.entry(&key) { kbucket::Entry::Present(mut entry, _) => { - is_proven = entry.value().has_endpoint_proof; + if entry.value().is_expired() { + // If no communication with the sender has occurred within the last 12h, a ping + // should be sent in addition to pong in order to receive an endpoint proof. + needs_bond = true; + } else { + is_proven = entry.value().has_endpoint_proof; + } entry.value_mut().update_with_enr(ping.enr_sq) } kbucket::Entry::Pending(mut entry, _) => { - is_proven = entry.value().has_endpoint_proof; + if entry.value().is_expired() { + // If no communication with the sender has occurred within the last 12h, a ping + // should be sent in addition to pong in order to receive an endpoint proof. 
+ needs_bond = true; + } else { + is_proven = entry.value().has_endpoint_proof; + } entry.value().update_with_enr(ping.enr_sq) } kbucket::Entry::Absent(entry) => { @@ -1097,6 +1163,8 @@ impl Discv4Service { // try to send it ctx.unmark_queried(record.id); } else { + // we just received a ping from that peer so we can send a find node request + // directly self.find_node(&record, ctx); } } @@ -1205,7 +1273,8 @@ impl Discv4Service { self.update_on_pong(node, pong.enr_sq); } PingReason::EstablishBond => { - // nothing to do here + // same as `InitialInsert` which renews the bond if the peer is in the table + self.update_on_pong(node, pong.enr_sq); } PingReason::RePing => { self.update_on_reping(node, pong.enr_sq); @@ -1338,6 +1407,16 @@ impl Discv4Service { } }; + // log the peers we discovered + trace!(target: "discv4", + target=format!("{:#?}", node_id), + peers_count=msg.nodes.len(), + peers=format!("[{:#}]", msg.nodes.iter() + .map(|node_rec| node_rec.id + ).format(", ")), + "Received peers from Neighbours packet" + ); + // This is the recursive lookup step where we initiate new FindNode requests for new nodes // that were discovered. for node in msg.nodes.into_iter().map(NodeRecord::into_ipv4_mapped) { @@ -1386,14 +1465,28 @@ impl Discv4Service { BucketEntry::SelfEntry => { // we received our own node entry } - BucketEntry::Present(mut entry, _) => { - if entry.value_mut().has_endpoint_proof { - self.find_node(&closest, ctx.clone()); + BucketEntry::Present(entry, _) => { + if entry.value().has_endpoint_proof { + if entry + .value() + .exceeds_find_node_failures(self.config.max_find_node_failures) + { + self.try_ping(closest, PingReason::Lookup(closest, ctx.clone())) + } else { + self.find_node(&closest, ctx.clone()); + } } } BucketEntry::Pending(mut entry, _) => { if entry.value().has_endpoint_proof { - self.find_node(&closest, ctx.clone()); + if entry + .value() + .exceeds_find_node_failures(self.config.max_find_node_failures) + { + self.try_ping(closest, PingReason::Lookup(closest, ctx.clone())) + } else { + self.find_node(&closest, ctx.clone()); + } } } } @@ -1431,11 +1524,12 @@ impl Discv4Service { true }); - trace!(target: "discv4", num=%failed_pings.len(), "evicting nodes due to failed pong"); - - // remove nodes that failed to pong - for node_id in failed_pings { - self.remove_node(node_id); + if !failed_pings.is_empty() { + // remove nodes that failed to pong + trace!(target: "discv4", num=%failed_pings.len(), "evicting nodes due to failed pong"); + for node_id in failed_pings { + self.remove_node(node_id); + } } let mut failed_lookups = Vec::new(); @@ -1446,34 +1540,40 @@ impl Discv4Service { } true }); - trace!(target: "discv4", num=%failed_lookups.len(), "evicting nodes due to failed lookup"); - // remove nodes that failed the e2e lookup process, so we can restart it - for node_id in failed_lookups { - self.remove_node(node_id); + if !failed_lookups.is_empty() { + // remove nodes that failed the e2e lookup process, so we can restart it + trace!(target: "discv4", num=%failed_lookups.len(), "evicting nodes due to failed lookup"); + for node_id in failed_lookups { + self.remove_node(node_id); + } } - self.evict_failed_neighbours(now); + self.evict_failed_find_nodes(now); } /// Handles failed responses to `FindNode` - fn evict_failed_neighbours(&mut self, now: Instant) { - let mut failed_neighbours = Vec::new(); + fn evict_failed_find_nodes(&mut self, now: Instant) { + let mut failed_find_nodes = Vec::new(); self.pending_find_nodes.retain(|node_id, find_node_request| { if 
now.duration_since(find_node_request.sent_at) > self.config.neighbours_expiration
            {
                if !find_node_request.answered {
                    // node actually responded but with fewer entries than expected, but we don't
                    // treat this as a hard error since it responded.
-                    failed_neighbours.push(*node_id);
+                    failed_find_nodes.push(*node_id);
                }
                return false
            }
            true
        });

-        trace!(target: "discv4", num=%failed_neighbours.len(), "processing failed neighbours");
+        if failed_find_nodes.is_empty() {
+            return
+        }
+
+        trace!(target: "discv4", num=%failed_find_nodes.len(), "processing failed find nodes");

-        for node_id in failed_neighbours {
+        for node_id in failed_find_nodes {
             let key = kad_key(node_id);
             let failures = match self.kbuckets.entry(&key) {
                 kbucket::Entry::Present(mut entry, _) => {
@@ -1490,14 +1590,8 @@ impl Discv4Service {
             // if the node failed to respond anything useful multiple times, remove the node from
             // the table, but only if there are enough other nodes in the bucket (bucket must be at
             // least half full)
-            if failures > (self.config.max_find_node_failures as usize) {
-                if let Some(bucket) = self.kbuckets.get_bucket(&key) {
-                    if bucket.num_entries() < MAX_NODES_PER_BUCKET / 2 {
-                        // skip half empty bucket
-                        continue
-                    }
-                }
-                self.remove_node(node_id);
+            if failures > self.config.max_find_node_failures {
+                self.soft_remove_node(node_id);
             }
         }
     }
@@ -2189,8 +2283,8 @@ struct NodeEntry {
     last_enr_seq: Option<u64>,
     /// `ForkId` if retrieved via ENR requests.
     fork_id: Option<ForkId>,
-    /// Counter for failed findNode requests.
-    find_node_failures: usize,
+    /// Counter for failed _consecutive_ findNode requests.
+    find_node_failures: u8,
     /// Whether the endpoint of the peer is proven.
     has_endpoint_proof: bool,
 }
@@ -2217,6 +2311,17 @@ impl NodeEntry {
         node
     }

+    /// Marks the entry with an established proof and resets the consecutive failure counter.
+    fn establish_proof(&mut self) {
+        self.has_endpoint_proof = true;
+        self.find_node_failures = 0;
+    }
+
+    /// Returns true if the tracked find node failures exceed the max amount
+    const fn exceeds_find_node_failures(&self, max_failures: u8) -> bool {
+        self.find_node_failures >= max_failures
+    }
+
     /// Updates the last timestamp and sets the enr seq
     fn update_with_enr(&mut self, last_enr_seq: Option<u64>) -> Option<u64> {
         self.update_now(|s| std::mem::replace(&mut s.last_enr_seq, last_enr_seq))
@@ -2247,7 +2352,7 @@ impl NodeEntry {
 impl NodeEntry {
     /// Returns true if the node should be re-pinged.
     fn is_expired(&self) -> bool {
-        self.last_seen.elapsed() > ENDPOINT_PROOF_EXPIRATION
+        self.last_seen.elapsed() > (ENDPOINT_PROOF_EXPIRATION / 2)
     }
 }

@@ -2256,8 +2361,7 @@ impl NodeEntry {
 enum PingReason {
     /// Initial ping to a previously unknown peer that was inserted into the table.
     InitialInsert,
-    /// Initial ping to a previously unknown peer that didn't fit into the table. But we still want
-    /// to establish a bond.
+    /// A ping to a peer to establish a bond (endpoint proof).
     EstablishBond,
     /// Re-ping a peer.
RePing, @@ -2324,9 +2428,9 @@ mod tests { let original = EnrForkIdEntry { fork_id: ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 0 }, }; - let mut encoded = Vec::new(); - original.encode(&mut encoded); let expected: [u8; 8] = [0xc7, 0xc6, 0x84, 0xdc, 0xe9, 0x6c, 0x2d, 0x80]; + let mut encoded = Vec::with_capacity(expected.len()); + original.encode(&mut encoded); assert_eq!(&expected[..], encoded.as_slice()); } @@ -2634,6 +2738,45 @@ mod tests { assert_eq!(ctx.inner.closest_nodes.borrow().len(), 1); } + #[tokio::test] + async fn test_reping_on_find_node_failures() { + reth_tracing::init_test_tracing(); + + let config = Discv4Config::builder().build(); + let (_discv4, mut service) = create_discv4_with_config(config).await; + + let target = PeerId::random(); + + let id = PeerId::random(); + let key = kad_key(id); + let record = NodeRecord::new("0.0.0.0:0".parse().unwrap(), id); + + let mut entry = NodeEntry::new_proven(record); + entry.find_node_failures = u8::MAX; + let _ = service.kbuckets.insert_or_update( + &key, + entry, + NodeStatus { + direction: ConnectionDirection::Incoming, + state: ConnectionState::Connected, + }, + ); + + service.lookup(target); + assert_eq!(service.pending_find_nodes.len(), 0); + assert_eq!(service.pending_pings.len(), 1); + + service.update_on_pong(record, None); + + service + .on_entry(record.id, |entry| { + // reset on pong + assert_eq!(entry.find_node_failures, 0); + assert!(entry.has_endpoint_proof); + }) + .unwrap(); + } + #[tokio::test] async fn test_service_commands() { reth_tracing::init_test_tracing(); diff --git a/crates/net/discv5/src/config.rs b/crates/net/discv5/src/config.rs index 0684c263b8c..61ab94b4f2f 100644 --- a/crates/net/discv5/src/config.rs +++ b/crates/net/discv5/src/config.rs @@ -412,11 +412,13 @@ pub fn discv5_sockets_wrt_rlpx_addr( discv5_addr_ipv6.map(|ip| SocketAddrV6::new(ip, discv5_port_ipv6, 0, 0)); if let Some(discv5_addr) = discv5_addr_ipv4 { - warn!(target: "discv5", - %discv5_addr, - %rlpx_addr, - "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" - ); + if discv5_addr != rlpx_addr { + warn!(target: "net::discv5", + %discv5_addr, + %rlpx_addr, + "Overwriting discv5 IPv4 address with RLPx IPv4 address, limited to one advertised IP address per IP version" + ); + } } // overwrite discv5 ipv4 addr with RLPx address. this is since there is no @@ -429,11 +431,13 @@ pub fn discv5_sockets_wrt_rlpx_addr( discv5_addr_ipv4.map(|ip| SocketAddrV4::new(ip, discv5_port_ipv4)); if let Some(discv5_addr) = discv5_addr_ipv6 { - warn!(target: "discv5", - %discv5_addr, - %rlpx_addr, - "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" - ); + if discv5_addr != rlpx_addr { + warn!(target: "net::discv5", + %discv5_addr, + %rlpx_addr, + "Overwriting discv5 IPv6 address with RLPx IPv6 address, limited to one advertised IP address per IP version" + ); + } } // overwrite discv5 ipv6 addr with RLPx address. 
this is since there is no @@ -477,11 +481,9 @@ impl BootNode { #[cfg(test)] mod test { - use std::net::SocketAddrV4; - - use alloy_primitives::hex; - use super::*; + use alloy_primitives::hex; + use std::net::SocketAddrV4; const MULTI_ADDRESSES: &str = "/ip4/184.72.129.189/udp/30301/p2p/16Uiu2HAmSG2hdLwyQHQmG4bcJBgD64xnW63WMTLcrNq6KoZREfGb,/ip4/3.231.11.52/udp/30301/p2p/16Uiu2HAmMy4V8bi3XP7KDfSLQcLACSvTLroRRwEsTyFUKo8NCkkp,/ip4/54.198.153.150/udp/30301/p2p/16Uiu2HAmSVsb7MbRf1jg3Dvd6a3n5YNqKQwn1fqHCFgnbqCsFZKe,/ip4/3.220.145.177/udp/30301/p2p/16Uiu2HAm74pBDGdQ84XCZK27GRQbGFFwQ7RsSqsPwcGmCR3Cwn3B,/ip4/3.231.138.188/udp/30301/p2p/16Uiu2HAmMnTiJwgFtSVGV14ZNpwAvS1LUoF4pWWeNtURuV6C3zYB"; const BOOT_NODES_OP_MAINNET_AND_BASE_MAINNET: &[&str] = &[ diff --git a/crates/net/discv5/src/filter.rs b/crates/net/discv5/src/filter.rs index 325544de6c1..a83345a9a5e 100644 --- a/crates/net/discv5/src/filter.rs +++ b/crates/net/discv5/src/filter.rs @@ -89,13 +89,11 @@ impl MustNotIncludeKeys { #[cfg(test)] mod tests { + use super::*; + use crate::NetworkStackId; use alloy_rlp::Bytes; use discv5::enr::{CombinedKey, Enr}; - use crate::NetworkStackId; - - use super::*; - #[test] fn must_not_include_key_filter() { // rig test diff --git a/crates/net/discv5/src/lib.rs b/crates/net/discv5/src/lib.rs index d4e8e928fda..da54d0b5266 100644 --- a/crates/net/discv5/src/lib.rs +++ b/crates/net/discv5/src/lib.rs @@ -95,14 +95,14 @@ impl Discv5 { /// CAUTION: The value **must** be rlp encoded pub fn set_eip868_in_local_enr(&self, key: Vec, rlp: Bytes) { let Ok(key_str) = std::str::from_utf8(&key) else { - error!(target: "discv5", + error!(target: "net::discv5", err="key not utf-8", "failed to update local enr" ); return }; if let Err(err) = self.discv5.enr_insert(key_str, &rlp) { - error!(target: "discv5", + error!(target: "net::discv5", %err, "failed to update local enr" ); @@ -131,7 +131,7 @@ impl Discv5 { self.discv5.ban_node(&node_id, None); self.ban_ip(ip); } - Err(err) => error!(target: "discv5", + Err(err) => error!(target: "net::discv5", %err, "failed to ban peer" ), diff --git a/crates/net/discv5/src/network_stack_id.rs b/crates/net/discv5/src/network_stack_id.rs index f707c7de7b7..a7b6944f355 100644 --- a/crates/net/discv5/src/network_stack_id.rs +++ b/crates/net/discv5/src/network_stack_id.rs @@ -20,12 +20,11 @@ impl NetworkStackId { /// ENR fork ID kv-pair key, for an Optimism CL node. pub const OPSTACK: &'static [u8] = b"opstack"; - #[allow(clippy::missing_const_for_fn)] /// Returns the [`NetworkStackId`] that matches the given chain spec. 
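The `id` lookup below now queries the chain spec directly (`chain.is_optimism()`) instead of going through `chain.chain()`. A toy version of the dispatch; `ChainKind` is hypothetical, and the byte values assume the `ETH`/`OPEL` constants hold their lowercase key names, as `OPSTACK = b"opstack"` above suggests:

```rust
enum ChainKind {
    Ethereum,
    Optimism,
    Other,
}

/// ENR key under which a node advertises its fork ID.
fn network_stack_id(chain: &ChainKind) -> Option<&'static [u8]> {
    match chain {
        ChainKind::Optimism => Some(b"opel"), // assumed value of Self::OPEL
        ChainKind::Ethereum => Some(b"eth"),  // assumed value of Self::ETH
        ChainKind::Other => None,
    }
}
```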
pub fn id(chain: impl EthChainSpec) -> Option<&'static [u8]> { - if chain.chain().is_optimism() { + if chain.is_optimism() { return Some(Self::OPEL) - } else if chain.chain().is_ethereum() { + } else if chain.is_ethereum() { return Some(Self::ETH) } diff --git a/crates/net/dns/Cargo.toml b/crates/net/dns/Cargo.toml index 2af72afcef6..9e3e93d12f8 100644 --- a/crates/net/dns/Cargo.toml +++ b/crates/net/dns/Cargo.toml @@ -27,7 +27,7 @@ tokio = { workspace = true, features = ["io-util", "net", "time"] } tokio-stream.workspace = true # trust-dns -trust-dns-resolver = "0.23" +hickory-resolver = { version = "0.25.0-alpha.4" } # misc data-encoding = "2" @@ -48,4 +48,16 @@ reth-tracing.workspace = true rand.workspace = true [features] -serde = ["dep:serde", "dep:serde_with"] +serde = [ + "dep:serde", + "dep:serde_with", + "alloy-chains/serde", + "alloy-primitives/serde", + "enr/serde", + "linked_hash_set/serde", + "parking_lot/serde", + "rand/serde", + "secp256k1/serde", + "hickory-resolver/serde", + "reth-ethereum-forks/serde" +] diff --git a/crates/net/dns/src/resolver.rs b/crates/net/dns/src/resolver.rs index 42c444f89a7..255f2ad4a10 100644 --- a/crates/net/dns/src/resolver.rs +++ b/crates/net/dns/src/resolver.rs @@ -1,10 +1,10 @@ //! Perform DNS lookups +use hickory_resolver::name_server::ConnectionProvider; +pub use hickory_resolver::{ResolveError, TokioResolver}; use parking_lot::RwLock; use std::{collections::HashMap, future::Future}; use tracing::trace; -pub use trust_dns_resolver::{error::ResolveError, TokioAsyncResolver}; -use trust_dns_resolver::{name_server::ConnectionProvider, AsyncResolver}; /// A type that can lookup DNS entries pub trait Resolver: Send + Sync + Unpin + 'static { @@ -12,7 +12,7 @@ pub trait Resolver: Send + Sync + Unpin + 'static { fn lookup_txt(&self, query: &str) -> impl Future> + Send; } -impl Resolver for AsyncResolver

<P> {
+impl<P: ConnectionProvider> Resolver for hickory_resolver::Resolver<P> {
     async fn lookup_txt(&self, query: &str) -> Option<String> {
         // See: [AsyncResolver::txt_lookup]
         // > *hint* queries that end with a '.' are fully qualified names and are cheaper lookups
@@ -33,7 +33,7 @@ impl<P: ConnectionProvider> Resolver for AsyncResolver<P> {
 /// An asynchronous DNS resolver
 ///
-/// See also [`TokioAsyncResolver`]
+/// See also [`TokioResolver`]
 ///
 /// ```
 /// # fn t() {
@@ -43,16 +43,16 @@ impl<P: ConnectionProvider> Resolver for AsyncResolver<P>
{ /// ``` /// /// Note: This [Resolver] can send multiple lookup attempts, See also -/// [`ResolverOpts`](trust_dns_resolver::config::ResolverOpts) which configures 2 attempts (1 retry) +/// [`ResolverOpts`](hickory_resolver::config::ResolverOpts) which configures 2 attempts (1 retry) /// by default. #[derive(Clone, Debug)] -pub struct DnsResolver(TokioAsyncResolver); +pub struct DnsResolver(TokioResolver); // === impl DnsResolver === impl DnsResolver { - /// Create a new resolver by wrapping the given [`AsyncResolver`] - pub const fn new(resolver: TokioAsyncResolver) -> Self { + /// Create a new resolver by wrapping the given [`TokioResolver`]. + pub const fn new(resolver: TokioResolver) -> Self { Self(resolver) } @@ -60,7 +60,7 @@ impl DnsResolver { /// /// This will use `/etc/resolv.conf` on Unix OSes and the registry on Windows. pub fn from_system_conf() -> Result { - TokioAsyncResolver::tokio_from_system_conf().map(Self::new) + TokioResolver::tokio_from_system_conf().map(Self::new) } } diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 5e7f4dd47a2..f4cc134ec48 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -18,6 +18,7 @@ reth-consensus.workspace = true reth-network-p2p.workspace = true reth-network-peers.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-storage-api.workspace = true reth-tasks.workspace = true @@ -27,6 +28,7 @@ reth-db-api = { workspace = true, optional = true } reth-testing-utils = { workspace = true, optional = true } # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true @@ -44,9 +46,9 @@ reth-metrics.workspace = true metrics.workspace = true # misc -tracing.workspace = true rayon.workspace = true thiserror.workspace = true +tracing.workspace = true tempfile = { workspace = true, optional = true } itertools.workspace = true @@ -70,11 +72,23 @@ rand.workspace = true tempfile.workspace = true [features] +optimism = [ + "reth-primitives/optimism", + "reth-db?/optimism", + "reth-db-api?/optimism", + "reth-provider/optimism" +] + test-utils = [ - "dep:tempfile", - "dep:reth-db-api", - "reth-db/test-utils", - "reth-consensus/test-utils", - "reth-network-p2p/test-utils", - "reth-testing-utils", + "tempfile", + "reth-db-api", + "reth-db/test-utils", + "reth-consensus/test-utils", + "reth-network-p2p/test-utils", + "reth-testing-utils", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-db-api?/test-utils", + "reth-provider/test-utils", + "reth-primitives-traits/test-utils" ] diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 314f3a09084..54026070ec8 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -1,5 +1,6 @@ use super::queue::BodiesRequestQueue; use crate::{bodies::task::TaskDownloader, metrics::BodyDownloaderMetrics}; +use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use futures::Stream; use futures_util::StreamExt; @@ -14,11 +15,13 @@ use reth_network_p2p::{ error::{DownloadError, DownloadResult}, }; use reth_primitives::SealedHeader; +use reth_primitives_traits::size::InMemorySize; use reth_storage_api::HeaderProvider; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ cmp::Ordering, collections::BinaryHeap, + fmt::Debug, mem, ops::RangeInclusive, pin::Pin, @@ -32,11 +35,11 @@ use tracing::info; /// 
All blocks in a batch are fetched at the same time. #[must_use = "Stream does nothing unless polled"] #[derive(Debug)] -pub struct BodiesDownloader { +pub struct BodiesDownloader { /// The bodies client client: Arc, /// The consensus client - consensus: Arc, + consensus: Arc>, /// The database handle provider: Provider, /// The maximum number of non-empty blocks per one request @@ -54,22 +57,22 @@ pub struct BodiesDownloader { /// The latest block number returned. latest_queued_block_number: Option, /// Requests in progress - in_progress_queue: BodiesRequestQueue, + in_progress_queue: BodiesRequestQueue, /// Buffered responses - buffered_responses: BinaryHeap, + buffered_responses: BinaryHeap>, /// Queued body responses that can be returned for insertion into the database. - queued_bodies: Vec, + queued_bodies: Vec>, /// The bodies downloader metrics. metrics: BodyDownloaderMetrics, } impl BodiesDownloader where - B: BodiesClient + 'static, - Provider: HeaderProvider + Unpin + 'static, + B: BodiesClient + 'static, + Provider: HeaderProvider + Unpin + 'static, { /// Returns the next contiguous request. - fn next_headers_request(&self) -> DownloadResult>> { + fn next_headers_request(&self) -> DownloadResult>>> { let start_at = match self.in_progress_queue.last_requested_block_number { Some(num) => num + 1, None => *self.download_range.start(), @@ -94,7 +97,7 @@ where &self, range: RangeInclusive, max_non_empty: u64, - ) -> DownloadResult>> { + ) -> DownloadResult>>> { if range.is_empty() || max_non_empty == 0 { return Ok(None) } @@ -107,7 +110,7 @@ where let mut collected = 0; let mut non_empty_headers = 0; let headers = self.provider.sealed_headers_while(range.clone(), |header| { - let should_take = range.contains(&header.number) && + let should_take = range.contains(&header.number()) && non_empty_headers < max_non_empty && collected < self.stream_batch_size; @@ -190,14 +193,16 @@ where } /// Queues bodies and sets the latest queued block number - fn queue_bodies(&mut self, bodies: Vec) { + fn queue_bodies(&mut self, bodies: Vec>) { self.latest_queued_block_number = Some(bodies.last().expect("is not empty").block_number()); self.queued_bodies.extend(bodies); self.metrics.queued_blocks.set(self.queued_bodies.len() as f64); } /// Removes the next response from the buffer. - fn pop_buffered_response(&mut self) -> Option { + fn pop_buffered_response( + &mut self, + ) -> Option> { let resp = self.buffered_responses.pop()?; self.metrics.buffered_responses.decrement(1.); self.buffered_blocks_size_bytes -= resp.size(); @@ -207,10 +212,10 @@ where } /// Adds a new response to the internal buffer - fn buffer_bodies_response(&mut self, response: Vec) { + fn buffer_bodies_response(&mut self, response: Vec>) { // take into account capacity let size = response.iter().map(BlockResponse::size).sum::() + - response.capacity() * mem::size_of::(); + response.capacity() * mem::size_of::>(); let response = OrderedBodiesResponse { resp: response, size }; let response_len = response.len(); @@ -224,7 +229,7 @@ where } /// Returns a response if it's first block number matches the next expected. 
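The ordering trick that makes this buffering work appears in the `Ord` impl further down: `BinaryHeap` is a max-heap, so reversing the comparison turns `pop()` into "lowest first block number wins", i.e. the next contiguous range. A runnable miniature of the same pattern:

```rust
use std::{cmp::Ordering, collections::BinaryHeap};

/// A response keyed by its first block number
/// (stand-in for `OrderedBodiesResponse`).
#[derive(PartialEq, Eq)]
struct Resp(u64);

impl Ord for Resp {
    fn cmp(&self, other: &Self) -> Ordering {
        // reversed, exactly like the impl below, so the heap
        // yields the lowest range first
        self.0.cmp(&other.0).reverse()
    }
}

impl PartialOrd for Resp {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let mut heap = BinaryHeap::new();
    heap.extend([Resp(30), Resp(10), Resp(20)]);
    assert_eq!(heap.pop().map(|r| r.0), Some(10)); // lowest range first
}
```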
- fn try_next_buffered(&mut self) -> Option> { + fn try_next_buffered(&mut self) -> Option>> { if let Some(next) = self.buffered_responses.peek() { let expected = self.next_expected_block_number(); let next_block_range = next.block_range(); @@ -250,7 +255,7 @@ where /// Returns the next batch of block bodies that can be returned if we have enough buffered /// bodies - fn try_split_next_batch(&mut self) -> Option> { + fn try_split_next_batch(&mut self) -> Option>> { if self.queued_bodies.len() >= self.stream_batch_size { let next_batch = self.queued_bodies.drain(..self.stream_batch_size).collect::>(); self.queued_bodies.shrink_to_fit(); @@ -282,12 +287,17 @@ where Self: BodyDownloader + 'static, { /// Spawns the downloader task via [`tokio::task::spawn`] - pub fn into_task(self) -> TaskDownloader { + pub fn into_task( + self, + ) -> TaskDownloader<::Header, ::Body> { self.into_task_with(&TokioTaskExecutor::default()) } /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given spawner. - pub fn into_task_with(self, spawner: &S) -> TaskDownloader + pub fn into_task_with( + self, + spawner: &S, + ) -> TaskDownloader<::Header, ::Body> where S: TaskSpawner, { @@ -297,9 +307,12 @@ where impl BodyDownloader for BodiesDownloader where - B: BodiesClient + 'static, + B: BodiesClient + 'static, Provider: HeaderProvider + Unpin + 'static, { + type Header = Provider::Header; + type Body = B::Body; + /// Set a new download range (exclusive). /// /// This method will drain all queued bodies, filter out ones outside the range and put them @@ -345,10 +358,10 @@ where impl Stream for BodiesDownloader where - B: BodiesClient + 'static, + B: BodiesClient + 'static, Provider: HeaderProvider + Unpin + 'static, { - type Item = BodyDownloaderResult; + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -430,13 +443,28 @@ where } #[derive(Debug)] -struct OrderedBodiesResponse { - resp: Vec, +struct OrderedBodiesResponse { + resp: Vec>, /// The total size of the response in bytes size: usize, } -impl OrderedBodiesResponse { +impl OrderedBodiesResponse { + #[inline] + fn len(&self) -> usize { + self.resp.len() + } + + /// Returns the size of the response in bytes + /// + /// See [`BlockResponse::size`] + #[inline] + const fn size(&self) -> usize { + self.size + } +} + +impl OrderedBodiesResponse { /// Returns the block number of the first element /// /// # Panics @@ -452,36 +480,23 @@ impl OrderedBodiesResponse { fn block_range(&self) -> RangeInclusive { self.first_block_number()..=self.resp.last().expect("is not empty").block_number() } - - #[inline] - fn len(&self) -> usize { - self.resp.len() - } - - /// Returns the size of the response in bytes - /// - /// See [`BlockResponse::size`] - #[inline] - const fn size(&self) -> usize { - self.size - } } -impl PartialEq for OrderedBodiesResponse { +impl PartialEq for OrderedBodiesResponse { fn eq(&self, other: &Self) -> bool { self.first_block_number() == other.first_block_number() } } -impl Eq for OrderedBodiesResponse {} +impl Eq for OrderedBodiesResponse {} -impl PartialOrd for OrderedBodiesResponse { +impl PartialOrd for OrderedBodiesResponse { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedBodiesResponse { +impl Ord for OrderedBodiesResponse { fn cmp(&self, other: &Self) -> Ordering { self.first_block_number().cmp(&other.first_block_number()).reverse() } @@ -561,7 +576,7 @@ impl BodiesDownloaderBuilder { pub fn 
build( self, client: B, - consensus: Arc, + consensus: Arc>, provider: Provider, ) -> BodiesDownloader where diff --git a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs index e70c534a0e3..b7a9431a4d7 100644 --- a/crates/net/downloaders/src/bodies/noop.rs +++ b/crates/net/downloaders/src/bodies/noop.rs @@ -4,21 +4,29 @@ use reth_network_p2p::{ bodies::{downloader::BodyDownloader, response::BlockResponse}, error::{DownloadError, DownloadResult}, }; -use std::ops::RangeInclusive; +use std::{fmt::Debug, ops::RangeInclusive}; /// A [`BodyDownloader`] implementation that does nothing. #[derive(Debug, Default)] #[non_exhaustive] -pub struct NoopBodiesDownloader; +pub struct NoopBodiesDownloader { + _header: std::marker::PhantomData, + _body: std::marker::PhantomData, +} + +impl + BodyDownloader for NoopBodiesDownloader +{ + type Body = B; + type Header = H; -impl BodyDownloader for NoopBodiesDownloader { fn set_download_range(&mut self, _: RangeInclusive) -> DownloadResult<()> { Ok(()) } } -impl Stream for NoopBodiesDownloader { - type Item = Result, DownloadError>; +impl Stream for NoopBodiesDownloader { + type Item = Result>, DownloadError>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index db7ff71cfc9..ed8c425e611 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -1,5 +1,6 @@ use super::request::BodiesRequestFuture; use crate::metrics::BodyDownloaderMetrics; +use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use futures::{stream::FuturesUnordered, Stream}; use futures_util::StreamExt; @@ -9,6 +10,7 @@ use reth_network_p2p::{ error::DownloadResult, }; use reth_primitives::SealedHeader; +use reth_primitives_traits::InMemorySize; use std::{ pin::Pin, sync::Arc, @@ -18,18 +20,19 @@ use std::{ /// The wrapper around [`FuturesUnordered`] that keeps information /// about the blocks currently being requested. #[derive(Debug)] -pub(crate) struct BodiesRequestQueue { +pub(crate) struct BodiesRequestQueue { /// Inner body request queue. - inner: FuturesUnordered>, + inner: FuturesUnordered>, /// The downloader metrics. metrics: BodyDownloaderMetrics, /// Last requested block number. pub(crate) last_requested_block_number: Option, } -impl BodiesRequestQueue +impl BodiesRequestQueue where B: BodiesClient + 'static, + H: BlockHeader, { /// Create new instance of request queue. pub(crate) fn new(metrics: BodyDownloaderMetrics) -> Self { @@ -57,15 +60,15 @@ where pub(crate) fn push_new_request( &mut self, client: Arc, - consensus: Arc, - request: Vec, + consensus: Arc>, + request: Vec>, ) { // Set last max requested block number self.last_requested_block_number = request .last() .map(|last| match self.last_requested_block_number { - Some(num) => last.number.max(num), - None => last.number, + Some(num) => last.number().max(num), + None => last.number(), }) .or(self.last_requested_block_number); // Create request and push into the queue. 
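`push_new_request` above advances `last_requested_block_number` monotonically: it takes the highest header number of the new batch but never moves backwards. A reduced model of that update (headers assumed sorted ascending, so `.last()` is the highest):

```rust
/// Mirrors the `map(..).or(..)` update in `push_new_request`.
fn advance(last: Option<u64>, request_numbers: &[u64]) -> Option<u64> {
    request_numbers
        .last()
        .map(|n| match last {
            Some(prev) => (*n).max(prev),
            None => *n,
        })
        // an empty request leaves the watermark untouched
        .or(last)
}

#[test]
fn never_moves_backwards() {
    assert_eq!(advance(Some(7), &[5, 6]), Some(7));
    assert_eq!(advance(None, &[5, 6]), Some(6));
    assert_eq!(advance(Some(7), &[]), Some(7));
}
```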
@@ -75,11 +78,12 @@ where } } -impl Stream for BodiesRequestQueue +impl Stream for BodiesRequestQueue where - B: BodiesClient + 'static, + H: BlockHeader + Send + Sync + Unpin + 'static, + B: BodiesClient + 'static, { - type Item = DownloadResult>; + type Item = DownloadResult>>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.get_mut().inner.poll_next_unpin(cx) diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index c2b36732b51..92f46fa6fdd 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -1,4 +1,5 @@ use crate::metrics::{BodyDownloaderMetrics, ResponseMetrics}; +use alloy_consensus::BlockHeader; use alloy_primitives::B256; use futures::{Future, FutureExt}; use reth_consensus::Consensus; @@ -9,6 +10,7 @@ use reth_network_p2p::{ }; use reth_network_peers::{PeerId, WithPeerId}; use reth_primitives::{BlockBody, GotExpected, SealedBlock, SealedHeader}; +use reth_primitives_traits::InMemorySize; use std::{ collections::VecDeque, mem, @@ -26,7 +28,7 @@ use std::{ /// It then proceeds to verify the downloaded bodies. In case of an validation error, /// the future will start over. /// -/// The future will filter out any empty headers (see [`reth_primitives::Header::is_empty`]) from +/// The future will filter out any empty headers (see [`alloy_consensus::Header::is_empty`]) from /// the request. If [`BodiesRequestFuture`] was initialized with all empty headers, no request will /// be dispatched and they will be immediately returned upon polling. /// @@ -36,30 +38,31 @@ use std::{ /// All errors regarding the response cause the peer to get penalized, meaning that adversaries /// that try to give us bodies that do not match the requested order are going to be penalized /// and eventually disconnected. -pub(crate) struct BodiesRequestFuture { +pub(crate) struct BodiesRequestFuture { client: Arc, - consensus: Arc, + consensus: Arc>, metrics: BodyDownloaderMetrics, /// Metrics for individual responses. This can be used to observe how the size (in bytes) of /// responses change while bodies are being downloaded. response_metrics: ResponseMetrics, // Headers to download. The collection is shrunk as responses are buffered. - pending_headers: VecDeque, + pending_headers: VecDeque>, /// Internal buffer for all blocks - buffer: Vec, + buffer: Vec>, fut: Option, /// Tracks how many bodies we requested in the last request. last_request_len: Option, } -impl BodiesRequestFuture +impl BodiesRequestFuture where B: BodiesClient + 'static, + H: BlockHeader, { /// Returns an empty future. Use [`BodiesRequestFuture::with_headers`] to set the request. pub(crate) fn new( client: Arc, - consensus: Arc, + consensus: Arc>, metrics: BodyDownloaderMetrics, ) -> Self { Self { @@ -74,7 +77,7 @@ where } } - pub(crate) fn with_headers(mut self, headers: Vec) -> Self { + pub(crate) fn with_headers(mut self, headers: Vec>) -> Self { self.buffer.reserve_exact(headers.len()); self.pending_headers = VecDeque::from(headers); // Submit the request only if there are any headers to download. @@ -114,7 +117,10 @@ where /// Process block response. /// Returns an error if the response is invalid. 
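The doc comment above notes that empty headers are filtered out before anything hits the wire. A sketch of that pre-filter under simplified assumptions; `EMPTY_ROOT` stands in for the real empty-RLP commitment hashes that the header emptiness check compares against:

```rust
/// Simplified header: only the fields the emptiness check cares about.
struct Header {
    transactions_root: [u8; 32],
    ommers_hash: [u8; 32],
}

/// Stand-in for the well-known empty-list hashes.
const EMPTY_ROOT: [u8; 32] = [0; 32];

fn is_empty(h: &Header) -> bool {
    h.transactions_root == EMPTY_ROOT && h.ommers_hash == EMPTY_ROOT
}

/// Only non-empty headers are worth a network round trip; empty ones
/// can be completed locally and returned immediately on poll.
fn headers_to_request(headers: &[Header]) -> Vec<&Header> {
    headers.iter().filter(|h| !is_empty(h)).collect()
}
```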
- fn on_block_response(&mut self, response: WithPeerId>) -> DownloadResult<()> { + fn on_block_response(&mut self, response: WithPeerId>) -> DownloadResult<()> + where + B::Body: InMemorySize, + { let (peer_id, bodies) = response.split(); let request_len = self.last_request_len.unwrap_or_default(); let response_len = bodies.len(); @@ -157,7 +163,10 @@ where /// /// This method removes headers from the internal collection. /// If the response fails validation, then the header will be put back. - fn try_buffer_blocks(&mut self, bodies: Vec) -> DownloadResult<()> { + fn try_buffer_blocks(&mut self, bodies: Vec) -> DownloadResult<()> + where + B::Body: InMemorySize, + { let bodies_capacity = bodies.capacity(); let bodies_len = bodies.len(); let mut bodies = bodies.into_iter().peekable(); @@ -184,7 +193,7 @@ where if let Err(error) = self.consensus.validate_block_pre_execution(&block) { // Body is invalid, put the header back and return an error let hash = block.hash(); - let number = block.number; + let number = block.number(); self.pending_headers.push_front(block.header); return Err(DownloadError::BodyValidation { hash, @@ -205,11 +214,12 @@ where } } -impl Future for BodiesRequestFuture +impl Future for BodiesRequestFuture where - B: BodiesClient + 'static, + H: BlockHeader + Unpin + Send + Sync + 'static, + B: BodiesClient + 'static, { - type Output = DownloadResult>; + type Output = DownloadResult>>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index eeafb7ab121..9377be78676 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -8,6 +8,7 @@ use reth_network_p2p::{ }; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ + fmt::Debug, future::Future, ops::RangeInclusive, pin::Pin, @@ -23,15 +24,15 @@ pub const BODIES_TASK_BUFFER_SIZE: usize = 4; /// A [BodyDownloader] that drives a spawned [BodyDownloader] on a spawned task. #[derive(Debug)] #[pin_project] -pub struct TaskDownloader { +pub struct TaskDownloader { #[pin] - from_downloader: ReceiverStream, + from_downloader: ReceiverStream>, to_downloader: UnboundedSender>, } // === impl TaskDownloader === -impl TaskDownloader { +impl TaskDownloader { /// Spawns the given `downloader` via [`tokio::task::spawn`] returns a [`TaskDownloader`] that's /// connected to that task. /// @@ -45,12 +46,16 @@ impl TaskDownloader { /// use reth_consensus::Consensus; /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader}; /// use reth_network_p2p::bodies::client::BodiesClient; + /// use reth_primitives_traits::InMemorySize; /// use reth_storage_api::HeaderProvider; - /// use std::sync::Arc; + /// use std::{fmt::Debug, sync::Arc}; /// - /// fn t( + /// fn t< + /// B: BodiesClient + 'static, + /// Provider: HeaderProvider

+ Unpin + 'static, + /// >( /// client: Arc, - /// consensus: Arc, + /// consensus: Arc>, /// provider: Provider, /// ) { /// let downloader = BodiesDownloaderBuilder::default().build(client, consensus, provider); @@ -59,7 +64,7 @@ impl TaskDownloader { /// ``` pub fn spawn(downloader: T) -> Self where - T: BodyDownloader + 'static, + T: BodyDownloader
+ 'static, { Self::spawn_with(downloader, &TokioTaskExecutor::default()) } @@ -68,7 +73,7 @@ impl TaskDownloader { /// that's connected to that task. pub fn spawn_with(downloader: T, spawner: &S) -> Self where - T: BodyDownloader + 'static, + T: BodyDownloader
+ 'static, S: TaskSpawner, { let (bodies_tx, bodies_rx) = mpsc::channel(BODIES_TASK_BUFFER_SIZE); @@ -86,15 +91,20 @@ impl TaskDownloader { } } -impl BodyDownloader for TaskDownloader { +impl + BodyDownloader for TaskDownloader +{ + type Header = H; + type Body = B; + fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()> { let _ = self.to_downloader.send(range); Ok(()) } } -impl Stream for TaskDownloader { - type Item = BodyDownloaderResult; +impl Stream for TaskDownloader { + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.project().from_downloader.poll_next(cx) @@ -102,9 +112,9 @@ impl Stream for TaskDownloader { } /// A [`BodyDownloader`] that runs on its own task -struct SpawnedDownloader { +struct SpawnedDownloader { updates: UnboundedReceiverStream>, - bodies_tx: PollSender, + bodies_tx: PollSender>, downloader: T, } diff --git a/crates/net/downloaders/src/bodies/test_utils.rs b/crates/net/downloaders/src/bodies/test_utils.rs index af4bf8145af..ca35c7449a0 100644 --- a/crates/net/downloaders/src/bodies/test_utils.rs +++ b/crates/net/downloaders/src/bodies/test_utils.rs @@ -2,6 +2,7 @@ #![allow(dead_code)] +use alloy_consensus::BlockHeader; use alloy_primitives::B256; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, transaction::DbTxMut}; @@ -9,10 +10,10 @@ use reth_network_p2p::bodies::response::BlockResponse; use reth_primitives::{Block, BlockBody, SealedBlock, SealedHeader}; use std::collections::HashMap; -pub(crate) fn zip_blocks<'a>( - headers: impl Iterator, - bodies: &mut HashMap, -) -> Vec { +pub(crate) fn zip_blocks<'a, H: Clone + BlockHeader + 'a, B>( + headers: impl Iterator>, + bodies: &mut HashMap, +) -> Vec> { headers .into_iter() .map(|header| { diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 5b21c82fb3f..ff352bc2304 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, io, path::Path}; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockHash, BlockNumber, Sealable, B256}; use futures::Future; @@ -12,16 +13,16 @@ use reth_network_p2p::{ priority::Priority, }; use reth_network_peers::PeerId; -use reth_primitives::{BlockBody, Header, SealedHeader}; +use reth_primitives::SealedHeader; +use reth_primitives_traits::{Block, BlockBody, FullBlock}; use thiserror::Error; use tokio::{fs::File, io::AsyncReadExt}; use tokio_stream::StreamExt; use tokio_util::codec::FramedRead; use tracing::{debug, trace, warn}; -use crate::receipt_file_client::FromReceiptReader; - use super::file_codec::BlockFileCodec; +use crate::receipt_file_client::FromReceiptReader; /// Default byte length of chunk to read from chain file. /// @@ -40,15 +41,15 @@ pub const DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE: u64 = 1_000_000_000; /// /// This reads the entire file into memory, so it is not suitable for large files. #[derive(Debug)] -pub struct FileClient { +pub struct FileClient { /// The buffered headers retrieved when fetching new bodies. - headers: HashMap, + headers: HashMap, /// A mapping between block hash and number. hash_to_number: HashMap, /// The buffered bodies retrieved when fetching new headers. - bodies: HashMap, + bodies: HashMap, } /// An error that can occur when constructing and using a [`FileClient`]. 
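`FileClient` keeps three maps: bodies are keyed by hash, headers by number, with `hash_to_number` bridging the two. A minimal model of the lookups this layout enables (types simplified to plain stand-ins):

```rust
use std::collections::HashMap;

struct Client {
    /// number -> header
    headers: HashMap<u64, String>,
    /// hash -> number, bridging hash-based requests to the header map
    hash_to_number: HashMap<[u8; 32], u64>,
    /// hash -> body
    bodies: HashMap<[u8; 32], Vec<u8>>,
}

impl Client {
    /// Bodies are served directly by hash, as in the `BodiesClient` impl.
    fn body(&self, hash: &[u8; 32]) -> Option<&Vec<u8>> {
        self.bodies.get(hash)
    }

    /// Header requests by hash go through the number index.
    fn header_by_hash(&self, hash: &[u8; 32]) -> Option<&String> {
        self.hash_to_number.get(hash).and_then(|n| self.headers.get(n))
    }
}
```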
@@ -73,7 +74,7 @@ impl From<&'static str> for FileClientError { } } -impl FileClient { +impl FileClient { /// Create a new file client from a file path. pub async fn new>(path: P) -> Result { let file = File::open(path).await?; @@ -114,12 +115,8 @@ impl FileClient { /// Clones and returns the highest header of this client has or `None` if empty. Seals header /// before returning. - pub fn tip_header(&self) -> Option { - self.headers.get(&self.max_block()?).map(|h| { - let sealed = h.clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) + pub fn tip_header(&self) -> Option> { + self.headers.get(&self.max_block()?).map(|h| SealedHeader::seal(h.clone())) } /// Returns true if all blocks are canonical (no gaps) @@ -141,13 +138,13 @@ impl FileClient { } /// Use the provided bodies as the file client's block body buffer. - pub fn with_bodies(mut self, bodies: HashMap) -> Self { + pub fn with_bodies(mut self, bodies: HashMap) -> Self { self.bodies = bodies; self } /// Use the provided headers as the file client's block body buffer. - pub fn with_headers(mut self, headers: HashMap) -> Self { + pub fn with_headers(mut self, headers: HashMap) -> Self { self.headers = headers; for (number, header) in &self.headers { self.hash_to_number.insert(header.hash_slow(), *number); @@ -166,14 +163,14 @@ impl FileClient { } /// Returns an iterator over headers in the client. - pub fn headers_iter(&self) -> impl Iterator { + pub fn headers_iter(&self) -> impl Iterator { self.headers.values() } /// Returns a mutable iterator over bodies in the client. /// /// Panics, if file client headers and bodies are not mapping 1-1. - pub fn bodies_iter_mut(&mut self) -> impl Iterator { + pub fn bodies_iter_mut(&mut self) -> impl Iterator { let bodies = &mut self.bodies; let numbers = &self.hash_to_number; bodies.iter_mut().map(|(hash, body)| (numbers[hash], body)) @@ -181,27 +178,28 @@ impl FileClient { /// Returns the current number of transactions in the client. pub fn total_transactions(&self) -> usize { - self.bodies.iter().fold(0, |acc, (_, body)| acc + body.transactions.len()) + self.bodies.iter().fold(0, |acc, (_, body)| acc + body.transactions().len()) } } -impl FromReader for FileClient { +impl FromReader for FileClient { type Error = FileClientError; /// Initialize the [`FileClient`] from bytes that have been read from file. 
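The `tip_header` change above is a good example of the `SealedHeader::seal` cleanup running through this PR: one call replaces the seal-slow/into-parts/new dance. Roughly the shape of the helper, with stand-in types:

```rust
struct Header;          // stand-in for the real header type
struct Seal([u8; 32]);  // stand-in for the keccak hash

struct SealedHeader {
    header: Header,
    seal: Seal,
}

impl SealedHeader {
    fn new(header: Header, seal: Seal) -> Self {
        Self { header, seal }
    }

    /// Hashes the header and pairs it with its seal in one step, replacing
    /// `seal_slow()` + `into_parts()` + `new()` at call sites.
    fn seal(header: Header) -> Self {
        let seal = Seal([0; 32]); // would be `header.hash_slow()`
        Self::new(header, seal)
    }
}
```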
- fn from_reader( - reader: B, + fn from_reader( + reader: R, num_bytes: u64, ) -> impl Future, Self::Error>> where - B: AsyncReadExt + Unpin, + R: AsyncReadExt + Unpin, { let mut headers = HashMap::default(); let mut hash_to_number = HashMap::default(); let mut bodies = HashMap::default(); // use with_capacity to make sure the internal buffer contains the entire chunk - let mut stream = FramedRead::with_capacity(reader, BlockFileCodec, num_bytes as usize); + let mut stream = + FramedRead::with_capacity(reader, BlockFileCodec::::default(), num_bytes as usize); trace!(target: "downloaders::file", target_num_bytes=num_bytes, @@ -229,13 +227,13 @@ impl FromReader for FileClient { } Err(err) => return Err(err), }; - let block_number = block.header.number; - let block_hash = block.header.hash_slow(); + let block_number = block.header().number(); + let block_hash = block.header().hash_slow(); // add to the internal maps - headers.insert(block.header.number, block.header.clone()); - hash_to_number.insert(block_hash, block.header.number); - bodies.insert(block_hash, block.into()); + headers.insert(block.header().number(), block.header().clone()); + hash_to_number.insert(block_hash, block.header().number()); + bodies.insert(block_hash, block.body().clone()); if log_interval == 0 { trace!(target: "downloaders::file", @@ -264,8 +262,9 @@ impl FromReader for FileClient { } } -impl HeadersClient for FileClient { - type Output = HeadersFut; +impl HeadersClient for FileClient { + type Header = B::Header; + type Output = HeadersFut; fn get_headers_with_priority( &self, @@ -314,8 +313,9 @@ impl HeadersClient for FileClient { } } -impl BodiesClient for FileClient { - type Output = BodiesFut; +impl BodiesClient for FileClient { + type Body = B::Body; + type Output = BodiesFut; fn get_block_bodies_with_priority( &self, @@ -338,7 +338,7 @@ impl BodiesClient for FileClient { } } -impl DownloadClient for FileClient { +impl DownloadClient for FileClient { fn report_bad_message(&self, _peer_id: PeerId) { warn!("Reported a bad message on a file client, the file may be corrupted or invalid"); // noop @@ -544,7 +544,7 @@ mod tests { // create an empty file let file = tempfile::tempfile().unwrap(); - let client = + let client: Arc = Arc::new(FileClient::from_file(file.into()).await.unwrap().with_bodies(bodies.clone())); let mut downloader = BodiesDownloaderBuilder::default().build( client.clone(), @@ -569,14 +569,14 @@ mod tests { let p0 = child_header(&p1); let file = tempfile::tempfile().unwrap(); - let client = Arc::new(FileClient::from_file(file.into()).await.unwrap().with_headers( - HashMap::from([ + let client: Arc = Arc::new( + FileClient::from_file(file.into()).await.unwrap().with_headers(HashMap::from([ (0u64, p0.clone().unseal()), (1, p1.clone().unseal()), (2, p2.clone().unseal()), (3, p3.clone().unseal()), - ]), - )); + ])), + ); let mut downloader = ReverseHeadersDownloaderBuilder::default() .stream_batch_size(3) @@ -598,7 +598,7 @@ mod tests { // Generate some random blocks let (file, headers, _) = generate_bodies_file(0..=19).await; // now try to read them back - let client = Arc::new(FileClient::from_file(file).await.unwrap()); + let client: Arc = Arc::new(FileClient::from_file(file).await.unwrap()); // construct headers downloader and use first header let mut header_downloader = ReverseHeadersDownloaderBuilder::default() @@ -623,7 +623,7 @@ mod tests { let (file, headers, mut bodies) = generate_bodies_file(0..=19).await; // now try to read them back - let client = 
Arc::new(FileClient::from_file(file).await.unwrap()); + let client: Arc = Arc::new(FileClient::from_file(file).await.unwrap()); // insert headers in db for the bodies downloader insert_headers(factory.db_ref().db(), &headers); diff --git a/crates/net/downloaders/src/file_codec.rs b/crates/net/downloaders/src/file_codec.rs index 3e754f9cf49..57a15b6c888 100644 --- a/crates/net/downloaders/src/file_codec.rs +++ b/crates/net/downloaders/src/file_codec.rs @@ -3,7 +3,6 @@ use crate::file_client::FileClientError; use alloy_primitives::bytes::{Buf, BytesMut}; use alloy_rlp::{Decodable, Encodable}; -use reth_primitives::Block; use tokio_util::codec::{Decoder, Encoder}; /// Codec for reading raw block bodies from a file. @@ -19,10 +18,16 @@ use tokio_util::codec::{Decoder, Encoder}; /// /// It's recommended to use [`with_capacity`](tokio_util::codec::FramedRead::with_capacity) to set /// the capacity of the framed reader to the size of the file. -pub(crate) struct BlockFileCodec; +pub(crate) struct BlockFileCodec(std::marker::PhantomData); -impl Decoder for BlockFileCodec { - type Item = Block; +impl Default for BlockFileCodec { + fn default() -> Self { + Self(std::marker::PhantomData) + } +} + +impl Decoder for BlockFileCodec { + type Item = B; type Error = FileClientError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { @@ -31,18 +36,17 @@ impl Decoder for BlockFileCodec { } let buf_slice = &mut src.as_ref(); - let body = - Block::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?; + let body = B::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?; src.advance(src.len() - buf_slice.len()); Ok(Some(body)) } } -impl Encoder for BlockFileCodec { +impl Encoder for BlockFileCodec { type Error = FileClientError; - fn encode(&mut self, item: Block, dst: &mut BytesMut) -> Result<(), Self::Error> { + fn encode(&mut self, item: B, dst: &mut BytesMut) -> Result<(), Self::Error> { item.encode(dst); Ok(()) } diff --git a/crates/net/downloaders/src/headers/noop.rs b/crates/net/downloaders/src/headers/noop.rs index 210655f7e26..e9dee56dd2e 100644 --- a/crates/net/downloaders/src/headers/noop.rs +++ b/crates/net/downloaders/src/headers/noop.rs @@ -4,22 +4,25 @@ use reth_network_p2p::headers::{ error::HeadersDownloaderError, }; use reth_primitives::SealedHeader; +use std::fmt::Debug; /// A [`HeaderDownloader`] implementation that does nothing. 
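Making `BlockFileCodec` generic uses the usual `PhantomData` trick visible above: the codec is zero-sized and only carries the decoded block type at the type level. Note the hand-written `Default`; deriving it would needlessly require `B: Default`. Distilled:

```rust
use std::marker::PhantomData;

/// Zero-sized codec parameterized by the type it decodes.
struct Codec<B>(PhantomData<B>);

// Manual impl instead of `#[derive(Default)]`, which would add an
// unnecessary `B: Default` bound.
impl<B> Default for Codec<B> {
    fn default() -> Self {
        Self(PhantomData)
    }
}
```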
#[derive(Debug, Default)] #[non_exhaustive] -pub struct NoopHeaderDownloader; +pub struct NoopHeaderDownloader(std::marker::PhantomData); -impl HeaderDownloader for NoopHeaderDownloader { - fn update_local_head(&mut self, _: SealedHeader) {} +impl HeaderDownloader for NoopHeaderDownloader { + type Header = H; + + fn update_local_head(&mut self, _: SealedHeader) {} fn update_sync_target(&mut self, _: SyncTarget) {} fn set_batch_size(&mut self, _: usize) {} } -impl Stream for NoopHeaderDownloader { - type Item = Result, HeadersDownloaderError>; +impl Stream for NoopHeaderDownloader { + type Item = Result>, HeadersDownloaderError>; fn poll_next( self: std::pin::Pin<&mut Self>, diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index 941d140b39d..be359134e79 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -2,24 +2,25 @@ use super::task::TaskDownloader; use crate::metrics::HeaderDownloaderMetrics; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use futures::{stream::Stream, FutureExt}; use futures_util::{stream::FuturesUnordered, StreamExt}; use rayon::prelude::*; use reth_config::config::HeadersConfig; -use reth_consensus::Consensus; +use reth_consensus::HeaderValidator; use reth_network_p2p::{ error::{DownloadError, DownloadResult, PeerRequestResult}, headers::{ - client::{HeadersClient, HeadersDirection, HeadersRequest}, + client::{HeadersClient, HeadersRequest}, downloader::{validate_header_download, HeaderDownloader, SyncTarget}, error::{HeadersDownloaderError, HeadersDownloaderResult}, }, priority::Priority, }; use reth_network_peers::PeerId; -use reth_primitives::{GotExpected, Header, SealedHeader}; +use reth_primitives::{GotExpected, SealedHeader}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ cmp::{Ordering, Reverse}, @@ -39,14 +40,14 @@ const REQUESTS_PER_PEER_MULTIPLIER: usize = 5; /// Wrapper for internal downloader errors. #[derive(Error, Debug)] -enum ReverseHeadersDownloaderError { +enum ReverseHeadersDownloaderError { #[error(transparent)] - Downloader(#[from] HeadersDownloaderError), + Downloader(#[from] HeadersDownloaderError), #[error(transparent)] Response(#[from] Box), } -impl From for ReverseHeadersDownloaderError { +impl From for ReverseHeadersDownloaderError { fn from(value: HeadersResponseError) -> Self { Self::Response(Box::new(value)) } @@ -59,24 +60,25 @@ impl From for ReverseHeadersDownloaderError { /// tries to fill the gap between the local head of the node and the chain tip by issuing multiple /// requests at a time but yielding them in batches on [`Stream::poll_next`]. /// -/// **Note:** This downloader downloads in reverse, see also [`HeadersDirection::Falling`], this -/// means the batches of headers that this downloader yields will start at the chain tip and move -/// towards the local head: falling block numbers. +/// **Note:** This downloader downloads in reverse, see also +/// [`reth_network_p2p::headers::client::HeadersDirection`], this means the batches of headers that +/// this downloader yields will start at the chain tip and move towards the local head: falling +/// block numbers. 
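To make the "falling block numbers" behavior concrete, here is a hypothetical request type mirroring the `HeadersRequest::falling(start, 1)` constructor used later in this file: a falling request for `limit` headers covers `start`, `start - 1`, and so on toward the local head.

```rust
struct HeadersRequest {
    start: u64,
    limit: u64,
}

impl HeadersRequest {
    /// A reverse request, as the sync-target lookup below issues.
    const fn falling(start: u64, limit: u64) -> Self {
        Self { start, limit }
    }

    /// Block numbers covered by this request, highest first.
    fn numbers(&self) -> impl Iterator<Item = u64> + '_ {
        (0..self.limit).map(move |i| self.start.saturating_sub(i))
    }
}
```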
#[must_use = "Stream does nothing unless polled"] #[derive(Debug)] pub struct ReverseHeadersDownloader { /// Consensus client used to validate headers - consensus: Arc, + consensus: Arc>, /// Client used to download headers. client: Arc, /// The local head of the chain. - local_head: Option, + local_head: Option>, /// Block we want to close the gap to. sync_target: Option, /// The block number to use for requests. next_request_block_number: u64, /// Keeps track of the block we need to validate next. - lowest_validated_header: Option, + lowest_validated_header: Option>, /// Tip block number to start validating from (in reverse) next_chain_tip_block_number: u64, /// The batch size per one request @@ -97,11 +99,11 @@ pub struct ReverseHeadersDownloader { /// requests in progress in_progress_queue: FuturesUnordered>, /// Buffered, unvalidated responses - buffered_responses: BinaryHeap, + buffered_responses: BinaryHeap>, /// Buffered, _sorted_ and validated headers ready to be returned. /// /// Note: headers are sorted from high to low - queued_validated_headers: Vec, + queued_validated_headers: Vec>, /// Header downloader metrics. metrics: HeaderDownloaderMetrics, } @@ -110,7 +112,7 @@ pub struct ReverseHeadersDownloader { impl ReverseHeadersDownloader where - H: HeadersClient + 'static, + H: HeadersClient + 'static, { /// Convenience method to create a [`ReverseHeadersDownloaderBuilder`] without importing it pub fn builder() -> ReverseHeadersDownloaderBuilder { @@ -120,7 +122,7 @@ where /// Returns the block number the local node is at. #[inline] fn local_block_number(&self) -> Option { - self.local_head.as_ref().map(|h| h.number) + self.local_head.as_ref().map(|h| h.number()) } /// Returns the existing local head block number @@ -130,7 +132,7 @@ where /// If the local head has not been set. #[inline] fn existing_local_block_number(&self) -> BlockNumber { - self.local_head.as_ref().expect("is initialized").number + self.local_head.as_ref().expect("is initialized").number() } /// Returns the existing sync target. @@ -197,14 +199,14 @@ where /// `lowest_validated_header`. /// /// This only returns `None` if we haven't fetched the initial chain tip yet. - fn lowest_validated_header(&self) -> Option<&SealedHeader> { + fn lowest_validated_header(&self) -> Option<&SealedHeader> { self.queued_validated_headers.last().or(self.lowest_validated_header.as_ref()) } /// Validate that the received header matches the expected sync target. fn validate_sync_target( &self, - header: &SealedHeader, + header: &SealedHeader, request: HeadersRequest, peer_id: PeerId, ) -> Result<(), Box> { @@ -220,12 +222,12 @@ where ), })) } - SyncTargetBlock::Number(number) if header.number != number => { + SyncTargetBlock::Number(number) if header.number() != number => { Err(Box::new(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::InvalidTipNumber(GotExpected { - got: header.number, + got: header.number(), expected: number, }), })) @@ -244,20 +246,12 @@ where fn process_next_headers( &mut self, request: HeadersRequest, - headers: Vec
, + headers: Vec, peer_id: PeerId, - ) -> Result<(), ReverseHeadersDownloaderError> { + ) -> Result<(), ReverseHeadersDownloaderError> { let mut validated = Vec::with_capacity(headers.len()); - let sealed_headers = headers - .into_par_iter() - .map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - - SealedHeader::new(header, seal) - }) - .collect::>(); + let sealed_headers = headers.into_par_iter().map(SealedHeader::seal).collect::>(); for parent in sealed_headers { // Validate that the header is the parent header of the last validated header. if let Some(validated_header) = @@ -280,17 +274,17 @@ where if let Some((last_header, head)) = validated .last_mut() .zip(self.local_head.as_ref()) - .filter(|(last, head)| last.number == head.number + 1) + .filter(|(last, head)| last.number() == head.number() + 1) { // Every header must be valid on its own - if let Err(error) = self.consensus.validate_header(last_header) { + if let Err(error) = self.consensus.validate_header(&*last_header) { trace!(target: "downloaders::headers", %error, "Failed to validate header"); return Err(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::HeaderValidation { hash: head.hash(), - number: head.number, + number: head.number(), error: Box::new(error), }, } @@ -299,9 +293,9 @@ where // If the header is valid on its own, but not against its parent, we return it as // detached head error. - if let Err(error) = self.consensus.validate_header_against_parent(last_header, head) { + if let Err(error) = self.consensus.validate_header_against_parent(&*last_header, head) { // Replace the last header with a detached variant - error!(target: "downloaders::headers", %error, number = last_header.number, hash = ?last_header.hash(), "Header cannot be attached to known canonical chain"); + error!(target: "downloaders::headers", %error, number = last_header.number(), hash = ?last_header.hash(), "Header cannot be attached to known canonical chain"); return Err(HeadersDownloaderError::DetachedHead { local_head: Box::new(head.clone()), header: Box::new(last_header.clone()), @@ -313,7 +307,7 @@ where // update tracked block info (falling block number) self.next_chain_tip_block_number = - validated.last().expect("exists").number.saturating_sub(1); + validated.last().expect("exists").number().saturating_sub(1); self.queued_validated_headers.extend(validated); Ok(()) @@ -345,7 +339,7 @@ where let skip = self .queued_validated_headers .iter() - .take_while(|last| last.number > target_block_number) + .take_while(|last| last.number() > target_block_number) .count(); // removes all headers that are higher than current target self.queued_validated_headers.drain(..skip); @@ -360,8 +354,8 @@ where /// Handles the response for the request for the sync target fn on_sync_target_outcome( &mut self, - response: HeadersRequestOutcome, - ) -> Result<(), ReverseHeadersDownloaderError> { + response: HeadersRequestOutcome, + ) -> Result<(), ReverseHeadersDownloaderError> { let sync_target = self.existing_sync_target(); let HeadersRequestOutcome { request, outcome } = response; match outcome { @@ -372,7 +366,7 @@ where self.metrics.total_downloaded.increment(headers.len() as u64); // sort headers from highest to lowest block number - headers.sort_unstable_by_key(|h| Reverse(h.number)); + headers.sort_unstable_by_key(|h| Reverse(h.number())); if headers.is_empty() { return Err(HeadersResponseError { @@ -383,9 +377,8 @@ where .into()) } - let sealed_target = headers.swap_remove(0).seal_slow(); - let 
(header, seal) = sealed_target.into_parts(); - let target = SealedHeader::new(header, seal); + let header = headers.swap_remove(0); + let target = SealedHeader::seal(header); match sync_target { SyncTargetBlock::Hash(hash) | SyncTargetBlock::HashAndNumber { hash, .. } => { @@ -401,12 +394,12 @@ where } } SyncTargetBlock::Number(number) => { - if target.number != number { + if target.number() != number { return Err(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::InvalidTipNumber(GotExpected { - got: target.number, + got: target.number(), expected: number, }), } @@ -415,17 +408,17 @@ where } } - trace!(target: "downloaders::headers", head=?self.local_block_number(), hash=?target.hash(), number=%target.number, "Received sync target"); + trace!(target: "downloaders::headers", head=?self.local_block_number(), hash=?target.hash(), number=%target.number(), "Received sync target"); // This is the next block we need to start issuing requests from - let parent_block_number = target.number.saturating_sub(1); - self.on_block_number_update(target.number, parent_block_number); + let parent_block_number = target.number().saturating_sub(1); + self.on_block_number_update(target.number(), parent_block_number); self.queued_validated_headers.push(target); // try to validate all buffered responses blocked by this successful response self.try_validate_buffered() - .map(Err::<(), ReverseHeadersDownloaderError>) + .map(Err::<(), ReverseHeadersDownloaderError>) .transpose()?; Ok(()) @@ -439,8 +432,8 @@ where /// Invoked when we received a response fn on_headers_outcome( &mut self, - response: HeadersRequestOutcome, - ) -> Result<(), ReverseHeadersDownloaderError> { + response: HeadersRequestOutcome, + ) -> Result<(), ReverseHeadersDownloaderError> { let requested_block_number = response.block_number(); let HeadersRequestOutcome { request, outcome } = response; @@ -475,19 +468,19 @@ where } // sort headers from highest to lowest block number - headers.sort_unstable_by_key(|h| Reverse(h.number)); + headers.sort_unstable_by_key(|h| Reverse(h.number())); // validate the response let highest = &headers[0]; - trace!(target: "downloaders::headers", requested_block_number, highest=?highest.number, "Validating non-empty headers response"); + trace!(target: "downloaders::headers", requested_block_number, highest=?highest.number(), "Validating non-empty headers response"); - if highest.number != requested_block_number { + if highest.number() != requested_block_number { return Err(HeadersResponseError { request, peer_id: Some(peer_id), error: DownloadError::HeadersResponseStartBlockMismatch(GotExpected { - got: highest.number, + got: highest.number(), expected: requested_block_number, }), } @@ -495,14 +488,14 @@ where } // check if the response is the next expected - if highest.number == self.next_chain_tip_block_number { + if highest.number() == self.next_chain_tip_block_number { // is next response, validate it self.process_next_headers(request, headers, peer_id)?; // try to validate all buffered responses blocked by this successful response self.try_validate_buffered() - .map(Err::<(), ReverseHeadersDownloaderError>) + .map(Err::<(), ReverseHeadersDownloaderError>) .transpose()?; - } else if highest.number > self.existing_local_block_number() { + } else if highest.number() > self.existing_local_block_number() { self.metrics.buffered_responses.increment(1.); // can't validate yet self.buffered_responses.push(OrderedHeadersResponse { @@ -549,7 +542,7 @@ where /// Attempts to validate the 
buffered responses /// /// Returns an error if the next expected response was popped, but failed validation. - fn try_validate_buffered(&mut self) -> Option { + fn try_validate_buffered(&mut self) -> Option> { loop { // Check to see if we've already received the next value let next_response = self.buffered_responses.peek_mut()?; @@ -575,7 +568,7 @@ where /// Returns the request for the `sync_target` header. const fn get_sync_target_request(&self, start: BlockHashOrNumber) -> HeadersRequest { - HeadersRequest { start, limit: 1, direction: HeadersDirection::Falling } + HeadersRequest::falling(start, 1) } /// Starts a request future @@ -598,7 +591,11 @@ where } /// Validate whether the header is valid in relation to it's parent - fn validate(&self, header: &SealedHeader, parent: &SealedHeader) -> DownloadResult<()> { + fn validate( + &self, + header: &SealedHeader, + parent: &SealedHeader, + ) -> DownloadResult<()> { validate_header_download(&self.consensus, header, parent) } @@ -614,7 +611,7 @@ where } /// Splits off the next batch of headers - fn split_next_batch(&mut self) -> Vec { + fn split_next_batch(&mut self) -> Vec> { let batch_size = self.stream_batch_size.min(self.queued_validated_headers.len()); let mut rem = self.queued_validated_headers.split_off(batch_size); std::mem::swap(&mut rem, &mut self.queued_validated_headers); @@ -644,12 +641,15 @@ where Self: HeaderDownloader + 'static, { /// Spawns the downloader task via [`tokio::task::spawn`] - pub fn into_task(self) -> TaskDownloader { + pub fn into_task(self) -> TaskDownloader<::Header> { self.into_task_with(&TokioTaskExecutor::default()) } /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given `spawner`. - pub fn into_task_with(self, spawner: &S) -> TaskDownloader + pub fn into_task_with( + self, + spawner: &S, + ) -> TaskDownloader<::Header> where S: TaskSpawner, { @@ -659,11 +659,17 @@ where impl HeaderDownloader for ReverseHeadersDownloader where - H: HeadersClient + 'static, + H: HeadersClient + 'static, { - fn update_local_head(&mut self, head: SealedHeader) { + type Header = H::Header; + + fn update_local_head(&mut self, head: SealedHeader) { // ensure we're only yielding headers that are in range and follow the current local head. 
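The loop that follows this comment implements exactly that: queued headers are kept sorted from highest to lowest block number, so advancing the local head only requires popping from the tail. A self-contained sketch of the invariant, using bare block numbers as stand-ins for `SealedHeader<H>` (names here are illustrative, not reth's):

fn prune_queued(queued: &mut Vec<u64>, new_head: u64) {
    // queued headers are sorted high to low, so everything at or below
    // the new head sits at the end of the vector
    while queued.last().is_some_and(|last| *last <= new_head) {
        queued.pop();
    }
}

fn main() {
    let mut queued = vec![10, 9, 8, 7];
    prune_queued(&mut queued, 8);
    // only headers strictly above the new head remain queued
    assert_eq!(queued, vec![10, 9]);
}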
- while self.queued_validated_headers.last().is_some_and(|last| last.number <= head.number) { + while self + .queued_validated_headers + .last() + .is_some_and(|last| last.number() <= head.number()) + { // headers are sorted high to low self.queued_validated_headers.pop(); } @@ -686,7 +692,7 @@ where .queued_validated_headers .first() .filter(|h| h.hash() == tip) - .map(|h| h.number) + .map(|h| h.number()) { self.sync_target = Some(new_sync_target.with_number(target_number)); return @@ -701,13 +707,13 @@ where } } SyncTarget::Gap(existing) => { - let target = existing.parent_hash; + let target = existing.parent; if Some(target) != current_tip { // there could be a sync target request in progress self.sync_target_request.take(); // If the target has changed, update the request pointers based on the new // targeted block number - let parent_block_number = existing.number.saturating_sub(1); + let parent_block_number = existing.block.number.saturating_sub(1); trace!(target: "downloaders::headers", current=?current_tip, new=?target, %parent_block_number, "Updated sync target"); @@ -740,9 +746,9 @@ where impl Stream for ReverseHeadersDownloader where - H: HeadersClient + 'static, + H: HeadersClient + 'static, { - type Item = HeadersDownloaderResult>; + type Item = HeadersDownloaderResult>, H::Header>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -883,18 +889,18 @@ where } } -/// A future that returns a list of [`Header`] on success. +/// A future that returns a list of headers on success. #[derive(Debug)] struct HeadersRequestFuture { request: Option, fut: F, } -impl Future for HeadersRequestFuture +impl Future for HeadersRequestFuture where - F: Future>> + Sync + Send + Unpin, + F: Future>> + Sync + Send + Unpin, { - type Output = HeadersRequestOutcome; + type Output = HeadersRequestOutcome; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -906,14 +912,14 @@ where } /// The outcome of the [`HeadersRequestFuture`] -struct HeadersRequestOutcome { +struct HeadersRequestOutcome { request: HeadersRequest, - outcome: PeerRequestResult>, + outcome: PeerRequestResult>, } // === impl OrderedHeadersResponse === -impl HeadersRequestOutcome { +impl HeadersRequestOutcome { fn block_number(&self) -> u64 { self.request.start.as_number().expect("is number") } @@ -921,35 +927,35 @@ impl HeadersRequestOutcome { /// Wrapper type to order responses #[derive(Debug)] -struct OrderedHeadersResponse { - headers: Vec
, +struct OrderedHeadersResponse { + headers: Vec, request: HeadersRequest, peer_id: PeerId, } // === impl OrderedHeadersResponse === -impl OrderedHeadersResponse { +impl OrderedHeadersResponse { fn block_number(&self) -> u64 { self.request.start.as_number().expect("is number") } } -impl PartialEq for OrderedHeadersResponse { +impl PartialEq for OrderedHeadersResponse { fn eq(&self, other: &Self) -> bool { self.block_number() == other.block_number() } } -impl Eq for OrderedHeadersResponse {} +impl Eq for OrderedHeadersResponse {} -impl PartialOrd for OrderedHeadersResponse { +impl PartialOrd for OrderedHeadersResponse { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedHeadersResponse { +impl Ord for OrderedHeadersResponse { fn cmp(&self, other: &Self) -> Ordering { self.block_number().cmp(&other.block_number()) } @@ -1156,7 +1162,11 @@ impl ReverseHeadersDownloaderBuilder { /// Build [`ReverseHeadersDownloader`] with provided consensus /// and header client implementations - pub fn build(self, client: H, consensus: Arc) -> ReverseHeadersDownloader + pub fn build( + self, + client: H, + consensus: Arc>, + ) -> ReverseHeadersDownloader where H: HeadersClient + 'static, { @@ -1207,13 +1217,15 @@ fn calc_next_request( let diff = next_request_block_number - local_head; let limit = diff.min(request_limit); let start = next_request_block_number; - HeadersRequest { start: start.into(), limit, direction: HeadersDirection::Falling } + HeadersRequest::falling(start.into(), limit) } #[cfg(test)] mod tests { use super::*; use crate::headers::test_utils::child_header; + use alloy_consensus::Header; + use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; use assert_matches::assert_matches; use reth_consensus::test_utils::TestConsensus; use reth_network_p2p::test_utils::TestHeadersClient; @@ -1296,7 +1308,10 @@ mod tests { assert!(downloader.sync_target_request.is_some()); downloader.sync_target_request.take(); - let target = SyncTarget::Gap(SealedHeader::new(Header::default(), B256::random())); + let target = SyncTarget::Gap(BlockWithParent { + block: BlockNumHash::new(0, B256::random()), + parent: Default::default(), + }); downloader.update_sync_target(target); assert!(downloader.sync_target_request.is_none()); assert_matches!( @@ -1310,7 +1325,7 @@ mod tests { fn test_head_update() { let client = Arc::new(TestHeadersClient::default()); - let header = SealedHeader::default(); + let header: SealedHeader = SealedHeader::default(); let mut downloader = ReverseHeadersDownloaderBuilder::default() .build(Arc::clone(&client), Arc::new(TestConsensus::default())); @@ -1373,7 +1388,7 @@ mod tests { fn test_resp_order() { let mut heap = BinaryHeap::new(); let hi = 1u64; - heap.push(OrderedHeadersResponse { + heap.push(OrderedHeadersResponse::
{ headers: vec![], request: HeadersRequest { start: hi.into(), limit: 0, direction: Default::default() }, peer_id: Default::default(), diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs index b3fa27fde59..3dbfd5e3615 100644 --- a/crates/net/downloaders/src/headers/task.rs +++ b/crates/net/downloaders/src/headers/task.rs @@ -8,6 +8,7 @@ use reth_network_p2p::headers::{ use reth_primitives::SealedHeader; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ + fmt::Debug, future::Future, pin::Pin, task::{ready, Context, Poll}, @@ -22,15 +23,15 @@ pub const HEADERS_TASK_BUFFER_SIZE: usize = 8; /// A [HeaderDownloader] that drives a spawned [HeaderDownloader] on a spawned task. #[derive(Debug)] #[pin_project] -pub struct TaskDownloader { +pub struct TaskDownloader { #[pin] - from_downloader: ReceiverStream>>, - to_downloader: UnboundedSender, + from_downloader: ReceiverStream>, H>>, + to_downloader: UnboundedSender>, } // === impl TaskDownloader === -impl TaskDownloader { +impl TaskDownloader { /// Spawns the given `downloader` via [`tokio::task::spawn`] and returns a [`TaskDownloader`] /// that's connected to that task. /// @@ -44,9 +45,10 @@ impl TaskDownloader { /// # use std::sync::Arc; /// # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloader; /// # use reth_downloaders::headers::task::TaskDownloader; - /// # use reth_consensus::Consensus; + /// # use reth_consensus::HeaderValidator; /// # use reth_network_p2p::headers::client::HeadersClient; - /// # fn t(consensus:Arc, client: Arc) { + /// # use reth_primitives_traits::BlockHeader; + /// # fn t + 'static>(consensus:Arc>, client: Arc) { /// let downloader = ReverseHeadersDownloader::::builder().build( /// client, /// consensus @@ -55,7 +57,7 @@ impl TaskDownloader { /// # } pub fn spawn(downloader: T) -> Self where - T: HeaderDownloader + 'static, + T: HeaderDownloader
<Header = H> + 'static, { Self::spawn_with(downloader, &TokioTaskExecutor::default()) } /// Spawns the given `downloader` via the given `spawner` and returns a [`TaskDownloader`] /// that's connected to that task. pub fn spawn_with<T, S>(downloader: T, spawner: &S) -> Self where - T: HeaderDownloader + 'static, + T: HeaderDownloader<Header = H>
+ 'static, S: TaskSpawner, { let (headers_tx, headers_rx) = mpsc::channel(HEADERS_TASK_BUFFER_SIZE); @@ -81,12 +83,14 @@ impl TaskDownloader { } } -impl HeaderDownloader for TaskDownloader { - fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { +impl HeaderDownloader for TaskDownloader { + type Header = H; + + fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { let _ = self.to_downloader.send(DownloaderUpdates::UpdateSyncGap(head, target)); } - fn update_local_head(&mut self, head: SealedHeader) { + fn update_local_head(&mut self, head: SealedHeader) { let _ = self.to_downloader.send(DownloaderUpdates::UpdateLocalHead(head)); } @@ -99,8 +103,8 @@ impl HeaderDownloader for TaskDownloader { } } -impl Stream for TaskDownloader { - type Item = HeadersDownloaderResult>; +impl Stream for TaskDownloader { + type Item = HeadersDownloaderResult>, H>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { self.project().from_downloader.poll_next(cx) @@ -108,9 +112,10 @@ impl Stream for TaskDownloader { } /// A [`HeaderDownloader`] that runs on its own task -struct SpawnedDownloader { - updates: UnboundedReceiverStream, - headers_tx: PollSender>>, +#[expect(clippy::complexity)] +struct SpawnedDownloader { + updates: UnboundedReceiverStream>, + headers_tx: PollSender>, T::Header>>, downloader: T, } @@ -170,9 +175,9 @@ impl Future for SpawnedDownloader { } /// Commands delegated tot the spawned [`HeaderDownloader`] -enum DownloaderUpdates { - UpdateSyncGap(SealedHeader, SyncTarget), - UpdateLocalHead(SealedHeader), +enum DownloaderUpdates { + UpdateSyncGap(SealedHeader, SyncTarget), + UpdateLocalHead(SealedHeader), UpdateSyncTarget(SyncTarget), SetBatchSize(usize), } diff --git a/crates/net/downloaders/src/headers/test_utils.rs b/crates/net/downloaders/src/headers/test_utils.rs index 923ad996937..baea409f20e 100644 --- a/crates/net/downloaders/src/headers/test_utils.rs +++ b/crates/net/downloaders/src/headers/test_utils.rs @@ -2,7 +2,6 @@ #![allow(dead_code)] -use alloy_primitives::Sealable; use reth_primitives::SealedHeader; /// Returns a new [`SealedHeader`] that's the child header of the given `parent`. 
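The `SpawnedDownloader` half above is a channel-driven actor: the `TaskDownloader` handle pushes `DownloaderUpdates` commands over an unbounded sender while results stream back over a bounded channel. A minimal sketch of that pattern, assuming tokio with its default runtime and macros features; `Command`, the buffer size, and the echoed result are illustrative stand-ins, not reth's actual types:

use tokio::sync::mpsc;

#[derive(Debug)]
enum Command {
    SetBatchSize(usize),
}

#[tokio::main]
async fn main() {
    let (cmd_tx, mut cmd_rx) = mpsc::unbounded_channel::<Command>();
    let (out_tx, mut out_rx) = mpsc::channel::<usize>(8);

    // the spawned task owns the state and applies commands as they arrive
    tokio::spawn(async move {
        let mut batch_size = 1usize;
        while let Some(cmd) = cmd_rx.recv().await {
            match cmd {
                Command::SetBatchSize(size) => batch_size = size,
            }
            // report the effective value back to the handle
            let _ = out_tx.send(batch_size).await;
        }
    });

    cmd_tx.send(Command::SetBatchSize(4)).unwrap();
    assert_eq!(out_rx.recv().await, Some(4));
}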
@@ -10,7 +9,5 @@ pub(crate) fn child_header(parent: &SealedHeader) -> SealedHeader { let mut child = parent.as_ref().clone(); child.number += 1; child.parent_hash = parent.hash_slow(); - let sealed = child.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) + SealedHeader::seal(child) } diff --git a/crates/net/downloaders/src/test_utils/bodies_client.rs b/crates/net/downloaders/src/test_utils/bodies_client.rs index be8373f8235..d84d92363ee 100644 --- a/crates/net/downloaders/src/test_utils/bodies_client.rs +++ b/crates/net/downloaders/src/test_utils/bodies_client.rs @@ -78,6 +78,7 @@ impl DownloadClient for TestBodiesClient { } impl BodiesClient for TestBodiesClient { + type Body = BlockBody; type Output = BodiesFut; fn get_block_bodies_with_priority( diff --git a/crates/net/downloaders/src/test_utils/mod.rs b/crates/net/downloaders/src/test_utils/mod.rs index 7755c5e6017..635383ce3f3 100644 --- a/crates/net/downloaders/src/test_utils/mod.rs +++ b/crates/net/downloaders/src/test_utils/mod.rs @@ -43,7 +43,7 @@ pub(crate) async fn generate_bodies_file( let raw_block_bodies = create_raw_bodies(headers.iter().cloned(), &mut bodies.clone()); let file: File = tempfile::tempfile().unwrap().into(); - let mut writer = FramedWrite::new(file, BlockFileCodec); + let mut writer = FramedWrite::new(file, BlockFileCodec::default()); // rlp encode one after the other for block in raw_block_bodies { diff --git a/crates/net/ecies/Cargo.toml b/crates/net/ecies/Cargo.toml index eb2a0b023b3..ec34e3e7a32 100644 --- a/crates/net/ecies/Cargo.toml +++ b/crates/net/ecies/Cargo.toml @@ -28,7 +28,7 @@ tracing.workspace = true # HeaderBytes generic-array.workspace = true typenum = "1.15.0" -byteorder = "1.4.3" +byteorder.workspace = true # crypto rand.workspace = true diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 83dcc657bce..f799b6c7f6c 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -650,6 +650,8 @@ impl ECIES { out.extend_from_slice(tag.as_slice()); } + /// Reads the `RLPx` header from the slice, setting up the MAC and AES, returning the body + /// size contained in the header. pub fn read_header(&mut self, data: &mut [u8]) -> Result { // If the data is not large enough to fit the header and mac bytes, return an error // @@ -677,7 +679,7 @@ impl ECIES { self.body_size = Some(body_size); - Ok(self.body_size.unwrap()) + Ok(body_size) } pub const fn header_len() -> usize { @@ -686,7 +688,7 @@ impl ECIES { pub fn body_len(&self) -> usize { let len = self.body_size.unwrap(); - (if len % 16 == 0 { len } else { (len / 16 + 1) * 16 }) + 16 + Self::align_16(len) + 16 } #[cfg(test)] @@ -697,7 +699,7 @@ impl ECIES { } pub fn write_body(&mut self, out: &mut BytesMut, data: &[u8]) { - let len = if data.len() % 16 == 0 { data.len() } else { (data.len() / 16 + 1) * 16 }; + let len = Self::align_16(data.len()); let old_len = out.len(); out.resize(old_len + len, 0); @@ -730,6 +732,14 @@ impl ECIES { self.ingress_aes.as_mut().unwrap().apply_keystream(ret); Ok(split_at_mut(ret, size)?.0) } + + /// Returns `num` aligned to 16. + /// + /// `` + #[inline] + const fn align_16(num: usize) -> usize { + (num + (16 - 1)) & !(16 - 1) + } } #[cfg(test)] diff --git a/crates/net/ecies/src/codec.rs b/crates/net/ecies/src/codec.rs index c3e9b8d58cc..b5a10284cf2 100644 --- a/crates/net/ecies/src/codec.rs +++ b/crates/net/ecies/src/codec.rs @@ -1,12 +1,15 @@ //! 
This contains the main codec for `RLPx` ECIES messages -use crate::{algorithm::ECIES, ECIESError, EgressECIESValue, IngressECIESValue}; +use crate::{algorithm::ECIES, ECIESError, ECIESErrorImpl, EgressECIESValue, IngressECIESValue}; use alloy_primitives::{bytes::BytesMut, B512 as PeerId}; use secp256k1::SecretKey; use std::{fmt::Debug, io}; use tokio_util::codec::{Decoder, Encoder}; use tracing::{instrument, trace}; +/// The max size that the initial handshake packet can be. Currently 2KiB. +const MAX_INITIAL_HANDSHAKE_SIZE: usize = 2048; + /// Tokio codec for ECIES #[derive(Debug)] pub struct ECIESCodec { @@ -26,6 +29,11 @@ pub enum ECIESState { /// message containing the nonce and other metadata. Ack, + /// This is the same as the [`ECIESState::Header`] stage, but occurs only after the first + /// [`ECIESState::Ack`] message. This is so that the initial handshake message can be properly + /// validated. + InitialHeader, + /// The third stage of the ECIES handshake, where header is parsed, message integrity checks /// performed, and message is decrypted. Header, @@ -70,7 +78,7 @@ impl Decoder for ECIESCodec { self.ecies.read_auth(&mut buf.split_to(total_size))?; - self.state = ECIESState::Header; + self.state = ECIESState::InitialHeader; return Ok(Some(IngressECIESValue::AuthReceive(self.ecies.remote_id()))) } ECIESState::Ack => { @@ -89,9 +97,29 @@ impl Decoder for ECIESCodec { self.ecies.read_ack(&mut buf.split_to(total_size))?; - self.state = ECIESState::Header; + self.state = ECIESState::InitialHeader; return Ok(Some(IngressECIESValue::Ack)) } + ECIESState::InitialHeader => { + if buf.len() < ECIES::header_len() { + trace!("current len {}, need {}", buf.len(), ECIES::header_len()); + return Ok(None) + } + + let body_size = + self.ecies.read_header(&mut buf.split_to(ECIES::header_len()))?; + + if body_size > MAX_INITIAL_HANDSHAKE_SIZE { + trace!(?body_size, max=?MAX_INITIAL_HANDSHAKE_SIZE, "Header exceeds max initial handshake size"); + return Err(ECIESErrorImpl::InitialHeaderBodyTooLarge { + body_size, + max_body_size: MAX_INITIAL_HANDSHAKE_SIZE, + } + .into()) + } + + self.state = ECIESState::Body; + } ECIESState::Header => { if buf.len() < ECIES::header_len() { trace!("current len {}, need {}", buf.len(), ECIES::header_len()); @@ -131,7 +159,7 @@ impl Encoder for ECIESCodec { Ok(()) } EgressECIESValue::Ack => { - self.state = ECIESState::Header; + self.state = ECIESState::InitialHeader; self.ecies.write_ack(buf); Ok(()) } diff --git a/crates/net/ecies/src/error.rs b/crates/net/ecies/src/error.rs index 79965f73303..9dabfc16183 100644 --- a/crates/net/ecies/src/error.rs +++ b/crates/net/ecies/src/error.rs @@ -62,6 +62,14 @@ pub enum ECIESErrorImpl { /// The encrypted data is not large enough for all fields #[error("encrypted data is not large enough for all fields")] EncryptedDataTooSmall, + /// The initial header body is too large. 
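The new `InitialHeader` state exists so that the first frame after the Auth/Ack exchange can be size-checked before any body bytes are consumed. A standalone sketch of that guard; the constant mirrors the hunk above, while `check_initial_body_size` and `DecodeError` are illustrative stand-ins for the codec internals:

/// Upper bound on the first post-handshake frame, mirroring the 2KiB cap above.
const MAX_INITIAL_HANDSHAKE_SIZE: usize = 2048;

#[derive(Debug, PartialEq)]
enum DecodeError {
    InitialHeaderBodyTooLarge { body_size: usize, max_body_size: usize },
}

fn check_initial_body_size(body_size: usize) -> Result<usize, DecodeError> {
    // reject oversized handshakes before buffering any body bytes
    if body_size > MAX_INITIAL_HANDSHAKE_SIZE {
        return Err(DecodeError::InitialHeaderBodyTooLarge {
            body_size,
            max_body_size: MAX_INITIAL_HANDSHAKE_SIZE,
        });
    }
    Ok(body_size)
}

fn main() {
    assert_eq!(check_initial_body_size(1024), Ok(1024));
    assert!(check_initial_body_size(4096).is_err());
}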
+ #[error("initial header body is {body_size} but the max is {max_body_size}")] + InitialHeaderBodyTooLarge { + /// The body size from the header + body_size: usize, + /// The max body size + max_body_size: usize, + }, /// Error when trying to split an array beyond its length #[error("requested {idx} but array len is {len}")] OutOfBounds { diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 82c9fe37a44..1fe97f236de 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -16,12 +16,15 @@ workspace = true reth-chainspec.workspace = true reth-codecs-derive.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true +reth-ethereum-forks.workspace = true # ethereum alloy-chains = { workspace = true, features = ["rlp"] } alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } +alloy-consensus.workspace = true bytes.workspace = true derive_more.workspace = true @@ -41,14 +44,29 @@ arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true rand.workspace = true -alloy-consensus.workspace = true [features] arbitrary = [ - "reth-primitives/arbitrary", - "alloy-chains/arbitrary", - "dep:arbitrary", - "dep:proptest", - "dep:proptest-arbitrary-interop", + "reth-primitives/arbitrary", + "alloy-chains/arbitrary", + "dep:arbitrary", + "dep:proptest", + "dep:proptest-arbitrary-interop", + "reth-chainspec/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "reth-primitives-traits/arbitrary", + "reth-ethereum-forks/arbitrary" +] +serde = [ + "dep:serde", + "alloy-chains/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde", + "reth-primitives-traits/serde", + "reth-ethereum-forks/serde" ] -serde = ["dep:serde"] diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 6e5483f3a0e..e6506e86ad7 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -5,8 +5,7 @@ use crate::HeadersDirection; use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; -use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{BlockBody, Header}; +use reth_codecs_derive::{add_arbitrary_tests, generate_tests}; /// A request for a peer to return block headers starting at the requested block. /// The peer must return at most [`limit`](#structfield.limit) headers. @@ -41,34 +40,16 @@ pub struct GetBlockHeaders { /// The response to [`GetBlockHeaders`], containing headers if any headers were found. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(rlp, 10)] -pub struct BlockHeaders( +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +pub struct BlockHeaders( /// The requested headers. - pub Vec
, + pub Vec, ); -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for BlockHeaders { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let headers_count: usize = u.int_in_range(0..=10)?; - let mut headers = Vec::with_capacity(headers_count); - - for _ in 0..headers_count { - headers.push(reth_primitives::generate_valid_header( - u.arbitrary()?, - u.arbitrary()?, - u.arbitrary()?, - u.arbitrary()?, - u.arbitrary()?, - )) - } +generate_tests!(#[rlp, 10] BlockHeaders, EthBlockHeadersTests); - Ok(Self(headers)) - } -} - -impl From> for BlockHeaders { - fn from(headers: Vec
) -> Self { +impl From> for BlockHeaders { + fn from(headers: Vec) -> Self { Self(headers) } } @@ -94,14 +75,15 @@ impl From> for GetBlockBodies { #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(rlp, 16)] -pub struct BlockBodies( +pub struct BlockBodies( /// The requested block bodies, each of which should correspond to a hash in the request. - pub Vec, + pub Vec, ); -impl From> for BlockBodies { - fn from(bodies: Vec) -> Self { +generate_tests!(#[rlp, 16] BlockBodies, EthBlockBodiesTests); + +impl From> for BlockBodies { + fn from(bodies: Vec) -> Self { Self(bodies) } } @@ -112,14 +94,13 @@ mod tests { message::RequestPair, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, HeadersDirection, }; - use alloy_consensus::TxLegacy; - use alloy_primitives::{hex, Parity, TxKind, U256}; + use alloy_consensus::{Header, TxLegacy}; + use alloy_eips::BlockHashOrNumber; + use alloy_primitives::{hex, PrimitiveSignature as Signature, TxKind, U256}; use alloy_rlp::{Decodable, Encodable}; - use reth_primitives::{BlockHashOrNumber, Header, Signature, Transaction, TransactionSigned}; + use reth_primitives::{BlockBody, Transaction, TransactionSigned}; use std::str::FromStr; - use super::BlockBody; - #[test] fn decode_hash() { // this is a valid 32 byte rlp string @@ -217,7 +198,7 @@ mod tests { fn encode_get_block_header_number() { let expected = hex!("ca820457c682270f050580"); let mut data = vec![]; - RequestPair:: { + RequestPair { request_id: 1111, message: GetBlockHeaders { start_block: BlockHashOrNumber::Number(9999), @@ -234,7 +215,7 @@ mod tests { #[test] fn decode_get_block_header_number() { let data = hex!("ca820457c682270f050580"); - let expected = RequestPair:: { + let expected = RequestPair { request_id: 1111, message: GetBlockHeaders { start_block: BlockHashOrNumber::Number(9999), @@ -253,7 +234,7 @@ mod tests { // [ (f90202) 0x0457 = 1111, [ (f901fc) [ (f901f9) header ] ] ] let expected = hex!("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); let mut data = vec![]; - RequestPair:: { + RequestPair { request_id: 1111, message: BlockHeaders(vec![ Header { @@ -277,7 +258,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None, + target_blobs_per_block: None, }, ]), }.encode(&mut data); @@ -288,7 +270,7 @@ mod tests { #[test] 
fn decode_block_header() { let data = hex!("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); - let expected = RequestPair:: { + let expected = RequestPair { request_id: 1111, message: BlockHeaders(vec![ Header { @@ -312,7 +294,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None, + target_blobs_per_block: None, }, ]), }; @@ -325,7 +308,7 @@ mod tests { fn encode_get_block_bodies() { let expected = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"); let mut data = vec![]; - RequestPair:: { + RequestPair { request_id: 1111, message: GetBlockBodies(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -340,7 +323,7 @@ mod tests { #[test] fn decode_get_block_bodies() { let data = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"); - let expected = RequestPair:: { + let expected = RequestPair { request_id: 1111, message: GetBlockBodies(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -356,12 +339,12 @@ mod tests { fn encode_block_bodies() { let expected = 
hex!("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: BlockBodies(vec![ BlockBody { transactions: vec![ - TransactionSigned::from_transaction_and_signature(Transaction::Legacy(TxLegacy { + TransactionSigned::new_unhashed(Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x8u64, gas_price: 0x4a817c808, @@ -372,10 +355,10 @@ mod tests { }), Signature::new( U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12").unwrap(), U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10").unwrap(), - Parity::Parity(false), + false, ), ), - TransactionSigned::from_transaction_and_signature(Transaction::Legacy(TxLegacy { + TransactionSigned::new_unhashed(Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x9u64, gas_price: 0x4a817c809, @@ -386,7 +369,7 @@ mod tests { }), Signature::new( U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(), U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(), - Parity::Parity(false), + false, ), ), ], @@ -412,11 +395,11 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None, + target_blobs_per_block: None, }, ], withdrawals: None, - requests: None } ]), }; @@ -428,12 +411,12 @@ mod tests { #[test] fn decode_block_bodies() { let data = 
hex!("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"); - let expected = RequestPair:: { + let expected = RequestPair { request_id: 1111, message: BlockBodies(vec![ BlockBody { transactions: vec![ - TransactionSigned::from_transaction_and_signature(Transaction::Legacy( + TransactionSigned::new_unhashed(Transaction::Legacy( TxLegacy { chain_id: Some(1), nonce: 0x8u64, @@ -443,13 +426,13 @@ mod tests { value: U256::from(0x200u64), input: Default::default(), }), - Signature::new( + Signature::new( U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12").unwrap(), U256::from_str("0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10").unwrap(), - Parity::Eip155(37), + false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x9u64, @@ -462,7 +445,7 @@ mod tests { Signature::new( U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(), U256::from_str("0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb").unwrap(), - Parity::Eip155(37), + false, ), ), ], @@ -488,15 +471,24 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None, + target_blobs_per_block: None, }, ], withdrawals: None, - requests: None } ]), }; let result = RequestPair::decode(&mut &data[..]).unwrap(); assert_eq!(result, expected); } + + #[test] + fn empty_block_bodies_rlp() { + let body = BlockBodies::default(); + let mut buf = Vec::new(); + body.encode(&mut buf); + let decoded = BlockBodies::::decode(&mut buf.as_slice()).unwrap(); + assert_eq!(body, decoded); + } } diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 2ef6083a500..72a1116c392 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -1,15 +1,14 @@ //! Types for broadcasting new data. 
-use crate::{EthMessage, EthVersion}; +use crate::{EthMessage, EthVersion, NetworkPrimitives}; +use alloy_primitives::{Bytes, TxHash, B256, U128}; use alloy_rlp::{ Decodable, Encodable, RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper, }; - -use alloy_primitives::{Bytes, TxHash, B256, U128}; use derive_more::{Constructor, Deref, DerefMut, From, IntoIterator}; -use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::{Block, PooledTransactionsElement, TransactionSigned}; - +use reth_codecs_derive::{add_arbitrary_tests, generate_tests}; +use reth_primitives::TransactionSigned; +use reth_primitives_traits::{SignedTransaction, Transaction}; use std::{ collections::{HashMap, HashSet}, mem, @@ -75,40 +74,41 @@ impl From for Vec { #[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(rlp, 25)] -pub struct NewBlock { +pub struct NewBlock { /// A new block. - pub block: Block, + pub block: B, /// The current total difficulty. pub td: U128, } +generate_tests!(#[rlp, 25] NewBlock, EthNewBlockTests); + /// This informs peers of transactions that have appeared on the network and are not yet included /// in a block. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(rlp, 10)] -pub struct Transactions( +pub struct Transactions( /// New transactions for the peer to include in its mempool. - pub Vec, + pub Vec, ); -impl Transactions { +impl Transactions { /// Returns `true` if the list of transactions contains any blob transactions. pub fn has_eip4844(&self) -> bool { self.0.iter().any(|tx| tx.is_eip4844()) } } -impl From> for Transactions { - fn from(txs: Vec) -> Self { +impl From> for Transactions { + fn from(txs: Vec) -> Self { Self(txs) } } -impl From for Vec { - fn from(txs: Transactions) -> Self { +impl From> for Vec { + fn from(txs: Transactions) -> Self { txs.0 } } @@ -120,9 +120,9 @@ impl From for Vec { #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(rlp, 20)] -pub struct SharedTransactions( +pub struct SharedTransactions( /// New transactions for the peer to include in its mempool. - pub Vec>, + pub Vec>, ); /// A wrapper type for all different new pooled transaction types @@ -269,7 +269,7 @@ impl NewPooledTransactionHashes { } } -impl From for EthMessage { +impl From for EthMessage { fn from(value: NewPooledTransactionHashes) -> Self { match value { NewPooledTransactionHashes::Eth66(msg) => Self::NewPooledTransactionHashes66(msg), @@ -309,7 +309,7 @@ impl From> for NewPooledTransactionHashes66 { } } -/// Same as [`NewPooledTransactionHashes66`] but extends that that beside the transaction hashes, +/// Same as [`NewPooledTransactionHashes66`] but extends that beside the transaction hashes, /// the node sends the transaction types and their sizes (as defined in EIP-2718) as well. 
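For context on the doc comment just above: since eth/68 an announcement carries three parallel lists, letting a peer decide from the EIP-2718 type byte and encoded size alone whether to request the full transaction. A sketch of that shape; the field names are illustrative rather than the exact `NewPooledTransactionHashes68` layout:

struct Announcement68 {
    types: Vec<u8>,        // EIP-2718 transaction type, one per hash
    sizes: Vec<usize>,     // RLP-encoded length, one per hash
    hashes: Vec<[u8; 32]>, // transaction hashes
}

impl Announcement68 {
    // the three lists must stay in lockstep, one entry per announced tx
    fn is_well_formed(&self) -> bool {
        self.types.len() == self.hashes.len() && self.sizes.len() == self.hashes.len()
    }
}

fn main() {
    let ann = Announcement68 {
        types: vec![0x02, 0x03],
        sizes: vec![120, 131_072],
        hashes: vec![[0u8; 32], [1u8; 32]],
    };
    assert!(ann.is_well_formed());
}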
#[derive(Clone, Debug, PartialEq, Eq, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] @@ -554,7 +554,7 @@ pub trait HandleVersionedMempoolData { fn msg_version(&self) -> EthVersion; } -impl HandleMempoolData for Vec { +impl HandleMempoolData for Vec { fn is_empty(&self) -> bool { self.is_empty() } @@ -564,7 +564,7 @@ impl HandleMempoolData for Vec { } fn retain_by_hash(&mut self, mut f: impl FnMut(&TxHash) -> bool) { - self.retain(|tx| f(tx.hash())) + self.retain(|tx| f(tx.tx_hash())) } } @@ -732,7 +732,7 @@ impl RequestTxHashes { impl FromIterator<(TxHash, Eth68TxMetadata)> for RequestTxHashes { fn from_iter>(iter: I) -> Self { - Self::new(iter.into_iter().map(|(hash, _)| hash).collect::>()) + Self::new(iter.into_iter().map(|(hash, _)| hash).collect()) } } diff --git a/crates/net/eth-wire-types/src/header.rs b/crates/net/eth-wire-types/src/header.rs index 7ecfc802d8a..883db625c6e 100644 --- a/crates/net/eth-wire-types/src/header.rs +++ b/crates/net/eth-wire-types/src/header.rs @@ -87,10 +87,9 @@ impl From for bool { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::EMPTY_ROOT_HASH; + use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{address, b256, bloom, bytes, hex, Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; - use reth_primitives::Header; use std::str::FromStr; // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 @@ -124,7 +123,7 @@ mod tests { .unwrap(); let header = Header { parent_hash: b256!("e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2a"), - ommers_hash: b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: address!("ba5e000000000000000000000000000000000000"), state_root: b256!("ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7"), transactions_root: b256!("50f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accf"), @@ -143,7 +142,8 @@ mod tests { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None + requests_hash: None, + target_blobs_per_block: None, }; assert_eq!(header.hash_slow(), expected_hash); } @@ -228,10 +228,7 @@ mod tests { "3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6", ) .unwrap(), - ommers_hash: B256::from_str( - "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - ) - .unwrap(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: Address::from_str("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba").unwrap(), state_root: B256::from_str( "3c837fc158e3e93eafcaf2e658a02f5d8f99abc9f1c4c66cdea96c0ca26406ae", @@ -259,7 +256,8 @@ mod tests { blob_gas_used: Some(0x020000), excess_blob_gas: Some(0), parent_beacon_block_root: None, - requests_root: None, + requests_hash: None, + target_blobs_per_block: None, }; let header = Header::decode(&mut data.as_slice()).unwrap(); @@ -280,7 +278,7 @@ mod tests { "13a7ec98912f917b3e804654e37c9866092043c13eb8eab94eb64818e886cff5", ) .unwrap(), - ommers_hash: b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: address!("f97e180c050e5ab072211ad2c213eb5aee4df134"), state_root: b256!("ec229dbe85b0d3643ad0f471e6ec1a36bbc87deffbbd970762d22a53b35d068a"), transactions_root: EMPTY_ROOT_HASH, @@ -299,7 +297,8 @@ mod tests { parent_beacon_block_root: None, blob_gas_used: Some(0), excess_blob_gas: Some(0x1600000), - requests_root: None, + requests_hash: None, + 
target_blobs_per_block: None, }; let header = Header::decode(&mut data.as_slice()).unwrap(); diff --git a/crates/net/eth-wire-types/src/lib.rs b/crates/net/eth-wire-types/src/lib.rs index 0e8fd5df98a..ac7ea55d0b9 100644 --- a/crates/net/eth-wire-types/src/lib.rs +++ b/crates/net/eth-wire-types/src/lib.rs @@ -40,3 +40,6 @@ pub use disconnect_reason::*; pub mod capability; pub use capability::*; + +pub mod primitives; +pub use primitives::*; diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index 9ef8e6c7147..9a866720310 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -11,8 +11,7 @@ use super::{ GetNodeData, GetPooledTransactions, GetReceipts, NewBlock, NewPooledTransactionHashes66, NewPooledTransactionHashes68, NodeData, PooledTransactions, Receipts, Status, Transactions, }; -use crate::{EthVersion, SharedTransactions}; - +use crate::{EthNetworkPrimitives, EthVersion, NetworkPrimitives, SharedTransactions}; use alloy_primitives::bytes::{Buf, BufMut}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use std::{fmt::Debug, sync::Arc}; @@ -35,14 +34,18 @@ pub enum MessageError { /// An `eth` protocol message, containing a message ID and payload. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct ProtocolMessage { +pub struct ProtocolMessage { /// The unique identifier representing the type of the Ethereum message. pub message_type: EthMessageID, /// The content of the message, including specific data based on the message type. - pub message: EthMessage, + #[cfg_attr( + feature = "serde", + serde(bound = "EthMessage: serde::Serialize + serde::de::DeserializeOwned") + )] + pub message: EthMessage, } -impl ProtocolMessage { +impl ProtocolMessage { /// Create a new `ProtocolMessage` from a message type and message rlp bytes. pub fn decode_message(version: EthVersion, buf: &mut &[u8]) -> Result { let message_type = EthMessageID::decode(buf)?; @@ -50,9 +53,17 @@ impl ProtocolMessage { let message = match message_type { EthMessageID::Status => EthMessage::Status(Status::decode(buf)?), EthMessageID::NewBlockHashes => { + if version.is_eth69() { + return Err(MessageError::Invalid(version, EthMessageID::NewBlockHashes)); + } EthMessage::NewBlockHashes(NewBlockHashes::decode(buf)?) } - EthMessageID::NewBlock => EthMessage::NewBlock(Box::new(NewBlock::decode(buf)?)), + EthMessageID::NewBlock => { + if version.is_eth69() { + return Err(MessageError::Invalid(version, EthMessageID::NewBlock)); + } + EthMessage::NewBlock(Box::new(NewBlock::decode(buf)?)) + } EthMessageID::Transactions => EthMessage::Transactions(Transactions::decode(buf)?), EthMessageID::NewPooledTransactionHashes => { if version >= EthVersion::Eth68 { @@ -65,58 +76,36 @@ impl ProtocolMessage { )?) 
} } - EthMessageID::GetBlockHeaders => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetBlockHeaders(request_pair) - } - EthMessageID::BlockHeaders => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::BlockHeaders(request_pair) - } - EthMessageID::GetBlockBodies => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetBlockBodies(request_pair) - } - EthMessageID::BlockBodies => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::BlockBodies(request_pair) - } + EthMessageID::GetBlockHeaders => EthMessage::GetBlockHeaders(RequestPair::decode(buf)?), + EthMessageID::BlockHeaders => EthMessage::BlockHeaders(RequestPair::decode(buf)?), + EthMessageID::GetBlockBodies => EthMessage::GetBlockBodies(RequestPair::decode(buf)?), + EthMessageID::BlockBodies => EthMessage::BlockBodies(RequestPair::decode(buf)?), EthMessageID::GetPooledTransactions => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetPooledTransactions(request_pair) + EthMessage::GetPooledTransactions(RequestPair::decode(buf)?) } EthMessageID::PooledTransactions => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::PooledTransactions(request_pair) + EthMessage::PooledTransactions(RequestPair::decode(buf)?) } EthMessageID::GetNodeData => { if version >= EthVersion::Eth67 { return Err(MessageError::Invalid(version, EthMessageID::GetNodeData)) } - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetNodeData(request_pair) + EthMessage::GetNodeData(RequestPair::decode(buf)?) } EthMessageID::NodeData => { if version >= EthVersion::Eth67 { return Err(MessageError::Invalid(version, EthMessageID::GetNodeData)) } - let request_pair = RequestPair::::decode(buf)?; - EthMessage::NodeData(request_pair) - } - EthMessageID::GetReceipts => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::GetReceipts(request_pair) - } - EthMessageID::Receipts => { - let request_pair = RequestPair::::decode(buf)?; - EthMessage::Receipts(request_pair) + EthMessage::NodeData(RequestPair::decode(buf)?) } + EthMessageID::GetReceipts => EthMessage::GetReceipts(RequestPair::decode(buf)?), + EthMessageID::Receipts => EthMessage::Receipts(RequestPair::decode(buf)?), }; Ok(Self { message_type, message }) } } -impl Encodable for ProtocolMessage { +impl Encodable for ProtocolMessage { /// Encodes the protocol message into bytes. The message type is encoded as a single byte and /// prepended to the message. fn encode(&self, out: &mut dyn BufMut) { @@ -128,23 +117,23 @@ impl Encodable for ProtocolMessage { } } -impl From for ProtocolMessage { - fn from(message: EthMessage) -> Self { +impl From> for ProtocolMessage { + fn from(message: EthMessage) -> Self { Self { message_type: message.message_id(), message } } } /// Represents messages that can be sent to multiple peers. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ProtocolBroadcastMessage { +#[derive(Clone, Debug)] +pub struct ProtocolBroadcastMessage { /// The unique identifier representing the type of the Ethereum message. pub message_type: EthMessageID, /// The content of the message to be broadcasted, including specific data based on the message /// type. - pub message: EthBroadcastMessage, + pub message: EthBroadcastMessage, } -impl Encodable for ProtocolBroadcastMessage { +impl Encodable for ProtocolBroadcastMessage { /// Encodes the protocol message into bytes. The message type is encoded as a single byte and /// prepended to the message. 
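`decode_message` above now gates message IDs on the negotiated version: `GetNodeData`/`NodeData` are invalid from eth/67 on, and `NewBlock`/`NewBlockHashes` are rejected once eth/69 is negotiated. A minimal sketch of that dispatch, using bare numbers in place of the `EthVersion` enum (the real code checks `is_eth69()` and `>= EthVersion::Eth67`):

#[derive(Debug)]
enum MsgId {
    NewBlock,
    GetNodeData,
    Transactions,
}

fn check_allowed(version: u8, id: &MsgId) -> Result<(), String> {
    match id {
        // dropped from the protocol in eth/67
        MsgId::GetNodeData if version >= 67 => Err(format!("invalid at eth/{version}")),
        // no longer part of the protocol once eth/69 is negotiated
        MsgId::NewBlock if version >= 69 => Err(format!("invalid at eth/{version}")),
        _ => Ok(()),
    }
}

fn main() {
    assert!(check_allowed(66, &MsgId::GetNodeData).is_ok());
    assert!(check_allowed(68, &MsgId::GetNodeData).is_err());
    assert!(check_allowed(68, &MsgId::NewBlock).is_ok());
    assert!(check_allowed(69, &MsgId::NewBlock).is_err());
    assert!(check_allowed(69, &MsgId::Transactions).is_ok());
}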
fn encode(&self, out: &mut dyn BufMut) { @@ -156,8 +145,8 @@ impl Encodable for ProtocolBroadcastMessage { } } -impl From for ProtocolBroadcastMessage { - fn from(message: EthBroadcastMessage) -> Self { +impl From> for ProtocolBroadcastMessage { + fn from(message: EthBroadcastMessage) -> Self { Self { message_type: message.message_id(), message } } } @@ -181,15 +170,23 @@ impl From for ProtocolBroadcastMessage { /// [`NewPooledTransactionHashes68`] is defined. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub enum EthMessage { +pub enum EthMessage { /// Represents a Status message required for the protocol handshake. Status(Status), /// Represents a `NewBlockHashes` message broadcast to the network. NewBlockHashes(NewBlockHashes), /// Represents a `NewBlock` message broadcast to the network. - NewBlock(Box), + #[cfg_attr( + feature = "serde", + serde(bound = "N::Block: serde::Serialize + serde::de::DeserializeOwned") + )] + NewBlock(Box>), /// Represents a Transactions message broadcast to the network. - Transactions(Transactions), + #[cfg_attr( + feature = "serde", + serde(bound = "N::BroadcastedTransaction: serde::Serialize + serde::de::DeserializeOwned") + )] + Transactions(Transactions), /// Represents a `NewPooledTransactionHashes` message for eth/66 version. NewPooledTransactionHashes66(NewPooledTransactionHashes66), /// Represents a `NewPooledTransactionHashes` message for eth/68 version. @@ -198,15 +195,27 @@ pub enum EthMessage { /// Represents a `GetBlockHeaders` request-response pair. GetBlockHeaders(RequestPair), /// Represents a `BlockHeaders` request-response pair. - BlockHeaders(RequestPair), + #[cfg_attr( + feature = "serde", + serde(bound = "N::BlockHeader: serde::Serialize + serde::de::DeserializeOwned") + )] + BlockHeaders(RequestPair>), /// Represents a `GetBlockBodies` request-response pair. GetBlockBodies(RequestPair), /// Represents a `BlockBodies` request-response pair. - BlockBodies(RequestPair), + #[cfg_attr( + feature = "serde", + serde(bound = "N::BlockBody: serde::Serialize + serde::de::DeserializeOwned") + )] + BlockBodies(RequestPair>), /// Represents a `GetPooledTransactions` request-response pair. GetPooledTransactions(RequestPair), /// Represents a `PooledTransactions` request-response pair. - PooledTransactions(RequestPair), + #[cfg_attr( + feature = "serde", + serde(bound = "N::PooledTransaction: serde::Serialize + serde::de::DeserializeOwned") + )] + PooledTransactions(RequestPair>), /// Represents a `GetNodeData` request-response pair. GetNodeData(RequestPair), /// Represents a `NodeData` request-response pair. @@ -217,7 +226,7 @@ pub enum EthMessage { Receipts(RequestPair), } -impl EthMessage { +impl EthMessage { /// Returns the message's ID. pub const fn message_id(&self) -> EthMessageID { match self { @@ -242,7 +251,7 @@ impl EthMessage { } } -impl Encodable for EthMessage { +impl Encodable for EthMessage { fn encode(&self, out: &mut dyn BufMut) { match self { Self::Status(status) => status.encode(out), @@ -293,16 +302,16 @@ impl Encodable for EthMessage { /// /// Note: This is only useful for outgoing messages. #[derive(Clone, Debug, PartialEq, Eq)] -pub enum EthBroadcastMessage { +pub enum EthBroadcastMessage { /// Represents a new block broadcast message. - NewBlock(Arc), + NewBlock(Arc>), /// Represents a transactions broadcast message. 
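On the design choice above: `EthBroadcastMessage::NewBlock` holds an `Arc` because the same block is queued for many peers at once, so each send queue shares one allocation instead of deep-copying the block per peer. A small sketch of the effect, with an illustrative `Block` type:

use std::sync::Arc;

struct Block {
    payload: Vec<u8>,
}

fn broadcast_to_peers(block: Arc<Block>, peer_count: usize) -> Vec<Arc<Block>> {
    // each peer's queue gets a reference-counted handle, not a deep copy
    (0..peer_count).map(|_| Arc::clone(&block)).collect()
}

fn main() {
    let block = Arc::new(Block { payload: vec![0u8; 1024] });
    let queued = broadcast_to_peers(Arc::clone(&block), 32);
    assert_eq!(queued.len(), 32);
    assert_eq!(block.payload.len(), 1024);
    // one local handle plus 32 queued handles share the allocation
    assert_eq!(Arc::strong_count(&block), 33);
}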
- Transactions(SharedTransactions), + Transactions(SharedTransactions), } // === impl EthBroadcastMessage === -impl EthBroadcastMessage { +impl EthBroadcastMessage { /// Returns the message's ID. pub const fn message_id(&self) -> EthMessageID { match self { @@ -312,7 +321,7 @@ impl EthBroadcastMessage { } } -impl Encodable for EthBroadcastMessage { +impl Encodable for EthBroadcastMessage { fn encode(&self, out: &mut dyn BufMut) { match self { Self::NewBlock(new_block) => new_block.encode(out), @@ -494,7 +503,8 @@ where mod tests { use super::MessageError; use crate::{ - message::RequestPair, EthMessage, EthMessageID, GetNodeData, NodeData, ProtocolMessage, + message::RequestPair, EthMessage, EthMessageID, EthNetworkPrimitives, EthVersion, + GetNodeData, NodeData, ProtocolMessage, }; use alloy_primitives::hex; use alloy_rlp::{Decodable, Encodable, Error}; @@ -507,20 +517,30 @@ mod tests { #[test] fn test_removed_message_at_eth67() { - let get_node_data = - EthMessage::GetNodeData(RequestPair { request_id: 1337, message: GetNodeData(vec![]) }); + let get_node_data = EthMessage::::GetNodeData(RequestPair { + request_id: 1337, + message: GetNodeData(vec![]), + }); let buf = encode(ProtocolMessage { message_type: EthMessageID::GetNodeData, message: get_node_data, }); - let msg = ProtocolMessage::decode_message(crate::EthVersion::Eth67, &mut &buf[..]); + let msg = ProtocolMessage::::decode_message( + crate::EthVersion::Eth67, + &mut &buf[..], + ); assert!(matches!(msg, Err(MessageError::Invalid(..)))); - let node_data = - EthMessage::NodeData(RequestPair { request_id: 1337, message: NodeData(vec![]) }); + let node_data = EthMessage::::NodeData(RequestPair { + request_id: 1337, + message: NodeData(vec![]), + }); let buf = encode(ProtocolMessage { message_type: EthMessageID::NodeData, message: node_data }); - let msg = ProtocolMessage::decode_message(crate::EthVersion::Eth67, &mut &buf[..]); + let msg = ProtocolMessage::::decode_message( + crate::EthVersion::Eth67, + &mut &buf[..], + ); assert!(matches!(msg, Err(MessageError::Invalid(..)))); } @@ -566,4 +586,18 @@ mod tests { let result = RequestPair::>::decode(&mut &*raw_pair); assert!(matches!(result, Err(Error::UnexpectedLength))); } + + #[test] + fn empty_block_bodies_protocol() { + let empty_block_bodies = + ProtocolMessage::from(EthMessage::::BlockBodies(RequestPair { + request_id: 0, + message: Default::default(), + })); + let mut buf = Vec::new(); + empty_block_bodies.encode(&mut buf); + let decoded = + ProtocolMessage::decode_message(EthVersion::Eth68, &mut buf.as_slice()).unwrap(); + assert_eq!(empty_block_bodies, decoded); + } } diff --git a/crates/net/eth-wire-types/src/primitives.rs b/crates/net/eth-wire-types/src/primitives.rs new file mode 100644 index 00000000000..17f1943186a --- /dev/null +++ b/crates/net/eth-wire-types/src/primitives.rs @@ -0,0 +1,57 @@ +//! Abstraction over primitive types in network messages. + +use alloy_rlp::{Decodable, Encodable}; +use reth_primitives_traits::{Block, BlockBody, BlockHeader, SignedTransaction}; +use std::fmt::Debug; + +/// Abstraction over primitive types which might appear in network messages. See +/// [`crate::EthMessage`] for more context. +pub trait NetworkPrimitives: + Send + Sync + Unpin + Clone + Debug + PartialEq + Eq + 'static +{ + /// The block header type. + type BlockHeader: BlockHeader + 'static; + + /// The block body type. + type BlockBody: BlockBody + 'static; + + /// Full block type. + type Block: Block
<Header = Self::BlockHeader, Body = Self::BlockBody> + + Encodable + + Decodable + + 'static; + + /// The transaction type which peers announce in `Transactions` messages. It is different from + /// `PooledTransactions` to account for the Ethereum case, where EIP-4844 transactions are not being + /// announced and can only be explicitly requested from peers. + type BroadcastedTransaction: SignedTransaction + 'static; + + /// The transaction type which peers return in `PooledTransactions` messages. + type PooledTransaction: SignedTransaction + TryFrom<Self::BroadcastedTransaction> + 'static; + + /// The receipt type which peers return in `Receipts` messages, in response to `GetReceipts` requests. + type Receipt: Encodable + + Decodable + + Send + + Sync + + Unpin + + Clone + + Debug + + PartialEq + + Eq + + 'static; +} + +/// Primitive types used by the Ethereum network. +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] +#[non_exhaustive] +pub struct EthNetworkPrimitives; + +impl NetworkPrimitives for EthNetworkPrimitives { + type BlockHeader = alloy_consensus::Header; + type BlockBody = reth_primitives::BlockBody; + type Block = reth_primitives::Block; + type BroadcastedTransaction = reth_primitives::TransactionSigned; + type PooledTransaction = reth_primitives::PooledTransactionsElement; + type Receipt = reth_primitives::Receipt; +} diff --git a/crates/net/eth-wire-types/src/receipts.rs b/crates/net/eth-wire-types/src/receipts.rs index db9d6f871e4..2bad4287f2e 100644 --- a/crates/net/eth-wire-types/src/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -3,7 +3,7 @@ use alloy_primitives::B256; use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use reth_codecs_derive::add_arbitrary_tests; -use reth_primitives::ReceiptWithBloom; +use reth_primitives::{Receipt, ReceiptWithBloom}; /// A request for transaction receipts from the given block hashes. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] @@ -23,7 +23,7 @@ pub struct GetReceipts( #[add_arbitrary_tests(rlp)] pub struct Receipts( /// Each receipt hash should correspond to a block hash in the request.
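The `NetworkPrimitives` trait above follows a common Rust pattern: a zero-sized marker type bundles the concrete types one network speaks, and message types take the bundle as a single parameter. A reduced, self-contained sketch of that pattern with stand-in associated types:

use std::fmt::Debug;

trait NetPrimitives: Debug + 'static {
    type Header: Debug;
    type Body: Debug;
}

/// Zero-sized marker, analogous to `EthNetworkPrimitives` above.
#[derive(Debug)]
struct EthNet;

impl NetPrimitives for EthNet {
    type Header = u64;   // stand-in for alloy_consensus::Header
    type Body = Vec<u8>; // stand-in for reth_primitives::BlockBody
}

// a message generic over the bundle, mirroring `EthMessage<N>` above
#[derive(Debug)]
struct BlockHeadersMsg<N: NetPrimitives>(Vec<N::Header>);

fn main() {
    let msg: BlockHeadersMsg<EthNet> = BlockHeadersMsg(vec![1, 2, 3]);
    println!("{msg:?}");
}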
- pub Vec>, + pub Vec>>, ); #[cfg(test)] @@ -37,7 +37,7 @@ mod tests { fn roundtrip_eip1559() { let receipts = Receipts(vec![vec![ReceiptWithBloom { receipt: Receipt { tx_type: TxType::Eip1559, ..Default::default() }, - bloom: Default::default(), + logs_bloom: Default::default(), }]]); let mut out = vec![]; @@ -54,7 +54,7 @@ mod tests { fn encode_get_receipts() { let expected = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: GetReceipts(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -72,7 +72,7 @@ mod tests { let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!( request, - RequestPair:: { + RequestPair { request_id: 1111, message: GetReceipts(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -88,7 +88,7 @@ mod tests { fn encode_receipts() { let expected = hex!("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: Receipts(vec![vec![ ReceiptWithBloom { @@ -108,7 +108,7 @@ mod tests { success: false, ..Default::default() }, - bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), }, ]]), }; @@ -124,7 +124,7 @@ mod tests { let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!( request, - RequestPair:: { + RequestPair { request_id: 1111, message: Receipts(vec![ vec![ @@ -145,7 +145,7 @@ mod tests { success: false, ..Default::default() }, - bloom: 
hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), + logs_bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), }, ], ]), diff --git a/crates/net/eth-wire-types/src/response.rs b/crates/net/eth-wire-types/src/response.rs deleted file mode 100644 index dfcf5ed56a8..00000000000 --- a/crates/net/eth-wire-types/src/response.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::{ - BlockBodies, BlockHeaders, NodeData, PooledTransactions, Receipts, RequestPair, Status, -}; - -// This type is analogous to the `zebra_network::Response` type. -/// An ethereum network response for version 66. -#[derive(Clone, Debug, Eq, PartialEq)] -pub enum Response { - /// The request does not have a response. - Nil, - - /// The [`Status`](super::Status) message response in the eth protocol handshake. - Status(Status), - - /// The response to a [`Request::GetBlockHeaders`](super::Request::GetBlockHeaders) request. - BlockHeaders(RequestPair), - - /// The response to a [`Request::GetBlockBodies`](super::Request::GetBlockBodies) request. - BlockBodies(RequestPair), - - /// The response to a [`Request::GetPooledTransactions`](super::Request::GetPooledTransactions) request. - PooledTransactions(RequestPair), - - /// The response to a [`Request::GetNodeData`](super::Request::GetNodeData) request. - NodeData(RequestPair), - - /// The response to a [`Request::GetReceipts`](super::Request::GetReceipts) request. 
-    Receipts(RequestPair<Receipts>),
-}
diff --git a/crates/net/eth-wire-types/src/state.rs b/crates/net/eth-wire-types/src/state.rs
index 16a2959b338..57273adc6b1 100644
--- a/crates/net/eth-wire-types/src/state.rs
+++ b/crates/net/eth-wire-types/src/state.rs
@@ -36,7 +36,7 @@ mod tests {
     fn encode_get_node_data() {
         let expected = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef");
         let mut data = vec![];
-        let request = RequestPair::<GetNodeData> {
+        let request = RequestPair {
             request_id: 1111,
             message: GetNodeData(vec![
                 hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(),
@@ -54,7 +54,7 @@
         let request = RequestPair::<GetNodeData>::decode(&mut &data[..]).unwrap();
         assert_eq!(
             request,
-            RequestPair::<GetNodeData> {
+            RequestPair {
                 request_id: 1111,
                 message: GetNodeData(vec![
                     hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(),
@@ -69,7 +69,7 @@
     fn encode_node_data() {
         let expected = hex!("ce820457ca84deadc0de84feedbeef");
         let mut data = vec![];
-        let request = RequestPair::<NodeData> {
+        let request = RequestPair {
             request_id: 1111,
             message: NodeData(vec![
                 hex!("deadc0de").as_slice().into(),
@@ -87,7 +87,7 @@
         let request = RequestPair::<NodeData>::decode(&mut &data[..]).unwrap();
         assert_eq!(
             request,
-            RequestPair::<NodeData> {
+            RequestPair {
                 request_id: 1111,
                 message: NodeData(vec![
                     hex!("deadc0de").as_slice().into(),
diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs
index a5e7530ec09..e19912481e4 100644
--- a/crates/net/eth-wire-types/src/status.rs
+++ b/crates/net/eth-wire-types/src/status.rs
@@ -4,7 +4,7 @@
 use alloy_primitives::{hex, B256, U256};
 use alloy_rlp::{RlpDecodable, RlpEncodable};
 use reth_chainspec::{EthChainSpec, Hardforks, MAINNET};
 use reth_codecs_derive::add_arbitrary_tests;
-use reth_primitives::{EthereumHardfork, ForkId, Head};
+use reth_ethereum_forks::{EthereumHardfork, ForkId, Head};
 use std::fmt::{Debug, Display};
 
 /// The status message is used in the eth protocol handshake to ensure that peers are on the same
@@ -19,7 +19,7 @@ use std::fmt::{Debug, Display};
 pub struct Status {
     /// The current protocol version. For example, peers running `eth/66` would have a version of
     /// 66.
-    pub version: u8,
+    pub version: EthVersion,
 
     /// The chain id, as introduced in
     /// [EIP155](https://eips.ethereum.org/EIPS/eip-155#list-of-chain-ids).
@@ -50,7 +50,7 @@ impl Status {
     /// Sets the [`EthVersion`] for the status.
     pub fn set_eth_version(&mut self, version: EthVersion) {
-        self.version = version as u8;
+        self.version = version;
     }
 
     /// Create a [`StatusBuilder`] from the given [`EthChainSpec`] and head block.
@@ -122,7 +122,7 @@ impl Default for Status {
     fn default() -> Self {
         let mainnet_genesis = MAINNET.genesis_hash();
         Self {
-            version: EthVersion::Eth68 as u8,
+            version: EthVersion::Eth68,
             chain: Chain::from_named(NamedChain::Mainnet),
             total_difficulty: U256::from(17_179_869_184u64),
             blockhash: mainnet_genesis,
@@ -138,14 +138,14 @@
 ///
 /// # Example
 /// ```
+/// use alloy_consensus::constants::MAINNET_GENESIS_HASH;
 /// use alloy_primitives::{B256, U256};
 /// use reth_chainspec::{Chain, EthereumHardfork, MAINNET};
 /// use reth_eth_wire_types::{EthVersion, Status};
-/// use reth_primitives::MAINNET_GENESIS_HASH;
 ///
 /// // this is just an example status message!
/// let status = Status::builder() -/// .version(EthVersion::Eth66.into()) +/// .version(EthVersion::Eth66) /// .chain(Chain::mainnet()) /// .total_difficulty(U256::from(100)) /// .blockhash(B256::from(MAINNET_GENESIS_HASH)) @@ -156,7 +156,7 @@ impl Default for Status { /// assert_eq!( /// status, /// Status { -/// version: EthVersion::Eth66.into(), +/// version: EthVersion::Eth66, /// chain: Chain::mainnet(), /// total_difficulty: U256::from(100), /// blockhash: B256::from(MAINNET_GENESIS_HASH), @@ -177,7 +177,7 @@ impl StatusBuilder { } /// Sets the protocol version. - pub const fn version(mut self, version: u8) -> Self { + pub const fn version(mut self, version: EthVersion) -> Self { self.status.version = version; self } @@ -216,6 +216,7 @@ impl StatusBuilder { #[cfg(test)] mod tests { use crate::{EthVersion, Status}; + use alloy_consensus::constants::MAINNET_GENESIS_HASH; use alloy_genesis::Genesis; use alloy_primitives::{hex, B256, U256}; use alloy_rlp::{Decodable, Encodable}; @@ -228,17 +229,14 @@ mod tests { fn encode_eth_status_message() { let expected = hex!("f85643018a07aac59dabcdd74bc567a0feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13da0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3c684b715077d80"); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: Chain::from_named(NamedChain::Mainnet), total_difficulty: U256::from(36206751599115524359527u128), blockhash: B256::from_str( "feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d", ) .unwrap(), - genesis: B256::from_str( - "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3", - ) - .unwrap(), + genesis: MAINNET_GENESIS_HASH, forkid: ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 0 }, }; @@ -251,17 +249,14 @@ mod tests { fn decode_eth_status_message() { let data = hex!("f85643018a07aac59dabcdd74bc567a0feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13da0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3c684b715077d80"); let expected = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: Chain::from_named(NamedChain::Mainnet), total_difficulty: U256::from(36206751599115524359527u128), blockhash: B256::from_str( "feb27336ca7923f8fab3bd617fcb6e75841538f71c1bcfc267d7838489d9e13d", ) .unwrap(), - genesis: B256::from_str( - "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3", - ) - .unwrap(), + genesis: MAINNET_GENESIS_HASH, forkid: ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 0 }, }; let status = Status::decode(&mut &data[..]).unwrap(); @@ -272,7 +267,7 @@ mod tests { fn encode_network_status_message() { let expected = hex!("f850423884024190faa0f8514c4680ef27700751b08f37645309ce65a449616a3ea966bf39dd935bb27ba00d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5bc6845d43d2fd80"); let status = Status { - version: EthVersion::Eth66 as u8, + version: EthVersion::Eth66, chain: Chain::from_named(NamedChain::BinanceSmartChain), total_difficulty: U256::from(37851386u64), blockhash: B256::from_str( @@ -295,7 +290,7 @@ mod tests { fn decode_network_status_message() { let data = hex!("f850423884024190faa0f8514c4680ef27700751b08f37645309ce65a449616a3ea966bf39dd935bb27ba00d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5bc6845d43d2fd80"); let expected = Status { - version: EthVersion::Eth66 as u8, + version: EthVersion::Eth66, chain: Chain::from_named(NamedChain::BinanceSmartChain), total_difficulty: U256::from(37851386u64), 
             blockhash: B256::from_str(
@@ -316,7 +311,7 @@ mod tests {
     fn decode_another_network_status_message() {
         let data = hex!("f86142820834936d68fcffffffffffffffffffffffffdeab81b8a0523e8163a6d620a4cc152c547a05f28a03fec91a2a615194cb86df9731372c0ca06499dccdc7c7def3ebb1ce4c6ee27ec6bd02aee570625ca391919faf77ef27bdc6841a67ccd880");
         let expected = Status {
-            version: EthVersion::Eth66 as u8,
+            version: EthVersion::Eth66,
             chain: Chain::from_id(2100),
             total_difficulty: U256::from_str(
                 "0x000000000000000000000000006d68fcffffffffffffffffffffffffdeab81b8",
@@ -343,7 +338,7 @@
         let total_difficulty = U256::from(rng.gen::<u64>());
 
         // create a genesis that has a random part, so we can check that the hash is preserved
-        let genesis = Genesis { nonce: rng.gen::<u64>(), ..Default::default() };
+        let genesis = Genesis { nonce: rng.gen(), ..Default::default() };
 
         // build head
         let head = Head {
diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs
index ab65aa178ee..ca76f0a8c7e 100644
--- a/crates/net/eth-wire-types/src/transactions.rs
+++ b/crates/net/eth-wire-types/src/transactions.rs
@@ -1,12 +1,11 @@
 //! Implements the `GetPooledTransactions` and `PooledTransactions` message types.
 
+use alloy_eips::eip2718::Encodable2718;
 use alloy_primitives::B256;
 use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper};
 use derive_more::{Constructor, Deref, IntoIterator};
 use reth_codecs_derive::add_arbitrary_tests;
-use reth_primitives::{
-    transaction::TransactionConversionError, PooledTransactionsElement, TransactionSigned,
-};
+use reth_primitives::PooledTransactionsElement;
 
 /// A list of transaction hashes that the peer would like transaction bodies for.
 #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)]
@@ -42,46 +41,54 @@ where
     Eq,
     RlpEncodableWrapper,
     RlpDecodableWrapper,
-    Default,
     IntoIterator,
     Deref,
     Constructor,
 )]
 #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
-pub struct PooledTransactions(
+pub struct PooledTransactions<T = PooledTransactionsElement>(
     /// The transaction bodies, each of which should correspond to a requested hash.
-    pub Vec<PooledTransactionsElement>,
+    pub Vec<T>,
 );
 
-impl PooledTransactions {
+impl<T: Encodable2718> PooledTransactions<T> {
     /// Returns an iterator over the transaction hashes in this response.
- pub fn hashes(&self) -> impl Iterator + '_ { - self.0.iter().map(|tx| tx.hash()) + pub fn hashes(&self) -> impl Iterator + '_ { + self.0.iter().map(|tx| tx.trie_hash()) } } -impl TryFrom> for PooledTransactions { - type Error = TransactionConversionError; +impl TryFrom> for PooledTransactions +where + T: TryFrom, +{ + type Error = T::Error; - fn try_from(txs: Vec) -> Result { - txs.into_iter().map(PooledTransactionsElement::try_from).collect() + fn try_from(txs: Vec) -> Result { + txs.into_iter().map(T::try_from).collect() } } -impl FromIterator for PooledTransactions { - fn from_iter>(iter: I) -> Self { +impl FromIterator for PooledTransactions { + fn from_iter>(iter: I) -> Self { Self(iter.into_iter().collect()) } } +impl Default for PooledTransactions { + fn default() -> Self { + Self(Default::default()) + } +} + #[cfg(test)] mod tests { use crate::{message::RequestPair, GetPooledTransactions, PooledTransactions}; use alloy_consensus::{TxEip1559, TxLegacy}; - use alloy_primitives::{hex, Parity, TxKind, U256}; + use alloy_primitives::{hex, PrimitiveSignature as Signature, TxKind, U256}; use alloy_rlp::{Decodable, Encodable}; use reth_chainspec::MIN_TRANSACTION_GAS; - use reth_primitives::{PooledTransactionsElement, Signature, Transaction, TransactionSigned}; + use reth_primitives::{PooledTransactionsElement, Transaction, TransactionSigned}; use std::str::FromStr; #[test] @@ -89,7 +96,7 @@ mod tests { fn encode_get_pooled_transactions() { let expected = hex!("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"); let mut data = vec![]; - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: GetPooledTransactions(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -107,7 +114,7 @@ mod tests { let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!( request, - RequestPair:: { + RequestPair { request_id: 1111, message: GetPooledTransactions(vec![ hex!("00000000000000000000000000000000000000000000000000000000deadc0de").into(), @@ -123,7 +130,7 @@ mod tests { let expected = hex!("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"); let mut data = vec![]; let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x8u64, @@ -142,10 +149,10 @@ mod tests { "0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10", ) .unwrap(), - Parity::Parity(false), + false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x09u64, @@ -164,7 +171,7 @@ mod tests { "0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb", ) .unwrap(), - Parity::Parity(false), + false, ), ), ]; @@ -175,7 +182,7 @@ mod tests { .expect("Failed to convert TransactionSigned to PooledTransactionsElement") }) .collect(); - let request = RequestPair:: { + let request = RequestPair { request_id: 1111, message: PooledTransactions(message), /* Assuming 
PooledTransactions wraps a * Vec */ @@ -189,7 +196,7 @@ mod tests { fn decode_pooled_transactions() { let data = hex!("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"); let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x8u64, @@ -208,10 +215,10 @@ mod tests { "0x64b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10", ) .unwrap(), - Parity::Eip155(37), + false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(1), nonce: 0x09u64, @@ -230,7 +237,7 @@ mod tests { "0x52f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb", ) .unwrap(), - Parity::Eip155(37), + false, ), ), ]; @@ -241,10 +248,7 @@ mod tests { .expect("Failed to convert TransactionSigned to PooledTransactionsElement") }) .collect(); - let expected = RequestPair:: { - request_id: 1111, - message: PooledTransactions(message), - }; + let expected = RequestPair { request_id: 1111, message: PooledTransactions(message) }; let request = RequestPair::::decode(&mut &data[..]).unwrap(); assert_eq!(request, expected); @@ -256,7 +260,7 @@ mod tests { let decoded_transactions = RequestPair::::decode(&mut &data[..]).unwrap(); let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 15u64, @@ -275,10 +279,10 @@ mod tests { "0x612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860", ) .unwrap(), - Parity::Eip155(44), + true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: 4, nonce: 26u64, @@ -299,10 +303,10 @@ mod tests { "0x016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469", ) .unwrap(), - Parity::Parity(true), + true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 3u64, @@ -321,10 +325,10 @@ mod tests { "0x3ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88", ) .unwrap(), - Parity::Eip155(43), + false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 1u64, @@ -343,10 +347,10 @@ mod tests { "0x5406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631da", ) .unwrap(), - Parity::Eip155(43), + false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 2u64, @@ -365,7 +369,7 @@ mod tests { "0x3a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18", ) .unwrap(), - Parity::Eip155(43), + false, ), ), ]; @@ -376,10 +380,8 @@ mod tests { .expect("Failed to convert TransactionSigned to PooledTransactionsElement") }) .collect(); - let expected_transactions = RequestPair:: { - request_id: 0, - message: PooledTransactions(message), - }; + let expected_transactions = + RequestPair { request_id: 0, message: 
PooledTransactions(message) }; // checking tx by tx for easier debugging if there are any regressions for (decoded, expected) in @@ -395,7 +397,7 @@ mod tests { fn encode_pooled_transactions_network() { let expected = hex!("f9022980f90225f8650f84832156008287fb94cf7f9e66af820a19257a2108375b180b0ec491678204d2802ca035b7bfeb9ad9ece2cbafaaf8e202e706b4cfaeb233f46198f00b44d4a566a981a0612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860b87502f872041a8459682f008459682f0d8252089461815774383099e24810ab832a5b2a5425c154d58829a2241af62c000080c001a059e6b67f48fb32e7e570dfb11e042b5ad2e55e3ce3ce9cd989c7e06e07feeafda0016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469f86b0384773594008398968094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba0ce6834447c0a4193c40382e6c57ae33b241379c5418caac9cdc18d786fd12071a03ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88f86b01843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac3960468702769bb01b2a00802ba0e24d8bd32ad906d6f8b8d7741e08d1959df021698b19ee232feba15361587d0aa05406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631daf86b02843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba00eb96ca19e8a77102767a41fc85a36afd5c61ccb09911cec5d3e86e193d9c5aea03a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18"); let txs = vec![ - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 15u64, @@ -414,10 +416,10 @@ mod tests { "0x612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860", ) .unwrap(), - Parity::Parity(true), + true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: 4, nonce: 26u64, @@ -438,10 +440,10 @@ mod tests { "0x016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469", ) .unwrap(), - Parity::Parity(true), + true, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 3u64, @@ -460,10 +462,10 @@ mod tests { "0x3ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88", ) .unwrap(), - Parity::Parity(false), + false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 1u64, @@ -482,10 +484,10 @@ mod tests { "0x5406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631da", ) .unwrap(), - Parity::Parity(false), + false, ), ), - TransactionSigned::from_transaction_and_signature( + TransactionSigned::new_unhashed( Transaction::Legacy(TxLegacy { chain_id: Some(4), nonce: 2u64, @@ -504,7 +506,7 @@ mod tests { "0x3a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18", ) .unwrap(), - Parity::Parity(false), + false, ), ), ]; @@ -515,10 +517,7 @@ mod tests { .expect("Failed to convert TransactionSigned to PooledTransactionsElement") }) .collect(); - let transactions = RequestPair:: { - request_id: 0, - message: PooledTransactions(message), - }; + let transactions = RequestPair { request_id: 0, message: PooledTransactions(message) }; let mut encoded = vec![]; transactions.encode(&mut encoded); diff --git a/crates/net/eth-wire-types/src/version.rs b/crates/net/eth-wire-types/src/version.rs index 4fd3e792dcc..40d51cb5518 100644 --- a/crates/net/eth-wire-types/src/version.rs +++ b/crates/net/eth-wire-types/src/version.rs @@ -15,15 +15,17 @@ pub struct ParseVersionError(String); 
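
The generic `TryFrom<Vec<TransactionSigned>>` impl above means the conversion into the pooled wire representation is written once for any target type that implements `TryFrom<TransactionSigned>`. A sketch of the default instantiation:

```rust
use reth_eth_wire_types::PooledTransactions;
use reth_primitives::{PooledTransactionsElement, TransactionSigned};

/// Sketch: convert broadcast transactions into the pooled wire form, failing
/// on transactions that have no pooled representation.
fn to_pooled(
    txs: Vec<TransactionSigned>,
) -> Result<PooledTransactions, <PooledTransactionsElement as TryFrom<TransactionSigned>>::Error> {
    // `PooledTransactions` defaults `T` to `PooledTransactionsElement`.
    PooledTransactions::try_from(txs)
}
```
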
 /// The `eth` protocol version.
 #[repr(u8)]
 #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Display)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
 pub enum EthVersion {
     /// The `eth` protocol version 66.
     Eth66 = 66,
-
     /// The `eth` protocol version 67.
     Eth67 = 67,
-
     /// The `eth` protocol version 68.
     Eth68 = 68,
+    /// The `eth` protocol version 69.
+    Eth69 = 69,
 }
 
 impl EthVersion {
@@ -38,6 +40,8 @@
             // eth/67,68 are eth/66 minus GetNodeData and NodeData messages
             13
         }
+            // eth69 is both eth67 and eth68 minus NewBlockHashes and NewBlock
+            Self::Eth69 => 11,
         }
     }
@@ -55,6 +59,31 @@
     pub const fn is_eth68(&self) -> bool {
         matches!(self, Self::Eth68)
     }
+
+    /// Returns true if the version is eth/69
+    pub const fn is_eth69(&self) -> bool {
+        matches!(self, Self::Eth69)
+    }
+}
+
+/// RLP encodes `EthVersion` as a single byte (66-69).
+impl Encodable for EthVersion {
+    fn encode(&self, out: &mut dyn BufMut) {
+        (*self as u8).encode(out)
+    }
+
+    fn length(&self) -> usize {
+        (*self as u8).length()
+    }
+}
+
+/// RLP decodes a single byte into `EthVersion`.
+/// Returns error if byte is not a valid version (66-69).
+impl Decodable for EthVersion {
+    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
+        let version = u8::decode(buf)?;
+        Self::try_from(version).map_err(|_| RlpError::Custom("invalid eth version"))
+    }
 }
 
 /// Allow for converting from a `&str` to an `EthVersion`.
@@ -75,6 +104,7 @@
             "66" => Ok(Self::Eth66),
             "67" => Ok(Self::Eth67),
             "68" => Ok(Self::Eth68),
+            "69" => Ok(Self::Eth69),
             _ => Err(ParseVersionError(s.to_string())),
         }
     }
@@ -98,6 +128,7 @@ impl TryFrom<u8> for EthVersion {
             66 => Ok(Self::Eth66),
             67 => Ok(Self::Eth67),
             68 => Ok(Self::Eth68),
+            69 => Ok(Self::Eth69),
             _ => Err(ParseVersionError(u.to_string())),
         }
     }
@@ -126,6 +157,7 @@ impl From<EthVersion> for &'static str {
             EthVersion::Eth66 => "66",
             EthVersion::Eth67 => "67",
             EthVersion::Eth68 => "68",
+            EthVersion::Eth69 => "69",
         }
     }
 }
@@ -173,13 +205,16 @@ impl Decodable for ProtocolVersion {
 #[cfg(test)]
 mod tests {
     use super::{EthVersion, ParseVersionError};
+    use alloy_rlp::{Decodable, Encodable, Error as RlpError};
+    use bytes::BytesMut;
 
     #[test]
     fn test_eth_version_try_from_str() {
         assert_eq!(EthVersion::Eth66, EthVersion::try_from("66").unwrap());
         assert_eq!(EthVersion::Eth67, EthVersion::try_from("67").unwrap());
         assert_eq!(EthVersion::Eth68, EthVersion::try_from("68").unwrap());
-        assert_eq!(Err(ParseVersionError("69".to_string())), EthVersion::try_from("69"));
+        assert_eq!(EthVersion::Eth69, EthVersion::try_from("69").unwrap());
+        assert_eq!(Err(ParseVersionError("70".to_string())), EthVersion::try_from("70"));
     }
 
     #[test]
@@ -187,6 +222,48 @@
         assert_eq!(EthVersion::Eth66, "66".parse().unwrap());
         assert_eq!(EthVersion::Eth67, "67".parse().unwrap());
         assert_eq!(EthVersion::Eth68, "68".parse().unwrap());
-        assert_eq!(Err(ParseVersionError("69".to_string())), "69".parse::<EthVersion>());
+        assert_eq!(EthVersion::Eth69, "69".parse().unwrap());
+        assert_eq!(Err(ParseVersionError("70".to_string())), "70".parse::<EthVersion>());
+    }
+
+    #[test]
+    fn test_eth_version_rlp_encode() {
+        let versions = [EthVersion::Eth66, EthVersion::Eth67, EthVersion::Eth68, EthVersion::Eth69];
+
+        for version in versions {
+            let mut encoded = BytesMut::new();
+            version.encode(&mut encoded);
+
+            assert_eq!(encoded.len(), 1);
+            assert_eq!(encoded[0], version as u8);
+        }
+    }
+
+    #[test]
+    fn test_eth_version_rlp_decode() {
+        let test_cases = [
+            (66_u8, Ok(EthVersion::Eth66)),
+            (67_u8, Ok(EthVersion::Eth67)),
+            (68_u8, Ok(EthVersion::Eth68)),
+            (69_u8, Ok(EthVersion::Eth69)),
+            (70_u8, Err(RlpError::Custom("invalid eth version"))),
+            (65_u8, Err(RlpError::Custom("invalid eth version"))),
+        ];
+
+        for (input, expected) in test_cases {
+            let mut encoded = BytesMut::new();
+            input.encode(&mut encoded);
+
+            let mut slice = encoded.as_ref();
+            let result = EthVersion::decode(&mut slice);
+            assert_eq!(result, expected);
+        }
+    }
+
+    #[test]
+    fn test_eth_version_total_messages() {
+        assert_eq!(EthVersion::Eth66.total_messages(), 15);
+        assert_eq!(EthVersion::Eth67.total_messages(), 13);
+        assert_eq!(EthVersion::Eth68.total_messages(), 13);
+        assert_eq!(EthVersion::Eth69.total_messages(), 11);
+    }
 }
diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml
index 6eea4bc4ac6..3dd632de5c0 100644
--- a/crates/net/eth-wire/Cargo.toml
+++ b/crates/net/eth-wire/Cargo.toml
@@ -13,16 +13,17 @@ workspace = true
 
 [dependencies]
 # reth
-reth-chainspec.workspace = true
 reth-codecs.workspace = true
-reth-primitives.workspace = true
+reth-primitives-traits.workspace = true
 reth-ecies.workspace = true
 alloy-rlp = { workspace = true, features = ["derive"] }
 reth-eth-wire-types.workspace = true
 reth-network-peers.workspace = true
+reth-ethereum-forks.workspace = true
 
 # ethereum
 alloy-primitives.workspace = true
+alloy-chains.workspace = true
 
 # metrics
 reth-metrics.workspace = true
@@ -44,6 +45,7 @@ arbitrary = { workspace = true, features = ["derive"], optional = true }
 
 [dev-dependencies]
 reth-primitives = { workspace = true, features = ["arbitrary"] }
+reth-primitives-traits = { workspace = true, features = ["arbitrary"] }
 reth-eth-wire-types = { workspace = true, features = ["arbitrary"] }
 reth-tracing.workspace = true
 
@@ -66,11 +68,29 @@ alloy-eips.workspace = true
 
 [features]
 arbitrary = [
-    "reth-primitives/arbitrary",
-    "reth-eth-wire-types/arbitrary",
-    "dep:arbitrary",
+    "reth-eth-wire-types/arbitrary",
+    "dep:arbitrary",
+    "alloy-eips/arbitrary",
+    "alloy-primitives/arbitrary",
+    "reth-codecs/arbitrary",
+    "alloy-chains/arbitrary",
+    "reth-primitives-traits/arbitrary",
+    "reth-ethereum-forks/arbitrary",
+    "reth-primitives/arbitrary"
+]
+serde = [
+    "dep:serde",
+    "reth-eth-wire-types/serde",
+    "alloy-eips/serde",
+    "alloy-primitives/serde",
+    "bytes/serde",
+    "rand/serde",
+    "secp256k1/serde",
+    "reth-codecs/serde",
+    "alloy-chains/serde",
+    "reth-primitives-traits/serde",
+    "reth-ethereum-forks/serde"
 ]
-serde = ["dep:serde", "reth-eth-wire-types/serde"]
 
 [[test]]
 name = "fuzz_roundtrip"
diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs
index d60e500744c..0dc9119ce88 100644
--- a/crates/net/eth-wire/src/capability.rs
+++ b/crates/net/eth-wire/src/capability.rs
@@ -5,10 +5,11 @@ use crate::{
     p2pstream::MAX_RESERVED_MESSAGE_ID,
     protocol::{ProtoVersion, Protocol},
     version::ParseVersionError,
-    Capability, EthMessage, EthMessageID, EthVersion,
+    Capability, EthMessageID, EthVersion,
 };
 use alloy_primitives::bytes::Bytes;
 use derive_more::{Deref, DerefMut};
+use reth_eth_wire_types::{EthMessage, EthNetworkPrimitives, NetworkPrimitives};
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
 use std::{
@@ -22,18 +23,38 @@ use std::{
 pub struct RawCapabilityMessage {
     /// Identifier of the message.
     pub id: usize,
-    /// Actual payload
+    /// Actual __encoded__ payload
     pub payload: Bytes,
 }
 
+impl RawCapabilityMessage {
+    /// Creates a new capability message with the given id and payload.
+    pub const fn new(id: usize, payload: Bytes) -> Self {
+        Self { id, payload }
+    }
+
+    /// Creates a raw message for the eth sub-protocol.
+    ///
+    /// Caller must ensure that the rlp encoded `payload` matches the given `id`.
+    ///
+    /// See also [`EthMessage`]
+    pub const fn eth(id: EthMessageID, payload: Bytes) -> Self {
+        Self::new(id as usize, payload)
+    }
+}
+
 /// Various protocol related event types bubbled up from a session that need to be handled by the
 /// network.
 #[derive(Debug)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-pub enum CapabilityMessage {
+pub enum CapabilityMessage<N: NetworkPrimitives = EthNetworkPrimitives> {
     /// Eth sub-protocol message.
-    Eth(EthMessage),
-    /// Any other capability message.
+    #[cfg_attr(
+        feature = "serde",
+        serde(bound = "EthMessage<N>: Serialize + serde::de::DeserializeOwned")
+    )]
+    Eth(EthMessage<N>),
+    /// Any other or manually crafted eth message.
     Other(RawCapabilityMessage),
 }
 
@@ -314,7 +335,7 @@ pub fn shared_capability_offsets(
         // highest wins, others are ignored
         if shared_capabilities
             .get(&peer_capability.name)
-            .map_or(true, |v| peer_capability.version > v.version)
+            .is_none_or(|v| peer_capability.version > v.version)
         {
             shared_capabilities.insert(
                 peer_capability.name.clone(),
diff --git a/crates/net/eth-wire/src/errors/eth.rs b/crates/net/eth-wire/src/errors/eth.rs
index 557fbd66a00..499ff8089bf 100644
--- a/crates/net/eth-wire/src/errors/eth.rs
+++ b/crates/net/eth-wire/src/errors/eth.rs
@@ -3,9 +3,11 @@
 use crate::{
     errors::P2PStreamError, message::MessageError, version::ParseVersionError, DisconnectReason,
 };
+use alloy_chains::Chain;
 use alloy_primitives::B256;
-use reth_chainspec::Chain;
-use reth_primitives::{GotExpected, GotExpectedBoxed, ValidationError};
+use reth_eth_wire_types::EthVersion;
+use reth_ethereum_forks::ValidationError;
+use reth_primitives_traits::{GotExpected, GotExpectedBoxed};
 use std::io;
 
 /// Errors when sending/receiving messages
@@ -88,7 +90,7 @@ pub enum EthHandshakeError {
     MismatchedGenesis(GotExpectedBoxed<B256>),
     #[error("mismatched protocol version in status message: {0}")]
     /// Mismatched protocol versions in status messages.
-    MismatchedProtocolVersion(GotExpected<u8>),
+    MismatchedProtocolVersion(GotExpected<EthVersion>),
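
A short sketch of the new `RawCapabilityMessage::eth` constructor; the payload here is a hypothetical empty RLP list, and it is the caller's job to keep the payload consistent with the chosen id:

```rust
use alloy_primitives::bytes::Bytes;
use reth_eth_wire_types::EthMessageID;

/// Hypothetical: a hand-rolled `Receipts` message carrying an empty list.
fn empty_receipts_response() -> RawCapabilityMessage {
    let payload: Bytes = alloy_rlp::encode::<Vec<u8>>(vec![]).into();
    RawCapabilityMessage::eth(EthMessageID::Receipts, payload)
}
```
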
     #[error("mismatched chain in status message: {0}")]
     /// Mismatch in chain details in status messages.
     MismatchedChain(GotExpected<Chain>),
diff --git a/crates/net/eth-wire/src/errors/p2p.rs b/crates/net/eth-wire/src/errors/p2p.rs
index 2cfef926984..f24e2cebc78 100644
--- a/crates/net/eth-wire/src/errors/p2p.rs
+++ b/crates/net/eth-wire/src/errors/p2p.rs
@@ -3,7 +3,7 @@
 use std::io;
 
 use reth_eth_wire_types::{DisconnectReason, UnknownDisconnectReason};
-use reth_primitives::GotExpected;
+use reth_primitives_traits::GotExpected;
 
 use crate::{capability::SharedCapabilityError, ProtocolVersion};
diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs
index 9deca99fb58..ccc80594b60 100644
--- a/crates/net/eth-wire/src/ethstream.rs
+++ b/crates/net/eth-wire/src/ethstream.rs
@@ -1,13 +1,18 @@
 use crate::{
+    capability::RawCapabilityMessage,
     errors::{EthHandshakeError, EthStreamError},
     message::{EthBroadcastMessage, ProtocolBroadcastMessage},
     p2pstream::HANDSHAKE_TIMEOUT,
-    CanDisconnect, DisconnectReason, EthMessage, EthVersion, ProtocolMessage, Status,
+    CanDisconnect, DisconnectReason, EthMessage, EthNetworkPrimitives, EthVersion, ProtocolMessage,
+    Status,
 };
 use alloy_primitives::bytes::{Bytes, BytesMut};
+use alloy_rlp::Encodable;
 use futures::{ready, Sink, SinkExt, StreamExt};
 use pin_project::pin_project;
-use reth_primitives::{ForkFilter, GotExpected};
+use reth_eth_wire_types::NetworkPrimitives;
+use reth_ethereum_forks::ForkFilter;
+use reth_primitives_traits::GotExpected;
 use std::{
     pin::Pin,
     task::{Context, Poll},
@@ -21,6 +26,9 @@ use tracing::{debug, trace};
 // https://github.com/ethereum/go-ethereum/blob/30602163d5d8321fbc68afdcbbaf2362b2641bde/eth/protocols/eth/protocol.go#L50
 pub const MAX_MESSAGE_SIZE: usize = 10 * 1024 * 1024;
 
+/// [`MAX_STATUS_SIZE`] is the maximum cap on the size of the initial status message
+pub(crate) const MAX_STATUS_SIZE: usize = 500 * 1024;
+
 /// An un-authenticated [`EthStream`]. This is consumed and returns a [`EthStream`] after the
 /// `Status` handshake is completed.
 #[pin_project]
@@ -50,32 +58,32 @@ where
     /// Consumes the [`UnauthedEthStream`] and returns an [`EthStream`] after the `Status`
     /// handshake is completed successfully. This also returns the `Status` message sent by the
     /// remote peer.
-    pub async fn handshake(
+    pub async fn handshake<N: NetworkPrimitives>(
         self,
         status: Status,
         fork_filter: ForkFilter,
-    ) -> Result<(EthStream<S>, Status), EthStreamError> {
+    ) -> Result<(EthStream<S, N>, Status), EthStreamError> {
         self.handshake_with_timeout(status, fork_filter, HANDSHAKE_TIMEOUT).await
     }
 
     /// Wrapper around handshake which enforces a timeout.
-    pub async fn handshake_with_timeout(
+    pub async fn handshake_with_timeout<N: NetworkPrimitives>(
         self,
         status: Status,
         fork_filter: ForkFilter,
         timeout_limit: Duration,
-    ) -> Result<(EthStream<S>, Status), EthStreamError> {
+    ) -> Result<(EthStream<S, N>, Status), EthStreamError> {
         timeout(timeout_limit, Self::handshake_without_timeout(self, status, fork_filter))
             .await
             .map_err(|_| EthStreamError::StreamTimeout)?
     }
 
     /// Handshake with no timeout
-    pub async fn handshake_without_timeout(
+    pub async fn handshake_without_timeout<N: NetworkPrimitives>(
         mut self,
         status: Status,
         fork_filter: ForkFilter,
-    ) -> Result<(EthStream<S>, Status), EthStreamError> {
+    ) -> Result<(EthStream<S, N>, Status), EthStreamError> {
         trace!(
             %status,
             "sending eth status to peer"
@@ -84,7 +92,10 @@
         // we need to encode and decode here on our own because we don't have an `EthStream` yet
         // The max length for a status with TTD is:
         self.inner
-            .send(alloy_rlp::encode(ProtocolMessage::from(EthMessage::Status(status))).into())
+            .send(
+                alloy_rlp::encode(ProtocolMessage::<N>::from(EthMessage::<N>::Status(status)))
+                    .into(),
+            )
             .await?;
 
         let their_msg_res = self.inner.next().await;
@@ -97,13 +108,13 @@
             }
         }?;
 
-        if their_msg.len() > MAX_MESSAGE_SIZE {
+        if their_msg.len() > MAX_STATUS_SIZE {
             self.inner.disconnect(DisconnectReason::ProtocolBreach).await?;
             return Err(EthStreamError::MessageTooBig(their_msg.len()))
         }
 
-        let version = EthVersion::try_from(status.version)?;
-        let msg = match ProtocolMessage::decode_message(version, &mut their_msg.as_ref()) {
+        let version = status.version;
+        let msg = match ProtocolMessage::<N>::decode_message(version, &mut their_msg.as_ref()) {
             Ok(m) => m,
             Err(err) => {
                 debug!("decode error in eth handshake: msg={their_msg:x}");
@@ -184,19 +195,21 @@
 /// compatible with eth-networking protocol messages, which get RLP encoded/decoded.
 #[pin_project]
 #[derive(Debug)]
-pub struct EthStream<S> {
+pub struct EthStream<S, N = EthNetworkPrimitives> {
     /// Negotiated eth version.
     version: EthVersion,
     #[pin]
     inner: S,
+
+    _pd: std::marker::PhantomData<N>,
 }
 
-impl<S> EthStream<S> {
+impl<S, N> EthStream<S, N> {
     /// Creates a new unauthed [`EthStream`] from a provided stream. You will need
     /// to manually handshake a peer.
     #[inline]
     pub const fn new(version: EthVersion, inner: S) -> Self {
-        Self { version, inner }
+        Self { version, inner, _pd: std::marker::PhantomData }
     }
 
     /// Returns the eth version.
@@ -224,15 +237,16 @@
     }
 }
 
-impl<S> EthStream<S>
+impl<S, N> EthStream<S, N>
 where
     S: Sink<Bytes> + Unpin,
     EthStreamError: From<S::Error>,
+    N: NetworkPrimitives,
 {
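
Usage-wise, the handshake is now generic over the primitive types, so callers pin them with a turbofish. A fragment only; `p2p_stream`, `status`, and `fork_filter` are assumed to exist, mirroring the updated tests below:

```rust
// Sketch: authenticate the eth sub-protocol over an established p2p stream,
// choosing the Ethereum wiring explicitly.
let (eth_stream, their_status) = UnauthedEthStream::new(p2p_stream)
    .handshake::<EthNetworkPrimitives>(status, fork_filter)
    .await?;
```
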
     /// Same as [`Sink::start_send`] but accepts a [`EthBroadcastMessage`] instead.
     pub fn start_send_broadcast(
         &mut self,
-        item: EthBroadcastMessage,
+        item: EthBroadcastMessage<N>,
     ) -> Result<(), EthStreamError> {
         self.inner.start_send_unpin(Bytes::from(alloy_rlp::encode(
             ProtocolBroadcastMessage::from(item),
@@ -240,14 +254,25 @@
         Ok(())
     }
+
+    /// Sends a raw capability message directly over the stream
+    pub fn start_send_raw(&mut self, msg: RawCapabilityMessage) -> Result<(), EthStreamError> {
+        let mut bytes = Vec::new();
+        msg.id.encode(&mut bytes);
+        bytes.extend_from_slice(&msg.payload);
+
+        self.inner.start_send_unpin(bytes.into())?;
+        Ok(())
+    }
 }
 
-impl<S, E> Stream for EthStream<S>
+impl<S, E, N> Stream for EthStream<S, N>
 where
     S: Stream<Item = Result<BytesMut, E>> + Unpin,
     EthStreamError: From<E>,
+    N: NetworkPrimitives,
 {
-    type Item = Result<EthMessage, EthStreamError>;
+    type Item = Result<EthMessage<N>, EthStreamError>;
 
     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         let this = self.project();
@@ -289,10 +314,11 @@
     }
 }
 
-impl<S> Sink<EthMessage> for EthStream<S>
+impl<S, N> Sink<EthMessage<N>> for EthStream<S, N>
 where
     S: CanDisconnect<Bytes> + Unpin,
     EthStreamError: From<<S as Sink<Bytes>>::Error>,
+    N: NetworkPrimitives,
 {
     type Error = EthStreamError;
 
@@ -300,7 +326,7 @@
         self.project().inner.poll_ready(cx).map_err(Into::into)
     }
 
-    fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> {
+    fn start_send(self: Pin<&mut Self>, item: EthMessage<N>) -> Result<(), Self::Error> {
         if matches!(item, EthMessage::Status(_)) {
             // TODO: to disconnect here we would need to do something similar to P2PStream's
             // start_disconnect, which would ideally be a part of the CanDisconnect trait, or at
@@ -330,10 +356,11 @@
     }
 }
 
-impl<S> CanDisconnect<EthMessage> for EthStream<S>
+impl<S, N> CanDisconnect<EthMessage<N>> for EthStream<S, N>
 where
     S: CanDisconnect<Bytes> + Send,
     EthStreamError: From<<S as Sink<Bytes>>::Error>,
+    N: NetworkPrimitives,
 {
     async fn disconnect(&mut self, reason: DisconnectReason) -> Result<(), EthStreamError> {
         self.inner.disconnect(reason).await.map_err(Into::into)
@@ -346,17 +373,20 @@ mod tests {
     use crate::{
         broadcast::BlockHashNumber,
         errors::{EthHandshakeError, EthStreamError},
+        ethstream::RawCapabilityMessage,
         hello::DEFAULT_TCP_PORT,
         p2pstream::UnauthedP2PStream,
         EthMessage, EthStream, EthVersion, HelloMessageWithProtocols, PassthroughCodec,
         ProtocolVersion, Status,
     };
-    use alloy_primitives::{B256, U256};
+    use alloy_chains::NamedChain;
+    use alloy_primitives::{bytes::Bytes, B256, U256};
+    use alloy_rlp::Decodable;
     use futures::{SinkExt, StreamExt};
-    use reth_chainspec::NamedChain;
     use reth_ecies::stream::ECIESStream;
+    use reth_eth_wire_types::EthNetworkPrimitives;
+    use reth_ethereum_forks::{ForkFilter, Head};
     use reth_network_peers::pk2id;
-    use reth_primitives::{ForkFilter, Head};
     use secp256k1::{SecretKey, SECP256K1};
     use std::time::Duration;
     use tokio::net::{TcpListener, TcpStream};
@@ -368,7 +398,7 @@
         let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new());
 
         let status = Status {
-            version: EthVersion::Eth67 as u8,
+            version: EthVersion::Eth67,
             chain: NamedChain::Mainnet.into(),
             total_difficulty: U256::ZERO,
             blockhash: B256::random(),
@@ -387,7 +417,7 @@
             let (incoming, _) = listener.accept().await.unwrap();
             let stream = PassthroughCodec::default().framed(incoming);
             let (_, their_status) = UnauthedEthStream::new(stream)
-                .handshake(status_clone, fork_filter_clone)
+                .handshake::<EthNetworkPrimitives>(status_clone, fork_filter_clone)
                 .await
                 .unwrap();
 
@@ -399,8 +429,10 @@
         let sink = PassthroughCodec::default().framed(outgoing);
 
         // try to connect
-        let (_, their_status) =
-            UnauthedEthStream::new(sink).handshake(status, fork_filter).await.unwrap();
+        let (_,
their_status) = UnauthedEthStream::new(sink) + .handshake::(status, fork_filter) + .await + .unwrap(); // their status is a clone of our status, these should be equal assert_eq!(their_status, status); @@ -415,7 +447,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::from(2).pow(U256::from(100)) - U256::from(1), blockhash: B256::random(), @@ -434,7 +466,7 @@ mod tests { let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); let (_, their_status) = UnauthedEthStream::new(stream) - .handshake(status_clone, fork_filter_clone) + .handshake::(status_clone, fork_filter_clone) .await .unwrap(); @@ -446,8 +478,10 @@ mod tests { let sink = PassthroughCodec::default().framed(outgoing); // try to connect - let (_, their_status) = - UnauthedEthStream::new(sink).handshake(status, fork_filter).await.unwrap(); + let (_, their_status) = UnauthedEthStream::new(sink) + .handshake::(status, fork_filter) + .await + .unwrap(); // their status is a clone of our status, these should be equal assert_eq!(their_status, status); @@ -462,7 +496,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::from(2).pow(U256::from(100)), blockhash: B256::random(), @@ -480,8 +514,9 @@ mod tests { // roughly based off of the design of tokio::net::TcpListener let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); - let handshake_res = - UnauthedEthStream::new(stream).handshake(status_clone, fork_filter_clone).await; + let handshake_res = UnauthedEthStream::new(stream) + .handshake::(status_clone, fork_filter_clone) + .await; // make sure the handshake fails due to td too high assert!(matches!( @@ -496,7 +531,9 @@ mod tests { let sink = PassthroughCodec::default().framed(outgoing); // try to connect - let handshake_res = UnauthedEthStream::new(sink).handshake(status, fork_filter).await; + let handshake_res = UnauthedEthStream::new(sink) + .handshake::(status, fork_filter) + .await; // this handshake should also fail due to td too high assert!(matches!( @@ -514,7 +551,7 @@ mod tests { async fn can_write_and_read_cleartext() { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); - let test_msg = EthMessage::NewBlockHashes( + let test_msg = EthMessage::::NewBlockHashes( vec![ BlockHashNumber { hash: B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, @@ -549,7 +586,7 @@ mod tests { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); let server_key = SecretKey::new(&mut rand::thread_rng()); - let test_msg = EthMessage::NewBlockHashes( + let test_msg = EthMessage::::NewBlockHashes( vec![ BlockHashNumber { hash: B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, @@ -591,7 +628,7 @@ mod tests { let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); let local_addr = listener.local_addr().unwrap(); let server_key = SecretKey::new(&mut rand::thread_rng()); - let test_msg = EthMessage::NewBlockHashes( + let test_msg = EthMessage::::NewBlockHashes( vec![ BlockHashNumber { 
hash: B256::random(), number: 5 }, BlockHashNumber { hash: B256::random(), number: 6 }, @@ -603,7 +640,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::ZERO, blockhash: B256::random(), @@ -674,7 +711,7 @@ mod tests { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), total_difficulty: U256::ZERO, blockhash: B256::random(), @@ -695,7 +732,7 @@ mod tests { let (incoming, _) = listener.accept().await.unwrap(); let stream = PassthroughCodec::default().framed(incoming); let (_, their_status) = UnauthedEthStream::new(stream) - .handshake(status_clone, fork_filter_clone) + .handshake::(status_clone, fork_filter_clone) .await .unwrap(); @@ -708,7 +745,11 @@ mod tests { // try to connect let handshake_result = UnauthedEthStream::new(sink) - .handshake_with_timeout(status, fork_filter, Duration::from_secs(1)) + .handshake_with_timeout::( + status, + fork_filter, + Duration::from_secs(1), + ) .await; // Assert that a timeout error occurred @@ -716,4 +757,39 @@ mod tests { matches!(handshake_result, Err(e) if e.to_string() == EthStreamError::StreamTimeout.to_string()) ); } + + #[tokio::test] + async fn can_write_and_read_raw_capability() { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + let local_addr = listener.local_addr().unwrap(); + + let test_msg = RawCapabilityMessage { id: 0x1234, payload: Bytes::from(vec![1, 2, 3, 4]) }; + + let test_msg_clone = test_msg.clone(); + let handle = tokio::spawn(async move { + let (incoming, _) = listener.accept().await.unwrap(); + let stream = PassthroughCodec::default().framed(incoming); + let mut stream = EthStream::<_, EthNetworkPrimitives>::new(EthVersion::Eth67, stream); + + let bytes = stream.inner_mut().next().await.unwrap().unwrap(); + + // Create a cursor to track position while decoding + let mut id_bytes = &bytes[..]; + let decoded_id = ::decode(&mut id_bytes).unwrap(); + assert_eq!(decoded_id, test_msg_clone.id); + + // Get remaining bytes after ID decoding + let remaining = id_bytes; + assert_eq!(remaining, &test_msg_clone.payload[..]); + }); + + let outgoing = TcpStream::connect(local_addr).await.unwrap(); + let sink = PassthroughCodec::default().framed(outgoing); + let mut client_stream = EthStream::<_, EthNetworkPrimitives>::new(EthVersion::Eth67, sink); + + client_stream.start_send_raw(test_msg).unwrap(); + client_stream.inner_mut().flush().await.unwrap(); + + handle.await.unwrap(); + } } diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index 2eb42eaeb49..5d7650b4b7b 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -2,7 +2,7 @@ use crate::{Capability, EthVersion, ProtocolVersion}; use alloy_rlp::{RlpDecodable, RlpEncodable}; use reth_codecs::add_arbitrary_tests; use reth_network_peers::PeerId; -use reth_primitives::constants::RETH_CLIENT_VERSION; +use reth_primitives_traits::constants::RETH_CLIENT_VERSION; /// The default tcp port for p2p. 
/// diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index d1d977aba78..e46563cad48 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -24,7 +24,8 @@ use crate::{ }; use bytes::{Bytes, BytesMut}; use futures::{Sink, SinkExt, Stream, StreamExt, TryStream, TryStreamExt}; -use reth_primitives::ForkFilter; +use reth_eth_wire_types::NetworkPrimitives; +use reth_ethereum_forks::ForkFilter; use tokio::sync::{mpsc, mpsc::UnboundedSender}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -204,11 +205,11 @@ impl RlpxProtocolMultiplexer { /// Converts this multiplexer into a [`RlpxSatelliteStream`] with eth protocol as the given /// primary protocol. - pub async fn into_eth_satellite_stream( + pub async fn into_eth_satellite_stream( self, status: Status, fork_filter: ForkFilter, - ) -> Result<(RlpxSatelliteStream>, Status), EthStreamError> + ) -> Result<(RlpxSatelliteStream>, Status), EthStreamError> where St: Stream> + Sink + Unpin, { @@ -674,6 +675,7 @@ mod tests { }, UnauthedP2PStream, }; + use reth_eth_wire_types::EthNetworkPrimitives; use tokio::{net::TcpListener, sync::oneshot}; use tokio_util::codec::Decoder; @@ -693,7 +695,7 @@ mod tests { UnauthedP2PStream::new(stream).handshake(server_hello).await.unwrap(); let (_eth_stream, _) = UnauthedEthStream::new(p2p_stream) - .handshake(other_status, other_fork_filter) + .handshake::(other_status, other_fork_filter) .await .unwrap(); @@ -708,7 +710,9 @@ mod tests { .into_satellite_stream_with_handshake( eth.capability().as_ref(), move |proxy| async move { - UnauthedEthStream::new(proxy).handshake(status, fork_filter).await + UnauthedEthStream::new(proxy) + .handshake::(status, fork_filter) + .await }, ) .await @@ -731,7 +735,7 @@ mod tests { let (conn, _) = UnauthedP2PStream::new(stream).handshake(server_hello).await.unwrap(); let (mut st, _their_status) = RlpxProtocolMultiplexer::new(conn) - .into_eth_satellite_stream(other_status, other_fork_filter) + .into_eth_satellite_stream::(other_status, other_fork_filter) .await .unwrap(); @@ -762,7 +766,7 @@ mod tests { let conn = connect_passthrough(local_addr, test_hello().0).await; let (mut st, _their_status) = RlpxProtocolMultiplexer::new(conn) - .into_eth_satellite_stream(status, fork_filter) + .into_eth_satellite_stream::(status, fork_filter) .await .unwrap(); diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index 9882e39787e..0ae546daafb 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -14,7 +14,7 @@ use futures::{Sink, SinkExt, StreamExt}; use pin_project::pin_project; use reth_codecs::add_arbitrary_tests; use reth_metrics::metrics::counter; -use reth_primitives::GotExpected; +use reth_primitives_traits::GotExpected; use std::{ collections::VecDeque, io, @@ -614,25 +614,24 @@ where /// Returns `Poll::Ready(Ok(()))` when no buffered items remain. 
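
For context, the same turbofish applies when eth is installed as the primary protocol on a multiplexed connection. A fragment; `conn`, `status`, and `fork_filter` are assumed to come from an earlier p2p handshake, as in the tests above:

```rust
// Sketch: wire eth as the primary protocol on a multiplexed RLPx connection,
// leaving the multiplexer free to carry satellite sub-protocols.
let (satellite_stream, their_status) = RlpxProtocolMultiplexer::new(conn)
    .into_eth_satellite_stream::<EthNetworkPrimitives>(status, fork_filter)
    .await?;
```
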
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut this = self.project(); - loop { - match ready!(this.inner.as_mut().poll_flush(cx)) { - Err(err) => { - trace!(target: "net::p2p", - %err, - "error flushing p2p stream" - ); - return Poll::Ready(Err(err.into())) - } - Ok(()) => { + let poll_res = loop { + match this.inner.as_mut().poll_ready(cx) { + Poll::Pending => break Poll::Pending, + Poll::Ready(Err(err)) => break Poll::Ready(Err(err.into())), + Poll::Ready(Ok(())) => { let Some(message) = this.outgoing_messages.pop_front() else { - return Poll::Ready(Ok(())) + break Poll::Ready(Ok(())) }; if let Err(err) = this.inner.as_mut().start_send(message) { - return Poll::Ready(Err(err.into())) + break Poll::Ready(Err(err.into())) } } } - } + }; + + ready!(this.inner.as_mut().poll_flush(cx))?; + + poll_res } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { diff --git a/crates/net/eth-wire/src/test_utils.rs b/crates/net/eth-wire/src/test_utils.rs index e516c0aee7d..56656d60e94 100644 --- a/crates/net/eth-wire/src/test_utils.rs +++ b/crates/net/eth-wire/src/test_utils.rs @@ -6,10 +6,10 @@ use crate::{ hello::DEFAULT_TCP_PORT, EthVersion, HelloMessageWithProtocols, P2PStream, ProtocolVersion, Status, UnauthedP2PStream, }; +use alloy_chains::Chain; use alloy_primitives::{B256, U256}; -use reth_chainspec::Chain; +use reth_ethereum_forks::{ForkFilter, Head}; use reth_network_peers::pk2id; -use reth_primitives::{ForkFilter, Head}; use secp256k1::{SecretKey, SECP256K1}; use std::net::SocketAddr; use tokio::net::TcpStream; @@ -37,7 +37,7 @@ pub fn eth_handshake() -> (Status, ForkFilter) { let fork_filter = ForkFilter::new(Head::default(), genesis, 0, Vec::new()); let status = Status { - version: EthVersion::Eth67 as u8, + version: EthVersion::Eth67, chain: Chain::mainnet(), total_difficulty: U256::ZERO, blockhash: B256::random(), diff --git a/crates/net/eth-wire/tests/new_block.rs b/crates/net/eth-wire/tests/new_block.rs index 266752b74ab..366bf26a3a2 100644 --- a/crates/net/eth-wire/tests/new_block.rs +++ b/crates/net/eth-wire/tests/new_block.rs @@ -11,7 +11,7 @@ fn decode_new_block_network() { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/new_block_network_rlp"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = NewBlock::decode(&mut &hex_data[..]).unwrap(); + let _txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap(); } #[test] @@ -20,7 +20,7 @@ fn decode_new_block_network_bsc_one() { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/bsc_new_block_network_one"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = NewBlock::decode(&mut &hex_data[..]).unwrap(); + let _txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap(); } #[test] @@ -29,5 +29,5 @@ fn decode_new_block_network_bsc_two() { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/bsc_new_block_network_two"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = NewBlock::decode(&mut &hex_data[..]).unwrap(); + let _txs: NewBlock = NewBlock::decode(&mut &hex_data[..]).unwrap(); } diff --git a/crates/net/eth-wire/tests/pooled_transactions.rs b/crates/net/eth-wire/tests/pooled_transactions.rs index 6690f42631a..93a17f3b05b 100644 --- a/crates/net/eth-wire/tests/pooled_transactions.rs +++ 
b/crates/net/eth-wire/tests/pooled_transactions.rs
@@ -3,7 +3,7 @@
 
 use alloy_eips::eip2718::Decodable2718;
 use alloy_primitives::hex;
 use alloy_rlp::{Decodable, Encodable};
-use reth_eth_wire::{EthVersion, PooledTransactions, ProtocolMessage};
+use reth_eth_wire::{EthNetworkPrimitives, EthVersion, PooledTransactions, ProtocolMessage};
 use reth_primitives::PooledTransactionsElement;
 use std::{fs, path::PathBuf};
 use test_fuzz::test_fuzz;
@@ -12,10 +12,7 @@ use test_fuzz::test_fuzz;
 #[test_fuzz]
 fn roundtrip_pooled_transactions(hex_data: Vec<u8>) -> Result<(), alloy_rlp::Error> {
     let input_rlp = &mut &hex_data[..];
-    let txs = match PooledTransactions::decode(input_rlp) {
-        Ok(txs) => txs,
-        Err(e) => return Err(e),
-    };
+    let txs: PooledTransactions = PooledTransactions::decode(input_rlp)?;
 
     // get the amount of bytes decoded in `decode` by subtracting the length of the original buf,
     // from the length of the remaining bytes
@@ -28,7 +25,7 @@ fn roundtrip_pooled_transactions(hex_data: Vec<u8>) -> Result<(), alloy_rlp::Err
     assert_eq!(expected_encoding, buf);
 
     // now do another decoding, on what we encoded - this should succeed
-    let txs2 = PooledTransactions::decode(&mut &buf[..]).unwrap();
+    let txs2: PooledTransactions = PooledTransactions::decode(&mut &buf[..]).unwrap();
 
     // ensure that the payload length is the same
     assert_eq!(txs.length(), txs2.length());
@@ -54,7 +51,8 @@ fn decode_request_pair_pooled_blob_transactions() {
         .join("testdata/request_pair_pooled_blob_transactions");
     let data = fs::read_to_string(network_data_path).expect("Unable to read file");
     let hex_data = hex::decode(data.trim()).unwrap();
-    let _txs = ProtocolMessage::decode_message(EthVersion::Eth68, &mut &hex_data[..]).unwrap();
+    let _txs: ProtocolMessage<EthNetworkPrimitives> =
+        ProtocolMessage::decode_message(EthVersion::Eth68, &mut &hex_data[..]).unwrap();
 }
 
 #[test]
diff --git a/crates/net/nat/src/lib.rs b/crates/net/nat/src/lib.rs
index 600ba97cd2d..962f1e49efd 100644
--- a/crates/net/nat/src/lib.rs
+++ b/crates/net/nat/src/lib.rs
@@ -111,7 +111,7 @@ impl FromStr for NatResolver {
                     "Unknown Nat Resolver: {s}"
                 )))
             };
-            Self::ExternalIp(ip.parse::<IpAddr>()?)
+            Self::ExternalIp(ip.parse()?)
         }
     };
     Ok(r)
diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml
index 650d749048c..efb0257fc8e 100644
--- a/crates/net/network-api/Cargo.toml
+++ b/crates/net/network-api/Cargo.toml
@@ -40,4 +40,11 @@ derive_more.workspace = true
 
 [features]
 default = ["serde"]
-serde = ["dep:serde"]
+serde = [
+    "dep:serde",
+    "reth-eth-wire-types/serde",
+    "reth-network-types/serde",
+    "alloy-primitives/serde",
+    "enr/serde",
+    "reth-ethereum-forks/serde"
+]
diff --git a/crates/net/network-api/src/downloaders.rs b/crates/net/network-api/src/downloaders.rs
index f081c16ed81..cbfe816134e 100644
--- a/crates/net/network-api/src/downloaders.rs
+++ b/crates/net/network-api/src/downloaders.rs
@@ -1,5 +1,7 @@
 //! API related to syncing blocks.
 
+use std::fmt::Debug;
+
 use futures::Future;
 use reth_network_p2p::BlockClient;
 use tokio::sync::oneshot;
@@ -7,10 +9,13 @@ use tokio::sync::oneshot;
 
 /// Provides client for downloading blocks.
 #[auto_impl::auto_impl(&, Arc)]
 pub trait BlockDownloaderProvider {
+    /// The client this type can provide.
+    type Client: BlockClient + Send + Sync + Clone + 'static;
+
     /// Returns a new [`BlockClient`], used for fetching blocks from peers.
     ///
     /// The client is the entrypoint for sending block requests to the network.
fn fetch_client( &self, - ) -> impl Future> + Send; + ) -> impl Future> + Send; } diff --git a/crates/net/network-api/src/events.rs b/crates/net/network-api/src/events.rs index d2bd66d1fdd..e17cedef11f 100644 --- a/crates/net/network-api/src/events.rs +++ b/crates/net/network-api/src/events.rs @@ -1,37 +1,80 @@ //! API related to listening for network events. -use std::{fmt, net::SocketAddr, sync::Arc}; - use reth_eth_wire_types::{ message::RequestPair, BlockBodies, BlockHeaders, Capabilities, DisconnectReason, EthMessage, - EthVersion, GetBlockBodies, GetBlockHeaders, GetNodeData, GetPooledTransactions, GetReceipts, - NodeData, PooledTransactions, Receipts, Status, + EthNetworkPrimitives, EthVersion, GetBlockBodies, GetBlockHeaders, GetNodeData, + GetPooledTransactions, GetReceipts, NetworkPrimitives, NodeData, PooledTransactions, Receipts, + Status, }; use reth_ethereum_forks::ForkId; use reth_network_p2p::error::{RequestError, RequestResult}; use reth_network_peers::PeerId; use reth_network_types::PeerAddr; use reth_tokio_util::EventStream; +use std::{ + fmt, + net::SocketAddr, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; use tokio::sync::{mpsc, oneshot}; -use tokio_stream::wrappers::UnboundedReceiverStream; +use tokio_stream::{wrappers::UnboundedReceiverStream, Stream, StreamExt}; -/// Provides event subscription for the network. -#[auto_impl::auto_impl(&, Arc)] -pub trait NetworkEventListenerProvider: Send + Sync { - /// Creates a new [`NetworkEvent`] listener channel. - fn event_listener(&self) -> EventStream; - /// Returns a new [`DiscoveryEvent`] stream. - /// - /// This stream yields [`DiscoveryEvent`]s for each peer that is discovered. - fn discovery_listener(&self) -> UnboundedReceiverStream; +/// A boxed stream of network peer events that provides a type-erased interface. +pub struct PeerEventStream(Pin + Send + Sync>>); + +impl fmt::Debug for PeerEventStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("PeerEventStream").finish_non_exhaustive() + } +} + +impl PeerEventStream { + /// Create a new stream [`PeerEventStream`] by converting the provided stream's items into peer + /// events [`PeerEvent`] + pub fn new(stream: S) -> Self + where + S: Stream + Send + Sync + 'static, + T: Into + 'static, + { + let mapped_stream = stream.map(Into::into); + Self(Box::pin(mapped_stream)) + } +} + +impl Stream for PeerEventStream { + type Item = PeerEvent; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.0.as_mut().poll_next(cx) + } +} + +/// Represents information about an established peer session. +#[derive(Debug, Clone)] +pub struct SessionInfo { + /// The identifier of the peer to which a session was established. + pub peer_id: PeerId, + /// The remote addr of the peer to which a session was established. + pub remote_addr: SocketAddr, + /// The client version of the peer to which a session was established. + pub client_version: Arc, + /// Capabilities the peer announced. + pub capabilities: Arc, + /// The status of the peer to which a session was established. + pub status: Arc, + /// Negotiated eth version of the session. + pub version: EthVersion, } -/// (Non-exhaustive) Events emitted by the network that are of interest for subscribers. +/// (Non-exhaustive) List of the different events emitted by the network that are of interest for +/// subscribers. /// /// This includes any event types that may be relevant to tasks, for metrics, keep track of peers /// etc. 
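`PeerEventStream` is the type-erasure layer for the split introduced here: any stream whose items convert into the `PeerEvent` enum that follows can be boxed behind it. A sketch of narrowing the full `NetworkEvent<R>` stream down to lifecycle events (the `From` impl doing the conversion appears below; the `R: Send + 'static` bounds are an assumption of this sketch):

use reth_network_api::events::{NetworkEvent, PeerEventStream};
use tokio_stream::wrappers::UnboundedReceiverStream;

fn erase_events<R: Send + 'static>(
    rx: tokio::sync::mpsc::UnboundedReceiver<NetworkEvent<R>>,
) -> PeerEventStream {
    // `NetworkEvent<R>: Into<PeerEvent>`, so the request-capable stream can be
    // narrowed to pure lifecycle events and handed to type-erased consumers.
    PeerEventStream::new(UnboundedReceiverStream::new(rx))
}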
#[derive(Debug, Clone)] -pub enum NetworkEvent { +pub enum PeerEvent { /// Closed the peer session. SessionClosed { /// The identifier of the peer to which a session was closed. @@ -40,28 +83,65 @@ pub enum NetworkEvent { reason: Option, }, /// Established a new session with the given peer. - SessionEstablished { - /// The identifier of the peer to which a session was established. - peer_id: PeerId, - /// The remote addr of the peer to which a session was established. - remote_addr: SocketAddr, - /// The client version of the peer to which a session was established. - client_version: Arc, - /// Capabilities the peer announced - capabilities: Arc, - /// A request channel to the session task. - messages: PeerRequestSender, - /// The status of the peer to which a session was established. - status: Arc, - /// negotiated eth version of the session - version: EthVersion, - }, + SessionEstablished(SessionInfo), /// Event emitted when a new peer is added PeerAdded(PeerId), /// Event emitted when a new peer is removed PeerRemoved(PeerId), } +/// (Non-exhaustive) Network events representing peer lifecycle events and session requests. +#[derive(Debug)] +pub enum NetworkEvent { + /// Basic peer lifecycle event. + Peer(PeerEvent), + /// Session established with requests. + ActivePeerSession { + /// Session information + info: SessionInfo, + /// A request channel to the session task. + messages: PeerRequestSender, + }, +} + +impl Clone for NetworkEvent { + fn clone(&self) -> Self { + match self { + Self::Peer(event) => Self::Peer(event.clone()), + Self::ActivePeerSession { info, messages } => { + Self::ActivePeerSession { info: info.clone(), messages: messages.clone() } + } + } + } +} + +impl From> for PeerEvent { + fn from(event: NetworkEvent) -> Self { + match event { + NetworkEvent::Peer(peer_event) => peer_event, + NetworkEvent::ActivePeerSession { info, .. } => Self::SessionEstablished(info), + } + } +} + +/// Provides peer event subscription for the network. +#[auto_impl::auto_impl(&, Arc)] +pub trait NetworkPeersEvents: Send + Sync { + /// Creates a new peer event listener stream. + fn peer_events(&self) -> PeerEventStream; +} + +/// Provides event subscription for the network. +#[auto_impl::auto_impl(&, Arc)] +pub trait NetworkEventListenerProvider: NetworkPeersEvents { + /// Creates a new [`NetworkEvent`] listener channel. + fn event_listener(&self) -> EventStream>; + /// Returns a new [`DiscoveryEvent`] stream. + /// + /// This stream yields [`DiscoveryEvent`]s for each peer that is discovered. + fn discovery_listener(&self) -> UnboundedReceiverStream; +} + /// Events produced by the `Discovery` manager. #[derive(Debug, Clone, PartialEq, Eq)] pub enum DiscoveryEvent { @@ -98,7 +178,7 @@ pub enum DiscoveredEvent { /// Protocol related request messages that expect a response #[derive(Debug)] -pub enum PeerRequest { +pub enum PeerRequest { /// Requests block headers from the peer. /// /// The response should be sent through the channel. @@ -106,7 +186,7 @@ pub enum PeerRequest { /// The request for block headers. request: GetBlockHeaders, /// The channel to send the response for block headers. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Requests block bodies from the peer. /// @@ -115,7 +195,7 @@ pub enum PeerRequest { /// The request for block bodies. request: GetBlockBodies, /// The channel to send the response for block bodies. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Requests pooled transactions from the peer. 
/// @@ -124,7 +204,7 @@ pub enum PeerRequest { /// The request for pooled transactions. request: GetPooledTransactions, /// The channel to send the response for pooled transactions. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Requests `NodeData` from the peer. /// @@ -148,7 +228,7 @@ pub enum PeerRequest { // === impl PeerRequest === -impl PeerRequest { +impl PeerRequest { /// Invoked if we received a response which does not match the request pub fn send_bad_response(self) { self.send_err_response(RequestError::BadResponse) @@ -166,7 +246,7 @@ impl PeerRequest { } /// Returns the [`EthMessage`] for this type - pub fn create_request_message(&self, request_id: u64) -> EthMessage { + pub fn create_request_message(&self, request_id: u64) -> EthMessage { match self { Self::GetBlockHeaders { request, .. } => { EthMessage::GetBlockHeaders(RequestPair { request_id, message: *request }) @@ -199,24 +279,29 @@ impl PeerRequest { } /// A Cloneable connection for sending _requests_ directly to the session of a peer. -#[derive(Clone)] -pub struct PeerRequestSender { +pub struct PeerRequestSender { /// id of the remote node. pub peer_id: PeerId, /// The Sender half connected to a session. - pub to_session_tx: mpsc::Sender, + pub to_session_tx: mpsc::Sender, +} + +impl Clone for PeerRequestSender { + fn clone(&self) -> Self { + Self { peer_id: self.peer_id, to_session_tx: self.to_session_tx.clone() } + } } // === impl PeerRequestSender === -impl PeerRequestSender { +impl PeerRequestSender { /// Constructs a new sender instance that's wired to a session - pub const fn new(peer_id: PeerId, to_session_tx: mpsc::Sender) -> Self { + pub const fn new(peer_id: PeerId, to_session_tx: mpsc::Sender) -> Self { Self { peer_id, to_session_tx } } /// Attempts to immediately send a message on this Sender - pub fn try_send(&self, req: PeerRequest) -> Result<(), mpsc::error::TrySendError> { + pub fn try_send(&self, req: R) -> Result<(), mpsc::error::TrySendError> { self.to_session_tx.try_send(req) } @@ -226,7 +311,7 @@ impl PeerRequestSender { } } -impl fmt::Debug for PeerRequestSender { +impl fmt::Debug for PeerRequestSender { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PeerRequestSender").field("peer_id", &self.peer_id).finish_non_exhaustive() } diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 6163c873003..986d490c34f 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -36,6 +36,7 @@ pub use events::{ use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant}; use reth_eth_wire_types::{capability::Capabilities, DisconnectReason, EthVersion, Status}; +use reth_network_p2p::EthBlockClient; use reth_network_peers::NodeRecord; /// The `PeerId` type. @@ -43,7 +44,7 @@ pub type PeerId = alloy_primitives::B512; /// Helper trait that unifies network API needed to launch node. 
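`PeerRequestSender<R>` above writes its `Clone` impl by hand because `#[derive(Clone)]` would add an `R: Clone` bound, while `mpsc::Sender<R>` is clonable for any `R`. The pattern in isolation (type name is illustrative):

use tokio::sync::mpsc;

struct RequestSender<R> {
    to_session_tx: mpsc::Sender<R>,
}

// Manual impl: no `R: Clone` bound required, since the channel sender itself
// is cheaply clonable regardless of the message type.
impl<R> Clone for RequestSender<R> {
    fn clone(&self) -> Self {
        Self { to_session_tx: self.to_session_tx.clone() }
    }
}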
pub trait FullNetwork: - BlockDownloaderProvider + BlockDownloaderProvider + NetworkSyncUpdater + NetworkInfo + NetworkEventListenerProvider @@ -55,7 +56,7 @@ pub trait FullNetwork: } impl FullNetwork for T where - T: BlockDownloaderProvider + T: BlockDownloaderProvider + NetworkSyncUpdater + NetworkInfo + NetworkEventListenerProvider diff --git a/crates/net/network-types/Cargo.toml b/crates/net/network-types/Cargo.toml index 97c8e65cbbc..932527b91c6 100644 --- a/crates/net/network-types/Cargo.toml +++ b/crates/net/network-types/Cargo.toml @@ -22,9 +22,13 @@ serde = { workspace = true, optional = true } humantime-serde = { workspace = true, optional = true } serde_json = { workspace = true } -# misc +# misc tracing.workspace = true [features] -serde = ["dep:serde", "dep:humantime-serde"] +serde = [ + "dep:serde", + "dep:humantime-serde", + "reth-ethereum-forks/serde" +] test-utils = [] diff --git a/crates/net/network-types/src/peers/config.rs b/crates/net/network-types/src/peers/config.rs index 97a8bb3cac3..890679f5d34 100644 --- a/crates/net/network-types/src/peers/config.rs +++ b/crates/net/network-types/src/peers/config.rs @@ -24,6 +24,9 @@ pub const DEFAULT_MAX_COUNT_PEERS_INBOUND: u32 = 30; /// This restricts how many outbound dials can be performed concurrently. pub const DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS: usize = 15; +/// A temporary timeout for ips on incoming connection attempts. +pub const INBOUND_IP_THROTTLE_DURATION: Duration = Duration::from_secs(30); + /// The durations to use when a backoff should be applied to a peer. /// /// See also [`BackoffKind`]. @@ -155,6 +158,11 @@ pub struct PeersConfig { /// /// The backoff duration increases with number of backoff attempts. pub backoff_durations: PeerBackoffDurations, + /// How long to temporarily ban ips on incoming connection attempts. + /// + /// This acts as an IP based rate limit. + #[cfg_attr(feature = "serde", serde(default, with = "humantime_serde"))] + pub incoming_ip_throttle_duration: Duration, } impl Default for PeersConfig { @@ -171,6 +179,7 @@ impl Default for PeersConfig { trusted_nodes_only: false, basic_nodes: Default::default(), max_backoff_count: 5, + incoming_ip_throttle_duration: INBOUND_IP_THROTTLE_DURATION, } } } diff --git a/crates/net/network-types/src/peers/state.rs b/crates/net/network-types/src/peers/state.rs index f6ab1a39f85..1e2466c805a 100644 --- a/crates/net/network-types/src/peers/state.rs +++ b/crates/net/network-types/src/peers/state.rs @@ -31,6 +31,12 @@ impl PeerConnectionState { } } + /// Returns true if this is the idle state. + #[inline] + pub const fn is_idle(&self) -> bool { + matches!(self, Self::Idle) + } + /// Returns true if this is an active incoming connection. 
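The new `incoming_ip_throttle_duration` knob defaults to `INBOUND_IP_THROTTLE_DURATION` (30s) and acts as a per-IP rate limit on inbound dials. A sketch of overriding it while keeping the remaining defaults (function name is illustrative):

use reth_network_types::PeersConfig;
use std::time::Duration;

fn strict_peers_config() -> PeersConfig {
    PeersConfig {
        // Throttle repeated inbound attempts from the same IP for a minute
        // instead of the 30s default.
        incoming_ip_throttle_duration: Duration::from_secs(60),
        ..Default::default()
    }
}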
#[inline] pub const fn is_incoming(&self) -> bool { diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 1d3af517af3..97be9b1708e 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -16,24 +16,27 @@ workspace = true reth-chainspec.workspace = true reth-fs-util.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives-traits.workspace = true reth-net-banlist.workspace = true reth-network-api.workspace = true reth-network-p2p.workspace = true reth-discv4.workspace = true reth-discv5.workspace = true reth-dns-discovery.workspace = true +reth-ethereum-forks.workspace = true reth-eth-wire.workspace = true +reth-eth-wire-types.workspace = true reth-ecies.workspace = true reth-tasks.workspace = true reth-transaction-pool.workspace = true reth-storage-api.workspace = true -reth-provider = { workspace = true, optional = true } reth-tokio-util.workspace = true reth-consensus.workspace = true reth-network-peers = { workspace = true, features = ["net"] } reth-network-types.workspace = true # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true @@ -86,7 +89,7 @@ reth-transaction-pool = { workspace = true, features = ["test-utils"] } # alloy deps for testing against nodes alloy-node-bindings.workspace = true -alloy-provider= { workspace = true, features = ["admin-api"] } +alloy-provider = { workspace = true, features = ["admin-api"] } alloy-consensus.workspace = true # misc @@ -101,10 +104,46 @@ criterion = { workspace = true, features = ["async_tokio", "html_reports"] } [features] default = ["serde"] geth-tests = [] -serde = ["dep:serde", "secp256k1/serde", "enr/serde", "reth-network-types/serde"] -test-utils = ["dep:reth-provider", "reth-provider?/test-utils", "dep:tempfile", "reth-transaction-pool/test-utils", "reth-network-types/test-utils"] +serde = [ + "dep:serde", + "secp256k1/serde", + "enr/serde", + "reth-network-types/serde", + "reth-dns-discovery/serde", + "reth-eth-wire/serde", + "reth-eth-wire-types/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "discv5/serde", + "parking_lot/serde", + "rand/serde", + "smallvec/serde", + "url/serde", + "reth-primitives-traits/serde", + "reth-ethereum-forks/serde", + "reth-provider/serde" +] +test-utils = [ + "dep:tempfile", + "reth-transaction-pool/test-utils", + "reth-network-types/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-discv4/test-utils", + "reth-network/test-utils", + "reth-network-p2p/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-provider/test-utils" +] + +[[bench]] +name = "broadcast" +required-features = ["test-utils"] +harness = false [[bench]] -name = "bench" +name = "tx_manager_hash_fetching" required-features = ["test-utils"] harness = false diff --git a/crates/net/network/benches/bench.rs b/crates/net/network/benches/broadcast.rs similarity index 100% rename from crates/net/network/benches/bench.rs rename to crates/net/network/benches/broadcast.rs diff --git a/crates/net/network/benches/tx_manager_hash_fetching.rs b/crates/net/network/benches/tx_manager_hash_fetching.rs new file mode 100644 index 00000000000..1ab9b8fc427 --- /dev/null +++ b/crates/net/network/benches/tx_manager_hash_fetching.rs @@ -0,0 +1,97 @@ +#![allow(missing_docs)] +use alloy_primitives::U256; +use criterion::*; +use pprof::criterion::{Output, PProfProfiler}; +use 
rand::thread_rng; +use reth_network::{ + test_utils::Testnet, + transactions::{ + TransactionFetcherConfig, TransactionPropagationMode::Max, TransactionsManagerConfig, + }, +}; +use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; +use reth_transaction_pool::{test_utils::TransactionGenerator, PoolTransaction, TransactionPool}; +use tokio::runtime::Runtime as TokioRuntime; + +criterion_group!( + name = tx_fetch_benches; + config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); + targets = tx_fetch_bench +); + +pub fn tx_fetch_bench(c: &mut Criterion) { + let rt = TokioRuntime::new().unwrap(); + + let mut group = c.benchmark_group("Transaction Fetch"); + group.sample_size(10); + + group.bench_function("fetch_transactions", |b| { + b.to_async(&rt).iter_with_setup( + || { + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { + let tx_manager_config = TransactionsManagerConfig { + propagation_mode: Max(0), + transaction_fetcher_config: TransactionFetcherConfig { + max_inflight_requests: 1, + ..Default::default() + }, + ..Default::default() + }; + + let provider = MockEthProvider::default(); + let num_peers = 10; + let net = Testnet::create_with(num_peers, provider.clone()).await; + + // install request handlers + let net = net.with_eth_pool_config(tx_manager_config); + let handle = net.spawn(); + + // connect all the peers first + handle.connect_peers().await; + + let listening_peer = &handle.peers()[num_peers - 1]; + let listening_peer_tx_listener = + listening_peer.pool().unwrap().pending_transactions_listener(); + + let num_tx_per_peer = 10; + + for i in 1..num_peers { + let peer = &handle.peers()[i]; + let peer_pool = peer.pool().unwrap(); + + for _ in 0..num_tx_per_peer { + let mut gen = TransactionGenerator::new(thread_rng()); + let tx = gen.gen_eip1559_pooled(); + let sender = tx.sender(); + provider.add_account( + sender, + ExtendedAccount::new(0, U256::from(100_000_000)), + ); + peer_pool.add_external_transaction(tx.clone()).await.unwrap(); + } + } + + // Total expected transactions + let total_expected_tx = num_tx_per_peer * (num_peers - 1); + + (listening_peer_tx_listener, total_expected_tx) + }) + }) + }, + |(mut listening_peer_tx_listener, total_expected_tx)| async move { + let mut received_tx = 0; + while listening_peer_tx_listener.recv().await.is_some() { + received_tx += 1; + if received_tx >= total_expected_tx { + break; + } + } + }, + ) + }); + + group.finish(); +} + +criterion_main!(tx_fetch_benches); diff --git a/crates/net/network/src/builder.rs b/crates/net/network/src/builder.rs index e6a5d956641..64b864ef957 100644 --- a/crates/net/network/src/builder.rs +++ b/crates/net/network/src/builder.rs @@ -1,14 +1,14 @@ //! Builder support for configuring the entire setup. -use reth_network_api::test_utils::PeersHandleProvider; -use reth_transaction_pool::TransactionPool; -use tokio::sync::mpsc; - use crate::{ eth_requests::EthRequestHandler, transactions::{TransactionsManager, TransactionsManagerConfig}, NetworkHandle, NetworkManager, }; +use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; +use reth_network_api::test_utils::PeersHandleProvider; +use reth_transaction_pool::TransactionPool; +use tokio::sync::mpsc; /// We set the max channel capacity of the `EthRequestHandler` to 256 /// 256 requests with malicious 10MB body requests is 2.6GB which can be absorbed by the node. 
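The capacity comment above is a simple worst-case product; spelled out (illustrative constants, decimal megabytes):

// 256 queued requests x ~10 MB worst-case (malicious) body each
// = 2_560_000_000 bytes ≈ 2.6 GB, the figure the comment cites as
// absorbable by the node.
const MAX_BODY_BYTES: u64 = 10_000_000;
const CHANNEL_CAPACITY: u64 = 256;
const WORST_CASE_BUFFERED_BYTES: u64 = CHANNEL_CAPACITY * MAX_BODY_BYTES;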
@@ -16,49 +16,62 @@ pub(crate) const ETH_REQUEST_CHANNEL_CAPACITY: usize = 256; /// A builder that can configure all components of the network. #[allow(missing_debug_implementations)] -pub struct NetworkBuilder { - pub(crate) network: NetworkManager, +pub struct NetworkBuilder { + pub(crate) network: NetworkManager, pub(crate) transactions: Tx, pub(crate) request_handler: Eth, } // === impl NetworkBuilder === -impl NetworkBuilder { +impl NetworkBuilder { /// Consumes the type and returns all fields. - pub fn split(self) -> (NetworkManager, Tx, Eth) { + pub fn split(self) -> (NetworkManager, Tx, Eth) { let Self { network, transactions, request_handler } = self; (network, transactions, request_handler) } /// Returns the network manager. - pub const fn network(&self) -> &NetworkManager { + pub const fn network(&self) -> &NetworkManager { &self.network } /// Returns the mutable network manager. - pub fn network_mut(&mut self) -> &mut NetworkManager { + pub fn network_mut(&mut self) -> &mut NetworkManager { &mut self.network } /// Returns the handle to the network. - pub fn handle(&self) -> NetworkHandle { + pub fn handle(&self) -> NetworkHandle { self.network.handle().clone() } /// Consumes the type and returns all fields and also return a [`NetworkHandle`]. - pub fn split_with_handle(self) -> (NetworkHandle, NetworkManager, Tx, Eth) { + pub fn split_with_handle(self) -> (NetworkHandle, NetworkManager, Tx, Eth) { let Self { network, transactions, request_handler } = self; let handle = network.handle().clone(); (handle, network, transactions, request_handler) } + /// Creates a new [`EthRequestHandler`] and wires it to the network. + pub fn request_handler( + self, + client: Client, + ) -> NetworkBuilder, N> { + let Self { mut network, transactions, .. } = self; + let (tx, rx) = mpsc::channel(ETH_REQUEST_CHANNEL_CAPACITY); + network.set_eth_request_handler(tx); + let peers = network.handle().peers_handle().clone(); + let request_handler = EthRequestHandler::new(client, peers, rx); + NetworkBuilder { network, request_handler, transactions } + } + /// Creates a new [`TransactionsManager`] and wires it to the network. pub fn transactions( self, pool: Pool, transactions_manager_config: TransactionsManagerConfig, - ) -> NetworkBuilder, Eth> { + ) -> NetworkBuilder, Eth, N> { let Self { mut network, request_handler, .. } = self; let (tx, rx) = mpsc::unbounded_channel(); network.set_transactions(tx); @@ -66,17 +79,4 @@ impl NetworkBuilder { let transactions = TransactionsManager::new(handle, pool, rx, transactions_manager_config); NetworkBuilder { network, request_handler, transactions } } - - /// Creates a new [`EthRequestHandler`] and wires it to the network. - pub fn request_handler( - self, - client: Client, - ) -> NetworkBuilder> { - let Self { mut network, transactions, .. } = self; - let (tx, rx) = mpsc::channel(ETH_REQUEST_CHANNEL_CAPACITY); - network.set_eth_request_handler(tx); - let peers = network.handle().peers_handle().clone(); - let request_handler = EthRequestHandler::new(client, peers, rx); - NetworkBuilder { network, request_handler, transactions } - } } diff --git a/crates/net/network/src/cache.rs b/crates/net/network/src/cache.rs index 758b4916790..32389ec4b7b 100644 --- a/crates/net/network/src/cache.rs +++ b/crates/net/network/src/cache.rs @@ -1,11 +1,10 @@ //! 
Network cache support use core::hash::BuildHasher; -use std::{fmt, hash::Hash}; - use derive_more::{Deref, DerefMut}; use itertools::Itertools; use schnellru::{ByLength, Limiter, RandomState, Unlimited}; +use std::{fmt, hash::Hash}; /// A minimal LRU cache based on a [`LruMap`](schnellru::LruMap) with limited capacity. /// diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 72627f5b657..fb383b104a5 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -1,25 +1,25 @@ //! Network config support -use std::{collections::HashSet, net::SocketAddr, sync::Arc}; - +use crate::{ + error::NetworkError, + import::{BlockImport, ProofOfStakeBlockImport}, + transactions::TransactionsManagerConfig, + NetworkHandle, NetworkManager, +}; use reth_chainspec::{ChainSpecProvider, EthChainSpec, Hardforks}; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, NatResolver, DEFAULT_DISCOVERY_ADDRESS}; use reth_discv5::NetworkStackId; use reth_dns_discovery::DnsDiscoveryConfig; -use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; +use reth_eth_wire::{ + EthNetworkPrimitives, HelloMessage, HelloMessageWithProtocols, NetworkPrimitives, Status, +}; +use reth_ethereum_forks::{ForkFilter, Head}; use reth_network_peers::{mainnet_nodes, pk2id, sepolia_nodes, PeerId, TrustedPeer}; use reth_network_types::{PeersConfig, SessionsConfig}; -use reth_primitives::{ForkFilter, Head}; -use reth_storage_api::{noop::NoopBlockReader, BlockNumReader, BlockReader, HeaderProvider}; +use reth_storage_api::{noop::NoopProvider, BlockNumReader, BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use secp256k1::SECP256K1; - -use crate::{ - error::NetworkError, - import::{BlockImport, ProofOfStakeBlockImport}, - transactions::TransactionsManagerConfig, - NetworkHandle, NetworkManager, -}; +use std::{collections::HashSet, net::SocketAddr, sync::Arc}; // re-export for convenience use crate::protocol::{IntoRlpxSubProtocol, RlpxSubProtocols}; @@ -32,7 +32,7 @@ pub fn rng_secret_key() -> SecretKey { /// All network related initialization settings. #[derive(Debug)] -pub struct NetworkConfig { +pub struct NetworkConfig { /// The client type that can interact with the chain. /// /// This type is used to fetch the block number after we established a session and received the @@ -66,7 +66,7 @@ pub struct NetworkConfig { /// first hardfork, `Frontier` for mainnet. pub fork_filter: ForkFilter, /// The block importer type. - pub block_import: Box, + pub block_import: Box>, /// The default mode of the network. pub network_mode: NetworkMode, /// The executor to use for spawning tasks. @@ -87,19 +87,19 @@ pub struct NetworkConfig { // === impl NetworkConfig === -impl NetworkConfig<()> { +impl NetworkConfig<(), N> { /// Convenience method for creating the corresponding builder type - pub fn builder(secret_key: SecretKey) -> NetworkConfigBuilder { + pub fn builder(secret_key: SecretKey) -> NetworkConfigBuilder { NetworkConfigBuilder::new(secret_key) } /// Convenience method for creating the corresponding builder type with a random secret key. - pub fn builder_with_rng_secret_key() -> NetworkConfigBuilder { + pub fn builder_with_rng_secret_key() -> NetworkConfigBuilder { NetworkConfigBuilder::with_rng_secret_key() } } -impl NetworkConfig { +impl NetworkConfig { /// Create a new instance with all mandatory fields set, rest is field with defaults. 
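With `NetworkConfig` now generic over `NetworkPrimitives`, callers that want the previous behavior select `EthNetworkPrimitives` explicitly. A minimal sketch mirroring the doc examples elsewhere in this PR (assumes the `NoopProvider` test client):

use reth_network::{config::rng_secret_key, EthNetworkPrimitives, NetworkConfig};
use reth_storage_api::noop::NoopProvider;

fn ethereum_config() -> NetworkConfig<NoopProvider, EthNetworkPrimitives> {
    // The turbofish pins the wire types; everything else is inferred.
    NetworkConfig::<_, EthNetworkPrimitives>::builder(rng_secret_key())
        .build(NoopProvider::default())
}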
pub fn new(client: C, secret_key: SecretKey) -> Self where @@ -134,22 +134,28 @@ impl NetworkConfig { } } -impl NetworkConfig +impl NetworkConfig where C: BlockNumReader + 'static, + N: NetworkPrimitives, { /// Convenience method for calling [`NetworkManager::new`]. - pub async fn manager(self) -> Result { + pub async fn manager(self) -> Result, NetworkError> { NetworkManager::new(self).await } } -impl NetworkConfig +impl NetworkConfig where - C: BlockReader + HeaderProvider + Clone + Unpin + 'static, + N: NetworkPrimitives, + C: BlockReader + + HeaderProvider + + Clone + + Unpin + + 'static, { /// Starts the networking stack given a [`NetworkConfig`] and returns a handle to the network. - pub async fn start_network(self) -> Result { + pub async fn start_network(self) -> Result, NetworkError> { let client = self.client.clone(); let (handle, network, _txpool, eth) = NetworkManager::builder::(self) .await? @@ -164,7 +170,7 @@ where /// Builder for [`NetworkConfig`](struct.NetworkConfig.html). #[derive(Debug)] -pub struct NetworkConfigBuilder { +pub struct NetworkConfigBuilder { /// The node's secret key, from which the node's identity is derived. secret_key: SecretKey, /// How to configure discovery over DNS. @@ -196,7 +202,7 @@ pub struct NetworkConfigBuilder { /// Whether tx gossip is disabled tx_gossip_disabled: bool, /// The block importer type - block_import: Option>, + block_import: Option>>, /// How to instantiate transactions manager. transactions_manager_config: TransactionsManagerConfig, /// The NAT resolver for external IP @@ -206,7 +212,7 @@ pub struct NetworkConfigBuilder { // === impl NetworkConfigBuilder === #[allow(missing_docs)] -impl NetworkConfigBuilder { +impl NetworkConfigBuilder { /// Create a new builder instance with a random secret key. pub fn with_rng_secret_key() -> Self { Self::new(rng_secret_key()) @@ -480,7 +486,7 @@ impl NetworkConfigBuilder { } /// Sets the block import type. - pub fn block_import(mut self, block_import: Box) -> Self { + pub fn block_import(mut self, block_import: Box>) -> Self { self.block_import = Some(block_import); self } @@ -490,11 +496,11 @@ impl NetworkConfigBuilder { pub fn build_with_noop_provider( self, chain_spec: Arc, - ) -> NetworkConfig> + ) -> NetworkConfig, N> where ChainSpec: EthChainSpec + Hardforks + 'static, { - self.build(NoopBlockReader::new(chain_spec)) + self.build(NoopProvider::eth(chain_spec)) } /// Sets the NAT resolver for external IP. @@ -509,7 +515,7 @@ impl NetworkConfigBuilder { /// The given client is to be used for interacting with the chain, for example fetching the /// corresponding block for a given block hash we receive from a peer in the status message when /// establishing a connection. - pub fn build(self, client: C) -> NetworkConfig + pub fn build(self, client: C) -> NetworkConfig where C: ChainSpecProvider, { @@ -631,14 +637,13 @@ impl NetworkMode { #[cfg(test)] mod tests { - use std::sync::Arc; - use super::*; use rand::thread_rng; use reth_chainspec::{Chain, MAINNET}; use reth_dns_discovery::tree::LinkEntry; use reth_primitives::ForkHash; - use reth_provider::test_utils::NoopProvider; + use reth_storage_api::noop::NoopProvider; + use std::sync::Arc; fn builder() -> NetworkConfigBuilder { let secret_key = SecretKey::new(&mut thread_rng()); diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index d366027d680..c0b9ffa7630 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -1,13 +1,9 @@ //! Discovery support for the network. 
-use std::{ - collections::VecDeque, - net::{IpAddr, SocketAddr}, - pin::Pin, - sync::Arc, - task::{ready, Context, Poll}, +use crate::{ + cache::LruMap, + error::{NetworkError, ServiceKind}, }; - use enr::Enr; use futures::StreamExt; use reth_discv4::{DiscoveryUpdate, Discv4, Discv4Config}; @@ -15,20 +11,22 @@ use reth_discv5::{DiscoveredPeer, Discv5}; use reth_dns_discovery::{ DnsDiscoveryConfig, DnsDiscoveryHandle, DnsDiscoveryService, DnsNodeRecordUpdate, DnsResolver, }; +use reth_ethereum_forks::{EnrForkIdEntry, ForkId}; use reth_network_api::{DiscoveredEvent, DiscoveryEvent}; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::PeerAddr; -use reth_primitives::{EnrForkIdEntry, ForkId}; use secp256k1::SecretKey; +use std::{ + collections::VecDeque, + net::{IpAddr, SocketAddr}, + pin::Pin, + sync::Arc, + task::{ready, Context, Poll}, +}; use tokio::{sync::mpsc, task::JoinHandle}; use tokio_stream::{wrappers::ReceiverStream, Stream}; use tracing::trace; -use crate::{ - cache::LruMap, - error::{NetworkError, ServiceKind}, -}; - /// Default max capacity for cache of discovered peers. /// /// Default is 10 000 peers. @@ -214,6 +212,10 @@ impl Discovery { fn on_node_record_update(&mut self, record: NodeRecord, fork_id: Option) { let peer_id = record.id; let tcp_addr = record.tcp_addr(); + if tcp_addr.port() == 0 { + // useless peer for p2p + return + } let udp_addr = record.udp_addr(); let addr = PeerAddr::new(tcp_addr, Some(udp_addr)); _ = diff --git a/crates/net/network/src/error.rs b/crates/net/network/src/error.rs index 2709c4a2907..8156392b22f 100644 --- a/crates/net/network/src/error.rs +++ b/crates/net/network/src/error.rs @@ -1,7 +1,6 @@ //! Possible errors when interacting with the network. -use std::{fmt, io, io::ErrorKind, net::SocketAddr}; - +use crate::session::PendingSessionHandshakeError; use reth_dns_discovery::resolver::ResolveError; use reth_ecies::ECIESErrorImpl; use reth_eth_wire::{ @@ -9,8 +8,7 @@ use reth_eth_wire::{ DisconnectReason, }; use reth_network_types::BackoffKind; - -use crate::session::PendingSessionHandshakeError; +use std::{fmt, io, io::ErrorKind, net::SocketAddr}; /// Service kind. #[derive(Debug, PartialEq, Eq, Copy, Clone)] diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index f0c355b174a..bf0110f4270 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -1,32 +1,31 @@ //! Blocks/Headers management for the p2p network. 
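The new early-return in `on_node_record_update` above drops records that advertise TCP port 0, since they cannot be dialed. The check in isolation (helper name is hypothetical):

use reth_network_peers::NodeRecord;

/// A discovered record with TCP port 0 is useless for p2p: there is no
/// routable address to dial, so it never reaches the peer set.
fn is_dialable(record: &NodeRecord) -> bool {
    record.tcp_addr().port() != 0
}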
-use std::{ - future::Future, - pin::Pin, - task::{Context, Poll}, - time::Duration, +use crate::{ + budget::DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS, metered_poll_nested_stream_with_budget, + metrics::EthRequestHandlerMetrics, }; - +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_rlp::Encodable; use futures::StreamExt; use reth_eth_wire::{ - BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, GetNodeData, GetReceipts, - HeadersDirection, NodeData, Receipts, + BlockBodies, BlockHeaders, EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, GetNodeData, + GetReceipts, HeadersDirection, NetworkPrimitives, NodeData, Receipts, }; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::error::RequestResult; use reth_network_peers::PeerId; -use reth_primitives::{BlockBody, Header}; +use reth_primitives_traits::Block; use reth_storage_api::{BlockReader, HeaderProvider, ReceiptProvider}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; use tokio::sync::{mpsc::Receiver, oneshot}; use tokio_stream::wrappers::ReceiverStream; -use crate::{ - budget::DEFAULT_BUDGET_TRY_DRAIN_DOWNLOADERS, metered_poll_nested_stream_with_budget, - metrics::EthRequestHandlerMetrics, -}; - // Limits: /// Maximum number of receipts to serve. @@ -53,7 +52,7 @@ const SOFT_RESPONSE_LIMIT: usize = 2 * 1024 * 1024; /// This can be spawned to another task and is supposed to be run as background service. #[derive(Debug)] #[must_use = "Manager does nothing unless polled."] -pub struct EthRequestHandler { +pub struct EthRequestHandler { /// The client type that can interact with the chain. client: C, /// Used for reporting peers. @@ -61,15 +60,15 @@ pub struct EthRequestHandler { #[allow(dead_code)] peers: PeersHandle, /// Incoming request from the [`NetworkManager`](crate::NetworkManager). - incoming_requests: ReceiverStream, + incoming_requests: ReceiverStream>, /// Metrics for the eth request handler. metrics: EthRequestHandlerMetrics, } // === impl EthRequestHandler === -impl EthRequestHandler { +impl EthRequestHandler { /// Create a new instance - pub fn new(client: C, peers: PeersHandle, incoming: Receiver) -> Self { + pub fn new(client: C, peers: PeersHandle, incoming: Receiver>) -> Self { Self { client, peers, @@ -79,12 +78,13 @@ impl EthRequestHandler { } } -impl EthRequestHandler +impl EthRequestHandler where - C: BlockReader + HeaderProvider + ReceiptProvider, + N: NetworkPrimitives, + C: BlockReader + HeaderProvider + ReceiptProvider, { /// Returns the list of requested headers - fn get_headers_response(&self, request: GetBlockHeaders) -> Vec
{ + fn get_headers_response(&self, request: GetBlockHeaders) -> Vec { let GetBlockHeaders { start_block, limit, skip, direction } = request; let mut headers = Vec::new(); @@ -106,7 +106,7 @@ where if let Some(header) = self.client.header_by_hash_or_number(block).unwrap_or_default() { match direction { HeadersDirection::Rising => { - if let Some(next) = (header.number + 1).checked_add(skip) { + if let Some(next) = (header.number() + 1).checked_add(skip) { block = next.into() } else { break @@ -117,14 +117,14 @@ where // prevent under flows for block.number == 0 and `block.number - skip < // 0` if let Some(next) = - header.number.checked_sub(1).and_then(|num| num.checked_sub(skip)) + header.number().checked_sub(1).and_then(|num| num.checked_sub(skip)) { block = next.into() } else { break } } else { - block = header.parent_hash.into() + block = header.parent_hash().into() } } } @@ -147,7 +147,7 @@ where &self, _peer_id: PeerId, request: GetBlockHeaders, - response: oneshot::Sender>, + response: oneshot::Sender>>, ) { self.metrics.eth_headers_requests_received_total.increment(1); let headers = self.get_headers_response(request); @@ -158,7 +158,9 @@ where &self, _peer_id: PeerId, request: GetBlockBodies, - response: oneshot::Sender>, + response: oneshot::Sender< + RequestResult::Body>>, + >, ) { self.metrics.eth_bodies_requests_received_total.increment(1); let mut bodies = Vec::new(); @@ -167,8 +169,7 @@ where for hash in request.0 { if let Some(block) = self.client.block_by_hash(hash).unwrap_or_default() { - let body: BlockBody = block.into(); - + let (_, body) = block.split(); total_bytes += body.length(); bodies.push(body); @@ -222,9 +223,12 @@ where /// An endless future. /// /// This should be spawned or used as part of `tokio::select!`. -impl Future for EthRequestHandler +impl Future for EthRequestHandler where - C: BlockReader + HeaderProvider + Unpin, + N: NetworkPrimitives, + C: BlockReader + + HeaderProvider
+ + Unpin, { type Output = (); @@ -271,7 +275,7 @@ where /// All `eth` request related to blocks delegated by the network. #[derive(Debug)] -pub enum IncomingEthRequest { +pub enum IncomingEthRequest { /// Request Block headers from the peer. /// /// The response should be sent through the channel. @@ -281,7 +285,7 @@ pub enum IncomingEthRequest { /// The specific block headers requested. request: GetBlockHeaders, /// The channel sender for the response containing block headers. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Request Block bodies from the peer. /// @@ -292,7 +296,7 @@ pub enum IncomingEthRequest { /// The specific block bodies requested. request: GetBlockBodies, /// The channel sender for the response containing block bodies. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Request Node Data from the peer. /// diff --git a/crates/net/network/src/fetch/client.rs b/crates/net/network/src/fetch/client.rs index c47ee5d234f..e24ea167f5f 100644 --- a/crates/net/network/src/fetch/client.rs +++ b/crates/net/network/src/fetch/client.rs @@ -1,12 +1,9 @@ //! A client implementation that can interact with the network and download data. -use std::sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, -}; - +use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse}; use alloy_primitives::B256; use futures::{future, future::Either}; +use reth_eth_wire::{EthNetworkPrimitives, NetworkPrimitives}; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, @@ -17,11 +14,12 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::Header; +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; use tokio::sync::{mpsc::UnboundedSender, oneshot}; -use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse}; - #[cfg_attr(doc, aquamarine::aquamarine)] /// Front-end API for fetching data from the network. /// @@ -30,16 +28,16 @@ use crate::{fetch::DownloadRequest, flattened_response::FlattenedResponse}; /// /// include_mmd!("docs/mermaid/fetch-client.mmd") #[derive(Debug, Clone)] -pub struct FetchClient { +pub struct FetchClient { /// Sender half of the request channel. - pub(crate) request_tx: UnboundedSender, + pub(crate) request_tx: UnboundedSender>, /// The handle to the peers pub(crate) peers_handle: PeersHandle, /// Number of active peer sessions the node's currently handling. pub(crate) num_active_peers: Arc, } -impl DownloadClient for FetchClient { +impl DownloadClient for FetchClient { fn report_bad_message(&self, peer_id: PeerId) { self.peers_handle.reputation_change(peer_id, ReputationChangeKind::BadMessage); } @@ -53,8 +51,9 @@ impl DownloadClient for FetchClient { // or an error. type HeadersClientFuture = Either, future::Ready>; -impl HeadersClient for FetchClient { - type Output = HeadersClientFuture>>; +impl HeadersClient for FetchClient { + type Header = N::BlockHeader; + type Output = HeadersClientFuture>>; /// Sends a `GetBlockHeaders` request to an available peer. fn get_headers_with_priority( @@ -75,8 +74,9 @@ impl HeadersClient for FetchClient { } } -impl BodiesClient for FetchClient { - type Output = BodiesFut; +impl BodiesClient for FetchClient { + type Body = N::BlockBody; + type Output = BodiesFut; /// Sends a `GetBlockBodies` request to an available peer. 
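With the associated `Header`/`Body` types above, `FetchClient<N>` callers now receive `N::BlockHeader` rather than a hard-coded header type. A hedged sketch of issuing a single-header request through the `HeadersClient` trait (field names per `HeadersRequest`; the `into_data` accessor on the peer-tagged result is assumed from `reth_network_p2p`):

use alloy_primitives::B256;
use reth_eth_wire::HeadersDirection;
use reth_network_p2p::headers::client::{HeadersClient, HeadersRequest};

async fn fetch_one_header<C: HeadersClient>(client: &C, hash: B256) -> Option<C::Header> {
    let request = HeadersRequest {
        start: hash.into(),
        limit: 1,
        direction: HeadersDirection::Falling,
    };
    // `get_headers` resolves to a `PeerRequestResult<Vec<C::Header>>`;
    // strip the peer id and take the single header.
    client.get_headers(request).await.ok()?.into_data().into_iter().next()
}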
fn get_block_bodies_with_priority( diff --git a/crates/net/network/src/fetch/mod.rs b/crates/net/network/src/fetch/mod.rs index f5c0006bc3a..345df4f2e09 100644 --- a/crates/net/network/src/fetch/mod.rs +++ b/crates/net/network/src/fetch/mod.rs @@ -4,18 +4,10 @@ mod client; pub use client::FetchClient; -use std::{ - collections::{HashMap, VecDeque}, - sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, - Arc, - }, - task::{Context, Poll}, -}; - +use crate::message::BlockRequest; use alloy_primitives::B256; use futures::StreamExt; -use reth_eth_wire::{GetBlockBodies, GetBlockHeaders}; +use reth_eth_wire::{EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives}; use reth_network_api::test_utils::PeersHandle; use reth_network_p2p::{ error::{EthResponseValidator, PeerRequestResult, RequestError, RequestResult}, @@ -24,11 +16,19 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{BlockBody, Header}; +use std::{ + collections::{HashMap, VecDeque}, + sync::{ + atomic::{AtomicU64, AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, +}; use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; -use crate::message::BlockRequest; +type InflightHeadersRequest = Request>>; +type InflightBodiesRequest = Request, PeerRequestResult>>; /// Manages data fetching operations. /// @@ -37,13 +37,11 @@ use crate::message::BlockRequest; /// /// This type maintains a list of connected peers that are available for requests. #[derive(Debug)] -pub struct StateFetcher { +pub struct StateFetcher { /// Currently active [`GetBlockHeaders`] requests - inflight_headers_requests: - HashMap>>>, + inflight_headers_requests: HashMap>, /// Currently active [`GetBlockBodies`] requests - inflight_bodies_requests: - HashMap, PeerRequestResult>>>, + inflight_bodies_requests: HashMap>, /// The list of _available_ peers for requests. peers: HashMap, /// The handle to the peers manager @@ -51,16 +49,16 @@ pub struct StateFetcher { /// Number of active peer sessions the node's currently handling. num_active_peers: Arc, /// Requests queued for processing - queued_requests: VecDeque, + queued_requests: VecDeque>, /// Receiver for new incoming download requests - download_requests_rx: UnboundedReceiverStream, + download_requests_rx: UnboundedReceiverStream>, /// Sender for download requests, used to detach a [`FetchClient`] - download_requests_tx: UnboundedSender, + download_requests_tx: UnboundedSender>, } // === impl StateSyncer === -impl StateFetcher { +impl StateFetcher { pub(crate) fn new(peers_handle: PeersHandle, num_active_peers: Arc) -> Self { let (download_requests_tx, download_requests_rx) = mpsc::unbounded_channel(); Self { @@ -217,7 +215,7 @@ impl StateFetcher { /// Handles a new request to a peer. 
/// /// Caution: this assumes the peer exists and is idle - fn prepare_block_request(&mut self, peer_id: PeerId, req: DownloadRequest) -> BlockRequest { + fn prepare_block_request(&mut self, peer_id: PeerId, req: DownloadRequest) -> BlockRequest { // update the peer's state if let Some(peer) = self.peers.get_mut(&peer_id) { peer.state = req.peer_state(); @@ -260,7 +258,7 @@ impl StateFetcher { pub(crate) fn on_block_headers_response( &mut self, peer_id: PeerId, - res: RequestResult>, + res: RequestResult>, ) -> Option { let is_error = res.is_err(); let maybe_reputation_change = res.reputation_change_err(); @@ -296,7 +294,7 @@ impl StateFetcher { pub(crate) fn on_block_bodies_response( &mut self, peer_id: PeerId, - res: RequestResult>, + res: RequestResult>, ) -> Option { let is_likely_bad_response = res.as_ref().map_or(true, |bodies| bodies.is_empty()); @@ -315,7 +313,7 @@ impl StateFetcher { } /// Returns a new [`FetchClient`] that can send requests to this type. - pub(crate) fn client(&self) -> FetchClient { + pub(crate) fn client(&self) -> FetchClient { FetchClient { request_tx: self.download_requests_tx.clone(), peers_handle: self.peers_handle.clone(), @@ -405,24 +403,24 @@ struct Request { /// Requests that can be sent to the Syncer from a [`FetchClient`] #[derive(Debug)] -pub(crate) enum DownloadRequest { +pub(crate) enum DownloadRequest { /// Download the requested headers and send response through channel GetBlockHeaders { request: HeadersRequest, - response: oneshot::Sender>>, + response: oneshot::Sender>>, priority: Priority, }, /// Download the requested headers and send response through channel GetBlockBodies { request: Vec, - response: oneshot::Sender>>, + response: oneshot::Sender>>, priority: Priority, }, } // === impl DownloadRequest === -impl DownloadRequest { +impl DownloadRequest { /// Returns the corresponding state for a peer that handles the request. 
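Both `DownloadRequest` variants pair the request payload with a oneshot sender for the response, which is what lets a detached `FetchClient` await results from the fetcher. The shape in isolation (illustrative names; the real variants also carry a `Priority`):

use tokio::sync::oneshot;

struct QueuedDownload<Req, Resp> {
    request: Req,
    // Consumed exactly once when the peer's response (or an error) arrives.
    response: oneshot::Sender<Resp>,
}

fn queue<Req, Resp>(request: Req) -> (QueuedDownload<Req, Resp>, oneshot::Receiver<Resp>) {
    let (tx, rx) = oneshot::channel();
    (QueuedDownload { request, response: tx }, rx)
}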
const fn peer_state(&self) -> PeerState { match self { @@ -472,14 +470,15 @@ pub(crate) enum BlockResponseOutcome { mod tests { use super::*; use crate::{peers::PeersManager, PeersConfig}; + use alloy_consensus::Header; use alloy_primitives::B512; - use reth_primitives::SealedHeader; use std::future::poll_fn; #[tokio::test(flavor = "multi_thread")] async fn test_poll_fetcher() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher = + StateFetcher::::new(manager.handle(), Default::default()); poll_fn(move |cx| { assert!(fetcher.poll(cx).is_pending()); @@ -499,7 +498,8 @@ mod tests { #[tokio::test] async fn test_peer_rotation() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher = + StateFetcher::::new(manager.handle(), Default::default()); // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); @@ -522,7 +522,8 @@ mod tests { #[tokio::test] async fn test_peer_prioritization() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher = + StateFetcher::::new(manager.handle(), Default::default()); // Add a few random peers let peer1 = B512::random(); let peer2 = B512::random(); @@ -547,7 +548,8 @@ mod tests { #[tokio::test] async fn test_on_block_headers_response() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher = + StateFetcher::::new(manager.handle(), Default::default()); let peer_id = B512::random(); assert_eq!(fetcher.on_block_headers_response(peer_id, Ok(vec![Header::default()])), None); @@ -577,7 +579,8 @@ mod tests { #[tokio::test] async fn test_header_response_outcome() { let manager = PeersManager::new(PeersConfig::default()); - let mut fetcher = StateFetcher::new(manager.handle(), Default::default()); + let mut fetcher = + StateFetcher::::new(manager.handle(), Default::default()); let peer_id = B512::random(); let request_pair = || { @@ -590,8 +593,7 @@ mod tests { }, response: tx, }; - let mut header = SealedHeader::default().unseal(); - header.number = 0u64; + let header = Header { number: 0, ..Default::default() }; (req, header) }; @@ -612,7 +614,10 @@ mod tests { let outcome = fetcher.on_block_headers_response(peer_id, Err(RequestError::Timeout)).unwrap(); - assert!(EthResponseValidator::reputation_change_err(&Err(RequestError::Timeout)).is_some()); + assert!(EthResponseValidator::reputation_change_err(&Err::, _>( + RequestError::Timeout + )) + .is_some()); match outcome { BlockResponseOutcome::BadResponse(peer, _) => { diff --git a/crates/net/network/src/flattened_response.rs b/crates/net/network/src/flattened_response.rs index 78c3c35f598..61dae9c7c72 100644 --- a/crates/net/network/src/flattened_response.rs +++ b/crates/net/network/src/flattened_response.rs @@ -1,10 +1,9 @@ +use futures::Future; +use pin_project::pin_project; use std::{ pin::Pin, task::{Context, Poll}, }; - -use futures::Future; -use pin_project::pin_project; use tokio::sync::oneshot::{error::RecvError, Receiver}; /// Flatten a [Receiver] message in order to get rid of the [RecvError] result @@ -24,10 +23,7 @@ where fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.project(); - this.receiver.poll(cx).map(|r| match r { - Ok(r) => r, - Err(err) => 
Err(err.into()), - }) + this.receiver.poll(cx).map(|r| r.unwrap_or_else(|err| Err(err.into()))) } } diff --git a/crates/net/network/src/import.rs b/crates/net/network/src/import.rs index 201dc3e4f78..f63bf2dd7a8 100644 --- a/crates/net/network/src/import.rs +++ b/crates/net/network/src/import.rs @@ -1,13 +1,11 @@ //! This module provides an abstraction over block import in the form of the `BlockImport` trait. -use std::task::{Context, Poll}; - -use reth_network_peers::PeerId; - use crate::message::NewBlockMessage; +use reth_network_peers::PeerId; +use std::task::{Context, Poll}; /// Abstraction over block import. -pub trait BlockImport: std::fmt::Debug + Send + Sync { +pub trait BlockImport: std::fmt::Debug + Send + Sync { /// Invoked for a received `NewBlock` broadcast message from the peer. /// /// > When a `NewBlock` announcement message is received from a peer, the client first verifies @@ -15,35 +13,35 @@ pub trait BlockImport: std::fmt::Debug + Send + Sync { /// /// This is supposed to start verification. The results are then expected to be returned via /// [`BlockImport::poll`]. - fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockMessage); + fn on_new_block(&mut self, peer_id: PeerId, incoming_block: NewBlockMessage); /// Returns the results of a [`BlockImport::on_new_block`] - fn poll(&mut self, cx: &mut Context<'_>) -> Poll; + fn poll(&mut self, cx: &mut Context<'_>) -> Poll>; } /// Outcome of the [`BlockImport`]'s block handling. #[derive(Debug)] -pub struct BlockImportOutcome { +pub struct BlockImportOutcome { /// Sender of the `NewBlock` message. pub peer: PeerId, /// The result after validating the block - pub result: Result, + pub result: Result, BlockImportError>, } /// Represents the successful validation of a received `NewBlock` message. #[derive(Debug)] -pub enum BlockValidation { +pub enum BlockValidation { /// Basic Header validity check, after which the block should be relayed to peers via a /// `NewBlock` message ValidHeader { /// received block - block: NewBlockMessage, + block: NewBlockMessage, }, /// Successfully imported: state-root matches after execution. The block should be relayed via /// `NewBlockHashes` ValidBlock { /// validated block. - block: NewBlockMessage, + block: NewBlockMessage, }, } @@ -62,10 +60,10 @@ pub enum BlockImportError { #[non_exhaustive] pub struct ProofOfStakeBlockImport; -impl BlockImport for ProofOfStakeBlockImport { - fn on_new_block(&mut self, _peer_id: PeerId, _incoming_block: NewBlockMessage) {} +impl BlockImport for ProofOfStakeBlockImport { + fn on_new_block(&mut self, _peer_id: PeerId, _incoming_block: NewBlockMessage) {} - fn poll(&mut self, _cx: &mut Context<'_>) -> Poll { + fn poll(&mut self, _cx: &mut Context<'_>) -> Poll> { Poll::Pending } } diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index 0e433a38862..af5976ce5be 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -42,13 +42,15 @@ //! ### Configure and launch a standalone network //! //! The [`NetworkConfig`] is used to configure the network. -//! It requires an instance of [`BlockReader`](reth_provider::BlockReader). +//! It requires an instance of [`BlockReader`](reth_storage_api::BlockReader). //! //! ``` //! # async fn launch() { -//! use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; +//! use reth_network::{ +//! config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager, +//! }; //! use reth_network_peers::mainnet_nodes; -//! 
use reth_provider::test_utils::NoopProvider; +//! use reth_storage_api::noop::NoopProvider; //! //! // This block provider implementation is used for testing purposes. //! let client = NoopProvider::default(); @@ -56,7 +58,9 @@ //! // The key that's used for encrypting sessions and to identify our node. //! let local_key = rng_secret_key(); //! -//! let config = NetworkConfig::builder(local_key).boot_nodes(mainnet_nodes()).build(client); +//! let config = NetworkConfig::<_, EthNetworkPrimitives>::builder(local_key) +//! .boot_nodes(mainnet_nodes()) +//! .build(client); //! //! // create the network instance //! let network = NetworkManager::new(config).await.unwrap(); @@ -71,9 +75,11 @@ //! ### Configure all components of the Network with the [`NetworkBuilder`] //! //! ``` -//! use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; +//! use reth_network::{ +//! config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager, +//! }; //! use reth_network_peers::mainnet_nodes; -//! use reth_provider::test_utils::NoopProvider; +//! use reth_storage_api::noop::NoopProvider; //! use reth_transaction_pool::TransactionPool; //! async fn launch(pool: Pool) { //! // This block provider implementation is used for testing purposes. @@ -82,8 +88,9 @@ //! // The key that's used for encrypting sessions and to identify our node. //! let local_key = rng_secret_key(); //! -//! let config = -//! NetworkConfig::builder(local_key).boot_nodes(mainnet_nodes()).build(client.clone()); +//! let config = NetworkConfig::<_, EthNetworkPrimitives>::builder(local_key) +//! .boot_nodes(mainnet_nodes()) +//! .build(client.clone()); //! let transactions_manager_config = config.transactions_manager_config.clone(); //! //! // create the network instance @@ -138,6 +145,7 @@ mod state; mod swarm; pub use reth_eth_wire::{DisconnectReason, HelloMessageWithProtocols}; +pub use reth_eth_wire_types::{EthNetworkPrimitives, NetworkPrimitives}; pub use reth_network_api::{ BlockDownloaderProvider, DiscoveredEvent, DiscoveryEvent, NetworkEvent, NetworkEventListenerProvider, NetworkInfo, PeerRequest, PeerRequestSender, Peers, PeersInfo, diff --git a/crates/net/network/src/listener.rs b/crates/net/network/src/listener.rs index e5094f68948..9fcc15a104b 100644 --- a/crates/net/network/src/listener.rs +++ b/crates/net/network/src/listener.rs @@ -1,13 +1,12 @@ //! Contains connection-oriented interfaces. +use futures::{ready, Stream}; use std::{ io, net::SocketAddr, pin::Pin, task::{Context, Poll}, }; - -use futures::{ready, Stream}; use tokio::net::{TcpListener, TcpStream}; /// A tcp connection listener. diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 3a7f94985fc..fed3f54408b 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -15,36 +15,6 @@ //! (IP+port) of our node is published via discovery, remote peers can initiate inbound connections //! to the local node. Once a (tcp) connection is established, both peers start to authenticate a [RLPx session](https://github.com/ethereum/devp2p/blob/master/rlpx.md) via a handshake. If the handshake was successful, both peers announce their capabilities and are now ready to exchange sub-protocol messages via the `RLPx` session. 
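Since the manager "does nothing unless polled", the usual pattern is to grab the handle and hand the manager itself to the runtime. A sketch under the same assumptions as the doc example above:

use reth_network::{NetworkConfig, NetworkManager};
use reth_storage_api::BlockNumReader;

async fn spawn_network<C: BlockNumReader + 'static>(config: NetworkConfig<C>) {
    let network = NetworkManager::new(config).await.unwrap();
    // Keep a handle before moving the manager onto the runtime; the manager
    // is an endless future that only makes progress while polled.
    let handle = network.handle().clone();
    tokio::spawn(network);
    // Interact with the network via `handle` from here on.
    let _ = handle;
}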
-use std::{ - net::SocketAddr, - path::Path, - pin::Pin, - sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, - Arc, - }, - task::{Context, Poll}, - time::{Duration, Instant}, -}; - -use futures::{Future, StreamExt}; -use parking_lot::Mutex; -use reth_eth_wire::{capability::CapabilityMessage, Capabilities, DisconnectReason}; -use reth_fs_util::{self as fs, FsPathError}; -use reth_metrics::common::mpsc::UnboundedMeteredSender; -use reth_network_api::{ - test_utils::PeersHandle, EthProtocolInfo, NetworkEvent, NetworkStatus, PeerInfo, PeerRequest, -}; -use reth_network_peers::{NodeRecord, PeerId}; -use reth_network_types::ReputationChangeKind; -use reth_storage_api::BlockNumReader; -use reth_tasks::shutdown::GracefulShutdown; -use reth_tokio_util::EventSender; -use secp256k1::SecretKey; -use tokio::sync::mpsc::{self, error::TrySendError}; -use tokio_stream::wrappers::UnboundedReceiverStream; -use tracing::{debug, error, trace, warn}; - use crate::{ budget::{DEFAULT_BUDGET_TRY_DRAIN_NETWORK_HANDLE_CHANNEL, DEFAULT_BUDGET_TRY_DRAIN_SWARM}, config::NetworkConfig, @@ -65,6 +35,39 @@ use crate::{ transactions::NetworkTransactionEvent, FetchClient, NetworkBuilder, }; +use futures::{Future, StreamExt}; +use parking_lot::Mutex; +use reth_eth_wire::{ + capability::CapabilityMessage, Capabilities, DisconnectReason, EthNetworkPrimitives, + NetworkPrimitives, +}; +use reth_fs_util::{self as fs, FsPathError}; +use reth_metrics::common::mpsc::UnboundedMeteredSender; +use reth_network_api::{ + events::{PeerEvent, SessionInfo}, + test_utils::PeersHandle, + EthProtocolInfo, NetworkEvent, NetworkStatus, PeerInfo, PeerRequest, +}; +use reth_network_peers::{NodeRecord, PeerId}; +use reth_network_types::ReputationChangeKind; +use reth_storage_api::BlockNumReader; +use reth_tasks::shutdown::GracefulShutdown; +use reth_tokio_util::EventSender; +use secp256k1::SecretKey; +use std::{ + net::SocketAddr, + path::Path, + pin::Pin, + sync::{ + atomic::{AtomicU64, AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, + time::{Duration, Instant}, +}; +use tokio::sync::mpsc::{self, error::TrySendError}; +use tokio_stream::wrappers::UnboundedReceiverStream; +use tracing::{debug, error, trace, warn}; #[cfg_attr(doc, aquamarine::aquamarine)] /// Manages the _entire_ state of the network. @@ -76,20 +79,20 @@ use crate::{ /// include_mmd!("docs/mermaid/network-manager.mmd") #[derive(Debug)] #[must_use = "The NetworkManager does nothing unless polled"] -pub struct NetworkManager { +pub struct NetworkManager { /// The type that manages the actual network part, which includes connections. - swarm: Swarm, + swarm: Swarm, /// Underlying network handle that can be shared. - handle: NetworkHandle, + handle: NetworkHandle, /// Receiver half of the command channel set up between this type and the [`NetworkHandle`] - from_handle_rx: UnboundedReceiverStream, + from_handle_rx: UnboundedReceiverStream>, /// Handles block imports according to the `eth` protocol. - block_import: Box, + block_import: Box>, /// Sender for high level network events. - event_sender: EventSender, + event_sender: EventSender>>, /// Sender half to send events to the /// [`TransactionsManager`](crate::transactions::TransactionsManager) task, if configured. - to_transactions_manager: Option>, + to_transactions_manager: Option>>, /// Sender half to send events to the /// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler) task, if configured. 
/// @@ -103,7 +106,7 @@ pub struct NetworkManager { /// Thus, we use a bounded channel here to avoid unbounded build up if the node is flooded with /// requests. This channel size is set at /// [`ETH_REQUEST_CHANNEL_CAPACITY`](crate::builder::ETH_REQUEST_CHANNEL_CAPACITY) - to_eth_request_handler: Option>, + to_eth_request_handler: Option>>, /// Tracks the number of active session (connected peers). /// /// This is updated via internal events and shared via `Arc` with the [`NetworkHandle`] @@ -116,17 +119,17 @@ pub struct NetworkManager { } // === impl NetworkManager === -impl NetworkManager { +impl NetworkManager { /// Sets the dedicated channel for events intended for the /// [`TransactionsManager`](crate::transactions::TransactionsManager). - pub fn set_transactions(&mut self, tx: mpsc::UnboundedSender) { + pub fn set_transactions(&mut self, tx: mpsc::UnboundedSender>) { self.to_transactions_manager = Some(UnboundedMeteredSender::new(tx, NETWORK_POOL_TRANSACTIONS_SCOPE)); } /// Sets the dedicated channel for events intended for the /// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler). - pub fn set_eth_request_handler(&mut self, tx: mpsc::Sender) { + pub fn set_eth_request_handler(&mut self, tx: mpsc::Sender>) { self.to_eth_request_handler = Some(tx); } @@ -138,7 +141,7 @@ impl NetworkManager { /// Returns the [`NetworkHandle`] that can be cloned and shared. /// /// The [`NetworkHandle`] can be used to interact with this [`NetworkManager`] - pub const fn handle(&self) -> &NetworkHandle { + pub const fn handle(&self) -> &NetworkHandle { &self.handle } @@ -165,7 +168,7 @@ impl NetworkManager { /// The [`NetworkManager`] is an endless future that needs to be polled in order to advance the /// state of the entire network. pub async fn new( - config: NetworkConfig, + config: NetworkConfig, ) -> Result { let NetworkConfig { client, @@ -253,7 +256,7 @@ impl NetworkManager { let (to_manager_tx, from_handle_rx) = mpsc::unbounded_channel(); - let event_sender: EventSender = Default::default(); + let event_sender: EventSender>> = Default::default(); let handle = NetworkHandle::new( Arc::clone(&num_active_peers), @@ -289,9 +292,11 @@ impl NetworkManager { /// components of the network /// /// ``` - /// use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; + /// use reth_network::{ /// config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager, /// }; /// use reth_network_peers::mainnet_nodes; - /// use reth_provider::test_utils::NoopProvider; + /// use reth_storage_api::noop::NoopProvider; /// use reth_transaction_pool::TransactionPool; /// async fn launch(pool: Pool) { /// // This block provider implementation is used for testing purposes. @@ -300,8 +305,9 @@ impl NetworkManager { /// // The key that's used for encrypting sessions and to identify our node.
/// let local_key = rng_secret_key(); /// - /// let config = - /// NetworkConfig::builder(local_key).boot_nodes(mainnet_nodes()).build(client.clone()); + /// let config = NetworkConfig::<_, EthNetworkPrimitives>::builder(local_key) + /// .boot_nodes(mainnet_nodes()) + /// .build(client.clone()); /// let transactions_manager_config = config.transactions_manager_config.clone(); /// /// // create the network instance @@ -314,14 +320,14 @@ impl NetworkManager { /// } /// ``` pub async fn builder( - config: NetworkConfig, - ) -> Result, NetworkError> { + config: NetworkConfig, + ) -> Result, NetworkError> { let network = Self::new(config).await?; Ok(network.into_builder()) } /// Create a [`NetworkBuilder`] to configure all components of the network - pub const fn into_builder(self) -> NetworkBuilder<(), ()> { + pub const fn into_builder(self) -> NetworkBuilder<(), (), N> { NetworkBuilder { network: self, transactions: (), request_handler: () } } @@ -369,7 +375,7 @@ impl NetworkManager { /// Returns a new [`FetchClient`] that can be cloned and shared. /// /// The [`FetchClient`] is the entrypoint for sending requests to the network. - pub fn fetch_client(&self) -> FetchClient { + pub fn fetch_client(&self) -> FetchClient { self.swarm.state().fetch_client() } @@ -397,7 +403,7 @@ impl NetworkManager { &mut self, peer_id: PeerId, _capabilities: Arc, - _message: CapabilityMessage, + _message: CapabilityMessage, ) { trace!(target: "net", ?peer_id, "received unexpected message"); self.swarm @@ -408,7 +414,7 @@ impl NetworkManager { /// Sends an event to the [`TransactionsManager`](crate::transactions::TransactionsManager) if /// configured. - fn notify_tx_manager(&self, event: NetworkTransactionEvent) { + fn notify_tx_manager(&self, event: NetworkTransactionEvent) { if let Some(ref tx) = self.to_transactions_manager { let _ = tx.send(event); } @@ -416,7 +422,7 @@ impl NetworkManager { /// Sends an event to the [`EthRequestManager`](crate::eth_requests::EthRequestHandler) if /// configured. - fn delegate_eth_request(&self, event: IncomingEthRequest) { + fn delegate_eth_request(&self, event: IncomingEthRequest) { if let Some(ref reqs) = self.to_eth_request_handler { let _ = reqs.try_send(event).map_err(|e| { if let TrySendError::Full(_) = e { @@ -428,7 +434,7 @@ impl NetworkManager { } /// Handle an incoming request from the peer - fn on_eth_request(&self, peer_id: PeerId, req: PeerRequest) { + fn on_eth_request(&self, peer_id: PeerId, req: PeerRequest) { match req { PeerRequest::GetBlockHeaders { request, response } => { self.delegate_eth_request(IncomingEthRequest::GetBlockHeaders { @@ -469,7 +475,7 @@ impl NetworkManager { } /// Invoked after a `NewBlock` message from the peer was validated - fn on_block_import_result(&mut self, outcome: BlockImportOutcome) { + fn on_block_import_result(&mut self, outcome: BlockImportOutcome) { let BlockImportOutcome { peer, result } = outcome; match result { Ok(validated_block) => match validated_block { @@ -511,7 +517,7 @@ impl NetworkManager { } /// Handles a received Message from the peer's session. 
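`delegate_eth_request` above forwards incoming requests over the bounded channel with `try_send`, so a flooded node drops requests instead of buffering them without bound. A runnable sketch of that pattern (tokio assumed as the dependency; toy message type):

```
use tokio::sync::mpsc::{self, error::TrySendError};

#[tokio::main]
async fn main() {
    // Bounded channel, analogous to ETH_REQUEST_CHANNEL_CAPACITY.
    let (tx, mut rx) = mpsc::channel::<&'static str>(1);

    tx.try_send("first").unwrap();
    // The channel is at capacity: the second request is dropped, not queued.
    match tx.try_send("second") {
        Err(TrySendError::Full(_)) => println!("channel full; dropping request"),
        other => panic!("unexpected: {other:?}"),
    }

    assert_eq!(rx.recv().await, Some("first"));
}
```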
- fn on_peer_message(&mut self, peer_id: PeerId, msg: PeerMessage) { + fn on_peer_message(&mut self, peer_id: PeerId, msg: PeerMessage) { match msg { PeerMessage::NewBlockHashes(hashes) => { self.within_pow_or_disconnect(peer_id, |this| { @@ -551,7 +557,7 @@ impl NetworkManager { } /// Handler for received messages from a handle - fn on_handle_message(&mut self, msg: NetworkHandleMessage) { + fn on_handle_message(&mut self, msg: NetworkHandleMessage) { match msg { NetworkHandleMessage::DiscoveryListener(tx) => { self.swarm.state_mut().discovery_mut().add_listener(tx); @@ -643,10 +649,13 @@ impl NetworkManager { let _ = tx.send(None); } } + NetworkHandleMessage::EthMessage { peer_id, message } => { + self.swarm.sessions_mut().send_message(&peer_id, message) + } } } - fn on_swarm_event(&mut self, event: SwarmEvent) { + fn on_swarm_event(&mut self, event: SwarmEvent) { // handle event match event { SwarmEvent::ValidMessage { peer_id, message } => self.on_peer_message(peer_id, message), @@ -708,24 +717,26 @@ impl NetworkManager { self.update_active_connection_metrics(); - self.event_sender.notify(NetworkEvent::SessionEstablished { + let session_info = SessionInfo { peer_id, remote_addr, client_version, capabilities, - version, status, - messages, - }); + version, + }; + + self.event_sender + .notify(NetworkEvent::ActivePeerSession { info: session_info, messages }); } SwarmEvent::PeerAdded(peer_id) => { trace!(target: "net", ?peer_id, "Peer added"); - self.event_sender.notify(NetworkEvent::PeerAdded(peer_id)); + self.event_sender.notify(NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id))); self.metrics.tracked_peers.set(self.swarm.state().peers().num_known_peers() as f64); } SwarmEvent::PeerRemoved(peer_id) => { trace!(target: "net", ?peer_id, "Peer dropped"); - self.event_sender.notify(NetworkEvent::PeerRemoved(peer_id)); + self.event_sender.notify(NetworkEvent::Peer(PeerEvent::PeerRemoved(peer_id))); self.metrics.tracked_peers.set(self.swarm.state().peers().num_known_peers() as f64); } SwarmEvent::SessionClosed { peer_id, remote_addr, error } => { @@ -768,7 +779,8 @@ impl NetworkManager { .saturating_sub(1) as f64, ); - self.event_sender.notify(NetworkEvent::SessionClosed { peer_id, reason }); + self.event_sender + .notify(NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, reason })); } SwarmEvent::IncomingPendingSessionClosed { remote_addr, error } => { trace!( @@ -981,7 +993,7 @@ impl NetworkManager { } } -impl Future for NetworkManager { +impl Future for NetworkManager { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/crates/net/network/src/message.rs b/crates/net/network/src/message.rs index 6b8287fe51c..ff5093b6732 100644 --- a/crates/net/network/src/message.rs +++ b/crates/net/network/src/message.rs @@ -3,58 +3,59 @@ //! An `RLPx` stream is multiplexed via the prepended message-id of a framed message. //! 
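The swarm-event hunks above also rework the event enum: lifecycle notifications are now nested under `NetworkEvent::Peer(PeerEvent::..)`, and established sessions are reported as `ActivePeerSession` carrying a `SessionInfo`. A self-contained model of the new shape (field types are stand-ins, not reth's; the real types live in `reth_network_api`):

```
#[derive(Debug)]
struct SessionInfo {
    peer_id: u64, // stand-in for `PeerId`
    client_version: String,
}

#[derive(Debug)]
enum PeerEvent {
    PeerAdded(u64),
    PeerRemoved(u64),
    SessionEstablished(SessionInfo),
    SessionClosed { peer_id: u64 },
}

#[derive(Debug)]
enum NetworkEvent<Msg> {
    // Established sessions keep their request channel at the top level...
    ActivePeerSession { info: SessionInfo, messages: Msg },
    // ...while pure lifecycle notifications are nested under `Peer`.
    Peer(PeerEvent),
}

fn on_event(event: NetworkEvent<()>) {
    match event {
        NetworkEvent::ActivePeerSession { info, .. } => {
            println!("session established: {info:?}")
        }
        NetworkEvent::Peer(event) => println!("peer event: {event:?}"),
    }
}

fn main() {
    let info = SessionInfo { peer_id: 1, client_version: "example/v0".into() };
    on_event(NetworkEvent::ActivePeerSession { info, messages: () });
    on_event(NetworkEvent::Peer(PeerEvent::PeerRemoved(1)));
}
```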
Capabilities are exchanged via the `RLPx` `Hello` message as pairs of `(id, version)`, -use std::{ - sync::Arc, - task::{ready, Context, Poll}, -}; - +use alloy_consensus::BlockHeader; use alloy_primitives::{Bytes, B256}; use futures::FutureExt; use reth_eth_wire::{ capability::RawCapabilityMessage, message::RequestPair, BlockBodies, BlockHeaders, EthMessage, - GetBlockBodies, GetBlockHeaders, NewBlock, NewBlockHashes, NewPooledTransactionHashes, - NodeData, PooledTransactions, Receipts, SharedTransactions, Transactions, + EthNetworkPrimitives, GetBlockBodies, GetBlockHeaders, NetworkPrimitives, NewBlock, + NewBlockHashes, NewPooledTransactionHashes, NodeData, PooledTransactions, Receipts, + SharedTransactions, Transactions, }; use reth_network_api::PeerRequest; use reth_network_p2p::error::{RequestError, RequestResult}; -use reth_primitives::{BlockBody, Header, PooledTransactionsElement, ReceiptWithBloom}; +use reth_primitives::ReceiptWithBloom; +use std::{ + sync::Arc, + task::{ready, Context, Poll}, +}; use tokio::sync::oneshot; /// Internal form of a `NewBlock` message #[derive(Debug, Clone)] -pub struct NewBlockMessage { +pub struct NewBlockMessage { /// Hash of the block pub hash: B256, /// Raw received message - pub block: Arc, + pub block: Arc>, } // === impl NewBlockMessage === -impl NewBlockMessage { +impl NewBlockMessage { /// Returns the block number of the block pub fn number(&self) -> u64 { - self.block.block.header.number + self.block.block.header().number() } } /// All Bi-directional eth-message variants that can be sent to a session or received from a /// session. #[derive(Debug)] -pub enum PeerMessage { +pub enum PeerMessage { /// Announce new block hashes NewBlockHashes(NewBlockHashes), /// Broadcast new block. - NewBlock(NewBlockMessage), + NewBlock(NewBlockMessage), /// Received transactions _from_ the peer - ReceivedTransaction(Transactions), + ReceivedTransaction(Transactions), /// Broadcast transactions _from_ local _to_ a peer. - SendTransactions(SharedTransactions), + SendTransactions(SharedTransactions), /// Send new pooled transactions PooledTransactions(NewPooledTransactionHashes), /// All `eth` request variants. - EthRequest(PeerRequest), - /// Other than eth namespace message + EthRequest(PeerRequest), + /// Any other or manually crafted eth message. Other(RawCapabilityMessage), } @@ -74,21 +75,21 @@ pub enum BlockRequest { /// Corresponding variant for [`PeerRequest`]. #[derive(Debug)] -pub enum PeerResponse { +pub enum PeerResponse { /// Represents a response to a request for block headers. BlockHeaders { /// The receiver channel for the response to a block headers request. - response: oneshot::Receiver>, + response: oneshot::Receiver>>, }, /// Represents a response to a request for block bodies. BlockBodies { /// The receiver channel for the response to a block bodies request. - response: oneshot::Receiver>, + response: oneshot::Receiver>>, }, /// Represents a response to a request for pooled transactions. PooledTransactions { /// The receiver channel for the response to a pooled transactions request. - response: oneshot::Receiver>, + response: oneshot::Receiver>>, }, /// Represents a response to a request for `NodeData`. NodeData { @@ -104,9 +105,9 @@ pub enum PeerResponse { // === impl PeerResponse === -impl PeerResponse { +impl PeerResponse { /// Polls the type to completion. - pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { macro_rules! 
poll_request { ($response:ident, $item:ident, $cx:ident) => { match ready!($response.poll_unpin($cx)) { @@ -139,24 +140,24 @@ impl PeerResponse { /// All response variants for [`PeerResponse`] #[derive(Debug)] -pub enum PeerResponseResult { +pub enum PeerResponseResult { /// Represents a result containing block headers or an error. - BlockHeaders(RequestResult>), + BlockHeaders(RequestResult>), /// Represents a result containing block bodies or an error. - BlockBodies(RequestResult>), + BlockBodies(RequestResult>), /// Represents a result containing pooled transactions or an error. - PooledTransactions(RequestResult>), + PooledTransactions(RequestResult>), /// Represents a result containing node data or an error. NodeData(RequestResult>), /// Represents a result containing receipts or an error. - Receipts(RequestResult>>), + Receipts(RequestResult>>>), } // === impl PeerResponseResult === -impl PeerResponseResult { +impl PeerResponseResult { /// Converts this response into an [`EthMessage`] - pub fn try_into_message(self, id: u64) -> RequestResult { + pub fn try_into_message(self, id: u64) -> RequestResult> { macro_rules! to_message { ($response:ident, $item:ident, $request_id:ident) => { match $response { diff --git a/crates/net/network/src/metrics.rs b/crates/net/network/src/metrics.rs index 4333cf1408b..bda5f84c76b 100644 --- a/crates/net/network/src/metrics.rs +++ b/crates/net/network/src/metrics.rs @@ -85,6 +85,8 @@ pub struct SessionManagerMetrics { pub(crate) total_dial_successes: Counter, /// Number of dropped outgoing peer messages. pub(crate) total_outgoing_peer_messages_dropped: Counter, + /// Number of queued outgoing messages + pub(crate) queued_outgoing_messages: Gauge, } /// Metrics for the [`TransactionsManager`](crate::transactions::TransactionsManager). 
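The new `queued_outgoing_messages` gauge is kept in lock-step with each session's outgoing queue (see the `QueuedOutgoingMessages` wrapper later in this diff). A runnable sketch of the wrapper pattern, using the same `metrics`-crate calls that appear in the hunks (`Gauge::noop()` is the same stand-in the tests use):

```
use metrics::Gauge;
use std::collections::VecDeque;

/// A queue whose depth is mirrored into a metrics gauge, as
/// `QueuedOutgoingMessages` does for the session's outgoing messages.
struct TrackedQueue<T> {
    items: VecDeque<T>,
    depth: Gauge,
}

impl<T> TrackedQueue<T> {
    fn new(depth: Gauge) -> Self {
        Self { items: VecDeque::new(), depth }
    }

    fn push_back(&mut self, item: T) {
        self.items.push_back(item);
        // keep the metric in lock-step with the queue length
        self.depth.increment(1);
    }

    fn pop_front(&mut self) -> Option<T> {
        self.items.pop_front().inspect(|_| self.depth.decrement(1))
    }
}

fn main() {
    let mut queue = TrackedQueue::new(Gauge::noop());
    queue.push_back("msg");
    assert_eq!(queue.pop_front(), Some("msg"));
    assert_eq!(queue.pop_front(), None);
}
```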
diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 594ad4d155d..68c57724f0d 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -1,61 +1,61 @@ -use std::{ - net::SocketAddr, - sync::{ - atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, - Arc, - }, +use crate::{ + config::NetworkMode, message::PeerMessage, protocol::RlpxSubProtocol, + swarm::NetworkConnectionState, transactions::TransactionsHandle, FetchClient, }; - use alloy_primitives::B256; use enr::Enr; +use futures::StreamExt; use parking_lot::Mutex; use reth_discv4::{Discv4, NatResolver}; use reth_discv5::Discv5; -use reth_eth_wire::{DisconnectReason, NewBlock, NewPooledTransactionHashes, SharedTransactions}; +use reth_eth_wire::{ + DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, NewBlock, + NewPooledTransactionHashes, SharedTransactions, +}; +use reth_ethereum_forks::Head; use reth_network_api::{ + events::{NetworkPeersEvents, PeerEvent, PeerEventStream}, test_utils::{PeersHandle, PeersHandleProvider}, BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent, NetworkEventListenerProvider, NetworkInfo, NetworkStatus, PeerInfo, PeerRequest, Peers, PeersInfo, }; -use reth_network_p2p::{ - sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}, - BlockClient, -}; +use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState, SyncStateProvider}; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::{PeerAddr, PeerKind, Reputation, ReputationChangeKind}; -use reth_primitives::{Head, TransactionSigned}; use reth_tokio_util::{EventSender, EventStream}; use secp256k1::SecretKey; +use std::{ + net::SocketAddr, + sync::{ + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, + Arc, + }, +}; use tokio::sync::{ mpsc::{self, UnboundedSender}, oneshot, }; use tokio_stream::wrappers::UnboundedReceiverStream; -use crate::{ - config::NetworkMode, protocol::RlpxSubProtocol, swarm::NetworkConnectionState, - transactions::TransactionsHandle, FetchClient, -}; - /// A _shareable_ network frontend. Used to interact with the network. /// /// See also [`NetworkManager`](crate::NetworkManager). #[derive(Clone, Debug)] -pub struct NetworkHandle { +pub struct NetworkHandle { /// The Arc'ed delegate that contains the state. - inner: Arc, + inner: Arc>, } // === impl NetworkHandle === -impl NetworkHandle { +impl NetworkHandle { /// Creates a single new instance. #[allow(clippy::too_many_arguments)] pub(crate) fn new( num_active_peers: Arc, listener_address: Arc>, - to_manager_tx: UnboundedSender, + to_manager_tx: UnboundedSender>, secret_key: SecretKey, local_peer_id: PeerId, peers: PeersHandle, @@ -64,7 +64,7 @@ impl NetworkHandle { tx_gossip_disabled: bool, discv4: Option, discv5: Option, - event_sender: EventSender, + event_sender: EventSender>>, nat: Option, ) -> Self { let inner = NetworkInner { @@ -92,7 +92,7 @@ impl NetworkHandle { &self.inner.local_peer_id } - fn manager(&self) -> &UnboundedSender { + fn manager(&self) -> &UnboundedSender> { &self.inner.to_manager_tx } @@ -102,7 +102,7 @@ impl NetworkHandle { } /// Sends a [`NetworkHandleMessage`] to the manager - pub(crate) fn send_message(&self, msg: NetworkHandleMessage) { + pub(crate) fn send_message(&self, msg: NetworkHandleMessage) { let _ = self.inner.to_manager_tx.send(msg); } @@ -116,12 +116,12 @@ impl NetworkHandle { /// Caution: in `PoS` this is a noop because new blocks are no longer announced over devp2p. 
/// Instead they are sent to the node by CL and can be requested over devp2p. /// Broadcasting new blocks is considered a protocol violation. - pub fn announce_block(&self, block: NewBlock, hash: B256) { + pub fn announce_block(&self, block: NewBlock, hash: B256) { self.send_message(NetworkHandleMessage::AnnounceBlock(block, hash)) } /// Sends a [`PeerRequest`] to the given peer's session. - pub fn send_request(&self, peer_id: PeerId, request: PeerRequest) { + pub fn send_request(&self, peer_id: PeerId, request: PeerRequest) { self.send_message(NetworkHandleMessage::EthRequest { peer_id, request }) } @@ -131,17 +131,22 @@ impl NetworkHandle { } /// Send full transactions to the peer - pub fn send_transactions(&self, peer_id: PeerId, msg: Vec>) { + pub fn send_transactions(&self, peer_id: PeerId, msg: Vec>) { self.send_message(NetworkHandleMessage::SendTransaction { peer_id, msg: SharedTransactions(msg), }) } + /// Send eth message to the peer. + pub fn send_eth_message(&self, peer_id: PeerId, message: PeerMessage) { + self.send_message(NetworkHandleMessage::EthMessage { peer_id, message }) + } + /// Send message to get the [`TransactionsHandle`]. /// /// Returns `None` if no transaction task is installed. - pub async fn transactions_handle(&self) -> Option { + pub async fn transactions_handle(&self) -> Option> { let (tx, rx) = oneshot::channel(); let _ = self.manager().send(NetworkHandleMessage::GetTransactionsHandle(tx)); rx.await.unwrap() @@ -189,8 +194,19 @@ impl NetworkHandle { // === API Implementations === -impl NetworkEventListenerProvider for NetworkHandle { - fn event_listener(&self) -> EventStream { +impl NetworkPeersEvents for NetworkHandle { + /// Returns an event stream of peer-specific network events. + fn peer_events(&self) -> PeerEventStream { + let peer_events = self.inner.event_sender.new_listener().map(|event| match event { + NetworkEvent::Peer(peer_event) => peer_event, + NetworkEvent::ActivePeerSession { info, .. 
} => PeerEvent::SessionEstablished(info), + }); + PeerEventStream::new(peer_events) + } +} + +impl NetworkEventListenerProvider> for NetworkHandle { + fn event_listener(&self) -> EventStream>> { self.inner.event_sender.new_listener() } @@ -201,13 +217,13 @@ impl NetworkEventListenerProvider for NetworkHandle { } } -impl NetworkProtocols for NetworkHandle { +impl NetworkProtocols for NetworkHandle { fn add_rlpx_sub_protocol(&self, protocol: RlpxSubProtocol) { self.send_message(NetworkHandleMessage::AddRlpxSubProtocol(protocol)) } } -impl PeersInfo for NetworkHandle { +impl PeersInfo for NetworkHandle { fn num_connected_peers(&self) -> usize { self.inner.num_active_peers.load(Ordering::Relaxed) } @@ -252,7 +268,7 @@ impl PeersInfo for NetworkHandle { } } -impl Peers for NetworkHandle { +impl Peers for NetworkHandle { fn add_trusted_peer_id(&self, peer: PeerId) { self.send_message(NetworkHandleMessage::AddTrustedPeerId(peer)); } @@ -340,13 +356,13 @@ impl Peers for NetworkHandle { } } -impl PeersHandleProvider for NetworkHandle { +impl PeersHandleProvider for NetworkHandle { fn peers_handle(&self) -> &PeersHandle { &self.inner.peers } } -impl NetworkInfo for NetworkHandle { +impl NetworkInfo for NetworkHandle { fn local_addr(&self) -> SocketAddr { *self.inner.listener_address.lock() } @@ -370,7 +386,7 @@ impl NetworkInfo for NetworkHandle { } } -impl SyncStateProvider for NetworkHandle { +impl SyncStateProvider for NetworkHandle { fn is_syncing(&self) -> bool { self.inner.is_syncing.load(Ordering::Relaxed) } @@ -383,7 +399,7 @@ impl SyncStateProvider for NetworkHandle { } } -impl NetworkSyncUpdater for NetworkHandle { +impl NetworkSyncUpdater for NetworkHandle { fn update_sync_state(&self, state: SyncState) { let future_state = state.is_syncing(); let prev_state = self.inner.is_syncing.swap(future_state, Ordering::Relaxed); @@ -399,8 +415,10 @@ impl NetworkSyncUpdater for NetworkHandle { } } -impl BlockDownloaderProvider for NetworkHandle { - async fn fetch_client(&self) -> Result { +impl BlockDownloaderProvider for NetworkHandle { + type Client = FetchClient; + + async fn fetch_client(&self) -> Result { let (tx, rx) = oneshot::channel(); let _ = self.manager().send(NetworkHandleMessage::FetchClient(tx)); rx.await @@ -408,11 +426,11 @@ impl BlockDownloaderProvider for NetworkHandle { } #[derive(Debug)] -struct NetworkInner { +struct NetworkInner { /// Number of active peer sessions the node's currently handling. num_active_peers: Arc, /// Sender half of the message channel to the [`crate::NetworkManager`]. - to_manager_tx: UnboundedSender, + to_manager_tx: UnboundedSender>, /// The local address that accepts incoming connections. listener_address: Arc>, /// The secret key used for authenticating sessions. @@ -436,7 +454,7 @@ struct NetworkInner { /// The instance of the discv5 service discv5: Option, /// Sender for high level network events. - event_sender: EventSender, + event_sender: EventSender>>, /// The NAT resolver nat: Option, } @@ -449,7 +467,7 @@ pub trait NetworkProtocols: Send + Sync { /// Internal messages that can be passed to the [`NetworkManager`](crate::NetworkManager). #[derive(Debug)] -pub(crate) enum NetworkHandleMessage { +pub(crate) enum NetworkHandleMessage { /// Marks a peer as trusted. AddTrustedPeerId(PeerId), /// Adds an address for a peer, including its ID, kind, and socket address. @@ -459,13 +477,13 @@ pub(crate) enum NetworkHandleMessage { /// Disconnects a connection to a peer if it exists, optionally providing a disconnect reason. 
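The `peer_events` adapter above is a straightforward stream projection: every broad `NetworkEvent` is mapped down to a `PeerEvent`. The same pattern in self-contained, runnable form (tokio, tokio-stream and futures assumed as dependencies; toy event types):

```
use futures::StreamExt;
use tokio_stream::wrappers::UnboundedReceiverStream;

#[derive(Debug, Clone)]
enum Broad {
    Lifecycle(u32),
    Session { id: u32 },
}

#[tokio::main]
async fn main() {
    let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
    tx.send(Broad::Lifecycle(1)).unwrap();
    tx.send(Broad::Session { id: 7 }).unwrap();
    drop(tx); // close the channel so the stream ends

    // Project every broad event into the narrow form, as `peer_events` does
    // with `NetworkEvent` -> `PeerEvent`.
    let narrow = UnboundedReceiverStream::new(rx).map(|event| match event {
        Broad::Lifecycle(id) => id,
        Broad::Session { id } => id,
    });
    assert_eq!(narrow.collect::<Vec<_>>().await, vec![1, 7]);
}
```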
DisconnectPeer(PeerId, Option), /// Broadcasts an event to announce a new block to all nodes. - AnnounceBlock(NewBlock, B256), + AnnounceBlock(NewBlock, B256), /// Sends a list of transactions to the given peer. SendTransaction { /// The ID of the peer to which the transactions are sent. peer_id: PeerId, /// The shared transactions to send. - msg: SharedTransactions, + msg: SharedTransactions, }, /// Sends a list of transaction hashes to the given peer. SendPooledTransactionHashes { @@ -479,12 +497,19 @@ pub(crate) enum NetworkHandleMessage { /// The peer to send the request to. peer_id: PeerId, /// The request to send to the peer's sessions. - request: PeerRequest, + request: PeerRequest, + }, + /// Sends an `eth` protocol message to the peer. + EthMessage { + /// The peer to send the message to. + peer_id: PeerId, + /// The message to send to the peer's sessions. + message: PeerMessage, }, /// Applies a reputation change to the given peer. ReputationChange(PeerId, ReputationChangeKind), /// Returns the client that can be used to interact with the network. - FetchClient(oneshot::Sender), + FetchClient(oneshot::Sender>), /// Applies a status update. StatusUpdate { /// The head status to apply. @@ -503,7 +528,7 @@ pub(crate) enum NetworkHandleMessage { /// Gets the reputation for a specific peer via a oneshot sender. GetReputationById(PeerId, oneshot::Sender>), /// Retrieves the `TransactionsHandle` via a oneshot sender. - GetTransactionsHandle(oneshot::Sender>), + GetTransactionsHandle(oneshot::Sender>>), /// Initiates a graceful shutdown of the network via a oneshot sender. Shutdown(oneshot::Sender<()>), /// Sets the network state between hibernation and active. diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index 3d5ff7a0d43..f8d18e15994 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -1,16 +1,13 @@ //! Peer related implementations -use std::{ - collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, - fmt::Display, - io::{self}, - net::{IpAddr, SocketAddr}, - task::{Context, Poll}, - time::Duration, +use crate::{ + error::SessionError, + session::{Direction, PendingSessionHandshakeError}, + swarm::NetworkConnectionState, }; - use futures::StreamExt; use reth_eth_wire::{errors::EthStreamError, DisconnectReason}; +use reth_ethereum_forks::ForkId; use reth_net_banlist::BanList; use reth_network_api::test_utils::{PeerCommand, PeersHandle}; use reth_network_peers::{NodeRecord, PeerId}; @@ -22,7 +19,14 @@ use reth_network_types::{ ConnectionsConfig, Peer, PeerAddr, PeerConnectionState, PeerKind, PeersConfig, ReputationChangeKind, ReputationChangeOutcome, ReputationChangeWeights, }; -use reth_primitives::ForkId; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, + fmt::Display, + io::{self}, + net::{IpAddr, SocketAddr}, + task::{Context, Poll}, + time::Duration, +}; use thiserror::Error; use tokio::{ sync::mpsc, @@ -31,12 +35,6 @@ use tokio::{ use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{trace, warn}; -use crate::{ - error::SessionError, - session::{Direction, PendingSessionHandshakeError}, - swarm::NetworkConnectionState, -}; - /// Maintains the state of _all_ the peers known to the network. /// /// This is supposed to be owned by the network itself, but can be reached via the [`PeersHandle`]. 
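The new `EthMessage` command, paired with `send_eth_message` on the handle, lets callers push a manually crafted message into a peer's session. A hedged sketch only: the angle-bracketed generics are reconstructed (this rendering of the diff drops them), the module path for `PeerMessage` is assumed, and the `payload` field name on `RawCapabilityMessage` is a guess; only `id` is confirmed by the session logging later in the diff.

```
use alloy_primitives::Bytes;
use reth_eth_wire::capability::RawCapabilityMessage;
use reth_network::{message::PeerMessage, NetworkHandle, NetworkPrimitives};
use reth_network_peers::PeerId;

fn send_custom_message<N: NetworkPrimitives>(handle: &NetworkHandle<N>, peer_id: PeerId) {
    // Assumed field layout: `id` (seen in the logging below) plus a raw
    // RLP `payload`.
    let raw = RawCapabilityMessage { id: 0x11, payload: Bytes::from_static(&[0x01]).into() };
    // Routed through `NetworkHandleMessage::EthMessage` and, inside the
    // session, queued as `OutgoingMessage::Raw`.
    handle.send_eth_message(peer_id, PeerMessage::Other(raw));
}
```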
@@ -84,6 +82,8 @@ pub struct PeersManager { max_backoff_count: u8, /// Tracks the connection state of the node net_connection_state: NetworkConnectionState, + /// How long to temporarily ban ip on an incoming connection attempt. + incoming_ip_throttle_duration: Duration, } impl PeersManager { @@ -100,6 +100,7 @@ impl PeersManager { trusted_nodes_only, basic_nodes, max_backoff_count, + incoming_ip_throttle_duration, } = config; let (manager_tx, handle_rx) = mpsc::unbounded_channel(); let now = Instant::now(); @@ -148,6 +149,7 @@ impl PeersManager { last_tick: Instant::now(), max_backoff_count, net_connection_state: NetworkConnectionState::default(), + incoming_ip_throttle_duration, } } @@ -218,6 +220,11 @@ impl PeersManager { self.backed_off_peers.len() } + /// Returns the number of idle trusted peers. + fn num_idle_trusted_peers(&self) -> usize { + self.peers.iter().filter(|(_, peer)| peer.kind.is_trusted() && peer.state.is_idle()).count() + } + /// Invoked when a new _incoming_ tcp connection is accepted. /// /// returns an error if the inbound ip address is on the ban list @@ -229,12 +236,40 @@ impl PeersManager { return Err(InboundConnectionError::IpBanned) } - if !self.connection_info.has_in_capacity() && self.trusted_peer_ids.is_empty() { - // if we don't have any inbound slots and no trusted peers, we don't accept any new - // connections + // check if we even have slots for a new incoming connection + if !self.connection_info.has_in_capacity() { + if self.trusted_peer_ids.is_empty() { + // if we don't have any incoming slots and no trusted peers, we don't accept any new + // connections + return Err(InboundConnectionError::ExceedsCapacity) + } + + // there's an edge case here where no incoming connections besides from trusted peers + // are allowed (max_inbound == 0), in which case we still need to allow new pending + // incoming connections until all trusted peers are connected. + let num_idle_trusted_peers = self.num_idle_trusted_peers(); + if num_idle_trusted_peers <= self.trusted_peer_ids.len() { + // we still want to limit concurrent pending connections + let max_inbound = + self.trusted_peer_ids.len().max(self.connection_info.config.max_inbound); + if self.connection_info.num_pending_in <= max_inbound { + self.connection_info.inc_pending_in(); + } + return Ok(()) + } + + // all trusted peers are either connected or connecting return Err(InboundConnectionError::ExceedsCapacity) } + // also cap the incoming connections we can process at once + if !self.connection_info.has_in_pending_capacity() { + return Err(InboundConnectionError::ExceedsCapacity) + } + + // apply the rate limit + self.throttle_incoming_ip(addr); + self.connection_info.inc_pending_in(); Ok(()) } @@ -340,7 +375,7 @@ impl PeersManager { if peer.is_trusted() || peer.is_static() { // For misbehaving trusted or static peers, we provide a bit more leeway when // penalizing them. - ban_duration = self.backoff_durations.medium; + ban_duration = self.backoff_durations.low / 2; } } @@ -353,6 +388,12 @@ impl PeersManager { self.ban_list.ban_ip_until(ip, std::time::Instant::now() + self.ban_duration); } + /// Bans the IP temporarily to rate limit inbound connection attempts per IP. 
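The admission rule above is easiest to see with plain counts: exhausted inbound slots no longer reject unconditionally; the door stays open while trusted peers may still need to dial in. A simplified, runnable model (IP bans, throttling and the pending-counter bookkeeping of the real implementation are elided):

```
#[derive(Debug, PartialEq)]
enum Admission {
    Accept,
    ExceedsCapacity,
}

fn admit(
    num_inbound: usize,
    num_pending_in: usize,
    max_inbound: usize,
    num_trusted: usize,
    num_idle_trusted: usize,
) -> Admission {
    if num_inbound >= max_inbound {
        // No regular inbound slot is free. Keep the door open only while
        // trusted peers may still need to connect (covers max_inbound == 0).
        if num_trusted > 0 && num_idle_trusted <= num_trusted {
            // The real code additionally caps *counted* pending sessions at
            // max(#trusted, max_inbound) but still accepts past that point.
            return Admission::Accept;
        }
        return Admission::ExceedsCapacity;
    }
    // Regular path: also bound how many pending (not yet authenticated)
    // sessions are processed at once.
    if num_pending_in >= max_inbound {
        return Admission::ExceedsCapacity;
    }
    Admission::Accept
}

fn main() {
    // `max_inbound == 0` with an idle trusted peer: still admitted.
    assert_eq!(admit(0, 0, 0, 1, 1), Admission::Accept);
    // No trusted peers and inbound slots exhausted: rejected.
    assert_eq!(admit(1, 0, 1, 0, 0), Admission::ExceedsCapacity);
    // Free slots but pending processing at capacity: rejected.
    assert_eq!(admit(0, 2, 2, 0, 0), Admission::ExceedsCapacity);
}
```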
+ fn throttle_incoming_ip(&mut self, ip: IpAddr) { + self.ban_list + .ban_ip_until(ip, std::time::Instant::now() + self.incoming_ip_throttle_duration); + } + /// Temporarily puts the peer in timeout by inserting it into the backedoff peers set fn backoff_peer_until(&mut self, peer_id: PeerId, until: std::time::Instant) { trace!(target: "net::peers", ?peer_id, "backing off"); @@ -968,17 +1009,22 @@ impl ConnectionInfo { Self { config, num_outbound: 0, num_pending_out: 0, num_inbound: 0, num_pending_in: 0 } } - /// Returns `true` if there's still capacity for a new outgoing connection. + /// Returns `true` if there's still capacity to perform an outgoing connection. const fn has_out_capacity(&self) -> bool { self.num_pending_out < self.config.max_concurrent_outbound_dials && self.num_outbound < self.config.max_outbound } - /// Returns `true` if there's still capacity for a new incoming connection. + /// Returns `true` if there's still capacity to accept a new incoming connection. const fn has_in_capacity(&self) -> bool { self.num_inbound < self.config.max_inbound } + /// Returns `true` if we can handle an additional incoming pending connection. + const fn has_in_pending_capacity(&self) -> bool { + self.num_pending_in < self.config.max_inbound + } + fn decr_state(&mut self, state: PeerConnectionState) { match state { PeerConnectionState::Idle => {} @@ -1094,15 +1140,6 @@ impl Display for InboundConnectionError { #[cfg(test)] mod tests { - use std::{ - future::{poll_fn, Future}, - io, - net::{IpAddr, Ipv4Addr, SocketAddr}, - pin::Pin, - task::{Context, Poll}, - time::Duration, - }; - use alloy_primitives::B512; use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError, P2PHandshakeError, P2PStreamError}, @@ -1114,6 +1151,14 @@ mod tests { use reth_network_types::{ peers::reputation::DEFAULT_REPUTATION, BackoffKind, ReputationChangeKind, }; + use std::{ + future::{poll_fn, Future}, + io, + net::{IpAddr, Ipv4Addr, SocketAddr}, + pin::Pin, + task::{Context, Poll}, + time::Duration, + }; use url::Host; use super::PeersManager; @@ -1597,6 +1642,23 @@ mod tests { assert_eq!(peers.connection_info.num_pending_in, 0); } + #[tokio::test] + async fn test_reject_incoming_at_pending_capacity() { + let mut peers = PeersManager::default(); + + for count in 1..=peers.connection_info.config.max_inbound { + let socket_addr = + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, count as u8)), 8008); + assert!(peers.on_incoming_pending_session(socket_addr.ip()).is_ok()); + assert_eq!(peers.connection_info.num_pending_in, count); + } + assert!(peers.connection_info.has_in_capacity()); + assert!(!peers.connection_info.has_in_pending_capacity()); + + let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 100)), 8008); + assert!(peers.on_incoming_pending_session(socket_addr.ip()).is_err()); + } + #[tokio::test] async fn test_closed_incoming() { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); @@ -2278,6 +2340,39 @@ mod tests { ); } + #[tokio::test] + async fn test_incoming_rate_limit() { + let config = PeersConfig { + incoming_ip_throttle_duration: Duration::from_millis(100), + ..PeersConfig::test() + }; + let mut peers = PeersManager::new(config); + + let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(168, 0, 1, 2)), 8009); + assert!(peers.on_incoming_pending_session(addr.ip()).is_ok()); + assert_eq!( + peers.on_incoming_pending_session(addr.ip()).unwrap_err(), + InboundConnectionError::IpBanned + ); + + peers.release_interval.reset_immediately(); + 
tokio::time::sleep(peers.incoming_ip_throttle_duration).await; + + // await unban + poll_fn(|cx| loop { + if peers.poll(cx).is_pending() { + return Poll::Ready(()); + } + }) + .await; + + assert!(peers.on_incoming_pending_session(addr.ip()).is_ok()); + assert_eq!( + peers.on_incoming_pending_session(addr.ip()).unwrap_err(), + InboundConnectionError::IpBanned + ); + } + #[tokio::test] async fn test_tick() { let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)); diff --git a/crates/net/network/src/protocol.rs b/crates/net/network/src/protocol.rs index eeffd1c95f4..aa0749c2c7b 100644 --- a/crates/net/network/src/protocol.rs +++ b/crates/net/network/src/protocol.rs @@ -2,19 +2,18 @@ //! //! See also -use std::{ - fmt, - net::SocketAddr, - ops::{Deref, DerefMut}, - pin::Pin, -}; - use alloy_primitives::bytes::BytesMut; use futures::Stream; use reth_eth_wire::{ capability::SharedCapabilities, multiplex::ProtocolConnection, protocol::Protocol, }; use reth_network_api::{Direction, PeerId}; +use std::{ + fmt, + net::SocketAddr, + ops::{Deref, DerefMut}, + pin::Pin, +}; /// A trait that allows to offer additional RLPx-based application-level protocols when establishing /// a peer-to-peer connection. diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index e83f5d9f125..7b7837090cf 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -11,17 +11,29 @@ use std::{ time::{Duration, Instant}, }; +use crate::{ + message::{NewBlockMessage, PeerMessage, PeerResponse, PeerResponseResult}, + session::{ + conn::EthRlpxConnection, + handle::{ActiveSessionMessage, SessionCommand}, + SessionId, + }, +}; +use alloy_primitives::Sealable; use futures::{stream::Fuse, SinkExt, StreamExt}; +use metrics::Gauge; use reth_eth_wire::{ + capability::RawCapabilityMessage, errors::{EthHandshakeError, EthStreamError, P2PStreamError}, message::{EthBroadcastMessage, RequestPair}, - Capabilities, DisconnectP2P, DisconnectReason, EthMessage, + Capabilities, DisconnectP2P, DisconnectReason, EthMessage, NetworkPrimitives, }; use reth_metrics::common::mpsc::MeteredPollSender; use reth_network_api::PeerRequest; use reth_network_p2p::error::RequestError; use reth_network_peers::PeerId; use reth_network_types::session::config::INITIAL_REQUEST_TIMEOUT; +use reth_primitives_traits::Block; use rustc_hash::FxHashMap; use tokio::{ sync::{mpsc::error::TrySendError, oneshot}, @@ -31,15 +43,6 @@ use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::PollSender; use tracing::{debug, trace}; -use crate::{ - message::{NewBlockMessage, PeerMessage, PeerResponse, PeerResponseResult}, - session::{ - conn::EthRlpxConnection, - handle::{ActiveSessionMessage, SessionCommand}, - SessionId, - }, -}; - // Constants for timeout updating. /// Minimum timeout value @@ -61,11 +64,11 @@ const TIMEOUT_SCALING: u32 = 3; /// - incoming requests/broadcasts _from remote_ via the connection /// - responses for handled ETH requests received from the remote peer. #[allow(dead_code)] -pub(crate) struct ActiveSession { +pub(crate) struct ActiveSession { /// Keeps track of request ids. pub(crate) next_id: u64, /// The underlying connection. - pub(crate) conn: EthRlpxConnection, + pub(crate) conn: EthRlpxConnection, /// Identifier of the node we're connected to. pub(crate) remote_peer_id: PeerId, /// The address we're connected to. 
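The throttle itself is just a temporary per-IP ban, which is exactly what `test_incoming_rate_limit` above exercises. A self-contained sketch of the mechanism using only the standard library (names are illustrative):

```
use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr};
use std::time::{Duration, Instant};

struct IpThrottle {
    window: Duration,
    banned_until: HashMap<IpAddr, Instant>,
}

impl IpThrottle {
    fn on_incoming(&mut self, ip: IpAddr) -> Result<(), &'static str> {
        let now = Instant::now();
        if self.banned_until.get(&ip).is_some_and(|until| *until > now) {
            return Err("IpBanned");
        }
        // admit this attempt, then rate limit the IP for the window
        self.banned_until.insert(ip, now + self.window);
        Ok(())
    }
}

fn main() {
    let mut throttle =
        IpThrottle { window: Duration::from_millis(100), banned_until: HashMap::new() };
    let ip = IpAddr::V4(Ipv4Addr::new(168, 0, 1, 2));
    assert!(throttle.on_incoming(ip).is_ok());
    // an immediate retry from the same IP is rejected, matching what
    // `test_incoming_rate_limit` asserts above
    assert!(throttle.on_incoming(ip).is_err());
}
```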
@@ -75,19 +78,19 @@ pub(crate) struct ActiveSession { /// Internal identifier of this session pub(crate) session_id: SessionId, /// Incoming commands from the manager - pub(crate) commands_rx: ReceiverStream, + pub(crate) commands_rx: ReceiverStream>, /// Sink to send messages to the [`SessionManager`](super::SessionManager). - pub(crate) to_session_manager: MeteredPollSender, + pub(crate) to_session_manager: MeteredPollSender>, /// A message that needs to be delivered to the session manager - pub(crate) pending_message_to_session: Option, + pub(crate) pending_message_to_session: Option>, /// Incoming internal requests which are delegated to the remote peer. - pub(crate) internal_request_tx: Fuse>, + pub(crate) internal_request_tx: Fuse>>, /// All requests sent to the remote peer we're waiting on a response - pub(crate) inflight_requests: FxHashMap, + pub(crate) inflight_requests: FxHashMap>>, /// All requests that were sent by the remote peer and we're waiting on an internal response - pub(crate) received_requests_from_remote: Vec, + pub(crate) received_requests_from_remote: Vec>, /// Buffered messages that should be handled and sent to the peer. - pub(crate) queued_outgoing: VecDeque, + pub(crate) queued_outgoing: QueuedOutgoingMessages, /// The maximum time we wait for a response from a peer. pub(crate) internal_request_timeout: Arc, /// Interval when to check for timed out requests. @@ -96,10 +99,11 @@ pub(crate) struct ActiveSession { /// considered a protocol violation and the session will initiate a drop. pub(crate) protocol_breach_request_timeout: Duration, /// Used to reserve a slot to guarantee that the termination message is delivered - pub(crate) terminate_message: Option<(PollSender, ActiveSessionMessage)>, + pub(crate) terminate_message: + Option<(PollSender>, ActiveSessionMessage)>, } -impl ActiveSession { +impl ActiveSession { /// Returns `true` if the session is currently in the process of disconnecting fn is_disconnecting(&self) -> bool { self.conn.inner().is_disconnecting() @@ -121,7 +125,7 @@ impl ActiveSession { /// Handle a message read from the connection. /// /// Returns an error if the message is considered to be in violation of the protocol. - fn on_incoming_message(&mut self, msg: EthMessage) -> OnIncomingMessageOutcome { + fn on_incoming_message(&mut self, msg: EthMessage) -> OnIncomingMessageOutcome { /// A macro that handles an incoming request /// This creates a new channel and tries to send the sender half to the session while /// storing the receiver half internally so the pending response can be polled. @@ -181,7 +185,7 @@ impl ActiveSession { } EthMessage::NewBlock(msg) => { let block = - NewBlockMessage { hash: msg.block.header.hash_slow(), block: Arc::new(*msg) }; + NewBlockMessage { hash: msg.block.header().hash_slow(), block: Arc::new(*msg) }; self.try_emit_broadcast(PeerMessage::NewBlock(block)).into() } EthMessage::Transactions(msg) => { @@ -237,7 +241,7 @@ impl ActiveSession { } /// Handle an internal peer request that will be sent to the remote. 
- fn on_internal_peer_request(&mut self, request: PeerRequest, deadline: Instant) { + fn on_internal_peer_request(&mut self, request: PeerRequest, deadline: Instant) { let request_id = self.next_id(); let msg = request.create_request_message(request_id); self.queued_outgoing.push_back(msg.into()); @@ -250,7 +254,7 @@ impl ActiveSession { } /// Handle a message received from the internal network - fn on_internal_peer_message(&mut self, msg: PeerMessage) { + fn on_internal_peer_message(&mut self, msg: PeerMessage) { match msg { PeerMessage::NewBlockHashes(msg) => { self.queued_outgoing.push_back(EthMessage::NewBlockHashes(msg).into()); @@ -275,6 +279,7 @@ impl ActiveSession { } PeerMessage::Other(other) => { debug!(target: "net::session", message_id=%other.id, "Ignoring unsupported message"); + self.queued_outgoing.push_back(OutgoingMessage::Raw(other)); } } } @@ -288,7 +293,7 @@ impl ActiveSession { /// Handle a Response to the peer /// /// This will queue the response to be sent to the peer - fn handle_outgoing_response(&mut self, id: u64, resp: PeerResponseResult) { + fn handle_outgoing_response(&mut self, id: u64, resp: PeerResponseResult) { match resp.try_into_message(id) { Ok(msg) => { self.queued_outgoing.push_back(msg.into()); @@ -303,7 +308,7 @@ impl ActiveSession { /// /// Returns the message if the bounded channel is currently unable to handle this message. #[allow(clippy::result_large_err)] - fn try_emit_broadcast(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { + fn try_emit_broadcast(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { let Some(sender) = self.to_session_manager.inner().get_ref() else { return Ok(()) }; match sender @@ -329,7 +334,7 @@ impl ActiveSession { /// /// Returns the message if the bounded channel is currently unable to handle this message. #[allow(clippy::result_large_err)] - fn try_emit_request(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { + fn try_emit_request(&self, message: PeerMessage) -> Result<(), ActiveSessionMessage> { let Some(sender) = self.to_session_manager.inner().get_ref() else { return Ok(()) }; match sender @@ -469,7 +474,7 @@ impl ActiveSession { } } -impl Future for ActiveSession { +impl Future for ActiveSession { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { @@ -556,6 +561,7 @@ impl Future for ActiveSession { let res = match msg { OutgoingMessage::Eth(msg) => this.conn.start_send_unpin(msg), OutgoingMessage::Broadcast(msg) => this.conn.start_send_broadcast(msg), + OutgoingMessage::Raw(msg) => this.conn.start_send_raw(msg), }; if let Err(err) = res { debug!(target: "net::session", %err, remote_peer_id=?this.remote_peer_id, "failed to send message"); @@ -655,20 +661,20 @@ impl Future for ActiveSession { } /// Tracks a request received from the peer -pub(crate) struct ReceivedRequest { +pub(crate) struct ReceivedRequest { /// Protocol Identifier request_id: u64, /// Receiver half of the channel that's supposed to receive the proper response. - rx: PeerResponse, + rx: PeerResponse, /// Timestamp when we read this msg from the wire. 
#[allow(dead_code)] received: Instant, } /// A request that waits for a response from the peer -pub(crate) struct InflightRequest { +pub(crate) struct InflightRequest { /// Request we sent to peer and the internal response channel - request: RequestState, + request: RequestState, /// Instant when the request was sent timestamp: Instant, /// Time limit for the response @@ -677,7 +683,7 @@ pub(crate) struct InflightRequest { // === impl InflightRequest === -impl InflightRequest { +impl InflightRequest> { /// Returns true if the request is timed out #[inline] fn is_timed_out(&self, now: Instant) -> bool { @@ -702,17 +708,19 @@ impl InflightRequest { } /// All outcome variants when handling an incoming message -enum OnIncomingMessageOutcome { +enum OnIncomingMessageOutcome { /// Message successfully handled. Ok, /// Message is considered to be in violation of the protocol - BadMessage { error: EthStreamError, message: EthMessage }, + BadMessage { error: EthStreamError, message: EthMessage }, /// Currently no capacity to handle the message - NoCapacity(ActiveSessionMessage), + NoCapacity(ActiveSessionMessage), } -impl From> for OnIncomingMessageOutcome { - fn from(res: Result<(), ActiveSessionMessage>) -> Self { +impl From>> + for OnIncomingMessageOutcome +{ + fn from(res: Result<(), ActiveSessionMessage>) -> Self { match res { Ok(_) => Self::Ok, Err(msg) => Self::NoCapacity(msg), @@ -720,29 +728,31 @@ impl From> for OnIncomingMessageOutcome { } } -enum RequestState { +enum RequestState { /// Waiting for the response - Waiting(PeerRequest), + Waiting(R), /// Request already timed out TimedOut, } /// Outgoing messages that can be sent over the wire. -pub(crate) enum OutgoingMessage { +pub(crate) enum OutgoingMessage { /// A message that is owned. - Eth(EthMessage), + Eth(EthMessage), /// A message that may be shared by multiple sessions.
- Broadcast(EthBroadcastMessage), + Broadcast(EthBroadcastMessage), + /// A raw capability message + Raw(RawCapabilityMessage), } -impl From for OutgoingMessage { - fn from(value: EthMessage) -> Self { +impl From> for OutgoingMessage { + fn from(value: EthMessage) -> Self { Self::Eth(value) } } -impl From for OutgoingMessage { - fn from(value: EthBroadcastMessage) -> Self { +impl From> for OutgoingMessage { + fn from(value: EthBroadcastMessage) -> Self { Self::Broadcast(value) } } @@ -757,6 +767,32 @@ fn calculate_new_timeout(current_timeout: Duration, estimated_rtt: Duration) -> smoothened_timeout.clamp(MINIMUM_TIMEOUT, MAXIMUM_TIMEOUT) } + +/// A helper struct that wraps the queue of outgoing messages and a metric to track their count +pub(crate) struct QueuedOutgoingMessages { + messages: VecDeque>, + count: Gauge, +} + +impl QueuedOutgoingMessages { + pub(crate) const fn new(metric: Gauge) -> Self { + Self { messages: VecDeque::new(), count: metric } + } + + pub(crate) fn push_back(&mut self, message: OutgoingMessage) { + self.messages.push_back(message); + self.count.increment(1); + } + + pub(crate) fn pop_front(&mut self) -> Option> { + self.messages.pop_front().inspect(|_| self.count.decrement(1)) + } + + pub(crate) fn shrink_to_fit(&mut self) { + self.messages.shrink_to_fit(); + } +} + #[cfg(test)] mod tests { use super::*; @@ -764,8 +800,8 @@ mod tests { use reth_chainspec::MAINNET; use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ - EthStream, GetBlockBodies, HelloMessageWithProtocols, P2PStream, Status, StatusBuilder, - UnauthedEthStream, UnauthedP2PStream, + EthNetworkPrimitives, EthStream, GetBlockBodies, HelloMessageWithProtocols, P2PStream, + Status, StatusBuilder, UnauthedEthStream, UnauthedP2PStream, }; use reth_network_peers::pk2id; use reth_network_types::session::config::PROTOCOL_BREACH_REQUEST_TIMEOUT; @@ -781,11 +817,11 @@ mod tests { HelloMessageWithProtocols::builder(pk2id(&server_key.public_key(SECP256K1))).build() } - struct SessionBuilder { + struct SessionBuilder { _remote_capabilities: Arc, - active_session_tx: mpsc::Sender, - active_session_rx: ReceiverStream, - to_sessions: Vec>, + active_session_tx: mpsc::Sender>, + active_session_rx: ReceiverStream>, + to_sessions: Vec>>, secret_key: SecretKey, local_peer_id: PeerId, hello: HelloMessageWithProtocols, @@ -794,7 +830,7 @@ mod tests { next_id: usize, } - impl SessionBuilder { + impl SessionBuilder { fn next_id(&mut self) -> SessionId { let id = self.next_id; self.next_id += 1; @@ -808,7 +844,7 @@ mod tests { f: F, ) -> Pin + Send>> where - F: FnOnce(EthStream>>) -> O + Send + 'static, + F: FnOnce(EthStream>, N>) -> O + Send + 'static, O: Future + Send + Sync, { let status = self.status; @@ -831,7 +867,7 @@ mod tests { }) } - async fn connect_incoming(&mut self, stream: TcpStream) -> ActiveSession { + async fn connect_incoming(&mut self, stream: TcpStream) -> ActiveSession { let remote_addr = stream.local_addr().unwrap(); let session_id = self.next_id(); let (_disconnect_tx, disconnect_rx) = oneshot::channel(); @@ -882,7 +918,7 @@ mod tests { internal_request_tx: ReceiverStream::new(messages_rx).fuse(), inflight_requests: Default::default(), conn, - queued_outgoing: Default::default(), + queued_outgoing: QueuedOutgoingMessages::new(Gauge::noop()), received_requests_from_remote: Default::default(), internal_request_timeout_interval: tokio::time::interval( INITIAL_REQUEST_TIMEOUT, diff --git a/crates/net/network/src/session/conn.rs b/crates/net/network/src/session/conn.rs index 628c880c8ea..c948937a04d 
100644 --- a/crates/net/network/src/session/conn.rs +++ b/crates/net/network/src/session/conn.rs @@ -1,26 +1,26 @@ //! Connection types for a session -use std::{ - pin::Pin, - task::{Context, Poll}, -}; - use futures::{Sink, Stream}; use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ + capability::RawCapabilityMessage, errors::EthStreamError, message::EthBroadcastMessage, multiplex::{ProtocolProxy, RlpxSatelliteStream}, - EthMessage, EthStream, EthVersion, P2PStream, + EthMessage, EthNetworkPrimitives, EthStream, EthVersion, NetworkPrimitives, P2PStream, +}; +use std::{ + pin::Pin, + task::{Context, Poll}, }; use tokio::net::TcpStream; /// The type of the underlying peer network connection. -pub type EthPeerConnection = EthStream>>; +pub type EthPeerConnection = EthStream>, N>; /// Various connection types that at least support the ETH protocol. -pub type EthSatelliteConnection = - RlpxSatelliteStream, EthStream>; +pub type EthSatelliteConnection = + RlpxSatelliteStream, EthStream>; /// Connection types that support the ETH protocol. /// @@ -30,14 +30,14 @@ pub type EthSatelliteConnection = // This type is boxed because the underlying stream is ~6KB, // mostly coming from `P2PStream`'s `snap::Encoder` (2072), and `ECIESStream` (3600). #[derive(Debug)] -pub enum EthRlpxConnection { +pub enum EthRlpxConnection { /// A connection that only supports the ETH protocol. - EthOnly(Box), + EthOnly(Box>), /// A connection that supports the ETH protocol and __at least one other__ `RLPx` protocol. - Satellite(Box), + Satellite(Box>), } -impl EthRlpxConnection { +impl EthRlpxConnection { /// Returns the negotiated ETH version. #[inline] pub(crate) const fn version(&self) -> EthVersion { @@ -78,25 +78,33 @@ impl EthRlpxConnection { #[inline] pub fn start_send_broadcast( &mut self, - item: EthBroadcastMessage, + item: EthBroadcastMessage, ) -> Result<(), EthStreamError> { match self { Self::EthOnly(conn) => conn.start_send_broadcast(item), Self::Satellite(conn) => conn.primary_mut().start_send_broadcast(item), } } + + /// Sends a raw capability message over the connection + pub fn start_send_raw(&mut self, msg: RawCapabilityMessage) -> Result<(), EthStreamError> { + match self { + Self::EthOnly(conn) => conn.start_send_raw(msg), + Self::Satellite(conn) => conn.primary_mut().start_send_raw(msg), + } + } } -impl From for EthRlpxConnection { +impl From> for EthRlpxConnection { #[inline] - fn from(conn: EthPeerConnection) -> Self { + fn from(conn: EthPeerConnection) -> Self { Self::EthOnly(Box::new(conn)) } } -impl From for EthRlpxConnection { +impl From> for EthRlpxConnection { #[inline] - fn from(conn: EthSatelliteConnection) -> Self { + fn from(conn: EthSatelliteConnection) -> Self { Self::Satellite(Box::new(conn)) } } @@ -112,22 +120,22 @@ macro_rules! 
delegate_call { } } -impl Stream for EthRlpxConnection { - type Item = Result; +impl Stream for EthRlpxConnection { + type Item = Result, EthStreamError>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { delegate_call!(self.poll_next(cx)) } } -impl Sink for EthRlpxConnection { +impl Sink> for EthRlpxConnection { type Error = EthStreamError; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { delegate_call!(self.poll_ready(cx)) } - fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { + fn start_send(self: Pin<&mut Self>, item: EthMessage) -> Result<(), Self::Error> { delegate_call!(self.start_send(item)) } @@ -144,15 +152,16 @@ impl Sink for EthRlpxConnection { mod tests { use super::*; - const fn assert_eth_stream() + const fn assert_eth_stream() where - St: Stream> + Sink, + N: NetworkPrimitives, + St: Stream, EthStreamError>> + Sink>, { } #[test] const fn test_eth_stream_variants() { - assert_eth_stream::(); - assert_eth_stream::(); + assert_eth_stream::>(); + assert_eth_stream::>(); } } diff --git a/crates/net/network/src/session/counter.rs b/crates/net/network/src/session/counter.rs index 0d8f764f206..052cf1e2570 100644 --- a/crates/net/network/src/session/counter.rs +++ b/crates/net/network/src/session/counter.rs @@ -1,8 +1,7 @@ +use super::ExceedsSessionLimit; use reth_network_api::Direction; use reth_network_types::SessionLimits; -use super::ExceedsSessionLimit; - /// Keeps track of all sessions. #[derive(Debug, Clone)] pub struct SessionCounter { diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index a022e670419..d24d7ec6841 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -1,26 +1,24 @@ //! Session handles. -use std::{io, net::SocketAddr, sync::Arc, time::Instant}; - +use crate::{ + message::PeerMessage, + session::{conn::EthRlpxConnection, Direction, SessionId}, + PendingSessionHandshakeError, +}; use reth_ecies::ECIESError; use reth_eth_wire::{ capability::CapabilityMessage, errors::EthStreamError, Capabilities, DisconnectReason, - EthVersion, Status, + EthVersion, NetworkPrimitives, Status, }; use reth_network_api::PeerInfo; use reth_network_peers::{NodeRecord, PeerId}; use reth_network_types::PeerKind; +use std::{io, net::SocketAddr, sync::Arc, time::Instant}; use tokio::sync::{ mpsc::{self, error::SendError}, oneshot, }; -use crate::{ - message::PeerMessage, - session::{conn::EthRlpxConnection, Direction, SessionId}, - PendingSessionHandshakeError, -}; - /// A handler attached to a peer session that's not authenticated yet, pending Handshake and hello /// message which exchanges the `capabilities` of the peer. /// @@ -54,7 +52,7 @@ impl PendingSessionHandle { /// Within an active session that supports the `Ethereum Wire Protocol `, three high-level tasks can /// be performed: chain synchronization, block propagation and transaction exchange. #[derive(Debug)] -pub struct ActiveSessionHandle { +pub struct ActiveSessionHandle { /// The direction of the session pub(crate) direction: Direction, /// The assigned id for this session @@ -68,7 +66,7 @@ pub struct ActiveSessionHandle { /// Announced capabilities of the peer. 
pub(crate) capabilities: Arc, /// Sender half of the command channel used to send commands _to_ the spawned session - pub(crate) commands_to_session: mpsc::Sender, + pub(crate) commands_to_session: mpsc::Sender>, /// The client's name and version pub(crate) client_version: Arc, /// The address we're connected to @@ -81,7 +79,7 @@ pub struct ActiveSessionHandle { // === impl ActiveSessionHandle === -impl ActiveSessionHandle { +impl ActiveSessionHandle { /// Sends a disconnect command to the session. pub fn disconnect(&self, reason: Option) { // Note: we clone the sender which ensures the channel has capacity to send the message @@ -93,7 +91,7 @@ impl ActiveSessionHandle { pub async fn try_disconnect( &self, reason: Option, - ) -> Result<(), SendError> { + ) -> Result<(), SendError>> { self.commands_to_session.clone().send(SessionCommand::Disconnect { reason }).await } @@ -162,7 +160,7 @@ impl ActiveSessionHandle { /// /// A session starts with a `Handshake`, followed by a `Hello` message which #[derive(Debug)] -pub enum PendingSessionEvent { +pub enum PendingSessionEvent { /// Represents a successful `Hello` and `Status` exchange: Established { /// An internal identifier for the established session @@ -179,7 +177,7 @@ pub enum PendingSessionEvent { status: Arc, /// The actual connection stream which can be used to send and receive `eth` protocol /// messages - conn: EthRlpxConnection, + conn: EthRlpxConnection, /// The direction of the session, either `Inbound` or `Outgoing` direction: Direction, /// The remote node's user agent, usually containing the client name and version @@ -222,20 +220,20 @@ pub enum PendingSessionEvent { /// Commands that can be sent to the spawned session. #[derive(Debug)] -pub enum SessionCommand { +pub enum SessionCommand { /// Disconnect the connection Disconnect { /// Why the disconnect was initiated reason: Option, }, /// Sends a message to the peer - Message(PeerMessage), + Message(PeerMessage), } /// Message variants an active session can produce and send back to the /// [`SessionManager`](crate::session::SessionManager) #[derive(Debug)] -pub enum ActiveSessionMessage { +pub enum ActiveSessionMessage { /// Session was gracefully disconnected. Disconnected { /// The remote node's public key @@ -257,7 +255,7 @@ pub enum ActiveSessionMessage { /// Identifier of the remote peer. peer_id: PeerId, /// Message received from the peer. - message: PeerMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. InvalidMessage { /// Identifier of the remote peer. /// Announced capabilities of the remote peer. capabilities: Arc, /// Message received from the peer. - message: CapabilityMessage, + message: CapabilityMessage, }, /// Received a bad message from the peer.
BadMessage { diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 74f303df7b8..b19281b079a 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -5,6 +5,7 @@ mod conn; mod counter; mod handle; +use active::QueuedOutgoingMessages; pub use conn::EthRlpxConnection; pub use handle::{ ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, @@ -22,19 +23,25 @@ use std::{ time::{Duration, Instant}, }; +use crate::{ + message::PeerMessage, + metrics::SessionManagerMetrics, + protocol::{IntoRlpxSubProtocol, RlpxSubProtocolHandlers, RlpxSubProtocols}, + session::active::ActiveSession, +}; use counter::SessionCounter; use futures::{future::Either, io, FutureExt, StreamExt}; use reth_ecies::{stream::ECIESStream, ECIESError}; use reth_eth_wire::{ capability::CapabilityMessage, errors::EthStreamError, multiplex::RlpxProtocolMultiplexer, - Capabilities, DisconnectReason, EthVersion, HelloMessageWithProtocols, Status, - UnauthedEthStream, UnauthedP2PStream, + Capabilities, DisconnectReason, EthVersion, HelloMessageWithProtocols, NetworkPrimitives, + Status, UnauthedEthStream, UnauthedP2PStream, }; +use reth_ethereum_forks::{ForkFilter, ForkId, ForkTransition, Head}; use reth_metrics::common::mpsc::MeteredPollSender; -use reth_network_api::PeerRequestSender; +use reth_network_api::{PeerRequest, PeerRequestSender}; use reth_network_peers::PeerId; use reth_network_types::SessionsConfig; -use reth_primitives::{ForkFilter, ForkId, ForkTransition, Head}; use reth_tasks::TaskSpawner; use rustc_hash::FxHashMap; use secp256k1::SecretKey; @@ -47,13 +54,6 @@ use tokio_stream::wrappers::ReceiverStream; use tokio_util::sync::PollSender; use tracing::{debug, instrument, trace}; -use crate::{ - message::PeerMessage, - metrics::SessionManagerMetrics, - protocol::{IntoRlpxSubProtocol, RlpxSubProtocolHandlers, RlpxSubProtocols}, - session::active::ActiveSession, -}; - /// Internal identifier for active sessions. #[derive(Debug, Clone, Copy, PartialOrd, PartialEq, Eq, Hash)] pub struct SessionId(usize); @@ -61,7 +61,7 @@ pub struct SessionId(usize); /// Manages a set of sessions. #[must_use = "Session Manager must be polled to process session events."] #[derive(Debug)] -pub struct SessionManager { +pub struct SessionManager { /// Tracks the identifier for the next session. next_id: usize, /// Keeps track of all sessions @@ -92,30 +92,32 @@ pub struct SessionManager { /// session is authenticated, it can be moved to the `active_session` set. pending_sessions: FxHashMap, /// All active sessions that are ready to exchange messages. - active_sessions: HashMap, + active_sessions: HashMap>, /// The original Sender half of the [`PendingSessionEvent`] channel. /// /// When a new (pending) session is created, the corresponding [`PendingSessionHandle`] will /// get a clone of this sender half. - pending_sessions_tx: mpsc::Sender, + pending_sessions_tx: mpsc::Sender>, /// Receiver half that listens for [`PendingSessionEvent`] produced by pending sessions. - pending_session_rx: ReceiverStream, + pending_session_rx: ReceiverStream>, /// The original Sender half of the [`ActiveSessionMessage`] channel. /// /// When active session state is reached, the corresponding [`ActiveSessionHandle`] will get a /// clone of this sender half. - active_session_tx: MeteredPollSender, + active_session_tx: MeteredPollSender>, /// Receiver half that listens for [`ActiveSessionMessage`] produced by pending sessions. 
- active_session_rx: ReceiverStream, + active_session_rx: ReceiverStream>, /// Additional `RLPx` sub-protocols to be used by the session manager. extra_protocols: RlpxSubProtocols, + /// Tracks the ongoing graceful disconnection attempts for incoming connections. + disconnections_counter: DisconnectionsCounter, /// Metrics for the session manager. metrics: SessionManagerMetrics, } // === impl SessionManager === -impl SessionManager { +impl SessionManager { /// Creates a new empty [`SessionManager`]. #[allow(clippy::too_many_arguments)] pub fn new( @@ -150,6 +152,7 @@ impl SessionManager { active_session_tx: MeteredPollSender::new(active_session_tx, "network_active_session"), active_session_rx: ReceiverStream::new(active_session_rx), extra_protocols, + disconnections_counter: Default::default(), metrics: Default::default(), } } @@ -178,7 +181,7 @@ impl SessionManager { } /// Returns a borrowed reference to the active sessions. - pub const fn active_sessions(&self) -> &HashMap { + pub const fn active_sessions(&self) -> &HashMap> { &self.active_sessions } @@ -344,7 +347,7 @@ impl SessionManager { } /// Sends a message to the peer's session - pub fn send_message(&mut self, peer_id: &PeerId, msg: PeerMessage) { + pub fn send_message(&mut self, peer_id: &PeerId, msg: PeerMessage) { if let Some(session) = self.active_sessions.get_mut(peer_id) { let _ = session.commands_to_session.try_send(SessionCommand::Message(msg)).inspect_err( |e| { @@ -369,16 +372,45 @@ impl SessionManager { } /// Removes the [`PendingSessionHandle`] if it exists. - fn remove_active_session(&mut self, id: &PeerId) -> Option { + fn remove_active_session(&mut self, id: &PeerId) -> Option> { let session = self.active_sessions.remove(id)?; self.counter.dec_active(&session.direction); Some(session) } + /// Tries to gracefully disconnect an incoming connection by initiating an ECIES connection and + /// sending a disconnect. If the [`SessionManager`] is at capacity for ongoing disconnections, it + /// will simply drop the incoming connection. + pub(crate) fn try_disconnect_incoming_connection( + &self, + stream: TcpStream, + reason: DisconnectReason, + ) { + if !self.disconnections_counter.has_capacity() { + // drop the connection if we don't have capacity for gracefully disconnecting + return + } + + let guard = self.disconnections_counter.clone(); + let secret_key = self.secret_key; + + self.spawn(async move { + trace!( + target: "net::session", + "gracefully disconnecting incoming connection" + ); + if let Ok(stream) = get_ecies_stream(stream, secret_key, Direction::Incoming).await { + let mut unauth = UnauthedP2PStream::new(stream); + let _ = unauth.send_disconnect(reason).await; + drop(guard); + } + }); + } + /// This polls all the session handles and returns [`SessionEvent`]. /// /// Active sessions are prioritized.
- pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { // Poll events from active sessions match self.active_session_rx.poll_next_unpin(cx) { Poll::Pending => {} @@ -495,7 +527,9 @@ impl SessionManager { internal_request_tx: ReceiverStream::new(messages_rx).fuse(), inflight_requests: Default::default(), conn, - queued_outgoing: Default::default(), + queued_outgoing: QueuedOutgoingMessages::new( + self.metrics.queued_outgoing_messages.clone(), + ), received_requests_from_remote: Default::default(), internal_request_timeout_interval: tokio::time::interval( self.initial_internal_request_timeout, @@ -612,9 +646,23 @@ impl SessionManager { } } +/// A counter for ongoing graceful disconnection attempts. +#[derive(Default, Debug, Clone)] +struct DisconnectionsCounter(Arc<()>); + +impl DisconnectionsCounter { + const MAX_CONCURRENT_GRACEFUL_DISCONNECTIONS: usize = 15; + + /// Returns true if the [`DisconnectionsCounter`] still has capacity + /// for an additional graceful disconnection. + fn has_capacity(&self) -> bool { + Arc::strong_count(&self.0) <= Self::MAX_CONCURRENT_GRACEFUL_DISCONNECTIONS + } +} + /// Events produced by the [`SessionManager`] #[derive(Debug)] -pub enum SessionEvent { +pub enum SessionEvent { /// A new session was successfully authenticated. /// /// This session is now able to exchange data. @@ -632,7 +680,7 @@ pub enum SessionEvent { /// The Status message the peer sent during the `eth` handshake status: Arc, /// The channel for sending messages to the peer with the session - messages: PeerRequestSender, + messages: PeerRequestSender>, /// The direction of the session, either `Inbound` or `Outgoing` direction: Direction, /// The maximum time that the session waits for a response from the peer before timing out @@ -653,7 +701,7 @@ pub enum SessionEvent { /// The remote node's public key peer_id: PeerId, /// Message received from the peer. - message: PeerMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. InvalidMessage { /// The remote node's public key peer_id: PeerId, /// Announced capabilities of the remote peer. capabilities: Arc, /// Message received from the peer. - message: CapabilityMessage, + message: CapabilityMessage, }, /// Received a bad message from the peer. BadMessage { @@ -748,18 +796,18 @@ impl PendingSessionHandshakeError { pub struct ExceedsSessionLimit(pub(crate) u32); /// Starts a pending session authentication with a timeout. -pub(crate) async fn pending_session_with_timeout( +pub(crate) async fn pending_session_with_timeout( timeout: Duration, session_id: SessionId, remote_addr: SocketAddr, direction: Direction, - events: mpsc::Sender, + events: mpsc::Sender>, f: F, ) where F: Future, { if tokio::time::timeout(timeout, f).await.is_err() { - debug!(target: "net::session", ?remote_addr, ?direction, "pending session timed out"); + trace!(target: "net::session", ?remote_addr, ?direction, "pending session timed out"); let event = PendingSessionEvent::Disconnected { remote_addr, session_id, @@ -774,11 +822,11 @@ pub(crate) async fn pending_session_with_timeout( /// /// This will wait for the _incoming_ handshake request and answer it.
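`DisconnectionsCounter` is worth pausing on: it bounds concurrency without a lock or an atomic by reusing `Arc`'s strong count. Each spawned disconnect task holds a clone of the guard, and dropping the clone releases the slot, even if the task panics. A standalone sketch of the idiom; the `MAX` of 3 is arbitrary here (the PR uses 15):

```rust
use std::sync::Arc;

/// Capacity guard backed by `Arc`'s reference count: cloning "takes a slot",
/// dropping the clone releases it. No lock is needed, and a task that panics
/// still releases its slot when its clone is dropped.
#[derive(Default, Debug, Clone)]
struct Capacity(Arc<()>);

impl Capacity {
    /// Arbitrary limit for this sketch.
    const MAX: usize = 3;

    fn has_capacity(&self) -> bool {
        // `strong_count` includes this handle itself, hence `<=` not `<`.
        Arc::strong_count(&self.0) <= Self::MAX
    }
}

fn main() {
    let cap = Capacity::default();
    let mut guards = Vec::new();
    while cap.has_capacity() {
        guards.push(cap.clone()); // acquire a slot
    }
    assert_eq!(guards.len(), Capacity::MAX);
    drop(guards); // all slots released at once
    assert!(cap.has_capacity());
}
```

The trade-off versus a `Semaphore` is that there is no waiting: callers either get a slot immediately or, as in `try_disconnect_incoming_connection`, fall back to dropping the connection outright.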
#[allow(clippy::too_many_arguments)] -pub(crate) async fn start_pending_incoming_session( +pub(crate) async fn start_pending_incoming_session( disconnect_rx: oneshot::Receiver<()>, session_id: SessionId, stream: TcpStream, - events: mpsc::Sender, + events: mpsc::Sender>, remote_addr: SocketAddr, secret_key: SecretKey, hello: HelloMessageWithProtocols, @@ -805,9 +853,9 @@ pub(crate) async fn start_pending_incoming_session( /// Starts the authentication process for a connection initiated by a remote peer. #[instrument(skip_all, fields(%remote_addr, peer_id), target = "net")] #[allow(clippy::too_many_arguments)] -async fn start_pending_outbound_session( +async fn start_pending_outbound_session( disconnect_rx: oneshot::Receiver<()>, - events: mpsc::Sender, + events: mpsc::Sender>, session_id: SessionId, remote_addr: SocketAddr, remote_peer_id: PeerId, @@ -854,9 +902,9 @@ async fn start_pending_outbound_session( /// Authenticates a session #[allow(clippy::too_many_arguments)] -async fn authenticate( +async fn authenticate( disconnect_rx: oneshot::Receiver<()>, - events: mpsc::Sender, + events: mpsc::Sender>, stream: TcpStream, session_id: SessionId, remote_addr: SocketAddr, @@ -868,7 +916,7 @@ async fn authenticate( extra_handlers: RlpxSubProtocolHandlers, ) { let local_addr = stream.local_addr().ok(); - let stream = match get_eciess_stream(stream, secret_key, direction).await { + let stream = match get_ecies_stream(stream, secret_key, direction).await { Ok(stream) => stream, Err(error) => { let _ = events @@ -917,7 +965,7 @@ async fn authenticate( /// Returns an [`ECIESStream`] if it can be built. If not, send a /// [`PendingSessionEvent::EciesAuthError`] and returns `None` -async fn get_eciess_stream( +async fn get_ecies_stream( stream: Io, secret_key: SecretKey, direction: Direction, @@ -937,7 +985,7 @@ async fn get_eciess_stream( /// If additional [`RlpxSubProtocolHandlers`] are provided, the hello message will be updated to /// also negotiate the additional protocols. #[allow(clippy::too_many_arguments)] -async fn authenticate_stream( +async fn authenticate_stream( stream: UnauthedP2PStream>, session_id: SessionId, remote_addr: SocketAddr, @@ -947,7 +995,7 @@ async fn authenticate_stream( mut status: Status, fork_filter: ForkFilter, mut extra_handlers: RlpxSubProtocolHandlers, -) -> PendingSessionEvent { +) -> PendingSessionEvent { // Add extra protocols to the hello message extra_handlers.retain(|handler| hello.try_add_protocol(handler.protocol()).is_ok()); diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index 5caa656a98e..4bb82cf97c4 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -1,5 +1,25 @@ //! Keeps track of the state of the network. 
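`pending_session_with_timeout` above leans on the fact that dropping a future cancels it: `tokio::time::timeout` drops the handshake future on expiry, and the helper then reports a disconnect event instead. A reduced sketch of that shape, with a hypothetical `Event` type standing in for `PendingSessionEvent`:

```rust
use std::time::Duration;
use tokio::sync::mpsc;

/// Hypothetical stand-in for `PendingSessionEvent::Disconnected { .. }`.
#[derive(Debug)]
enum Event {
    TimedOut,
}

/// Drives `fut` to completion, but if it does not finish within `timeout`,
/// the future is dropped (cancelling the handshake) and a timeout event is
/// reported, mirroring the shape of `pending_session_with_timeout`.
async fn with_timeout<F>(timeout: Duration, events: mpsc::Sender<Event>, fut: F)
where
    F: std::future::Future<Output = ()>,
{
    if tokio::time::timeout(timeout, fut).await.is_err() {
        let _ = events.send(Event::TimedOut).await;
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel(1);
    // A "handshake" that never completes:
    with_timeout(Duration::from_millis(10), tx, std::future::pending()).await;
    assert!(matches!(rx.recv().await, Some(Event::TimedOut)));
}
```

Because cancellation is just a drop, the handshake future must not hold resources that need explicit cleanup past that point, which is why the event is emitted by the wrapper rather than the wrapped future.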
+use crate::{ + cache::LruCache, + discovery::Discovery, + fetch::{BlockResponseOutcome, FetchAction, StateFetcher}, + message::{BlockRequest, NewBlockMessage, PeerResponse, PeerResponseResult}, + peers::{PeerAction, PeersManager}, + FetchClient, +}; +use alloy_consensus::BlockHeader; +use alloy_primitives::B256; +use rand::seq::SliceRandom; +use reth_eth_wire::{ + BlockHashNumber, Capabilities, DisconnectReason, EthNetworkPrimitives, NetworkPrimitives, + NewBlockHashes, Status, +}; +use reth_ethereum_forks::ForkId; +use reth_network_api::{DiscoveredEvent, DiscoveryEvent, PeerRequest, PeerRequestSender}; +use reth_network_peers::PeerId; +use reth_network_types::{PeerAddr, PeerKind}; +use reth_primitives_traits::Block; use std::{ collections::{HashMap, VecDeque}, fmt, @@ -11,26 +31,9 @@ use std::{ }, task::{Context, Poll}, }; - -use alloy_primitives::B256; -use rand::seq::SliceRandom; -use reth_eth_wire::{BlockHashNumber, Capabilities, DisconnectReason, NewBlockHashes, Status}; -use reth_network_api::{DiscoveredEvent, DiscoveryEvent, PeerRequest, PeerRequestSender}; -use reth_network_peers::PeerId; -use reth_network_types::{PeerAddr, PeerKind}; -use reth_primitives::ForkId; use tokio::sync::oneshot; use tracing::{debug, trace}; -use crate::{ - cache::LruCache, - discovery::Discovery, - fetch::{BlockResponseOutcome, FetchAction, StateFetcher}, - message::{BlockRequest, NewBlockMessage, PeerResponse, PeerResponseResult}, - peers::{PeerAction, PeersManager}, - FetchClient, -}; - /// Cache limit of blocks to keep track of for a single peer. const PEER_BLOCK_CACHE_LIMIT: u32 = 512; @@ -69,13 +72,13 @@ impl Deref for BlockNumReader { /// /// This type is also responsible for responding for received request. #[derive(Debug)] -pub struct NetworkState { +pub struct NetworkState { /// All active peers and their state. - active_peers: HashMap, + active_peers: HashMap>, /// Manages connections to peers. peers_manager: PeersManager, /// Buffered messages until polled. - queued_messages: VecDeque, + queued_messages: VecDeque>, /// The client type that can interact with the chain. /// /// This type is used to fetch the block number after we established a session and received the @@ -88,10 +91,10 @@ pub struct NetworkState { /// The fetcher streams `RLPx` related requests on a per-peer basis to this type. This type /// will then queue in the request and notify the fetcher once the result has been /// received. - state_fetcher: StateFetcher, + state_fetcher: StateFetcher, } -impl NetworkState { +impl NetworkState { /// Create a new state instance with the given params pub(crate) fn new( client: BlockNumReader, @@ -126,7 +129,7 @@ impl NetworkState { } /// Returns a new [`FetchClient`] - pub(crate) fn fetch_client(&self) -> FetchClient { + pub(crate) fn fetch_client(&self) -> FetchClient { self.state_fetcher.client() } @@ -144,7 +147,7 @@ impl NetworkState { peer: PeerId, capabilities: Arc, status: Arc, - request_tx: PeerRequestSender, + request_tx: PeerRequestSender>, timeout: Arc, ) { debug_assert!(!self.active_peers.contains_key(&peer), "Already connected; not possible"); @@ -182,12 +185,12 @@ impl NetworkState { /// > the total number of peers) using the `NewBlock` message. 
/// /// See also - pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage) { + pub(crate) fn announce_new_block(&mut self, msg: NewBlockMessage) { // send a `NewBlock` message to a fraction of the connected peers (square root of the total // number of peers) let num_propagate = (self.active_peers.len() as f64).sqrt() as u64 + 1; - let number = msg.block.block.header.number; + let number = msg.block.block.header().number(); let mut count = 0; // Shuffle to propagate to a random sample of peers on every block announcement @@ -224,8 +227,8 @@ impl NetworkState { /// Completes the block propagation process started in [`NetworkState::announce_new_block()`] /// but sending `NewBlockHash` broadcast to all peers that haven't seen it yet. - pub(crate) fn announce_new_block_hash(&mut self, msg: NewBlockMessage) { - let number = msg.block.block.header.number; + pub(crate) fn announce_new_block_hash(&mut self, msg: NewBlockMessage) { + let number = msg.block.block.header().number(); let hashes = NewBlockHashes(vec![BlockHashNumber { hash: msg.hash, number }]); for (peer_id, peer) in &mut self.active_peers { if peer.blocks.contains(&msg.hash) { @@ -382,7 +385,7 @@ impl NetworkState { } /// Handle the outcome of processed response, for example directly queue another request. - fn on_block_response_outcome(&mut self, outcome: BlockResponseOutcome) -> Option { + fn on_block_response_outcome(&mut self, outcome: BlockResponseOutcome) { match outcome { BlockResponseOutcome::Request(peer, request) => { self.handle_block_request(peer, request); @@ -391,7 +394,6 @@ impl NetworkState { self.peers_manager.apply_reputation_change(&peer, reputation_change); } } - None } /// Invoked when received a response from a connected peer. @@ -399,22 +401,24 @@ impl NetworkState { /// Delegates the response result to the fetcher which may return an outcome specific /// instruction that needs to be handled in [`Self::on_block_response_outcome`]. This could be /// a follow-up request or an instruction to slash the peer's reputation. 
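The `announce_new_block` logic above implements the standard devp2p propagation policy: the full `NewBlock` body goes to roughly the square root of the peer set, and the remaining peers only learn the hash later via `NewBlockHashes`. A toy illustration of how that fan-out scales, using the same formula as the code:

```rust
/// Number of peers that receive the full `NewBlock`, mirroring the policy in
/// `announce_new_block`: sqrt of the peer count, plus one so that a tiny
/// network still propagates at least one full block.
fn num_full_broadcast(total_peers: usize) -> u64 {
    (total_peers as f64).sqrt() as u64 + 1
}

fn main() {
    for peers in [0usize, 1, 4, 25, 100] {
        println!("{peers} peers -> full NewBlock to {}", num_full_broadcast(peers));
    }
    // 25 peers -> 6, 100 peers -> 11: the fan-out grows much more slowly than
    // the peer set, keeping per-block bandwidth bounded while hashes still
    // reach everyone.
}
```

Shuffling the peer list before taking the first `sqrt(n)` entries, as the code does, ensures a different random sample receives the full block on every announcement.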
- fn on_eth_response(&mut self, peer: PeerId, resp: PeerResponseResult) -> Option { - match resp { + fn on_eth_response(&mut self, peer: PeerId, resp: PeerResponseResult) { + let outcome = match resp { PeerResponseResult::BlockHeaders(res) => { - let outcome = self.state_fetcher.on_block_headers_response(peer, res)?; - self.on_block_response_outcome(outcome) + self.state_fetcher.on_block_headers_response(peer, res) } PeerResponseResult::BlockBodies(res) => { - let outcome = self.state_fetcher.on_block_bodies_response(peer, res)?; - self.on_block_response_outcome(outcome) + self.state_fetcher.on_block_bodies_response(peer, res) } _ => None, + }; + + if let Some(outcome) = outcome { + self.on_block_response_outcome(outcome); + } } /// Advances the state - pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { loop { // drain buffered messages if let Some(message) = self.queued_messages.pop_front() { @@ -433,13 +437,14 @@ impl NetworkState { } } - // need to buffer results here to make borrow checker happy - let mut closed_sessions = Vec::new(); - let mut received_responses = Vec::new(); + loop { + // need to buffer results here to make borrow checker happy + let mut closed_sessions = Vec::new(); + let mut received_responses = Vec::new(); - // poll all connected peers for responses - for (id, peer) in &mut self.active_peers { - if let Some(mut response) = peer.pending_response.take() { + // poll all connected peers for responses + for (id, peer) in &mut self.active_peers { + let Some(mut response) = peer.pending_response.take() else { continue }; match response.poll(cx) { Poll::Ready(res) => { // check if the error is due to a closed channel to the session @@ -450,7 +455,8 @@ impl NetworkState { "Request canceled, response channel from session closed." ); // if the channel is closed, this means the peer session is also - // closed, in which case we can invoke the [Self::on_closed_session] + // closed, in which case we can invoke the + // [Self::on_closed_session] // immediately, preventing followup requests and propagate the // connection dropped error closed_sessions.push(*id); @@ -464,15 +470,17 @@ impl NetworkState { } }; } - } for peer in closed_sessions { self.on_session_closed(peer) } + for peer in closed_sessions { + self.on_session_closed(peer) + } + + if received_responses.is_empty() { + break; + } - for (peer_id, resp) in received_responses { - if let Some(action) = self.on_eth_response(peer_id, resp) { - self.queued_messages.push_back(action); + for (peer_id, resp) in received_responses { + self.on_eth_response(peer_id, resp); } } @@ -481,6 +489,8 @@ impl NetworkState { self.on_peer_action(action); } + // We need to poll again in case we have received any responses because they may have + // triggered follow-up requests. if self.queued_messages.is_empty() { return Poll::Pending } @@ -492,29 +502,29 @@ impl NetworkState { /// /// For example known blocks,so we can decide what to announce. #[derive(Debug)] -pub(crate) struct ActivePeer { +pub(crate) struct ActivePeer { /// Best block of the peer. pub(crate) best_hash: B256, /// The capabilities of the remote peer. #[allow(dead_code)] pub(crate) capabilities: Arc, /// A communication channel directly to the session task. - pub(crate) request_tx: PeerRequestSender, + pub(crate) request_tx: PeerRequestSender>, /// The response receiver for a currently active request to that peer.
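The rewritten poll loop above buffers `closed_sessions` and `received_responses` into local `Vec`s because Rust will not allow calling `&mut self` methods while `self.active_peers` is mutably borrowed by the iteration. A distilled sketch of that borrow-checker workaround; the types here are placeholders, not the PR's:

```rust
use std::collections::HashMap;

struct State {
    peers: HashMap<u32, Option<String>>, // peer id -> pending response
    log: Vec<String>,
}

impl State {
    fn on_closed(&mut self, id: u32) {
        self.peers.remove(&id);
        self.log.push(format!("closed {id}"));
    }

    /// While iterating `&mut self.peers` we cannot also call `&mut self`
    /// methods, so results are buffered first and handled after the loop,
    /// the same trick as the `closed_sessions` / `received_responses`
    /// vectors in `NetworkState::poll`.
    fn drain(&mut self) {
        let mut closed = Vec::new();
        for (id, pending) in &mut self.peers {
            if let Some(resp) = pending.take() {
                if resp.is_empty() {
                    closed.push(*id); // cannot call self.on_closed(*id) here
                }
            }
        }
        for id in closed {
            self.on_closed(id);
        }
    }
}

fn main() {
    let mut s = State { peers: HashMap::from([(1, Some(String::new()))]), log: vec![] };
    s.drain();
    assert_eq!(s.log, ["closed 1"]);
}
```

Wrapping the whole thing in an outer `loop` that breaks when no responses arrived, as the PR does, then ensures follow-up requests triggered by a response are polled in the same `poll` call.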
- pub(crate) pending_response: Option, + pub(crate) pending_response: Option>, /// Blocks we know the peer has. pub(crate) blocks: LruCache, } /// Message variants triggered by the [`NetworkState`] #[derive(Debug)] -pub(crate) enum StateAction { +pub(crate) enum StateAction { /// Dispatch a `NewBlock` message to the peer NewBlock { /// Target of the message peer_id: PeerId, /// The `NewBlock` message - block: NewBlockMessage, + block: NewBlockMessage, }, NewBlockHashes { /// Target of the message @@ -546,21 +556,6 @@ pub(crate) enum StateAction { #[cfg(test)] mod tests { - use std::{ - future::poll_fn, - sync::{atomic::AtomicU64, Arc}, - }; - - use alloy_primitives::B256; - use reth_eth_wire::{BlockBodies, Capabilities, Capability, EthVersion}; - use reth_network_api::PeerRequestSender; - use reth_network_p2p::{bodies::client::BodiesClient, error::RequestError}; - use reth_network_peers::PeerId; - use reth_primitives::{BlockBody, Header}; - use reth_provider::test_utils::NoopProvider; - use tokio::sync::mpsc; - use tokio_stream::{wrappers::ReceiverStream, StreamExt}; - use crate::{ discovery::Discovery, fetch::StateFetcher, @@ -568,9 +563,23 @@ mod tests { state::{BlockNumReader, NetworkState}, PeerRequest, }; + use alloy_consensus::Header; + use alloy_primitives::B256; + use reth_eth_wire::{BlockBodies, Capabilities, Capability, EthNetworkPrimitives, EthVersion}; + use reth_network_api::PeerRequestSender; + use reth_network_p2p::{bodies::client::BodiesClient, error::RequestError}; + use reth_network_peers::PeerId; + use reth_primitives::BlockBody; + use reth_storage_api::noop::NoopProvider; + use std::{ + future::poll_fn, + sync::{atomic::AtomicU64, Arc}, + }; + use tokio::sync::mpsc; + use tokio_stream::{wrappers::ReceiverStream, StreamExt}; /// Returns a testing instance of the [`NetworkState`]. - fn state() -> NetworkState { + fn state() -> NetworkState { let peers = PeersManager::default(); let handle = peers.handle(); NetworkState { diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index 0be7ae1c1bb..c4a2bd14d36 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -1,19 +1,3 @@ -use std::{ - io, - net::SocketAddr, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; - -use futures::Stream; -use reth_eth_wire::{ - capability::CapabilityMessage, errors::EthStreamError, Capabilities, EthVersion, Status, -}; -use reth_network_api::PeerRequestSender; -use reth_network_peers::PeerId; -use tracing::trace; - use crate::{ listener::{ConnectionListener, ListenerEvent}, message::PeerMessage, @@ -22,6 +6,21 @@ use crate::{ session::{Direction, PendingSessionHandshakeError, SessionEvent, SessionId, SessionManager}, state::{NetworkState, StateAction}, }; +use futures::Stream; +use reth_eth_wire::{ + capability::CapabilityMessage, errors::EthStreamError, Capabilities, DisconnectReason, + EthNetworkPrimitives, EthVersion, NetworkPrimitives, Status, +}; +use reth_network_api::{PeerRequest, PeerRequestSender}; +use reth_network_peers::PeerId; +use std::{ + io, + net::SocketAddr, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; +use tracing::trace; #[cfg_attr(doc, aquamarine::aquamarine)] /// Contains the connectivity related state of the network. @@ -32,7 +31,7 @@ use crate::{ /// [`SessionManager`]. Outgoing connections are either initiated on demand or triggered by the /// [`NetworkState`] and also delegated to the [`NetworkState`]. 
/// -/// Following diagram gives displays the dataflow contained in the [`Swarm`] +/// Following diagram displays the dataflow contained in the [`Swarm`] /// /// The [`ConnectionListener`] yields incoming [`TcpStream`]s from peers that are spawned as session /// tasks. After a successful `RLPx` authentication, the task is ready to accept ETH requests or @@ -49,39 +48,39 @@ use crate::{ /// `include_mmd!("docs/mermaid/swarm.mmd`") #[derive(Debug)] #[must_use = "Swarm does nothing unless polled"] -pub(crate) struct Swarm { +pub(crate) struct Swarm { /// Listens for new incoming connections. incoming: ConnectionListener, /// All sessions. - sessions: SessionManager, + sessions: SessionManager, /// Tracks the entire state of the network and handles events received from the sessions. - state: NetworkState, + state: NetworkState, } // === impl Swarm === -impl Swarm { +impl Swarm { /// Configures a new swarm instance. pub(crate) const fn new( incoming: ConnectionListener, - sessions: SessionManager, - state: NetworkState, + sessions: SessionManager, + state: NetworkState, ) -> Self { Self { incoming, sessions, state } } - /// Adds an additional protocol handler to the `RLPx` sub-protocol list. + /// Adds a protocol handler to the `RLPx` sub-protocol list. pub(crate) fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) { self.sessions_mut().add_rlpx_sub_protocol(protocol); } /// Access to the state. - pub(crate) const fn state(&self) -> &NetworkState { + pub(crate) const fn state(&self) -> &NetworkState { &self.state } /// Mutable access to the state. - pub(crate) fn state_mut(&mut self) -> &mut NetworkState { + pub(crate) fn state_mut(&mut self) -> &mut NetworkState { &mut self.state } @@ -91,17 +90,17 @@ impl Swarm { } /// Access to the [`SessionManager`]. - pub(crate) const fn sessions(&self) -> &SessionManager { + pub(crate) const fn sessions(&self) -> &SessionManager { &self.sessions } /// Mutable access to the [`SessionManager`]. - pub(crate) fn sessions_mut(&mut self) -> &mut SessionManager { + pub(crate) fn sessions_mut(&mut self) -> &mut SessionManager { &mut self.sessions } } -impl Swarm { +impl Swarm { /// Triggers a new outgoing connection to the given node pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_id: PeerId) { self.sessions.dial_outbound(remote_addr, remote_id) @@ -111,7 +110,7 @@ impl Swarm { /// /// This either updates the state or produces a new [`SwarmEvent`] that is bubbled up to the /// manager. - fn on_session_event(&mut self, event: SessionEvent) -> Option { + fn on_session_event(&mut self, event: SessionEvent) -> Option> { match event { SessionEvent::SessionEstablished { peer_id, @@ -180,7 +179,7 @@ impl Swarm { /// Callback for events produced by [`ConnectionListener`]. /// /// Depending on the event, this will produce a new [`SwarmEvent`]. 
- fn on_connection(&mut self, event: ListenerEvent) -> Option { + fn on_connection(&mut self, event: ListenerEvent) -> Option> { match event { ListenerEvent::Error(err) => return Some(SwarmEvent::TcpListenerError(err)), ListenerEvent::ListenerClosed { local_address: address } => { @@ -201,6 +200,10 @@ impl Swarm { } InboundConnectionError::ExceedsCapacity => { trace!(target: "net", ?remote_addr, "No capacity for incoming connection"); + self.sessions.try_disconnect_incoming_connection( + stream, + DisconnectReason::TooManyPeers, + ); } } return None @@ -224,7 +227,7 @@ impl Swarm { } /// Hook for actions pulled from the state - fn on_state_action(&mut self, event: StateAction) -> Option { + fn on_state_action(&mut self, event: StateAction) -> Option> { match event { StateAction::Connect { remote_addr, peer_id } => { self.dial_outbound(remote_addr, peer_id); @@ -281,8 +284,8 @@ impl Swarm { } } -impl Stream for Swarm { - type Item = SwarmEvent; +impl Stream for Swarm { + type Item = SwarmEvent; /// This advances all components. /// @@ -333,13 +336,13 @@ impl Stream for Swarm { /// All events created or delegated by the [`Swarm`] that represents changes to the state of the /// network. -pub(crate) enum SwarmEvent { +pub(crate) enum SwarmEvent { /// Events related to the actual network protocol. ValidMessage { /// The peer that sent the message peer_id: PeerId, /// Message received from the peer - message: PeerMessage, + message: PeerMessage, }, /// Received a message that does not match the announced capabilities of the peer. InvalidCapabilityMessage { @@ -347,7 +350,7 @@ pub(crate) enum SwarmEvent { /// Announced capabilities of the remote peer. capabilities: Arc, /// Message received from the peer. - message: CapabilityMessage, + message: CapabilityMessage, }, /// Received a bad message from the peer. BadMessage { @@ -389,7 +392,7 @@ pub(crate) enum SwarmEvent { capabilities: Arc, /// negotiated eth version version: EthVersion, - messages: PeerRequestSender, + messages: PeerRequestSender>, status: Arc, direction: Direction, }, diff --git a/crates/net/network/src/test_utils/init.rs b/crates/net/network/src/test_utils/init.rs index 767f6818091..87ccbb5f9d7 100644 --- a/crates/net/network/src/test_utils/init.rs +++ b/crates/net/network/src/test_utils/init.rs @@ -1,7 +1,6 @@ -use std::{net::SocketAddr, time::Duration}; - use enr::{k256::ecdsa::SigningKey, Enr, EnrPublicKey}; use reth_network_peers::PeerId; +use std::{net::SocketAddr, time::Duration}; /// The timeout for tests that create a `GethInstance` pub const GETH_TIMEOUT: Duration = Duration::from_secs(60); diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index d92272a871e..ddb49f33b89 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -1,32 +1,44 @@ //! A network implementation for testing purposes. 
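For context on how this test harness is driven: a typical network test builds a `Testnet`, spawns it, and then steers peers through their `NetworkHandle`s. The sketch below is hedged, it assumes the `Testnet::create` constructor and the `Peers`/`NetworkInfo` trait methods used elsewhere in reth's network tests, and it elides the event assertions:

```rust
use reth_network::test_utils::Testnet;
use reth_network_api::{NetworkEventListenerProvider, NetworkInfo, Peers};

#[tokio::test(flavor = "multi_thread")]
async fn peers_can_connect() {
    // Two in-process peers backed by a no-op provider.
    let net = Testnet::create(2).await;

    // Collect owned handles so the borrow of `net` ends before `spawn` moves it.
    let handles: Vec<_> = net.handles().collect();
    let (peer0, peer1) = (&handles[0], &handles[1]);

    // Drive all network futures on a background task.
    let _net = net.spawn();

    // Subscribe before dialing so no event is missed, then dial peer1.
    let events = peer0.event_listener();
    peer0.add_peer(*peer1.peer_id(), peer1.local_addr());

    // Assertions elided: a real test would poll `events` until it sees a
    // session-established event carrying peer1's id.
    drop(events);
}
```

The `NetworkEventStream` helper shown further down in this file wraps exactly that "poll until a session event" pattern so individual tests do not have to re-implement it.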
-use std::{ - fmt, - future::Future, - net::{Ipv4Addr, SocketAddr, SocketAddrV4}, - pin::Pin, - task::{Context, Poll}, +use crate::{ + builder::ETH_REQUEST_CHANNEL_CAPACITY, + error::NetworkError, + eth_requests::EthRequestHandler, + protocol::IntoRlpxSubProtocol, + transactions::{TransactionsHandle, TransactionsManager, TransactionsManagerConfig}, + NetworkConfig, NetworkConfigBuilder, NetworkHandle, NetworkManager, }; - use futures::{FutureExt, StreamExt}; use pin_project::pin_project; -use reth_chainspec::{Hardforks, MAINNET}; -use reth_eth_wire::{protocol::Protocol, DisconnectReason, HelloMessageWithProtocols}; +use reth_chainspec::{ChainSpecProvider, Hardforks, MAINNET}; +use reth_eth_wire::{ + protocol::Protocol, DisconnectReason, EthNetworkPrimitives, HelloMessageWithProtocols, +}; use reth_network_api::{ + events::{PeerEvent, SessionInfo}, test_utils::{PeersHandle, PeersHandleProvider}, NetworkEvent, NetworkEventListenerProvider, NetworkInfo, Peers, }; use reth_network_peers::PeerId; -use reth_provider::{test_utils::NoopProvider, ChainSpecProvider}; -use reth_storage_api::{BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory}; +use reth_primitives::{PooledTransactionsElement, TransactionSigned}; +use reth_storage_api::{ + noop::NoopProvider, BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory, +}; use reth_tasks::TokioTaskExecutor; use reth_tokio_util::EventStream; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, test_utils::{TestPool, TestPoolBuilder}, - EthTransactionPool, TransactionPool, TransactionValidationTaskExecutor, + EthTransactionPool, PoolTransaction, TransactionPool, TransactionValidationTaskExecutor, }; use secp256k1::SecretKey; +use std::{ + fmt, + future::Future, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + pin::Pin, + task::{Context, Poll}, +}; use tokio::{ sync::{ mpsc::{channel, unbounded_channel}, @@ -35,15 +47,6 @@ use tokio::{ task::JoinHandle, }; -use crate::{ - builder::ETH_REQUEST_CHANNEL_CAPACITY, - error::NetworkError, - eth_requests::EthRequestHandler, - protocol::IntoRlpxSubProtocol, - transactions::{TransactionsHandle, TransactionsManager, TransactionsManagerConfig}, - NetworkConfig, NetworkConfigBuilder, NetworkHandle, NetworkManager, -}; - /// A test network consisting of multiple peers. pub struct Testnet { /// All running peers in the network. 
@@ -140,7 +143,7 @@ where } /// Returns all handles to the networks - pub fn handles(&self) -> impl Iterator + '_ { + pub fn handles(&self) -> impl Iterator> + '_ { self.peers.iter().map(|p| p.handle()) } @@ -192,12 +195,46 @@ where )) }) } + + /// Installs an eth pool on each peer with custom transaction manager config + pub fn with_eth_pool_config( + self, + tx_manager_config: TransactionsManagerConfig, + ) -> Testnet> { + self.map_pool(|peer| { + let blob_store = InMemoryBlobStore::default(); + let pool = TransactionValidationTaskExecutor::eth( + peer.client.clone(), + MAINNET.clone(), + blob_store.clone(), + TokioTaskExecutor::default(), + ); + + peer.map_transactions_manager_with_config( + EthTransactionPool::eth_pool(pool, blob_store, Default::default()), + tx_manager_config.clone(), + ) + }) + } } impl Testnet where - C: BlockReader + HeaderProvider + Clone + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, + C: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + HeaderProvider + + Clone + + Unpin + + 'static, + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus = TransactionSigned, + Pooled = PooledTransactionsElement, + >, + > + Unpin + + 'static, { /// Spawns the testnet to a separate task pub fn spawn(self) -> TestnetHandle { @@ -255,8 +292,20 @@ impl fmt::Debug for Testnet { impl Future for Testnet where - C: BlockReader + HeaderProvider + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, + C: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + HeaderProvider + + Unpin + + 'static, + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus = TransactionSigned, + Pooled = PooledTransactionsElement, + >, + > + Unpin + + 'static, { type Output = (); @@ -329,11 +378,11 @@ impl TestnetHandle { #[derive(Debug)] pub struct Peer { #[pin] - network: NetworkManager, + network: NetworkManager, #[pin] - request_handler: Option>, + request_handler: Option>, #[pin] - transactions_manager: Option>, + transactions_manager: Option>, pool: Option, client: C, secret_key: SecretKey, @@ -376,12 +425,12 @@ where } /// Returns mutable access to the network. - pub fn network_mut(&mut self) -> &mut NetworkManager { + pub fn network_mut(&mut self) -> &mut NetworkManager { &mut self.network } /// Returns the [`NetworkHandle`] of this peer. - pub fn handle(&self) -> NetworkHandle { + pub fn handle(&self) -> NetworkHandle { self.network.handle().clone() } @@ -436,6 +485,36 @@ where secret_key, } } + + /// Map transactions manager with custom config + pub fn map_transactions_manager_with_config

( + self, + pool: P, + config: TransactionsManagerConfig, + ) -> Peer + where + P: TransactionPool, + { + let Self { mut network, request_handler, client, secret_key, .. } = self; + let (tx, rx) = unbounded_channel(); + network.set_transactions(tx); + + let transactions_manager = TransactionsManager::new( + network.handle().clone(), + pool.clone(), + rx, + config, // Use provided config + ); + + Peer { + network, + request_handler, + transactions_manager: Some(transactions_manager), + pool: Some(pool), + client, + secret_key, + } + } } impl Peer @@ -450,8 +529,20 @@ where impl Future for Peer where - C: BlockReader + HeaderProvider + Unpin + 'static, - Pool: TransactionPool + Unpin + 'static, + C: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + HeaderProvider + + Unpin + + 'static, + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus = TransactionSigned, + Pooled = PooledTransactionsElement, + >, + > + Unpin + + 'static, { type Output = (); @@ -481,8 +572,8 @@ pub struct PeerConfig { /// A handle to a peer in the [`Testnet`]. #[derive(Debug)] pub struct PeerHandle { - network: NetworkHandle, - transactions: Option, + network: NetworkHandle, + transactions: Option>, pool: Option, } @@ -520,7 +611,7 @@ impl PeerHandle { } /// Returns the [`NetworkHandle`] of this peer. - pub const fn network(&self) -> &NetworkHandle { + pub const fn network(&self) -> &NetworkHandle { &self.network } } @@ -617,7 +708,9 @@ impl NetworkEventStream { pub async fn next_session_closed(&mut self) -> Option<(PeerId, Option)> { while let Some(ev) = self.inner.next().await { match ev { - NetworkEvent::SessionClosed { peer_id, reason } => return Some((peer_id, reason)), + NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, reason }) => { + return Some((peer_id, reason)) + } _ => continue, } } @@ -628,7 +721,10 @@ impl NetworkEventStream { pub async fn next_session_established(&mut self) -> Option { while let Some(ev) = self.inner.next().await { match ev { - NetworkEvent::SessionEstablished { peer_id, .. } => return Some(peer_id), + NetworkEvent::ActivePeerSession { info, .. } | + NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => { + return Some(info.peer_id) + } _ => continue, } } @@ -638,16 +734,16 @@ impl NetworkEventStream { /// Awaits the next `num` events for an established session pub async fn take_session_established(&mut self, mut num: usize) -> Vec { if num == 0 { - return Vec::new() + return Vec::new(); } let mut peers = Vec::with_capacity(num); while let Some(ev) = self.inner.next().await { match ev { - NetworkEvent::SessionEstablished { peer_id, .. } => { + NetworkEvent::ActivePeerSession { info: SessionInfo { peer_id, .. }, .. } => { peers.push(peer_id); num -= 1; if num == 0 { - return peers + return peers; } } _ => continue, @@ -656,18 +752,24 @@ impl NetworkEventStream { peers } - /// Ensures that the first two events are a [`NetworkEvent::PeerAdded`] and - /// [`NetworkEvent::SessionEstablished`], returning the [`PeerId`] of the established + /// Ensures that the first two events are a [`NetworkEvent::Peer(PeerEvent::PeerAdded`] and + /// [`NetworkEvent::ActivePeerSession`], returning the [`PeerId`] of the established /// session. 
pub async fn peer_added_and_established(&mut self) -> Option { let peer_id = match self.inner.next().await { - Some(NetworkEvent::PeerAdded(peer_id)) => peer_id, + Some(NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id))) => peer_id, _ => return None, }; match self.inner.next().await { - Some(NetworkEvent::SessionEstablished { peer_id: peer_id2, .. }) => { - debug_assert_eq!(peer_id, peer_id2, "PeerAdded peer_id {peer_id} does not match SessionEstablished peer_id {peer_id2}"); + Some(NetworkEvent::ActivePeerSession { + info: SessionInfo { peer_id: peer_id2, .. }, + .. + }) => { + debug_assert_eq!( + peer_id, peer_id2, + "PeerAdded peer_id {peer_id} does not match SessionEstablished peer_id {peer_id2}" + ); Some(peer_id) } _ => None, diff --git a/crates/net/network/src/transactions/config.rs b/crates/net/network/src/transactions/config.rs index b838f7cfe71..db59ffac5cc 100644 --- a/crates/net/network/src/transactions/config.rs +++ b/crates/net/network/src/transactions/config.rs @@ -1,5 +1,3 @@ -use derive_more::Constructor; - use super::{ DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER, DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, @@ -9,6 +7,7 @@ use crate::transactions::constants::tx_fetcher::{ DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS, DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, }; +use derive_more::Constructor; /// Configuration for managing transactions within the network. #[derive(Debug, Clone)] diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index 3e856951552..2fa900d416f 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -25,13 +25,18 @@ //! before it's re-tried. Nonetheless, the capacity of the buffered hashes cache must be large //! enough to buffer many hashes during network failure, to allow for recovery. 
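The buffering described in this module doc pairs an eviction-bounded cache with a per-hash retry budget. A simplified FIFO stand-in for that idea; the real implementation uses the crate's LRU types and `schnellru`, and `u64` stands in for `TxHash` here:

```rust
use std::collections::{HashMap, VecDeque};

/// Toy buffer for tx hashes awaiting a retry, with a per-hash retry budget
/// and FIFO eviction once `cap` is reached: a simplified stand-in for the
/// bounded cache plus retry counter described in the module docs above.
struct PendingHashes {
    cap: usize,
    order: VecDeque<u64>,           // insertion order, used for eviction
    retries_left: HashMap<u64, u8>, // hash -> remaining retries
}

impl PendingHashes {
    fn new(cap: usize) -> Self {
        Self { cap, order: VecDeque::new(), retries_left: HashMap::new() }
    }

    /// Buffers a failed hash unless its retry budget is exhausted.
    fn buffer(&mut self, hash: u64, max_retries: u8) {
        let left = self.retries_left.entry(hash).or_insert(max_retries);
        if *left == 0 {
            return; // give up on this hash
        }
        *left -= 1;
        if !self.order.contains(&hash) {
            if self.order.len() == self.cap {
                // evict the oldest buffered hash to stay within capacity
                if let Some(old) = self.order.pop_front() {
                    self.retries_left.remove(&old);
                }
            }
            self.order.push_back(hash);
        }
    }

    fn next_to_fetch(&mut self) -> Option<u64> {
        self.order.pop_front()
    }
}

fn main() {
    let mut buf = PendingHashes::new(2);
    buf.buffer(0xaa, 2);
    buf.buffer(0xbb, 2);
    buf.buffer(0xcc, 2); // evicts 0xaa
    assert_eq!(buf.next_to_fetch(), Some(0xbb));
}
```

As the module doc notes, the capacity has to be generous: during a network outage many hashes fail at once, and evicting them too aggressively would prevent recovery once peers become responsive again.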
-use std::{ - collections::HashMap, - pin::Pin, - task::{ready, Context, Poll}, - time::Duration, +use super::{ + config::TransactionFetcherConfig, + constants::{tx_fetcher::*, SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST}, + MessageFilter, PeerMetadata, PooledTransactions, + SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, +}; +use crate::{ + cache::{LruCache, LruMap}, + duration_metered_exec, + metrics::TransactionFetcherMetrics, + transactions::{validation, PartiallyFilterMessage}, }; - use alloy_primitives::TxHash; use derive_more::{Constructor, Deref}; use futures::{stream::FuturesUnordered, Future, FutureExt, Stream, StreamExt}; @@ -40,37 +45,32 @@ use reth_eth_wire::{ DedupPayload, EthVersion, GetPooledTransactions, HandleMempoolData, HandleVersionedMempoolData, PartiallyValidData, RequestTxHashes, ValidAnnouncementData, }; +use reth_eth_wire_types::{EthNetworkPrimitives, NetworkPrimitives}; use reth_network_api::PeerRequest; use reth_network_p2p::error::{RequestError, RequestResult}; use reth_network_peers::PeerId; use reth_primitives::PooledTransactionsElement; +use reth_primitives_traits::SignedTransaction; use schnellru::ByLength; #[cfg(debug_assertions)] use smallvec::{smallvec, SmallVec}; +use std::{ + collections::HashMap, + pin::Pin, + task::{ready, Context, Poll}, + time::Duration, +}; use tokio::sync::{mpsc::error::TrySendError, oneshot, oneshot::error::RecvError}; use tracing::{debug, trace}; use validation::FilterOutcome; -use super::{ - config::TransactionFetcherConfig, - constants::{tx_fetcher::*, SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST}, - MessageFilter, PeerMetadata, PooledTransactions, - SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, -}; -use crate::{ - cache::{LruCache, LruMap}, - duration_metered_exec, - metrics::TransactionFetcherMetrics, - transactions::{validation, PartiallyFilterMessage}, -}; - /// The type responsible for fetching missing transactions from peers. /// /// This will keep track of unique transaction hashes that are currently being fetched and submits /// new requests on announced hashes. #[derive(Debug)] #[pin_project] -pub struct TransactionFetcher { +pub struct TransactionFetcher { /// All peers with to which a [`GetPooledTransactions`] request is inflight. pub active_peers: LruMap, /// All currently active [`GetPooledTransactions`] requests. @@ -79,7 +79,7 @@ pub struct TransactionFetcher { /// It's disjoint from the set of hashes which are awaiting an idle fallback peer in order to /// be fetched. #[pin] - pub inflight_requests: FuturesUnordered, + pub inflight_requests: FuturesUnordered>, /// Hashes that are awaiting an idle fallback peer so they can be fetched. /// /// This is a subset of all hashes in the fetcher, and is disjoint from the set of hashes for @@ -95,9 +95,7 @@ pub struct TransactionFetcher { metrics: TransactionFetcherMetrics, } -// === impl TransactionFetcher === - -impl TransactionFetcher { +impl TransactionFetcher { /// Removes the peer from the active set. 
pub(crate) fn remove_peer(&mut self, peer_id: &PeerId) { self.active_peers.remove(peer_id); @@ -280,7 +278,6 @@ impl TransactionFetcher { + IntoIterator)>, ) -> RequestTxHashes { let mut acc_size_response = 0; - let hashes_from_announcement_len = hashes_from_announcement.len(); let mut hashes_from_announcement_iter = hashes_from_announcement.into_iter(); @@ -289,12 +286,12 @@ impl TransactionFetcher { // tx is really big, pack request with single tx if size >= self.info.soft_limit_byte_size_pooled_transactions_response_on_pack_request { - return hashes_from_announcement_iter.collect::() + return hashes_from_announcement_iter.collect() } acc_size_response = size; } - let mut surplus_hashes = RequestTxHashes::with_capacity(hashes_from_announcement_len - 1); + let mut surplus_hashes = RequestTxHashes::default(); // folds size based on expected response size and adds selected hashes to the request // list and the other hashes to the surplus list @@ -328,8 +325,6 @@ impl TransactionFetcher { } surplus_hashes.extend(hashes_from_announcement_iter.map(|(hash, _metadata)| hash)); - surplus_hashes.shrink_to_fit(); - hashes_to_request.shrink_to_fit(); surplus_hashes } @@ -431,11 +426,10 @@ impl TransactionFetcher { /// the request by checking the transactions seen by the peer against the buffer. pub fn on_fetch_pending_hashes( &mut self, - peers: &HashMap, + peers: &HashMap>, has_capacity_wrt_pending_pool_imports: impl Fn(usize) -> bool, ) { - let init_capacity_req = approx_capacity_get_pooled_transactions_req_eth68(&self.info); - let mut hashes_to_request = RequestTxHashes::with_capacity(init_capacity_req); + let mut hashes_to_request = RequestTxHashes::default(); let is_session_active = |peer_id: &PeerId| peers.contains_key(peer_id); let mut search_durations = TxFetcherSearchDurations::default(); @@ -484,9 +478,6 @@ impl TransactionFetcher { search_durations.fill_request ); - // free unused memory - hashes_to_request.shrink_to_fit(); - self.update_pending_fetch_cache_search_metrics(search_durations); trace!(target: "net::tx", @@ -634,7 +625,7 @@ impl TransactionFetcher { pub fn request_transactions_from_peer( &mut self, new_announced_hashes: RequestTxHashes, - peer: &PeerMetadata, + peer: &PeerMetadata, ) -> Option { let peer_id: PeerId = peer.request_tx.peer_id; let conn_eth_version = peer.version; @@ -688,10 +679,8 @@ impl TransactionFetcher { } let (response, rx) = oneshot::channel(); - let req: PeerRequest = PeerRequest::GetPooledTransactions { - request: GetPooledTransactions( - new_announced_hashes.iter().copied().collect::>(), - ), + let req = PeerRequest::GetPooledTransactions { + request: GetPooledTransactions(new_announced_hashes.iter().copied().collect()), response, }; @@ -906,8 +895,8 @@ impl TransactionFetcher { /// [`TransactionsManager`](super::TransactionsManager). 
pub fn on_resolved_get_pooled_transactions_request_fut( &mut self, - response: GetPooledTxResponse, - ) -> FetchEvent { + response: GetPooledTxResponse, + ) -> FetchEvent { // update peer activity, requests for buffered hashes can only be made to idle // fallback peers let GetPooledTxResponse { peer_id, mut requested_hashes, result } = response; @@ -1012,8 +1001,7 @@ impl TransactionFetcher { // self.try_buffer_hashes_for_retry(requested_hashes, &peer_id); - let transactions = - valid_payload.into_data().into_values().collect::(); + let transactions = valid_payload.into_data().into_values().collect(); FetchEvent::TransactionsFetched { peer_id, transactions } } @@ -1030,8 +1018,8 @@ impl TransactionFetcher { } } -impl Stream for TransactionFetcher { - type Item = FetchEvent; +impl Stream for TransactionFetcher { + type Item = FetchEvent; /// Advances all inflight requests and returns the next event. fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -1049,7 +1037,7 @@ impl Stream for TransactionFetcher { } } -impl Default for TransactionFetcher { +impl Default for TransactionFetcher { fn default() -> Self { Self { active_peers: LruMap::new(DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS), @@ -1096,13 +1084,13 @@ impl TxFetchMetadata { /// Represents possible events from fetching transactions. #[derive(Debug)] -pub enum FetchEvent { +pub enum FetchEvent { /// Triggered when transactions are successfully fetched. TransactionsFetched { /// The ID of the peer from which transactions were fetched. peer_id: PeerId, /// The transactions that were fetched, if available. - transactions: PooledTransactions, + transactions: PooledTransactions, }, /// Triggered when there is an error in fetching transactions. FetchError { @@ -1120,22 +1108,22 @@ pub enum FetchEvent { /// An inflight request for [`PooledTransactions`] from a peer. #[derive(Debug)] -pub struct GetPooledTxRequest { +pub struct GetPooledTxRequest { peer_id: PeerId, /// Transaction hashes that were requested, for cleanup purposes requested_hashes: RequestTxHashes, - response: oneshot::Receiver>, + response: oneshot::Receiver>>, } /// Upon reception of a response, a [`GetPooledTxRequest`] is deconstructed to form a /// [`GetPooledTxResponse`]. #[derive(Debug)] -pub struct GetPooledTxResponse { +pub struct GetPooledTxResponse { peer_id: PeerId, /// Transaction hashes that were requested, for cleanup purposes, since peer may only return a /// subset of requested hashes. 
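`GetPooledTxRequestFut` above is a small hand-written `Future`: it polls the wrapped `oneshot::Receiver` and, once the response arrives, bundles it back together with the request metadata (`peer_id`, requested hashes). A simplified version of the same shape, without the `Option` take/put-back dance the PR uses:

```rust
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};
use tokio::sync::oneshot;

/// Stand-in for `GetPooledTxResponse`: the response plus who sent it.
struct Response {
    peer_id: u64,
    result: Result<String, oneshot::error::RecvError>,
}

/// Wraps a oneshot receiver together with the request metadata, so that when
/// the response arrives the caller still knows which peer it belongs to.
struct RequestFut {
    peer_id: u64,
    rx: oneshot::Receiver<String>,
}

impl Future for RequestFut {
    type Output = Response;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let peer_id = self.peer_id;
        // `oneshot::Receiver` is itself a Future and is Unpin, so it can be
        // polled directly through a fresh Pin.
        match Pin::new(&mut self.rx).poll(cx) {
            Poll::Ready(result) => Poll::Ready(Response { peer_id, result }),
            Poll::Pending => Poll::Pending,
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = oneshot::channel();
    let fut = RequestFut { peer_id: 7, rx };
    tx.send("pooled txs".to_string()).unwrap();
    let resp = fut.await;
    assert_eq!(resp.peer_id, 7);
    assert_eq!(resp.result.unwrap(), "pooled txs");
}
```

Keeping the metadata inside the future is what lets the fetcher run many of these concurrently in a `FuturesUnordered` and still attribute each completed response to the right peer and hash set.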
requested_hashes: RequestTxHashes, - result: Result, RecvError>, + result: Result>, RecvError>, } /// Stores the response receiver made by sending a [`GetPooledTransactions`] request to a peer's @@ -1143,24 +1131,24 @@ pub struct GetPooledTxResponse { #[must_use = "futures do nothing unless polled"] #[pin_project::pin_project] #[derive(Debug)] -pub struct GetPooledTxRequestFut { +pub struct GetPooledTxRequestFut { #[pin] - inner: Option, + inner: Option>, } -impl GetPooledTxRequestFut { +impl GetPooledTxRequestFut { #[inline] const fn new( peer_id: PeerId, requested_hashes: RequestTxHashes, - response: oneshot::Receiver>, + response: oneshot::Receiver>>, ) -> Self { Self { inner: Some(GetPooledTxRequest { peer_id, requested_hashes, response }) } } } -impl Future for GetPooledTxRequestFut { - type Output = GetPooledTxResponse; +impl Future for GetPooledTxRequestFut { + type Output = GetPooledTxResponse; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut req = self.as_mut().project().inner.take().expect("polled after completion"); @@ -1180,18 +1168,18 @@ impl Future for GetPooledTxRequestFut { /// Wrapper of unverified [`PooledTransactions`]. #[derive(Debug, Constructor, Deref)] -pub struct UnverifiedPooledTransactions { - txns: PooledTransactions, +pub struct UnverifiedPooledTransactions { + txns: PooledTransactions, } /// [`PooledTransactions`] that have been successfully verified. #[derive(Debug, Constructor, Deref)] -pub struct VerifiedPooledTransactions { - txns: PooledTransactions, +pub struct VerifiedPooledTransactions { + txns: PooledTransactions, } -impl DedupPayload for VerifiedPooledTransactions { - type Value = PooledTransactionsElement; +impl DedupPayload for VerifiedPooledTransactions { + type Value = T; fn is_empty(&self) -> bool { self.txns.is_empty() @@ -1202,30 +1190,31 @@ impl DedupPayload for VerifiedPooledTransactions { } fn dedup(self) -> PartiallyValidData { - let Self { txns } = self; - let unique_fetched = txns - .into_iter() - .map(|tx| (*tx.hash(), tx)) - .collect::>(); - - PartiallyValidData::from_raw_data(unique_fetched, None) + PartiallyValidData::from_raw_data( + self.txns.into_iter().map(|tx| (*tx.tx_hash(), tx)).collect(), + None, + ) } } trait VerifyPooledTransactionsResponse { + type Transaction: SignedTransaction; + fn verify( self, requested_hashes: &RequestTxHashes, peer_id: &PeerId, - ) -> (VerificationOutcome, VerifiedPooledTransactions); + ) -> (VerificationOutcome, VerifiedPooledTransactions); } -impl VerifyPooledTransactionsResponse for UnverifiedPooledTransactions { +impl VerifyPooledTransactionsResponse for UnverifiedPooledTransactions { + type Transaction = T; + fn verify( self, requested_hashes: &RequestTxHashes, _peer_id: &PeerId, - ) -> (VerificationOutcome, VerifiedPooledTransactions) { + ) -> (VerificationOutcome, VerifiedPooledTransactions) { let mut verification_outcome = VerificationOutcome::Ok; let Self { mut txns } = self; @@ -1236,11 +1225,11 @@ impl VerifyPooledTransactionsResponse for UnverifiedPooledTransactions { let mut tx_hashes_not_requested_count = 0; txns.0.retain(|tx| { - if !requested_hashes.contains(tx.hash()) { + if !requested_hashes.contains(tx.tx_hash()) { verification_outcome = VerificationOutcome::ReportPeer; #[cfg(debug_assertions)] - tx_hashes_not_requested.push(*tx.hash()); + tx_hashes_not_requested.push(*tx.tx_hash()); #[cfg(not(debug_assertions))] { tx_hashes_not_requested_count += 1; @@ -1343,16 +1332,13 @@ struct TxFetcherSearchDurations { #[cfg(test)] mod test { - use 
std::{collections::HashSet, str::FromStr}; - + use super::*; + use crate::transactions::tests::{default_cache, new_mock_session}; use alloy_primitives::{hex, B256}; use alloy_rlp::Decodable; use derive_more::IntoIterator; use reth_primitives::TransactionSigned; - - use crate::transactions::tests::{default_cache, new_mock_session}; - - use super::*; + use std::{collections::HashSet, str::FromStr}; #[derive(IntoIterator)] struct TestValidAnnouncementData(Vec<(TxHash, Option<(u8, usize)>)>); @@ -1383,7 +1369,7 @@ mod test { // RIG TEST - let tx_fetcher = &mut TransactionFetcher::default(); + let tx_fetcher = &mut TransactionFetcher::::default(); let eth68_hashes = [ B256::from_slice(&[1; 32]), diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 439f92bada9..83674c96c51 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -18,28 +18,29 @@ pub use validation::*; pub(crate) use fetcher::{FetchEvent, TransactionFetcher}; use self::constants::{tx_manager::*, DEFAULT_SOFT_LIMIT_BYTE_SIZE_TRANSACTIONS_BROADCAST_MESSAGE}; -use constants::SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE; - -use std::{ - collections::{hash_map::Entry, HashMap, HashSet}, - pin::Pin, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, +use crate::{ + budget::{ + DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS, + DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, + DEFAULT_BUDGET_TRY_DRAIN_STREAM, }, - task::{Context, Poll}, - time::{Duration, Instant}, + cache::LruCache, + duration_metered_exec, metered_poll_nested_stream_with_budget, + metrics::{TransactionsManagerMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, + NetworkHandle, }; - use alloy_primitives::{TxHash, B256}; +use constants::SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE; use futures::{stream::FuturesUnordered, Future, StreamExt}; use reth_eth_wire::{ - DedupPayload, EthVersion, GetPooledTransactions, HandleMempoolData, HandleVersionedMempoolData, - NewPooledTransactionHashes, NewPooledTransactionHashes66, NewPooledTransactionHashes68, - PooledTransactions, RequestTxHashes, Transactions, + DedupPayload, EthNetworkPrimitives, EthVersion, GetPooledTransactions, HandleMempoolData, + HandleVersionedMempoolData, NetworkPrimitives, NewPooledTransactionHashes, + NewPooledTransactionHashes66, NewPooledTransactionHashes68, PooledTransactions, + RequestTxHashes, Transactions, }; use reth_metrics::common::mpsc::UnboundedMeteredReceiver; use reth_network_api::{ + events::{PeerEvent, SessionInfo}, NetworkEvent, NetworkEventListenerProvider, PeerRequest, PeerRequestSender, Peers, }; use reth_network_p2p::{ @@ -48,29 +49,28 @@ use reth_network_p2p::{ }; use reth_network_peers::PeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{PooledTransactionsElement, TransactionSigned, TransactionSignedEcRecovered}; +use reth_primitives::{transaction::SignedTransactionIntoRecoveredExt, TransactionSigned}; +use reth_primitives_traits::{SignedTransaction, TxType}; use reth_tokio_util::EventStream; use reth_transaction_pool::{ error::{PoolError, PoolResult}, GetPooledTransactionLimit, PoolTransaction, PropagateKind, PropagatedTransactions, TransactionPool, ValidPoolTransaction, }; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet}, + pin::Pin, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + task::{Context, Poll}, + time::{Duration, Instant}, +}; use 
tokio::sync::{mpsc, oneshot, oneshot::error::RecvError}; use tokio_stream::wrappers::{ReceiverStream, UnboundedReceiverStream}; use tracing::{debug, trace}; -use crate::{ - budget::{ - DEFAULT_BUDGET_TRY_DRAIN_NETWORK_TRANSACTION_EVENTS, - DEFAULT_BUDGET_TRY_DRAIN_PENDING_POOL_IMPORTS, DEFAULT_BUDGET_TRY_DRAIN_POOL_IMPORTS, - DEFAULT_BUDGET_TRY_DRAIN_STREAM, - }, - cache::LruCache, - duration_metered_exec, metered_poll_nested_stream_with_budget, - metrics::{TransactionsManagerMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, - NetworkHandle, -}; - /// The future for importing transactions into the pool. /// /// Resolves with the result of each transaction import. @@ -84,42 +84,26 @@ pub type PoolImportFuture = Pin>> /// For example [`TransactionsHandle::get_peer_transaction_hashes`] returns the transaction hashes /// known by a specific peer. #[derive(Debug, Clone)] -pub struct TransactionsHandle { +pub struct TransactionsHandle { /// Command channel to the [`TransactionsManager`] - manager_tx: mpsc::UnboundedSender, + manager_tx: mpsc::UnboundedSender>, } /// Implementation of the `TransactionsHandle` API for use in testnet via type /// [`PeerHandle`](crate::test_utils::PeerHandle). -impl TransactionsHandle { - fn send(&self, cmd: TransactionsCommand) { +impl TransactionsHandle { + fn send(&self, cmd: TransactionsCommand) { let _ = self.manager_tx.send(cmd); } /// Fetch the [`PeerRequestSender`] for the given peer. - async fn peer_handle(&self, peer_id: PeerId) -> Result, RecvError> { - let (tx, rx) = oneshot::channel(); - self.send(TransactionsCommand::GetPeerSender { peer_id, peer_request_sender: tx }); - rx.await - } - - /// Requests the transactions directly from the given peer. - /// - /// Returns `None` if the peer is not connected. - /// - /// **Note**: this returns the response from the peer as received. - pub async fn get_pooled_transactions_from( + async fn peer_handle( &self, peer_id: PeerId, - hashes: Vec, - ) -> Result>, RequestError> { - let Some(peer) = self.peer_handle(peer_id).await? else { return Ok(None) }; - + ) -> Result>>, RecvError> { let (tx, rx) = oneshot::channel(); - let request = PeerRequest::GetPooledTransactions { request: hashes.into(), response: tx }; - peer.try_send(request).ok(); - - rx.await?.map(|res| Some(res.0)) + self.send(TransactionsCommand::GetPeerSender { peer_id, peer_request_sender: tx }); + rx.await } /// Manually propagate the transaction that belongs to the hash. @@ -138,7 +122,11 @@ impl TransactionsHandle { /// /// Note: this only propagates the transactions that are known to the pool. pub fn propagate_hashes_to(&self, hash: impl IntoIterator, peer: PeerId) { - self.send(TransactionsCommand::PropagateHashesTo(hash.into_iter().collect(), peer)) + let hashes = hash.into_iter().collect::>(); + if hashes.is_empty() { + return + } + self.send(TransactionsCommand::PropagateHashesTo(hashes, peer)) } /// Request the active peer IDs from the [`TransactionsManager`]. @@ -149,7 +137,12 @@ impl TransactionsHandle { } /// Manually propagate full transactions to a specific peer. + /// + /// Do nothing if transactions are empty. pub fn propagate_transactions_to(&self, transactions: Vec, peer: PeerId) { + if transactions.is_empty() { + return + } self.send(TransactionsCommand::PropagateTransactionsTo(transactions, peer)) } @@ -157,7 +150,12 @@ impl TransactionsHandle { /// /// It's up to the [`TransactionsManager`] whether the transactions are sent as hashes or in /// full. + /// + /// Do nothing if transactions are empty. 
pub fn propagate_transactions(&self, transactions: Vec) { + if transactions.is_empty() { + return + } self.send(TransactionsCommand::PropagateTransactions(transactions)) } @@ -166,6 +164,9 @@ impl TransactionsHandle { &self, peers: Vec, ) -> Result>, RecvError> { + if peers.is_empty() { + return Ok(Default::default()) + } let (tx, rx) = oneshot::channel(); self.send(TransactionsCommand::GetTransactionHashes { peers, tx }); rx.await @@ -179,6 +180,25 @@ impl TransactionsHandle { let res = self.get_transaction_hashes(vec![peer]).await?; Ok(res.into_values().next().unwrap_or_default()) } + + /// Requests the transactions directly from the given peer. + /// + /// Returns `None` if the peer is not connected. + /// + /// **Note**: this returns the response from the peer as received. + pub async fn get_pooled_transactions_from( + &self, + peer_id: PeerId, + hashes: Vec, + ) -> Result>, RequestError> { + let Some(peer) = self.peer_handle(peer_id).await? else { return Ok(None) }; + + let (tx, rx) = oneshot::channel(); + let request = PeerRequest::GetPooledTransactions { request: hashes.into(), response: tx }; + peer.try_send(request).ok(); + + rx.await?.map(|res| Some(res.0)) + } } /// Manages transactions on top of the p2p network. @@ -200,17 +220,17 @@ impl TransactionsHandle { /// propagate new transactions over the network. #[derive(Debug)] #[must_use = "Manager does nothing unless polled."] -pub struct TransactionsManager { +pub struct TransactionsManager { /// Access to the transaction pool. pool: Pool, /// Network access. - network: NetworkHandle, + network: NetworkHandle, /// Subscriptions to all network related events. /// /// From which we get all new incoming transaction related messages. - network_events: EventStream, + network_events: EventStream>>, /// Transaction fetcher to handle inflight and missing transaction requests. - transaction_fetcher: TransactionFetcher, + transaction_fetcher: TransactionFetcher, /// All currently pending transactions grouped by peers. /// /// This way we can track incoming transactions and prevent multiple pool imports for the same @@ -233,16 +253,16 @@ pub struct TransactionsManager { /// Bad imports. bad_imports: LruCache, /// All the connected peers. - peers: HashMap, + peers: HashMap>, /// Send half for the command channel. /// /// This is kept so that a new [`TransactionsHandle`] can be created at any time. - command_tx: mpsc::UnboundedSender, + command_tx: mpsc::UnboundedSender>, /// Incoming commands from [`TransactionsHandle`]. /// /// This will only receive commands if a user manually sends a command to the manager through /// the [`TransactionsHandle`] to interact with this type directly. - command_rx: UnboundedReceiverStream, + command_rx: UnboundedReceiverStream>, /// A stream that yields new __pending__ transactions. /// /// A transaction is considered __pending__ if it is executable on the current state of the @@ -253,21 +273,21 @@ pub struct TransactionsManager { /// - account has enough balance to cover the transaction's gas pending_transactions: ReceiverStream, /// Incoming events from the [`NetworkManager`](crate::NetworkManager). - transaction_events: UnboundedMeteredReceiver, + transaction_events: UnboundedMeteredReceiver>, /// How the `TransactionsManager` is configured. config: TransactionsManagerConfig, /// `TransactionsManager` metrics metrics: TransactionsManagerMetrics, } -impl TransactionsManager { +impl TransactionsManager { /// Sets up a new instance. 
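Both `peer_handle` and `get_transaction_hashes` above follow the same request/response shape: a oneshot sender travels inside the command, and the manager answers on it. A reduced sketch, assuming a stand-in command enum and peer-id type:

    use tokio::sync::{mpsc, oneshot};

    enum Command {
        GetActivePeers(oneshot::Sender<Vec<u64>>),
    }

    async fn get_active_peers(
        manager_tx: &mpsc::UnboundedSender<Command>,
    ) -> Result<Vec<u64>, oneshot::error::RecvError> {
        let (tx, rx) = oneshot::channel();
        // the response channel is shipped along with the command
        let _ = manager_tx.send(Command::GetActivePeers(tx));
        // resolves once the manager replies (or errors if it drops the sender)
        rx.await
    }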
/// /// Note: This expects an existing [`NetworkManager`](crate::NetworkManager) instance. pub fn new( - network: NetworkHandle, + network: NetworkHandle, pool: Pool, - from_network: mpsc::UnboundedReceiver, + from_network: mpsc::UnboundedReceiver>, transactions_manager_config: TransactionsManagerConfig, ) -> Self { let network_events = network.event_listener(); @@ -310,24 +330,106 @@ impl TransactionsManager { metrics, } } -} - -// === impl TransactionsManager === -impl TransactionsManager -where - Pool: TransactionPool, -{ /// Returns a new handle that can send commands to this type. - pub fn handle(&self) -> TransactionsHandle { + pub fn handle(&self) -> TransactionsHandle { TransactionsHandle { manager_tx: self.command_tx.clone() } } -} -impl TransactionsManager -where - Pool: TransactionPool + 'static, -{ + /// Returns `true` if [`TransactionsManager`] has capacity to request pending hashes. Returns + /// `false` if [`TransactionsManager`] is operating close to full capacity. + fn has_capacity_for_fetching_pending_hashes(&self) -> bool { + self.pending_pool_imports_info + .has_capacity(self.pending_pool_imports_info.max_pending_pool_imports) && + self.transaction_fetcher.has_capacity_for_fetching_pending_hashes() + } + + fn report_peer_bad_transactions(&self, peer_id: PeerId) { + self.report_peer(peer_id, ReputationChangeKind::BadTransactions); + self.metrics.reported_bad_transactions.increment(1); + } + + fn report_peer(&self, peer_id: PeerId, kind: ReputationChangeKind) { + trace!(target: "net::tx", ?peer_id, ?kind, "reporting reputation change"); + self.network.reputation_change(peer_id, kind); + } + + fn report_already_seen(&self, peer_id: PeerId) { + trace!(target: "net::tx", ?peer_id, "Penalizing peer for already seen transaction"); + self.network.reputation_change(peer_id, ReputationChangeKind::AlreadySeenTransaction); + } + + /// Clear the transaction + fn on_good_import(&mut self, hash: TxHash) { + self.transactions_by_peers.remove(&hash); + } + + /// Penalize the peers that intentionally sent the bad transaction, and cache it to avoid + /// fetching or importing it again. + /// + /// Errors that count as bad transactions are: + /// + /// - intrinsic gas too low + /// - exceeds gas limit + /// - gas uint overflow + /// - exceeds max init code size + /// - oversized data + /// - signer account has bytecode + /// - chain id mismatch + /// - old legacy chain id + /// - tx type not supported + /// + /// (and additionally for blobs txns...) + /// + /// - no blobs + /// - too many blobs + /// - invalid kzg proof + /// - kzg error + /// - not blob transaction (tx type mismatch) + /// - wrong versioned kzg commitment hash + fn on_bad_import(&mut self, err: PoolError) { + let peers = self.transactions_by_peers.remove(&err.hash); + + // if we're _currently_ syncing, we ignore a bad transaction + if !err.is_bad_transaction() || self.network.is_syncing() { + return + } + // otherwise we penalize the peer that sent the bad transaction, with the assumption that + // the peer should have known that this transaction is bad (e.g. violating consensus rules) + if let Some(peers) = peers { + for peer_id in peers { + self.report_peer_bad_transactions(peer_id); + } + } + self.metrics.bad_imports.increment(1); + self.bad_imports.insert(err.hash); + } + + /// Runs an operation to fetch hashes that are cached in [`TransactionFetcher`]. 
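The bad-import path above penalizes peers only when the error is attributable to the sender and the node is not syncing; the offending hash is then cached so it is neither fetched nor imported again. A condensed sketch with stand-in types (the real `bad_imports` is a bounded `LruCache`, not a plain set):

    use std::collections::{HashMap, HashSet};

    type TxHash = [u8; 32];
    type PeerId = u64;

    struct BadImportTracker {
        transactions_by_peers: HashMap<TxHash, HashSet<PeerId>>,
        bad_imports: HashSet<TxHash>,
    }

    impl BadImportTracker {
        fn on_bad_import(&mut self, hash: TxHash, is_bad_transaction: bool, is_syncing: bool) {
            let peers = self.transactions_by_peers.remove(&hash);
            // explainable errors, or a syncing node, penalize nobody
            if !is_bad_transaction || is_syncing {
                return;
            }
            if let Some(peers) = peers {
                for _peer in peers {
                    // the real manager calls report_peer_bad_transactions(peer) here
                }
            }
            self.bad_imports.insert(hash);
        }
    }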
+ fn on_fetch_hashes_pending_fetch(&mut self) { + // try drain transaction hashes pending fetch + let info = &self.pending_pool_imports_info; + let max_pending_pool_imports = info.max_pending_pool_imports; + let has_capacity_wrt_pending_pool_imports = + |divisor| info.has_capacity(max_pending_pool_imports / divisor); + + self.transaction_fetcher + .on_fetch_pending_hashes(&self.peers, has_capacity_wrt_pending_pool_imports); + } + + fn on_request_error(&self, peer_id: PeerId, req_err: RequestError) { + let kind = match req_err { + RequestError::UnsupportedCapability => ReputationChangeKind::BadProtocol, + RequestError::Timeout => ReputationChangeKind::Timeout, + RequestError::ChannelClosed | RequestError::ConnectionDropped => { + // peer is already disconnected + return + } + RequestError::BadResponse => return self.report_peer_bad_transactions(peer_id), + }; + self.report_peer(peer_id, kind); + } + #[inline] fn update_poll_metrics(&self, start: Instant, poll_durations: TxManagerPollDurations) { let metrics = &self.metrics; @@ -353,281 +455,34 @@ where metrics.acc_duration_fetch_pending_hashes.set(acc_pending_fetch.as_secs_f64()); metrics.acc_duration_poll_commands.set(acc_cmds.as_secs_f64()); } +} - /// Request handler for an incoming request for transactions - fn on_get_pooled_transactions( - &mut self, - peer_id: PeerId, - request: GetPooledTransactions, - response: oneshot::Sender>, - ) { - if let Some(peer) = self.peers.get_mut(&peer_id) { - if self.network.tx_gossip_disabled() { - let _ = response.send(Ok(PooledTransactions::default())); - return +impl TransactionsManager +where + Pool: TransactionPool, + N: NetworkPrimitives, +{ + /// Processes a batch import results. + fn on_batch_import_result(&mut self, batch_results: Vec>) { + for res in batch_results { + match res { + Ok(hash) => { + self.on_good_import(hash); + } + Err(err) => { + self.on_bad_import(err); + } } - let transactions = self.pool.get_pooled_transaction_elements( - request.0, - GetPooledTransactionLimit::ResponseSizeSoftLimit( - self.transaction_fetcher.info.soft_limit_byte_size_pooled_transactions_response, - ), - ); - - trace!(target: "net::tx::propagation", sent_txs=?transactions.iter().map(|tx| *tx.hash()), "Sending requested transactions to peer"); - - // we sent a response at which point we assume that the peer is aware of the - // transactions - peer.seen_transactions.extend(transactions.iter().map(|tx| *tx.hash())); - - let resp = PooledTransactions(transactions); - let _ = response.send(Ok(resp)); } } - /// Invoked when transactions in the local mempool are considered __pending__. - /// - /// When a transaction in the local mempool is moved to the pending pool, we propagate them to - /// connected peers over network using the `Transactions` and `NewPooledTransactionHashes` - /// messages. The Transactions message relays complete transaction objects and is typically - /// sent to a small, random fraction of connected peers. - /// - /// All other peers receive a notification of the transaction hash and can request the - /// complete transaction object if it is unknown to them. The dissemination of complete - /// transactions to a fraction of peers usually ensures that all nodes receive the transaction - /// and won't need to request it. 
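The `on_request_error` match above folds transport failures into reputation changes and deliberately ignores errors that only mean the peer is already gone. The same mapping, expressed with stand-in enums so it compiles on its own:

    enum RequestError {
        UnsupportedCapability,
        Timeout,
        ChannelClosed,
        ConnectionDropped,
        BadResponse,
    }

    enum ReputationChangeKind {
        BadProtocol,
        Timeout,
        BadTransactions,
    }

    fn reputation_for(err: RequestError) -> Option<ReputationChangeKind> {
        match err {
            RequestError::UnsupportedCapability => Some(ReputationChangeKind::BadProtocol),
            RequestError::Timeout => Some(ReputationChangeKind::Timeout),
            // the peer is already disconnected; nothing to report
            RequestError::ChannelClosed | RequestError::ConnectionDropped => None,
            RequestError::BadResponse => Some(ReputationChangeKind::BadTransactions),
        }
    }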
- fn on_new_pending_transactions(&mut self, hashes: Vec) { - // Nothing to propagate while initially syncing - if self.network.is_initially_syncing() { - return - } - if self.network.tx_gossip_disabled() { - return - } - - trace!(target: "net::tx", num_hashes=?hashes.len(), "Start propagating transactions"); - - self.propagate_all(hashes); - } - - /// Propagates the given transactions to the peers - /// - /// This fetches all transaction from the pool, including the 4844 blob transactions but - /// __without__ their sidecar, because 4844 transactions are only ever announced as hashes. - fn propagate_all(&mut self, hashes: Vec) { - let propagated = self.propagate_transactions( - self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(), - ); - - // notify pool so events get fired - self.pool.on_propagated(propagated); - } - - /// Propagate the transactions to all connected peers either as full objects or hashes. - /// - /// The message for new pooled hashes depends on the negotiated version of the stream. - /// See [`NewPooledTransactionHashes`] - /// - /// Note: EIP-4844 are disallowed from being broadcast in full and are only ever sent as hashes, see also . - fn propagate_transactions( - &mut self, - to_propagate: Vec, - ) -> PropagatedTransactions { - let mut propagated = PropagatedTransactions::default(); - if self.network.tx_gossip_disabled() { - return propagated - } - - // send full transactions to a set of the connected peers based on the configured mode - let max_num_full = self.config.propagation_mode.full_peer_count(self.peers.len()); - - // Note: Assuming ~random~ order due to random state of the peers map hasher - for (peer_idx, (peer_id, peer)) in self.peers.iter_mut().enumerate() { - // determine whether to send full tx objects or hashes. - let mut builder = if peer_idx > max_num_full { - PropagateTransactionsBuilder::pooled(peer.version) - } else { - PropagateTransactionsBuilder::full(peer.version) - }; - - // Iterate through the transactions to propagate and fill the hashes and full - // transaction lists, before deciding whether or not to send full transactions to the - // peer. 
- for tx in &to_propagate { - // Only proceed if the transaction is not in the peer's list of seen transactions - if !peer.seen_transactions.contains(&tx.hash()) { - // add transaction to the list of hashes to propagate - builder.push(tx); - } - } - - if builder.is_empty() { - trace!(target: "net::tx", ?peer_id, "Nothing to propagate to peer; has seen all transactions"); - continue - } - - let PropagateTransactions { pooled, full } = builder.build(); - - // send hashes if any - if let Some(mut new_pooled_hashes) = pooled { - // enforce tx soft limit per message for the (unlikely) event the number of - // hashes exceeds it - new_pooled_hashes - .truncate(SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE); - - for hash in new_pooled_hashes.iter_hashes().copied() { - propagated.0.entry(hash).or_default().push(PropagateKind::Hash(*peer_id)); - // mark transaction as seen by peer - peer.seen_transactions.insert(hash); - } - - trace!(target: "net::tx", ?peer_id, num_txs=?new_pooled_hashes.len(), "Propagating tx hashes to peer"); - - // send hashes of transactions - self.network.send_transactions_hashes(*peer_id, new_pooled_hashes); - } - - // send full transactions, if any - if let Some(new_full_transactions) = full { - for tx in &new_full_transactions { - propagated.0.entry(tx.hash()).or_default().push(PropagateKind::Full(*peer_id)); - // mark transaction as seen by peer - peer.seen_transactions.insert(tx.hash()); - } - - trace!(target: "net::tx", ?peer_id, num_txs=?new_full_transactions.len(), "Propagating full transactions to peer"); - - // send full transactions - self.network.send_transactions(*peer_id, new_full_transactions); - } - } - - // Update propagated transactions metrics - self.metrics.propagated_transactions.increment(propagated.0.len() as u64); - - propagated - } - - /// Propagate the full transactions to a specific peer. - /// - /// Returns the propagated transactions. 
- fn propagate_full_transactions_to_peer( - &mut self, - txs: Vec, - peer_id: PeerId, - ) -> Option { - trace!(target: "net::tx", ?peer_id, "Propagating transactions to peer"); - - let peer = self.peers.get_mut(&peer_id)?; - let mut propagated = PropagatedTransactions::default(); - - // filter all transactions unknown to the peer - let mut full_transactions = FullTransactionsBuilder::new(peer.version); - - let to_propagate = self.pool.get_all(txs).into_iter().map(PropagateTransaction::new); - - // Iterate through the transactions to propagate and fill the hashes and full transaction - for tx in to_propagate { - if !peer.seen_transactions.contains(&tx.hash()) { - full_transactions.push(&tx); - } - } - - if full_transactions.is_empty() { - // nothing to propagate - return None - } - - let PropagateTransactions { pooled, full } = full_transactions.build(); - - // send hashes if any - if let Some(new_pooled_hashes) = pooled { - for hash in new_pooled_hashes.iter_hashes().copied() { - propagated.0.entry(hash).or_default().push(PropagateKind::Hash(peer_id)); - // mark transaction as seen by peer - peer.seen_transactions.insert(hash); - } - // send hashes of transactions - self.network.send_transactions_hashes(peer_id, new_pooled_hashes); - } - - // send full transactions, if any - if let Some(new_full_transactions) = full { - for tx in &new_full_transactions { - propagated.0.entry(tx.hash()).or_default().push(PropagateKind::Full(peer_id)); - // mark transaction as seen by peer - peer.seen_transactions.insert(tx.hash()); - } - // send full transactions - self.network.send_transactions(peer_id, new_full_transactions); - } - - // Update propagated transactions metrics - self.metrics.propagated_transactions.increment(propagated.0.len() as u64); - - Some(propagated) - } - - /// Propagate the transaction hashes to the given peer - /// - /// Note: This will only send the hashes for transactions that exist in the pool. - fn propagate_hashes_to(&mut self, hashes: Vec, peer_id: PeerId) { - trace!(target: "net::tx", "Start propagating transactions as hashes"); - - // This fetches a transactions from the pool, including the blob transactions, which are - // only ever sent as hashes. 
- let propagated = { - let Some(peer) = self.peers.get_mut(&peer_id) else { - // no such peer - return - }; - - let to_propagate: Vec = - self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(); - - let mut propagated = PropagatedTransactions::default(); - - // check if transaction is known to peer - let mut hashes = PooledTransactionsHashesBuilder::new(peer.version); - - for tx in to_propagate { - if !peer.seen_transactions.insert(tx.hash()) { - hashes.push(&tx); - } - } - - let new_pooled_hashes = hashes.build(); - - if new_pooled_hashes.is_empty() { - // nothing to propagate - return - } - - for hash in new_pooled_hashes.iter_hashes().copied() { - propagated.0.entry(hash).or_default().push(PropagateKind::Hash(peer_id)); - } - - trace!(target: "net::tx::propagation", ?peer_id, ?new_pooled_hashes, "Propagating transactions to peer"); - - // send hashes of transactions - self.network.send_transactions_hashes(peer_id, new_pooled_hashes); - - // Update propagated transactions metrics - self.metrics.propagated_transactions.increment(propagated.0.len() as u64); - - propagated - }; - - // notify pool so events get fired - self.pool.on_propagated(propagated); - } - - /// Request handler for an incoming `NewPooledTransactionHashes` - fn on_new_pooled_transaction_hashes( - &mut self, - peer_id: PeerId, - msg: NewPooledTransactionHashes, - ) { - // If the node is initially syncing, ignore transactions + /// Request handler for an incoming `NewPooledTransactionHashes` + fn on_new_pooled_transaction_hashes( + &mut self, + peer_id: PeerId, + msg: NewPooledTransactionHashes, + ) { + // If the node is initially syncing, ignore transactions if self.network.is_initially_syncing() { return } @@ -785,16 +640,8 @@ where return } - // load message version before announcement data type is destructed in packing - let msg_version = valid_announcement_data.msg_version(); - // - // demand recommended soft limit on response, however the peer may enforce an arbitrary - // limit on the response (2MB) - // - // request buffer is shrunk via call to pack request! - let init_capacity_req = - self.transaction_fetcher.approx_capacity_get_pooled_transactions_req(msg_version); - let mut hashes_to_request = RequestTxHashes::with_capacity(init_capacity_req); + let mut hashes_to_request = + RequestTxHashes::with_capacity(valid_announcement_data.len() / 4); let surplus_hashes = self.transaction_fetcher.pack_request(&mut hashes_to_request, valid_announcement_data); @@ -802,7 +649,6 @@ where trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), surplus_hashes=?*surplus_hashes, - %msg_version, %client, "some hashes in announcement from peer didn't fit in `GetPooledTransactions` request, buffering surplus hashes" ); @@ -813,7 +659,6 @@ where trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), hashes=?*hashes_to_request, - %msg_version, %client, "sending hashes in `GetPooledTransactions` request to peer's session" ); @@ -837,57 +682,335 @@ where self.transaction_fetcher.buffer_hashes(failed_to_request_hashes, Some(peer_id)); } } +} - /// Handles dedicated transaction events related to the `eth` protocol. 
- fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { - match event { - NetworkTransactionEvent::IncomingTransactions { peer_id, msg } => { - // ensure we didn't receive any blob transactions as these are disallowed to be - // broadcasted in full +impl TransactionsManager +where + Pool: TransactionPool + 'static, + N: NetworkPrimitives< + BroadcastedTransaction: SignedTransaction, + PooledTransaction: SignedTransaction, + >, + Pool::Transaction: + PoolTransaction, +{ + /// Invoked when transactions in the local mempool are considered __pending__. + /// + /// When a transaction in the local mempool is moved to the pending pool, we propagate them to + /// connected peers over network using the `Transactions` and `NewPooledTransactionHashes` + /// messages. The Transactions message relays complete transaction objects and is typically + /// sent to a small, random fraction of connected peers. + /// + /// All other peers receive a notification of the transaction hash and can request the + /// complete transaction object if it is unknown to them. The dissemination of complete + /// transactions to a fraction of peers usually ensures that all nodes receive the transaction + /// and won't need to request it. + fn on_new_pending_transactions(&mut self, hashes: Vec) { + // Nothing to propagate while initially syncing + if self.network.is_initially_syncing() { + return + } + if self.network.tx_gossip_disabled() { + return + } - let has_blob_txs = msg.has_eip4844(); + trace!(target: "net::tx", num_hashes=?hashes.len(), "Start propagating transactions"); - let non_blob_txs = msg - .0 - .into_iter() - .map(PooledTransactionsElement::try_from_broadcast) - .filter_map(Result::ok) - .collect::(); + self.propagate_all(hashes); + } + + /// Propagate the full transactions to a specific peer. + /// + /// Returns the propagated transactions. 
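The `impl` header above constrains the associated types of `N` inline, in the style of `NetworkPrimitives<BroadcastedTransaction: SignedTransaction, ...>`. A self-contained illustration of that bound syntax, using made-up traits rather than the reth ones:

    trait Primitives {
        type Broadcasted;
        type Pooled;
    }

    trait Signed {}

    // Associated-type bounds: constrain N's associated types directly in
    // the bound, instead of spelling out separate where-clauses for each.
    fn import<N>(_txs: Vec<N::Pooled>)
    where
        N: Primitives<Broadcasted: Signed, Pooled: Signed>,
    {
    }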
+ fn propagate_full_transactions_to_peer( + &mut self, + txs: Vec, + peer_id: PeerId, + propagation_mode: PropagationMode, + ) -> Option { + trace!(target: "net::tx", ?peer_id, "Propagating transactions to peer"); + + let peer = self.peers.get_mut(&peer_id)?; + let mut propagated = PropagatedTransactions::default(); + + // filter all transactions unknown to the peer + let mut full_transactions = FullTransactionsBuilder::new(peer.version); + + let to_propagate = self.pool.get_all(txs).into_iter().map(PropagateTransaction::new); + + if propagation_mode.is_forced() { + // skip cache check if forced + full_transactions.extend(to_propagate); + } else { + // Iterate through the transactions to propagate and fill the hashes and full + // transaction + for tx in to_propagate { + if !peer.seen_transactions.contains(tx.tx_hash()) { + // Only include if the peer hasn't seen the transaction + full_transactions.push(&tx); + } + } + } + + if full_transactions.is_empty() { + // nothing to propagate + return None + } + + let PropagateTransactions { pooled, full } = full_transactions.build(); + + // send hashes if any + if let Some(new_pooled_hashes) = pooled { + for hash in new_pooled_hashes.iter_hashes().copied() { + propagated.0.entry(hash).or_default().push(PropagateKind::Hash(peer_id)); + // mark transaction as seen by peer + peer.seen_transactions.insert(hash); + } + + // send hashes of transactions + self.network.send_transactions_hashes(peer_id, new_pooled_hashes); + } + + // send full transactions, if any + if let Some(new_full_transactions) = full { + for tx in &new_full_transactions { + propagated.0.entry(*tx.tx_hash()).or_default().push(PropagateKind::Full(peer_id)); + // mark transaction as seen by peer + peer.seen_transactions.insert(*tx.tx_hash()); + } + + // send full transactions + self.network.send_transactions(peer_id, new_full_transactions); + } + + // Update propagated transactions metrics + self.metrics.propagated_transactions.increment(propagated.0.len() as u64); + + Some(propagated) + } + + /// Propagate the transaction hashes to the given peer + /// + /// Note: This will only send the hashes for transactions that exist in the pool. + fn propagate_hashes_to( + &mut self, + hashes: Vec, + peer_id: PeerId, + propagation_mode: PropagationMode, + ) { + trace!(target: "net::tx", "Start propagating transactions as hashes"); + + // This fetches a transactions from the pool, including the blob transactions, which are + // only ever sent as hashes. 
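`propagate_full_transactions_to_peer` above now branches on `PropagationMode`: forced sends bypass the per-peer seen-transactions cache, while basic sends filter on it. The selection rule in isolation, with plain hashes standing in for transactions:

    use std::collections::HashSet;

    #[derive(Clone, Copy)]
    enum PropagationMode {
        Basic,
        Forced,
    }

    impl PropagationMode {
        const fn is_forced(self) -> bool {
            matches!(self, Self::Forced)
        }
    }

    fn select_for_peer(
        candidates: &[[u8; 32]],
        seen_by_peer: &HashSet<[u8; 32]>,
        mode: PropagationMode,
    ) -> Vec<[u8; 32]> {
        if mode.is_forced() {
            // forced propagation resends even known transactions
            candidates.to_vec()
        } else {
            candidates.iter().copied().filter(|h| !seen_by_peer.contains(h)).collect()
        }
    }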
+ let propagated = { + let Some(peer) = self.peers.get_mut(&peer_id) else { + // no such peer + return + }; + + let to_propagate = self + .pool + .get_all(hashes) + .into_iter() + .map(PropagateTransaction::new) + .collect::>(); + + let mut propagated = PropagatedTransactions::default(); + + // check if transaction is known to peer + let mut hashes = PooledTransactionsHashesBuilder::new(peer.version); + + if propagation_mode.is_forced() { + hashes.extend(to_propagate) + } else { + for tx in to_propagate { + if !peer.seen_transactions.contains(tx.tx_hash()) { + // Include if the peer hasn't seen it + hashes.push(&tx); + } + } + } + + let new_pooled_hashes = hashes.build(); + + if new_pooled_hashes.is_empty() { + // nothing to propagate + return + } + + for hash in new_pooled_hashes.iter_hashes().copied() { + propagated.0.entry(hash).or_default().push(PropagateKind::Hash(peer_id)); + } + + trace!(target: "net::tx::propagation", ?peer_id, ?new_pooled_hashes, "Propagating transactions to peer"); + + // send hashes of transactions + self.network.send_transactions_hashes(peer_id, new_pooled_hashes); + + // Update propagated transactions metrics + self.metrics.propagated_transactions.increment(propagated.0.len() as u64); + + propagated + }; + + // notify pool so events get fired + self.pool.on_propagated(propagated); + } + + /// Propagate the transactions to all connected peers either as full objects or hashes. + /// + /// The message for new pooled hashes depends on the negotiated version of the stream. + /// See [`NewPooledTransactionHashes`] + /// + /// Note: EIP-4844 are disallowed from being broadcast in full and are only ever sent as hashes, see also . + fn propagate_transactions( + &mut self, + to_propagate: Vec>, + propagation_mode: PropagationMode, + ) -> PropagatedTransactions { + let mut propagated = PropagatedTransactions::default(); + if self.network.tx_gossip_disabled() { + return propagated + } + + // send full transactions to a set of the connected peers based on the configured mode + let max_num_full = self.config.propagation_mode.full_peer_count(self.peers.len()); + + // Note: Assuming ~random~ order due to random state of the peers map hasher + for (peer_idx, (peer_id, peer)) in self.peers.iter_mut().enumerate() { + // determine whether to send full tx objects or hashes. + let mut builder = if peer_idx > max_num_full { + PropagateTransactionsBuilder::pooled(peer.version) + } else { + PropagateTransactionsBuilder::full(peer.version) + }; + + if propagation_mode.is_forced() { + builder.extend(to_propagate.iter()); + } else { + // Iterate through the transactions to propagate and fill the hashes and full + // transaction lists, before deciding whether or not to send full transactions to + // the peer. 
+ for tx in &to_propagate { + // Only proceed if the transaction is not in the peer's list of seen + // transactions + if !peer.seen_transactions.contains(tx.tx_hash()) { + builder.push(tx); + } + } + } + + if builder.is_empty() { + trace!(target: "net::tx", ?peer_id, "Nothing to propagate to peer; has seen all transactions"); + continue + } + + let PropagateTransactions { pooled, full } = builder.build(); + + // send hashes if any + if let Some(mut new_pooled_hashes) = pooled { + // enforce tx soft limit per message for the (unlikely) event the number of + // hashes exceeds it + new_pooled_hashes + .truncate(SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE); + + for hash in new_pooled_hashes.iter_hashes().copied() { + propagated.0.entry(hash).or_default().push(PropagateKind::Hash(*peer_id)); + // mark transaction as seen by peer + peer.seen_transactions.insert(hash); + } + + trace!(target: "net::tx", ?peer_id, num_txs=?new_pooled_hashes.len(), "Propagating tx hashes to peer"); + + // send hashes of transactions + self.network.send_transactions_hashes(*peer_id, new_pooled_hashes); + } + + // send full transactions, if any + if let Some(new_full_transactions) = full { + for tx in &new_full_transactions { + propagated + .0 + .entry(*tx.tx_hash()) + .or_default() + .push(PropagateKind::Full(*peer_id)); + // mark transaction as seen by peer + peer.seen_transactions.insert(*tx.tx_hash()); + } + + trace!(target: "net::tx", ?peer_id, num_txs=?new_full_transactions.len(), "Propagating full transactions to peer"); + + // send full transactions + self.network.send_transactions(*peer_id, new_full_transactions); + } + } + + // Update propagated transactions metrics + self.metrics.propagated_transactions.increment(propagated.0.len() as u64); + + propagated + } + + /// Propagates the given transactions to the peers + /// + /// This fetches all transaction from the pool, including the 4844 blob transactions but + /// __without__ their sidecar, because 4844 transactions are only ever announced as hashes. 
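When hashes are announced, the built message is truncated to the protocol soft limit, and every hash actually sent is optimistically marked as seen by the peer, as in the loop above. In miniature (the limit value here is a stand-in for the real broadcast constant):

    use std::collections::HashSet;

    const SOFT_LIMIT: usize = 4096; // stand-in for the real soft-limit constant

    fn announce(
        mut hashes: Vec<[u8; 32]>,
        seen_by_peer: &mut HashSet<[u8; 32]>,
    ) -> Vec<[u8; 32]> {
        // enforce the soft limit for the (unlikely) oversized announcement
        hashes.truncate(SOFT_LIMIT);
        // preemptively mark everything we are about to send as seen
        for hash in &hashes {
            seen_by_peer.insert(*hash);
        }
        hashes
    }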
+ fn propagate_all(&mut self, hashes: Vec) { + let propagated = self.propagate_transactions( + self.pool.get_all(hashes).into_iter().map(PropagateTransaction::new).collect(), + PropagationMode::Basic, + ); + + // notify pool so events get fired + self.pool.on_propagated(propagated); + } + + /// Request handler for an incoming request for transactions + fn on_get_pooled_transactions( + &mut self, + peer_id: PeerId, + request: GetPooledTransactions, + response: oneshot::Sender>>, + ) { + if let Some(peer) = self.peers.get_mut(&peer_id) { + if self.network.tx_gossip_disabled() { + let _ = response.send(Ok(PooledTransactions::default())); + return + } + let transactions = self.pool.get_pooled_transaction_elements( + request.0, + GetPooledTransactionLimit::ResponseSizeSoftLimit( + self.transaction_fetcher.info.soft_limit_byte_size_pooled_transactions_response, + ), + ); + trace!(target: "net::tx::propagation", sent_txs=?transactions.iter().map(|tx| tx.tx_hash()), "Sending requested transactions to peer"); - self.import_transactions(peer_id, non_blob_txs, TransactionSource::Broadcast); + // we sent a response at which point we assume that the peer is aware of the + // transactions + peer.seen_transactions.extend(transactions.iter().map(|tx| *tx.tx_hash())); - if has_blob_txs { - debug!(target: "net::tx", ?peer_id, "received bad full blob transaction broadcast"); - self.report_peer_bad_transactions(peer_id); - } - } - NetworkTransactionEvent::IncomingPooledTransactionHashes { peer_id, msg } => { - self.on_new_pooled_transaction_hashes(peer_id, msg) - } - NetworkTransactionEvent::GetPooledTransactions { peer_id, request, response } => { - self.on_get_pooled_transactions(peer_id, request, response) - } - NetworkTransactionEvent::GetTransactionsHandle(response) => { - let _ = response.send(Some(self.handle())); - } + let resp = PooledTransactions(transactions); + let _ = response.send(Ok(resp)); } } /// Handles a command received from a detached [`TransactionsHandle`] - fn on_command(&mut self, cmd: TransactionsCommand) { + fn on_command(&mut self, cmd: TransactionsCommand) { match cmd { TransactionsCommand::PropagateHash(hash) => { self.on_new_pending_transactions(vec![hash]) } TransactionsCommand::PropagateHashesTo(hashes, peer) => { - self.propagate_hashes_to(hashes, peer) + self.propagate_hashes_to(hashes, peer, PropagationMode::Forced) } TransactionsCommand::GetActivePeers(tx) => { let peers = self.peers.keys().copied().collect::>(); tx.send(peers).ok(); } TransactionsCommand::PropagateTransactionsTo(txs, peer) => { - if let Some(propagated) = self.propagate_full_transactions_to_peer(txs, peer) { + if let Some(propagated) = + self.propagate_full_transactions_to_peer(txs, peer, PropagationMode::Forced) + { self.pool.on_propagated(propagated); } } @@ -911,57 +1034,118 @@ where } } + /// Handles session establishment and peer transactions initialization. + fn handle_peer_session( + &mut self, + info: SessionInfo, + messages: PeerRequestSender>, + ) { + let SessionInfo { peer_id, client_version, version, .. } = info; + + // Insert a new peer into the peerset. 
+ let peer = PeerMetadata::::new( + messages, + version, + client_version, + self.config.max_transactions_seen_by_peer_history, + ); + let peer = match self.peers.entry(peer_id) { + Entry::Occupied(mut entry) => { + entry.insert(peer); + entry.into_mut() + } + Entry::Vacant(entry) => entry.insert(peer), + }; + + // Send a `NewPooledTransactionHashes` to the peer with up to + // `SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE` + // transactions in the pool. + if self.network.is_initially_syncing() || self.network.tx_gossip_disabled() { + trace!(target: "net::tx", ?peer_id, "Skipping transaction broadcast: node syncing or gossip disabled"); + return + } + + // Get transactions to broadcast + let pooled_txs = self.pool.pooled_transactions_max( + SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE, + ); + if pooled_txs.is_empty() { + trace!(target: "net::tx", ?peer_id, "No transactions in the pool to broadcast"); + return; + } + + // Build and send transaction hashes message + let mut msg_builder = PooledTransactionsHashesBuilder::new(version); + for pooled_tx in pooled_txs { + peer.seen_transactions.insert(*pooled_tx.hash()); + msg_builder.push_pooled(pooled_tx); + } + + debug!(target: "net::tx", ?peer_id, tx_count = msg_builder.is_empty(), "Broadcasting transaction hashes"); + let msg = msg_builder.build(); + self.network.send_transactions_hashes(peer_id, msg); + } + /// Handles a received event related to common network events. - fn on_network_event(&mut self, event_result: NetworkEvent) { + fn on_network_event(&mut self, event_result: NetworkEvent>) { match event_result { - NetworkEvent::SessionClosed { peer_id, .. } => { + NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, .. }) => { // remove the peer self.peers.remove(&peer_id); self.transaction_fetcher.remove_peer(&peer_id); } - NetworkEvent::SessionEstablished { - peer_id, client_version, messages, version, .. - } => { - // Insert a new peer into the peerset. - let peer = PeerMetadata::new( - messages, - version, - client_version, - self.config.max_transactions_seen_by_peer_history, - ); - let peer = match self.peers.entry(peer_id) { - Entry::Occupied(mut entry) => { - entry.insert(peer); - entry.into_mut() + NetworkEvent::ActivePeerSession { info, messages } => { + // process active peer session and broadcast available transaction from the pool + self.handle_peer_session(info, messages); + } + NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => { + let peer_id = info.peer_id; + // get messages from existing peer + let messages = match self.peers.get(&peer_id) { + Some(p) => p.request_tx.clone(), + None => { + debug!(target: "net::tx", ?peer_id, "No peer request sender found"); + return; } - Entry::Vacant(entry) => entry.insert(peer), }; + self.handle_peer_session(info, messages); + } + _ => {} + } + } - // Send a `NewPooledTransactionHashes` to the peer with up to - // `SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE` - // transactions in the pool. - if self.network.is_initially_syncing() || self.network.tx_gossip_disabled() { - return - } + /// Handles dedicated transaction events related to the `eth` protocol. 
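`on_network_event` above pairs the two session events: establishment inserts `PeerMetadata` (via `handle_peer_session`), and closure must evict the peer from every per-peer structure, or stale state leaks. A stand-in sketch of that invariant:

    use std::collections::HashMap;

    type PeerId = u64;
    struct PeerMetadata;

    enum PeerEvent {
        SessionEstablished(PeerId),
        SessionClosed(PeerId),
    }

    fn on_peer_event(peers: &mut HashMap<PeerId, PeerMetadata>, event: PeerEvent) {
        match event {
            PeerEvent::SessionEstablished(peer_id) => {
                peers.insert(peer_id, PeerMetadata);
            }
            PeerEvent::SessionClosed(peer_id) => {
                peers.remove(&peer_id);
                // the real manager also calls transaction_fetcher.remove_peer(&peer_id)
            }
        }
    }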
+ fn on_network_tx_event(&mut self, event: NetworkTransactionEvent) { + match event { + NetworkTransactionEvent::IncomingTransactions { peer_id, msg } => { + // ensure we didn't receive any blob transactions as these are disallowed to be + // broadcasted in full - let pooled_txs = self.pool.pooled_transactions_max( - SOFT_LIMIT_COUNT_HASHES_IN_NEW_POOLED_TRANSACTIONS_BROADCAST_MESSAGE, - ); - if pooled_txs.is_empty() { - // do not send a message if there are no transactions in the pool - return - } + let has_blob_txs = msg.has_eip4844(); - let mut msg_builder = PooledTransactionsHashesBuilder::new(version); - for pooled_tx in pooled_txs { - peer.seen_transactions.insert(*pooled_tx.hash()); - msg_builder.push_pooled(pooled_tx); - } + let non_blob_txs = msg + .0 + .into_iter() + .map(N::PooledTransaction::try_from) + .filter_map(Result::ok) + .collect(); + + self.import_transactions(peer_id, non_blob_txs, TransactionSource::Broadcast); - let msg = msg_builder.build(); - self.network.send_transactions_hashes(peer_id, msg); + if has_blob_txs { + debug!(target: "net::tx", ?peer_id, "received bad full blob transaction broadcast"); + self.report_peer_bad_transactions(peer_id); + } + } + NetworkTransactionEvent::IncomingPooledTransactionHashes { peer_id, msg } => { + self.on_new_pooled_transaction_hashes(peer_id, msg) + } + NetworkTransactionEvent::GetPooledTransactions { peer_id, request, response } => { + self.on_get_pooled_transactions(peer_id, request, response) + } + NetworkTransactionEvent::GetTransactionsHandle(response) => { + let _ = response.send(Some(self.handle())); } - _ => {} } } @@ -969,7 +1153,7 @@ where fn import_transactions( &mut self, peer_id: PeerId, - transactions: PooledTransactions, + transactions: PooledTransactions, source: TransactionSource, ) { // If the node is pipeline syncing, ignore transactions @@ -985,7 +1169,7 @@ where // mark the transactions as received self.transaction_fetcher - .remove_hashes_from_transaction_fetcher(transactions.iter().map(|tx| *tx.hash())); + .remove_hashes_from_transaction_fetcher(transactions.iter().map(|tx| *tx.tx_hash())); // track that the peer knows these transaction, but only if this is a new broadcast. // If we received the transactions as the response to our `GetPooledTransactions`` @@ -993,7 +1177,7 @@ where // recorded the hashes as seen by this peer in `Self::on_new_pooled_transaction_hashes`. 
let mut num_already_seen_by_peer = 0; for tx in &transactions { - if source.is_broadcast() && !peer.seen_transactions.insert(*tx.hash()) { + if source.is_broadcast() && !peer.seen_transactions.insert(*tx.tx_hash()) { num_already_seen_by_peer += 1; } } @@ -1022,7 +1206,7 @@ where Err(badtx) => { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), - hash=%badtx.hash(), + hash=%badtx.tx_hash(), client_version=%peer.client_version, "failed ecrecovery for transaction" ); @@ -1031,23 +1215,24 @@ where } }; - match self.transactions_by_peers.entry(*tx.hash()) { + match self.transactions_by_peers.entry(*tx.tx_hash()) { Entry::Occupied(mut entry) => { // transaction was already inserted entry.get_mut().insert(peer_id); } Entry::Vacant(entry) => { - if self.bad_imports.contains(tx.hash()) { + if self.bad_imports.contains(tx.tx_hash()) { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), - hash=%tx.hash(), + hash=%tx.tx_hash(), client_version=%peer.client_version, "received a known bad transaction from peer" ); has_bad_transactions = true; } else { // this is a new transaction that should be imported into the pool - let pool_transaction = Pool::Transaction::from_pooled(tx.into()); + + let pool_transaction = Pool::Transaction::from_pooled(tx); new_txs.push(pool_transaction); entry.insert(HashSet::from([peer_id])); @@ -1107,22 +1292,8 @@ where } } - /// Processes a batch import results. - fn on_batch_import_result(&mut self, batch_results: Vec>) { - for res in batch_results { - match res { - Ok(hash) => { - self.on_good_import(hash); - } - Err(err) => { - self.on_bad_import(err); - } - } - } - } - /// Processes a [`FetchEvent`]. - fn on_fetch_event(&mut self, fetch_event: FetchEvent) { + fn on_fetch_event(&mut self, fetch_event: FetchEvent) { match fetch_event { FetchEvent::TransactionsFetched { peer_id, transactions } => { self.import_transactions(peer_id, transactions, TransactionSource::Response); @@ -1136,100 +1307,6 @@ where } } } - - /// Runs an operation to fetch hashes that are cached in [`TransactionFetcher`]. 
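The `transactions_by_peers` entry match above is what prevents duplicate pool imports: the first peer to deliver a hash triggers the import, later peers are merely recorded as additional sources. Reduced to stand-in types:

    use std::collections::{hash_map::Entry, HashMap, HashSet};

    type TxHash = [u8; 32];
    type PeerId = u64;

    /// Returns `true` if a fresh import should be started for `hash`,
    /// `false` if one is already in flight.
    fn track_incoming(
        by_peers: &mut HashMap<TxHash, HashSet<PeerId>>,
        hash: TxHash,
        peer: PeerId,
    ) -> bool {
        match by_peers.entry(hash) {
            // already importing: just remember one more peer that knows this tx
            Entry::Occupied(mut entry) => {
                entry.get_mut().insert(peer);
                false
            }
            // first sighting: record the peer and kick off the import
            Entry::Vacant(entry) => {
                entry.insert(HashSet::from([peer]));
                true
            }
        }
    }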
- fn on_fetch_hashes_pending_fetch(&mut self) { - // try drain transaction hashes pending fetch - let info = &self.pending_pool_imports_info; - let max_pending_pool_imports = info.max_pending_pool_imports; - let has_capacity_wrt_pending_pool_imports = - |divisor| info.has_capacity(max_pending_pool_imports / divisor); - - self.transaction_fetcher - .on_fetch_pending_hashes(&self.peers, has_capacity_wrt_pending_pool_imports); - } - - fn report_peer_bad_transactions(&self, peer_id: PeerId) { - self.report_peer(peer_id, ReputationChangeKind::BadTransactions); - self.metrics.reported_bad_transactions.increment(1); - } - - fn report_peer(&self, peer_id: PeerId, kind: ReputationChangeKind) { - trace!(target: "net::tx", ?peer_id, ?kind, "reporting reputation change"); - self.network.reputation_change(peer_id, kind); - } - - fn on_request_error(&self, peer_id: PeerId, req_err: RequestError) { - let kind = match req_err { - RequestError::UnsupportedCapability => ReputationChangeKind::BadProtocol, - RequestError::Timeout => ReputationChangeKind::Timeout, - RequestError::ChannelClosed | RequestError::ConnectionDropped => { - // peer is already disconnected - return - } - RequestError::BadResponse => return self.report_peer_bad_transactions(peer_id), - }; - self.report_peer(peer_id, kind); - } - - fn report_already_seen(&self, peer_id: PeerId) { - trace!(target: "net::tx", ?peer_id, "Penalizing peer for already seen transaction"); - self.network.reputation_change(peer_id, ReputationChangeKind::AlreadySeenTransaction); - } - - /// Clear the transaction - fn on_good_import(&mut self, hash: TxHash) { - self.transactions_by_peers.remove(&hash); - } - - /// Penalize the peers that intentionally sent the bad transaction, and cache it to avoid - /// fetching or importing it again. - /// - /// Errors that count as bad transactions are: - /// - /// - intrinsic gas too low - /// - exceeds gas limit - /// - gas uint overflow - /// - exceeds max init code size - /// - oversized data - /// - signer account has bytecode - /// - chain id mismatch - /// - old legacy chain id - /// - tx type not supported - /// - /// (and additionally for blobs txns...) - /// - /// - no blobs - /// - too many blobs - /// - invalid kzg proof - /// - kzg error - /// - not blob transaction (tx type mismatch) - /// - wrong versioned kzg commitment hash - fn on_bad_import(&mut self, err: PoolError) { - let peers = self.transactions_by_peers.remove(&err.hash); - - // if we're _currently_ syncing, we ignore a bad transaction - if !err.is_bad_transaction() || self.network.is_syncing() { - return - } - // otherwise we penalize the peer that sent the bad transaction, with the assumption that - // the peer should have known that this transaction is bad (e.g. violating consensus rules) - if let Some(peers) = peers { - for peer_id in peers { - self.report_peer_bad_transactions(peer_id); - } - } - self.metrics.bad_imports.increment(1); - self.bad_imports.insert(err.hash); - } - - /// Returns `true` if [`TransactionsManager`] has capacity to request pending hashes. Returns - /// `false` if [`TransactionsManager`] is operating close to full capacity. - fn has_capacity_for_fetching_pending_hashes(&self) -> bool { - self.pending_pool_imports_info - .has_capacity(self.pending_pool_imports_info.max_pending_pool_imports) && - self.transaction_fetcher.has_capacity_for_fetching_pending_hashes() - } } /// An endless future. Preemption ensure that future is non-blocking, nonetheless. 
See @@ -1239,9 +1316,15 @@ where // // spawned in `NodeConfig::start_network`(reth_node_core::NodeConfig) and // `NetworkConfig::start_network`(reth_network::NetworkConfig) -impl Future for TransactionsManager +impl Future for TransactionsManager where Pool: TransactionPool + Unpin + 'static, + N: NetworkPrimitives< + BroadcastedTransaction: SignedTransaction, + PooledTransaction: SignedTransaction, + >, + Pool::Transaction: + PoolTransaction, { type Output = (); @@ -1395,42 +1478,62 @@ where } } +/// Represents the different modes of transaction propagation. +/// +/// This enum is used to determine how transactions are propagated to peers in the network. +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +enum PropagationMode { + /// Default propagation mode. + /// + /// Transactions are only sent to peers that haven't seen them yet. + Basic, + /// Forced propagation mode. + /// + /// Transactions are sent to all peers regardless of whether they have been sent or received + /// before. + Forced, +} + +impl PropagationMode { + /// Returns `true` if the propagation kind is `Forced`. + const fn is_forced(self) -> bool { + matches!(self, Self::Forced) + } +} + /// A transaction that's about to be propagated to multiple peers. #[derive(Debug, Clone)] -struct PropagateTransaction { +struct PropagateTransaction { size: usize, - transaction: Arc, + transaction: Arc, } -// === impl PropagateTransaction === - -impl PropagateTransaction { - fn hash(&self) -> TxHash { - self.transaction.hash() - } - +impl PropagateTransaction { /// Create a new instance from a pooled transaction - fn new(tx: Arc>) -> Self + fn new
(tx: Arc>) -> Self where - T: PoolTransaction>, + P: PoolTransaction, { let size = tx.encoded_length(); - let recovered: TransactionSignedEcRecovered = - tx.transaction.clone().into_consensus().into(); - let transaction = Arc::new(recovered.into_signed()); + let transaction = tx.transaction.clone_into_consensus(); + let transaction = Arc::new(transaction.into_signed()); Self { size, transaction } } + + fn tx_hash(&self) -> &TxHash { + self.transaction.tx_hash() + } } /// Helper type to construct the appropriate message to send to the peer based on whether the peer /// should receive them in full or as pooled #[derive(Debug, Clone)] -enum PropagateTransactionsBuilder { +enum PropagateTransactionsBuilder { Pooled(PooledTransactionsHashesBuilder), - Full(FullTransactionsBuilder), + Full(FullTransactionsBuilder), } -impl PropagateTransactionsBuilder { +impl PropagateTransactionsBuilder { /// Create a builder for pooled transactions fn pooled(version: EthVersion) -> Self { Self::Pooled(PooledTransactionsHashesBuilder::new(version)) @@ -1441,14 +1544,6 @@ impl PropagateTransactionsBuilder { Self::Full(FullTransactionsBuilder::new(version)) } - /// Appends a transaction to the list. - fn push(&mut self, transaction: &PropagateTransaction) { - match self { - Self::Pooled(builder) => builder.push(transaction), - Self::Full(builder) => builder.push(transaction), - } - } - /// Returns true if no transactions are recorded. fn is_empty(&self) -> bool { match self { @@ -1458,7 +1553,7 @@ impl PropagateTransactionsBuilder { } /// Consumes the type and returns the built messages that should be sent to the peer. - fn build(self) -> PropagateTransactions { + fn build(self) -> PropagateTransactions { match self { Self::Pooled(pooled) => { PropagateTransactions { pooled: Some(pooled.build()), full: None } @@ -1468,12 +1563,29 @@ impl PropagateTransactionsBuilder { } } +impl PropagateTransactionsBuilder { + /// Appends all transactions + fn extend<'a>(&mut self, txs: impl IntoIterator>) { + for tx in txs { + self.push(tx); + } + } + + /// Appends a transaction to the list. + fn push(&mut self, transaction: &PropagateTransaction) { + match self { + Self::Pooled(builder) => builder.push(transaction), + Self::Full(builder) => builder.push(transaction), + } + } +} + /// Represents how the transactions should be sent to a peer if any. -struct PropagateTransactions { +struct PropagateTransactions { /// The pooled transaction hashes to send. pooled: Option, /// The transactions to send in full. - full: Option>>, + full: Option>>, } /// Helper type for constructing the full transaction message that enforces the @@ -1481,18 +1593,16 @@ struct PropagateTransactions { /// and enforces other propagation rules for EIP-4844 and tracks those transactions that can't be /// broadcasted in full. #[derive(Debug, Clone)] -struct FullTransactionsBuilder { +struct FullTransactionsBuilder { /// The soft limit to enforce for a single broadcast message of full transactions. total_size: usize, /// All transactions to be broadcasted. - transactions: Vec>, + transactions: Vec>, /// Transactions that didn't fit into the broadcast message pooled: PooledTransactionsHashesBuilder, } -// === impl FullTransactionsBuilder === - -impl FullTransactionsBuilder { +impl FullTransactionsBuilder { /// Create a builder for the negotiated version of the peer's session fn new(version: EthVersion) -> Self { Self { @@ -1502,6 +1612,27 @@ impl FullTransactionsBuilder { } } + /// Returns whether or not any transactions are in the [`FullTransactionsBuilder`]. 
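`PropagateTransactionsBuilder` above is a two-way builder chosen per peer: hashes only, or full transaction bodies. A stripped-down mirror of the dispatch, with byte vectors standing in for encoded transactions:

    enum Builder {
        Pooled(Vec<[u8; 32]>), // announce by hash
        Full(Vec<Vec<u8>>),    // send the full body
    }

    impl Builder {
        fn push(&mut self, hash: [u8; 32], body: &[u8]) {
            match self {
                Self::Pooled(hashes) => hashes.push(hash),
                Self::Full(bodies) => bodies.push(body.to_vec()),
            }
        }

        fn is_empty(&self) -> bool {
            match self {
                Self::Pooled(hashes) => hashes.is_empty(),
                Self::Full(bodies) => bodies.is_empty(),
            }
        }
    }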
+ fn is_empty(&self) -> bool { + self.transactions.is_empty() && self.pooled.is_empty() + } + + /// Returns the messages that should be propagated to the peer. + fn build(self) -> PropagateTransactions { + let pooled = Some(self.pooled.build()).filter(|pooled| !pooled.is_empty()); + let full = Some(self.transactions).filter(|full| !full.is_empty()); + PropagateTransactions { pooled, full } + } +} + +impl FullTransactionsBuilder { + /// Appends all transactions. + fn extend(&mut self, txs: impl IntoIterator>) { + for tx in txs { + self.push(&tx) + } + } + /// Append a transaction to the list of full transaction if the total message bytes size doesn't /// exceed the soft maximum target byte size. The limit is soft, meaning if one single /// transaction goes over the limit, it will be broadcasted in its own [`Transactions`] @@ -1510,7 +1641,8 @@ impl FullTransactionsBuilder { /// /// If the transaction is unsuitable for broadcast or would exceed the softlimit, it is appended /// to list of pooled transactions, (e.g. 4844 transactions). - fn push(&mut self, transaction: &PropagateTransaction) { + /// See also [`TxType::is_broadcastable_in_full`]. + fn push(&mut self, transaction: &PropagateTransaction) { // Do not send full 4844 transaction hashes to peers. // // Nodes MUST NOT automatically broadcast blob transactions to their peers. @@ -1519,7 +1651,7 @@ impl FullTransactionsBuilder { // via `GetPooledTransactions`. // // From: - if transaction.transaction.is_eip4844() { + if !transaction.transaction.tx_type().is_broadcastable_in_full() { self.pooled.push(transaction); return } @@ -1536,18 +1668,6 @@ impl FullTransactionsBuilder { self.total_size = new_size; self.transactions.push(Arc::clone(&transaction.transaction)); } - - /// Returns whether or not any transactions are in the [`FullTransactionsBuilder`]. - fn is_empty(&self) -> bool { - self.transactions.is_empty() && self.pooled.is_empty() - } - - /// Returns the messages that should be propagated to the peer. - fn build(self) -> PropagateTransactions { - let pooled = Some(self.pooled.build()).filter(|pooled| !pooled.is_empty()); - let full = Some(self.transactions).filter(|full| !full.is_empty()); - PropagateTransactions { pooled, full } - } } /// A helper type to create the pooled transactions message based on the negotiated version of the @@ -1581,11 +1701,21 @@ impl PooledTransactionsHashesBuilder { } } - fn push(&mut self, tx: &PropagateTransaction) { + /// Appends all hashes + fn extend( + &mut self, + txs: impl IntoIterator>, + ) { + for tx in txs { + self.push(&tx); + } + } + + fn push(&mut self, tx: &PropagateTransaction) { match self { - Self::Eth66(msg) => msg.0.push(tx.hash()), + Self::Eth66(msg) => msg.0.push(*tx.tx_hash()), Self::Eth68(msg) => { - msg.hashes.push(tx.hash()); + msg.hashes.push(*tx.tx_hash()); msg.sizes.push(tx.size); msg.types.push(tx.transaction.tx_type().into()); } @@ -1596,7 +1726,7 @@ impl PooledTransactionsHashesBuilder { fn new(version: EthVersion) -> Self { match version { EthVersion::Eth66 | EthVersion::Eth67 => Self::Eth66(Default::default()), - EthVersion::Eth68 => Self::Eth68(Default::default()), + EthVersion::Eth68 | EthVersion::Eth69 => Self::Eth68(Default::default()), } } @@ -1627,23 +1757,23 @@ impl TransactionSource { /// Tracks a single peer in the context of [`TransactionsManager`]. #[derive(Debug)] -pub struct PeerMetadata { +pub struct PeerMetadata { /// Optimistically keeps track of transactions that we know the peer has seen. 
Optimistic, in /// the sense that transactions are preemptively marked as seen by peer when they are sent to /// the peer. seen_transactions: LruCache, /// A communication channel directly to the peer's session task. - request_tx: PeerRequestSender, + request_tx: PeerRequestSender>, /// negotiated version of the session. version: EthVersion, /// The peer's client version. client_version: Arc, } -impl PeerMetadata { +impl PeerMetadata { /// Returns a new instance of [`PeerMetadata`]. fn new( - request_tx: PeerRequestSender, + request_tx: PeerRequestSender>, version: EthVersion, client_version: Arc, max_transactions_seen_by_peer: u32, @@ -1659,7 +1789,7 @@ impl PeerMetadata { /// Commands to send to the [`TransactionsManager`] #[derive(Debug)] -enum TransactionsCommand { +enum TransactionsCommand { /// Propagate a transaction hash to the network. PropagateHash(B256), /// Propagate transaction hashes to a specific peer. @@ -1678,13 +1808,13 @@ enum TransactionsCommand { /// Requests a clone of the sender sender channel to the peer. GetPeerSender { peer_id: PeerId, - peer_request_sender: oneshot::Sender>, + peer_request_sender: oneshot::Sender>>>, }, } /// All events related to transactions emitted by the network. #[derive(Debug)] -pub enum NetworkTransactionEvent { +pub enum NetworkTransactionEvent { /// Represents the event of receiving a list of transactions from a peer. /// /// This indicates transactions that were broadcasted to us from the peer. @@ -1692,7 +1822,7 @@ pub enum NetworkTransactionEvent { /// The ID of the peer from which the transactions were received. peer_id: PeerId, /// The received transactions. - msg: Transactions, + msg: Transactions, }, /// Represents the event of receiving a list of transaction hashes from a peer. IncomingPooledTransactionHashes { @@ -1708,10 +1838,10 @@ pub enum NetworkTransactionEvent { /// The received `GetPooledTransactions` request. request: GetPooledTransactions, /// The sender for responding to the request with a result of `PooledTransactions`. - response: oneshot::Sender>, + response: oneshot::Sender>>, }, /// Represents the event of receiving a `GetTransactionsHandle` request. - GetTransactionsHandle(oneshot::Sender>), + GetTransactionsHandle(oneshot::Sender>>), } /// Tracks stats about the [`TransactionsManager`]. 
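`FullTransactionsBuilder::push` above enforces two rules: transaction types that may not be broadcast in full (EIP-4844) always fall back to hash announcement, and the byte-size limit is soft, so a single oversized transaction still goes out alone in its own message. A sketch of that accounting with assumed stand-in fields:

    struct FullBuilder {
        total_size: usize,
        soft_limit: usize,
        full: Vec<Vec<u8>>,    // encoded transactions to send in full
        pooled: Vec<[u8; 32]>, // overflow / non-broadcastable, sent as hashes
    }

    impl FullBuilder {
        fn push(&mut self, hash: [u8; 32], encoded: Vec<u8>, broadcastable_in_full: bool) {
            // blob (EIP-4844) transactions are never broadcast in full
            if !broadcastable_in_full {
                self.pooled.push(hash);
                return;
            }
            let new_size = self.total_size + encoded.len();
            // soft limit: divert only when the message already holds something,
            // so a single oversized transaction is still sent on its own
            if new_size > self.soft_limit && self.total_size > 0 {
                self.pooled.push(hash);
                return;
            }
            self.total_size = new_size;
            self.full.push(encoded);
        }
    }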
@@ -1765,7 +1895,7 @@ mod tests { error::{RequestError, RequestResult}, sync::{NetworkSyncUpdater, SyncState}, }; - use reth_provider::test_utils::NoopProvider; + use reth_storage_api::noop::NoopProvider; use reth_transaction_pool::test_utils::{ testing_pool, MockTransaction, MockTransactionFactory, TestPool, }; @@ -1779,7 +1909,9 @@ mod tests { use tests::fetcher::TxFetchMetadata; use tracing::error; - async fn new_tx_manager() -> (TransactionsManager, NetworkManager) { + async fn new_tx_manager( + ) -> (TransactionsManager, NetworkManager) + { let secret_key = SecretKey::new(&mut rand::thread_rng()); let client = NoopProvider::default(); @@ -1810,7 +1942,7 @@ mod tests { pub(super) fn new_mock_session( peer_id: PeerId, version: EthVersion, - ) -> (PeerMetadata, mpsc::Receiver) { + ) -> (PeerMetadata, mpsc::Receiver) { let (to_mock_session_tx, to_mock_session_rx) = mpsc::channel(1); ( @@ -1842,7 +1974,7 @@ mod tests { let client = NoopProvider::default(); let pool = testing_pool(); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .disable_discovery() .listener_port(0) .build(client); @@ -1865,27 +1997,12 @@ mod tests { let mut established = listener0.take(2); while let Some(ev) = established.next().await { match ev { - NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - } => { + NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => { // to insert a new peer in transactions peerset - transactions.on_network_event(NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - }) + transactions + .on_network_event(NetworkEvent::Peer(PeerEvent::SessionEstablished(info))) } - NetworkEvent::PeerAdded(_peer_id) => continue, + NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => continue, ev => { error!("unexpected event {ev:?}") } @@ -1951,28 +2068,13 @@ mod tests { let mut established = listener0.take(2); while let Some(ev) = established.next().await { match ev { - NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - } => { + NetworkEvent::ActivePeerSession { .. } | + NetworkEvent::Peer(PeerEvent::SessionEstablished(_)) => { // to insert a new peer in transactions peerset - transactions.on_network_event(NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - }) + transactions.on_network_event(ev); } - NetworkEvent::PeerAdded(_peer_id) => continue, - ev => { + NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => continue, + _ => { error!("unexpected event {ev:?}") } } @@ -2035,27 +2137,12 @@ mod tests { let mut established = listener0.take(2); while let Some(ev) = established.next().await { match ev { - NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - } => { + NetworkEvent::ActivePeerSession { .. 
} | + NetworkEvent::Peer(PeerEvent::SessionEstablished(_)) => { // to insert a new peer in transactions peerset - transactions.on_network_event(NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - }) + transactions.on_network_event(ev); } - NetworkEvent::PeerAdded(_peer_id) => continue, + NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => continue, ev => { error!("unexpected event {ev:?}") } @@ -2082,7 +2169,7 @@ mod tests { .await; assert!(!pool.is_empty()); - assert!(pool.get(&signed_tx.hash).is_some()); + assert!(pool.get(signed_tx.tx_hash()).is_some()); handle.terminate().await; } @@ -2126,24 +2213,11 @@ mod tests { let mut established = listener0.take(2); while let Some(ev) = established.next().await { match ev { - NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - } => transactions.on_network_event(NetworkEvent::SessionEstablished { - peer_id, - remote_addr, - client_version, - capabilities, - messages, - status, - version, - }), - NetworkEvent::PeerAdded(_peer_id) => continue, + NetworkEvent::ActivePeerSession { .. } | + NetworkEvent::Peer(PeerEvent::SessionEstablished(_)) => { + transactions.on_network_event(ev); + } + NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => continue, ev => { error!("unexpected event {ev:?}") } @@ -2157,7 +2231,7 @@ mod tests { .add_transaction(reth_transaction_pool::TransactionOrigin::External, tx.clone()) .await; - let request = GetPooledTransactions(vec![tx.get_hash()]); + let request = GetPooledTransactions(vec![*tx.get_hash()]); let (send, receive) = oneshot::channel::>(); @@ -2292,7 +2366,8 @@ mod tests { #[test] fn test_transaction_builder_empty() { - let mut builder = PropagateTransactionsBuilder::pooled(EthVersion::Eth68); + let mut builder = + PropagateTransactionsBuilder::::pooled(EthVersion::Eth68); assert!(builder.is_empty()); let mut factory = MockTransactionFactory::default(); @@ -2308,7 +2383,8 @@ mod tests { #[test] fn test_transaction_builder_large() { - let mut builder = PropagateTransactionsBuilder::full(EthVersion::Eth68); + let mut builder = + PropagateTransactionsBuilder::::full(EthVersion::Eth68); assert!(builder.is_empty()); let mut factory = MockTransactionFactory::default(); @@ -2336,7 +2412,8 @@ mod tests { #[test] fn test_transaction_builder_eip4844() { - let mut builder = PropagateTransactionsBuilder::full(EthVersion::Eth68); + let mut builder = + PropagateTransactionsBuilder::::full(EthVersion::Eth68); assert!(builder.is_empty()); let mut factory = MockTransactionFactory::default(); @@ -2370,17 +2447,18 @@ mod tests { network.handle().update_sync_state(SyncState::Idle); // mock a peer - let (tx, _rx) = mpsc::channel(1); - tx_manager.on_network_event(NetworkEvent::SessionEstablished { + let (tx, _rx) = mpsc::channel::(1); + let session_info = SessionInfo { peer_id, remote_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0), client_version: Arc::from(""), capabilities: Arc::new(vec![].into()), - messages: PeerRequestSender::new(peer_id, tx), status: Arc::new(Default::default()), version: EthVersion::Eth68, - }); - + }; + let messages: PeerRequestSender = PeerRequestSender::new(peer_id, tx); + tx_manager + .on_network_event(NetworkEvent::ActivePeerSession { info: session_info, messages }); let mut propagate = vec![]; let mut factory = MockTransactionFactory::default(); let eip1559_tx = Arc::new(factory.create_eip1559()); @@ -2388,7 +2466,8 @@ mod tests { let 
eip4844_tx = Arc::new(factory.create_eip4844()); propagate.push(PropagateTransaction::new(eip4844_tx.clone())); - let propagated = tx_manager.propagate_transactions(propagate.clone()); + let propagated = + tx_manager.propagate_transactions(propagate.clone(), PropagationMode::Basic); assert_eq!(propagated.0.len(), 2); let prop_txs = propagated.0.get(eip1559_tx.transaction.hash()).unwrap(); assert_eq!(prop_txs.len(), 1); @@ -2404,7 +2483,7 @@ mod tests { peer.seen_transactions.contains(eip4844_tx.transaction.hash()); // propagate again - let propagated = tx_manager.propagate_transactions(propagate); + let propagated = tx_manager.propagate_transactions(propagate, PropagationMode::Basic); assert!(propagated.0.is_empty()); } } diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs index 4038f23e85c..1018cde6b55 100644 --- a/crates/net/network/src/transactions/validation.rs +++ b/crates/net/network/src/transactions/validation.rs @@ -2,16 +2,15 @@ //! and [`NewPooledTransactionHashes68`](reth_eth_wire::NewPooledTransactionHashes68) //! announcements. Validation and filtering of announcements is network dependent. -use std::{fmt, fmt::Display, mem}; - use crate::metrics::{AnnouncedTxTypesMetrics, TxTypesCounter}; -use alloy_primitives::{Signature, TxHash}; +use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; use derive_more::{Deref, DerefMut}; use reth_eth_wire::{ DedupPayload, Eth68TxMetadata, HandleMempoolData, PartiallyValidData, ValidAnnouncementData, MAX_MESSAGE_SIZE, }; use reth_primitives::TxType; +use std::{fmt, fmt::Display, mem}; use tracing::trace; /// The size of a decoded signature in bytes. @@ -336,7 +335,6 @@ impl FilterAnnouncement for EthMessageFilter { #[cfg(test)] mod test { use super::*; - use alloy_primitives::B256; use reth_eth_wire::{NewPooledTransactionHashes66, NewPooledTransactionHashes68}; use std::{collections::HashMap, str::FromStr}; diff --git a/crates/net/network/tests/it/big_pooled_txs_req.rs b/crates/net/network/tests/it/big_pooled_txs_req.rs index 3a645da6c9f..328229e87e1 100644 --- a/crates/net/network/tests/it/big_pooled_txs_req.rs +++ b/crates/net/network/tests/it/big_pooled_txs_req.rs @@ -1,4 +1,4 @@ -use alloy_primitives::B256; +use alloy_primitives::{PrimitiveSignature as Signature, B256}; use reth_eth_wire::{GetPooledTransactions, PooledTransactions}; use reth_network::{ test_utils::{NetworkEventStream, Testnet}, @@ -6,7 +6,7 @@ use reth_network::{ }; use reth_network_api::{NetworkInfo, Peers}; use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState}; -use reth_primitives::{Signature, TransactionSigned}; +use reth_primitives::TransactionSigned; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ test_utils::{testing_pool, MockTransaction}, @@ -26,16 +26,13 @@ async fn test_large_tx_req() { // replace rng txhash with real txhash let mut tx = MockTransaction::eip1559(); - let ts = TransactionSigned { - hash: Default::default(), - signature: Signature::test_signature(), - transaction: tx.clone().into(), - }; + let ts = + TransactionSigned::new_unhashed(tx.clone().into(), Signature::test_signature()); tx.set_hash(ts.recalculate_hash()); tx }) .collect(); - let txs_hashes: Vec = txs.iter().map(|tx| tx.get_hash()).collect(); + let txs_hashes: Vec = txs.iter().map(|tx| *tx.get_hash()).collect(); // setup testnet let mut net = Testnet::create_with(2, MockEthProvider::default()).await; diff --git a/crates/net/network/tests/it/connect.rs 
b/crates/net/network/tests/it/connect.rs index ec891e5b39a..dfa4ff16046 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -1,29 +1,31 @@ //! Connection tests -use std::{net::SocketAddr, time::Duration}; - use alloy_node_bindings::Geth; use alloy_primitives::map::HashSet; use alloy_provider::{ext::AdminApi, ProviderBuilder}; use futures::StreamExt; use reth_chainspec::MAINNET; use reth_discv4::Discv4Config; -use reth_eth_wire::{DisconnectReason, HeadersDirection}; +use reth_eth_wire::{DisconnectReason, EthNetworkPrimitives, HeadersDirection}; use reth_net_banlist::BanList; use reth_network::{ test_utils::{enr_to_peer_id, NetworkEventStream, PeerConfig, Testnet, GETH_TIMEOUT}, BlockDownloaderProvider, NetworkConfigBuilder, NetworkEvent, NetworkEventListenerProvider, NetworkManager, PeersConfig, }; -use reth_network_api::{NetworkInfo, Peers, PeersInfo}; +use reth_network_api::{ + events::{PeerEvent, SessionInfo}, + NetworkInfo, Peers, PeersInfo, +}; use reth_network_p2p::{ headers::client::{HeadersClient, HeadersRequest}, sync::{NetworkSyncUpdater, SyncState}, }; use reth_network_peers::{mainnet_nodes, NodeRecord, TrustedPeer}; -use reth_provider::test_utils::NoopProvider; +use reth_storage_api::noop::NoopProvider; use reth_transaction_pool::test_utils::testing_pool; use secp256k1::SecretKey; +use std::{net::SocketAddr, time::Duration}; use tokio::task; use url::Host; @@ -59,13 +61,15 @@ async fn test_establish_connections() { let mut established = listener0.take(4); while let Some(ev) = established.next().await { match ev { - NetworkEvent::SessionClosed { .. } | NetworkEvent::PeerRemoved(_) => { + NetworkEvent::Peer(PeerEvent::SessionClosed { .. } | PeerEvent::PeerRemoved(_)) => { panic!("unexpected event") } - NetworkEvent::SessionEstablished { peer_id, .. } => { - assert!(expected_connections.remove(&peer_id)) + NetworkEvent::ActivePeerSession { info, .. } | + NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => { + let SessionInfo { peer_id, .. 
} = info; + assert!(expected_connections.remove(&peer_id)); } - NetworkEvent::PeerAdded(peer_id) => { + NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => { assert!(expected_peers.remove(&peer_id)) } } @@ -94,7 +98,7 @@ async fn test_already_connected() { let p1 = PeerConfig::default(); // initialize two peers with the same identifier - let p2 = PeerConfig::with_secret_key(client, secret_key); + let p2 = PeerConfig::with_secret_key(client.clone(), secret_key); let p3 = PeerConfig::with_secret_key(client, secret_key); net.extend_peer_with_config(vec![p1, p2, p3]).await.unwrap(); @@ -138,7 +142,7 @@ async fn test_get_peer() { let client = NoopProvider::default(); let p1 = PeerConfig::default(); - let p2 = PeerConfig::with_secret_key(client, secret_key); + let p2 = PeerConfig::with_secret_key(client.clone(), secret_key); let p3 = PeerConfig::with_secret_key(client, secret_key_1); net.extend_peer_with_config(vec![p1, p2, p3]).await.unwrap(); @@ -171,7 +175,7 @@ async fn test_get_peer_by_id() { let secret_key_1 = SecretKey::new(&mut rand::thread_rng()); let client = NoopProvider::default(); let p1 = PeerConfig::default(); - let p2 = PeerConfig::with_secret_key(client, secret_key); + let p2 = PeerConfig::with_secret_key(client.clone(), secret_key); let p3 = PeerConfig::with_secret_key(client, secret_key_1); net.extend_peer_with_config(vec![p1, p2, p3]).await.unwrap(); @@ -204,8 +208,9 @@ async fn test_connect_with_boot_nodes() { let mut discv4 = Discv4Config::builder(); discv4.add_boot_nodes(mainnet_nodes()); - let config = - NetworkConfigBuilder::new(secret_key).discovery(discv4).build(NoopProvider::default()); + let config = NetworkConfigBuilder::::new(secret_key) + .discovery(discv4) + .build(NoopProvider::default()); let network = NetworkManager::new(config).await.unwrap(); let handle = network.handle().clone(); @@ -226,7 +231,9 @@ async fn test_connect_with_builder() { discv4.add_boot_nodes(mainnet_nodes()); let client = NoopProvider::default(); - let config = NetworkConfigBuilder::new(secret_key).discovery(discv4).build(client); + let config = NetworkConfigBuilder::::new(secret_key) + .discovery(discv4) + .build(client.clone()); let (handle, network, _, requests) = NetworkManager::new(config) .await .unwrap() @@ -262,7 +269,9 @@ async fn test_connect_to_trusted_peer() { let discv4 = Discv4Config::builder(); let client = NoopProvider::default(); - let config = NetworkConfigBuilder::new(secret_key).discovery(discv4).build(client); + let config = NetworkConfigBuilder::::new(secret_key) + .discovery(discv4) + .build(client.clone()); let transactions_manager_config = config.transactions_manager_config.clone(); let (handle, network, transactions, requests) = NetworkManager::new(config) .await @@ -464,7 +473,7 @@ async fn test_geth_disconnect() { tokio::time::timeout(GETH_TIMEOUT, async move { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .listener_port(0) .disable_discovery() .build(NoopProvider::default()); @@ -495,11 +504,16 @@ async fn test_geth_disconnect() { handle.add_peer(geth_peer_id, geth_socket); match events.next().await { - Some(NetworkEvent::PeerAdded(peer_id)) => assert_eq!(peer_id, geth_peer_id), + Some(NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id))) => { + assert_eq!(peer_id, geth_peer_id) + } _ => panic!("Expected a peer added event"), } - if let Some(NetworkEvent::SessionEstablished { peer_id, .. 
}) = events.next().await { + if let Some(NetworkEvent::Peer(PeerEvent::SessionEstablished(session_info))) = + events.next().await + { + let SessionInfo { peer_id, .. } = session_info; assert_eq!(peer_id, geth_peer_id); } else { panic!("Expected a session established event"); @@ -509,7 +523,9 @@ async fn test_geth_disconnect() { handle.disconnect_peer(geth_peer_id); // wait for a disconnect from geth - if let Some(NetworkEvent::SessionClosed { peer_id, .. }) = events.next().await { + if let Some(NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, .. })) = + events.next().await + { assert_eq!(peer_id, geth_peer_id); } else { panic!("Expected a session closed event"); @@ -572,7 +588,7 @@ async fn test_disconnect_incoming_when_exceeded_incoming_connections() { let secret_key = SecretKey::new(&mut rand::thread_rng()); let peers_config = PeersConfig::default().with_max_inbound(0); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .listener_port(0) .disable_discovery() .peer_config(peers_config) @@ -681,7 +697,10 @@ async fn test_rejected_by_already_connect() { assert_eq!(handle.num_connected_peers(), 2); } -async fn new_random_peer(max_in_bound: usize, trusted_nodes: Vec) -> NetworkManager { +async fn new_random_peer( + max_in_bound: usize, + trusted_nodes: Vec, +) -> NetworkManager { let secret_key = SecretKey::new(&mut rand::thread_rng()); let peers_config = PeersConfig::default().with_max_inbound(max_in_bound).with_trusted_nodes(trusted_nodes); diff --git a/crates/net/network/tests/it/main.rs b/crates/net/network/tests/it/main.rs index ede445510c2..dd98c9624d6 100644 --- a/crates/net/network/tests/it/main.rs +++ b/crates/net/network/tests/it/main.rs @@ -6,6 +6,7 @@ mod multiplex; mod requests; mod session; mod startup; +mod transaction_hash_fetching; mod txgossip; const fn main() {} diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index 61241f02d2d..0dd38c959de 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -3,8 +3,8 @@ use std::sync::Arc; -use alloy_consensus::TxEip2930; -use alloy_primitives::{Bytes, Parity, TxKind, U256}; +use alloy_consensus::{Header, TxEip2930}; +use alloy_primitives::{Bytes, PrimitiveSignature as Signature, TxKind, U256}; use rand::Rng; use reth_eth_wire::HeadersDirection; use reth_network::{ @@ -16,7 +16,7 @@ use reth_network_p2p::{ bodies::client::BodiesClient, headers::client::{HeadersClient, HeadersRequest}, }; -use reth_primitives::{Block, Header, Signature, Transaction, TransactionSigned}; +use reth_primitives::{Block, Transaction, TransactionSigned}; use reth_provider::test_utils::MockEthProvider; /// Returns a new [`TransactionSigned`] with some random parameters @@ -31,9 +31,9 @@ pub fn rng_transaction(rng: &mut impl rand::RngCore) -> TransactionSigned { input: Bytes::from(vec![1, 2]), access_list: Default::default(), }); - let signature = Signature::new(U256::default(), U256::default(), Parity::Parity(true)); + let signature = Signature::new(U256::default(), U256::default(), true); - TransactionSigned::from_transaction_and_signature(request, signature) + TransactionSigned::new_unhashed(request, signature) } #[tokio::test(flavor = "multi_thread")] diff --git a/crates/net/network/tests/it/session.rs b/crates/net/network/tests/it/session.rs index 6bc029d8a7b..53ab457eb0c 100644 --- a/crates/net/network/tests/it/session.rs +++ b/crates/net/network/tests/it/session.rs @@ -6,8 +6,11 @@ use reth_network::{ 
test_utils::{PeerConfig, Testnet}, NetworkEvent, NetworkEventListenerProvider, }; -use reth_network_api::{NetworkInfo, Peers}; -use reth_provider::test_utils::NoopProvider; +use reth_network_api::{ + events::{PeerEvent, SessionInfo}, + NetworkInfo, Peers, +}; +use reth_storage_api::noop::NoopProvider; #[tokio::test(flavor = "multi_thread")] async fn test_session_established_with_highest_version() { @@ -28,12 +31,13 @@ async fn test_session_established_with_highest_version() { while let Some(event) = events.next().await { match event { - NetworkEvent::PeerAdded(peer_id) => { + NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => { assert_eq!(handle1.peer_id(), &peer_id); } - NetworkEvent::SessionEstablished { peer_id, status, .. } => { + NetworkEvent::ActivePeerSession { info, .. } => { + let SessionInfo { peer_id, status, .. } = info; assert_eq!(handle1.peer_id(), &peer_id); - assert_eq!(status.version, EthVersion::Eth68 as u8); + assert_eq!(status.version, EthVersion::Eth68); } ev => { panic!("unexpected event {ev:?}") @@ -66,12 +70,13 @@ async fn test_session_established_with_different_capability() { while let Some(event) = events.next().await { match event { - NetworkEvent::PeerAdded(peer_id) => { + NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)) => { assert_eq!(handle1.peer_id(), &peer_id); } - NetworkEvent::SessionEstablished { peer_id, status, .. } => { + NetworkEvent::ActivePeerSession { info, .. } => { + let SessionInfo { peer_id, status, .. } = info; assert_eq!(handle1.peer_id(), &peer_id); - assert_eq!(status.version, EthVersion::Eth66 as u8); + assert_eq!(status.version, EthVersion::Eth66); } ev => { panic!("unexpected event: {ev:?}") diff --git a/crates/net/network/tests/it/startup.rs b/crates/net/network/tests/it/startup.rs index 89889a86946..43b6a29e21a 100644 --- a/crates/net/network/tests/it/startup.rs +++ b/crates/net/network/tests/it/startup.rs @@ -5,12 +5,13 @@ use std::{ use reth_chainspec::MAINNET; use reth_discv4::{Discv4Config, NatResolver}; +use reth_eth_wire::EthNetworkPrimitives; use reth_network::{ error::{NetworkError, ServiceKind}, Discovery, NetworkConfigBuilder, NetworkManager, }; use reth_network_api::{NetworkInfo, PeersInfo}; -use reth_provider::test_utils::NoopProvider; +use reth_storage_api::noop::NoopProvider; use secp256k1::SecretKey; use tokio::net::TcpListener; @@ -26,7 +27,7 @@ fn is_addr_in_use_kind(err: &NetworkError, kind: ServiceKind) -> bool { #[tokio::test(flavor = "multi_thread")] async fn test_is_default_syncing() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .disable_discovery() .listener_port(0) .build(NoopProvider::default()); @@ -37,13 +38,13 @@ async fn test_is_default_syncing() { #[tokio::test(flavor = "multi_thread")] async fn test_listener_addr_in_use() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .disable_discovery() .listener_port(0) .build(NoopProvider::default()); let network = NetworkManager::new(config).await.unwrap(); let listener_port = network.local_addr().port(); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .listener_port(listener_port) .disable_discovery() .build(NoopProvider::default()); @@ -72,7 +73,7 @@ async fn test_discovery_addr_in_use() { #[tokio::test(flavor = "multi_thread")] async fn 
test_tcp_port_node_record_no_discovery() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .listener_port(0) .disable_discovery() .build_with_noop_provider(MAINNET.clone()); @@ -90,7 +91,7 @@ async fn test_tcp_port_node_record_no_discovery() { #[tokio::test(flavor = "multi_thread")] async fn test_tcp_port_node_record_discovery() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .listener_port(0) .discovery_port(0) .disable_dns_discovery() @@ -109,10 +110,11 @@ async fn test_tcp_port_node_record_discovery() { #[tokio::test(flavor = "multi_thread")] async fn test_node_record_address_with_nat() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .add_nat(Some(NatResolver::ExternalIp("10.1.1.1".parse().unwrap()))) .disable_discv4_discovery() .disable_dns_discovery() + .listener_port(0) .build_with_noop_provider(MAINNET.clone()); let network = NetworkManager::new(config).await.unwrap(); @@ -124,9 +126,10 @@ async fn test_node_record_address_with_nat() { #[tokio::test(flavor = "multi_thread")] async fn test_node_record_address_with_nat_disable_discovery() { let secret_key = SecretKey::new(&mut rand::thread_rng()); - let config = NetworkConfigBuilder::new(secret_key) + let config = NetworkConfigBuilder::::new(secret_key) .add_nat(Some(NatResolver::ExternalIp("10.1.1.1".parse().unwrap()))) .disable_discovery() + .listener_port(0) .build_with_noop_provider(MAINNET.clone()); let network = NetworkManager::new(config).await.unwrap(); diff --git a/crates/net/network/tests/it/transaction_hash_fetching.rs b/crates/net/network/tests/it/transaction_hash_fetching.rs new file mode 100644 index 00000000000..7f1d7593a22 --- /dev/null +++ b/crates/net/network/tests/it/transaction_hash_fetching.rs @@ -0,0 +1,68 @@ +use alloy_primitives::U256; +use rand::thread_rng; +use reth_network::{ + test_utils::Testnet, + transactions::{TransactionPropagationMode::Max, TransactionsManagerConfig}, +}; +use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; +use reth_tracing::init_test_tracing; +use reth_transaction_pool::{test_utils::TransactionGenerator, PoolTransaction, TransactionPool}; +use tokio::time::Duration; + +#[tokio::test(flavor = "multi_thread")] +#[ignore] +async fn transaction_hash_fetching() { + init_test_tracing(); + + let mut config = TransactionsManagerConfig { propagation_mode: Max(0), ..Default::default() }; + config.transaction_fetcher_config.max_inflight_requests = 1; + + let provider = MockEthProvider::default(); + let num_peers = 10; + let net = Testnet::create_with(num_peers, provider.clone()).await; + + // install request handlers + let net = net.with_eth_pool_config(config); + let handle = net.spawn(); + + // connect all the peers first + handle.connect_peers().await; + + let listening_peer = &handle.peers()[num_peers - 1]; + let mut listening_peer_tx_listener = + listening_peer.pool().unwrap().pending_transactions_listener(); + + let num_tx_per_peer = 10; + + // Generate transactions for peers + for i in 1..num_peers { + let peer = &handle.peers()[i]; + let peer_pool = peer.pool().unwrap(); + + for _ in 0..num_tx_per_peer { + let mut gen = TransactionGenerator::new(thread_rng()); + let tx = gen.gen_eip1559_pooled(); + let sender = 
tx.sender(); + provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000))); + peer_pool.add_external_transaction(tx).await.unwrap(); + } + } + + // Total expected transactions + let total_expected_tx = num_tx_per_peer * (num_peers - 1); + let mut received_tx = 0; + + loop { + tokio::select! { + Some(_) = listening_peer_tx_listener.recv() => { + received_tx += 1; + if received_tx >= total_expected_tx { + break; + } + } + _ = tokio::time::sleep(Duration::from_secs(10)) => { + panic!("Timed out waiting for transactions. Received {received_tx}/{total_expected_tx}"); + } + } + } +} diff --git a/crates/net/network/tests/it/txgossip.rs b/crates/net/network/tests/it/txgossip.rs index 70ac67bb5bf..c9911885ad8 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use alloy_consensus::TxLegacy; -use alloy_primitives::U256; +use alloy_primitives::{PrimitiveSignature as Signature, U256}; use futures::StreamExt; use rand::thread_rng; use reth_network::{test_utils::Testnet, NetworkEvent, NetworkEventListenerProvider}; -use reth_network_api::PeersInfo; -use reth_primitives::{Signature, TransactionSigned}; +use reth_network_api::{events::PeerEvent, PeersInfo}; +use reth_primitives::TransactionSigned; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; use reth_transaction_pool::{test_utils::TransactionGenerator, PoolTransaction, TransactionPool}; @@ -95,7 +95,7 @@ async fn test_4844_tx_gossip_penalization() { let peer0_reputation_after = peer1.peer_handle().peer_by_id(*peer0.peer_id()).await.unwrap().reputation(); assert_ne!(peer0_reputation_before, peer0_reputation_after); - assert_eq!(received, txs[1].transaction().hash); + assert_eq!(received, txs[1].transaction().hash()); // this will return an [`Empty`] error because blob txs are disallowed to be broadcasted assert!(peer1_tx_listener.try_recv().is_err()); @@ -132,26 +132,24 @@ async fn test_sending_invalid_transactions() { value: Default::default(), input: Default::default(), }; - let tx = TransactionSigned::from_transaction_and_signature( - tx.into(), - Signature::test_signature(), - ); + let tx = TransactionSigned::new_unhashed(tx.into(), Signature::test_signature()); peer0.network().send_transactions(*peer1.peer_id(), vec![Arc::new(tx)]); } // await disconnect for bad tx spam if let Some(ev) = peer1_events.next().await { match ev { - NetworkEvent::SessionClosed { peer_id, .. } => { + NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, .. }) => { assert_eq!(peer_id, *peer0.peer_id()); } - NetworkEvent::SessionEstablished { .. } => { + NetworkEvent::ActivePeerSession { .. } | + NetworkEvent::Peer(PeerEvent::SessionEstablished { .. 
}) => { panic!("unexpected SessionEstablished event") } - NetworkEvent::PeerAdded(_) => { + NetworkEvent::Peer(PeerEvent::PeerAdded(_)) => { panic!("unexpected PeerAdded event") } - NetworkEvent::PeerRemoved(_) => { + NetworkEvent::Peer(PeerEvent::PeerRemoved(_)) => { panic!("unexpected PeerRemoved event") } } diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml index c43f7f5b347..2c61da75184 100644 --- a/crates/net/p2p/Cargo.toml +++ b/crates/net/p2p/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-eth-wire-types.workspace = true reth-consensus.workspace = true reth-network-peers.workspace = true @@ -21,6 +22,7 @@ reth-network-types.workspace = true reth-storage-errors.workspace = true # ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true @@ -32,7 +34,6 @@ tokio = { workspace = true, features = ["sync"] } auto_impl.workspace = true tracing.workspace = true derive_more.workspace = true - parking_lot = { workspace = true, optional = true } [dev-dependencies] @@ -43,5 +44,20 @@ tokio = { workspace = true, features = ["full"] } [features] default = ["std"] -test-utils = ["reth-consensus/test-utils", "parking_lot"] -std = ["reth-consensus/std"] +test-utils = [ + "reth-consensus/test-utils", + "parking_lot", + "reth-network-types/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils" +] +std = [ + "reth-consensus/std", + "reth-primitives/std", + "alloy-eips/std", + "alloy-primitives/std", + "reth-primitives-traits/std", + "alloy-consensus/std", + "derive_more/std", + "reth-network-peers/std" +] diff --git a/crates/net/p2p/src/bodies/client.rs b/crates/net/p2p/src/bodies/client.rs index 2a4b57c2345..d48fccc6d00 100644 --- a/crates/net/p2p/src/bodies/client.rs +++ b/crates/net/p2p/src/bodies/client.rs @@ -9,13 +9,16 @@ use futures::{Future, FutureExt}; use reth_primitives::BlockBody; /// The bodies future type -pub type BodiesFut = Pin>> + Send + Sync>>; +pub type BodiesFut = + Pin>> + Send + Sync>>; /// A client capable of downloading block bodies. #[auto_impl::auto_impl(&, Arc, Box)] pub trait BodiesClient: DownloadClient { + /// The body type this client fetches. + type Body: Send + Sync + Unpin + 'static; /// The output of the request future for querying block bodies. - type Output: Future>> + Sync + Send + Unpin; + type Output: Future>> + Sync + Send + Unpin; /// Fetches the block body for the requested block. fn get_block_bodies(&self, hashes: Vec) -> Self::Output { @@ -49,11 +52,11 @@ pub struct SingleBodyRequest { fut: Fut, } -impl Future for SingleBodyRequest +impl Future for SingleBodyRequest where - Fut: Future>> + Sync + Send + Unpin, + Fut: Future>> + Sync + Send + Unpin, { - type Output = PeerRequestResult>; + type Output = PeerRequestResult>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let resp = ready!(self.get_mut().fut.poll_unpin(cx)); diff --git a/crates/net/p2p/src/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs index b55229fa242..b80a308d8a1 100644 --- a/crates/net/p2p/src/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -2,17 +2,25 @@ use super::response::BlockResponse; use crate::error::DownloadResult; use alloy_primitives::BlockNumber; use futures::Stream; -use std::ops::RangeInclusive; +use std::{fmt::Debug, ops::RangeInclusive}; /// Body downloader return type. 
-pub type BodyDownloaderResult = DownloadResult<Vec<BlockResponse>>; +pub type BodyDownloaderResult<H, B> = DownloadResult<Vec<BlockResponse<H, B>>>; /// A downloader capable of fetching and yielding block bodies from block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block bodies, /// while a [`BodiesClient`][crate::bodies::client::BodiesClient] represents a client capable of /// fulfilling these requests. -pub trait BodyDownloader: Send + Sync + Stream<Item = BodyDownloaderResult> + Unpin { +pub trait BodyDownloader: + Send + Sync + Stream<Item = BodyDownloaderResult<Self::Header, Self::Body>> + Unpin +{ + /// The type of header that can be returned in a block response. + type Header: Debug + Send + Sync + Unpin + 'static; + + /// The type of the body that is being downloaded. + type Body: Debug + Send + Sync + Unpin + 'static; + /// Method for setting the download range. fn set_download_range(&mut self, range: RangeInclusive<BlockNumber>) -> DownloadResult<()>; } diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 8ae840fbf66..1b415246f54 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -1,43 +1,57 @@ +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, U256}; -use reth_primitives::{SealedBlock, SealedHeader}; +use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; +use reth_primitives_traits::InMemorySize; /// The block response #[derive(PartialEq, Eq, Debug, Clone)] -pub enum BlockResponse { +pub enum BlockResponse<H, B = BlockBody> { /// Full block response (with transactions or ommers) - Full(SealedBlock), + Full(SealedBlock<H, B>), /// The empty block response - Empty(SealedHeader), + Empty(SealedHeader<H>), } -impl BlockResponse { +impl<H, B> BlockResponse<H, B> +where + H: BlockHeader, +{ /// Return the reference to the response header - pub const fn header(&self) -> &SealedHeader { + pub const fn header(&self) -> &SealedHeader<H> { match self { Self::Full(block) => &block.header, Self::Empty(header) => header, } } - /// Calculates a heuristic for the in-memory size of the [`BlockResponse`]. - #[inline] - pub fn size(&self) -> usize { - match self { - Self::Full(block) => SealedBlock::size(block), - Self::Empty(header) => SealedHeader::size(header), - } - } - /// Return the block number pub fn block_number(&self) -> BlockNumber { - self.header().number + self.header().number() } /// Return the difficulty of the response header pub fn difficulty(&self) -> U256 { match self { - Self::Full(block) => block.difficulty, - Self::Empty(header) => header.difficulty, + Self::Full(block) => block.difficulty(), + Self::Empty(header) => header.difficulty(), + } + } + + /// Consume the response and return the body, if any + pub fn into_body(self) -> Option<B> { + match self { + Self::Full(block) => Some(block.body), + Self::Empty(_) => None, + } + } +} + +impl<H: InMemorySize, B: InMemorySize> InMemorySize for BlockResponse<H, B> { + #[inline] + fn size(&self) -> usize { + match self { + Self::Full(block) => SealedBlock::size(block), + Self::Empty(header) => SealedHeader::size(header), } } } diff --git a/crates/net/p2p/src/either.rs b/crates/net/p2p/src/either.rs index 30650069b91..3f1182bd482 100644 --- a/crates/net/p2p/src/either.rs +++ b/crates/net/p2p/src/either.rs @@ -32,8 +32,9 @@ where impl<A, B> BodiesClient for Either<A, B> where A: BodiesClient, - B: BodiesClient, + B: BodiesClient<Body = A::Body>, { + type Body = A::Body; type Output = Either<A::Output, B::Output>; fn get_block_bodies_with_priority( @@ -51,8 +52,9 @@ where impl<A, B> HeadersClient for Either<A, B> where A: HeadersClient, - B: HeadersClient, + B: HeadersClient<Header = A::Header>

, { + type Header = A::Header; type Output = Either; fn get_headers_with_priority( diff --git a/crates/net/p2p/src/error.rs b/crates/net/p2p/src/error.rs index 9394a9fdf6c..45d34fc04ec 100644 --- a/crates/net/p2p/src/error.rs +++ b/crates/net/p2p/src/error.rs @@ -1,13 +1,14 @@ use std::ops::RangeInclusive; use super::headers::client::HeadersRequest; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use derive_more::{Display, Error}; use reth_consensus::ConsensusError; use reth_network_peers::WithPeerId; use reth_network_types::ReputationChangeKind; -use reth_primitives::{GotExpected, GotExpectedBoxed, Header}; +use reth_primitives::{GotExpected, GotExpectedBoxed}; use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; use tokio::sync::{mpsc, oneshot}; @@ -26,7 +27,7 @@ pub trait EthResponseValidator { fn reputation_change_err(&self) -> Option; } -impl EthResponseValidator for RequestResult> { +impl EthResponseValidator for RequestResult> { fn is_likely_bad_headers_response(&self, request: &HeadersRequest) -> bool { match self { Ok(headers) => { @@ -38,7 +39,7 @@ impl EthResponseValidator for RequestResult> { match request.start { BlockHashOrNumber::Number(block_number) => { - headers.first().is_some_and(|header| block_number != header.number) + headers.first().is_some_and(|header| block_number != header.number()) } BlockHashOrNumber::Hash(_) => { // we don't want to hash the header @@ -79,24 +80,24 @@ impl EthResponseValidator for RequestResult> { #[derive(Clone, Debug, Eq, PartialEq, Display, Error)] pub enum RequestError { /// Closed channel to the peer. - #[display("closed channel to the peer")] /// Indicates the channel to the peer is closed. + #[display("closed channel to the peer")] ChannelClosed, /// Connection to a peer dropped while handling the request. - #[display("connection to a peer dropped while handling the request")] /// Represents a dropped connection while handling the request. + #[display("connection to a peer dropped while handling the request")] ConnectionDropped, /// Capability message is not supported by the remote peer. - #[display("capability message is not supported by remote peer")] /// Indicates an unsupported capability message from the remote peer. + #[display("capability message is not supported by remote peer")] UnsupportedCapability, /// Request timed out while awaiting response. - #[display("request timed out while awaiting response")] /// Represents a timeout while waiting for a response. + #[display("request timed out while awaiting response")] Timeout, /// Received bad response. - #[display("received bad response")] /// Indicates a bad response was received. 
+ #[display("received bad response")] BadResponse, } @@ -216,6 +217,8 @@ impl From for DownloadError { #[cfg(test)] mod tests { + use alloy_consensus::Header; + use super::*; #[test] diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 0116f134881..a966c01c933 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -5,16 +5,18 @@ use crate::{ headers::client::{HeadersClient, SingleHeaderRequest}, BlockClient, }; +use alloy_consensus::BlockHeader; use alloy_primitives::{Sealable, B256}; -use reth_consensus::{Consensus, ConsensusError}; +use reth_consensus::Consensus; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::WithPeerId; -use reth_primitives::{BlockBody, GotExpected, Header, SealedBlock, SealedHeader}; +use reth_primitives::{SealedBlock, SealedHeader}; use std::{ cmp::Reverse, collections::{HashMap, VecDeque}, fmt::Debug, future::Future, + hash::Hash, pin::Pin, sync::Arc, task::{ready, Context, Poll}, @@ -23,14 +25,23 @@ use tracing::debug; /// A Client that can fetch full blocks from the network. #[derive(Debug, Clone)] -pub struct FullBlockClient { +pub struct FullBlockClient +where + Client: BlockClient, +{ client: Client, - consensus: Arc, + consensus: Arc>, } -impl FullBlockClient { +impl FullBlockClient +where + Client: BlockClient, +{ /// Creates a new instance of `FullBlockClient`. - pub fn new(client: Client, consensus: Arc) -> Self { + pub fn new( + client: Client, + consensus: Arc>, + ) -> Self { Self { client, consensus } } @@ -55,6 +66,7 @@ where let client = self.client.clone(); FetchFullBlockFuture { hash, + consensus: self.consensus.clone(), request: FullBlockRequest { header: Some(client.get_header(hash.into())), body: Some(client.get_block_body(hash)), @@ -84,11 +96,7 @@ where start_hash: hash, count, request: FullBlockRangeRequest { - headers: Some(client.get_headers(HeadersRequest { - start: hash.into(), - limit: count, - direction: HeadersDirection::Falling, - })), + headers: Some(client.get_headers(HeadersRequest::falling(hash.into(), count))), bodies: None, }, client, @@ -110,15 +118,16 @@ where Client: BlockClient, { client: Client, + consensus: Arc>, hash: B256, request: FullBlockRequest, - header: Option, - body: Option, + header: Option>, + body: Option>, } impl FetchFullBlockFuture where - Client: BlockClient, + Client: BlockClient, { /// Returns the hash of the block being requested. pub const fn hash(&self) -> &B256 { @@ -127,11 +136,11 @@ where /// If the header request is already complete, this returns the block number pub fn block_number(&self) -> Option { - self.header.as_ref().map(|h| h.number) + self.header.as_ref().map(|h| h.number()) } /// Returns the [`SealedBlock`] if the request is complete and valid. 
- fn take_block(&mut self) -> Option { + fn take_block(&mut self) -> Option> { if self.header.is_none() || self.body.is_none() { return None } @@ -142,7 +151,8 @@ where BodyResponse::Validated(body) => Some(SealedBlock::new(header, body)), BodyResponse::PendingValidation(resp) => { // ensure the block is valid, else retry - if let Err(err) = ensure_valid_body_response(&header, resp.data()) { + if let Err(err) = self.consensus.validate_body_against_header(resp.data(), &header) + { debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body"); self.client.report_bad_message(resp.peer_id()); self.header = Some(header); @@ -154,9 +164,9 @@ where } } - fn on_block_response(&mut self, resp: WithPeerId) { + fn on_block_response(&mut self, resp: WithPeerId) { if let Some(ref header) = self.header { - if let Err(err) = ensure_valid_body_response(header, resp.data()) { + if let Err(err) = self.consensus.validate_body_against_header(resp.data(), header) { debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body"); self.client.report_bad_message(resp.peer_id()); return @@ -170,9 +180,9 @@ where impl Future for FetchFullBlockFuture where - Client: BlockClient + 'static, + Client: BlockClient + 'static, { - type Output = SealedBlock; + type Output = SealedBlock; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -185,15 +195,8 @@ where ResponseResult::Header(res) => { match res { Ok(maybe_header) => { - let (peer, maybe_header) = maybe_header - .map(|h| { - h.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) - }) - .split(); + let (peer, maybe_header) = + maybe_header.map(|h| h.map(SealedHeader::seal)).split(); if let Some(header) = maybe_header { if header.hash() == this.hash { this.header = Some(header); @@ -249,7 +252,7 @@ where impl Debug for FetchFullBlockFuture where - Client: BlockClient, + Client: BlockClient, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("FetchFullBlockFuture") @@ -272,7 +275,7 @@ impl FullBlockRequest where Client: BlockClient, { - fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { if let Some(fut) = Pin::new(&mut self.header).as_pin_mut() { if let Poll::Ready(res) = fut.poll(cx) { self.header = None; @@ -293,79 +296,19 @@ where /// The result of a request for a single header or body. This is yielded by the `FullBlockRequest` /// future. -enum ResponseResult { - Header(PeerRequestResult>), - Body(PeerRequestResult>), +enum ResponseResult { + Header(PeerRequestResult>), + Body(PeerRequestResult>), } /// The response of a body request. #[derive(Debug)] -enum BodyResponse { +enum BodyResponse { /// Already validated against transaction root of header - Validated(BlockBody), + Validated(B), /// Still needs to be validated against header - PendingValidation(WithPeerId), + PendingValidation(WithPeerId), } - -/// Ensures the block response data matches the header. 
-/// -/// This ensures the body response items match the header's hashes: -/// - ommer hash -/// - transaction root -/// - withdrawals root -fn ensure_valid_body_response( - header: &SealedHeader, - block: &BlockBody, -) -> Result<(), ConsensusError> { - let ommers_hash = block.calculate_ommers_root(); - if header.ommers_hash != ommers_hash { - return Err(ConsensusError::BodyOmmersHashDiff( - GotExpected { got: ommers_hash, expected: header.ommers_hash }.into(), - )) - } - - let tx_root = block.calculate_tx_root(); - if header.transactions_root != tx_root { - return Err(ConsensusError::BodyTransactionRootDiff( - GotExpected { got: tx_root, expected: header.transactions_root }.into(), - )) - } - - match (header.withdrawals_root, &block.withdrawals) { - (Some(header_withdrawals_root), Some(withdrawals)) => { - let withdrawals = withdrawals.as_slice(); - let withdrawals_root = reth_primitives::proofs::calculate_withdrawals_root(withdrawals); - if withdrawals_root != header_withdrawals_root { - return Err(ConsensusError::BodyWithdrawalsRootDiff( - GotExpected { got: withdrawals_root, expected: header_withdrawals_root }.into(), - )) - } - } - (None, None) => { - // this is ok because we assume the fork is not active in this case - } - _ => return Err(ConsensusError::WithdrawalsRootUnexpected), - } - - match (header.requests_root, &block.requests) { - (Some(header_requests_root), Some(requests)) => { - let requests = requests.0.as_slice(); - let requests_root = reth_primitives::proofs::calculate_requests_root(requests); - if requests_root != header_requests_root { - return Err(ConsensusError::BodyRequestsRootDiff( - GotExpected { got: requests_root, expected: header_requests_root }.into(), - )) - } - } - (None, None) => { - // this is ok because we assume the fork is not active in this case - } - _ => return Err(ConsensusError::RequestsRootUnexpected), - } - - Ok(()) -} - /// A future that downloads a range of full blocks from the network. /// /// This first fetches the headers for the given range using the inner `Client`. Once the request @@ -387,7 +330,7 @@ where /// The client used to fetch headers and bodies. client: Client, /// The consensus instance used to validate the blocks. - consensus: Arc, + consensus: Arc>, /// The block hash to start fetching from (inclusive). start_hash: B256, /// How many blocks to fetch: `len([start_hash, ..]) == count` @@ -395,20 +338,20 @@ where /// Requests for headers and bodies that are in progress. request: FullBlockRangeRequest, /// Fetched headers. - headers: Option>, + headers: Option>>, /// The next headers to request bodies for. This is drained as responses are received. - pending_headers: VecDeque, + pending_headers: VecDeque>, /// The bodies that have been received so far. - bodies: HashMap, + bodies: HashMap, BodyResponse>, } impl FetchFullBlockRangeFuture where - Client: BlockClient, + Client: BlockClient, { /// Returns the block hashes for the given range, if they are available. pub fn range_block_hashes(&self) -> Option> { - self.headers.as_ref().map(|h| h.iter().map(|h| h.hash()).collect::>()) + self.headers.as_ref().map(|h| h.iter().map(|h| h.hash()).collect()) } /// Returns whether or not the bodies map is fully populated with requested headers and bodies. @@ -419,14 +362,14 @@ where /// Inserts a block body, matching it with the `next_header`. /// /// Note: this assumes the response matches the next header in the queue. 
- fn insert_body(&mut self, body_response: BodyResponse) { + fn insert_body(&mut self, body_response: BodyResponse) { if let Some(header) = self.pending_headers.pop_front() { self.bodies.insert(header, body_response); } } /// Inserts multiple block bodies. - fn insert_bodies(&mut self, bodies: impl IntoIterator) { + fn insert_bodies(&mut self, bodies: impl IntoIterator>) { for body in bodies { self.insert_body(body); } @@ -445,7 +388,7 @@ where /// /// These are returned in falling order starting with the requested `hash`, i.e. with /// descending block numbers. - fn take_blocks(&mut self) -> Option> { + fn take_blocks(&mut self) -> Option>> { if !self.is_bodies_complete() { // not done with bodies yet return None @@ -462,7 +405,9 @@ where BodyResponse::Validated(body) => body, BodyResponse::PendingValidation(resp) => { // ensure the block is valid, else retry - if let Err(err) = ensure_valid_body_response(header, resp.data()) { + if let Err(err) = + self.consensus.validate_body_against_header(resp.data(), header) + { debug!(target: "downloaders", %err, hash=?header.hash(), "Received wrong body in range response"); self.client.report_bad_message(resp.peer_id()); @@ -500,23 +445,14 @@ where Some(valid_responses) } - fn on_headers_response(&mut self, headers: WithPeerId>) { - let (peer, mut headers_falling) = headers - .map(|h| { - h.into_iter() - .map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) - .collect::>() - }) - .split(); + fn on_headers_response(&mut self, headers: WithPeerId>) { + let (peer, mut headers_falling) = + headers.map(|h| h.into_iter().map(SealedHeader::seal).collect::>()).split(); // fill in the response if it's the correct length if headers_falling.len() == self.count as usize { // sort headers from highest to lowest block number - headers_falling.sort_unstable_by_key(|h| Reverse(h.number)); + headers_falling.sort_unstable_by_key(|h| Reverse(h.number())); // check the starting hash if headers_falling[0].hash() == self.start_hash { @@ -567,9 +503,9 @@ where impl Future for FetchFullBlockRangeFuture where - Client: BlockClient + 'static, + Client: BlockClient + 'static, { - type Output = Vec; + type Output = Vec>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.get_mut(); @@ -676,7 +612,10 @@ impl FullBlockRangeRequest where Client: BlockClient, { - fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll> { if let Some(fut) = Pin::new(&mut self.headers).as_pin_mut() { if let Poll::Ready(res) = fut.poll(cx) { self.headers = None; @@ -697,13 +636,15 @@ where // The result of a request for headers or block bodies. This is yielded by the // `FullBlockRangeRequest` future. 
-enum RangeResponseResult { - Header(PeerRequestResult>), - Body(PeerRequestResult>), +enum RangeResponseResult { + Header(PeerRequestResult>), + Body(PeerRequestResult>), } #[cfg(test)] mod tests { + use reth_primitives::BlockBody; + use super::*; use crate::test_utils::TestFullBlockClient; use std::ops::Range; @@ -711,7 +652,7 @@ mod tests { #[tokio::test] async fn download_single_full_block() { let client = TestFullBlockClient::default(); - let header = SealedHeader::default(); + let header: SealedHeader = SealedHeader::default(); let body = BlockBody::default(); client.insert(header.clone(), body.clone()); let client = FullBlockClient::test_client(client); @@ -723,7 +664,7 @@ mod tests { #[tokio::test] async fn download_single_full_block_range() { let client = TestFullBlockClient::default(); - let header = SealedHeader::default(); + let header: SealedHeader = SealedHeader::default(); let body = BlockBody::default(); client.insert(header.clone(), body.clone()); let client = FullBlockClient::test_client(client); @@ -738,7 +679,7 @@ mod tests { client: &TestFullBlockClient, range: Range, ) -> (SealedHeader, BlockBody) { - let mut sealed_header = SealedHeader::default(); + let mut sealed_header: SealedHeader = SealedHeader::default(); let body = BlockBody::default(); for _ in range { let (mut header, hash) = sealed_header.split(); @@ -746,9 +687,7 @@ mod tests { header.parent_hash = hash; header.number += 1; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - sealed_header = SealedHeader::new(header, seal); + sealed_header = SealedHeader::seal(header); client.insert(sealed_header.clone(), body.clone()); } @@ -801,6 +740,7 @@ mod tests { let test_consensus = reth_consensus::test_utils::TestConsensus::default(); test_consensus.set_fail_validation(true); + test_consensus.set_fail_body_against_header(false); let client = FullBlockClient::new(client, Arc::new(test_consensus)); let received = client.get_full_block_range(header.hash(), range_length as u64).await; diff --git a/crates/net/p2p/src/headers/client.rs b/crates/net/p2p/src/headers/client.rs index b73ea4e925f..4be6208c4a2 100644 --- a/crates/net/p2p/src/headers/client.rs +++ b/crates/net/p2p/src/headers/client.rs @@ -1,8 +1,8 @@ use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; +use alloy_consensus::Header; use alloy_eips::BlockHashOrNumber; use futures::{Future, FutureExt}; pub use reth_eth_wire_types::{BlockHeaders, HeadersDirection}; -use reth_primitives::Header; use std::{ fmt::Debug, pin::Pin, @@ -21,14 +21,45 @@ pub struct HeadersRequest { pub direction: HeadersDirection, } +impl HeadersRequest { + /// Creates a request for a single header (direction doesn't matter). + /// + /// # Arguments + /// * `start` - The block hash or number to start from + pub const fn one(start: BlockHashOrNumber) -> Self { + Self { direction: HeadersDirection::Rising, limit: 1, start } + } + + /// Creates a request for headers in rising direction (ascending block numbers). + /// + /// # Arguments + /// * `start` - The block hash or number to start from + /// * `limit` - Maximum number of headers to retrieve + pub const fn rising(start: BlockHashOrNumber, limit: u64) -> Self { + Self { direction: HeadersDirection::Rising, limit, start } + } + + /// Creates a request for headers in falling direction (descending block numbers). 
+ /// + /// # Arguments + /// * `start` - The block hash or number to start from + /// * `limit` - Maximum number of headers to retrieve + pub const fn falling(start: BlockHashOrNumber, limit: u64) -> Self { + Self { direction: HeadersDirection::Falling, limit, start } + } +} + /// The headers future type -pub type HeadersFut = Pin>> + Send + Sync>>; +pub type HeadersFut = + Pin>> + Send + Sync>>; /// The block headers downloader client #[auto_impl::auto_impl(&, Arc, Box)] pub trait HeadersClient: DownloadClient { + /// The header type this client fetches. + type Header: Send + Sync + Unpin; /// The headers future type - type Output: Future>> + Sync + Send + Unpin; + type Output: Future>> + Sync + Send + Unpin; /// Sends the header request to the p2p network and returns the header response received from a /// peer. @@ -55,12 +86,7 @@ pub trait HeadersClient: DownloadClient { start: BlockHashOrNumber, priority: Priority, ) -> SingleHeaderRequest { - let req = HeadersRequest { - start, - limit: 1, - // doesn't matter for a single header - direction: HeadersDirection::Rising, - }; + let req = HeadersRequest::one(start); let fut = self.get_headers_with_priority(req, priority); SingleHeaderRequest { fut } } @@ -73,11 +99,11 @@ pub struct SingleHeaderRequest { fut: Fut, } -impl Future for SingleHeaderRequest +impl Future for SingleHeaderRequest where - Fut: Future>> + Sync + Send + Unpin, + Fut: Future>> + Sync + Send + Unpin, { - type Output = PeerRequestResult>; + type Output = PeerRequestResult>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let resp = ready!(self.get_mut().fut.poll_unpin(cx)); diff --git a/crates/net/p2p/src/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs index 5565880ed39..1bc76924a6c 100644 --- a/crates/net/p2p/src/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -1,10 +1,13 @@ use super::error::HeadersDownloaderResult; use crate::error::{DownloadError, DownloadResult}; -use alloy_eips::BlockHashOrNumber; +use alloy_consensus::BlockHeader; +use alloy_eips::{eip1898::BlockWithParent, BlockHashOrNumber}; use alloy_primitives::B256; use futures::Stream; -use reth_consensus::Consensus; +use reth_consensus::HeaderValidator; use reth_primitives::SealedHeader; +use std::fmt::Debug; + /// A downloader capable of fetching and yielding block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block headers, @@ -13,19 +16,25 @@ use reth_primitives::SealedHeader; /// /// A [`HeaderDownloader`] is a [Stream] that returns batches of headers. pub trait HeaderDownloader: - Send + Sync + Stream>> + Unpin + Send + + Sync + + Stream>, Self::Header>> + + Unpin { + /// The header type being downloaded. 
+ type Header: Debug + Send + Sync + Unpin + 'static; + /// Updates the gap to sync which ranges from local head to the sync target /// /// See also [`HeaderDownloader::update_sync_target`] and /// [`HeaderDownloader::update_local_head`] - fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) { + fn update_sync_gap(&mut self, head: SealedHeader<Self::Header>, target: SyncTarget) { self.update_local_head(head); self.update_sync_target(target); } /// Updates the block number of the local database - fn update_local_head(&mut self, head: SealedHeader); + fn update_local_head(&mut self, head: SealedHeader<Self::Header>); /// Updates the target we want to sync to fn update_sync_target(&mut self, target: SyncTarget); @@ -50,7 +59,7 @@ pub enum SyncTarget { /// /// The benefit of this variant is that it already provides the block number of the highest /// missing block. - Gap(SealedHeader), + Gap(BlockWithParent), /// This represents a tip by block number TipNum(u64), } @@ -65,7 +74,7 @@ impl SyncTarget { pub fn tip(&self) -> BlockHashOrNumber { match self { Self::Tip(tip) => (*tip).into(), - Self::Gap(gap) => gap.parent_hash.into(), + Self::Gap(gap) => gap.parent.into(), Self::TipNum(num) => (*num).into(), } } @@ -74,23 +83,23 @@ impl SyncTarget { /// Validate whether the header is valid in relation to its parent /// /// Returns an error if validation against the parent or the standalone header checks fail. -pub fn validate_header_download( - consensus: &dyn Consensus, - header: &SealedHeader, - parent: &SealedHeader, +pub fn validate_header_download<H: BlockHeader>( + consensus: &dyn HeaderValidator<H>, + header: &SealedHeader<H>, + parent: &SealedHeader<H>, ) -> DownloadResult<()> { // validate header against parent consensus.validate_header_against_parent(header, parent).map_err(|error| { DownloadError::HeaderValidation { hash: header.hash(), - number: header.number, + number: header.number(), error: Box::new(error), } })?; // validate header standalone consensus.validate_header(header).map_err(|error| DownloadError::HeaderValidation { hash: header.hash(), - number: header.number, + number: header.number(), error: Box::new(error), })?; Ok(()) } diff --git a/crates/net/p2p/src/headers/error.rs b/crates/net/p2p/src/headers/error.rs index b22aae9248e..8757bb215f5 100644 --- a/crates/net/p2p/src/headers/error.rs +++ b/crates/net/p2p/src/headers/error.rs @@ -3,19 +3,19 @@ use reth_consensus::ConsensusError; use reth_primitives::SealedHeader; /// Header downloader result -pub type HeadersDownloaderResult<T> = Result<T, HeadersDownloaderError>; +pub type HeadersDownloaderResult<T, H> = Result<T, HeadersDownloaderError<H>>; /// Error variants that can happen when sending requests to a session. #[derive(Debug, Clone, Eq, PartialEq, Display, Error)] -pub enum HeadersDownloaderError { +pub enum HeadersDownloaderError<H> { /// The downloaded header cannot be attached to the local head, /// but is valid otherwise. #[display("valid downloaded header cannot be attached to the local head: {error}")] DetachedHead { /// The local head we attempted to attach to. - local_head: Box<SealedHeader>, + local_head: Box<SealedHeader<H>>, /// The header we attempted to attach. - header: Box<SealedHeader>, + header: Box<SealedHeader<H>>, /// The error that occurred when attempting to attach the header.
#[error(source)] error: Box, diff --git a/crates/net/p2p/src/lib.rs b/crates/net/p2p/src/lib.rs index 2ba8012f0ae..7dcb77671d4 100644 --- a/crates/net/p2p/src/lib.rs +++ b/crates/net/p2p/src/lib.rs @@ -52,3 +52,14 @@ pub use headers::client::HeadersClient; pub trait BlockClient: HeadersClient + BodiesClient + Unpin + Clone {} impl BlockClient for T where T: HeadersClient + BodiesClient + Unpin + Clone {} + +/// The [`BlockClient`] providing Ethereum block parts. +pub trait EthBlockClient: + BlockClient
<Header = alloy_consensus::Header, Body = reth_primitives::BlockBody>
+{
+}
+
+impl<T> EthBlockClient for T where
+    T: BlockClient<Header = alloy_consensus::Header, Body = reth_primitives::BlockBody>
+{ +} diff --git a/crates/net/p2p/src/test_utils/bodies.rs b/crates/net/p2p/src/test_utils/bodies.rs index cfd29212916..0689d403f2c 100644 --- a/crates/net/p2p/src/test_utils/bodies.rs +++ b/crates/net/p2p/src/test_utils/bodies.rs @@ -36,6 +36,7 @@ impl BodiesClient for TestBodiesClient where F: Fn(Vec) -> PeerRequestResult> + Send + Sync, { + type Body = BlockBody; type Output = BodiesFut; fn get_block_bodies_with_priority( diff --git a/crates/net/p2p/src/test_utils/full_block.rs b/crates/net/p2p/src/test_utils/full_block.rs index 8a13f69325d..ee65bcb3f07 100644 --- a/crates/net/p2p/src/test_utils/full_block.rs +++ b/crates/net/p2p/src/test_utils/full_block.rs @@ -5,12 +5,13 @@ use crate::{ headers::client::{HeadersClient, HeadersRequest}, priority::Priority, }; +use alloy_consensus::Header; use alloy_eips::{BlockHashOrNumber, BlockNumHash}; use alloy_primitives::B256; use parking_lot::Mutex; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, Header, SealedBlock, SealedHeader}; +use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; use std::{collections::HashMap, sync::Arc}; /// A headers+bodies client implementation that does nothing. @@ -40,6 +41,7 @@ impl DownloadClient for NoopFullBlockClient { /// Implements the `BodiesClient` trait for the `NoopFullBlockClient` struct. impl BodiesClient for NoopFullBlockClient { + type Body = BlockBody; /// Defines the output type of the function. type Output = futures::future::Ready>>; @@ -65,6 +67,7 @@ impl BodiesClient for NoopFullBlockClient { } impl HeadersClient for NoopFullBlockClient { + type Header = Header; /// The output type representing a future containing a peer request result with a vector of /// headers. type Output = futures::future::Ready>>; @@ -152,6 +155,7 @@ impl DownloadClient for TestFullBlockClient { /// Implements the `HeadersClient` trait for the `TestFullBlockClient` struct. impl HeadersClient for TestFullBlockClient { + type Header = Header; /// Specifies the associated output type. type Output = futures::future::Ready>>; @@ -205,6 +209,7 @@ impl HeadersClient for TestFullBlockClient { /// Implements the `BodiesClient` trait for the `TestFullBlockClient` struct. impl BodiesClient for TestFullBlockClient { + type Body = BlockBody; /// Defines the output type of the function. 
type Output = futures::future::Ready>>; diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index e61183d22e4..5809ad6bdd4 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -10,12 +10,12 @@ use crate::{ }, priority::Priority, }; -use alloy_primitives::Sealable; +use alloy_consensus::Header; use futures::{Future, FutureExt, Stream, StreamExt}; use reth_consensus::{test_utils::TestConsensus, Consensus}; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{Header, SealedHeader}; +use reth_primitives::SealedHeader; use std::{ fmt, pin::Pin, @@ -62,6 +62,8 @@ impl TestHeaderDownloader { } impl HeaderDownloader for TestHeaderDownloader { + type Header = Header; + fn update_local_head(&mut self, _head: SealedHeader) {} fn update_sync_target(&mut self, _target: SyncTarget) {} @@ -72,7 +74,7 @@ impl HeaderDownloader for TestHeaderDownloader { } impl Stream for TestHeaderDownloader { - type Item = HeadersDownloaderResult>; + type Item = HeadersDownloaderResult, Header>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -143,8 +145,10 @@ impl Stream for TestDownload { return Poll::Ready(None) } - let empty = SealedHeader::default(); - if let Err(error) = this.consensus.validate_header_against_parent(&empty, &empty) { + let empty: SealedHeader = SealedHeader::default(); + if let Err(error) = + >::validate_header_against_parent(&this.consensus, &empty, &empty) + { this.done = true; return Poll::Ready(Some(Err(DownloadError::HeaderValidation { hash: empty.hash(), @@ -156,16 +160,8 @@ impl Stream for TestDownload { match ready!(this.get_or_init_fut().poll_unpin(cx)) { Ok(resp) => { // Skip head and seal headers - let mut headers = resp - .1 - .into_iter() - .skip(1) - .map(|header| { - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) - .collect::>(); + let mut headers = + resp.1.into_iter().skip(1).map(SealedHeader::seal).collect::>(); headers.sort_unstable_by_key(|h| h.number); headers.into_iter().for_each(|h| this.buffer.push(h)); this.done = true; @@ -227,6 +223,7 @@ impl DownloadClient for TestHeadersClient { } impl HeadersClient for TestHeadersClient { + type Header = Header; type Output = TestHeadersFut; fn get_headers_with_priority( diff --git a/crates/net/peers/Cargo.toml b/crates/net/peers/Cargo.toml index 5ac24edea75..9e7ccc3084d 100644 --- a/crates/net/peers/Cargo.toml +++ b/crates/net/peers/Cargo.toml @@ -15,8 +15,8 @@ workspace = true # eth alloy-primitives = { workspace = true, features = ["rlp"] } -alloy-rlp = { workspace = true, features = ["derive"] } -enr.workspace = true +alloy-rlp = { workspace = true, features = ["derive", "core-net", "core-error"] } +enr = { workspace = true, optional = true } # crypto @@ -32,8 +32,18 @@ alloy-primitives = { workspace = true, features = ["rand"] } rand.workspace = true secp256k1 = { workspace = true, features = ["rand"] } serde_json.workspace = true +enr.workspace = true tokio = { workspace = true, features = ["net", "macros", "rt"] } [features] +default = ["std"] +std = [ + "alloy-primitives/std", + "alloy-rlp/std", + "secp256k1?/std", + "serde_with/std", + "thiserror/std", + "url/std", +] secp256k1 = ["dep:secp256k1", "enr/secp256k1"] -net = ["dep:tokio", "tokio?/net"] +net = ["std", "dep:tokio", "tokio?/net"] diff --git a/crates/net/peers/src/bootnodes/mod.rs 
b/crates/net/peers/src/bootnodes/mod.rs index 31c91e5d1ce..b149c108a96 100644 --- a/crates/net/peers/src/bootnodes/mod.rs +++ b/crates/net/peers/src/bootnodes/mod.rs @@ -1,6 +1,7 @@ //! Bootnodes for the network use crate::NodeRecord; +use alloc::vec::Vec; mod ethereum; pub use ethereum::*; diff --git a/crates/net/peers/src/lib.rs b/crates/net/peers/src/lib.rs index 1d60994d8e1..a8bf51da2ee 100644 --- a/crates/net/peers/src/lib.rs +++ b/crates/net/peers/src/lib.rs @@ -52,11 +52,19 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + +use alloc::{ + format, + string::{String, ToString}, +}; use alloy_primitives::B512; -use std::str::FromStr; +use core::str::FromStr; // Re-export PeerId for ease of use. +#[cfg(feature = "secp256k1")] pub use enr::Enr; /// Alias for a peer identifier @@ -108,8 +116,8 @@ pub fn id2pk(id: PeerId) -> Result { pub enum AnyNode { /// An "enode:" peer with full ip NodeRecord(NodeRecord), - #[cfg(feature = "secp256k1")] /// An "enr:" peer + #[cfg(feature = "secp256k1")] Enr(Enr), /// An incomplete "enode" with only a peer id PeerId(PeerId), @@ -137,8 +145,8 @@ impl AnyNode { let node_record = NodeRecord { address: enr .ip4() - .map(std::net::IpAddr::from) - .or_else(|| enr.ip6().map(std::net::IpAddr::from))?, + .map(core::net::IpAddr::from) + .or_else(|| enr.ip6().map(core::net::IpAddr::from))?, tcp_port: enr.tcp4().or_else(|| enr.tcp6())?, udp_port: enr.udp4().or_else(|| enr.udp6())?, id: pk2id(&enr.public_key()), @@ -186,8 +194,8 @@ impl FromStr for AnyNode { } } -impl std::fmt::Display for AnyNode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl core::fmt::Display for AnyNode { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match self { Self::NodeRecord(record) => write!(f, "{record}"), #[cfg(feature = "secp256k1")] diff --git a/crates/net/peers/src/node_record.rs b/crates/net/peers/src/node_record.rs index d6836d88193..15ef5ad8522 100644 --- a/crates/net/peers/src/node_record.rs +++ b/crates/net/peers/src/node_record.rs @@ -1,15 +1,18 @@ //! Commonly used `NodeRecord` type for peers. -use std::{ +use crate::PeerId; +use alloc::{ + format, + string::{String, ToString}, +}; +use alloy_rlp::{RlpDecodable, RlpEncodable}; +use core::{ fmt, fmt::Write, net::{IpAddr, Ipv4Addr, SocketAddr}, num::ParseIntError, str::FromStr, }; - -use crate::PeerId; -use alloy_rlp::{RlpDecodable, RlpEncodable}; use serde_with::{DeserializeFromStr, SerializeDisplay}; #[cfg(feature = "secp256k1")] @@ -231,12 +234,11 @@ impl TryFrom<&Enr> for NodeRecord { #[cfg(test)] mod tests { + use super::*; use alloy_rlp::Decodable; use rand::{thread_rng, Rng, RngCore}; use std::net::Ipv6Addr; - use super::*; - #[test] fn test_mapped_ipv6() { let mut rng = thread_rng(); diff --git a/crates/net/peers/src/trusted_peer.rs b/crates/net/peers/src/trusted_peer.rs index aa7e0a01533..b87c4d6da2f 100644 --- a/crates/net/peers/src/trusted_peer.rs +++ b/crates/net/peers/src/trusted_peer.rs @@ -1,14 +1,14 @@ //! `NodeRecord` type that uses a domain instead of an IP. use crate::{NodeRecord, PeerId}; -use serde_with::{DeserializeFromStr, SerializeDisplay}; -use std::{ +use alloc::string::{String, ToString}; +use core::{ fmt::{self, Write}, - io::Error, net::IpAddr, num::ParseIntError, str::FromStr, }; +use serde_with::{DeserializeFromStr, SerializeDisplay}; use url::Host; /// Represents the node record of a trusted peer. 
The only difference between this and a @@ -45,11 +45,13 @@ impl TrustedPeer { Self { host, tcp_port: port, udp_port: port, id } } + #[cfg(any(test, feature = "std"))] const fn to_node_record(&self, ip: IpAddr) -> NodeRecord { NodeRecord { address: ip, id: self.id, tcp_port: self.tcp_port, udp_port: self.udp_port } } /// Tries to resolve directly to a [`NodeRecord`] if the host is an IP address. + #[cfg(any(test, feature = "std"))] fn try_node_record(&self) -> Result { match &self.host { Host::Ipv4(ip) => Ok(self.to_node_record((*ip).into())), @@ -61,23 +63,24 @@ impl TrustedPeer { /// Resolves the host in a [`TrustedPeer`] to an IP address, returning a [`NodeRecord`]. /// /// This use [`ToSocketAddr`](std::net::ToSocketAddrs) to resolve the host to an IP address. - pub fn resolve_blocking(&self) -> Result { + #[cfg(any(test, feature = "std"))] + pub fn resolve_blocking(&self) -> Result { let domain = match self.try_node_record() { Ok(record) => return Ok(record), Err(domain) => domain, }; // Resolve the domain to an IP address let mut ips = std::net::ToSocketAddrs::to_socket_addrs(&(domain, 0))?; - let ip = ips - .next() - .ok_or_else(|| Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found"))?; + let ip = ips.next().ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found") + })?; Ok(self.to_node_record(ip.ip())) } /// Resolves the host in a [`TrustedPeer`] to an IP address, returning a [`NodeRecord`]. #[cfg(any(test, feature = "net"))] - pub async fn resolve(&self) -> Result { + pub async fn resolve(&self) -> Result { let domain = match self.try_node_record() { Ok(record) => return Ok(record), Err(domain) => domain, @@ -85,9 +88,9 @@ impl TrustedPeer { // Resolve the domain to an IP address let mut ips = tokio::net::lookup_host(format!("{domain}:0")).await?; - let ip = ips - .next() - .ok_or_else(|| Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found"))?; + let ip = ips.next().ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found") + })?; Ok(self.to_node_record(ip.ip())) } diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index c2c3eb46326..7d209a90fca 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -18,15 +18,13 @@ reth-evm.workspace = true reth-provider.workspace = true reth-engine-primitives.workspace = true reth-transaction-pool.workspace = true -reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true -reth-rpc-eth-api.workspace = true reth-network-api.workspace = true reth-node-types.workspace = true -reth-primitives.workspace = true reth-node-core.workspace = true alloy-rpc-types-engine.workspace = true -eyre.workspace = true \ No newline at end of file +eyre.workspace = true diff --git a/crates/node/api/src/lib.rs b/crates/node/api/src/lib.rs index 7692ed6f2ca..105cac47d94 100644 --- a/crates/node/api/src/lib.rs +++ b/crates/node/api/src/lib.rs @@ -16,6 +16,10 @@ pub use reth_engine_primitives::*; pub use reth_payload_primitives as payload; pub use reth_payload_primitives::*; +/// Traits and helper types used to abstract over payload builder types. +pub use reth_payload_builder_primitives as payload_builder; +pub use reth_payload_builder_primitives::*; + /// Traits and helper types used to abstract over EVM methods and types. 
pub use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; @@ -25,5 +29,3 @@ pub use node::*; // re-export for convenience pub use reth_node_types::*; pub use reth_provider::FullProvider; - -pub use reth_rpc_eth_api::EthApiTypes; diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 40c2a3a60b0..edb68a6589b 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -1,22 +1,18 @@ //! Traits for configuring a node. -use std::{future::Future, marker::PhantomData}; - +use crate::ConfigureEvm; use alloy_rpc_types_engine::JwtSecret; use reth_beacon_consensus::BeaconConsensusEngineHandle; -use reth_consensus::Consensus; -use reth_engine_primitives::EngineValidator; +use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; -use reth_node_types::{NodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; -use reth_payload_builder::PayloadBuilderHandle; -use reth_primitives::Header; +use reth_node_types::{HeaderTy, NodeTypes, NodeTypesWithDB, NodeTypesWithEngine, TxTy}; +use reth_payload_builder_primitives::PayloadBuilder; use reth_provider::FullProvider; use reth_tasks::TaskExecutor; -use reth_transaction_pool::TransactionPool; - -use crate::ConfigureEvm; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use std::{future::Future, marker::PhantomData}; /// A helper trait that is downstream of the [`NodeTypesWithEngine`] trait and adds stateful /// components to the node. @@ -50,22 +46,23 @@ where /// Encapsulates all types and components of the node. pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { /// The transaction pool of the node. - type Pool: TransactionPool + Unpin; + type Pool: TransactionPool>> + Unpin; /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. - type Evm: ConfigureEvm
<Header = Header>;
+    type Evm: ConfigureEvm<Header = HeaderTy<Self::Types>
, Transaction = TxTy>; /// The type that knows how to execute blocks. - type Executor: BlockExecutorProvider; + type Executor: BlockExecutorProvider::Primitives>; /// The consensus type of the node. - type Consensus: Consensus + Clone + Unpin + 'static; + type Consensus: FullConsensus<::Primitives> + Clone + Unpin + 'static; /// Network API. type Network: FullNetwork; - /// Validator for the engine API. - type EngineValidator: EngineValidator<::Engine>; + /// Builds new blocks. + type PayloadBuilder: PayloadBuilder::Engine> + + Clone; /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; @@ -83,12 +80,7 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { fn network(&self) -> &Self::Network; /// Returns the handle to the payload builder service. - fn payload_builder( - &self, - ) -> &PayloadBuilderHandle<::Engine>; - - /// Returns the engine validator. - fn engine_validator(&self) -> &Self::EngineValidator; + fn payload_builder(&self) -> &Self::PayloadBuilder; /// Returns the provider of the node. fn provider(&self) -> &Self::Provider; @@ -98,17 +90,17 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { } /// Context passed to [`NodeAddOns::launch_add_ons`], -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct AddOnsContext<'a, N: FullNodeComponents> { /// Node with all configured components. - pub node: &'a N, + pub node: N, /// Node configuration. pub config: &'a NodeConfig<::ChainSpec>, /// Handle to the beacon consensus engine. pub beacon_engine_handle: - &'a BeaconConsensusEngineHandle<::Engine>, + BeaconConsensusEngineHandle<::Engine>, /// JWT secret for the node. - pub jwt_secret: &'a JwtSecret, + pub jwt_secret: JwtSecret, } /// Customizable node add-on types. diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 53e53cd2b85..26d157e1e0c 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] ## reth -reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true reth-blockchain-tree.workspace = true reth-chain-state.workspace = true @@ -42,12 +41,11 @@ reth-node-core.workspace = true reth-node-events.workspace = true reth-node-metrics.workspace = true reth-payload-builder.workspace = true -reth-payload-primitives.workspace = true reth-payload-validator.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true -reth-rpc = { workspace = true, features = ["js-tracer"] } +reth-rpc.workspace = true reth-rpc-api.workspace = true reth-rpc-builder.workspace = true reth-rpc-engine-api.workspace = true @@ -63,6 +61,8 @@ reth-transaction-pool.workspace = true ## ethereum alloy-primitives.workspace = true alloy-rpc-types = { workspace = true, features = ["engine"] } +alloy-consensus.workspace = true +revm-primitives.workspace = true ## async futures.workspace = true @@ -96,4 +96,22 @@ tempfile.workspace = true [features] default = [] -test-utils = ["reth-db/test-utils"] +js-tracer = ["reth-rpc/js-tracer"] +test-utils = [ + "reth-db/test-utils", + "reth-blockchain-tree/test-utils", + "reth-chain-state/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-engine-tree/test-utils", + "reth-evm/test-utils", + "reth-downloaders/test-utils", + "reth-network/test-utils", + "reth-network-p2p/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-stages/test-utils", + "reth-db-api/test-utils", + 
"reth-provider/test-utils", + "reth-transaction-pool/test-utils", +] diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 82d8d96f6f5..e2b18f666c7 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -2,13 +2,6 @@ #![allow(clippy::type_complexity, missing_debug_implementations)] -pub mod add_ons; -mod states; - -pub use states::*; - -use std::sync::Arc; - use crate::{ common::WithConfigs, components::NodeComponentsBuilder, @@ -17,6 +10,7 @@ use crate::{ DefaultNodeLauncher, LaunchNode, Node, NodeHandle, }; use futures::Future; +use reth_blockchain_tree::externals::NodeTypesForTree; use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli_util::get_secret_key; use reth_db_api::{ @@ -29,8 +23,8 @@ use reth_network::{ NetworkHandle, NetworkManager, }; use reth_node_api::{ - FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, NodeTypesWithDBAdapter, - NodeTypesWithEngine, + FullNodePrimitives, FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, + NodeTypesWithDBAdapter, NodeTypesWithEngine, }; use reth_node_core::{ cli::config::{PayloadBuilderConfig, RethTransactionPoolConfig}, @@ -38,13 +32,22 @@ use reth_node_core::{ node_config::NodeConfig, primitives::Head, }; -use reth_primitives::revm_primitives::EnvKzgSettings; -use reth_provider::{providers::BlockchainProvider, ChainSpecProvider, FullProvider}; +use reth_provider::{ + providers::{BlockchainProvider, NodeTypesForProvider}, + BlockReader, ChainSpecProvider, FullProvider, +}; use reth_tasks::TaskExecutor; -use reth_transaction_pool::{PoolConfig, TransactionPool}; +use reth_transaction_pool::{PoolConfig, PoolTransaction, TransactionPool}; +use revm_primitives::EnvKzgSettings; use secp256k1::SecretKey; +use std::sync::Arc; use tracing::{info, trace, warn}; +pub mod add_ons; + +mod states; +pub use states::*; + /// The adapter type for a reth node with the builtin provider type // Note: we need to hardcode this because custom components might depend on it in associated types. pub type RethFullAdapter = FullNodeTypesAdapter< @@ -79,7 +82,7 @@ pub type RethFullAdapter = FullNodeTypesAdapter< /// configured components and can interact with the node. /// /// There are convenience functions for networks that come with a preset of types and components via -/// the [Node] trait, see `reth_node_ethereum::EthereumNode` or `reth_optimism_node::OptimismNode`. +/// the [`Node`] trait, see `reth_node_ethereum::EthereumNode` or `reth_optimism_node::OpNode`. /// /// The [`NodeBuilder::node`] function configures the node's types and components in one step. /// @@ -241,7 +244,7 @@ where /// Configures the types of the node. pub fn with_types(self) -> NodeBuilderWithTypes> where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForTree, { self.with_types_and_provider() } @@ -251,7 +254,7 @@ where self, ) -> NodeBuilderWithTypes, P>> where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForProvider, P: FullProvider>, { NodeBuilderWithTypes::new(self.config, self.database) @@ -265,7 +268,7 @@ where node: N, ) -> NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns> where - N: Node, ChainSpec = ChainSpec>, + N: Node, ChainSpec = ChainSpec> + NodeTypesForTree, { self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons()) } @@ -302,7 +305,7 @@ where /// Configures the types of the node. 
pub fn with_types(self) -> WithLaunchContext>> where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForTree, { WithLaunchContext { builder: self.builder.with_types(), task_executor: self.task_executor } } @@ -314,7 +317,7 @@ where NodeBuilderWithTypes, P>>, > where - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForProvider, P: FullProvider>, { WithLaunchContext { @@ -333,7 +336,7 @@ where NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns>, > where - N: Node, ChainSpec = ChainSpec>, + N: Node, ChainSpec = ChainSpec> + NodeTypesForTree, { self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons()) } @@ -356,13 +359,14 @@ where >, > where - N: Node, ChainSpec = ChainSpec>, + N: Node, ChainSpec = ChainSpec> + NodeTypesForTree, N::AddOns: RethRpcAddOns< NodeAdapter< RethFullAdapter, >>::Components, >, >, + N::Primitives: FullNodePrimitives, { self.node(node).launch().await } @@ -550,7 +554,7 @@ where impl WithLaunchContext, CB, AO>> where DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - T: NodeTypesWithEngine, + T: NodeTypesWithEngine + NodeTypesForTree, CB: NodeComponentsBuilder>, AO: RethRpcAddOns, CB::Components>>, { @@ -646,7 +650,18 @@ impl BuilderContext { /// connected to that network. pub fn start_network(&self, builder: NetworkBuilder<(), ()>, pool: Pool) -> NetworkHandle where - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus = reth_primitives::TransactionSigned, + Pooled = reth_primitives::PooledTransactionsElement, + >, + > + Unpin + + 'static, + Node::Provider: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + >, { self.start_network_with(builder, pool, Default::default()) } @@ -664,7 +679,18 @@ impl BuilderContext { tx_config: TransactionsManagerConfig, ) -> NetworkHandle where - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus = reth_primitives::TransactionSigned, + Pooled = reth_primitives::PooledTransactionsElement, + >, + > + Unpin + + 'static, + Node::Provider: BlockReader< + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + >, { let (handle, network, txpool, eth) = builder .transactions(pool, tx_config) diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index c4da466f23e..fa12cc78b61 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -5,16 +5,6 @@ //! The node builder process is essentially a state machine that transitions through various states //! before the node can be launched. 
-use std::{fmt, future::Future}; - -use reth_exex::ExExContext; -use reth_node_api::{ - FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes, NodeTypesWithDB, NodeTypesWithEngine, -}; -use reth_node_core::node_config::NodeConfig; -use reth_payload_builder::PayloadBuilderHandle; -use reth_tasks::TaskExecutor; - use crate::{ components::{NodeComponents, NodeComponentsBuilder}, hooks::NodeHooks, @@ -22,6 +12,11 @@ use crate::{ rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext}, AddOns, FullNode, }; +use reth_exex::ExExContext; +use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes, NodeTypesWithDB}; +use reth_node_core::node_config::NodeConfig; +use reth_tasks::TaskExecutor; +use std::{fmt, future::Future}; /// A node builder that also has the configured types. pub struct NodeBuilderWithTypes { @@ -95,9 +90,9 @@ impl> FullNodeComponents for NodeAdapter< type Pool = C::Pool; type Evm = C::Evm; type Executor = C::Executor; - type Network = C::Network; type Consensus = C::Consensus; - type EngineValidator = C::EngineValidator; + type Network = C::Network; + type PayloadBuilder = C::PayloadBuilder; fn pool(&self) -> &Self::Pool { self.components.pool() @@ -111,28 +106,24 @@ impl> FullNodeComponents for NodeAdapter< self.components.block_executor() } - fn provider(&self) -> &Self::Provider { - &self.provider + fn consensus(&self) -> &Self::Consensus { + self.components.consensus() } fn network(&self) -> &Self::Network { self.components.network() } - fn payload_builder(&self) -> &PayloadBuilderHandle<::Engine> { + fn payload_builder(&self) -> &Self::PayloadBuilder { self.components.payload_builder() } - fn task_executor(&self) -> &TaskExecutor { - &self.task_executor - } - - fn consensus(&self) -> &Self::Consensus { - self.components.consensus() + fn provider(&self) -> &Self::Provider { + &self.provider } - fn engine_validator(&self) -> &Self::EngineValidator { - self.components.engine_validator() + fn task_executor(&self) -> &TaskExecutor { + &self.task_executor } } diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index ab8e29929a9..7e2d0eb43cc 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -1,13 +1,5 @@ //! A generic [`NodeComponentsBuilder`] -use std::{future::Future, marker::PhantomData}; - -use reth_consensus::Consensus; -use reth_evm::execute::BlockExecutorProvider; -use reth_node_api::{EngineValidator, NodeTypesWithEngine}; -use reth_primitives::Header; -use reth_transaction_pool::TransactionPool; - use crate::{ components::{ Components, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, NodeComponents, @@ -15,8 +7,12 @@ use crate::{ }, BuilderContext, ConfigureEvm, FullNodeTypes, }; - -use super::EngineValidatorBuilder; +use reth_consensus::FullConsensus; +use reth_evm::execute::BlockExecutorProvider; +use reth_node_api::{HeaderTy, NodeTypes, NodeTypesWithEngine, TxTy}; +use reth_payload_builder::PayloadBuilderHandle; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use std::{future::Future, marker::PhantomData}; /// A generic, general purpose and customizable [`NodeComponentsBuilder`] implementation. /// @@ -38,23 +34,22 @@ use super::EngineValidatorBuilder; /// All component builders are captured in the builder state and will be consumed once the node is /// launched. 
#[derive(Debug)] -pub struct ComponentsBuilder { +pub struct ComponentsBuilder { pool_builder: PoolB, payload_builder: PayloadB, network_builder: NetworkB, executor_builder: ExecB, consensus_builder: ConsB, - engine_validator_builder: EVB, _marker: PhantomData, } -impl - ComponentsBuilder +impl + ComponentsBuilder { /// Configures the node types. pub fn node_types( self, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where Types: FullNodeTypes, { @@ -64,7 +59,6 @@ impl network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } = self; ComponentsBuilder { @@ -73,7 +67,6 @@ impl payload_builder, network_builder, consensus_builder, - engine_validator_builder, _marker: Default::default(), } } @@ -86,7 +79,6 @@ impl network_builder: self.network_builder, executor_builder: self.executor_builder, consensus_builder: self.consensus_builder, - engine_validator_builder: self.engine_validator_builder, _marker: self._marker, } } @@ -99,7 +91,6 @@ impl network_builder: self.network_builder, executor_builder: self.executor_builder, consensus_builder: self.consensus_builder, - engine_validator_builder: self.engine_validator_builder, _marker: self._marker, } } @@ -112,7 +103,6 @@ impl network_builder: f(self.network_builder), executor_builder: self.executor_builder, consensus_builder: self.consensus_builder, - engine_validator_builder: self.engine_validator_builder, _marker: self._marker, } } @@ -125,7 +115,6 @@ impl network_builder: self.network_builder, executor_builder: f(self.executor_builder), consensus_builder: self.consensus_builder, - engine_validator_builder: self.engine_validator_builder, _marker: self._marker, } } @@ -138,14 +127,13 @@ impl network_builder: self.network_builder, executor_builder: self.executor_builder, consensus_builder: f(self.consensus_builder), - engine_validator_builder: self.engine_validator_builder, _marker: self._marker, } } } -impl - ComponentsBuilder +impl + ComponentsBuilder where Node: FullNodeTypes, { @@ -156,7 +144,7 @@ where pub fn pool( self, pool_builder: PB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where PB: PoolBuilder, { @@ -166,7 +154,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } = self; ComponentsBuilder { @@ -175,14 +162,13 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } } } -impl - ComponentsBuilder +impl + ComponentsBuilder where Node: FullNodeTypes, PoolB: PoolBuilder, @@ -194,7 +180,7 @@ where pub fn network( self, network_builder: NB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where NB: NetworkBuilder, { @@ -204,7 +190,6 @@ where network_builder: _, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } = self; ComponentsBuilder { @@ -213,7 +198,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } } @@ -225,7 +209,7 @@ where pub fn payload( self, payload_builder: PB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where PB: PayloadServiceBuilder, { @@ -235,7 +219,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } = self; ComponentsBuilder { @@ -244,7 +227,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } } @@ -256,7 +238,7 @@ where pub fn executor( self, executor_builder: EB, - ) -> ComponentsBuilder + ) -> 
ComponentsBuilder where EB: ExecutorBuilder, { @@ -266,7 +248,6 @@ where network_builder, executor_builder: _, consensus_builder, - engine_validator_builder, _marker, } = self; ComponentsBuilder { @@ -275,7 +256,6 @@ where network_builder, executor_builder, consensus_builder, - engine_validator_builder, _marker, } } @@ -287,7 +267,7 @@ where pub fn consensus( self, consensus_builder: CB, - ) -> ComponentsBuilder + ) -> ComponentsBuilder where CB: ConsensusBuilder, { @@ -297,38 +277,7 @@ where network_builder, executor_builder, consensus_builder: _, - engine_validator_builder, - _marker, - } = self; - ComponentsBuilder { - pool_builder, - payload_builder, - network_builder, - executor_builder, - consensus_builder, - engine_validator_builder, - _marker, - } - } - /// Configures the consensus builder. - /// - /// This accepts a [`ConsensusBuilder`] instance that will be used to create the node's - /// components for consensus. - pub fn engine_validator( - self, - engine_validator_builder: EngineVB, - ) -> ComponentsBuilder - where - EngineVB: EngineValidatorBuilder, - { - let Self { - pool_builder, - payload_builder, - network_builder, - executor_builder, - consensus_builder, - engine_validator_builder: _, _marker, } = self; ComponentsBuilder { @@ -337,14 +286,13 @@ where network_builder, executor_builder, consensus_builder, - engine_validator_builder, _marker, } } } -impl NodeComponentsBuilder - for ComponentsBuilder +impl NodeComponentsBuilder + for ComponentsBuilder where Node: FullNodeTypes, PoolB: PoolBuilder, @@ -352,16 +300,8 @@ where PayloadB: PayloadServiceBuilder, ExecB: ExecutorBuilder, ConsB: ConsensusBuilder, - EVB: EngineValidatorBuilder, { - type Components = Components< - Node, - PoolB::Pool, - ExecB::EVM, - ExecB::Executor, - ConsB::Consensus, - EVB::Validator, - >; + type Components = Components; async fn build_components( self, @@ -373,7 +313,6 @@ where network_builder, executor_builder: evm_builder, consensus_builder, - engine_validator_builder, _marker, } = self; @@ -382,7 +321,6 @@ where let network = network_builder.build_network(context, pool.clone()).await?; let payload_builder = payload_builder.spawn_payload_service(context, pool.clone()).await?; let consensus = consensus_builder.build_consensus(context).await?; - let engine_validator = engine_validator_builder.build_validator(context).await?; Ok(Components { transaction_pool: pool, @@ -391,12 +329,11 @@ where payload_builder, executor, consensus, - engine_validator, }) } } -impl Default for ComponentsBuilder<(), (), (), (), (), (), ()> { +impl Default for ComponentsBuilder<(), (), (), (), (), ()> { fn default() -> Self { Self { pool_builder: (), @@ -404,7 +341,6 @@ impl Default for ComponentsBuilder<(), (), (), (), (), (), ()> { network_builder: (), executor_builder: (), consensus_builder: (), - engine_validator_builder: (), _marker: Default::default(), } } @@ -421,7 +357,10 @@ impl Default for ComponentsBuilder<(), (), (), (), (), (), ()> { /// A type that's responsible for building the components of the node. pub trait NodeComponentsBuilder: Send { /// The components for the node with the given types - type Components: NodeComponents; + type Components: NodeComponents< + Node, + PayloadBuilder = PayloadBuilderHandle<::Engine>, + >; /// Consumes the type and returns the created components. 
fn build_components( @@ -430,18 +369,19 @@ pub trait NodeComponentsBuilder: Send { ) -> impl Future> + Send; } -impl NodeComponentsBuilder for F +impl NodeComponentsBuilder for F where Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future>> + Send, - Pool: TransactionPool + Unpin + 'static, - EVM: ConfigureEvm
<Header = Header>,
-    Executor: BlockExecutorProvider,
-    Cons: Consensus + Clone + Unpin + 'static,
-    Val: EngineValidator<<Node::Types as NodeTypesWithEngine>::Engine> + Clone + Unpin + 'static,
+    Fut: Future<Output = eyre::Result<Components<Node, Pool, EVM, Executor, Cons>>> + Send,
+    Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<Node::Types>>>
+        + Unpin
+        + 'static,
+    EVM: ConfigureEvm<Header = HeaderTy<Node::Types>
, Transaction = TxTy>, + Executor: BlockExecutorProvider::Primitives>, + Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, { - type Components = Components; + type Components = Components; fn build_components( self, diff --git a/crates/node/builder/src/components/consensus.rs b/crates/node/builder/src/components/consensus.rs index 6c90bda5475..074080d337b 100644 --- a/crates/node/builder/src/components/consensus.rs +++ b/crates/node/builder/src/components/consensus.rs @@ -1,11 +1,16 @@ //! Consensus component for the node builder. +use reth_node_api::NodeTypes; + use crate::{BuilderContext, FullNodeTypes}; use std::future::Future; /// A type that knows how to build the consensus implementation. pub trait ConsensusBuilder: Send { /// The consensus implementation to build. - type Consensus: reth_consensus::Consensus + Clone + Unpin + 'static; + type Consensus: reth_consensus::FullConsensus<::Primitives> + + Clone + + Unpin + + 'static; /// Creates the consensus implementation. fn build_consensus( @@ -17,7 +22,10 @@ pub trait ConsensusBuilder: Send { impl ConsensusBuilder for F where Node: FullNodeTypes, - Consensus: reth_consensus::Consensus + Clone + Unpin + 'static, + Consensus: reth_consensus::FullConsensus<::Primitives> + + Clone + + Unpin + + 'static, F: FnOnce(&BuilderContext) -> Fut + Send, Fut: Future> + Send, { diff --git a/crates/node/builder/src/components/engine.rs b/crates/node/builder/src/components/engine.rs deleted file mode 100644 index b3ee7cbbbf2..00000000000 --- a/crates/node/builder/src/components/engine.rs +++ /dev/null @@ -1,38 +0,0 @@ -//! Consensus component for the node builder. -use reth_node_api::{EngineValidator, NodeTypesWithEngine}; - -use crate::{BuilderContext, FullNodeTypes}; -use std::future::Future; - -/// A type that knows how to build the engine validator. -pub trait EngineValidatorBuilder: Send { - /// The consensus implementation to build. - type Validator: EngineValidator<::Engine> - + Clone - + Unpin - + 'static; - - /// Creates the engine validator. - fn build_validator( - self, - ctx: &BuilderContext, - ) -> impl Future> + Send; -} - -impl EngineValidatorBuilder for F -where - Node: FullNodeTypes, - Validator: - EngineValidator<::Engine> + Clone + Unpin + 'static, - F: FnOnce(&BuilderContext) -> Fut + Send, - Fut: Future> + Send, -{ - type Validator = Validator; - - fn build_validator( - self, - ctx: &BuilderContext, - ) -> impl Future> { - self(ctx) - } -} diff --git a/crates/node/builder/src/components/execute.rs b/crates/node/builder/src/components/execute.rs index 90cff588f7c..e3226fa8e37 100644 --- a/crates/node/builder/src/components/execute.rs +++ b/crates/node/builder/src/components/execute.rs @@ -1,8 +1,7 @@ //! EVM component for the node builder. use crate::{BuilderContext, FullNodeTypes}; use reth_evm::execute::BlockExecutorProvider; -use reth_node_api::ConfigureEvm; -use reth_primitives::Header; +use reth_node_api::{ConfigureEvm, HeaderTy, TxTy}; use std::future::Future; /// A type that knows how to build the executor types. @@ -10,10 +9,12 @@ pub trait ExecutorBuilder: Send { /// The EVM config to use. /// /// This provides the node with the necessary configuration to configure an EVM. - type EVM: ConfigureEvm
<Header = Header>;
+    type EVM: ConfigureEvm<Header = HeaderTy<Node::Types>
, Transaction = TxTy>; /// The type that knows how to execute blocks. - type Executor: BlockExecutorProvider; + type Executor: BlockExecutorProvider< + Primitives = ::Primitives, + >; /// Creates the EVM config. fn build_evm( @@ -25,8 +26,9 @@ pub trait ExecutorBuilder: Send { impl ExecutorBuilder for F where Node: FullNodeTypes, - EVM: ConfigureEvm
<Header = Header>,
-    Executor: BlockExecutorProvider,
+    EVM: ConfigureEvm<Header = HeaderTy<Node::Types>
, Transaction = TxTy>, + Executor: + BlockExecutorProvider::Primitives>, F: FnOnce(&BuilderContext) -> Fut + Send, Fut: Future> + Send, { diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index ff1646593ed..d62e74bda29 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -9,7 +9,6 @@ mod builder; mod consensus; -mod engine; mod execute; mod network; mod payload; @@ -17,22 +16,19 @@ mod pool; pub use builder::*; pub use consensus::*; -pub use engine::*; pub use execute::*; pub use network::*; pub use payload::*; pub use pool::*; -use reth_consensus::Consensus; +use crate::{ConfigureEvm, FullNodeTypes}; +use reth_consensus::FullConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkHandle; use reth_network_api::FullNetwork; -use reth_node_api::{EngineValidator, NodeTypesWithEngine}; +use reth_node_api::{HeaderTy, NodeTypes, NodeTypesWithEngine, PayloadBuilder, TxTy}; use reth_payload_builder::PayloadBuilderHandle; -use reth_primitives::Header; -use reth_transaction_pool::TransactionPool; - -use crate::{ConfigureEvm, FullNodeTypes}; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; /// An abstraction over the components of a node, consisting of: /// - evm and executor @@ -41,22 +37,23 @@ use crate::{ConfigureEvm, FullNodeTypes}; /// - payload builder. pub trait NodeComponents: Clone + Unpin + Send + Sync + 'static { /// The transaction pool of the node. - type Pool: TransactionPool + Unpin; + type Pool: TransactionPool>> + Unpin; /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. - type Evm: ConfigureEvm
<Header = Header>;
+    type Evm: ConfigureEvm<Header = HeaderTy<T::Types>
, Transaction = TxTy>; /// The type that knows how to execute blocks. - type Executor: BlockExecutorProvider; + type Executor: BlockExecutorProvider::Primitives>; /// The consensus type of the node. - type Consensus: Consensus + Clone + Unpin + 'static; + type Consensus: FullConsensus<::Primitives> + Clone + Unpin + 'static; /// Network API. type Network: FullNetwork; - /// Validator for the engine API. - type EngineValidator: EngineValidator<::Engine>; + /// Builds new blocks. + type PayloadBuilder: PayloadBuilder::Engine> + + Clone; /// Returns the transaction pool of the node. fn pool(&self) -> &Self::Pool; @@ -74,17 +71,14 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati fn network(&self) -> &Self::Network; /// Returns the handle to the payload builder service. - fn payload_builder(&self) -> &PayloadBuilderHandle<::Engine>; - - /// Returns the engine validator. - fn engine_validator(&self) -> &Self::EngineValidator; + fn payload_builder(&self) -> &Self::PayloadBuilder; } /// All the components of the node. /// /// This provides access to all the components of the node. #[derive(Debug)] -pub struct Components { +pub struct Components { /// The transaction pool of the node. pub transaction_pool: Pool, /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine. @@ -97,26 +91,25 @@ pub struct Components::Engine>, - /// The validator for the engine API. - pub engine_validator: Validator, } -impl NodeComponents - for Components +impl NodeComponents + for Components where Node: FullNodeTypes, - Pool: TransactionPool + Unpin + 'static, - EVM: ConfigureEvm
<Header = Header>,
-    Executor: BlockExecutorProvider,
-    Cons: Consensus + Clone + Unpin + 'static,
-    Val: EngineValidator<<Node::Types as NodeTypesWithEngine>::Engine> + Clone + Unpin + 'static,
+    Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<Node::Types>>>
+        + Unpin
+        + 'static,
+    EVM: ConfigureEvm<Header = HeaderTy<Node::Types>
, Transaction = TxTy>, + Executor: BlockExecutorProvider::Primitives>, + Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, { type Pool = Pool; type Evm = EVM; type Executor = Executor; type Consensus = Cons; type Network = NetworkHandle; - type EngineValidator = Val; + type PayloadBuilder = PayloadBuilderHandle<::Engine>; fn pool(&self) -> &Self::Pool { &self.transaction_pool @@ -138,26 +131,18 @@ where &self.network } - fn payload_builder( - &self, - ) -> &PayloadBuilderHandle<::Engine> { + fn payload_builder(&self) -> &Self::PayloadBuilder { &self.payload_builder } - - fn engine_validator(&self) -> &Self::EngineValidator { - &self.engine_validator - } } -impl Clone - for Components +impl Clone for Components where Node: FullNodeTypes, Pool: TransactionPool, - EVM: ConfigureEvm
<Header = Header>,
+    EVM: ConfigureEvm<Header = HeaderTy<Node::Types>
, Transaction = TxTy>, Executor: BlockExecutorProvider, - Cons: Consensus + Clone, - Val: EngineValidator<::Engine>, + Cons: Clone, { fn clone(&self) -> Self { Self { @@ -167,7 +152,6 @@ where consensus: self.consensus.clone(), network: self.network.clone(), payload_builder: self.payload_builder.clone(), - engine_validator: self.engine_validator.clone(), } } } diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs index 436a80c52e0..5b08e0a7739 100644 --- a/crates/node/builder/src/components/pool.rs +++ b/crates/node/builder/src/components/pool.rs @@ -1,7 +1,8 @@ //! Pool component for the node builder. use alloy_primitives::Address; -use reth_transaction_pool::{PoolConfig, SubPoolLimit, TransactionPool}; +use reth_node_api::TxTy; +use reth_transaction_pool::{PoolConfig, PoolTransaction, SubPoolLimit, TransactionPool}; use std::{collections::HashSet, future::Future}; use crate::{BuilderContext, FullNodeTypes}; @@ -9,7 +10,9 @@ use crate::{BuilderContext, FullNodeTypes}; /// A type that knows how to build the transaction pool. pub trait PoolBuilder: Send { /// The transaction pool to build. - type Pool: TransactionPool + Unpin + 'static; + type Pool: TransactionPool>> + + Unpin + + 'static; /// Creates the transaction pool. fn build_pool( @@ -21,7 +24,9 @@ pub trait PoolBuilder: Send { impl PoolBuilder for F where Node: FullNodeTypes, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, F: FnOnce(&BuilderContext) -> Fut + Send, Fut: Future> + Send, { diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index ac2339fa6cf..62226cb0b1c 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -2,26 +2,29 @@ use std::{sync::Arc, thread::available_parallelism}; +use crate::{ + components::{NodeComponents, NodeComponentsBuilder}, + hooks::OnComponentInitializedHook, + BuilderContext, NodeAdapter, +}; use alloy_primitives::{BlockNumber, B256}; use eyre::{Context, OptionExt}; use rayon::ThreadPoolBuilder; -use reth_auto_seal_consensus::MiningMode; -use reth_beacon_consensus::EthBeaconConsensus; -use reth_blockchain_tree::{ - BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, -}; use reth_chainspec::{Chain, EthChainSpec, EthereumHardforks}; use reth_config::{config::EtlConfig, PruneConfig}; -use reth_consensus::Consensus; -use reth_db_api::database::Database; -use reth_db_common::init::{init_genesis, InitDatabaseError}; +use reth_consensus::noop::NoopConsensus; +use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; +use reth_db_common::init::{init_genesis, InitStorageError}; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; +use reth_engine_local::MiningMode; use reth_engine_tree::tree::{InvalidBlockHook, InvalidBlockHooks, NoopInvalidBlockHook}; use reth_evm::noop::NoopBlockExecutorProvider; use reth_fs_util as fs; use reth_invalid_block_hooks::InvalidBlockWitnessHook; use reth_network_p2p::headers::client::HeadersClient; -use reth_node_api::{FullNodeTypes, NodeTypes, NodeTypesWithDB}; +use reth_node_api::{ + FullNodePrimitives, FullNodeTypes, NodePrimitives, NodeTypes, NodeTypesWithDB, +}; use reth_node_core::{ args::InvalidBlockHookType, dirs::{ChainPath, DataDirPath}, @@ -34,15 +37,15 @@ use reth_node_core::{ use reth_node_metrics::{ chain::ChainSpecInfo, hooks::Hooks, + recorder::install_prometheus_recorder, server::{MetricServer, 
MetricServerConfig}, version::VersionInfo, }; -use reth_primitives::Head; +use reth_primitives::{Head, TransactionSigned}; use reth_provider::{ - providers::{BlockchainProvider, BlockchainProvider2, ProviderNodeTypes, StaticFileProvider}, - BlockHashReader, CanonStateNotificationSender, ChainSpecProvider, ProviderFactory, + providers::{ProviderNodeTypes, StaticFileProvider}, + BlockHashReader, BlockNumReader, ChainSpecProvider, ProviderError, ProviderFactory, ProviderResult, StageCheckpointReader, StateProviderFactory, StaticFileProviderFactory, - TreeViewer, }; use reth_prune::{PruneModes, PrunerBuilder}; use reth_rpc_api::clients::EthApiClient; @@ -52,38 +55,12 @@ use reth_stages::{sets::DefaultStages, MetricEvent, PipelineBuilder, PipelineTar use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, error, info, warn}; +use reth_transaction_pool::TransactionPool; use tokio::sync::{ - mpsc::{unbounded_channel, Receiver, UnboundedSender}, + mpsc::{unbounded_channel, UnboundedSender}, oneshot, watch, }; -use crate::{ - components::{NodeComponents, NodeComponentsBuilder}, - hooks::OnComponentInitializedHook, - BuilderContext, NodeAdapter, -}; - -/// Allows to set a tree viewer for a configured blockchain provider. -// TODO: remove this helper trait once the engine revamp is done, the new -// blockchain provider won't require a TreeViewer. -// https://github.com/paradigmxyz/reth/issues/8742 -pub trait WithTree { - /// Setter for tree viewer. - fn set_tree(self, tree: Arc) -> Self; -} - -impl WithTree for BlockchainProvider { - fn set_tree(self, tree: Arc) -> Self { - self.with_tree(tree) - } -} - -impl WithTree for BlockchainProvider2 { - fn set_tree(self, _tree: Arc) -> Self { - self - } -} - /// Reusable setup for launching a node. /// /// This provides commonly used boilerplate for launching a node. @@ -386,13 +363,11 @@ impl LaunchContextWith) -> MiningMode { + pub fn dev_mining_mode(&self, pool: impl TransactionPool) -> MiningMode { if let Some(interval) = self.node_config().dev.block_time { MiningMode::interval(interval) - } else if let Some(max_transactions) = self.node_config().dev.block_max_transactions { - MiningMode::instant(max_transactions, pending_transactions_listener) } else { - MiningMode::instant(1, pending_transactions_listener) + MiningMode::instant(pool) } } } @@ -405,9 +380,11 @@ where /// Returns the [`ProviderFactory`] for the attached storage after executing a consistent check /// between the database and static files. **It may execute a pipeline unwind if it fails this /// check.** - pub async fn create_provider_factory>( - &self, - ) -> eyre::Result> { + pub async fn create_provider_factory(&self) -> eyre::Result> + where + N: ProviderNodeTypes, + N::Primitives: FullNodePrimitives, + { let factory = ProviderFactory::new( self.right().clone(), self.chain_spec(), @@ -417,7 +394,7 @@ where .with_static_files_metrics(); let has_receipt_pruning = - self.toml_config().prune.as_ref().map_or(false, |a| a.has_receipts_pruning()); + self.toml_config().prune.as_ref().is_some_and(|a| a.has_receipts_pruning()); // Check for consistency between database and static files. If it fails, it unwinds to // the first block that's consistent between database and static files. 
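The hunk above wires the provider factory's consistency check: the database and the static files must agree on a tip, and if they do not, the pipeline unwinds to the first block both sides share. A minimal, self-contained sketch of that reconcile-then-unwind decision; `reconcile`, `db_tip`, and `static_file_tip` are hypothetical stand-ins, not reth APIs:

```rust
/// Hypothetical mirror of the database/static-file consistency check above:
/// returns the block to unwind to, or `None` when storage is already consistent.
fn reconcile(db_tip: u64, static_file_tip: u64) -> Option<u64> {
    // Unwind to the lower of the two tips, i.e. the first block that is
    // consistent between the database and the static files.
    (db_tip != static_file_tip).then(|| db_tip.min(static_file_tip))
}

fn main() {
    // Database ran ahead of the static files: unwind the pipeline to block 90.
    assert_eq!(reconcile(100, 90), Some(90));
    // Both agree: start the node without unwinding.
    assert_eq!(reconcile(90, 90), None);
}
```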
@@ -438,10 +415,10 @@ where .add_stages(DefaultStages::new( factory.clone(), tip_rx, - Arc::new(EthBeaconConsensus::new(self.chain_spec())), + Arc::new(NoopConsensus::default()), NoopHeaderDownloader::default(), NoopBodiesDownloader::default(), - NoopBlockExecutorProvider::default(), + NoopBlockExecutorProvider::::default(), self.toml_config().stages.clone(), self.prune_modes(), )) @@ -468,9 +445,13 @@ where } /// Creates a new [`ProviderFactory`] and attaches it to the launch context. - pub async fn with_provider_factory>( + pub async fn with_provider_factory( self, - ) -> eyre::Result, ProviderFactory>>> { + ) -> eyre::Result, ProviderFactory>>> + where + N: ProviderNodeTypes, + N::Primitives: FullNodePrimitives, + { let factory = self.create_provider_factory().await?; let ctx = LaunchContextWith { inner: self.inner, @@ -483,7 +464,7 @@ where impl LaunchContextWith, ProviderFactory>> where - T: NodeTypesWithDB, + T: ProviderNodeTypes, { /// Returns access to the underlying database. pub const fn database(&self) -> &T::DB { @@ -496,7 +477,7 @@ where } /// Returns the static file provider to interact with the static files. - pub fn static_file_provider(&self) -> StaticFileProvider { + pub fn static_file_provider(&self) -> StaticFileProvider { self.right().static_file_provider() } @@ -510,6 +491,9 @@ where /// Starts the prometheus endpoint. pub async fn start_prometheus_endpoint(&self) -> eyre::Result<()> { + // ensure recorder runs upkeep periodically + install_prometheus_recorder().spawn_upkeep(); + let listen_addr = self.node_config().metrics; if let Some(addr) = listen_addr { info!(target: "reth::cli", "Starting metrics endpoint at {}", addr); @@ -525,7 +509,20 @@ where }, ChainSpecInfo { name: self.left().config.chain.chain().to_string() }, self.task_executor().clone(), - Hooks::new(self.database().clone(), self.static_file_provider()), + Hooks::builder() + .with_hook({ + let db = self.database().clone(); + move || db.report_metrics() + }) + .with_hook({ + let sfp = self.static_file_provider(); + move || { + if let Err(error) = sfp.report_metrics() { + error!(%error, "Failed to report metrics for the static file provider"); + } + } + }) + .build(), ); MetricServer::new(config).serve().await?; @@ -535,13 +532,13 @@ where } /// Convenience function to [`Self::init_genesis`] - pub fn with_genesis(self) -> Result { + pub fn with_genesis(self) -> Result { init_genesis(self.provider_factory())?; Ok(self) } /// Write the genesis block and state if it has not already been written - pub fn init_genesis(&self) -> Result { + pub fn init_genesis(&self) -> Result { init_genesis(self.provider_factory()) } @@ -588,8 +585,6 @@ where pub fn with_blockchain_db( self, create_blockchain_provider: F, - tree_config: BlockchainTreeConfig, - canon_state_notification_sender: CanonStateNotificationSender, ) -> eyre::Result, WithMeteredProviders>>> where T: FullNodeTypes, @@ -603,8 +598,6 @@ where metrics_sender: self.sync_metrics_tx(), }, blockchain_db, - tree_config, - canon_state_notification_sender, }; let ctx = LaunchContextWith { @@ -621,7 +614,7 @@ impl Attached::ChainSpec>, WithMeteredProviders>, > where - T: FullNodeTypes, + T: FullNodeTypes, { /// Returns access to the underlying database. pub const fn database(&self) -> &::DB { @@ -652,16 +645,6 @@ where &self.right().blockchain_db } - /// Returns a reference to the `BlockchainTreeConfig`. - pub const fn tree_config(&self) -> &BlockchainTreeConfig { - &self.right().tree_config - } - - /// Returns the `CanonStateNotificationSender`. 
- pub fn canon_state_notification_sender(&self) -> CanonStateNotificationSender { - self.right().canon_state_notification_sender.clone() - } - /// Creates a `NodeAdapter` and attaches it to the launch context. pub async fn with_components( self, @@ -690,31 +673,12 @@ where debug!(target: "reth::cli", "creating components"); let components = components_builder.build_components(&builder_ctx).await?; - let consensus: Arc = Arc::new(components.consensus().clone()); - - let tree_externals = TreeExternals::new( - self.provider_factory().clone().with_prune_modes(self.prune_modes()), - consensus.clone(), - components.block_executor().clone(), - ); - let tree = BlockchainTree::new(tree_externals, *self.tree_config())? - .with_sync_metrics_tx(self.sync_metrics_tx()) - // Note: This is required because we need to ensure that both the components and the - // tree are using the same channel for canon state notifications. This will be removed - // once the Blockchain provider no longer depends on an instance of the tree - .with_canon_state_notification_sender(self.canon_state_notification_sender()); - - let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); - - // Replace the tree component with the actual tree - let blockchain_db = self.blockchain_db().clone().set_tree(blockchain_tree); - - debug!(target: "reth::cli", "configured blockchain tree"); + let blockchain_db = self.blockchain_db().clone(); let node_adapter = NodeAdapter { components, task_executor: self.task_executor().clone(), - provider: blockchain_db.clone(), + provider: blockchain_db, }; debug!(target: "reth::cli", "calling on_component_initialized hook"); @@ -725,11 +689,8 @@ where provider_factory: self.provider_factory().clone(), metrics_sender: self.sync_metrics_tx(), }, - blockchain_db, - tree_config: self.right().tree_config, node_adapter, head, - consensus, }; let ctx = LaunchContextWith { @@ -746,10 +707,7 @@ impl Attached::ChainSpec>, WithComponents>, > where - T: FullNodeTypes< - Provider: WithTree, - Types: NodeTypes, - >, + T: FullNodeTypes, CB: NodeComponentsBuilder, { /// Returns the configured `ProviderFactory`. @@ -761,13 +719,13 @@ where /// necessary pub async fn max_block(&self, client: C) -> eyre::Result> where - C: HeadersClient, + C: HeadersClient
, { self.node_config().max_block(client, self.provider_factory().clone()).await } /// Returns the static file provider to interact with the static files. - pub fn static_file_provider(&self) -> StaticFileProvider { + pub fn static_file_provider(&self) -> StaticFileProvider<::Primitives> { self.provider_factory().static_file_provider() } @@ -786,9 +744,14 @@ where &self.right().node_adapter } + /// Returns mutable reference to the configured `NodeAdapter`. + pub fn node_adapter_mut(&mut self) -> &mut NodeAdapter { + &mut self.right_mut().node_adapter + } + /// Returns a reference to the blockchain provider. pub const fn blockchain_db(&self) -> &T::Provider { - &self.right().blockchain_db + &self.node_adapter().provider } /// Returns the initial backfill to sync to at launch. @@ -815,6 +778,26 @@ where self.node_config().debug.terminate || self.node_config().debug.max_block.is_some() } + /// Ensures that the database matches chain-specific requirements. + /// + /// This checks for OP-Mainnet and ensures we have all the necessary data to progress (past + /// bedrock height) + fn ensure_chain_specific_db_checks(&self) -> ProviderResult<()> { + if self.chain_spec().is_optimism() && + !self.is_dev() && + self.chain_id() == Chain::optimism_mainnet() + { + let latest = self.blockchain_db().last_block_number()?; + // bedrock height + if latest < 105235063 { + error!("Op-mainnet has been launched without importing the pre-Bedrock state. The chain can't progress without this. See also https://reth.rs/run/sync-op-mainnet.html?minimal-bootstrap-recommended"); + return Err(ProviderError::BestBlockNotFound) + } + } + + Ok(()) + } + /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less /// than the checkpoint of the first stage). /// @@ -858,12 +841,9 @@ where } } - Ok(None) - } + self.ensure_chain_specific_db_checks()?; - /// Returns the configured `Consensus`. - pub fn consensus(&self) -> Arc { - self.right().consensus.clone() + Ok(None) } /// Returns the metrics sender. @@ -871,11 +851,6 @@ where self.right().db_provider_container.metrics_sender.clone() } - /// Returns a reference to the `BlockchainTreeConfig`. - pub const fn tree_config(&self) -> &BlockchainTreeConfig { - &self.right().tree_config - } - /// Returns the node adapter components. pub const fn components(&self) -> &CB::Components { &self.node_adapter().components @@ -888,13 +863,15 @@ impl > where T: FullNodeTypes< - Provider: WithTree + StateProviderFactory + ChainSpecProvider, - Types: NodeTypes, + Provider: StateProviderFactory + ChainSpecProvider, + Types: ProviderNodeTypes>, >, CB: NodeComponentsBuilder, { /// Returns the [`InvalidBlockHook`] to use for the node. - pub fn invalid_block_hook(&self) -> eyre::Result> { + pub fn invalid_block_hook( + &self, + ) -> eyre::Result::Primitives>>> { let Some(ref hook) = self.node_config().debug.invalid_block_hook else { return Ok(Box::new(NoopInvalidBlockHook::default())) }; @@ -918,7 +895,7 @@ where InvalidBlockHookType::PreState | InvalidBlockHookType::Opcode => { eyre::bail!("invalid block hook {hook:?} is not implemented yet") } - } as Box) + } as Box>) }) .collect::>()?; @@ -940,6 +917,7 @@ where alloy_rpc_types::Transaction, alloy_rpc_types::Block, alloy_rpc_types::Receipt, + alloy_rpc_types::Header, >::chain_id(&client) .await })? 
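The `ensure_chain_specific_db_checks` helper added earlier in this hunk refuses to start an OP-Mainnet node whose database stops short of the Bedrock transition, since the chain cannot progress without the imported pre-Bedrock state. A minimal sketch of that guard; `check_op_mainnet` and the `String` error are illustrative stand-ins for the real `ProviderError` plumbing:

```rust
/// Bedrock transition height referenced by the check above.
const BEDROCK_HEIGHT: u64 = 105_235_063;

/// Hypothetical mirror of the OP-Mainnet pre-flight check: error out when the
/// local tip is below Bedrock on a non-dev OP-Mainnet node.
fn check_op_mainnet(is_op_mainnet: bool, is_dev: bool, latest_block: u64) -> Result<(), String> {
    if is_op_mainnet && !is_dev && latest_block < BEDROCK_HEIGHT {
        // The real code returns `ProviderError::BestBlockNotFound` and points
        // at the op-mainnet sync documentation.
        return Err("OP-Mainnet launched without the pre-Bedrock state import".to_string());
    }
    Ok(())
}

fn main() {
    assert!(check_op_mainnet(true, false, 1).is_err());
    assert!(check_op_mainnet(true, false, BEDROCK_HEIGHT).is_ok());
    // Dev chains are exempt from the check.
    assert!(check_op_mainnet(true, true, 1).is_ok());
}
```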
@@ -1022,7 +1000,7 @@ pub struct WithMeteredProvider { metrics_sender: UnboundedSender, } -/// Helper container to bundle the [`ProviderFactory`], [`BlockchainProvider`] +/// Helper container to bundle the [`ProviderFactory`], [`FullNodeTypes::Provider`] /// and a metrics sender. #[allow(missing_debug_implementations)] pub struct WithMeteredProviders @@ -1031,8 +1009,6 @@ where { db_provider_container: WithMeteredProvider, blockchain_db: T::Provider, - canon_state_notification_sender: CanonStateNotificationSender, - tree_config: BlockchainTreeConfig, } /// Helper container to bundle the metered providers container and [`NodeAdapter`]. @@ -1043,11 +1019,8 @@ where CB: NodeComponentsBuilder, { db_provider_container: WithMeteredProvider, - tree_config: BlockchainTreeConfig, - blockchain_db: T::Provider, node_adapter: NodeAdapter, head: Head, - consensus: Arc, } #[cfg(test)] diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 3de651cdcd0..054def94e50 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -3,12 +3,11 @@ use futures::{future::Either, stream, stream_select, StreamExt}; use reth_beacon_consensus::{ hooks::{EngineHooks, StaticFileHook}, - BeaconConsensusEngineHandle, + BeaconConsensusEngineHandle, EngineNodeTypes, }; -use reth_blockchain_tree::BlockchainTreeConfig; use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider}; -use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder, MiningMode}; +use reth_engine_local::{LocalEngineService, LocalPayloadAttributesBuilder}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ engine::{EngineApiRequest, EngineRequestHandler}, @@ -17,9 +16,10 @@ use reth_engine_tree::{ use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; use reth_network::{NetworkSyncUpdater, SyncState}; -use reth_network_api::{BlockDownloaderProvider, NetworkEventListenerProvider}; +use reth_network_api::BlockDownloaderProvider; use reth_node_api::{ - BuiltPayload, FullNodeTypes, NodeTypesWithEngine, PayloadAttributesBuilder, PayloadTypes, + BlockTy, BuiltPayload, EngineValidator, FullNodeTypes, NodeTypesWithEngine, + PayloadAttributesBuilder, PayloadBuilder, PayloadTypes, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, @@ -27,9 +27,8 @@ use reth_node_core::{ primitives::Head, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_payload_primitives::PayloadBuilder; use reth_primitives::EthereumHardforks; -use reth_provider::providers::{BlockchainProvider2, ProviderNodeTypes}; +use reth_provider::providers::BlockchainProvider2; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; use reth_tracing::tracing::{debug, error, info}; @@ -40,7 +39,7 @@ use tokio_stream::wrappers::UnboundedReceiverStream; use crate::{ common::{Attached, LaunchContextWith, WithConfigs}, hooks::NodeHooks, - rpc::{RethRpcAddOns, RpcHandle}, + rpc::{EngineValidatorAddOn, RethRpcAddOns, RpcHandle}, setup::build_networked_pipeline, AddOns, AddOnsContext, ExExLauncher, FullNode, LaunchContext, LaunchNode, NodeAdapter, NodeBuilderWithComponents, NodeComponents, NodeComponentsBuilder, NodeHandle, NodeTypesAdapter, @@ -70,10 +69,15 @@ impl EngineNodeLauncher { impl LaunchNode> for EngineNodeLauncher where - Types: ProviderNodeTypes + NodeTypesWithEngine, + Types: EngineNodeTypes, T: FullNodeTypes>, CB: NodeComponentsBuilder, - AO: 
RethRpcAddOns>, + AO: RethRpcAddOns> + + EngineValidatorAddOn< + NodeAdapter, + Validator: EngineValidator>, + >, + LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, @@ -93,15 +97,6 @@ where } = target; let NodeHooks { on_component_initialized, on_node_started, .. } = hooks; - // TODO: move tree_config and canon_state_notification_sender - // initialization to with_blockchain_db once the engine revamp is done - // https://github.com/paradigmxyz/reth/issues/8742 - let tree_config = BlockchainTreeConfig::default(); - - // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. This will be removed once the Blockchain provider no longer depends on an instance of the tree: - let (canon_state_notification_sender, _receiver) = - tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); - // setup the launch context let ctx = ctx .with_configured_globals() @@ -131,7 +126,7 @@ where // later the components. .with_blockchain_db::(move |provider_factory| { Ok(BlockchainProvider2::new(provider_factory)?) - }, tree_config, canon_state_notification_sender)? + })? .with_components(components_builder, on_component_initialized).await?; // spawn exexs @@ -175,13 +170,15 @@ where )); info!(target: "reth::cli", "StaticFileProducer initialized"); + let consensus = Arc::new(ctx.components().consensus().clone()); + // Configure the pipeline let pipeline_exex_handle = exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); let pipeline = build_networked_pipeline( &ctx.toml_config().stages, network_client.clone(), - ctx.consensus(), + consensus.clone(), ctx.provider_factory().clone(), ctx.task_executor(), ctx.sync_metrics_tx(), @@ -203,36 +200,46 @@ where pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); } let pruner = pruner_builder.build_with_provider_factory(ctx.provider_factory().clone()); - let pruner_events = pruner.events(); info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); + let event_sender = EventSender::default(); + let beacon_engine_handle = + BeaconConsensusEngineHandle::new(consensus_engine_tx.clone(), event_sender.clone()); + + // extract the jwt secret from the args if possible + let jwt_secret = ctx.auth_jwt_secret()?; + + let add_ons_ctx = AddOnsContext { + node: ctx.node_adapter().clone(), + config: ctx.node_config(), + beacon_engine_handle: beacon_engine_handle.clone(), + jwt_secret, + }; + let engine_payload_validator = add_ons.engine_validator(&add_ons_ctx).await?; + let mut engine_service = if ctx.is_dev() { - let mining_mode = if let Some(block_time) = ctx.node_config().dev.block_time { - MiningMode::interval(block_time) - } else { - MiningMode::instant(ctx.components().pool().clone()) - }; let eth_service = LocalEngineService::new( - ctx.consensus(), + consensus.clone(), ctx.components().block_executor().clone(), ctx.provider_factory().clone(), ctx.blockchain_db().clone(), pruner, ctx.components().payload_builder().clone(), + engine_payload_validator, engine_tree_config, ctx.invalid_block_hook()?, ctx.sync_metrics_tx(), consensus_engine_tx.clone(), Box::pin(consensus_engine_stream), - mining_mode, + ctx.dev_mining_mode(ctx.components().pool()), LocalPayloadAttributesBuilder::new(ctx.chain_spec()), ); Either::Left(eth_service) } else { let eth_service = EngineService::new( - 
ctx.consensus(), + consensus.clone(), ctx.components().block_executor().clone(), ctx.chain_spec(), network_client.clone(), @@ -243,6 +250,7 @@ where ctx.blockchain_db().clone(), pruner, ctx.components().payload_builder().clone(), + engine_payload_validator, engine_tree_config, ctx.invalid_block_hook()?, ctx.sync_metrics_tx(), @@ -251,15 +259,9 @@ where Either::Right(eth_service) }; - let event_sender = EventSender::default(); - - let beacon_engine_handle = - BeaconConsensusEngineHandle::new(consensus_engine_tx, event_sender.clone()); - info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( - ctx.components().network().event_listener().map(Into::into), beacon_engine_handle.event_listener().map(Into::into), pipeline_events.map(Into::into), if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { @@ -282,16 +284,6 @@ where ), ); - // extract the jwt secret from the args if possible - let jwt_secret = ctx.auth_jwt_secret()?; - - let add_ons_ctx = AddOnsContext { - node: ctx.node_adapter(), - config: ctx.node_config(), - beacon_engine_handle: &beacon_engine_handle, - jwt_secret: &jwt_secret, - }; - let RpcHandle { rpc_server_handles, rpc_registry } = add_ons.launch_add_ons(add_ons_ctx).await?; diff --git a/crates/node/builder/src/launch/exex.rs b/crates/node/builder/src/launch/exex.rs index a3640690c1d..0235dd929e2 100644 --- a/crates/node/builder/src/launch/exex.rs +++ b/crates/node/builder/src/launch/exex.rs @@ -40,7 +40,9 @@ impl ExExLauncher { /// /// Spawns all extensions and returns the handle to the exex manager if any extensions are /// installed. - pub async fn launch(self) -> eyre::Result> { + pub async fn launch( + self, + ) -> eyre::Result::Primitives>>> { let Self { head, extensions, components, config_container } = self; if extensions.is_empty() { diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index 36aa55541e0..e23ce38da75 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -11,30 +11,30 @@ pub use exex::ExExLauncher; use std::{future::Future, sync::Arc}; -use alloy_primitives::utils::format_ether; use futures::{future::Either, stream, stream_select, StreamExt}; use reth_beacon_consensus::{ hooks::{EngineHooks, PruneHook, StaticFileHook}, BeaconConsensusEngine, }; -use reth_blockchain_tree::{noop::NoopBlockchainTree, BlockchainTreeConfig}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_blockchain_tree::{ + externals::TreeNodeTypes, noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, + ShareableBlockchainTree, TreeExternals, +}; +use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; -use reth_network::{BlockDownloaderProvider, NetworkEventListenerProvider}; -use reth_node_api::{ - AddOnsContext, FullNodeComponents, FullNodeTypes, NodeTypesWithDB, NodeTypesWithEngine, -}; +use reth_network::BlockDownloaderProvider; +use reth_node_api::{AddOnsContext, FullNodeTypes, NodeTypesWithEngine}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, exit::NodeExitFuture, }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; -use reth_provider::providers::BlockchainProvider; +use reth_provider::providers::{BlockchainProvider, ProviderNodeTypes}; +use reth_rpc::eth::RpcNodeCore; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; -use 
reth_transaction_pool::TransactionPool; use tokio::sync::{mpsc::unbounded_channel, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -47,14 +47,14 @@ use crate::{ AddOns, NodeBuilderWithComponents, NodeHandle, }; -/// Alias for [`reth_rpc_eth_types::EthApiBuilderCtx`], adapter for [`FullNodeComponents`]. +/// Alias for [`reth_rpc_eth_types::EthApiBuilderCtx`], adapter for [`RpcNodeCore`]. pub type EthApiBuilderCtx = reth_rpc_eth_types::EthApiBuilderCtx< - ::Provider, - ::Pool, - ::Evm, - ::Network, + ::Provider, + ::Pool, + ::Evm, + ::Network, TaskExecutor, - ::Provider, + ::Provider, >; /// A general purpose trait that launches a new node of any kind. @@ -70,7 +70,7 @@ pub trait LaunchNode { type Node; /// Create and return a new node asynchronously. - fn launch_node(self, target: Target) -> impl Future> + Send; + fn launch_node(self, target: Target) -> impl Future>; } impl LaunchNode for F @@ -80,7 +80,7 @@ where { type Node = Node; - fn launch_node(self, target: Target) -> impl Future> + Send { + fn launch_node(self, target: Target) -> impl Future> { self(target) } } @@ -101,7 +101,7 @@ impl DefaultNodeLauncher { impl LaunchNode> for DefaultNodeLauncher where - Types: NodeTypesWithDB + NodeTypesWithEngine, + Types: ProviderNodeTypes + NodeTypesWithEngine + TreeNodeTypes, T: FullNodeTypes, Types = Types>, CB: NodeComponentsBuilder, AO: RethRpcAddOns>, @@ -135,7 +135,7 @@ where )); // setup the launch context - let ctx = ctx + let mut ctx = ctx .with_configured_globals() // load the toml config .with_loaded_toml_config(config)? @@ -163,9 +163,29 @@ where // later the components. .with_blockchain_db::(move |provider_factory| { Ok(BlockchainProvider::new(provider_factory, tree)?) - }, tree_config, canon_state_notification_sender)? + })? .with_components(components_builder, on_component_initialized).await?; + let consensus = Arc::new(ctx.components().consensus().clone()); + + let tree_externals = TreeExternals::new( + ctx.provider_factory().clone(), + consensus.clone(), + ctx.components().block_executor().clone(), + ); + let tree = BlockchainTree::new(tree_externals, tree_config)? + .with_sync_metrics_tx(ctx.sync_metrics_tx()) + // Note: This is required because we need to ensure that both the components and the + // tree are using the same channel for canon state notifications. This will be removed + // once the Blockchain provider no longer depends on an instance of the tree + .with_canon_state_notification_sender(canon_state_notification_sender); + + let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); + + ctx.node_adapter_mut().provider = ctx.blockchain_db().clone().with_tree(blockchain_tree); + + debug!(target: "reth::cli", "configured blockchain tree"); + // spawn exexs let exex_manager_handle = ExExLauncher::new( ctx.head(), @@ -211,52 +231,12 @@ where let pipeline_exex_handle = exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); let (pipeline, client) = if ctx.is_dev() { - info!(target: "reth::cli", "Starting Reth in dev mode"); - - for (idx, (address, alloc)) in ctx.chain_spec().genesis().alloc.iter().enumerate() { - info!(target: "reth::cli", "Allocated Genesis Account: {:02}. 
{} ({} ETH)", idx, address.to_string(), format_ether(alloc.balance)); - } - - // install auto-seal - let mining_mode = - ctx.dev_mining_mode(ctx.components().pool().pending_transactions_listener()); - info!(target: "reth::cli", mode=%mining_mode, "configuring dev mining mode"); - - let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new( - ctx.chain_spec(), - ctx.blockchain_db().clone(), - ctx.components().pool().clone(), - consensus_engine_tx.clone(), - mining_mode, - ctx.components().block_executor().clone(), - ) - .build(); - - let pipeline = crate::setup::build_networked_pipeline( - &ctx.toml_config().stages, - client.clone(), - ctx.consensus(), - ctx.provider_factory().clone(), - ctx.task_executor(), - ctx.sync_metrics_tx(), - ctx.prune_config(), - max_block, - static_file_producer, - ctx.components().block_executor().clone(), - pipeline_exex_handle, - )?; - - let pipeline_events = pipeline.events(); - task.set_pipeline_events(pipeline_events); - debug!(target: "reth::cli", "Spawning auto mine task"); - ctx.task_executor().spawn(Box::pin(task)); - - (pipeline, Either::Left(client)) + eyre::bail!("Dev mode is not supported for legacy engine") } else { let pipeline = crate::setup::build_networked_pipeline( &ctx.toml_config().stages, network_client.clone(), - ctx.consensus(), + consensus.clone(), ctx.provider_factory().clone(), ctx.task_executor(), ctx.sync_metrics_tx(), @@ -267,7 +247,7 @@ where pipeline_exex_handle, )?; - (pipeline, Either::Right(network_client.clone())) + (pipeline, network_client.clone()) }; let pipeline_events = pipeline.events(); @@ -303,8 +283,6 @@ where info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( - ctx.components().network().event_listener().map(Into::into), - beacon_engine_handle.event_listener().map(Into::into), pipeline_events.map(Into::into), if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { Either::Left( @@ -330,10 +308,10 @@ where let jwt_secret = ctx.auth_jwt_secret()?; let add_ons_ctx = AddOnsContext { - node: ctx.node_adapter(), + node: ctx.node_adapter().clone(), config: ctx.node_config(), - beacon_engine_handle: &beacon_engine_handle, - jwt_secret: &jwt_secret, + beacon_engine_handle, + jwt_secret, }; let RpcHandle { rpc_server_handles, rpc_registry } = diff --git a/crates/node/builder/src/lib.rs b/crates/node/builder/src/lib.rs index 899317f158c..a4f87c47984 100644 --- a/crates/node/builder/src/lib.rs +++ b/crates/node/builder/src/lib.rs @@ -1,4 +1,7 @@ //! Standalone crate for Reth configuration and builder types. +//! +//! # features +//! 
- `js-tracer`: Enable the `JavaScript` tracer for the `debug_trace` endpoints #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 3e3d5b696c3..ce7d12fee3d 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -11,10 +11,10 @@ use reth_node_api::{EngineTypes, FullNodeComponents}; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, - rpc::api::EngineApiClient, }; use reth_payload_builder::PayloadBuilderHandle; use reth_provider::ChainSpecProvider; +use reth_rpc_api::EngineApiClient; use reth_rpc_builder::{auth::AuthServerHandle, RpcServerHandle}; use reth_tasks::TaskExecutor; @@ -69,6 +69,10 @@ where type Primitives = ::Primitives; type ChainSpec = ::ChainSpec; + + type StateCommitment = ::StateCommitment; + + type Storage = ::Storage; } impl NodeTypesWithEngine for AnyNode diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index d8cce9217ef..32123b194e6 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -2,6 +2,7 @@ use std::{ fmt::{self, Debug}, + future::Future, marker::PhantomData, ops::{Deref, DerefMut}, }; @@ -9,16 +10,20 @@ use std::{ use alloy_rpc_types::engine::ClientVersionV1; use futures::TryFutureExt; use reth_node_api::{ - AddOnsContext, FullNodeComponents, NodeAddOns, NodeTypes, NodeTypesWithEngine, + AddOnsContext, BlockTy, EngineValidator, FullNodeComponents, NodeAddOns, NodeTypes, + NodeTypesWithEngine, }; use reth_node_core::{ node_config::NodeConfig, - rpc::eth::{EthApiTypes, FullEthApiServer}, version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; -use reth_payload_builder::PayloadBuilderHandle; +use reth_payload_builder::PayloadStore; +use reth_primitives::{EthPrimitives, PooledTransactionsElement}; use reth_provider::providers::ProviderNodeTypes; -use reth_rpc::EthApi; +use reth_rpc::{ + eth::{EthApiTypes, FullEthApiServer}, + EthApi, +}; use reth_rpc_api::eth::helpers::AddDevSigners; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerHandle}, @@ -28,6 +33,8 @@ use reth_rpc_builder::{ use reth_rpc_engine_api::{capabilities::EngineCapabilities, EngineApi}; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use std::sync::Arc; use crate::EthApiBuilderCtx; @@ -195,6 +202,7 @@ pub struct RpcRegistry { Node::Provider, EthApi, Node::Executor, + Node::Consensus, >, } @@ -211,6 +219,7 @@ where Node::Provider, EthApi, Node::Executor, + Node::Consensus, >; fn deref(&self) -> &Self::Target { @@ -289,9 +298,7 @@ where } /// Returns the handle to the payload builder service - pub fn payload_builder( - &self, - ) -> &PayloadBuilderHandle<::Engine> { + pub fn payload_builder(&self) -> &Node::PayloadBuilder { self.node.payload_builder() } } @@ -327,31 +334,38 @@ where /// Node add-ons containing RPC server configuration, with customizable eth API handler. #[allow(clippy::type_complexity)] -pub struct RpcAddOns { +pub struct RpcAddOns { /// Additional RPC add-ons. 
pub hooks: RpcHooks, /// Builder for `EthApi` eth_api_builder: Box) -> EthApi + Send + Sync>, + /// Engine validator + engine_validator_builder: EV, _pd: PhantomData<(Node, EthApi)>, } -impl Debug for RpcAddOns { +impl Debug + for RpcAddOns +{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RpcAddOns") .field("hooks", &self.hooks) .field("eth_api_builder", &"...") + .field("engine_validator_builder", &self.engine_validator_builder) .finish() } } -impl RpcAddOns { +impl RpcAddOns { /// Creates a new instance of the RPC add-ons. pub fn new( eth_api_builder: impl FnOnce(&EthApiBuilderCtx) -> EthApi + Send + Sync + 'static, + engine_validator_builder: EV, ) -> Self { Self { hooks: RpcHooks::default(), eth_api_builder: Box::new(eth_api_builder), + engine_validator_builder, _pd: PhantomData, } } @@ -377,22 +391,42 @@ impl RpcAddOns { } } -impl> Default - for RpcAddOns +impl Default for RpcAddOns +where + Node: FullNodeComponents, + EthApi: EthApiTypes + EthApiBuilder, + EV: Default, { fn default() -> Self { - Self::new(EthApi::build) + Self::new(EthApi::build, EV::default()) } } -impl NodeAddOns for RpcAddOns +impl RpcAddOns where - N: FullNodeComponents, - EthApi: EthApiTypes + FullEthApiServer + AddDevSigners + Unpin + 'static, + N: FullNodeComponents< + Pool: TransactionPool>, + >, + EthApi: EthApiTypes + + FullEthApiServer + + AddDevSigners + + Unpin + + 'static, + EV: EngineValidatorBuilder, { - type Handle = RpcHandle; + /// Launches the RPC servers with the given context and an additional hook for extending + /// modules. + pub async fn launch_add_ons_with( + self, + ctx: AddOnsContext<'_, N>, + ext: F, + ) -> eyre::Result> + where + F: FnOnce(&mut TransportRpcModules, &mut AuthRpcModule) -> eyre::Result<()>, + { + let Self { eth_api_builder, engine_validator_builder, hooks, _pd: _ } = self; - async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { + let engine_validator = engine_validator_builder.build(&ctx).await?; let AddOnsContext { node, config, beacon_engine_handle, jwt_secret } = ctx; let client = ClientVersionV1 { @@ -405,17 +439,17 @@ where let engine_api = EngineApi::new( node.provider().clone(), config.chain.clone(), - beacon_engine_handle.clone(), - node.payload_builder().clone().into(), + beacon_engine_handle, + PayloadStore::new(node.payload_builder().clone()), node.pool().clone(), Box::new(node.task_executor().clone()), client, EngineCapabilities::default(), - node.engine_validator().clone(), + engine_validator.clone(), ); info!(target: "reth::cli", "Engine API handler initialized"); - let auth_config = config.rpc.auth_server_config(*jwt_secret)?; + let auth_config = config.rpc.auth_server_config(jwt_secret)?; let module_config = config.rpc.transport_rpc_module_config(); debug!(target: "reth::cli", http=?module_config.http(), ws=?module_config.ws(), "Using RPC module config"); @@ -427,7 +461,13 @@ where .with_executor(node.task_executor().clone()) .with_evm_config(node.evm_config().clone()) .with_block_executor(node.block_executor().clone()) - .build_with_auth_server(module_config, engine_api, self.eth_api_builder); + .with_consensus(node.consensus().clone()) + .build_with_auth_server( + module_config, + engine_api, + eth_api_builder, + Arc::new(engine_validator), + ); // in dev mode we generate 20 random dev-signer accounts if config.dev.dev { @@ -443,8 +483,9 @@ where auth_module: &mut auth_module, }; - let RpcHooks { on_rpc_started, extend_rpc_modules } = self.hooks; + let RpcHooks { on_rpc_started, extend_rpc_modules } = 
hooks; + ext(ctx.modules, ctx.auth_module)?; extend_rpc_modules.extend_rpc_modules(ctx)?; let server_config = config.rpc.rpc_server_config(); @@ -491,6 +532,26 @@ where } } +impl NodeAddOns for RpcAddOns +where + N: FullNodeComponents< + Types: ProviderNodeTypes, + Pool: TransactionPool>, + >, + EthApi: EthApiTypes + + FullEthApiServer + + AddDevSigners + + Unpin + + 'static, + EV: EngineValidatorBuilder, +{ + type Handle = RpcHandle; + + async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result { + self.launch_add_ons_with(ctx, |_, _| Ok(())).await + } +} + /// Helper trait implemented for add-ons producing [`RpcHandle`]. Used by common node launcher /// implementations. pub trait RethRpcAddOns: @@ -503,7 +564,7 @@ pub trait RethRpcAddOns: fn hooks_mut(&mut self) -> &mut RpcHooks; } -impl RethRpcAddOns for RpcAddOns +impl RethRpcAddOns for RpcAddOns where Self: NodeAddOns>, { @@ -520,8 +581,69 @@ pub trait EthApiBuilder: 'static { fn build(ctx: &EthApiBuilderCtx) -> Self; } -impl EthApiBuilder for EthApi { +impl>> EthApiBuilder + for EthApi +{ fn build(ctx: &EthApiBuilderCtx) -> Self { Self::with_spawner(ctx) } } + +/// Helper trait that provides the validator for the engine API +pub trait EngineValidatorAddOn: Send { + /// The Validator type to use for the engine API. + type Validator: EngineValidator<::Engine, Block = BlockTy> + + Clone; + + /// Creates the engine validator for an engine API based node. + fn engine_validator( + &self, + ctx: &AddOnsContext<'_, Node>, + ) -> impl Future>; +} + +impl EngineValidatorAddOn for RpcAddOns +where + N: FullNodeComponents, + EthApi: EthApiTypes, + EV: EngineValidatorBuilder, +{ + type Validator = EV::Validator; + + async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { + self.engine_validator_builder.clone().build(ctx).await + } +} + +/// A type that knows how to build the engine validator. +pub trait EngineValidatorBuilder: Send + Sync + Clone { + /// The consensus implementation to build. + type Validator: EngineValidator<::Engine, Block = BlockTy> + + Clone; + + /// Creates the engine validator. 
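+    /// Note: closures of the form `FnOnce(&AddOnsContext<'_, Node>) -> Fut` also satisfy this trait through the blanket impl below.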
+ fn build( + self, + ctx: &AddOnsContext<'_, Node>, + ) -> impl Future> + Send; +} + +impl EngineValidatorBuilder for F +where + Node: FullNodeComponents, + Validator: EngineValidator<::Engine, Block = BlockTy> + + Clone + + Unpin + + 'static, + F: FnOnce(&AddOnsContext<'_, Node>) -> Fut + Send + Sync + Clone, + Fut: Future> + Send, +{ + type Validator = Validator; + + fn build( + self, + ctx: &AddOnsContext<'_, Node>, + ) -> impl Future> { + self(ctx) + } +} diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 3591868ddad..6dff28bd39b 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -14,6 +14,7 @@ use reth_exex::ExExManagerHandle; use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, BlockClient, }; +use reth_node_api::{BodyTy, HeaderTy, NodePrimitives}; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; @@ -26,7 +27,7 @@ use tokio::sync::watch; pub fn build_networked_pipeline( config: &StageConfig, client: Client, - consensus: Arc, + consensus: Arc>, provider_factory: ProviderFactory, task_executor: &TaskExecutor, metrics_tx: reth_stages::MetricEventsSender, @@ -34,16 +35,17 @@ pub fn build_networked_pipeline( max_block: Option, static_file_producer: StaticFileProducer>, executor: Executor, - exex_manager_handle: ExExManagerHandle, + exex_manager_handle: ExExManagerHandle, ) -> eyre::Result> where N: ProviderNodeTypes, - Client: BlockClient + 'static, - Executor: BlockExecutorProvider, + Client: BlockClient
, Body = BodyTy> + 'static, + Executor: BlockExecutorProvider, + N::Primitives: NodePrimitives, { // building network downloaders using the fetch client let header_downloader = ReverseHeadersDownloaderBuilder::new(config.headers) - .build(client.clone(), Arc::clone(&consensus)) + .build(client.clone(), consensus.clone().as_header_validator()) .into_task_with(task_executor); let body_downloader = BodiesDownloaderBuilder::new(config.bodies) @@ -74,19 +76,20 @@ pub fn build_pipeline( stage_config: &StageConfig, header_downloader: H, body_downloader: B, - consensus: Arc, + consensus: Arc>, max_block: Option, metrics_tx: reth_stages::MetricEventsSender, prune_config: Option, static_file_producer: StaticFileProducer>, executor: Executor, - exex_manager_handle: ExExManagerHandle, + exex_manager_handle: ExExManagerHandle, ) -> eyre::Result> where N: ProviderNodeTypes, - H: HeaderDownloader + 'static, - B: BodyDownloader + 'static, - Executor: BlockExecutorProvider, + H: HeaderDownloader
> + 'static, + B: BodyDownloader
, Body = BodyTy> + 'static, + Executor: BlockExecutorProvider, + N::Primitives: NodePrimitives, { let mut builder = Pipeline::::builder(); diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 3ac90a88870..0ede9fe80c4 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -13,7 +13,9 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-consensus.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-cli-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-storage-errors.workspace = true @@ -23,8 +25,6 @@ reth-network-p2p.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-rpc-types-compat.workspace = true -reth-rpc-api = { workspace = true, features = ["client"] } -reth-rpc-eth-api = { workspace = true, features = ["client"] } reth-transaction-pool.workspace = true reth-tracing.workspace = true reth-config.workspace = true @@ -32,13 +32,15 @@ reth-discv4.workspace = true reth-discv5.workspace = true reth-net-nat.workspace = true reth-network-peers.workspace = true -reth-consensus-common.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true +reth-ethereum-forks.workspace = true # ethereum alloy-primitives.workspace = true -alloy-rpc-types-engine = { workspace = true, features = ["jwt"] } +alloy-rpc-types-engine = { workspace = true, features = ["std", "jwt"] } +alloy-consensus.workspace = true +alloy-eips.workspace = true # misc eyre.workspace = true @@ -73,15 +75,12 @@ futures.workspace = true # test vectors generation proptest.workspace = true tokio.workspace = true -tempfile.workspace = true [features] -optimism = [ - "reth-primitives/optimism" -] +optimism = ["reth-primitives/optimism", "reth-db/optimism"] # Features for vergen to generate correct env vars -jemalloc = [] -asm-keccak = [] +jemalloc = ["reth-cli-util/jemalloc"] +asm-keccak = ["reth-primitives/asm-keccak", "alloy-primitives/asm-keccak"] [build-dependencies] vergen = { version = "8.0.0", features = ["build", "cargo", "git", "gitcl"] } diff --git a/crates/node/core/src/args/database.rs b/crates/node/core/src/args/database.rs index da96deb70c1..5b9d6ae61e2 100644 --- a/crates/node/core/src/args/database.rs +++ b/crates/node/core/src/args/database.rs @@ -1,12 +1,14 @@ //! clap [Args](clap::Args) for database configuration +use std::{fmt, str::FromStr, time::Duration}; + use crate::version::default_client_version; use clap::{ builder::{PossibleValue, TypedValueParser}, error::ErrorKind, Arg, Args, Command, Error, }; -use reth_db::ClientVersion; +use reth_db::{mdbx::MaxReadTransactionDuration, ClientVersion}; use reth_storage_errors::db::LogLevel; /// Parameters for database configuration @@ -20,6 +22,15 @@ pub struct DatabaseArgs { /// NFS volume. #[arg(long = "db.exclusive")] pub exclusive: Option, + /// Maximum database size (e.g., 4TB, 8MB) + #[arg(long = "db.max-size", value_parser = parse_byte_size)] + pub max_size: Option, + /// Database growth step (e.g., 4GB, 4KB) + #[arg(long = "db.growth-step", value_parser = parse_byte_size)] + pub growth_step: Option, + /// Read transaction timeout in seconds, 0 means no timeout. + #[arg(long = "db.read-transaction-timeout")] + pub read_transaction_timeout: Option, } impl DatabaseArgs { @@ -28,14 +39,24 @@ impl DatabaseArgs { self.get_database_args(default_client_version()) } - /// Returns the database arguments with configured log level and given client version. 
-    pub const fn get_database_args(
+    /// Returns the database arguments with configured log level, client version,
+    /// max read transaction duration, and geometry.
+    pub fn get_database_args(
         &self,
         client_version: ClientVersion,
     ) -> reth_db::mdbx::DatabaseArguments {
+        let max_read_transaction_duration = match self.read_transaction_timeout {
+            None => None, // if not specified, use default value
+            Some(0) => Some(MaxReadTransactionDuration::Unbounded), // if 0, disable timeout
+            Some(secs) => Some(MaxReadTransactionDuration::Set(Duration::from_secs(secs))),
+        };
+
         reth_db::mdbx::DatabaseArguments::new(client_version)
             .with_log_level(self.log_level)
             .with_exclusive(self.exclusive)
+            .with_max_read_transaction_duration(max_read_transaction_duration)
+            .with_geometry_max_size(self.max_size)
+            .with_growth_step(self.growth_step)
     }
 }

@@ -77,10 +98,84 @@ impl TypedValueParser for LogLevelValueParser {
         Some(Box::new(values))
     }
 }
+
+/// Size in bytes.
+#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
+pub struct ByteSize(pub usize);
+
+impl From<ByteSize> for usize {
+    fn from(s: ByteSize) -> Self {
+        s.0
+    }
+}
+
+impl FromStr for ByteSize {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let s = s.trim().to_uppercase();
+        let parts: Vec<&str> = s.split_whitespace().collect();
+
+        let (num_str, unit) = match parts.len() {
+            1 => {
+                let (num, unit) =
+                    s.split_at(s.find(|c: char| c.is_alphabetic()).unwrap_or(s.len()));
+                (num, unit)
+            }
+            2 => (parts[0], parts[1]),
+            _ => {
+                return Err("Invalid format. Use '<number><unit>' or '<number> <unit>'.".to_string())
+            }
+        };
+
+        let num: usize = num_str.parse().map_err(|_| "Invalid number".to_string())?;
+
+        let multiplier = match unit {
+            "B" | "" => 1, // Assume bytes if no unit is specified
+            "KB" => 1024,
+            "MB" => 1024 * 1024,
+            "GB" => 1024 * 1024 * 1024,
+            "TB" => 1024 * 1024 * 1024 * 1024,
+            _ => return Err(format!("Invalid unit: {}. Use B, KB, MB, GB, or TB.", unit)),
+        };
+
+        Ok(Self(num * multiplier))
+    }
+}
+
+impl fmt::Display for ByteSize {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        const KB: usize = 1024;
+        const MB: usize = KB * 1024;
+        const GB: usize = MB * 1024;
+        const TB: usize = GB * 1024;
+
+        let (size, unit) = if self.0 >= TB {
+            (self.0 as f64 / TB as f64, "TB")
+        } else if self.0 >= GB {
+            (self.0 as f64 / GB as f64, "GB")
+        } else if self.0 >= MB {
+            (self.0 as f64 / MB as f64, "MB")
+        } else if self.0 >= KB {
+            (self.0 as f64 / KB as f64, "KB")
+        } else {
+            (self.0 as f64, "B")
+        };
+
+        write!(f, "{:.2}{}", size, unit)
+    }
+}
+
+/// Value parser function that supports various formats.
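+/// Accepts a plain byte count ("1073741824") as well as a count with a unit suffix ("4TB", "12 MB"), as exercised by the tests below.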
+fn parse_byte_size(s: &str) -> Result { + s.parse::().map(Into::into) +} + #[cfg(test)] mod tests { use super::*; use clap::Parser; + use reth_db::mdbx::{GIGABYTE, KILOBYTE, MEGABYTE, TERABYTE}; /// A helper type to parse Args more easily #[derive(Parser)] @@ -96,6 +191,101 @@ mod tests { assert_eq!(args, default_args); } + #[test] + fn test_command_parser_with_valid_max_size() { + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "4398046511104", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(TERABYTE * 4)); + } + + #[test] + fn test_command_parser_with_invalid_max_size() { + let result = + CommandParser::::try_parse_from(["reth", "--db.max-size", "invalid"]); + assert!(result.is_err()); + } + + #[test] + fn test_command_parser_with_valid_growth_step() { + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.growth-step", + "4294967296", + ]) + .unwrap(); + assert_eq!(cmd.args.growth_step, Some(GIGABYTE * 4)); + } + + #[test] + fn test_command_parser_with_invalid_growth_step() { + let result = + CommandParser::::try_parse_from(["reth", "--db.growth-step", "invalid"]); + assert!(result.is_err()); + } + + #[test] + fn test_command_parser_with_valid_max_size_and_growth_step_from_str() { + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "2TB", + "--db.growth-step", + "1GB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(TERABYTE * 2)); + assert_eq!(cmd.args.growth_step, Some(GIGABYTE)); + + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "12MB", + "--db.growth-step", + "2KB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(MEGABYTE * 12)); + assert_eq!(cmd.args.growth_step, Some(KILOBYTE * 2)); + + // with spaces + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "12 MB", + "--db.growth-step", + "2 KB", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(MEGABYTE * 12)); + assert_eq!(cmd.args.growth_step, Some(KILOBYTE * 2)); + + let cmd = CommandParser::::try_parse_from([ + "reth", + "--db.max-size", + "1073741824", + "--db.growth-step", + "1048576", + ]) + .unwrap(); + assert_eq!(cmd.args.max_size, Some(GIGABYTE)); + assert_eq!(cmd.args.growth_step, Some(MEGABYTE)); + } + + #[test] + fn test_command_parser_max_size_and_growth_step_from_str_invalid_unit() { + let result = + CommandParser::::try_parse_from(["reth", "--db.growth-step", "1 PB"]); + assert!(result.is_err()); + + let result = + CommandParser::::try_parse_from(["reth", "--db.max-size", "2PB"]); + assert!(result.is_err()); + } + #[test] fn test_possible_values() { // Initialize the LogLevelValueParser diff --git a/crates/node/core/src/args/payload_builder.rs b/crates/node/core/src/args/payload_builder.rs index aec35253af2..cd7ba7dccfb 100644 --- a/crates/node/core/src/args/payload_builder.rs +++ b/crates/node/core/src/args/payload_builder.rs @@ -1,12 +1,11 @@ use crate::{cli::config::PayloadBuilderConfig, version::default_extradata}; +use alloy_consensus::constants::MAXIMUM_EXTRA_DATA_SIZE; +use alloy_eips::{eip1559::ETHEREUM_BLOCK_GAS_LIMIT, merge::SLOT_DURATION}; use clap::{ builder::{RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, }; use reth_cli_util::{parse_duration_from_secs, parse_duration_from_secs_or_ms}; -use reth_primitives::constants::{ - ETHEREUM_BLOCK_GAS_LIMIT, MAXIMUM_EXTRA_DATA_SIZE, SLOT_DURATION, -}; use std::{borrow::Cow, ffi::OsStr, time::Duration}; /// Parameters for configuring the Payload Builder @@ -87,7 +86,7 @@ impl TypedValueParser for 
ExtradataValueParser { ) -> Result { let val = value.to_str().ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8))?; - if val.as_bytes().len() > MAXIMUM_EXTRA_DATA_SIZE { + if val.len() > MAXIMUM_EXTRA_DATA_SIZE { return Err(clap::Error::raw( clap::error::ErrorKind::InvalidValue, format!( diff --git a/crates/node/core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs index 382f22d3776..fe9b80cec47 100644 --- a/crates/node/core/src/args/rpc_server.rs +++ b/crates/node/core/src/args/rpc_server.rs @@ -1,11 +1,13 @@ //! clap [Args](clap::Args) for RPC related arguments. use std::{ + collections::HashSet, ffi::OsStr, net::{IpAddr, Ipv4Addr}, path::PathBuf, }; +use alloy_primitives::Address; use alloy_rpc_types_engine::JwtSecret; use clap::{ builder::{PossibleValue, RangedU64ValueParser, TypedValueParser}, @@ -183,6 +185,11 @@ pub struct RpcServerArgs { #[arg(long = "rpc.proof-permits", alias = "rpc-proof-permits", value_name = "COUNT", default_value_t = constants::DEFAULT_PROOF_PERMITS)] pub rpc_proof_permits: usize, + /// Path to file containing disallowed addresses, json-encoded list of strings. Block + /// validation API will reject blocks containing transactions from these addresses. + #[arg(long = "builder.disallow", value_name = "PATH", value_parser = reth_cli_util::parsers::read_json_from_file::>)] + pub builder_disallow: Option>, + /// State cache configuration. #[command(flatten)] pub rpc_state_cache: RpcStateCacheArgs, @@ -199,6 +206,12 @@ impl RpcServerArgs { self } + /// Configures modules for the HTTP-RPC server. + pub fn with_http_api(mut self, http_api: RpcModuleSelection) -> Self { + self.http_api = Some(http_api); + self + } + /// Enables the WS-RPC server. pub const fn with_ws(mut self) -> Self { self.ws = true; @@ -318,6 +331,7 @@ impl Default for RpcServerArgs { gas_price_oracle: GasPriceOracleArgs::default(), rpc_state_cache: RpcStateCacheArgs::default(), rpc_proof_permits: constants::DEFAULT_PROOF_PERMITS, + builder_disallow: Default::default(), } } } diff --git a/crates/node/core/src/args/rpc_state_cache.rs b/crates/node/core/src/args/rpc_state_cache.rs index 9169d40b317..b140d47b5fe 100644 --- a/crates/node/core/src/args/rpc_state_cache.rs +++ b/crates/node/core/src/args/rpc_state_cache.rs @@ -1,6 +1,6 @@ use clap::Args; use reth_rpc_server_types::constants::cache::{ - DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_ENV_CACHE_MAX_LEN, + DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_HEADER_CACHE_MAX_LEN, DEFAULT_RECEIPT_CACHE_MAX_LEN, }; @@ -22,12 +22,12 @@ pub struct RpcStateCacheArgs { )] pub max_receipts: u32, - /// Max number of bytes for cached env data. + /// Max number of headers in cache. #[arg( long = "rpc-cache.max-envs", - default_value_t = DEFAULT_ENV_CACHE_MAX_LEN, + default_value_t = DEFAULT_HEADER_CACHE_MAX_LEN, )] - pub max_envs: u32, + pub max_headers: u32, /// Max number of concurrent database requests. #[arg( @@ -42,7 +42,7 @@ impl Default for RpcStateCacheArgs { Self { max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN, max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN, - max_envs: DEFAULT_ENV_CACHE_MAX_LEN, + max_headers: DEFAULT_HEADER_CACHE_MAX_LEN, max_concurrent_db_requests: DEFAULT_CONCURRENT_DB_REQUESTS, } } diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index 63f6c566ca2..a8ea1d9cdba 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -1,17 +1,17 @@ //! 
Transaction pool arguments use crate::cli::config::RethTransactionPoolConfig; +use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use alloy_primitives::Address; use clap::Args; -use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use reth_transaction_pool::{ blobstore::disk::DEFAULT_MAX_CACHED_BLOBS, pool::{NEW_TX_LISTENER_BUFFER_SIZE, PENDING_TX_LISTENER_BUFFER_SIZE}, validate::DEFAULT_MAX_TX_INPUT_BYTES, LocalTransactionConfig, PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, - DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, REPLACE_BLOB_PRICE_BUMP, - TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, - TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, MAX_NEW_PENDING_TXS_NOTIFICATIONS, + REPLACE_BLOB_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, + TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, }; /// Parameters for debugging purposes #[derive(Debug, Clone, Args, PartialEq, Eq)] @@ -86,6 +86,11 @@ pub struct TxPoolArgs { /// Maximum number of new transactions to buffer #[arg(long = "txpool.max-new-txns", alias = "txpool.max_new_txns", default_value_t = NEW_TX_LISTENER_BUFFER_SIZE)] pub new_tx_listener_buffer_size: usize, + + /// How many new pending transactions to buffer and send to in progress pending transaction + /// iterators. + #[arg(long = "txpool.max-new-pending-txs-notifications", alias = "txpool.max-new-pending-txs-notifications", default_value_t = MAX_NEW_PENDING_TXS_NOTIFICATIONS)] + pub max_new_pending_txs_notifications: usize, } impl Default for TxPoolArgs { @@ -110,6 +115,7 @@ impl Default for TxPoolArgs { additional_validation_tasks: DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, pending_tx_listener_buffer_size: PENDING_TX_LISTENER_BUFFER_SIZE, new_tx_listener_buffer_size: NEW_TX_LISTENER_BUFFER_SIZE, + max_new_pending_txs_notifications: MAX_NEW_PENDING_TXS_NOTIFICATIONS, } } } @@ -125,19 +131,19 @@ impl RethTransactionPoolConfig for TxPoolArgs { }, pending_limit: SubPoolLimit { max_txs: self.pending_max_count, - max_size: self.pending_max_size * 1024 * 1024, + max_size: self.pending_max_size.saturating_mul(1024 * 1024), }, basefee_limit: SubPoolLimit { max_txs: self.basefee_max_count, - max_size: self.basefee_max_size * 1024 * 1024, + max_size: self.basefee_max_size.saturating_mul(1024 * 1024), }, queued_limit: SubPoolLimit { max_txs: self.queued_max_count, - max_size: self.queued_max_size * 1024 * 1024, + max_size: self.queued_max_size.saturating_mul(1024 * 1024), }, blob_limit: SubPoolLimit { max_txs: self.queued_max_count, - max_size: self.queued_max_size * 1024 * 1024, + max_size: self.queued_max_size.saturating_mul(1024 * 1024), }, max_account_slots: self.max_account_slots, price_bumps: PriceBumpConfig { @@ -148,6 +154,7 @@ impl RethTransactionPoolConfig for TxPoolArgs { gas_limit: self.gas_limit, pending_tx_listener_buffer_size: self.pending_tx_listener_buffer_size, new_tx_listener_buffer_size: self.new_tx_listener_buffer_size, + max_new_pending_txs_notifications: self.max_new_pending_txs_notifications, } } } diff --git a/crates/node/core/src/cli/config.rs b/crates/node/core/src/cli/config.rs index 73ada50fcd2..27325632db9 100644 --- a/crates/node/core/src/cli/config.rs +++ b/crates/node/core/src/cli/config.rs @@ -1,7 +1,7 @@ //! Config traits for various node components. 
use alloy_primitives::Bytes; -use reth_network::protocol::IntoRlpxSubProtocol; +use reth_network::{protocol::IntoRlpxSubProtocol, NetworkPrimitives}; use reth_transaction_pool::PoolConfig; use std::{borrow::Cow, time::Duration}; @@ -49,7 +49,7 @@ pub trait RethNetworkConfig { // TODO add more network config methods here } -impl RethNetworkConfig for reth_network::NetworkManager { +impl RethNetworkConfig for reth_network::NetworkManager { fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) { Self::add_rlpx_sub_protocol(self, protocol); } diff --git a/crates/node/core/src/lib.rs b/crates/node/core/src/lib.rs index 6af822e22ee..aa4f72bd6a4 100644 --- a/crates/node/core/src/lib.rs +++ b/crates/node/core/src/lib.rs @@ -15,22 +15,15 @@ pub mod exit; pub mod node_config; pub mod utils; pub mod version; -/// Re-exported from `reth_primitives`. + +/// Re-exported primitive types pub mod primitives { - pub use reth_primitives::*; + pub use reth_ethereum_forks::*; + pub use reth_primitives_traits::*; } /// Re-export of `reth_rpc_*` crates. pub mod rpc { - /// Re-exported from `reth_rpc_api`. - pub mod api { - pub use reth_rpc_api::*; - } - /// Re-exported from `reth_rpc::eth`. - pub mod eth { - pub use reth_rpc_eth_api::*; - } - /// Re-exported from `reth_rpc::rpc`. pub mod result { pub use reth_rpc_server_types::result::*; diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index a8799d80df1..861e47fc3cf 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -8,21 +8,27 @@ use crate::{ dirs::{ChainPath, DataDirPath}, utils::get_single_header, }; +use alloy_consensus::BlockHeader; +use alloy_eips::BlockHashOrNumber; +use alloy_primitives::{BlockNumber, B256}; use eyre::eyre; use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET}; use reth_config::config::PruneConfig; +use reth_ethereum_forks::Head; use reth_network_p2p::headers::client::HeadersClient; -use serde::{de::DeserializeOwned, Serialize}; -use std::{fs, path::Path}; - -use alloy_primitives::{BlockNumber, B256}; -use reth_primitives::{BlockHashOrNumber, Head, SealedHeader}; +use reth_primitives_traits::SealedHeader; use reth_stages_types::StageId; use reth_storage_api::{ BlockHashReader, DatabaseProviderFactory, HeaderProvider, StageCheckpointReader, }; use reth_storage_errors::provider::ProviderResult; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use serde::{de::DeserializeOwned, Serialize}; +use std::{ + fs, + net::SocketAddr, + path::{Path, PathBuf}, + sync::Arc, +}; use tracing::*; /// This includes all necessary configuration to launch the node. 
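Since `RethNetworkConfig` is now implemented for `NetworkManager<N>` for any `NetworkPrimitives`, callers can stay generic over the concrete network type. A small sketch under that assumption; only the trait method itself comes from this diff, the helper and import paths are illustrative:

```rust
use reth_network::protocol::IntoRlpxSubProtocol;
use reth_node_core::cli::config::RethNetworkConfig;

/// Installs a custom RLPx subprotocol on any network config type,
/// independent of the concrete `NetworkPrimitives` in use.
fn install_subprotocol<C: RethNetworkConfig>(config: &mut C, protocol: impl IntoRlpxSubProtocol) {
    config.add_rlpx_sub_protocol(protocol);
}
```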
@@ -272,7 +278,7 @@ impl NodeConfig { ) -> eyre::Result> where Provider: HeaderProvider, - Client: HeadersClient, + Client: HeadersClient, { let max_block = if let Some(block) = self.debug.max_block { Some(block) @@ -313,9 +319,9 @@ impl NodeConfig { Ok(Head { number: head, hash, - difficulty: header.difficulty, + difficulty: header.difficulty(), total_difficulty, - timestamp: header.timestamp, + timestamp: header.timestamp(), }) } @@ -331,17 +337,17 @@ impl NodeConfig { ) -> ProviderResult where Provider: HeaderProvider, - Client: HeadersClient, + Client: HeadersClient, { let header = provider.header_by_hash_or_number(tip.into())?; // try to look up the header in the database if let Some(header) = header { info!(target: "reth::cli", ?tip, "Successfully looked up tip block in the database"); - return Ok(header.number) + return Ok(header.number()) } - Ok(self.fetch_tip_from_network(client, tip.into()).await.number) + Ok(self.fetch_tip_from_network(client, tip.into()).await.number()) } /// Attempt to look up the block with the given number and return the header. @@ -351,9 +357,9 @@ impl NodeConfig { &self, client: Client, tip: BlockHashOrNumber, - ) -> SealedHeader + ) -> SealedHeader where - Client: HeadersClient, + Client: HeadersClient, { info!(target: "reth::cli", ?tip, "Fetching tip block from the network."); let mut fetch_failures = 0; @@ -422,6 +428,29 @@ impl NodeConfig { Err(e) => Err(eyre!("Failed to load configuration: {e}")), } } + + /// Modifies the [`ChainSpec`] generic of the config using the provided closure. + pub fn map_chainspec(self, f: F) -> NodeConfig + where + F: FnOnce(Arc) -> C, + { + let chain = Arc::new(f(self.chain)); + NodeConfig { + chain, + datadir: self.datadir, + config: self.config, + metrics: self.metrics, + instance: self.instance, + network: self.network, + rpc: self.rpc, + txpool: self.txpool, + builder: self.builder, + debug: self.debug, + db: self.db, + dev: self.dev, + pruning: self.pruning, + } + } } impl Default for NodeConfig { diff --git a/crates/node/core/src/utils.rs b/crates/node/core/src/utils.rs index a64d1211455..65f90f27eb7 100644 --- a/crates/node/core/src/utils.rs +++ b/crates/node/core/src/utils.rs @@ -1,21 +1,19 @@ //! Utility functions for node startup and shutdown, for example path parsing and retrieving single //! blocks from the network. 
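One note on the `map_chainspec` helper added above: it converts the chain-spec generic of a `NodeConfig` while carrying every other field over unchanged. A hedged usage sketch; the `WrappedSpec` newtype is an assumption for illustration, only `map_chainspec` itself comes from the diff:

```rust
use std::sync::Arc;
use reth_chainspec::ChainSpec;
use reth_node_core::node_config::NodeConfig;

/// Illustrative newtype wrapping the stock chain spec; not part of the diff.
struct WrappedSpec(Arc<ChainSpec>);

fn wrap(config: NodeConfig<ChainSpec>) -> NodeConfig<WrappedSpec> {
    // The closure receives the Arc'd spec and returns the new spec value,
    // which `map_chainspec` re-wraps in an Arc internally.
    config.map_chainspec(WrappedSpec)
}
```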
-use alloy_primitives::Sealable; +use alloy_consensus::BlockHeader; +use alloy_eips::BlockHashOrNumber; use alloy_rpc_types_engine::{JwtError, JwtSecret}; use eyre::Result; -use reth_chainspec::ChainSpec; -use reth_consensus_common::validation::validate_block_pre_execution; +use reth_consensus::Consensus; use reth_network_p2p::{ - bodies::client::BodiesClient, - headers::client::{HeadersClient, HeadersDirection, HeadersRequest}, - priority::Priority, + bodies::client::BodiesClient, headers::client::HeadersClient, priority::Priority, }; -use reth_primitives::{BlockHashOrNumber, SealedBlock, SealedHeader}; +use reth_primitives::SealedBlock; +use reth_primitives_traits::SealedHeader; use std::{ env::VarError, path::{Path, PathBuf}, - sync::Arc, }; use tracing::{debug, info}; @@ -40,27 +38,22 @@ pub fn get_or_create_jwt_secret_from_path(path: &Path) -> Result( client: Client, id: BlockHashOrNumber, -) -> Result +) -> Result> where - Client: HeadersClient, + Client: HeadersClient, { - let request = HeadersRequest { direction: HeadersDirection::Rising, limit: 1, start: id }; + let (peer_id, response) = client.get_header_with_priority(id, Priority::High).await?.split(); - let (peer_id, response) = - client.get_headers_with_priority(request, Priority::High).await?.split(); - - if response.len() != 1 { + let Some(header) = response else { client.report_bad_message(peer_id); - eyre::bail!("Invalid number of headers received. Expected: 1. Received: {}", response.len()) - } + eyre::bail!("Invalid number of headers received. Expected: 1. Received: 0") + }; - let sealed_header = response.into_iter().next().unwrap().seal_slow(); - let (header, seal) = sealed_header.into_parts(); - let header = SealedHeader::new(header, seal); + let header = SealedHeader::seal(header); let valid = match id { BlockHashOrNumber::Hash(hash) => header.hash() == hash, - BlockHashOrNumber::Number(number) => header.number == number, + BlockHashOrNumber::Number(number) => header.number() == number, }; if !valid { @@ -76,25 +69,23 @@ where } /// Get a body from network based on header -pub async fn get_single_body( +pub async fn get_single_body( client: Client, - chain_spec: Arc, - header: SealedHeader, -) -> Result + header: SealedHeader, + consensus: impl Consensus, +) -> Result> where Client: BodiesClient, { let (peer_id, response) = client.get_block_body(header.hash()).await?.split(); - if response.is_none() { + let Some(body) = response else { client.report_bad_message(peer_id); eyre::bail!("Invalid number of bodies received. Expected: 1. 
Received: 0") - } + }; - let body = response.unwrap(); let block = SealedBlock { header, body }; - - validate_block_pre_execution(&block, &chain_spec)?; + consensus.validate_block_pre_execution(&block)?; Ok(block) } diff --git a/crates/node/core/src/version.rs b/crates/node/core/src/version.rs index 84fcf3f0f11..4bf2dc56f39 100644 --- a/crates/node/core/src/version.rs +++ b/crates/node/core/src/version.rs @@ -144,9 +144,6 @@ mod tests { #[test] fn assert_extradata_less_32bytes() { let extradata = default_extradata(); - assert!( - extradata.as_bytes().len() <= 32, - "extradata must be less than 32 bytes: {extradata}" - ) + assert!(extradata.len() <= 32, "extradata must be less than 32 bytes: {extradata}") } } diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 9c56c2da9b8..03f3ab17288 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -12,19 +12,20 @@ workspace = true [dependencies] # reth -reth-provider.workspace = true +reth-storage-api.workspace = true reth-beacon-consensus.workspace = true -reth-network = { workspace = true, features = ["serde"] } reth-network-api.workspace = true reth-stages.workspace = true -reth-prune.workspace = true -reth-static-file.workspace = true -reth-primitives.workspace = true +reth-prune-types.workspace = true +reth-static-file-types.workspace = true reth-primitives-traits.workspace = true +reth-engine-primitives.workspace = true # ethereum alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true +alloy-eips.workspace = true # async tokio.workspace = true diff --git a/crates/node/events/src/cl.rs b/crates/node/events/src/cl.rs index 6d29c9bbfa2..dac13fe0763 100644 --- a/crates/node/events/src/cl.rs +++ b/crates/node/events/src/cl.rs @@ -1,7 +1,8 @@ //! Events related to Consensus Layer health. +use alloy_consensus::Header; use futures::Stream; -use reth_provider::CanonChainTracker; +use reth_storage_api::CanonChainTracker; use std::{ fmt, pin::Pin, @@ -20,9 +21,9 @@ const NO_TRANSITION_CONFIG_EXCHANGED_PERIOD: Duration = Duration::from_secs(120) const NO_FORKCHOICE_UPDATE_RECEIVED_PERIOD: Duration = Duration::from_secs(120); /// A Stream of [`ConsensusLayerHealthEvent`]. -pub struct ConsensusLayerHealthEvents { +pub struct ConsensusLayerHealthEvents { interval: Interval, - canon_chain: Box, + canon_chain: Box>, } impl fmt::Debug for ConsensusLayerHealthEvents { @@ -31,9 +32,9 @@ impl fmt::Debug for ConsensusLayerHealthEvents { } } -impl ConsensusLayerHealthEvents { +impl ConsensusLayerHealthEvents { /// Creates a new [`ConsensusLayerHealthEvents`] with the given canonical chain tracker. - pub fn new(canon_chain: Box) -> Self { + pub fn new(canon_chain: Box>) -> Self { // Skip the first tick to prevent the false `ConsensusLayerHealthEvent::NeverSeen` event. let interval = tokio::time::interval_at(Instant::now() + CHECK_INTERVAL, CHECK_INTERVAL); Self { interval, canon_chain } diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index e10caaee7c5..86f1ea507ac 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -1,19 +1,17 @@ //! Support for handling events emitted by node components. 
use crate::cl::ConsensusLayerHealthEvent; +use alloy_consensus::constants::GWEI_TO_WEI; use alloy_primitives::{BlockNumber, B256}; use alloy_rpc_types_engine::ForkchoiceState; use futures::Stream; -use reth_beacon_consensus::{ - BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus, -}; -use reth_network::NetworkEvent; +use reth_beacon_consensus::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; +use reth_engine_primitives::ForkchoiceStatus; use reth_network_api::PeersInfo; -use reth_primitives::constants; use reth_primitives_traits::{format_gas, format_gas_throughput}; -use reth_prune::PrunerEvent; +use reth_prune_types::PrunerEvent; use reth_stages::{EntitiesCheckpoint, ExecOutput, PipelineEvent, StageCheckpoint, StageId}; -use reth_static_file::StaticFileProducerEvent; +use reth_static_file_types::StaticFileProducerEvent; use std::{ fmt::{Display, Formatter}, future::Future, @@ -213,12 +211,6 @@ impl NodeState { } } - fn handle_network_event(&self, _: NetworkEvent) { - // NOTE(onbjerg): This used to log established/disconnecting sessions, but this is already - // logged in the networking component. I kept this stub in case we want to catch other - // networking events later on. - } - fn handle_consensus_engine_event(&mut self, event: BeaconConsensusEngineEvent) { match event { BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status) => { @@ -263,9 +255,9 @@ impl NodeState { gas=%format_gas(block.header.gas_used), gas_throughput=%format_gas_throughput(block.header.gas_used, elapsed), full=%format!("{:.1}%", block.header.gas_used as f64 * 100.0 / block.header.gas_limit as f64), - base_fee=%format!("{:.2}gwei", block.header.base_fee_per_gas.unwrap_or(0) as f64 / constants::GWEI_TO_WEI as f64), - blobs=block.header.blob_gas_used.unwrap_or(0) / constants::eip4844::DATA_GAS_PER_BLOB, - excess_blobs=block.header.excess_blob_gas.unwrap_or(0) / constants::eip4844::DATA_GAS_PER_BLOB, + base_fee=%format!("{:.2}gwei", block.header.base_fee_per_gas.unwrap_or(0) as f64 / GWEI_TO_WEI as f64), + blobs=block.header.blob_gas_used.unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB, + excess_blobs=block.header.excess_blob_gas.unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB, ?elapsed, "Block added to canonical chain" ); @@ -306,10 +298,14 @@ impl NodeState { fn handle_pruner_event(&self, event: PrunerEvent) { match event { PrunerEvent::Started { tip_block_number } => { - info!(tip_block_number, "Pruner started"); + debug!(tip_block_number, "Pruner started"); } PrunerEvent::Finished { tip_block_number, elapsed, stats } => { - info!(tip_block_number, ?elapsed, ?stats, "Pruner finished"); + let stats = format!( + "[{}]", + stats.iter().map(|item| item.to_string()).collect::>().join(", ") + ); + debug!(tip_block_number, ?elapsed, pruned_segments = %stats, "Pruner finished"); } } } @@ -317,10 +313,10 @@ impl NodeState { fn handle_static_file_producer_event(&self, event: StaticFileProducerEvent) { match event { StaticFileProducerEvent::Started { targets } => { - info!(?targets, "Static File Producer started"); + debug!(?targets, "Static File Producer started"); } StaticFileProducerEvent::Finished { targets, elapsed } => { - info!(?targets, ?elapsed, "Static File Producer finished"); + debug!(?targets, ?elapsed, "Static File Producer finished"); } } } @@ -356,8 +352,6 @@ struct CurrentStage { /// A node event. #[derive(Debug)] pub enum NodeEvent { - /// A network event. - Network(NetworkEvent), /// A sync pipeline event. 
Pipeline(PipelineEvent), /// A consensus engine event. @@ -373,12 +367,6 @@ pub enum NodeEvent { Other(String), } -impl From for NodeEvent { - fn from(event: NetworkEvent) -> Self { - Self::Network(event) - } -} - impl From for NodeEvent { fn from(event: PipelineEvent) -> Self { Self::Pipeline(event) @@ -525,9 +513,6 @@ where while let Poll::Ready(Some(event)) = this.events.as_mut().poll_next(cx) { match event { - NodeEvent::Network(event) => { - this.state.handle_network_event(event); - } NodeEvent::Pipeline(event) => { this.state.handle_pipeline_event(event); } diff --git a/crates/node/metrics/Cargo.toml b/crates/node/metrics/Cargo.toml index 76a3a7f6632..3d79d11db7d 100644 --- a/crates/node/metrics/Cargo.toml +++ b/crates/node/metrics/Cargo.toml @@ -8,8 +8,6 @@ homepage.workspace = true repository.workspace = true [dependencies] -reth-db-api.workspace = true -reth-provider.workspace = true reth-metrics.workspace = true reth-tasks.workspace = true @@ -20,7 +18,7 @@ metrics-util.workspace = true tokio.workspace = true -jsonrpsee = { workspace = true, features = ["server"] } +jsonrpsee-server.workspace = true http.workspace = true tower.workspace = true @@ -35,9 +33,7 @@ procfs = "0.16.0" [dev-dependencies] reqwest.workspace = true -reth-chainspec.workspace = true socket2 = { version = "0.5", default-features = false } -reth-provider = { workspace = true, features = ["test-utils"] } [lints] workspace = true diff --git a/crates/node/metrics/src/hooks.rs b/crates/node/metrics/src/hooks.rs index 18755717667..3b6d23a3900 100644 --- a/crates/node/metrics/src/hooks.rs +++ b/crates/node/metrics/src/hooks.rs @@ -1,15 +1,59 @@ use metrics_process::Collector; -use reth_db_api::database_metrics::DatabaseMetrics; -use reth_provider::providers::StaticFileProvider; use std::{fmt, sync::Arc}; -pub(crate) trait Hook: Fn() + Send + Sync {} -impl Hook for T {} -impl fmt::Debug for Hooks { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let hooks_len = self.inner.len(); - f.debug_struct("Hooks") - .field("inner", &format!("Arc>>, len: {}", hooks_len)) +/// The simple alias for function types that are `'static`, `Send`, and `Sync`. +pub trait Hook: Fn() + Send + Sync + 'static {} +impl Hook for T {} + +/// A builder-like type to create a new [`Hooks`] instance. +pub struct HooksBuilder { + hooks: Vec>>, +} + +impl HooksBuilder { + /// Registers a [`Hook`]. + pub fn with_hook(self, hook: impl Hook) -> Self { + self.with_boxed_hook(Box::new(hook)) + } + + /// Registers a [`Hook`] by calling the provided closure. + pub fn install_hook(self, f: F) -> Self + where + F: FnOnce() -> H, + H: Hook, + { + self.with_hook(f()) + } + + /// Registers a [`Hook`]. + #[inline] + pub fn with_boxed_hook(mut self, hook: Box>) -> Self { + self.hooks.push(hook); + self + } + + /// Builds the [`Hooks`] collection from the registered hooks. 
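The `HooksBuilder` introduced above replaces a constructor that hard-wired database and static-file metrics into the hook list. A compile-oriented sketch of the same type-erasure pattern, with simplified names that mirror the diff but are not the reth implementation: any `Fn() + Send + Sync + 'static` closure is boxed and stored behind an `Arc` so it can run on every metrics scrape.

```rust
use std::sync::Arc;

/// Any `Fn() + Send + Sync + 'static` closure can act as a metrics hook.
trait Hook: Fn() + Send + Sync + 'static {}
impl<T: Fn() + Send + Sync + 'static> Hook for T {}

#[derive(Default)]
struct HooksBuilder {
    hooks: Vec<Box<dyn Hook<Output = ()>>>,
}

impl HooksBuilder {
    fn with_hook(mut self, hook: impl Hook) -> Self {
        self.hooks.push(Box::new(hook));
        self
    }

    fn build(self) -> Hooks {
        Hooks { inner: Arc::new(self.hooks) }
    }
}

#[derive(Clone)]
struct Hooks {
    inner: Arc<Vec<Box<dyn Hook<Output = ()>>>>,
}

fn main() {
    let hooks = HooksBuilder::default()
        .with_hook(|| println!("collect process metrics"))
        .with_hook(|| println!("collect allocator metrics"))
        .build();

    // Run every registered hook, as the metrics endpoint would on each scrape.
    for hook in hooks.inner.iter() {
        hook();
    }
}
```

The `Output = ()` binding makes the supertrait's associated type concrete enough for a trait object, which is what lets the builder stay chainable while storing fully type-erased callbacks.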
+ pub fn build(self) -> Hooks { + Hooks { inner: Arc::new(self.hooks) } + } +} + +impl Default for HooksBuilder { + fn default() -> Self { + Self { + hooks: vec![ + Box::new(|| Collector::default().collect()), + Box::new(collect_memory_stats), + Box::new(collect_io_stats), + ], + } + } +} + +impl std::fmt::Debug for HooksBuilder { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("HooksBuilder") + .field("hooks", &format_args!("Vec>, len: {}", self.hooks.len())) .finish() } } @@ -21,23 +65,10 @@ pub struct Hooks { } impl Hooks { - /// Create a new set of hooks - pub fn new( - db: Metrics, - static_file_provider: StaticFileProvider, - ) -> Self { - let hooks: Vec>> = vec![ - Box::new(move || db.report_metrics()), - Box::new(move || { - let _ = static_file_provider.report_metrics().map_err( - |error| tracing::error!(%error, "Failed to report static file provider metrics"), - ); - }), - Box::new(move || Collector::default().collect()), - Box::new(collect_memory_stats), - Box::new(collect_io_stats), - ]; - Self { inner: Arc::new(hooks) } + /// Creates a new [`HooksBuilder`] instance. + #[inline] + pub fn builder() -> HooksBuilder { + HooksBuilder::default() } pub(crate) fn iter(&self) -> impl Iterator>> { @@ -45,6 +76,15 @@ impl Hooks { } } +impl fmt::Debug for Hooks { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let hooks_len = self.inner.len(); + f.debug_struct("Hooks") + .field("inner", &format_args!("Arc>>, len: {}", hooks_len)) + .finish() + } +} + #[cfg(all(feature = "jemalloc", unix))] fn collect_memory_stats() { use metrics::gauge; diff --git a/crates/node/metrics/src/recorder.rs b/crates/node/metrics/src/recorder.rs index a7421ab355c..e62b98c81cd 100644 --- a/crates/node/metrics/src/recorder.rs +++ b/crates/node/metrics/src/recorder.rs @@ -3,25 +3,78 @@ use eyre::WrapErr; use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle}; use metrics_util::layers::{PrefixLayer, Stack}; -use std::sync::LazyLock; +use std::sync::{atomic::AtomicBool, LazyLock}; /// Installs the Prometheus recorder as the global recorder. -pub fn install_prometheus_recorder() -> &'static PrometheusHandle { +/// +/// Note: This must be installed before any metrics are `described`. +/// +/// Caution: This only configures the global recorder and does not spawn the exporter. +/// Callers must run [`PrometheusRecorder::spawn_upkeep`] manually. +pub fn install_prometheus_recorder() -> &'static PrometheusRecorder { &PROMETHEUS_RECORDER_HANDLE } /// The default Prometheus recorder handle. We use a global static to ensure that it is only /// installed once. -static PROMETHEUS_RECORDER_HANDLE: LazyLock = +static PROMETHEUS_RECORDER_HANDLE: LazyLock = LazyLock::new(|| PrometheusRecorder::install().unwrap()); -/// Prometheus recorder installer +/// A handle to the Prometheus recorder. +/// +/// This is intended to be used as the global recorder. +/// Callers must ensure that [`PrometheusRecorder::spawn_upkeep`] is called once. #[derive(Debug)] -pub struct PrometheusRecorder; +pub struct PrometheusRecorder { + handle: PrometheusHandle, + upkeep: AtomicBool, +} impl PrometheusRecorder { + const fn new(handle: PrometheusHandle) -> Self { + Self { handle, upkeep: AtomicBool::new(false) } + } + + /// Returns a reference to the [`PrometheusHandle`]. + pub const fn handle(&self) -> &PrometheusHandle { + &self.handle + } + + /// Spawns the upkeep task if there hasn't been one spawned already. 
+ /// + /// ## Panics + /// + /// This method must be called from within an existing Tokio runtime or it will panic. + /// + /// See also [`PrometheusHandle::run_upkeep`] + pub fn spawn_upkeep(&self) { + if self + .upkeep + .compare_exchange( + false, + true, + std::sync::atomic::Ordering::SeqCst, + std::sync::atomic::Ordering::Acquire, + ) + .is_err() + { + return; + } + + let handle = self.handle.clone(); + tokio::spawn(async move { + loop { + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + handle.run_upkeep(); + } + }); + } + /// Installs Prometheus as the metrics recorder. - pub fn install() -> eyre::Result { + /// + /// Caution: This only configures the global recorder and does not spawn the exporter. + /// Callers must run [`Self::spawn_upkeep`] manually. + pub fn install() -> eyre::Result { let recorder = PrometheusBuilder::new().build_recorder(); let handle = recorder.handle(); @@ -31,7 +84,7 @@ impl PrometheusRecorder { .install() .wrap_err("Couldn't set metrics recorder.")?; - Ok(handle) + Ok(Self::new(handle)) } } @@ -52,7 +105,7 @@ mod tests { process.describe(); process.collect(); - let metrics = PROMETHEUS_RECORDER_HANDLE.render(); + let metrics = PROMETHEUS_RECORDER_HANDLE.handle.render(); assert!(metrics.contains("process_cpu_seconds_total"), "{metrics:?}"); } } diff --git a/crates/node/metrics/src/server.rs b/crates/node/metrics/src/server.rs index 87521349d4d..313b578f800 100644 --- a/crates/node/metrics/src/server.rs +++ b/crates/node/metrics/src/server.rs @@ -103,7 +103,7 @@ impl MetricServer { let hook = hook.clone(); let service = tower::service_fn(move |_| { (hook)(); - let metrics = handle.render(); + let metrics = handle.handle().render(); let mut response = Response::new(metrics); response .headers_mut() @@ -113,12 +113,12 @@ impl MetricServer { let mut shutdown = signal.clone().ignore_guard(); tokio::task::spawn(async move { - if let Err(error) = - jsonrpsee::server::serve_with_graceful_shutdown(io, service, &mut shutdown) + let _ = + jsonrpsee_server::serve_with_graceful_shutdown(io, service, &mut shutdown) .await - { - tracing::debug!(%error, "failed to serve request") - } + .inspect_err( + |error| tracing::debug!(%error, "failed to serve request"), + ); }); } }); @@ -206,7 +206,6 @@ const fn describe_io_stats() {} mod tests { use super::*; use reqwest::Client; - use reth_provider::{test_utils::create_test_provider_factory, StaticFileProviderFactory}; use reth_tasks::TaskManager; use socket2::{Domain, Socket, Type}; use std::net::{SocketAddr, TcpListener}; @@ -236,8 +235,7 @@ mod tests { let tasks = TaskManager::current(); let executor = tasks.executor(); - let factory = create_test_provider_factory(); - let hooks = Hooks::new(factory.db_ref().clone(), factory.static_file_provider()); + let hooks = Hooks::builder().build(); let listen_addr = get_random_available_addr(); let config = @@ -252,7 +250,7 @@ mod tests { // Check the response body let body = response.text().await.unwrap(); - assert!(body.contains("reth_db_table_size")); - assert!(body.contains("reth_jemalloc_metadata")); + assert!(body.contains("reth_process_cpu_seconds_total")); + assert!(body.contains("reth_process_start_time_seconds")); } } diff --git a/crates/node/types/Cargo.toml b/crates/node/types/Cargo.toml index f04925d9cd4..588fe7c4062 100644 --- a/crates/node/types/Cargo.toml +++ b/crates/node/types/Cargo.toml @@ -14,4 +14,13 @@ workspace = true # reth reth-chainspec.workspace = true reth-db-api.workspace = true -reth-engine-primitives.workspace = true \ No newline at end of 
file +reth-engine-primitives.workspace = true +reth-primitives-traits.workspace = true +reth-trie-db.workspace = true + +[features] +default = ["std"] +std = [ + "reth-primitives-traits/std", + "reth-chainspec/std", +] \ No newline at end of file diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index 2c72e02d3ed..6e1eb81a0c8 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -7,8 +7,12 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] -use std::marker::PhantomData; +use core::{fmt::Debug, marker::PhantomData}; +pub use reth_primitives_traits::{ + Block, BlockBody, FullBlock, FullNodePrimitives, FullReceipt, FullSignedTx, NodePrimitives, +}; use reth_chainspec::EthChainSpec; use reth_db_api::{ @@ -16,13 +20,7 @@ use reth_db_api::{ Database, }; use reth_engine_primitives::EngineTypes; - -/// Configures all the primitive types of the node. -// TODO(mattsse): this is currently a placeholder -pub trait NodePrimitives {} - -// TODO(mattsse): Placeholder -impl NodePrimitives for () {} +use reth_trie_db::StateCommitment; /// The type that configures the essential types of an Ethereum-like node. /// @@ -33,7 +31,11 @@ pub trait NodeTypes: Send + Sync + Unpin + 'static { /// The node's primitive types, defining basic operations and structures. type Primitives: NodePrimitives; /// The type used for configuration of the EVM. - type ChainSpec: EthChainSpec; + type ChainSpec: EthChainSpec
::BlockHeader>; + /// The type used to perform state commitment operations. + type StateCommitment: StateCommitment; + /// The type responsible for writing chain primitives to storage. + type Storage: Default + Send + Sync + Unpin + Debug + 'static; } /// The type that configures an Ethereum-like node with an engine for consensus. @@ -84,6 +86,8 @@ where { type Primitives = Types::Primitives; type ChainSpec = Types::ChainSpec; + type StateCommitment = Types::StateCommitment; + type Storage = Types::Storage; } impl NodeTypesWithEngine for NodeTypesWithDBAdapter @@ -103,71 +107,143 @@ where } /// A [`NodeTypes`] type builder. -#[derive(Default, Debug)] -pub struct AnyNodeTypes

<P, C>(PhantomData<P>, PhantomData<C>);
+#[derive(Debug)]
+pub struct AnyNodeTypes<P, C, SC, S>(
+    PhantomData<P>,
+    PhantomData<C>,
+    PhantomData<SC>,
+    PhantomData<S>,
+);
+
+impl<P, C, SC, S> Default for AnyNodeTypes<P, C, SC, S> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<P, C, SC, S> AnyNodeTypes<P, C, SC, S> {
+    /// Creates a new instance of [`AnyNodeTypes`].
+    pub const fn new() -> Self {
+        Self(PhantomData, PhantomData, PhantomData, PhantomData)
+    }
-impl<P, C> AnyNodeTypes<P, C> {
     /// Sets the `Primitives` associated type.
-    pub const fn primitives<T>(self) -> AnyNodeTypes<T, C> {
-        AnyNodeTypes::<T, C>(PhantomData::<T>, PhantomData::<C>)
+    pub const fn primitives<T>(self) -> AnyNodeTypes<T, C, SC, S> {
+        AnyNodeTypes::new()
+    }

     /// Sets the `ChainSpec` associated type.
-    pub const fn chain_spec<T>(self) -> AnyNodeTypes<P, T> {
-        AnyNodeTypes::<P, T>(PhantomData::<P>, PhantomData::<T>)
+    pub const fn chain_spec<T>(self) -> AnyNodeTypes<P, T, SC, S> {
+        AnyNodeTypes::new()
+    }
+
+    /// Sets the `StateCommitment` associated type.
+    pub const fn state_commitment<T>(self) -> AnyNodeTypes<P, C, T, S> {
+        AnyNodeTypes::new()
+    }
+
+    /// Sets the `Storage` associated type.
+    pub const fn storage<T>(self) -> AnyNodeTypes<P, C, SC, T> {
+        AnyNodeTypes::new()
+    }
 }

-impl<P, C> NodeTypes for AnyNodeTypes<P, C>
+impl<P, C, SC, S> NodeTypes for AnyNodeTypes<P, C, SC, S>
 where
     P: NodePrimitives + Send + Sync + Unpin + 'static,
-    C: EthChainSpec + 'static,
+    C: EthChainSpec<Header = P::BlockHeader> + 'static,
+    SC: StateCommitment,
+    S: Default + Send + Sync + Unpin + Debug + 'static,
 {
     type Primitives = P;
     type ChainSpec = C;
+    type StateCommitment = SC;
+    type Storage = S;
 }

 /// A [`NodeTypesWithEngine`] type builder.
-#[derive(Default, Debug)]
-pub struct AnyNodeTypesWithEngine<P, E, C> {
+#[derive(Debug)]
+pub struct AnyNodeTypesWithEngine<P, E, C, SC, S> {
     /// Embedding the basic node types.
-    base: AnyNodeTypes<P, C>,
+    _base: AnyNodeTypes<P, C, SC, S>,
     /// Phantom data for the engine.
     _engine: PhantomData<E>,
 }

-impl<P, E, C> AnyNodeTypesWithEngine<P, E, C> {
+impl<P, E, C, SC, S> Default for AnyNodeTypesWithEngine<P, E, C, SC, S> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<P, E, C, SC, S> AnyNodeTypesWithEngine<P, E, C, SC, S> {
+    /// Creates a new instance of [`AnyNodeTypesWithEngine`].
+    pub const fn new() -> Self {
+        Self { _base: AnyNodeTypes::new(), _engine: PhantomData }
+    }
+
     /// Sets the `Primitives` associated type.
-    pub const fn primitives<T>(self) -> AnyNodeTypesWithEngine<T, E, C> {
-        AnyNodeTypesWithEngine { base: self.base.primitives::<T>(), _engine: PhantomData }
+    pub const fn primitives<T>(self) -> AnyNodeTypesWithEngine<T, E, C, SC, S> {
+        AnyNodeTypesWithEngine::new()
+    }

     /// Sets the `Engine` associated type.
-    pub const fn engine<T>(self) -> AnyNodeTypesWithEngine<P, T, C> {
-        AnyNodeTypesWithEngine { base: self.base, _engine: PhantomData::<T> }
+    pub const fn engine<T>(self) -> AnyNodeTypesWithEngine<P, T, C, SC, S> {
+        AnyNodeTypesWithEngine::new()
+    }

     /// Sets the `ChainSpec` associated type.
-    pub const fn chain_spec<T>(self) -> AnyNodeTypesWithEngine<P, E, T> {
-        AnyNodeTypesWithEngine { base: self.base.chain_spec::<T>(), _engine: PhantomData }
+    pub const fn chain_spec<T>(self) -> AnyNodeTypesWithEngine<P, E, T, SC, S> {
+        AnyNodeTypesWithEngine::new()
+    }
+
+    /// Sets the `StateCommitment` associated type.
+    pub const fn state_commitment<T>(self) -> AnyNodeTypesWithEngine<P, E, C, T, S> {
+        AnyNodeTypesWithEngine::new()
+    }
+
+    /// Sets the `Storage` associated type.
+    pub const fn storage<T>(self) -> AnyNodeTypesWithEngine<P, E, C, SC, T> {
+        AnyNodeTypesWithEngine::new()
+    }
 }

-impl<P, E, C> NodeTypes for AnyNodeTypesWithEngine<P, E, C>
+impl<P, E, C, SC, S> NodeTypes for AnyNodeTypesWithEngine<P, E, C, SC, S>
 where
     P: NodePrimitives + Send + Sync + Unpin + 'static,
     E: EngineTypes + Send + Sync + Unpin,
-    C: EthChainSpec + 'static,
+    C: EthChainSpec<Header = P::BlockHeader> + 'static,
+    SC: StateCommitment,
+    S: Default + Send + Sync + Unpin + Debug + 'static,
 {
     type Primitives = P;
     type ChainSpec = C;
+    type StateCommitment = SC;
+    type Storage = S;
 }

-impl<P, E, C> NodeTypesWithEngine for AnyNodeTypesWithEngine<P, E, C>
+impl<P, E, C, SC, S> NodeTypesWithEngine for AnyNodeTypesWithEngine<P, E, C, SC, S>
 where
     P: NodePrimitives + Send + Sync + Unpin + 'static,
     E: EngineTypes + Send + Sync + Unpin,
-    C: EthChainSpec + 'static,
+    C: EthChainSpec<Header = P::BlockHeader>
+ 'static, + SC: StateCommitment, + S: Default + Send + Sync + Unpin + Debug + 'static, { type Engine = E; } + +/// Helper adapter type for accessing [`NodePrimitives::Block`] on [`NodeTypes`]. +pub type BlockTy = <::Primitives as NodePrimitives>::Block; + +/// Helper adapter type for accessing [`NodePrimitives::BlockHeader`] on [`NodeTypes`]. +pub type HeaderTy = <::Primitives as NodePrimitives>::BlockHeader; + +/// Helper adapter type for accessing [`NodePrimitives::BlockBody`] on [`NodeTypes`]. +pub type BodyTy = <::Primitives as NodePrimitives>::BlockBody; + +/// Helper adapter type for accessing [`NodePrimitives::SignedTx`] on [`NodeTypes`]. +pub type TxTy = <::Primitives as NodePrimitives>::SignedTx; + +/// Helper adapter type for accessing [`NodePrimitives::Receipt`] on [`NodeTypes`]. +pub type ReceiptTy = <::Primitives as NodePrimitives>::Receipt; diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 2de0bb6ee18..b182a4f278a 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -9,7 +9,7 @@ repository.workspace = true exclude.workspace = true [dependencies] -reth-node-builder.workspace = true +reth-node-builder = { workspace = true, features = ["js-tracer"] } reth-cli-util.workspace = true reth-optimism-cli.workspace = true reth-provider.workspace = true @@ -37,7 +37,21 @@ tracy-allocator = ["reth-cli-util/tracy-allocator"] asm-keccak = ["reth-optimism-cli/asm-keccak", "reth-optimism-node/asm-keccak"] -optimism = ["reth-optimism-cli/optimism", "reth-optimism-node/optimism"] +optimism = [ + "reth-optimism-cli/optimism", + "reth-optimism-node/optimism", + "reth-optimism-consensus/optimism", + "reth-optimism-evm/optimism", + "reth-optimism-payload-builder/optimism", + "reth-optimism-rpc/optimism", + "reth-provider/optimism", + "reth-optimism-primitives/optimism", +] + +dev = [ + "reth-optimism-cli/dev", + "reth-optimism-primitives/arbitrary", +] min-error-logs = ["tracing/release_max_level_error"] min-warn-logs = ["tracing/release_max_level_warn"] diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index c6d3e32b7cf..82fb3c24195 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -3,9 +3,9 @@ #![cfg(feature = "optimism")] use clap::Parser; -use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher}; +use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher, Node}; use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli}; -use reth_optimism_node::{args::RollupArgs, node::OptimismAddOns, OptimismNode}; +use reth_optimism_node::{args::RollupArgs, OpNode}; use reth_provider::providers::BlockchainProvider2; use tracing as _; @@ -23,17 +23,19 @@ fn main() { if let Err(err) = Cli::::parse().run(|builder, rollup_args| async move { - let enable_engine2 = rollup_args.experimental; - let sequencer_http_arg = rollup_args.sequencer_http.clone(); - match enable_engine2 { - true => { + if rollup_args.experimental { + tracing::warn!(target: "reth::cli", "Experimental engine is default now, and the --engine.experimental flag is deprecated. 
To enable the legacy functionality, use --engine.legacy."); + } + let use_legacy_engine = rollup_args.legacy; + match use_legacy_engine { + false => { let engine_tree_config = TreeConfig::default() .with_persistence_threshold(rollup_args.persistence_threshold) .with_memory_block_buffer_target(rollup_args.memory_block_buffer_target); let handle = builder - .with_types_and_provider::>() - .with_components(OptimismNode::components(rollup_args)) - .with_add_ons(OptimismAddOns::new(sequencer_http_arg)) + .with_types_and_provider::>() + .with_components(OpNode::components(rollup_args.clone())) + .with_add_ons(OpNode::new(rollup_args).add_ons()) .launch_with_fn(|builder| { let launcher = EngineNodeLauncher::new( builder.task_executor().clone(), @@ -46,9 +48,9 @@ fn main() { handle.node_exit_future.await } - false => { + true => { let handle = - builder.node(OptimismNode::new(rollup_args.clone())).launch().await?; + builder.node(OpNode::new(rollup_args.clone())).launch().await?; handle.node_exit_future.await } diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index c9f951c8d20..5ccf2660709 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -25,6 +25,8 @@ reth-optimism-forks.workspace = true alloy-chains.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true +alloy-eips.workspace = true # op op-alloy-rpc-types.workspace = true @@ -44,12 +46,17 @@ op-alloy-rpc-types.workspace = true [features] default = ["std"] std = [ - "alloy-chains/std", - "alloy-genesis/std", - "alloy-primitives/std", - "op-alloy-rpc-types/std", - "reth-chainspec/std", - "reth-ethereum-forks/std", - "reth-primitives-traits/std", - "reth-optimism-forks/std", + "alloy-chains/std", + "alloy-genesis/std", + "alloy-primitives/std", + "alloy-eips/std", + "op-alloy-rpc-types/std", + "reth-chainspec/std", + "reth-ethereum-forks/std", + "reth-primitives-traits/std", + "reth-optimism-forks/std", + "alloy-consensus/std", + "once_cell/std", + "derive_more/std", + "reth-network-peers/std" ] diff --git a/crates/optimism/chainspec/src/base.rs b/crates/optimism/chainspec/src/base.rs index 7aa26bf9a64..f43457ead43 100644 --- a/crates/optimism/chainspec/src/base.rs +++ b/crates/optimism/chainspec/src/base.rs @@ -6,7 +6,7 @@ use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; use crate::{LazyLock, OpChainSpec}; @@ -21,11 +21,11 @@ pub static BASE_MAINNET: LazyLock> = LazyLock::new(|| { "f712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: OptimismHardfork::base_mainnet(), + hardforks: OpHardfork::base_mainnet(), base_fee_params: BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::optimism()), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()), + (OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()), ] .into(), ), diff --git a/crates/optimism/chainspec/src/base_sepolia.rs b/crates/optimism/chainspec/src/base_sepolia.rs index b992dcabaf6..adcb9e2bc1f 100644 --- a/crates/optimism/chainspec/src/base_sepolia.rs +++ b/crates/optimism/chainspec/src/base_sepolia.rs @@ -6,7 +6,7 @@ use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use 
reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; use crate::{LazyLock, OpChainSpec}; @@ -21,11 +21,11 @@ pub static BASE_SEPOLIA: LazyLock> = LazyLock::new(|| { "0dcc9e089e30b90ddfc55be9a37dd15bc551aeee999d2e2b51414c54eaf934e4" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: OptimismHardfork::base_sepolia(), + hardforks: OpHardfork::base_sepolia(), base_fee_params: BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::base_sepolia()), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::base_sepolia_canyon()), + (OpHardfork::Canyon.boxed(), BaseFeeParams::base_sepolia_canyon()), ] .into(), ), diff --git a/crates/optimism/chainspec/src/dev.rs b/crates/optimism/chainspec/src/dev.rs index cb8163dfc52..eae25f73e01 100644 --- a/crates/optimism/chainspec/src/dev.rs +++ b/crates/optimism/chainspec/src/dev.rs @@ -3,10 +3,10 @@ use alloc::sync::Arc; use alloy_chains::Chain; +use alloy_consensus::constants::DEV_GENESIS_HASH; use alloy_primitives::U256; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_optimism_forks::DEV_HARDFORKS; -use reth_primitives_traits::constants::DEV_GENESIS_HASH; use crate::{LazyLock, OpChainSpec}; diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 98c6589d1ce..907599fe2a2 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -17,14 +17,14 @@ mod dev; mod op; mod op_sepolia; -use alloc::{vec, vec::Vec}; +use alloc::{boxed::Box, vec, vec::Vec}; use alloy_chains::Chain; +use alloy_consensus::Header; use alloy_genesis::Genesis; -use alloy_primitives::{Parity, Signature, B256, U256}; +use alloy_primitives::{B256, U256}; pub use base::BASE_MAINNET; pub use base_sepolia::BASE_SEPOLIA; -use core::fmt::Display; -use derive_more::{Constructor, Deref, From, Into}; +use derive_more::{Constructor, Deref, Display, From, Into}; pub use dev::OP_DEV; #[cfg(not(feature = "std"))] pub(crate) use once_cell::sync::Lazy as LazyLock; @@ -36,8 +36,7 @@ use reth_chainspec::{ }; use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; use reth_network_peers::NodeRecord; -use reth_optimism_forks::OptimismHardforks; -use reth_primitives_traits::Header; +use reth_optimism_forks::{OpHardfork, OpHardforks}; #[cfg(feature = "std")] pub(crate) use std::sync::LazyLock; @@ -97,7 +96,7 @@ impl OpChainSpecBuilder { } /// Remove the given fork from the spec. 
-    pub fn without_fork(mut self, fork: reth_optimism_forks::OptimismHardfork) -> Self {
+    pub fn without_fork(mut self, fork: reth_optimism_forks::OpHardfork) -> Self {
         self.inner = self.inner.without_fork(fork);
         self
     }
@@ -105,19 +104,17 @@ impl OpChainSpecBuilder {
     /// Enable Bedrock at genesis
     pub fn bedrock_activated(mut self) -> Self {
         self.inner = self.inner.paris_activated();
-        self.inner = self
-            .inner
-            .with_fork(reth_optimism_forks::OptimismHardfork::Bedrock, ForkCondition::Block(0));
+        self.inner =
+            self.inner.with_fork(reth_optimism_forks::OpHardfork::Bedrock, ForkCondition::Block(0));
         self
     }

     /// Enable Regolith at genesis
     pub fn regolith_activated(mut self) -> Self {
         self = self.bedrock_activated();
-        self.inner = self.inner.with_fork(
-            reth_optimism_forks::OptimismHardfork::Regolith,
-            ForkCondition::Timestamp(0),
-        );
+        self.inner = self
+            .inner
+            .with_fork(reth_optimism_forks::OpHardfork::Regolith, ForkCondition::Timestamp(0));
         self
     }

@@ -128,7 +125,7 @@ impl OpChainSpecBuilder {
         self.inner = self.inner.with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0));
         self.inner = self
             .inner
-            .with_fork(reth_optimism_forks::OptimismHardfork::Canyon, ForkCondition::Timestamp(0));
+            .with_fork(reth_optimism_forks::OpHardfork::Canyon, ForkCondition::Timestamp(0));
         self
     }

@@ -138,7 +135,7 @@ impl OpChainSpecBuilder {
         self.inner = self.inner.with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(0));
         self.inner = self
             .inner
-            .with_fork(reth_optimism_forks::OptimismHardfork::Ecotone, ForkCondition::Timestamp(0));
+            .with_fork(reth_optimism_forks::OpHardfork::Ecotone, ForkCondition::Timestamp(0));
         self
     }

@@ -147,7 +144,7 @@ impl OpChainSpecBuilder {
         self = self.ecotone_activated();
         self.inner = self
             .inner
-            .with_fork(reth_optimism_forks::OptimismHardfork::Fjord, ForkCondition::Timestamp(0));
+            .with_fork(reth_optimism_forks::OpHardfork::Fjord, ForkCondition::Timestamp(0));
         self
     }

@@ -156,7 +153,23 @@ impl OpChainSpecBuilder {
         self = self.fjord_activated();
         self.inner = self
             .inner
-            .with_fork(reth_optimism_forks::OptimismHardfork::Granite, ForkCondition::Timestamp(0));
+            .with_fork(reth_optimism_forks::OpHardfork::Granite, ForkCondition::Timestamp(0));
+        self
+    }
+
+    /// Enable Holocene at genesis
+    pub fn holocene_activated(mut self) -> Self {
+        self = self.granite_activated();
+        self.inner = self
+            .inner
+            .with_fork(reth_optimism_forks::OpHardfork::Holocene, ForkCondition::Timestamp(0));
+        self
+    }
+
+    /// Enable Isthmus at genesis
+    pub fn isthmus_activated(mut self) -> Self {
+        self = self.holocene_activated();
+        self.inner = self.inner.with_fork(OpHardfork::Isthmus, ForkCondition::Timestamp(0));
+        self
     }

@@ -178,13 +191,96 @@ pub struct OpChainSpec {
     pub inner: ChainSpec,
 }

-/// Returns the signature for the optimism deposit transactions, which don't include a
-/// signature.
-pub fn optimism_deposit_tx_signature() -> Signature {
-    Signature::new(U256::ZERO, U256::ZERO, Parity::Parity(false))
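A self-contained sketch of the extra-data layout that `decode_holocene_1559_params` below parses: one version byte, then two big-endian `u32`s (base-fee denominator, then elasticity). The error type here is reduced to a single variant for brevity; the diff's version distinguishes denominator and elasticity failures.

```rust
#[derive(Debug, PartialEq)]
enum DecodeError {
    InsufficientData,
}

/// extra_data layout: [version: 1 byte | denominator: 4 bytes BE | elasticity: 4 bytes BE]
fn decode_1559_params(extra_data: &[u8]) -> Result<(u32, u32), DecodeError> {
    if extra_data.len() < 9 {
        return Err(DecodeError::InsufficientData);
    }
    let denominator = u32::from_be_bytes(extra_data[1..5].try_into().unwrap());
    let elasticity = u32::from_be_bytes(extra_data[5..9].try_into().unwrap());
    Ok((denominator, elasticity))
}

fn main() {
    // Mirrors the `test_get_base_fee_holocene_extra_data_set` fixture further down:
    // denominator = 8, elasticity = 8.
    let extra_data = [0u8, 0, 0, 0, 8, 0, 0, 0, 8];
    assert_eq!(decode_1559_params(&extra_data), Ok((8, 8)));

    // A zeroed parameter block decodes to (0, 0), which the caller treats as
    // "fall back to the chain spec's default base-fee params".
    assert_eq!(decode_1559_params(&[0u8; 9]), Ok((0, 0)));
}
```

When both values decode to zero, `decode_holocene_base_fee` below falls back to `base_fee_params_at_timestamp`, so an unset parameter block behaves like pre-Holocene EIP-1559.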
+impl OpChainSpec {
+    /// Extracts the Holocene 1559 parameters from the encoded `extra_data` of the parent header.
+    ///
+    /// Caution: Caller must ensure that Holocene is active in the parent header.
+    ///
+    /// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation)
+    pub fn decode_holocene_base_fee(
+        &self,
+        parent: &Header,
+        timestamp: u64,
+    ) -> Result<u64, DecodeError> {
+        let (denominator, elasticity) = decode_holocene_1559_params(&parent.extra_data)?;
+        let base_fee = if elasticity == 0 && denominator == 0 {
+            parent
+                .next_block_base_fee(self.base_fee_params_at_timestamp(timestamp))
+                .unwrap_or_default()
+        } else {
+            let base_fee_params = BaseFeeParams::new(denominator as u128, elasticity as u128);
+            parent.next_block_base_fee(base_fee_params).unwrap_or_default()
+        };
+        Ok(base_fee)
+    }
+
+    /// Reads from the parent header to determine the base fee for the next block.
+    ///
+    /// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation)
+    pub fn next_block_base_fee(
+        &self,
+        parent: &Header,
+        timestamp: u64,
+    ) -> Result<U256, DecodeError> {
+        // > if Holocene is active in parent_header.timestamp, then the parameters from
+        // > parent_header.extraData are used.
+        let is_holocene_activated = self.inner.is_fork_active_at_timestamp(
+            reth_optimism_forks::OpHardfork::Holocene,
+            parent.timestamp,
+        );
+
+        // If Holocene is active, use the base fee params from the parent block's
+        // extra data; otherwise, use the default base fee params from the chain spec.
+        if is_holocene_activated {
+            Ok(U256::from(self.decode_holocene_base_fee(parent, timestamp)?))
+        } else {
+            Ok(U256::from(
+                parent
+                    .next_block_base_fee(self.base_fee_params_at_timestamp(timestamp))
+                    .unwrap_or_default(),
+            ))
+        }
+    }
+}
+
+#[derive(Clone, Debug, Display, Eq, PartialEq)]
+/// Error type for decoding Holocene 1559 parameters
+pub enum DecodeError {
+    #[display("Insufficient data to decode")]
+    /// Insufficient data to decode
+    InsufficientData,
+    #[display("Invalid denominator parameter")]
+    /// Invalid denominator parameter
+    InvalidDenominator,
+    #[display("Invalid elasticity parameter")]
+    /// Invalid elasticity parameter
+    InvalidElasticity,
+}
+
+impl core::error::Error for DecodeError {
+    fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
+        // None of the errors have sub-errors
+        None
+    }
+}
+
+/// Extracts the Holocene 1559 parameters from the encoded form:
+///
+pub fn decode_holocene_1559_params(extra_data: &[u8]) -> Result<(u32, u32), DecodeError> {
+    if extra_data.len() < 9 {
+        return Err(DecodeError::InsufficientData);
+    }
+    let denominator: [u8; 4] =
+        extra_data[1..5].try_into().map_err(|_| DecodeError::InvalidDenominator)?;
+    let elasticity: [u8; 4] =
+        extra_data[5..9].try_into().map_err(|_| DecodeError::InvalidElasticity)?;
+    Ok((u32::from_be_bytes(denominator), u32::from_be_bytes(elasticity)))
+}

 impl EthChainSpec for OpChainSpec {
+    type Header = Header;
+
     fn chain(&self) -> alloy_chains::Chain {
         self.inner.chain()
     }
@@ -209,11 +305,11 @@ impl EthChainSpec for OpChainSpec {
         self.inner.prune_delete_limit()
     }

-    fn display_hardforks(&self) -> impl Display {
-        self.inner.display_hardforks()
-    }
+    fn display_hardforks(&self) -> Box<dyn Display> {
+        Box::new(ChainSpec::display_hardforks(self))
+    }

-    fn genesis_header(&self) -> &Header {
+    fn genesis_header(&self) -> &Self::Header {
         self.inner.genesis_header()
     }

@@ -268,12 +364,12 @@ impl EthereumHardforks for OpChainSpec {
     }
 }

-impl OptimismHardforks for OpChainSpec {}
+impl OpHardforks for OpChainSpec {}

 impl From<Genesis> for OpChainSpec {
     fn from(genesis: Genesis) -> Self {
-        use
reth_optimism_forks::OptimismHardfork; - let optimism_genesis_info = OptimismGenesisInfo::extract_from(&genesis); + use reth_optimism_forks::OpHardfork; + let optimism_genesis_info = OpGenesisInfo::extract_from(&genesis); let genesis_info = optimism_genesis_info.optimism_chain_info.genesis_info.unwrap_or_default(); @@ -291,7 +387,7 @@ impl From for OpChainSpec { (EthereumHardfork::London.boxed(), genesis.config.london_block), (EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block), (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block), - (OptimismHardfork::Bedrock.boxed(), genesis_info.bedrock_block), + (OpHardfork::Bedrock.boxed(), genesis_info.bedrock_block), ]; let mut block_hardforks = hardfork_opts .into_iter() @@ -319,11 +415,13 @@ impl From for OpChainSpec { (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time), (EthereumHardfork::Prague.boxed(), genesis.config.prague_time), - (OptimismHardfork::Regolith.boxed(), genesis_info.regolith_time), - (OptimismHardfork::Canyon.boxed(), genesis_info.canyon_time), - (OptimismHardfork::Ecotone.boxed(), genesis_info.ecotone_time), - (OptimismHardfork::Fjord.boxed(), genesis_info.fjord_time), - (OptimismHardfork::Granite.boxed(), genesis_info.granite_time), + (OpHardfork::Regolith.boxed(), genesis_info.regolith_time), + (OpHardfork::Canyon.boxed(), genesis_info.canyon_time), + (OpHardfork::Ecotone.boxed(), genesis_info.ecotone_time), + (OpHardfork::Fjord.boxed(), genesis_info.fjord_time), + (OpHardfork::Granite.boxed(), genesis_info.granite_time), + (OpHardfork::Holocene.boxed(), genesis_info.holocene_time), + (OpHardfork::Isthmus.boxed(), genesis_info.isthmus_time), ]; let mut time_hardforks = time_hardfork_opts @@ -336,7 +434,7 @@ impl From for OpChainSpec { block_hardforks.append(&mut time_hardforks); // Ordered Hardforks - let mainnet_hardforks = OptimismHardfork::op_mainnet(); + let mainnet_hardforks = OpHardfork::op_mainnet(); let mainnet_order = mainnet_hardforks.forks_iter(); let mut ordered_hardforks = Vec::with_capacity(block_hardforks.len()); @@ -363,15 +461,15 @@ impl From for OpChainSpec { } #[derive(Default, Debug)] -struct OptimismGenesisInfo { - optimism_chain_info: op_alloy_rpc_types::genesis::OpChainInfo, +struct OpGenesisInfo { + optimism_chain_info: op_alloy_rpc_types::OpChainInfo, base_fee_params: BaseFeeParamsKind, } -impl OptimismGenesisInfo { +impl OpGenesisInfo { fn extract_from(genesis: &Genesis) -> Self { let mut info = Self { - optimism_chain_info: op_alloy_rpc_types::genesis::OpChainInfo::extract_from( + optimism_chain_info: op_alloy_rpc_types::OpChainInfo::extract_from( &genesis.config.extra_fields, ) .unwrap_or_default(), @@ -392,7 +490,7 @@ impl OptimismGenesisInfo { BaseFeeParams::new(denominator as u128, elasticity as u128), ), ( - reth_optimism_forks::OptimismHardfork::Canyon.boxed(), + reth_optimism_forks::OpHardfork::Canyon.boxed(), BaseFeeParams::new(canyon_denominator as u128, elasticity as u128), ), ] @@ -412,11 +510,13 @@ impl OptimismGenesisInfo { #[cfg(test)] mod tests { + use std::sync::Arc; + use alloy_genesis::{ChainConfig, Genesis}; - use alloy_primitives::b256; + use alloy_primitives::{b256, Bytes}; use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind}; use reth_ethereum_forks::{EthereumHardfork, ForkCondition, ForkHash, ForkId, Head}; - use reth_optimism_forks::{OptimismHardfork, OptimismHardforks}; + use reth_optimism_forks::{OpHardfork, OpHardforks}; use 
crate::*; @@ -502,7 +602,11 @@ mod tests { ), ( Head { number: 0, timestamp: 1723478400, ..Default::default() }, - ForkId { hash: ForkHash([0x75, 0xde, 0xa4, 0x1e]), next: 0 }, + ForkId { hash: ForkHash([0x75, 0xde, 0xa4, 0x1e]), next: 1732633200 }, + ), + ( + Head { number: 0, timestamp: 1732633200, ..Default::default() }, + ForkId { hash: ForkHash([0x4a, 0x1c, 0x79, 0x2e]), next: 0 }, ), ], ); @@ -569,7 +673,11 @@ mod tests { ), ( Head { number: 0, timestamp: 1723478400, ..Default::default() }, - ForkId { hash: ForkHash([0x5e, 0xdf, 0xa3, 0xb6]), next: 0 }, + ForkId { hash: ForkHash([0x5e, 0xdf, 0xa3, 0xb6]), next: 1732633200 }, + ), + ( + Head { number: 0, timestamp: 1732633200, ..Default::default() }, + ForkId { hash: ForkHash([0x8b, 0x5e, 0x76, 0x29]), next: 0 }, ), ], ); @@ -651,6 +759,7 @@ mod tests { "ecotoneTime": 40, "fjordTime": 50, "graniteTime": 51, + "holoceneTime": 52, "optimism": { "eip1559Elasticity": 60, "eip1559Denominator": 70 @@ -672,6 +781,8 @@ mod tests { assert_eq!(actual_fjord_timestamp, Some(serde_json::Value::from(50)).as_ref()); let actual_granite_timestamp = genesis.config.extra_fields.get("graniteTime"); assert_eq!(actual_granite_timestamp, Some(serde_json::Value::from(51)).as_ref()); + let actual_holocene_timestamp = genesis.config.extra_fields.get("holoceneTime"); + assert_eq!(actual_holocene_timestamp, Some(serde_json::Value::from(52)).as_ref()); let optimism_object = genesis.config.extra_fields.get("optimism").unwrap(); assert_eq!( @@ -689,19 +800,21 @@ mod tests { BaseFeeParamsKind::Constant(BaseFeeParams::new(70, 60)) ); - assert!(!chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Granite, 0)); - - assert!(chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 10)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 30)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 40)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 50)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Granite, 51)); + assert!(!chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 0)); + + assert!(chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 10)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 20)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, 30)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 40)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 50)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 51)); + 
assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 52)); } #[test] @@ -715,6 +828,7 @@ mod tests { "ecotoneTime": 40, "fjordTime": 50, "graniteTime": 51, + "holoceneTime": 52, "optimism": { "eip1559Elasticity": 60, "eip1559Denominator": 70, @@ -737,6 +851,8 @@ mod tests { assert_eq!(actual_fjord_timestamp, Some(serde_json::Value::from(50)).as_ref()); let actual_granite_timestamp = genesis.config.extra_fields.get("graniteTime"); assert_eq!(actual_granite_timestamp, Some(serde_json::Value::from(51)).as_ref()); + let actual_holocene_timestamp = genesis.config.extra_fields.get("holoceneTime"); + assert_eq!(actual_holocene_timestamp, Some(serde_json::Value::from(52)).as_ref()); let optimism_object = genesis.config.extra_fields.get("optimism").unwrap(); assert_eq!( @@ -755,30 +871,32 @@ mod tests { BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::new(70, 60)), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::new(80, 60)), + (OpHardfork::Canyon.boxed(), BaseFeeParams::new(80, 60)), ] .into() ) ); - assert!(!chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Granite, 0)); - - assert!(chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 10)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 30)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 40)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 50)); - assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Granite, 51)); + assert!(!chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 0)); + + assert!(chain_spec.is_fork_active_at_block(OpHardfork::Bedrock, 10)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, 20)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, 30)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Ecotone, 40)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, 50)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Granite, 51)); + assert!(chain_spec.is_fork_active_at_timestamp(OpHardfork::Holocene, 52)); } #[test] fn parse_genesis_optimism_with_variable_base_fee_params() { - use op_alloy_rpc_types::genesis::OpBaseFeeInfo; + use op_alloy_rpc_types::OpBaseFeeInfo; let geth_genesis = r#" { @@ -847,14 +965,14 @@ mod tests { }) ); - assert!(chainspec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0)); + assert!(chainspec.is_fork_active_at_block(OpHardfork::Bedrock, 0)); - assert!(chainspec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); + 
assert!(chainspec.is_fork_active_at_timestamp(OpHardfork::Regolith, 20)); } #[test] fn test_fork_order_optimism_mainnet() { - use reth_optimism_forks::OptimismHardfork; + use reth_optimism_forks::OpHardfork; let genesis = Genesis { config: ChainConfig { @@ -885,6 +1003,7 @@ mod tests { (String::from("ecotoneTime"), 0.into()), (String::from("fjordTime"), 0.into()), (String::from("graniteTime"), 0.into()), + (String::from("holoceneTime"), 0.into()), ] .into_iter() .collect(), @@ -910,14 +1029,16 @@ mod tests { EthereumHardfork::ArrowGlacier.boxed(), EthereumHardfork::GrayGlacier.boxed(), EthereumHardfork::Paris.boxed(), - OptimismHardfork::Bedrock.boxed(), - OptimismHardfork::Regolith.boxed(), + OpHardfork::Bedrock.boxed(), + OpHardfork::Regolith.boxed(), EthereumHardfork::Shanghai.boxed(), - OptimismHardfork::Canyon.boxed(), + OpHardfork::Canyon.boxed(), EthereumHardfork::Cancun.boxed(), - OptimismHardfork::Ecotone.boxed(), - OptimismHardfork::Fjord.boxed(), - OptimismHardfork::Granite.boxed(), + OpHardfork::Ecotone.boxed(), + OpHardfork::Fjord.boxed(), + OpHardfork::Granite.boxed(), + OpHardfork::Holocene.boxed(), + // OpHardfork::Isthmus.boxed(), ]; assert!(expected_hardforks @@ -926,4 +1047,87 @@ mod tests { .all(|(expected, actual)| &**expected == *actual)); assert_eq!(expected_hardforks.len(), hardforks.len()); } + + #[test] + fn test_get_base_fee_pre_holocene() { + let op_chain_spec = &BASE_SEPOLIA; + let parent = Header { + base_fee_per_gas: Some(1), + gas_used: 15763614, + gas_limit: 144000000, + ..Default::default() + }; + let base_fee = op_chain_spec.next_block_base_fee(&parent, 0); + assert_eq!( + base_fee.unwrap(), + U256::from( + parent + .next_block_base_fee(op_chain_spec.base_fee_params_at_timestamp(0)) + .unwrap_or_default() + ) + ); + } + + fn holocene_chainspec() -> Arc { + let mut hardforks = OpHardfork::base_sepolia(); + hardforks.insert(OpHardfork::Holocene.boxed(), ForkCondition::Timestamp(1800000000)); + Arc::new(OpChainSpec { + inner: ChainSpec { + chain: BASE_SEPOLIA.inner.chain, + genesis: BASE_SEPOLIA.inner.genesis.clone(), + genesis_hash: BASE_SEPOLIA.inner.genesis_hash.clone(), + paris_block_and_final_difficulty: Some((0, U256::from(0))), + hardforks, + base_fee_params: BASE_SEPOLIA.inner.base_fee_params.clone(), + max_gas_limit: crate::constants::BASE_SEPOLIA_MAX_GAS_LIMIT, + prune_delete_limit: 10000, + ..Default::default() + }, + }) + } + + #[test] + fn test_get_base_fee_holocene_extra_data_not_set() { + let op_chain_spec = holocene_chainspec(); + let parent = Header { + base_fee_per_gas: Some(1), + gas_used: 15763614, + gas_limit: 144000000, + timestamp: 1800000003, + extra_data: Bytes::from_static(&[0, 0, 0, 0, 0, 0, 0, 0, 0]), + ..Default::default() + }; + let base_fee = op_chain_spec.next_block_base_fee(&parent, 1800000005); + assert_eq!( + base_fee.unwrap(), + U256::from( + parent + .next_block_base_fee(op_chain_spec.base_fee_params_at_timestamp(0)) + .unwrap_or_default() + ) + ); + } + + #[test] + fn test_get_base_fee_holocene_extra_data_set() { + let op_chain_spec = holocene_chainspec(); + let parent = Header { + base_fee_per_gas: Some(1), + gas_used: 15763614, + gas_limit: 144000000, + extra_data: Bytes::from_static(&[0, 0, 0, 0, 8, 0, 0, 0, 8]), + timestamp: 1800000003, + ..Default::default() + }; + + let base_fee = op_chain_spec.next_block_base_fee(&parent, 1800000005); + assert_eq!( + base_fee.unwrap(), + U256::from( + parent + .next_block_base_fee(BaseFeeParams::new(0x00000008, 0x00000008)) + .unwrap_or_default() + ) + ); + } } diff --git 
a/crates/optimism/chainspec/src/op.rs b/crates/optimism/chainspec/src/op.rs index 8c0da5320f9..fcbe7dee7dd 100644 --- a/crates/optimism/chainspec/src/op.rs +++ b/crates/optimism/chainspec/src/op.rs @@ -3,11 +3,11 @@ use alloc::{sync::Arc, vec}; use alloy_chains::Chain; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; -use reth_optimism_forks::OptimismHardfork; -use reth_primitives_traits::constants::ETHEREUM_BLOCK_GAS_LIMIT; +use reth_optimism_forks::OpHardfork; use crate::{LazyLock, OpChainSpec}; @@ -24,11 +24,11 @@ pub static OP_MAINNET: LazyLock> = LazyLock::new(|| { "7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: OptimismHardfork::op_mainnet(), + hardforks: OpHardfork::op_mainnet(), base_fee_params: BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::optimism()), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()), + (OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_canyon()), ] .into(), ), diff --git a/crates/optimism/chainspec/src/op_sepolia.rs b/crates/optimism/chainspec/src/op_sepolia.rs index d3243ebd534..35466cb2154 100644 --- a/crates/optimism/chainspec/src/op_sepolia.rs +++ b/crates/optimism/chainspec/src/op_sepolia.rs @@ -3,11 +3,11 @@ use alloc::{sync::Arc, vec}; use alloy_chains::{Chain, NamedChain}; +use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; use reth_ethereum_forks::EthereumHardfork; -use reth_optimism_forks::OptimismHardfork; -use reth_primitives_traits::constants::ETHEREUM_BLOCK_GAS_LIMIT; +use reth_optimism_forks::OpHardfork; use crate::{LazyLock, OpChainSpec}; @@ -22,11 +22,11 @@ pub static OP_SEPOLIA: LazyLock> = LazyLock::new(|| { "102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: OptimismHardfork::op_sepolia(), + hardforks: OpHardfork::op_sepolia(), base_fee_params: BaseFeeParamsKind::Variable( vec![ (EthereumHardfork::London.boxed(), BaseFeeParams::optimism_sepolia()), - (OptimismHardfork::Canyon.boxed(), BaseFeeParams::optimism_sepolia_canyon()), + (OpHardfork::Canyon.boxed(), BaseFeeParams::optimism_sepolia_canyon()), ] .into(), ), diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index d53270cd62f..4e18b51160e 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -26,6 +26,7 @@ reth-execution-types.workspace = true reth-node-core.workspace = true reth-optimism-node.workspace = true reth-primitives.workspace = true +reth-fs-util.workspace = true # so jemalloc metrics can be included reth-node-metrics.workspace = true @@ -47,11 +48,15 @@ reth-node-builder.workspace = true reth-tracing.workspace = true # eth +alloy-eips.workspace = true +alloy-consensus = { workspace = true, optional = true } alloy-primitives.workspace = true alloy-rlp.workspace = true # misc futures-util.workspace = true +derive_more = { workspace = true, optional = true } +serde = { workspace = true, optional = true } clap = { workspace = true, features = ["derive", "env"] } @@ -65,6 +70,11 @@ tokio-util = { workspace = true, features = ["codec"] } tracing.workspace = true eyre.workspace = true +# reth test-vectors 
+proptest = { workspace = true, optional = true } +op-alloy-consensus = { workspace = true, optional = true } + + [dev-dependencies] tempfile.workspace = true reth-stages = { workspace = true, features = ["test-utils"] } @@ -73,11 +83,20 @@ reth-cli-commands.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-optimism-evm/optimism", - "reth-provider/optimism", - "reth-node-core/optimism", - "reth-optimism-node/optimism", + "op-alloy-consensus", + "alloy-consensus", + "dep:derive_more", + "dep:serde", + "reth-primitives/optimism", + "reth-optimism-evm/optimism", + "reth-provider/optimism", + "reth-node-core/optimism", + "reth-optimism-node/optimism", + "reth-execution-types/optimism", + "reth-db/optimism", + "reth-db-api/optimism", + "reth-optimism-primitives/optimism", + "reth-downloaders/optimism" ] asm-keccak = [ "alloy-primitives/asm-keccak", @@ -91,3 +110,17 @@ jemalloc = [ "reth-node-core/jemalloc", "reth-node-metrics/jemalloc" ] + +dev = [ + "dep:proptest", + "reth-cli-commands/arbitrary" +] +serde = [ + "alloy-consensus?/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "op-alloy-consensus?/serde", + "reth-execution-types/serde", + "reth-provider/serde", + "reth-optimism-primitives/serde", +] diff --git a/crates/optimism/cli/src/commands/build_pipeline.rs b/crates/optimism/cli/src/commands/build_pipeline.rs index f23cb9a7c16..8ebefdcc0b4 100644 --- a/crates/optimism/cli/src/commands/build_pipeline.rs +++ b/crates/optimism/cli/src/commands/build_pipeline.rs @@ -1,5 +1,6 @@ use alloy_primitives::B256; use futures_util::{Stream, StreamExt}; +use reth_cli_commands::common::CliNodeTypes; use reth_config::Config; use reth_consensus::Consensus; use reth_downloaders::{ @@ -11,11 +12,13 @@ use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; -use reth_node_builder::NodeTypesWithDB; use reth_node_events::node::NodeEvent; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_evm::OpExecutorProvider; -use reth_provider::{BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderFactory}; +use reth_provider::{ + providers::ProviderNodeTypes, BlockNumReader, ChainSpecProvider, HeaderProvider, + ProviderFactory, +}; use reth_prune::PruneModes; use reth_stages::{sets::DefaultStages, Pipeline, StageSet}; use reth_stages_types::StageId; @@ -36,7 +39,7 @@ pub(crate) async fn build_import_pipeline( disable_exec: bool, ) -> eyre::Result<(Pipeline, impl Stream)> where - N: NodeTypesWithDB, + N: CliNodeTypes + ProviderNodeTypes, C: Consensus + 'static, { if !file_client.has_canonical_blocks() { @@ -75,6 +78,7 @@ where .with_tip_sender(tip_tx) // we want to sync all blocks the file client provides or 0 if empty .with_max_block(max_block) + .with_fail_on_unwind(true) .add_stages( DefaultStages::new( provider_factory.clone(), diff --git a/crates/optimism/cli/src/commands/import.rs b/crates/optimism/cli/src/commands/import.rs index e5f037c3d5c..5e3de5a8671 100644 --- a/crates/optimism/cli/src/commands/import.rs +++ b/crates/optimism/cli/src/commands/import.rs @@ -2,14 +2,13 @@ //! file. 
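The import pipeline assembled in build_pipeline.rs above is configured with `.with_max_block(..)` (sync exactly as far as the file client provides) and the newly added `.with_fail_on_unwind(true)`: an offline file import has no live peers to re-sync from, so any unwind should abort the import rather than loop. A toy model of that control flow, with stand-in types that are not reth's `Pipeline`:

```rust
/// Toy pipeline: syncs to a fixed target block and refuses to unwind.
struct Pipeline {
    max_block: u64,
    fail_on_unwind: bool,
}

impl Pipeline {
    fn run(&self, current: u64, unwind_to: Option<u64>) -> Result<u64, String> {
        if let Some(target) = unwind_to {
            if self.fail_on_unwind {
                return Err(format!("unwind to {target} requested during import; aborting"));
            }
            return Ok(target);
        }
        // No unwind requested: advance to the file's max block.
        Ok(self.max_block.max(current))
    }
}

fn main() {
    let pipeline = Pipeline { max_block: 1_000, fail_on_unwind: true };
    assert_eq!(pipeline.run(0, None), Ok(1_000));
    assert!(pipeline.run(500, Some(400)).is_err());
}
```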
use clap::Parser; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_consensus::noop::NoopConsensus; use reth_db::tables; use reth_db_api::transaction::DbTx; use reth_downloaders::file_client::{ ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE, }; -use reth_node_builder::NodeTypesWithEngine; use reth_node_core::version::SHORT_VERSION; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::bedrock::is_dup_tx; @@ -42,9 +41,7 @@ pub struct ImportOpCommand { impl> ImportOpCommand { /// Execute `import` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); info!(target: "reth::cli", diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index 838a99818e9..a5c12a48cfb 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -5,22 +5,21 @@ use std::path::{Path, PathBuf}; use clap::Parser; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_db::tables; use reth_downloaders::{ file_client::{ChunkedFileReader, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, receipt_file_client::ReceiptFileClient, }; use reth_execution_types::ExecutionOutcome; -use reth_node_builder::{NodeTypesWithDB, NodeTypesWithEngine}; use reth_node_core::version::SHORT_VERSION; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::bedrock::is_dup_tx; -use reth_primitives::Receipts; +use reth_primitives::{NodePrimitives, Receipts}; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, DatabaseProviderFactory, OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StageCheckpointWriter, - StateWriter, StaticFileProviderFactory, StaticFileWriter, StatsReader, + StateWriter, StaticFileProviderFactory, StatsReader, StorageLocation, }; use reth_stages::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -48,9 +47,7 @@ pub struct ImportReceiptsOpCommand { impl> ImportReceiptsOpCommand { /// Execute `import` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); debug!(target: "reth::cli", @@ -88,7 +85,10 @@ pub async fn import_receipts_from_file( filter: F, ) -> eyre::Result<()> where - N: NodeTypesWithDB, + N: ProviderNodeTypes< + ChainSpec = OpChainSpec, + Primitives: NodePrimitives, + >, P: AsRef, F: FnMut(u64, &mut Receipts) -> usize, { @@ -126,7 +126,7 @@ pub async fn import_receipts_from_reader( mut filter: F, ) -> eyre::Result where - N: ProviderNodeTypes, + N: ProviderNodeTypes>, F: FnMut(u64, &mut Receipts) -> usize, { let static_file_provider = provider_factory.static_file_provider(); @@ -150,7 +150,7 @@ where } } - let provider = provider_factory.provider_rw()?; + let provider = provider_factory.database_provider_rw()?; let mut total_decoded_receipts = 0; let mut total_receipts = 0; let mut total_filtered_out_dup_txns = 0; @@ -222,11 +222,11 @@ where ExecutionOutcome::new(Default::default(), receipts, first_block, 
Default::default()); // finally, write the receipts - let mut storage_writer = UnifiedStorageWriter::from( - &provider, - static_file_provider.latest_writer(StaticFileSegment::Receipts)?, - ); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + provider.write_state( + execution_outcome, + OriginalValuesKnown::Yes, + StorageLocation::StaticFiles, + )?; } // Only commit if we have imported as many receipts as the number of transactions. @@ -247,7 +247,7 @@ where provider .save_stage_checkpoint(StageId::Execution, StageCheckpoint::new(highest_block_receipts))?; - UnifiedStorageWriter::commit(provider, static_file_provider)?; + UnifiedStorageWriter::commit(provider)?; Ok(ImportReceiptsResult { total_decoded_receipts, total_filtered_out_dup_txns }) } diff --git a/crates/optimism/cli/src/commands/init_state/mod.rs b/crates/optimism/cli/src/commands/init_state.rs similarity index 80% rename from crates/optimism/cli/src/commands/init_state/mod.rs rename to crates/optimism/cli/src/commands/init_state.rs index 3537f89e751..7bbfc3bb820 100644 --- a/crates/optimism/cli/src/commands/init_state/mod.rs +++ b/crates/optimism/cli/src/commands/init_state.rs @@ -2,20 +2,18 @@ use clap::Parser; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, Environment}; +use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment}; use reth_db_common::init::init_from_state_dump; -use reth_node_builder::NodeTypesWithEngine; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_primitives::bedrock::BEDROCK_HEADER; +use reth_optimism_primitives::bedrock::{BEDROCK_HEADER, BEDROCK_HEADER_HASH, BEDROCK_HEADER_TTD}; +use reth_primitives::SealedHeader; use reth_provider::{ BlockNumReader, ChainSpecProvider, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; -use std::{fs::File, io::BufReader}; +use std::io::BufReader; use tracing::info; -mod bedrock; - /// Initializes the database with the genesis block. #[derive(Debug, Parser)] pub struct InitStateCommandOp { @@ -37,9 +35,7 @@ pub struct InitStateCommandOp { impl> InitStateCommandOp { /// Execute the `init` command - pub async fn execute>( - self, - ) -> eyre::Result<()> { + pub async fn execute>(self) -> eyre::Result<()> { info!(target: "reth::cli", "Reth init-state starting"); let Environment { config, provider_factory, .. } = @@ -53,7 +49,11 @@ impl> InitStateCommandOp { let last_block_number = provider_rw.last_block_number()?; if last_block_number == 0 { - bedrock::setup_op_mainnet_without_ovm(&provider_rw, &static_file_provider)?; + reth_cli_commands::init_state::without_evm::setup_without_evm( + &provider_rw, + SealedHeader::new(BEDROCK_HEADER, BEDROCK_HEADER_HASH), + BEDROCK_HEADER_TTD, + )?; // SAFETY: it's safe to commit static files, since in the event of a crash, they // will be unwinded according to database checkpoints. 
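The `init-state` fast path above seeds an empty database with the known Bedrock header, its hash, and its terminal total difficulty, so an OP mainnet node can sync from block 105,235,063 without replaying pre-Bedrock OVM history. A simplified model of that bootstrap check; `SealedHeader` and `Db` are stand-ins, the block number is the well-known Bedrock activation height, and the hash here is a dummy:

```rust
struct SealedHeader {
    number: u64,
    hash: [u8; 32],
}

#[derive(Default)]
struct Db {
    headers: Vec<SealedHeader>,
}

impl Db {
    fn last_block_number(&self) -> u64 {
        self.headers.last().map(|h| h.number).unwrap_or(0)
    }

    /// Insert a trusted header so later stages treat it as the sync base,
    /// in the spirit of `setup_without_evm` above.
    fn setup_without_evm(&mut self, header: SealedHeader) {
        self.headers.push(header);
    }
}

fn main() {
    let mut db = Db::default();
    // Only bootstrap when the database is empty, mirroring the check above.
    if db.last_block_number() == 0 {
        db.setup_without_evm(SealedHeader { number: 105_235_063, hash: [0u8; 32] });
    }
    let base = db.headers.last().expect("just inserted");
    println!("bootstrapped at block {} (hash {:02x?}…)", base.number, &base.hash[..4]);
    assert_eq!(db.last_block_number(), 105_235_063);
}
```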
@@ -70,7 +70,7 @@ impl> InitStateCommandOp { info!(target: "reth::cli", "Initiating state dump"); - let reader = BufReader::new(File::open(self.init_state.state)?); + let reader = BufReader::new(reth_fs_util::open(self.init_state.state)?); let hash = init_from_state_dump(reader, &provider_rw, config.stages.etl)?; provider_rw.commit()?; diff --git a/crates/optimism/cli/src/commands/mod.rs b/crates/optimism/cli/src/commands/mod.rs index a7674ec2c9b..d51f8993296 100644 --- a/crates/optimism/cli/src/commands/mod.rs +++ b/crates/optimism/cli/src/commands/mod.rs @@ -16,6 +16,9 @@ pub mod import; pub mod import_receipts; pub mod init_state; +#[cfg(feature = "dev")] +pub mod test_vectors; + /// Commands to be executed #[derive(Debug, Subcommand)] pub enum Commands @@ -55,4 +58,8 @@ pub enum Commands), + /// Generate Test Vectors + #[cfg(feature = "dev")] + #[command(name = "test-vectors")] + TestVectors(test_vectors::Command), } diff --git a/crates/optimism/cli/src/commands/test_vectors.rs b/crates/optimism/cli/src/commands/test_vectors.rs new file mode 100644 index 00000000000..093d63148ee --- /dev/null +++ b/crates/optimism/cli/src/commands/test_vectors.rs @@ -0,0 +1,72 @@ +//! Command for generating test vectors. + +use clap::{Parser, Subcommand}; +use op_alloy_consensus::TxDeposit; +use proptest::test_runner::TestRunner; +use reth_cli_commands::{ + compact_types, + test_vectors::{ + compact, + compact::{ + generate_vector, read_vector, GENERATE_VECTORS as ETH_GENERATE_VECTORS, + READ_VECTORS as ETH_READ_VECTORS, + }, + tables, + }, +}; + +/// Generate test-vectors for different data types. +#[derive(Debug, Parser)] +pub struct Command { + #[command(subcommand)] + command: Subcommands, +} + +#[derive(Subcommand, Debug)] +/// `reth test-vectors` subcommands +pub enum Subcommands { + /// Generates test vectors for specified tables. If no table is specified, generate for all. + Tables { + /// List of table names. Case-sensitive. + names: Vec, + }, + /// Generates test vectors for `Compact` types with `--write`. Reads and checks generated + /// vectors with `--read`. + #[group(multiple = false, required = true)] + Compact { + /// Write test vectors to a file. + #[arg(long)] + write: bool, + + /// Read test vectors from a file. + #[arg(long)] + read: bool, + }, +} + +impl Command { + /// Execute the command + pub async fn execute(self) -> eyre::Result<()> { + match self.command { + Subcommands::Tables { names } => { + tables::generate_vectors(names)?; + } + Subcommands::Compact { write, .. } => { + compact_types!( + regular: [ + TxDeposit + ], identifier: [] + ); + + if write { + compact::generate_vectors_with(ETH_GENERATE_VECTORS)?; + compact::generate_vectors_with(GENERATE_VECTORS)?; + } else { + compact::read_vectors_with(ETH_READ_VECTORS)?; + compact::read_vectors_with(READ_VECTORS)?; + } + } + } + Ok(()) + } +} diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index e6eed86bf7f..23eaa99b521 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -27,6 +27,11 @@ pub mod commands; /// made for op-erigon's import needs). pub mod receipt_file_codec; +/// OVM block, same as EVM block at bedrock, except for signature of deposit transaction +/// not having a signature back then. +/// Enables decoding and encoding `Block` types within file contexts. 
+pub mod ovm_file_codec; + pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand}; use reth_optimism_chainspec::OpChainSpec; @@ -47,13 +52,14 @@ use reth_node_core::{ version::{LONG_VERSION, SHORT_VERSION}, }; use reth_optimism_evm::OpExecutorProvider; -use reth_optimism_node::OptimismNode; +use reth_optimism_node::OpNode; use reth_tracing::FileWorkerGuard; use tracing::info; // This allows us to manually enable node metrics features, required for proper jemalloc metric // reporting use reth_node_metrics as _; +use reth_node_metrics::recorder::install_prometheus_recorder; /// The main op-reth cli interface. /// @@ -135,36 +141,39 @@ where let _guard = self.init_tracing()?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); + // Install the prometheus recorder to be sure to record all metrics + let _ = install_prometheus_recorder(); + let runner = CliRunner::default(); match self.command { Commands::Node(command) => { runner.run_command_until_exit(|ctx| command.execute(ctx, launcher)) } Commands::Init(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::InitState(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::ImportOp(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::ImportReceiptsOp(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Db(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) - } + Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::()), Commands::Stage(command) => runner.run_command_until_exit(|ctx| { - command.execute::(ctx, OpExecutorProvider::optimism) + command.execute::(ctx, OpExecutorProvider::optimism) }), Commands::P2P(command) => runner.run_until_ctrl_c(command.execute()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), Commands::Recover(command) => { - runner.run_command_until_exit(|ctx| command.execute::(ctx)) + runner.run_command_until_exit(|ctx| command.execute::(ctx)) } - Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), + Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), + #[cfg(feature = "dev")] + Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), } } diff --git a/crates/optimism/cli/src/ovm_file_codec.rs b/crates/optimism/cli/src/ovm_file_codec.rs new file mode 100644 index 00000000000..3d746d6d1e0 --- /dev/null +++ b/crates/optimism/cli/src/ovm_file_codec.rs @@ -0,0 +1,383 @@ +use alloy_consensus::{ + transaction::{from_eip155_value, RlpEcdsaTx}, + Header, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, +}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, + eip4895::Withdrawals, +}; +use alloy_primitives::{ + bytes::{Buf, BytesMut}, + keccak256, PrimitiveSignature as Signature, TxHash, B256, U256, +}; +use alloy_rlp::{Decodable, Error as RlpError, RlpDecodable}; +use derive_more::{AsRef, Deref}; +use op_alloy_consensus::TxDeposit; +use reth_downloaders::file_client::FileClientError; +use reth_primitives::transaction::{Transaction, TxType}; +use 
serde::{Deserialize, Serialize};
+use tokio_util::codec::Decoder;
+
+#[allow(dead_code)]
+/// Specific codec for reading raw block bodies from a file
+/// with optimism-specific signature handling
+pub(crate) struct OvmBlockFileCodec;
+
+impl Decoder for OvmBlockFileCodec {
+    type Item = Block;
+    type Error = FileClientError;
+
+    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
+        if src.is_empty() {
+            return Ok(None);
+        }
+
+        let buf_slice = &mut src.as_ref();
+        let body =
+            Block::decode(buf_slice).map_err(|err| FileClientError::Rlp(err, src.to_vec()))?;
+        src.advance(src.len() - buf_slice.len());
+
+        Ok(Some(body))
+    }
+}
+
+/// OVM block, same as an EVM block but with different transaction signature handling.
+/// Pre-bedrock system transactions on Optimism were sent from the zero address
+/// with an empty signature.
+#[derive(Debug, Clone, PartialEq, Eq, RlpDecodable)]
+pub struct Block {
+    /// Block header
+    pub header: Header,
+    /// Block body
+    pub body: BlockBody,
+}
+
+impl Block {
+    /// Decodes a `Block` from the given byte slice.
+    pub fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
+        let header = Header::decode(buf)?;
+        let body = BlockBody::decode(buf)?;
+        Ok(Self { header, body })
+    }
+}
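For orientation, a minimal sketch of driving the codec by hand; in the import pipeline it would presumably sit behind a chunked or framed reader like the other file codecs. The `decode_one` helper is hypothetical:

use alloy_primitives::bytes::BytesMut;
use tokio_util::codec::Decoder;

// Decode a single OVM block from an in-memory buffer. The codec is stateless:
// it returns `Ok(None)` on empty input and one `Block` per RLP item otherwise.
fn decode_one(raw: &[u8]) -> Result<Option<Block>, FileClientError> {
    let mut codec = OvmBlockFileCodec;
    let mut buf = BytesMut::from(raw);
    codec.decode(&mut buf)
}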
+
+/// The body of a block for OVM
+#[derive(Debug, Clone, PartialEq, Eq, Default, RlpDecodable)]
+#[rlp(trailing)]
+pub struct BlockBody {
+    /// Transactions in the block
+    pub transactions: Vec<TransactionSigned>,
+    /// Uncle headers for the given block
+    pub ommers: Vec<Header>,
+    /// Withdrawals in the block.
+    pub withdrawals: Option<Withdrawals>,
+}
+
+/// Signed transaction.
+#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)]
+pub struct TransactionSigned {
+    /// Transaction hash
+    pub hash: TxHash,
+    /// The transaction signature values
+    pub signature: Signature,
+    /// Raw transaction info
+    #[deref]
+    #[as_ref]
+    pub transaction: Transaction,
+}
+
+impl Default for TransactionSigned {
+    fn default() -> Self {
+        Self {
+            hash: Default::default(),
+            signature: Signature::test_signature(),
+            transaction: Default::default(),
+        }
+    }
+}
+
+impl AsRef<Self> for TransactionSigned {
+    fn as_ref(&self) -> &Self {
+        self
+    }
+}
+
+// === impl TransactionSigned ===
+impl TransactionSigned {
+    /// Calculate the transaction hash. An EIP-2718 encoded transaction does not contain an
+    /// RLP header and starts with the tx type.
+    pub fn recalculate_hash(&self) -> B256 {
+        keccak256(self.encoded_2718())
+    }
+
+    /// Create a new signed transaction from a transaction and its signature.
+    ///
+    /// This will also calculate the transaction hash using its encoding.
+    pub fn from_transaction_and_signature(transaction: Transaction, signature: Signature) -> Self {
+        let mut initial_tx = Self { transaction, hash: Default::default(), signature };
+        initial_tx.hash = initial_tx.recalculate_hash();
+        initial_tx
+    }
+
+    /// Decodes a legacy transaction from the data buffer into a tuple.
+    ///
+    /// This expects `rlp(legacy_tx)`.
+    ///
+    /// Refer to the docs for [`Self::decode_rlp_legacy_transaction`] for details on the exact
+    /// format expected.
+    pub(crate) fn decode_rlp_legacy_transaction_tuple(
+        data: &mut &[u8],
+    ) -> alloy_rlp::Result<(TxLegacy, TxHash, Signature)> {
+        let original_encoding = *data;
+
+        let header = alloy_rlp::Header::decode(data)?;
+        let remaining_len = data.len();
+
+        let transaction_payload_len = header.payload_length;
+
+        if transaction_payload_len > remaining_len {
+            return Err(RlpError::InputTooShort);
+        }
+
+        let mut transaction = TxLegacy {
+            nonce: Decodable::decode(data)?,
+            gas_price: Decodable::decode(data)?,
+            gas_limit: Decodable::decode(data)?,
+            to: Decodable::decode(data)?,
+            value: Decodable::decode(data)?,
+            input: Decodable::decode(data)?,
+            chain_id: None,
+        };
+
+        let v = Decodable::decode(data)?;
+        let r: U256 = Decodable::decode(data)?;
+        let s: U256 = Decodable::decode(data)?;
+
+        let tx_length = header.payload_length + header.length();
+        let hash = keccak256(&original_encoding[..tx_length]);
+
+        // Handle both pre-bedrock and regular cases
+        let (signature, chain_id) = if v == 0 && r.is_zero() && s.is_zero() {
+            // Pre-bedrock system transactions case
+            (Signature::new(r, s, false), None)
+        } else {
+            // Regular transaction case
+            let (parity, chain_id) = from_eip155_value(v)
+                .ok_or(alloy_rlp::Error::Custom("invalid parity for legacy transaction"))?;
+            (Signature::new(r, s, parity), chain_id)
+        };
+
+        // Set chain ID and verify length
+        transaction.chain_id = chain_id;
+        let decoded = remaining_len - data.len();
+        if decoded != transaction_payload_len {
+            return Err(RlpError::UnexpectedLength);
+        }
+
+        Ok((transaction, hash, signature))
+    }
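The branch above is the reason this codec exists at all. A distilled restatement with the EIP-155 recovery path spelled out; the `classify_legacy_signature` helper is illustrative, not part of the diff:

// Pre-bedrock system transactions carry `v == 0, r == 0, s == 0` and decode to
// an "empty" signature with no chain id; every other legacy transaction goes
// through ordinary EIP-155 parity recovery.
fn classify_legacy_signature(
    v: u64,
    r: U256,
    s: U256,
) -> alloy_rlp::Result<(Signature, Option<u64>)> {
    if v == 0 && r.is_zero() && s.is_zero() {
        Ok((Signature::new(r, s, false), None))
    } else {
        let (parity, chain_id) = from_eip155_value(v)
            .ok_or(alloy_rlp::Error::Custom("invalid parity for legacy transaction"))?;
        Ok((Signature::new(r, s, parity), chain_id))
    }
}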
+
+    /// Decodes a legacy transaction from the data buffer.
+    ///
+    /// This should _only_ be used in general transaction decoding methods, which have
+    /// already ensured that the input is a legacy transaction with the following format:
+    /// `rlp(legacy_tx)`
+    ///
+    /// Legacy transactions are encoded as lists, so the input should start with a RLP list header.
+    // TODO: make buf advancement semantics consistent with `decode_enveloped_typed_transaction`,
+    // so decoding methods do not need to manually advance the buffer
+    pub fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> alloy_rlp::Result<Self> {
+        let (transaction, hash, signature) = Self::decode_rlp_legacy_transaction_tuple(data)?;
+        let signed = Self { transaction: Transaction::Legacy(transaction), hash, signature };
+        Ok(signed)
+    }
+}
+
+impl Decodable for TransactionSigned {
+    /// This `Decodable` implementation only supports decoding RLP-encoded transactions, as used
+    /// by p2p.
+    ///
+    /// The p2p encoding format always includes an RLP header, although the type of RLP header
+    /// depends on whether or not the transaction is a legacy transaction.
+    ///
+    /// If the transaction is a legacy transaction, it is just encoded as a RLP list:
+    /// `rlp(tx-data)`.
+    ///
+    /// If the transaction is a typed transaction, it is encoded as a RLP string:
+    /// `rlp(tx-type || rlp(tx-data))`
+    ///
+    /// This can be used for decoding all signed transactions in p2p `BlockBodies` responses.
+    ///
+    /// This cannot be used for decoding EIP-4844 transactions in p2p `PooledTransactions`, since
+    /// the EIP-4844 variant of [`TransactionSigned`] does not include the blob sidecar.
+    ///
+    /// For a method suitable for decoding pooled transactions, see \[`PooledTransactionsElement`\].
+    ///
+    /// CAUTION: Due to a quirk in [`Header::decode`], this method will succeed even if a typed
+    /// transaction is encoded in this format, and does not start with a RLP header:
+    /// `tx-type || rlp(tx-data)`.
+    ///
+    /// This is because [`Header::decode`] does not advance the buffer, and returns a length-1
+    /// string header if the first byte is less than `0xf7`.
+    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
+        Self::network_decode(buf).map_err(Into::into)
+    }
+}
+
+impl Encodable2718 for TransactionSigned {
+    fn type_flag(&self) -> Option<u8> {
+        match self.transaction.tx_type() {
+            TxType::Legacy => None,
+            tx_type => Some(tx_type as u8),
+        }
+    }
+
+    fn encode_2718_len(&self) -> usize {
+        match &self.transaction {
+            Transaction::Legacy(legacy_tx) => legacy_tx.eip2718_encoded_length(&self.signature),
+            Transaction::Eip2930(access_list_tx) => {
+                access_list_tx.eip2718_encoded_length(&self.signature)
+            }
+            Transaction::Eip1559(dynamic_fee_tx) => {
+                dynamic_fee_tx.eip2718_encoded_length(&self.signature)
+            }
+            Transaction::Eip4844(blob_tx) => blob_tx.eip2718_encoded_length(&self.signature),
+            Transaction::Eip7702(set_code_tx) => {
+                set_code_tx.eip2718_encoded_length(&self.signature)
+            }
+            Transaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(),
+        }
+    }
+
+    fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) {
+        self.transaction.eip2718_encode(&self.signature, out)
+    }
+}
+
+impl Decodable2718 for TransactionSigned {
+    fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result<Self> {
+        match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))?
{ + TxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), + TxType::Eip2930 => { + let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash }) + } + TxType::Eip1559 => { + let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash }) + } + TxType::Eip7702 => { + let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash }) + } + TxType::Eip4844 => { + let (tx, signature, hash) = TxEip4844::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash }) + } + TxType::Deposit => Ok(Self::from_transaction_and_signature( + Transaction::Deposit(TxDeposit::rlp_decode(buf)?), + TxDeposit::signature(), + )), + } + } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { + Ok(Self::decode_rlp_legacy_transaction(buf)?) + } +} + +#[cfg(test)] +mod tests { + use crate::ovm_file_codec::TransactionSigned; + use alloy_primitives::{address, hex, TxKind, B256, U256}; + use reth_primitives::transaction::Transaction; + const DEPOSIT_FUNCTION_SELECTOR: [u8; 4] = [0xb6, 0xb5, 0x5f, 0x25]; + use alloy_rlp::Decodable; + + #[test] + fn test_decode_legacy_transactions() { + // Test Case 1: contract deposit - regular L2 transaction calling deposit() function + // tx: https://optimistic.etherscan.io/getRawTx?tx=0x7860252963a2df21113344f323035ef59648638a571eef742e33d789602c7a1c + let deposit_tx_bytes = hex!("f88881f0830f481c830c6e4594a75127121d28a9bf848f3b70e7eea26570aa770080a4b6b55f2500000000000000000000000000000000000000000000000000000000000710b238a0d5c622d92ddf37f9c18a3465a572f74d8b1aeaf50c1cfb10b3833242781fd45fa02c4f1d5819bf8b70bf651e7a063b7db63c55bd336799c6ae3e5bc72ad6ef3def"); + let deposit_decoded = TransactionSigned::decode(&mut &deposit_tx_bytes[..]).unwrap(); + + // Verify deposit transaction + let deposit_tx = match &deposit_decoded.transaction { + Transaction::Legacy(ref tx) => tx, + _ => panic!("Expected legacy transaction for NFT deposit"), + }; + + assert_eq!( + deposit_tx.to, + TxKind::Call(address!("a75127121d28a9bf848f3b70e7eea26570aa7700")) + ); + assert_eq!(deposit_tx.nonce, 240); + assert_eq!(deposit_tx.gas_price, 1001500); + assert_eq!(deposit_tx.gas_limit, 814661); + assert_eq!(deposit_tx.value, U256::ZERO); + assert_eq!(&deposit_tx.input.as_ref()[0..4], DEPOSIT_FUNCTION_SELECTOR); + assert_eq!(deposit_tx.chain_id, Some(10)); + assert_eq!( + deposit_decoded.signature.r(), + U256::from_str_radix( + "d5c622d92ddf37f9c18a3465a572f74d8b1aeaf50c1cfb10b3833242781fd45f", + 16 + ) + .unwrap() + ); + assert_eq!( + deposit_decoded.signature.s(), + U256::from_str_radix( + "2c4f1d5819bf8b70bf651e7a063b7db63c55bd336799c6ae3e5bc72ad6ef3def", + 16 + ) + .unwrap() + ); + + // Test Case 2: pre-bedrock system transaction from block 105235052 + // tx: https://optimistic.etherscan.io/getRawTx?tx=0xe20b11349681dd049f8df32f5cdbb4c68d46b537685defcd86c7fa42cfe75b9e + let system_tx_bytes = 
hex!("f9026c830d899383124f808302a77e94a0cc33dd6f4819d473226257792afe230ec3c67f80b902046c459a280000000000000000000000004d73adb72bc3dd368966edd0f0b2148401a178e2000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000647fac7f00000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000000084704316e5000000000000000000000000000000000000000000000000000000000000006e10975631049de3c008989b0d8c19fc720dc556ca01abfbd794c6eb5075dd000d000000000000000000000000000000000000000000000000000000000000001410975631049de3c008989b0d8c19fc720dc556ca01abfbd794c6eb5075dd000d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000082a39325251d44e11f3b6d92f9382438eb6c8b5068d4a488d4f177b26f2ca20db34ae53467322852afcc779f25eafd124c5586f54b9026497ba934403d4c578e3c1b5aa754c918ee2ecd25402df656c2419717e4017a7aecb84af3914fd3c7bf6930369c4e6ff76950246b98e354821775f02d33cdbee5ef6aed06c15b75691692d31c00000000000000000000000000000000000000000000000000000000000038a0e8991e95e66d809f4b6fb0af27c31368ca0f30e657165c428aa681ec5ea25bbea013ed325bd97365087ec713e9817d252b59113ea18430b71a5890c4eeb6b9efc4"); + let system_decoded = TransactionSigned::decode(&mut &system_tx_bytes[..]).unwrap(); + + // Verify system transaction + assert!(system_decoded.is_legacy()); + + let system_tx = match &system_decoded.transaction { + Transaction::Legacy(ref tx) => tx, + _ => panic!("Expected Legacy transaction"), + }; + + assert_eq!(system_tx.nonce, 887187); + assert_eq!(system_tx.gas_price, 1200000); + assert_eq!(system_tx.gas_limit, 173950); + assert_eq!( + system_tx.to, + TxKind::Call(address!("a0cc33dd6f4819d473226257792afe230ec3c67f")) + ); + assert_eq!(system_tx.value, U256::ZERO); + assert_eq!(system_tx.chain_id, Some(10)); + + assert_eq!( + system_decoded.signature.r(), + U256::from_str_radix( + "e8991e95e66d809f4b6fb0af27c31368ca0f30e657165c428aa681ec5ea25bbe", + 16 + ) + .unwrap() + ); + assert_eq!( + system_decoded.signature.s(), + U256::from_str_radix( + "13ed325bd97365087ec713e9817d252b59113ea18430b71a5890c4eeb6b9efc4", + 16 + ) + .unwrap() + ); + assert_eq!( + system_decoded.hash, + B256::from(hex!("e20b11349681dd049f8df32f5cdbb4c68d46b537685defcd86c7fa42cfe75b9e")) + ); + } +} diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index f5f061c5992..4f4868a454d 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -22,9 +22,14 @@ reth-trie-common.workspace = true # op-reth reth-optimism-forks.workspace = true reth-optimism-chainspec.workspace = true +# TODO: remove this after feature cleanup +reth-optimism-primitives = { workspace = true, features = ["serde"] } # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true +alloy-trie.workspace = true tracing.workspace = true @@ -33,4 +38,4 @@ alloy-primitives.workspace = true reth-optimism-chainspec.workspace = true [features] -optimism = ["reth-primitives/optimism"] +optimism = ["reth-primitives/optimism", "reth-optimism-primitives/optimism"] diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index fe67ff1bcd9..d05ff9c9bd7 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -9,20 +9,22 @@ // The `optimism` feature must be enabled to use this crate. 
#![cfg(feature = "optimism")] +use alloy_consensus::{BlockHeader, Header, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{B64, U256}; use reth_chainspec::EthereumHardforks; -use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; +use reth_consensus::{ + Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput, +}; use reth_consensus_common::validation::{ validate_against_parent_4844, validate_against_parent_eip1559_base_fee, - validate_against_parent_hash_number, validate_against_parent_timestamp, validate_cancun_gas, - validate_header_base_fee, validate_header_extradata, validate_header_gas, - validate_shanghai_withdrawals, + validate_against_parent_hash_number, validate_against_parent_timestamp, + validate_body_against_header, validate_cancun_gas, validate_header_base_fee, + validate_header_extradata, validate_header_gas, validate_shanghai_withdrawals, }; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::OptimismHardforks; -use reth_primitives::{ - BlockWithSenders, GotExpected, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, -}; +use reth_optimism_forks::OpHardforks; +use reth_optimism_primitives::OpPrimitives; +use reth_primitives::{BlockBody, BlockWithSenders, GotExpected, SealedBlock, SealedHeader}; use std::{sync::Arc, time::SystemTime}; mod proof; @@ -35,22 +37,68 @@ pub use validation::validate_block_post_execution; /// /// Provides basic checks as outlined in the execution specs. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct OptimismBeaconConsensus { +pub struct OpBeaconConsensus { /// Configuration chain_spec: Arc, } -impl OptimismBeaconConsensus { - /// Create a new instance of [`OptimismBeaconConsensus`] +impl OpBeaconConsensus { + /// Create a new instance of [`OpBeaconConsensus`] pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } } } -impl Consensus for OptimismBeaconConsensus { +impl FullConsensus for OpBeaconConsensus { + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + input: PostExecutionInput<'_>, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec, input.receipts) + } +} + +impl Consensus for OpBeaconConsensus { + fn validate_body_against_header( + &self, + body: &BlockBody, + header: &SealedHeader, + ) -> Result<(), ConsensusError> { + validate_body_against_header(body, header.header()) + } + + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { + // Check ommers hash + let ommers_hash = reth_primitives::proofs::calculate_ommers_root(&block.body.ommers); + if block.header.ommers_hash != ommers_hash { + return Err(ConsensusError::BodyOmmersHashDiff( + GotExpected { got: ommers_hash, expected: block.header.ommers_hash }.into(), + )) + } + + // Check transaction root + if let Err(error) = block.ensure_transaction_root_valid() { + return Err(ConsensusError::BodyTransactionRootDiff(error.into())) + } + + // EIP-4895: Beacon chain push withdrawals as operations + if self.chain_spec.is_shanghai_active_at_timestamp(block.timestamp) { + validate_shanghai_withdrawals(block)?; + } + + if self.chain_spec.is_cancun_active_at_timestamp(block.timestamp) { + validate_cancun_gas(block)?; + } + + Ok(()) + } +} + +impl HeaderValidator for OpBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { - validate_header_gas(header)?; - validate_header_base_fee(header, &self.chain_spec) + validate_header_gas(header.header())?; + validate_header_base_fee(header.header(), 
&self.chain_spec) } fn validate_header_against_parent( @@ -58,17 +106,40 @@ impl Consensus for OptimismBeaconConsensus { header: &SealedHeader, parent: &SealedHeader, ) -> Result<(), ConsensusError> { - validate_against_parent_hash_number(header, parent)?; + validate_against_parent_hash_number(header.header(), parent)?; if self.chain_spec.is_bedrock_active_at_block(header.number) { - validate_against_parent_timestamp(header, parent)?; + validate_against_parent_timestamp(header.header(), parent.header())?; } - validate_against_parent_eip1559_base_fee(header, parent, &self.chain_spec)?; + // EIP1559 base fee validation + // + // > if Holocene is active in parent_header.timestamp, then the parameters from + // > parent_header.extraData are used. + if self.chain_spec.is_holocene_active_at_timestamp(parent.timestamp) { + let header_base_fee = + header.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; + let expected_base_fee = self + .chain_spec + .decode_holocene_base_fee(parent, header.timestamp) + .map_err(|_| ConsensusError::BaseFeeMissing)?; + if expected_base_fee != header_base_fee { + return Err(ConsensusError::BaseFeeDiff(GotExpected { + expected: expected_base_fee, + got: header_base_fee, + })) + } + } else { + validate_against_parent_eip1559_base_fee( + header.header(), + parent.header(), + &self.chain_spec, + )?; + } // ensure that the blob gas fields for this block if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - validate_against_parent_4844(header, parent)?; + validate_against_parent_4844(header.header(), parent.header())?; } Ok(()) @@ -119,38 +190,4 @@ impl Consensus for OptimismBeaconConsensus { Ok(()) } - - fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { - // Check ommers hash - let ommers_hash = reth_primitives::proofs::calculate_ommers_root(&block.body.ommers); - if block.header.ommers_hash != ommers_hash { - return Err(ConsensusError::BodyOmmersHashDiff( - GotExpected { got: ommers_hash, expected: block.header.ommers_hash }.into(), - )) - } - - // Check transaction root - if let Err(error) = block.ensure_transaction_root_valid() { - return Err(ConsensusError::BodyTransactionRootDiff(error.into())) - } - - // EIP-4895: Beacon chain push withdrawals as operations - if self.chain_spec.is_shanghai_active_at_timestamp(block.timestamp) { - validate_shanghai_withdrawals(block)?; - } - - if self.chain_spec.is_cancun_active_at_timestamp(block.timestamp) { - validate_cancun_gas(block)?; - } - - Ok(()) - } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - input: PostExecutionInput<'_>, - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec, input.receipts) - } } diff --git a/crates/optimism/consensus/src/proof.rs b/crates/optimism/consensus/src/proof.rs index b283356016c..df0669568b3 100644 --- a/crates/optimism/consensus/src/proof.rs +++ b/crates/optimism/consensus/src/proof.rs @@ -1,14 +1,15 @@ //! Helper function for Receipt root calculation for Optimism hardforks. +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; +use alloy_trie::root::ordered_trie_root_with_encoder; use reth_chainspec::ChainSpec; -use reth_optimism_forks::OptimismHardfork; -use reth_primitives::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; -use reth_trie_common::root::ordered_trie_root_with_encoder; +use reth_optimism_forks::OpHardfork; +use reth_primitives::{Receipt, ReceiptWithBloom}; /// Calculates the receipt root for a header. 
pub(crate) fn calculate_receipt_root_optimism( - receipts: &[ReceiptWithBloom], + receipts: &[ReceiptWithBloom], chain_spec: &ChainSpec, timestamp: u64, ) -> B256 { @@ -17,8 +18,8 @@ pub(crate) fn calculate_receipt_root_optimism( // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the // receipts before calculating the receipt root. This was corrected in the Canyon // hardfork. - if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) && - !chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp) + if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) && + !chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp) { let receipts = receipts .iter() @@ -29,12 +30,10 @@ pub(crate) fn calculate_receipt_root_optimism( }) .collect::>(); - return ordered_trie_root_with_encoder(receipts.as_slice(), |r, buf| { - r.encode_inner(buf, false) - }) + return ordered_trie_root_with_encoder(receipts.as_slice(), |r, buf| r.encode_2718(buf)) } - ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) + ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_2718(buf)) } /// Calculates the receipt root for a header for the reference type of [Receipt]. @@ -50,8 +49,8 @@ pub fn calculate_receipt_root_no_memo_optimism( // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the // receipts before calculating the receipt root. This was corrected in the Canyon // hardfork. - if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) && - !chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp) + if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) && + !chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp) { let receipts = receipts .iter() @@ -63,12 +62,12 @@ pub fn calculate_receipt_root_no_memo_optimism( .collect::>(); return ordered_trie_root_with_encoder(&receipts, |r, buf| { - ReceiptWithBloomRef::from(r).encode_inner(buf, false) + r.with_bloom_ref().encode_2718(buf); }) } ordered_trie_root_with_encoder(receipts, |r, buf| { - ReceiptWithBloomRef::from(*r).encode_inner(buf, false) + r.with_bloom_ref().encode_2718(buf); }) } @@ -123,7 +122,7 @@ mod tests { deposit_nonce: Some(4012991u64), deposit_receipt_version: None, }, - bloom: Bloom(hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into()), + logs_bloom: Bloom(hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into()), }, // 0x2f433586bae30573c393adfa02bc81d2a1888a3d6c9869f473fb57245166bd9a ReceiptWithBloom { @@ 
-169,7 +168,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom: Bloom(hex!("00001000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000010000").into()), + logs_bloom: Bloom(hex!("00001000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000010000").into()), }, // 0x6c33676e8f6077f46a62eabab70bc6d1b1b18a624b0739086d77093a1ecf8266 ReceiptWithBloom { @@ -211,7 +210,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom: Bloom(hex!("00000000000000000000200000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000020000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000000000").into()), + logs_bloom: Bloom(hex!("00000000000000000000200000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000002000000000020000000000000000000000000000000000000000000000000000000000000000020000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000040000000000004000000000080000000000000000000000000000000000000000000000000000008000000000000080020000000000000000000000000002000000000000000000000000000080000000000").into()), }, // 0x4d3ecbef04ba7ce7f5ab55be0c61978ca97c117d7da448ed9771d4ff0c720a3f ReceiptWithBloom { @@ -283,7 +282,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom: Bloom(hex!("00200000000000000000000080000000000000000000000000040000100004000000000000000000000000100000000000000000000000000000100000000000000000000000000002000008000000200000000200000000020000000000000040000000000000000400000200000000000000000000000000000010000000000400000000010400000000000000000000000000002000c80000004080002000000000000000400200000000800000000000000000000000000000000000000000000002000000000000000000000000000000000100001000000000000000000000002000000000000000000000010000000000000000000000800000800000").into()), + logs_bloom: 
Bloom(hex!("00200000000000000000000080000000000000000000000000040000100004000000000000000000000000100000000000000000000000000000100000000000000000000000000002000008000000200000000200000000020000000000000040000000000000000400000200000000000000000000000000000010000000000400000000010400000000000000000000000000002000c80000004080002000000000000000400200000000800000000000000000000000000000000000000000000002000000000000000000000000000000000100001000000000000000000000002000000000000000000000010000000000000000000000800000800000").into()), }, // 0xf738af5eb00ba23dbc1be2dbce41dbc0180f0085b7fb46646e90bf737af90351 ReceiptWithBloom { @@ -325,7 +324,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom: Bloom(hex!("00000000000000000000000000000000400000000000000000000000000000000000004000000000000001000000000000000002000000000100000000000000000000000000000000000008000000000000000000000000000000000000000004000000020000000000000000000800000000000000000000000010200100200008000002000000000000000000800000000000000000000002000000000000000000000000000000080000000000000000000000004000000000000000000000000002000000000000000000000000000000000000200000000000000020002000000000000000002000000000000000000000000000000000000000000000").into()), + logs_bloom: Bloom(hex!("00000000000000000000000000000000400000000000000000000000000000000000004000000000000001000000000000000002000000000100000000000000000000000000000000000008000000000000000000000000000000000000000004000000020000000000000000000800000000000000000000000010200100200008000002000000000000000000800000000000000000000002000000000000000000000000000000080000000000000000000000004000000000000000000000000002000000000000000000000000000000000000200000000000000020002000000000000000002000000000000000000000000000000000000000000000").into()), }, ]; let root = calculate_receipt_root_optimism(&receipts, BASE_SEPOLIA.as_ref(), case.1); @@ -339,7 +338,7 @@ mod tests { address: Address::ZERO, data: LogData::new_unchecked(vec![], Default::default()), }]; - let bloom = bloom!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); + let logs_bloom = bloom!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); let receipt = ReceiptWithBloom { receipt: Receipt { tx_type: TxType::Eip2930, @@ -349,7 +348,7 @@ mod tests { deposit_nonce: None, deposit_receipt_version: None, }, - bloom, + logs_bloom, }; let receipt = vec![receipt]; let root = calculate_receipt_root_optimism(&receipt, BASE_SEPOLIA.as_ref(), 0); diff --git a/crates/optimism/consensus/src/validation.rs b/crates/optimism/consensus/src/validation.rs index 3a76ec13854..5290603e7b8 100644 --- 
a/crates/optimism/consensus/src/validation.rs +++ b/crates/optimism/consensus/src/validation.rs @@ -1,4 +1,5 @@ use crate::proof::calculate_receipt_root_optimism; +use alloy_consensus::TxReceipt; use alloy_primitives::{Bloom, B256}; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; @@ -57,7 +58,7 @@ fn verify_receipts( calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); // Calculate header logs bloom. - let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom()); compare_receipts_root_and_logs_bloom( receipts_root, diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 0a22dcfddb4..7afb3b50e67 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -16,13 +16,16 @@ reth-chainspec.workspace = true reth-ethereum-forks.workspace = true reth-evm.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-revm.workspace = true reth-execution-errors.workspace = true reth-execution-types.workspace = true reth-prune-types.workspace = true reth-consensus.workspace = true +reth-consensus-common.workspace = true # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true op-alloy-consensus.workspace = true alloy-consensus.workspace = true @@ -31,29 +34,48 @@ alloy-consensus.workspace = true reth-optimism-consensus.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-forks.workspace = true +reth-optimism-primitives.workspace = true # revm revm.workspace = true revm-primitives.workspace = true # misc -thiserror.workspace = true +derive_more.workspace = true tracing.workspace = true [dev-dependencies] -alloy-eips.workspace = true - reth-evm = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } reth-primitives = { workspace = true, features = ["test-utils"] } reth-optimism-chainspec.workspace = true alloy-genesis.workspace = true alloy-consensus.workspace = true +reth-optimism-primitives = { workspace = true, features = ["arbitrary"] } [features] +default = ["std"] +std = [ + "reth-consensus/std", + "reth-primitives/std", + "reth-revm/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "revm-primitives/std", + "reth-primitives-traits/std", + "revm/std", + "reth-optimism-primitives/std", + "reth-ethereum-forks/std", + "derive_more/std", + "reth-optimism-forks/std" +] optimism = [ - "reth-primitives/optimism", - "reth-execution-types/optimism", - "reth-optimism-consensus/optimism", - "revm/optimism", + "reth-primitives/optimism", + "reth-execution-types/optimism", + "reth-optimism-consensus/optimism", + "revm/optimism", + "revm-primitives/optimism", + "reth-optimism-primitives/optimism", ] diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index 668fcba4ddc..b32b0929424 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -1,6 +1,6 @@ use reth_ethereum_forks::{EthereumHardfork, Head}; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::OptimismHardfork; +use reth_optimism_forks::OpHardfork; /// Returns the revm [`SpecId`](revm_primitives::SpecId) at the given timestamp. 
/// @@ -12,15 +12,19 @@ pub fn revm_spec_by_timestamp_after_bedrock( chain_spec: &OpChainSpec, timestamp: u64, ) -> revm_primitives::SpecId { - if chain_spec.fork(OptimismHardfork::Granite).active_at_timestamp(timestamp) { + if chain_spec.fork(OpHardfork::Isthmus).active_at_timestamp(timestamp) { + todo!() + } else if chain_spec.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) { + revm_primitives::HOLOCENE + } else if chain_spec.fork(OpHardfork::Granite).active_at_timestamp(timestamp) { revm_primitives::GRANITE - } else if chain_spec.fork(OptimismHardfork::Fjord).active_at_timestamp(timestamp) { + } else if chain_spec.fork(OpHardfork::Fjord).active_at_timestamp(timestamp) { revm_primitives::FJORD - } else if chain_spec.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) { + } else if chain_spec.fork(OpHardfork::Ecotone).active_at_timestamp(timestamp) { revm_primitives::ECOTONE - } else if chain_spec.fork(OptimismHardfork::Canyon).active_at_timestamp(timestamp) { + } else if chain_spec.fork(OpHardfork::Canyon).active_at_timestamp(timestamp) { revm_primitives::CANYON - } else if chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(timestamp) { + } else if chain_spec.fork(OpHardfork::Regolith).active_at_timestamp(timestamp) { revm_primitives::REGOLITH } else { revm_primitives::BEDROCK @@ -29,17 +33,21 @@ pub fn revm_spec_by_timestamp_after_bedrock( /// Map the latest active hardfork at the given block to a revm [`SpecId`](revm_primitives::SpecId). pub fn revm_spec(chain_spec: &OpChainSpec, block: &Head) -> revm_primitives::SpecId { - if chain_spec.fork(OptimismHardfork::Granite).active_at_head(block) { + if chain_spec.fork(OpHardfork::Isthmus).active_at_head(block) { + todo!() + } else if chain_spec.fork(OpHardfork::Holocene).active_at_head(block) { + revm_primitives::HOLOCENE + } else if chain_spec.fork(OpHardfork::Granite).active_at_head(block) { revm_primitives::GRANITE - } else if chain_spec.fork(OptimismHardfork::Fjord).active_at_head(block) { + } else if chain_spec.fork(OpHardfork::Fjord).active_at_head(block) { revm_primitives::FJORD - } else if chain_spec.fork(OptimismHardfork::Ecotone).active_at_head(block) { + } else if chain_spec.fork(OpHardfork::Ecotone).active_at_head(block) { revm_primitives::ECOTONE - } else if chain_spec.fork(OptimismHardfork::Canyon).active_at_head(block) { + } else if chain_spec.fork(OpHardfork::Canyon).active_at_head(block) { revm_primitives::CANYON - } else if chain_spec.fork(OptimismHardfork::Regolith).active_at_head(block) { + } else if chain_spec.fork(OpHardfork::Regolith).active_at_head(block) { revm_primitives::REGOLITH - } else if chain_spec.fork(OptimismHardfork::Bedrock).active_at_head(block) { + } else if chain_spec.fork(OpHardfork::Bedrock).active_at_head(block) { revm_primitives::BEDROCK } else if chain_spec.fork(EthereumHardfork::Prague).active_at_head(block) { revm_primitives::PRAGUE @@ -88,6 +96,10 @@ mod tests { let cs = ChainSpecBuilder::mainnet().chain(reth_chainspec::Chain::from_id(10)).into(); f(cs).build() } + assert_eq!( + revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.holocene_activated()), 0), + revm_primitives::HOLOCENE + ); assert_eq!( revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.granite_activated()), 0), revm_primitives::GRANITE @@ -121,6 +133,10 @@ mod tests { let cs = ChainSpecBuilder::mainnet().chain(reth_chainspec::Chain::from_id(10)).into(); f(cs).build() } + assert_eq!( + revm_spec(&op_cs(|cs| cs.holocene_activated()), &Head::default()), + revm_primitives::HOLOCENE + ); assert_eq!( 
revm_spec(&op_cs(|cs| cs.granite_activated()), &Head::default()), revm_primitives::GRANITE diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index c5c6a0a4a3d..db042950674 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -1,29 +1,32 @@ //! Error types for the Optimism EVM module. +use alloc::string::String; use reth_evm::execute::BlockExecutionError; /// Optimism Block Executor Errors -#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] -pub enum OptimismBlockExecutionError { +#[derive(Debug, Clone, PartialEq, Eq, derive_more::Display)] +pub enum OpBlockExecutionError { /// Error when trying to parse L1 block info - #[error("could not get L1 block info from L2 block: {message:?}")] + #[display("could not get L1 block info from L2 block: {message}")] L1BlockInfoError { /// The inner error message message: String, }, /// Thrown when force deploy of create2deployer code fails. - #[error("failed to force create2deployer account code")] + #[display("failed to force create2deployer account code")] ForceCreate2DeployerFail, /// Thrown when a blob transaction is included in a sequencer's block. - #[error("blob transaction included in sequencer block")] + #[display("blob transaction included in sequencer block")] BlobTransactionRejected, /// Thrown when a database account could not be loaded. - #[error("failed to load account {0}")] + #[display("failed to load account {_0}")] AccountLoadFailed(alloy_primitives::Address), } -impl From for BlockExecutionError { - fn from(err: OptimismBlockExecutionError) -> Self { +impl core::error::Error for OpBlockExecutionError {} + +impl From for BlockExecutionError { + fn from(err: OpBlockExecutionError) -> Self { Self::other(err) } } diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index f7da1c250d9..205c85160dc 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,152 +1,174 @@ -//! Optimism block executor. - -use crate::{ - l1::ensure_create2_deployer, OpChainSpec, OptimismBlockExecutionError, OptimismEvmConfig, -}; -use alloy_consensus::Transaction as _; -use alloy_primitives::{BlockNumber, U256}; -use reth_chainspec::{ChainSpec, EthereumHardforks}; +//! Optimism block execution strategy. 
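The rewrite below replaces the monolithic `OpBlockExecutor` with the strategy pattern: pre-execution changes, transaction execution, and post-execution changes become separate `BlockExecutionStrategy` phases, and the generic `BasicBlockExecutorProvider` (imported below) drives them. A hedged sketch of that control flow using only the method names visible in this diff; the real driver in `reth-evm` adds receipt validation and error handling:

// Illustrative only: how a driver walks the three strategy phases in order.
fn run_block<S>(
    strategy: &mut S,
    block: &BlockWithSenders,
    total_difficulty: U256,
) -> Result<ExecuteOutput, S::Error>
where
    S: BlockExecutionStrategy<Primitives = OpPrimitives>,
{
    // On OP chains this applies the create2deployer force-deploy at Canyon.
    strategy.apply_pre_execution_changes(block, total_difficulty)?;
    let output = strategy.execute_transactions(block, total_difficulty)?;
    // Post-block balance increments (plus their state-hook notification).
    strategy.apply_post_execution_changes(block, total_difficulty, &output.receipts)?;
    Ok(output)
}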
+ +use crate::{l1::ensure_create2_deployer, OpBlockExecutionError, OpEvmConfig}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use alloy_consensus::{Header, Transaction as _}; +use alloy_eips::eip7685::Requests; +use core::fmt::Display; +use op_alloy_consensus::DepositTransaction; +use reth_chainspec::EthereumHardforks; +use reth_consensus::ConsensusError; use reth_evm::{ execute::{ - BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, - BlockExecutorProvider, BlockValidationError, Executor, ProviderError, + balance_increment_state, BasicBlockExecutorProvider, BlockExecutionError, + BlockExecutionStrategy, BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, + ProviderError, }, - system_calls::{NoopHook, OnStateHook, SystemCaller}, - ConfigureEvm, + state_change::post_block_balance_increments, + system_calls::{OnStateHook, SystemCaller}, + ConfigureEvm, TxEnvOverrides, }; -use reth_execution_types::ExecutionOutcome; +use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; -use reth_optimism_forks::OptimismHardfork; -use reth_primitives::{BlockWithSenders, Header, Receipt, Receipts, TxType}; -use reth_prune_types::PruneModes; -use reth_revm::{ - batch::BlockBatchRecord, db::states::bundle_state::BundleRetention, - state_change::post_block_balance_increments, Evm, State, -}; -use revm_primitives::{ - db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, -}; -use std::{fmt::Display, sync::Arc}; +use reth_optimism_forks::OpHardfork; +use reth_optimism_primitives::OpPrimitives; +use reth_primitives::{BlockWithSenders, Receipt, TransactionSigned, TxType}; +use reth_revm::{Database, State}; +use revm_primitives::{db::DatabaseCommit, EnvWithHandlerCfg, ResultAndState, U256}; use tracing::trace; -/// Provides executors to execute regular optimism blocks +/// Factory for [`OpExecutionStrategy`]. #[derive(Debug, Clone)] -pub struct OpExecutorProvider { +pub struct OpExecutionStrategyFactory { + /// The chainspec chain_spec: Arc, + /// How to create an EVM. evm_config: EvmConfig, } -impl OpExecutorProvider { - /// Creates a new default optimism executor provider. +impl OpExecutionStrategyFactory { + /// Creates a new default optimism executor strategy factory. pub fn optimism(chain_spec: Arc) -> Self { - Self::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)) + Self::new(chain_spec.clone(), OpEvmConfig::new(chain_spec)) } } -impl OpExecutorProvider { - /// Creates a new executor provider. +impl OpExecutionStrategyFactory { + /// Creates a new executor strategy factory. pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { Self { chain_spec, evm_config } } } -impl OpExecutorProvider +impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory where - EvmConfig: ConfigureEvm
, + EvmConfig: Clone + + Unpin + + Sync + + Send + + 'static + + ConfigureEvm
, { - fn op_executor(&self, db: DB) -> OpBlockExecutor + type Primitives = OpPrimitives; + type Strategy + Display>> = + OpExecutionStrategy; + + fn create_strategy(&self, db: DB) -> Self::Strategy where DB: Database + Display>, { - OpBlockExecutor::new( - self.chain_spec.clone(), - self.evm_config.clone(), - State::builder().with_database(db).with_bundle_update().without_state_clear().build(), - ) + let state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + OpExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) } } -impl BlockExecutorProvider for OpExecutorProvider +/// Block execution strategy for Optimism. +#[allow(missing_debug_implementations)] +pub struct OpExecutionStrategy where - EvmConfig: ConfigureEvm
, + EvmConfig: Clone, { - type Executor + Display>> = - OpBlockExecutor; - - type BatchExecutor + Display>> = - OpBatchExecutor; - fn executor(&self, db: DB) -> Self::Executor - where - DB: Database + Display>, - { - self.op_executor(db) - } - - fn batch_executor(&self, db: DB) -> Self::BatchExecutor - where - DB: Database + Display>, - { - let executor = self.op_executor(db); - OpBatchExecutor { executor, batch_record: BlockBatchRecord::default() } - } -} - -/// Helper container type for EVM with chain spec. -#[derive(Debug, Clone)] -pub struct OpEvmExecutor { /// The chainspec chain_spec: Arc, /// How to create an EVM. evm_config: EvmConfig, + /// Optional overrides for the transactions environment. + tx_env_overrides: Option>, + /// Current state for block execution. + state: State, + /// Utility to call system smart contracts. + system_caller: SystemCaller, } -impl OpEvmExecutor +impl OpExecutionStrategy where - EvmConfig: ConfigureEvm
, + EvmConfig: Clone, { - /// Executes the transactions in the block and returns the receipts. - /// - /// This applies the pre-execution changes, and executes the transactions. - /// - /// The optional `state_hook` will be executed with the state changes if present. - /// - /// # Note + /// Creates a new [`OpExecutionStrategy`] + pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { + let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); + Self { state, chain_spec, evm_config, system_caller, tx_env_overrides: None } + } +} + +impl OpExecutionStrategy +where + DB: Database + Display>, + EvmConfig: ConfigureEvm
, +{ + /// Configures a new evm configuration and block environment for the given block. /// - /// It does __not__ apply post-execution changes. - fn execute_pre_and_transactions( - &self, + /// Caution: this does not initialize the tx environment. + fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { + let (cfg, block_env) = self.evm_config.cfg_and_block_env(header, total_difficulty); + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } +} + +impl BlockExecutionStrategy for OpExecutionStrategy +where + DB: Database + Display>, + EvmConfig: ConfigureEvm
, +{ + type DB = DB; + type Primitives = OpPrimitives; + type Error = BlockExecutionError; + + fn init(&mut self, tx_env_overrides: Box) { + self.tx_env_overrides = Some(tx_env_overrides); + } + + fn apply_pre_execution_changes( + &mut self, block: &BlockWithSenders, - mut evm: Evm<'_, Ext, &mut State>, - state_hook: Option, - ) -> Result<(Vec, u64), BlockExecutionError> - where - DB: Database + Display>, - F: OnStateHook + 'static, - { - let mut system_caller = SystemCaller::new(self.evm_config.clone(), &self.chain_spec); - if let Some(hook) = state_hook { - system_caller.with_state_hook(Some(Box::new(hook) as Box)); - } + total_difficulty: U256, + ) -> Result<(), Self::Error> { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = + (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + self.state.set_state_clear_flag(state_clear_flag); - // apply pre execution changes - system_caller.apply_beacon_root_contract_call( + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + self.system_caller.apply_beacon_root_contract_call( block.timestamp, block.number, block.parent_beacon_block_root, &mut evm, )?; - // execute transactions - let is_regolith = - self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp); - // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism // blocks will always have at least a single transaction in them (the L1 info transaction), // so we can safely assume that this will always be triggered upon the transition and that // the above check for empty blocks will never be hit on OP chains. ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()) - .map_err(|_| OptimismBlockExecutionError::ForceCreate2DeployerFail)?; + .map_err(|_| OpBlockExecutionError::ForceCreate2DeployerFail)?; + + Ok(()) + } + + fn execute_transactions( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result, Self::Error> { + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + let is_regolith = + self.chain_spec.fork(OpHardfork::Regolith).active_at_timestamp(block.timestamp); let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body.transactions.len()); @@ -166,7 +188,7 @@ where // An optimism block should never contain blob transactions. if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(OptimismBlockExecutionError::BlobTransactionRejected.into()) + return Err(OpBlockExecutionError::BlobTransactionRejected.into()) } // Cache the depositor account prior to the state transition for the deposit nonce. @@ -181,10 +203,14 @@ where .map(|acc| acc.account_info().unwrap_or_default()) }) .transpose() - .map_err(|_| OptimismBlockExecutionError::AccountLoadFailed(*sender))?; + .map_err(|_| OpBlockExecutionError::AccountLoadFailed(*sender))?; self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); + if let Some(tx_env_overrides) = &mut self.tx_env_overrides { + tx_env_overrides.apply(evm.tx_mut()); + } + // Execute transaction. 
let result_and_state = evm.transact().map_err(move |err| { let new_err = err.map_db_err(|e| e.into()); @@ -200,7 +226,7 @@ where ?transaction, "Executed transaction" ); - system_caller.on_state(&result_and_state); + self.system_caller.on_state(&result_and_state.state); let ResultAndState { result, state } = result_and_state; evm.db_mut().commit(state); @@ -221,292 +247,65 @@ where // this is only set for post-Canyon deposit transactions. deposit_receipt_version: (transaction.is_deposit() && self.chain_spec - .is_fork_active_at_timestamp(OptimismHardfork::Canyon, block.timestamp)) + .is_fork_active_at_timestamp(OpHardfork::Canyon, block.timestamp)) .then_some(1), }); } - drop(evm); - Ok((receipts, cumulative_gas_used)) + Ok(ExecuteOutput { receipts, gas_used: cumulative_gas_used }) } -} -/// A basic Optimism block executor. -/// -/// Expected usage: -/// - Create a new instance of the executor. -/// - Execute the block. -#[derive(Debug)] -pub struct OpBlockExecutor { - /// Chain specific evm config that's used to execute a block. - executor: OpEvmExecutor, - /// The state to use for execution - state: State, -} - -impl OpBlockExecutor { - /// Creates a new Optimism block executor. - pub const fn new( - chain_spec: Arc, - evm_config: EvmConfig, - state: State, - ) -> Self { - Self { executor: OpEvmExecutor { chain_spec, evm_config }, state } - } - - /// Returns the chain spec. - #[inline] - pub fn chain_spec(&self) -> &ChainSpec { - &self.executor.chain_spec - } - - /// Returns mutable reference to the state that wraps the underlying database. - pub fn state_mut(&mut self) -> &mut State { - &mut self.state - } -} - -impl OpBlockExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - /// Configures a new evm configuration and block environment for the given block. - /// - /// Caution: this does not initialize the tx environment. - fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.executor.evm_config.fill_cfg_and_block_env( - &mut cfg, - &mut block_env, - header, - total_difficulty, - ); - - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) - } - - /// Convenience method to invoke `execute_without_verification_with_state_hook` setting the - /// state hook as `None`. - fn execute_without_verification( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { - self.execute_without_verification_with_state_hook(block, total_difficulty, None::) - } - - /// Execute a single block and apply the state changes to the internal state. - /// - /// Returns the receipts of the transactions in the block and the total gas used. - /// - /// Returns an error if execution fails. - fn execute_without_verification_with_state_hook( + fn apply_post_execution_changes( &mut self, block: &BlockWithSenders, total_difficulty: U256, - state_hook: Option, - ) -> Result<(Vec, u64), BlockExecutionError> - where - F: OnStateHook + 'static, - { - // 1. prepare state on new block - self.on_new_block(&block.header); - - // 2. configure the evm and execute - let env = self.evm_env_for_block(&block.header, total_difficulty); - - let (receipts, gas_used) = { - let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - self.executor.execute_pre_and_transactions(block, evm, state_hook) - }?; - - // 3. apply post execution changes - self.post_execution(block, total_difficulty)?; - - Ok((receipts, gas_used)) - } - - /// Apply settings before a new block is executed. - pub(crate) fn on_new_block(&mut self, header: &Header) { - // Set state clear flag if the block is after the Spurious Dragon hardfork. - let state_clear_flag = self.chain_spec().is_spurious_dragon_active_at_block(header.number); - self.state.set_state_clear_flag(state_clear_flag); - } - - /// Apply post execution state changes, including block rewards, withdrawals, and irregular DAO - /// hardfork state change. - pub fn post_execution( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), BlockExecutionError> { + _receipts: &[Receipt], + ) -> Result { let balance_increments = - post_block_balance_increments(self.chain_spec(), block, total_difficulty); + post_block_balance_increments(&self.chain_spec.clone(), &block.block, total_difficulty); // increment balances self.state - .increment_balances(balance_increments) + .increment_balances(balance_increments.clone()) .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; + // call state hook with changes due to balance increments. + let balance_state = balance_increment_state(&balance_increments, &mut self.state)?; + self.system_caller.on_state(&balance_state); - Ok(()) + Ok(Requests::default()) } -} - -impl Executor for OpBlockExecutor -where - EvmConfig: ConfigureEvm
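// --- Illustrative sketch, not part of this diff ---
// Note the behavioral addition in `apply_post_execution_changes` above: post-block
// balance increments are now replayed through the state hook via
// `balance_increment_state`, so a hook observes those changes too. Assuming
// `OnStateHook` is the single-method callback the `system_caller.on_state(...)`
// call sites suggest (receiving the EVM state map), a minimal hook that tallies
// touched accounts might look like:
//
//     #[derive(Default)]
//     struct TouchedAccounts(usize);
//
//     impl OnStateHook for TouchedAccounts {
//         fn on_state(&mut self, state: &EvmState) {
//             // Count every account the EVM reports as touched, including the
//             // synthetic state built for post-block balance increments.
//             self.0 += state.len();
//         }
//     }
//
//     strategy.with_state_hook(Some(Box::new(TouchedAccounts::default())));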
, - DB: Database + Display>, -{ - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = BlockExecutionOutput; - type Error = BlockExecutionError; - /// Executes the block and commits the state changes. - /// - /// Returns the receipts of the transactions in the block. - /// - /// Returns an error if the block could not be executed or failed verification. - /// - /// State changes are committed to the database. - fn execute(mut self, input: Self::Input<'_>) -> Result { - let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests: vec![], - gas_used, - }) + fn state_ref(&self) -> &State { + &self.state } - fn execute_with_state_closure( - mut self, - input: Self::Input<'_>, - mut witness: F, - ) -> Result - where - F: FnMut(&State), - { - let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - witness(&self.state); - - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests: vec![], - gas_used, - }) + fn state_mut(&mut self) -> &mut State { + &mut self.state } - fn execute_with_state_hook( - mut self, - input: Self::Input<'_>, - state_hook: F, - ) -> Result - where - F: OnStateHook + 'static, - { - let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_without_verification_with_state_hook( - block, - total_difficulty, - Some(state_hook), - )?; - - // NOTE: we need to merge keep the reverts for the bundle retention - self.state.merge_transitions(BundleRetention::Reverts); - - Ok(BlockExecutionOutput { - state: self.state.take_bundle(), - receipts, - requests: vec![], - gas_used, - }) + fn with_state_hook(&mut self, hook: Option>) { + self.system_caller.with_state_hook(hook); } -} -/// An executor for a batch of blocks. -/// -/// State changes are tracked until the executor is finalized. -#[derive(Debug)] -pub struct OpBatchExecutor { - /// The executor used to execute blocks. - executor: OpBlockExecutor, - /// Keeps track of the batch and record receipts based on the configured prune mode - batch_record: BlockBatchRecord, -} - -impl OpBatchExecutor { - /// Returns the receipts of the executed blocks. - pub const fn receipts(&self) -> &Receipts { - self.batch_record.receipts() - } - - /// Returns mutable reference to the state that wraps the underlying database. - pub fn state_mut(&mut self) -> &mut State { - self.executor.state_mut() + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + _requests: &Requests, + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec.clone(), receipts) } } -impl BatchExecutor for OpBatchExecutor -where - EvmConfig: ConfigureEvm
, - DB: Database + Display>, -{ - type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; - type Output = ExecutionOutcome; - type Error = BlockExecutionError; - - fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { - let BlockExecutionInput { block, total_difficulty } = input; - - if self.batch_record.first_block().is_none() { - self.batch_record.set_first_block(block.number); - } - - let (receipts, _gas_used) = - self.executor.execute_without_verification(block, total_difficulty)?; - - validate_block_post_execution(block, self.executor.chain_spec(), &receipts)?; - - // prepare the state according to the prune mode - let retention = self.batch_record.bundle_retention(block.number); - self.executor.state.merge_transitions(retention); - - // store receipts in the set - self.batch_record.save_receipts(receipts)?; - - Ok(()) - } - - fn finalize(mut self) -> Self::Output { - ExecutionOutcome::new( - self.executor.state.take_bundle(), - self.batch_record.take_receipts(), - self.batch_record.first_block().unwrap_or_default(), - self.batch_record.take_requests(), - ) - } - - fn set_tip(&mut self, tip: BlockNumber) { - self.batch_record.set_tip(tip); - } - - fn set_prune_modes(&mut self, prune_modes: PruneModes) { - self.batch_record.set_prune_modes(prune_modes); - } +/// Helper type with backwards compatible methods to obtain executor providers. +#[derive(Debug)] +pub struct OpExecutorProvider; - fn size_hint(&self) -> Option { - Some(self.executor.state.bundle_state.size_hint()) +impl OpExecutorProvider { + /// Creates a new default optimism executor strategy factory. + pub fn optimism( + chain_spec: Arc, + ) -> BasicBlockExecutorProvider { + BasicBlockExecutorProvider::new(OpExecutionStrategyFactory::optimism(chain_spec)) } } @@ -515,10 +314,14 @@ mod tests { use super::*; use crate::OpChainSpec; use alloy_consensus::TxEip1559; - use alloy_primitives::{b256, Address, StorageKey, StorageValue}; + use alloy_primitives::{ + b256, Address, PrimitiveSignature as Signature, StorageKey, StorageValue, + }; + use op_alloy_consensus::TxDeposit; use reth_chainspec::MIN_TRANSACTION_GAS; - use reth_optimism_chainspec::{optimism_deposit_tx_signature, OpChainSpecBuilder}; - use reth_primitives::{Account, Block, BlockBody, Signature, Transaction, TransactionSigned}; + use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider}; + use reth_optimism_chainspec::OpChainSpecBuilder; + use reth_primitives::{Account, Block, BlockBody, Transaction, TransactionSigned}; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, }; @@ -551,8 +354,13 @@ mod tests { db } - fn executor_provider(chain_spec: Arc) -> OpExecutorProvider { - OpExecutorProvider { evm_config: OptimismEvmConfig::new(chain_spec.clone()), chain_spec } + fn executor_provider( + chain_spec: Arc, + ) -> BasicBlockExecutorProvider { + let strategy_factory = + OpExecutionStrategyFactory::new(chain_spec.clone(), OpEvmConfig::new(chain_spec)); + + BasicBlockExecutorProvider::new(strategy_factory) } #[test] @@ -576,7 +384,7 @@ mod tests { let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().regolith_activated().build()); - let tx = TransactionSigned::from_transaction_and_signature( + let tx = TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), nonce: 0, @@ -587,7 +395,7 @@ mod tests { Signature::test_signature(), ); - let tx_deposit = TransactionSigned::from_transaction_and_signature( + let 
tx_deposit = TransactionSigned::new_unhashed( Transaction::Deposit(op_alloy_consensus::TxDeposit { from: addr, to: addr.into(), @@ -600,7 +408,10 @@ mod tests { let provider = executor_provider(chain_spec); let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + // make sure the L1 block contract state is preloaded. + executor.with_state_mut(|state| { + state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + }); // Attempt to execute a block with one deposit and one non-deposit transaction executor @@ -622,8 +433,9 @@ mod tests { ) .unwrap(); - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); + let receipts = executor.receipts(); + let tx_receipt = receipts[0][0].as_ref().unwrap(); + let deposit_receipt = receipts[0][1].as_ref().unwrap(); // deposit_receipt_version is not present in pre canyon transactions assert!(deposit_receipt.deposit_receipt_version.is_none()); @@ -656,7 +468,7 @@ mod tests { let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().canyon_activated().build()); - let tx = TransactionSigned::from_transaction_and_signature( + let tx = TransactionSigned::new_unhashed( Transaction::Eip1559(TxEip1559 { chain_id: chain_spec.chain.id(), nonce: 0, @@ -667,20 +479,23 @@ mod tests { Signature::test_signature(), ); - let tx_deposit = TransactionSigned::from_transaction_and_signature( + let tx_deposit = TransactionSigned::new_unhashed( Transaction::Deposit(op_alloy_consensus::TxDeposit { from: addr, to: addr.into(), gas_limit: MIN_TRANSACTION_GAS, ..Default::default() }), - optimism_deposit_tx_signature(), + TxDeposit::signature(), ); let provider = executor_provider(chain_spec); let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + // make sure the L1 block contract state is preloaded. + executor.with_state_mut(|state| { + state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); + }); // attempt to execute an empty block with parent beacon block root, this should not fail executor @@ -702,8 +517,9 @@ mod tests { ) .expect("Executing a block while canyon is active should not fail"); - let tx_receipt = executor.receipts()[0][0].as_ref().unwrap(); - let deposit_receipt = executor.receipts()[0][1].as_ref().unwrap(); + let receipts = executor.receipts(); + let tx_receipt = receipts[0][0].as_ref().unwrap(); + let deposit_receipt = receipts[0][1].as_ref().unwrap(); // deposit_receipt_version is set to 1 for post canyon deposit transactions assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 3412501eb99..1194dd63c2b 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,17 +1,18 @@ //! 
Optimism-specific implementation and utilities for the executor -use crate::OptimismBlockExecutionError; +use crate::OpBlockExecutionError; +use alloc::{string::ToString, sync::Arc}; +use alloy_consensus::Transaction; use alloy_primitives::{address, b256, hex, Address, Bytes, B256, U256}; use reth_chainspec::ChainSpec; use reth_execution_errors::BlockExecutionError; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::OptimismHardfork; -use reth_primitives::BlockBody; +use reth_optimism_forks::OpHardfork; +use reth_primitives_traits::BlockBody; use revm::{ primitives::{Bytecode, HashMap, SpecId}, DatabaseCommit, L1BlockInfo, }; -use std::sync::Arc; use tracing::trace; /// The address of the create2 deployer @@ -31,17 +32,17 @@ const L1_BLOCK_ECOTONE_SELECTOR: [u8; 4] = hex!("440a5e20"); /// transaction in the L2 block. /// /// Returns an error if the L1 info transaction is not found, if the block is empty. -pub fn extract_l1_info(body: &BlockBody) -> Result { +pub fn extract_l1_info(body: &B) -> Result { let l1_info_tx_data = body - .transactions + .transactions() .first() - .ok_or_else(|| OptimismBlockExecutionError::L1BlockInfoError { + .ok_or_else(|| OpBlockExecutionError::L1BlockInfoError { message: "could not find l1 block info tx in the L2 block".to_string(), }) .map(|tx| tx.input())?; if l1_info_tx_data.len() < 4 { - return Err(OptimismBlockExecutionError::L1BlockInfoError { + return Err(OpBlockExecutionError::L1BlockInfoError { message: "invalid l1 block info transaction calldata in the L2 block".to_string(), }) } @@ -52,7 +53,7 @@ pub fn extract_l1_info(body: &BlockBody) -> Result Result { +pub fn parse_l1_info(input: &[u8]) -> Result { // If the first 4 bytes of the calldata are the L1BlockInfoEcotone selector, then we parse the // calldata as an Ecotone hardfork L1BlockInfo transaction. Otherwise, we parse it as a // Bedrock hardfork L1BlockInfo transaction. @@ -64,7 +65,7 @@ pub fn parse_l1_info(input: &[u8]) -> Result Result { +pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result { // The setL1BlockValues tx calldata must be exactly 260 bytes long, considering that // we already removed the first 4 bytes (the function selector). 
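// [Illustrative aside, not part of the original comment or this diff: downstream
// code usually pairs `extract_l1_info` with revm's `L1BlockInfo` pricing helpers.
// The variable names and the chosen `SpecId` below are hypothetical:
//
//     let l1_block_info = extract_l1_info(&block.body)?;
//     // Price the L1 data portion of an encoded transaction.
//     let l1_cost = l1_block_info.calculate_tx_l1_cost(&encoded_tx, SpecId::ECOTONE);
// ]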
Detailed breakdown: // 32 bytes for the block number @@ -76,23 +77,23 @@ pub fn parse_l1_info_tx_bedrock(data: &[u8]) -> Result Result -pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result { +pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result { if data.len() != 160 { - return Err(OptimismBlockExecutionError::L1BlockInfoError { + return Err(OpBlockExecutionError::L1BlockInfoError { message: "unexpected l1 block info tx calldata length found".to_string(), }) } @@ -142,22 +143,22 @@ pub fn parse_l1_info_tx_ecotone(data: &[u8]) -> Result Result { - let spec_id = if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, timestamp) - { + let spec_id = if chain_spec.is_fork_active_at_timestamp(OpHardfork::Fjord, timestamp) { SpecId::FJORD - } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) { SpecId::REGOLITH - } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Bedrock, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OpHardfork::Bedrock, timestamp) { SpecId::BEDROCK } else { - return Err(OptimismBlockExecutionError::L1BlockInfoError { + return Err(OpBlockExecutionError::L1BlockInfoError { message: "Optimism hardforks are not active".to_string(), } .into()) @@ -270,9 +269,8 @@ where // If the canyon hardfork is active at the current timestamp, and it was not active at the // previous block timestamp (heuristically, block time is not perfectly constant at 2s), and the // chain is an optimism chain, then we need to force-deploy the create2 deployer contract. - if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp) && - !chain_spec - .is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp.saturating_sub(2)) + if chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp) && + !chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp.saturating_sub(2)) { trace!(target: "evm", "Forcing create2 deployer contract deployment on Canyon transition"); @@ -300,15 +298,16 @@ where mod tests { use alloy_eips::eip2718::Decodable2718; use reth_optimism_chainspec::OP_MAINNET; - use reth_optimism_forks::OptimismHardforks; + use reth_optimism_forks::OpHardforks; use reth_primitives::{Block, BlockBody, TransactionSigned}; use super::*; #[test] fn sanity_l1_block() { + use alloy_consensus::Header; use alloy_primitives::{hex_literal::hex, Bytes}; - use reth_primitives::{Header, TransactionSigned}; + use reth_primitives::TransactionSigned; let bytes = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); let l1_info_tx = TransactionSigned::decode_2718(&mut bytes.as_ref()).unwrap(); diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 4d0f9d89ff4..7424379f5ae 100644 --- a/crates/optimism/evm/src/lib.rs +++ 
b/crates/optimism/evm/src/lib.rs @@ -6,19 +6,23 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] // The `optimism` feature must be enabled to use this crate. #![cfg(feature = "optimism")] +extern crate alloc; + +use alloc::{sync::Arc, vec::Vec}; +use alloy_consensus::Header; use alloy_primitives::{Address, U256}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; -use reth_optimism_chainspec::OpChainSpec; -use reth_primitives::{ - revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, - transaction::FillTxEnv, - Head, Header, TransactionSigned, +use reth_optimism_chainspec::{DecodeError, OpChainSpec}; +use reth_primitives::{transaction::FillTxEnv, Head, TransactionSigned}; +use reth_revm::{ + inspector_handle_register, + primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, + Database, Evm, EvmBuilder, GetInspector, }; -use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; -use std::sync::Arc; mod config; pub use config::{revm_spec, revm_spec_by_timestamp_after_bedrock}; @@ -28,33 +32,33 @@ pub mod l1; pub use l1::*; mod error; -pub use error::OptimismBlockExecutionError; +pub use error::OpBlockExecutionError; use revm_primitives::{ BlobExcessGasAndPrice, BlockEnv, Bytes, CfgEnv, Env, HandlerCfg, OptimismFields, SpecId, TxKind, }; -pub mod strategy; - /// Optimism-related EVM configuration. #[derive(Debug, Clone)] -pub struct OptimismEvmConfig { +pub struct OpEvmConfig { chain_spec: Arc, } -impl OptimismEvmConfig { - /// Creates a new [`OptimismEvmConfig`] with the given chain spec. +impl OpEvmConfig { + /// Creates a new [`OpEvmConfig`] with the given chain spec. pub const fn new(chain_spec: Arc) -> Self { Self { chain_spec } } /// Returns the chain spec associated with this configuration. 
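// Illustrative usage, not part of this diff (it mirrors this crate's tests
// below): the renamed `OpEvmConfig` is built from a shared chain spec and hands
// that spec back on request:
//
//     let config = OpEvmConfig::new(BASE_MAINNET.clone());
//     assert_eq!(config.chain_spec().chain.id(), 8453); // Base mainnet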
- pub fn chain_spec(&self) -> &OpChainSpec { + pub const fn chain_spec(&self) -> &Arc { &self.chain_spec } } -impl ConfigureEvmEnv for OptimismEvmConfig { +impl ConfigureEvmEnv for OpEvmConfig { type Header = Header; + type Transaction = TransactionSigned; + type Error = DecodeError; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { transaction.fill_tx_env(tx_env, sender); @@ -133,7 +137,7 @@ impl ConfigureEvmEnv for OptimismEvmConfig { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> { // configure evm env based on parent block let cfg = CfgEnv::default().with_chain_id(self.chain_spec.chain().id()); @@ -155,13 +159,7 @@ impl ConfigureEvmEnv for OptimismEvmConfig { prevrandao: Some(attributes.prev_randao), gas_limit: U256::from(parent.gas_limit), // calculate basefee based on parent block's gas usage - basefee: U256::from( - parent - .next_block_base_fee( - self.chain_spec.base_fee_params_at_timestamp(attributes.timestamp), - ) - .unwrap_or_default(), - ), + basefee: self.chain_spec.next_block_base_fee(parent, attributes.timestamp)?, // calculate excess gas based on parent block's blob gas usage blob_excess_gas_and_price, }; @@ -174,11 +172,11 @@ impl ConfigureEvmEnv for OptimismEvmConfig { }; } - (cfg_with_handler_cfg, block_env) + Ok((cfg_with_handler_cfg, block_env)) } } -impl ConfigureEvm for OptimismEvmConfig { +impl ConfigureEvm for OpEvmConfig { type DefaultExternalContext<'a> = (); fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { @@ -204,36 +202,36 @@ impl ConfigureEvm for OptimismEvmConfig { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::{constants::KECCAK_EMPTY, Header}; + use alloy_eips::eip7685::Requests; use alloy_genesis::Genesis; - use alloy_primitives::{B256, U256}; + use alloy_primitives::{bytes, Address, LogData, B256, U256}; use reth_chainspec::ChainSpec; use reth_evm::execute::ProviderError; - use reth_execution_types::{Chain, ExecutionOutcome}; - use reth_optimism_chainspec::BASE_MAINNET; - use reth_primitives::{ - revm_primitives::{BlockEnv, CfgEnv, SpecId}, - Header, Receipt, Receipts, SealedBlockWithSenders, TxType, KECCAK_EMPTY, + use reth_execution_types::{ + AccountRevertInit, BundleStateInit, Chain, ExecutionOutcome, RevertsInit, }; + use reth_optimism_chainspec::BASE_MAINNET; + use reth_optimism_primitives::OpPrimitives; + use reth_primitives::{Account, Log, Receipt, Receipts, SealedBlockWithSenders, TxType}; use reth_revm::{ - db::{CacheDB, EmptyDBTyped}, + db::{BundleState, CacheDB, EmptyDBTyped}, inspectors::NoOpInspector, + primitives::{AccountInfo, BlockEnv, CfgEnv, SpecId}, JournaledState, }; - use revm_primitives::{CfgEnvWithHandlerCfg, EnvWithHandlerCfg, HandlerCfg}; - use std::{collections::HashSet, sync::Arc}; + use revm_primitives::{EnvWithHandlerCfg, HandlerCfg}; + use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + }; - fn test_evm_config() -> OptimismEvmConfig { - OptimismEvmConfig::new(BASE_MAINNET.clone()) + fn test_evm_config() -> OpEvmConfig { + OpEvmConfig::new(BASE_MAINNET.clone()) } #[test] fn test_fill_cfg_and_block_env() { - // Create a new configuration environment - let mut cfg_env = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); - - // Create a default block environment - let mut block_env = BlockEnv::default(); - // Create a default header let header = Header::default(); @@ -250,10 +248,10 @@ mod 
tests { // Define the total difficulty as zero (default) let total_difficulty = U256::ZERO; - // Use the `OptimismEvmConfig` to fill the `cfg_env` and `block_env` based on the ChainSpec, + // Use the `OpEvmConfig` to create the `cfg_env` and `block_env` based on the ChainSpec, // Header, and total difficulty - OptimismEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() })) - .fill_cfg_and_block_env(&mut cfg_env, &mut block_env, &header, total_difficulty); + let (cfg_env, _) = OpEvmConfig::new(Arc::new(OpChainSpec { inner: chain_spec.clone() })) + .cfg_and_block_env(&header, total_difficulty); // Assert that the chain ID in the `cfg_env` is correctly set to the chain ID of the // ChainSpec @@ -262,7 +260,7 @@ mod tests { #[test] fn test_evm_configure() { - // Create a default `OptimismEvmConfig` + // Create a default `OpEvmConfig` let evm_config = test_evm_config(); // Initialize an empty database wrapped in CacheDB @@ -547,7 +545,7 @@ mod tests { #[test] fn receipts_by_block_hash() { // Create a default SealedBlockWithSenders object - let block = SealedBlockWithSenders::default(); + let block: SealedBlockWithSenders = Default::default(); // Define block hashes for block1 and block2 let block1_hash = B256::new([0x01; 32]); @@ -599,7 +597,8 @@ mod tests { // Create a Chain object with a BTreeMap of blocks mapped to their block numbers, // including block1_hash and block2_hash, and the execution_outcome - let chain = Chain::new([block1, block2], execution_outcome.clone(), None); + let chain: Chain = + Chain::new([block1, block2], execution_outcome.clone(), None); // Assert that the proper receipt vector is returned for block1_hash assert_eq!(chain.receipts_by_block_hash(block1_hash), Some(vec![&receipt1])); @@ -618,4 +617,399 @@ mod tests { // Assert that the execution outcome at the tip block contains the whole execution outcome assert_eq!(chain.execution_outcome_at_block(11), Some(execution_outcome)); } + + #[test] + fn test_initialisation() { + // Create a new BundleState object with initial data + let bundle = BundleState::new( + vec![(Address::new([2; 20]), None, Some(AccountInfo::default()), HashMap::default())], + vec![vec![(Address::new([2; 20]), None, vec![])]], + vec![], + ); + + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Create a Requests object with a vector of requests + let requests = vec![Requests::new(vec![bytes!("dead"), bytes!("beef"), bytes!("beebee")])]; + + // Define the first block number + let first_block = 123; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: bundle.clone(), + receipts: receipts.clone(), + requests: requests.clone(), + first_block, + }; + + // Assert that creating a new ExecutionOutcome using the constructor matches exec_res + assert_eq!( + ExecutionOutcome::new(bundle, receipts.clone(), first_block, requests.clone()), + exec_res + ); + + // Create a BundleStateInit object and insert initial data + let mut state_init: BundleStateInit = HashMap::default(); + state_init + .insert(Address::new([2; 20]), (None, Some(Account::default()), HashMap::default())); + + // Create a HashMap for account reverts and insert initial data + let mut revert_inner: HashMap = HashMap::default(); + 
revert_inner.insert(Address::new([2; 20]), (None, vec![])); + + // Create a RevertsInit object and insert the revert_inner data + let mut revert_init: RevertsInit = HashMap::default(); + revert_init.insert(123, revert_inner); + + // Assert that creating a new ExecutionOutcome using the new_init method matches + // exec_res + assert_eq!( + ExecutionOutcome::new_init( + state_init, + revert_init, + vec![], + receipts, + first_block, + requests, + ), + exec_res + ); + } + + #[test] + fn test_block_number_to_index() { + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Define the first block number + let first_block = 123; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: Default::default(), + receipts, + requests: vec![], + first_block, + }; + + // Test before the first block + assert_eq!(exec_res.block_number_to_index(12), None); + + // Test after after the first block but index larger than receipts length + assert_eq!(exec_res.block_number_to_index(133), None); + + // Test after the first block + assert_eq!(exec_res.block_number_to_index(123), Some(0)); + } + + #[test] + fn test_get_logs() { + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![Log::::default()], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Define the first block number + let first_block = 123; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: Default::default(), + receipts, + requests: vec![], + first_block, + }; + + // Get logs for block number 123 + let logs: Vec<&Log> = exec_res.logs(123).unwrap().collect(); + + // Assert that the logs match the expected logs + assert_eq!(logs, vec![&Log::::default()]); + } + + #[test] + fn test_receipts_by_block() { + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![Log::::default()], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Define the first block number + let first_block = 123; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: Default::default(), // Default value for bundle + receipts, // Include the created receipts + requests: vec![], // Empty vector for requests + first_block, // Set the first block number + }; + + // Get receipts for block number 123 and convert the result into a vector + let receipts_by_block: Vec<_> = exec_res.receipts_by_block(123).iter().collect(); + + // Assert that the receipts for block number 123 match the expected receipts + assert_eq!( + receipts_by_block, + vec![&Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![Log::::default()], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })] + ); + } + + #[test] + fn test_receipts_len() { + 
// Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![Log::::default()], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + })]], + }; + + // Create an empty Receipts object + let receipts_empty = Receipts:: { receipt_vec: vec![] }; + + // Define the first block number + let first_block = 123; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = ExecutionOutcome { + bundle: Default::default(), // Default value for bundle + receipts, // Include the created receipts + requests: vec![], // Empty vector for requests + first_block, // Set the first block number + }; + + // Assert that the length of receipts in exec_res is 1 + assert_eq!(exec_res.len(), 1); + + // Assert that exec_res is not empty + assert!(!exec_res.is_empty()); + + // Create a ExecutionOutcome object with an empty Receipts object + let exec_res_empty_receipts = ExecutionOutcome { + bundle: Default::default(), // Default value for bundle + receipts: receipts_empty, // Include the empty receipts + requests: vec![], // Empty vector for requests + first_block, // Set the first block number + }; + + // Assert that the length of receipts in exec_res_empty_receipts is 0 + assert_eq!(exec_res_empty_receipts.len(), 0); + + // Assert that exec_res_empty_receipts is empty + assert!(exec_res_empty_receipts.is_empty()); + } + + #[test] + fn test_revert_to() { + // Create a random receipt object + let receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt.clone())]], + }; + + // Define the first block number + let first_block = 123; + + // Create a request. + let request = bytes!("deadbeef"); + + // Create a vector of Requests containing the request. + let requests = + vec![Requests::new(vec![request.clone()]), Requests::new(vec![request.clone()])]; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let mut exec_res = + ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block }; + + // Assert that the revert_to method returns true when reverting to the initial block number. + assert!(exec_res.revert_to(123)); + + // Assert that the receipts are properly cut after reverting to the initial block number. + assert_eq!(exec_res.receipts, Receipts { receipt_vec: vec![vec![Some(receipt)]] }); + + // Assert that the requests are properly cut after reverting to the initial block number. + assert_eq!(exec_res.requests, vec![Requests::new(vec![request])]); + + // Assert that the revert_to method returns false when attempting to revert to a block + // number greater than the initial block number. + assert!(!exec_res.revert_to(133)); + + // Assert that the revert_to method returns false when attempting to revert to a block + // number less than the initial block number. + assert!(!exec_res.revert_to(10)); + } + + #[test] + fn test_extend_execution_outcome() { + // Create a Receipt object with specific attributes. 
+ let receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object containing the receipt. + let receipts = Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }; + + // Create a request. + let request = bytes!("deadbeef"); + + // Create a vector of Requests containing the request. + let requests = vec![Requests::new(vec![request.clone()])]; + + // Define the initial block number. + let first_block = 123; + + // Create an ExecutionOutcome object. + let mut exec_res = + ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block }; + + // Extend the ExecutionOutcome object by itself. + exec_res.extend(exec_res.clone()); + + // Assert the extended ExecutionOutcome matches the expected outcome. + assert_eq!( + exec_res, + ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { + receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]] + }, + requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])], + first_block: 123, + } + ); + } + + #[test] + fn test_split_at_execution_outcome() { + // Create a random receipt object + let receipt = Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 46913, + logs: vec![], + success: true, + deposit_nonce: Some(18), + deposit_receipt_version: Some(34), + }; + + // Create a Receipts object with a vector of receipt vectors + let receipts = Receipts { + receipt_vec: vec![ + vec![Some(receipt.clone())], + vec![Some(receipt.clone())], + vec![Some(receipt.clone())], + ], + }; + + // Define the first block number + let first_block = 123; + + // Create a request. + let request = bytes!("deadbeef"); + + // Create a vector of Requests containing the request. + let requests = vec![ + Requests::new(vec![request.clone()]), + Requests::new(vec![request.clone()]), + Requests::new(vec![request.clone()]), + ]; + + // Create a ExecutionOutcome object with the created bundle, receipts, requests, and + // first_block + let exec_res = + ExecutionOutcome { bundle: Default::default(), receipts, requests, first_block }; + + // Split the ExecutionOutcome at block number 124 + let result = exec_res.clone().split_at(124); + + // Define the expected lower ExecutionOutcome after splitting + let lower_execution_outcome = ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { receipt_vec: vec![vec![Some(receipt.clone())]] }, + requests: vec![Requests::new(vec![request.clone()])], + first_block, + }; + + // Define the expected higher ExecutionOutcome after splitting + let higher_execution_outcome = ExecutionOutcome { + bundle: Default::default(), + receipts: Receipts { + receipt_vec: vec![vec![Some(receipt.clone())], vec![Some(receipt)]], + }, + requests: vec![Requests::new(vec![request.clone()]), Requests::new(vec![request])], + first_block: 124, + }; + + // Assert that the split result matches the expected lower and higher outcomes + assert_eq!(result.0, Some(lower_execution_outcome)); + assert_eq!(result.1, higher_execution_outcome); + + // Assert that splitting at the first block number returns None for the lower outcome + assert_eq!(exec_res.clone().split_at(123), (None, exec_res)); + } } diff --git a/crates/optimism/evm/src/strategy.rs b/crates/optimism/evm/src/strategy.rs deleted file mode 100644 index fe8164cc7cf..00000000000 --- a/crates/optimism/evm/src/strategy.rs +++ /dev/null @@ -1,492 +0,0 @@ -//! 
Optimism block execution strategy, - -use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; -use alloy_consensus::Transaction as _; -use reth_chainspec::EthereumHardforks; -use reth_consensus::ConsensusError; -use reth_evm::{ - execute::{ - BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, - BlockValidationError, ProviderError, - }, - system_calls::{OnStateHook, SystemCaller}, - ConfigureEvm, ConfigureEvmEnv, -}; -use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_consensus::validate_block_post_execution; -use reth_optimism_forks::OptimismHardfork; -use reth_primitives::{BlockWithSenders, Header, Receipt, Request, TxType}; -use reth_revm::{ - db::{states::bundle_state::BundleRetention, BundleState}, - state_change::post_block_balance_increments, - Database, State, -}; -use revm_primitives::{ - db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, U256, -}; -use std::{fmt::Display, sync::Arc}; -use tracing::trace; - -/// Factory for [`OpExecutionStrategy`]. -#[derive(Debug, Clone)] -pub struct OpExecutionStrategyFactory { - /// The chainspec - chain_spec: Arc, - /// How to create an EVM. - evm_config: EvmConfig, -} - -impl OpExecutionStrategyFactory { - /// Creates a new default optimism executor strategy factory. - pub fn optimism(chain_spec: Arc) -> Self { - Self::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)) - } -} - -impl OpExecutionStrategyFactory { - /// Creates a new executor strategy factory. - pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config } - } -} - -impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory { - type Strategy + Display>> = OpExecutionStrategy; - - fn create_strategy(&self, db: DB) -> Self::Strategy - where - DB: Database + Display>, - { - let state = - State::builder().with_database(db).with_bundle_update().without_state_clear().build(); - OpExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) - } -} - -/// Block execution strategy for Optimism. -#[allow(missing_debug_implementations)] -pub struct OpExecutionStrategy { - /// The chainspec - chain_spec: Arc, - /// How to create an EVM. - evm_config: EvmConfig, - /// Current state for block execution. - state: State, - /// Utility to call system smart contracts. - system_caller: SystemCaller, -} - -impl OpExecutionStrategy { - /// Creates a new [`OpExecutionStrategy`] - pub fn new( - state: State, - chain_spec: Arc, - evm_config: OptimismEvmConfig, - ) -> Self { - let system_caller = SystemCaller::new(evm_config.clone(), (*chain_spec).clone()); - Self { state, chain_spec, evm_config, system_caller } - } -} - -impl OpExecutionStrategy { - /// Configures a new evm configuration and block environment for the given block. - /// - /// Caution: this does not initialize the tx environment. 
- fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { - let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); - let mut block_env = BlockEnv::default(); - self.evm_config.fill_cfg_and_block_env(&mut cfg, &mut block_env, header, total_difficulty); - - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) - } -} - -impl BlockExecutionStrategy for OpExecutionStrategy -where - DB: Database + Display>, -{ - type Error = BlockExecutionError; - - fn apply_pre_execution_changes( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(), Self::Error> { - // Set state clear flag if the block is after the Spurious Dragon hardfork. - let state_clear_flag = - (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); - self.state.set_state_clear_flag(state_clear_flag); - - let env = self.evm_env_for_block(&block.header, total_difficulty); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - - self.system_caller.apply_beacon_root_contract_call( - block.timestamp, - block.number, - block.parent_beacon_block_root, - &mut evm, - )?; - - // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism - // blocks will always have at least a single transaction in them (the L1 info transaction), - // so we can safely assume that this will always be triggered upon the transition and that - // the above check for empty blocks will never be hit on OP chains. - ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()) - .map_err(|_| OptimismBlockExecutionError::ForceCreate2DeployerFail)?; - - Ok(()) - } - - fn execute_transactions( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - ) -> Result<(Vec, u64), Self::Error> { - let env = self.evm_env_for_block(&block.header, total_difficulty); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - - let is_regolith = - self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp); - - let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.transactions.len()); - for (sender, transaction) in block.transactions_with_sender() { - // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, - // must be no greater than the block’s gasLimit. - let block_available_gas = block.header.gas_limit - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas && - (is_regolith || !transaction.is_system_transaction()) - { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) - } - - // An optimism block should never contain blob transactions. - if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(OptimismBlockExecutionError::BlobTransactionRejected.into()) - } - - // Cache the depositor account prior to the state transition for the deposit nonce. - // - // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces - // were not introduced in Bedrock. In addition, regular transactions don't have deposit - // nonces, so we don't need to touch the DB for those. 
- let depositor = (is_regolith && transaction.is_deposit()) - .then(|| { - evm.db_mut() - .load_cache_account(*sender) - .map(|acc| acc.account_info().unwrap_or_default()) - }) - .transpose() - .map_err(|_| OptimismBlockExecutionError::AccountLoadFailed(*sender))?; - - self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); - - // Execute transaction. - let result_and_state = evm.transact().map_err(move |err| { - let new_err = err.map_db_err(|e| e.into()); - // Ensure hash is calculated for error log, if not already done - BlockValidationError::EVM { - hash: transaction.recalculate_hash(), - error: Box::new(new_err), - } - })?; - - trace!( - target: "evm", - ?transaction, - "Executed transaction" - ); - self.system_caller.on_state(&result_and_state); - let ResultAndState { result, state } = result_and_state; - evm.db_mut().commit(state); - - // append gas used - cumulative_gas_used += result.gas_used(); - - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. - success: result.is_success(), - cumulative_gas_used, - logs: result.into_logs(), - deposit_nonce: depositor.map(|account| account.nonce), - // The deposit receipt version was introduced in Canyon to indicate an update to how - // receipt hashes should be computed when set. The state transition process ensures - // this is only set for post-Canyon deposit transactions. - deposit_receipt_version: (transaction.is_deposit() && - self.chain_spec - .is_fork_active_at_timestamp(OptimismHardfork::Canyon, block.timestamp)) - .then_some(1), - }); - } - - Ok((receipts, cumulative_gas_used)) - } - - fn apply_post_execution_changes( - &mut self, - block: &BlockWithSenders, - total_difficulty: U256, - _receipts: &[Receipt], - ) -> Result, Self::Error> { - let balance_increments = - post_block_balance_increments(&self.chain_spec.clone(), block, total_difficulty); - // increment balances - self.state - .increment_balances(balance_increments) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; - - Ok(vec![]) - } - - fn state_ref(&self) -> &State { - &self.state - } - - fn state_mut(&mut self) -> &mut State { - &mut self.state - } - - fn with_state_hook(&mut self, hook: Option>) { - self.system_caller.with_state_hook(hook); - } - - fn finish(&mut self) -> BundleState { - self.state.merge_transitions(BundleRetention::Reverts); - self.state.take_bundle() - } - - fn validate_block_post_execution( - &self, - block: &BlockWithSenders, - receipts: &[Receipt], - _requests: &[Request], - ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec.clone(), receipts) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::OpChainSpec; - use alloy_consensus::TxEip1559; - use alloy_primitives::{b256, Address, StorageKey, StorageValue}; - use reth_chainspec::MIN_TRANSACTION_GAS; - use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider}; - use reth_optimism_chainspec::{optimism_deposit_tx_signature, OpChainSpecBuilder}; - use reth_primitives::{Account, Block, BlockBody, Signature, Transaction, TransactionSigned}; - use reth_revm::{ - database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, - }; - use std::{collections::HashMap, str::FromStr}; - - fn create_op_state_provider() -> StateProviderTest { - let mut db = StateProviderTest::default(); - - let 
l1_block_contract_account = - Account { balance: U256::ZERO, bytecode_hash: None, nonce: 1 }; - - let mut l1_block_storage = HashMap::default(); - // base fee - l1_block_storage.insert(StorageKey::with_last_byte(1), StorageValue::from(1000000000)); - // l1 fee overhead - l1_block_storage.insert(StorageKey::with_last_byte(5), StorageValue::from(188)); - // l1 fee scalar - l1_block_storage.insert(StorageKey::with_last_byte(6), StorageValue::from(684000)); - // l1 free scalars post ecotone - l1_block_storage.insert( - StorageKey::with_last_byte(3), - StorageValue::from_str( - "0x0000000000000000000000000000000000001db0000d27300000000000000005", - ) - .unwrap(), - ); - - db.insert_account(L1_BLOCK_CONTRACT, l1_block_contract_account, None, l1_block_storage); - - db - } - - fn executor_provider( - chain_spec: Arc, - ) -> BasicBlockExecutorProvider { - let strategy_factory = - OpExecutionStrategyFactory::new(chain_spec.clone(), OptimismEvmConfig::new(chain_spec)); - - BasicBlockExecutorProvider::new(strategy_factory) - } - - #[test] - fn op_deposit_fields_pre_canyon() { - let header = Header { - timestamp: 1, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "83465d1e7d01578c0d609be33570f91242f013e9e295b0879905346abbd63731" - ), - ..Default::default() - }; - - let mut db = create_op_state_provider(); - - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - db.insert_account(addr, account, None, HashMap::default()); - - let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().regolith_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: MIN_TRANSACTION_GAS, - to: addr.into(), - ..Default::default() - }), - Signature::test_signature(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(op_alloy_consensus::TxDeposit { - from: addr, - to: addr.into(), - gas_limit: MIN_TRANSACTION_GAS, - ..Default::default() - }), - Signature::test_signature(), - ); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // make sure the L1 block contract state is preloaded. 
- executor.with_state_mut(|state| { - state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); - }); - - // Attempt to execute a block with one deposit and one non-deposit transaction - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { - header, - body: BlockBody { - transactions: vec![tx, tx_deposit], - ..Default::default() - }, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .into(), - ) - .unwrap(); - - let receipts = executor.receipts(); - let tx_receipt = receipts[0][0].as_ref().unwrap(); - let deposit_receipt = receipts[0][1].as_ref().unwrap(); - - // deposit_receipt_version is not present in pre canyon transactions - assert!(deposit_receipt.deposit_receipt_version.is_none()); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } - - #[test] - fn op_deposit_fields_post_canyon() { - // ensure_create2_deployer will fail if timestamp is set to less then 2 - let header = Header { - timestamp: 2, - number: 1, - gas_limit: 1_000_000, - gas_used: 42_000, - receipts_root: b256!( - "fffc85c4004fd03c7bfbe5491fae98a7473126c099ac11e8286fd0013f15f908" - ), - ..Default::default() - }; - - let mut db = create_op_state_provider(); - let addr = Address::ZERO; - let account = Account { balance: U256::MAX, ..Account::default() }; - - db.insert_account(addr, account, None, HashMap::default()); - - let chain_spec = Arc::new(OpChainSpecBuilder::base_mainnet().canyon_activated().build()); - - let tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce: 0, - gas_limit: MIN_TRANSACTION_GAS, - to: addr.into(), - ..Default::default() - }), - Signature::test_signature(), - ); - - let tx_deposit = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(op_alloy_consensus::TxDeposit { - from: addr, - to: addr.into(), - gas_limit: MIN_TRANSACTION_GAS, - ..Default::default() - }), - optimism_deposit_tx_signature(), - ); - - let provider = executor_provider(chain_spec); - let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); - - // make sure the L1 block contract state is preloaded. 
- executor.with_state_mut(|state| { - state.load_cache_account(L1_BLOCK_CONTRACT).unwrap(); - }); - - // attempt to execute an empty block with parent beacon block root, this should not fail - executor - .execute_and_verify_one( - ( - &BlockWithSenders { - block: Block { - header, - body: BlockBody { - transactions: vec![tx, tx_deposit], - ..Default::default() - }, - }, - senders: vec![addr, addr], - }, - U256::ZERO, - ) - .into(), - ) - .expect("Executing a block while canyon is active should not fail"); - - let receipts = executor.receipts(); - let tx_receipt = receipts[0][0].as_ref().unwrap(); - let deposit_receipt = receipts[0][1].as_ref().unwrap(); - - // deposit_receipt_version is set to 1 for post canyon deposit transactions - assert_eq!(deposit_receipt.deposit_receipt_version, Some(1)); - assert!(tx_receipt.deposit_receipt_version.is_none()); - - // deposit_nonce is present only in deposit transactions - assert!(deposit_receipt.deposit_nonce.is_some()); - assert!(tx_receipt.deposit_nonce.is_none()); - } -} diff --git a/crates/optimism/hardforks/Cargo.toml b/crates/optimism/hardforks/Cargo.toml index 815d50c6bcc..1ea23069a68 100644 --- a/crates/optimism/hardforks/Cargo.toml +++ b/crates/optimism/hardforks/Cargo.toml @@ -27,5 +27,16 @@ once_cell.workspace = true [features] default = ["std"] -std = [] -serde = ["dep:serde"] \ No newline at end of file +std = [ + "alloy-primitives/std", + "once_cell/std", + "serde?/std", + "alloy-chains/std", + "reth-ethereum-forks/std" +] +serde = [ + "dep:serde", + "alloy-chains/serde", + "alloy-primitives/serde", + "reth-ethereum-forks/serde" +] diff --git a/crates/optimism/hardforks/src/dev.rs b/crates/optimism/hardforks/src/dev.rs index 328ef501c46..6dcd28c46c9 100644 --- a/crates/optimism/hardforks/src/dev.rs +++ b/crates/optimism/hardforks/src/dev.rs @@ -1,3 +1,4 @@ +use alloc::vec; use alloy_primitives::U256; use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition}; @@ -24,13 +25,13 @@ pub static DEV_HARDFORKS: LazyLock = LazyLock::new(|| { EthereumHardfork::Paris.boxed(), ForkCondition::TTD { fork_block: None, total_difficulty: U256::ZERO }, ), - (crate::OptimismHardfork::Bedrock.boxed(), ForkCondition::Block(0)), - (crate::OptimismHardfork::Regolith.boxed(), ForkCondition::Timestamp(0)), + (crate::OpHardfork::Bedrock.boxed(), ForkCondition::Block(0)), + (crate::OpHardfork::Regolith.boxed(), ForkCondition::Timestamp(0)), (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(0)), - (crate::OptimismHardfork::Canyon.boxed(), ForkCondition::Timestamp(0)), + (crate::OpHardfork::Canyon.boxed(), ForkCondition::Timestamp(0)), (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(0)), - (crate::OptimismHardfork::Ecotone.boxed(), ForkCondition::Timestamp(0)), - (crate::OptimismHardfork::Fjord.boxed(), ForkCondition::Timestamp(0)), - (crate::OptimismHardfork::Granite.boxed(), ForkCondition::Timestamp(0)), + (crate::OpHardfork::Ecotone.boxed(), ForkCondition::Timestamp(0)), + (crate::OpHardfork::Fjord.boxed(), ForkCondition::Timestamp(0)), + (crate::OpHardfork::Granite.boxed(), ForkCondition::Timestamp(0)), ]) }); diff --git a/crates/optimism/hardforks/src/hardfork.rs b/crates/optimism/hardforks/src/hardfork.rs index 011c4ae72fd..962d7bca4bc 100644 --- a/crates/optimism/hardforks/src/hardfork.rs +++ b/crates/optimism/hardforks/src/hardfork.rs @@ -18,7 +18,7 @@ hardfork!( /// /// When building a list of hardforks for a chain, it's still expected to mix with /// [`EthereumHardfork`]. 
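// Illustrative sketch, not part of this diff (it condenses the `DEV_HARDFORKS`
// list changed above): Ethereum and OP hardforks are expected to be mixed in a
// single `ChainHardforks` schedule, e.g.:
//
//     let forks = ChainHardforks::new(vec![
//         (EthereumHardfork::Paris.boxed(),
//          ForkCondition::TTD { fork_block: None, total_difficulty: U256::ZERO }),
//         (OpHardfork::Bedrock.boxed(), ForkCondition::Block(0)),
//         (OpHardfork::Regolith.boxed(), ForkCondition::Timestamp(0)),
//     ]);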
- OptimismHardfork { + OpHardfork { /// Bedrock: . Bedrock, /// Regolith: . @@ -31,10 +31,14 @@ hardfork!( Fjord, /// Granite: Granite, + /// Holocene: + Holocene, + /// Isthmus: + Isthmus, } ); -impl OptimismHardfork { +impl OpHardfork { /// Retrieves the activation block for the specified hardfork on the given chain. pub fn activation_block(self, fork: H, chain: Chain) -> Option { if chain == Chain::base_sepolia() { @@ -156,6 +160,8 @@ impl OptimismHardfork { Self::Ecotone => Some(1708534800), Self::Fjord => Some(1716998400), Self::Granite => Some(1723478400), + Self::Holocene => Some(1732633200), + Self::Isthmus => todo!(), }, ) } @@ -190,6 +196,8 @@ impl OptimismHardfork { Self::Ecotone => Some(1710374401), Self::Fjord => Some(1720627201), Self::Granite => Some(1726070401), + Self::Holocene => None, + Self::Isthmus => todo!(), }, ) } @@ -253,6 +261,7 @@ impl OptimismHardfork { (Self::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)), (Self::Fjord.boxed(), ForkCondition::Timestamp(1716998400)), (Self::Granite.boxed(), ForkCondition::Timestamp(1723478400)), + (Self::Holocene.boxed(), ForkCondition::Timestamp(1732633200)), ]) } @@ -284,6 +293,7 @@ impl OptimismHardfork { (Self::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)), (Self::Fjord.boxed(), ForkCondition::Timestamp(1716998400)), (Self::Granite.boxed(), ForkCondition::Timestamp(1723478400)), + (Self::Holocene.boxed(), ForkCondition::Timestamp(1732633200)), ]) } @@ -324,13 +334,13 @@ fn match_hardfork(fork: H, hardfork_fn: HF, optimism_hardfork_fn: OH where H: Hardfork, HF: Fn(&EthereumHardfork) -> Option, - OHF: Fn(&OptimismHardfork) -> Option, + OHF: Fn(&OpHardfork) -> Option, { let fork: &dyn Any = ⋔ if let Some(fork) = fork.downcast_ref::() { return hardfork_fn(fork) } - fork.downcast_ref::().and_then(optimism_hardfork_fn) + fork.downcast_ref::().and_then(optimism_hardfork_fn) } #[cfg(test)] @@ -342,35 +352,35 @@ mod tests { #[test] fn test_match_hardfork() { assert_eq!( - OptimismHardfork::base_mainnet_activation_block(EthereumHardfork::Cancun), + OpHardfork::base_mainnet_activation_block(EthereumHardfork::Cancun), Some(11188936) ); - assert_eq!( - OptimismHardfork::base_mainnet_activation_block(OptimismHardfork::Canyon), - Some(9101527) - ); + assert_eq!(OpHardfork::base_mainnet_activation_block(OpHardfork::Canyon), Some(9101527)); } #[test] fn check_op_hardfork_from_str() { - let hardfork_str = ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD", "GRaNiTe"]; + let hardfork_str = + ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD", "GRaNiTe", "hOlOcEnE", "isthMUS"]; let expected_hardforks = [ - OptimismHardfork::Bedrock, - OptimismHardfork::Regolith, - OptimismHardfork::Canyon, - OptimismHardfork::Ecotone, - OptimismHardfork::Fjord, - OptimismHardfork::Granite, + OpHardfork::Bedrock, + OpHardfork::Regolith, + OpHardfork::Canyon, + OpHardfork::Ecotone, + OpHardfork::Fjord, + OpHardfork::Granite, + OpHardfork::Holocene, + OpHardfork::Isthmus, ]; - let hardforks: Vec = - hardfork_str.iter().map(|h| OptimismHardfork::from_str(h).unwrap()).collect(); + let hardforks: Vec = + hardfork_str.iter().map(|h| OpHardfork::from_str(h).unwrap()).collect(); assert_eq!(hardforks, expected_hardforks); } #[test] fn check_nonexistent_hardfork_from_str() { - assert!(OptimismHardfork::from_str("not a hardfork").is_err()); + assert!(OpHardfork::from_str("not a hardfork").is_err()); } } diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index 91c11d3fd23..36f42155e94 100644 --- 
a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -6,6 +6,7 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; @@ -14,30 +15,53 @@ pub mod hardfork; mod dev; pub use dev::DEV_HARDFORKS; -pub use hardfork::OptimismHardfork; +pub use hardfork::OpHardfork; use reth_ethereum_forks::EthereumHardforks; /// Extends [`EthereumHardforks`] with optimism helper methods. -pub trait OptimismHardforks: EthereumHardforks { - /// Convenience method to check if [`OptimismHardfork::Bedrock`] is active at a given block +pub trait OpHardforks: EthereumHardforks { + /// Convenience method to check if [`OpHardfork::Bedrock`] is active at a given block /// number. fn is_bedrock_active_at_block(&self, block_number: u64) -> bool { - self.fork(OptimismHardfork::Bedrock).active_at_block(block_number) + self.fork(OpHardfork::Bedrock).active_at_block(block_number) } - /// Returns `true` if [`Ecotone`](OptimismHardfork::Ecotone) is active at given block timestamp. + /// Returns `true` if [`Regolith`](OpHardfork::Regolith) is active at given block + /// timestamp. + fn is_regolith_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OpHardfork::Regolith).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`Canyon`](OpHardfork::Canyon) is active at given block timestamp. + fn is_canyon_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OpHardfork::Canyon).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`Ecotone`](OpHardfork::Ecotone) is active at given block timestamp. fn is_ecotone_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) + self.fork(OpHardfork::Ecotone).active_at_timestamp(timestamp) } - /// Returns `true` if [`Ecotone`](OptimismHardfork::Ecotone) is active at given block timestamp. + /// Returns `true` if [`Fjord`](OpHardfork::Fjord) is active at given block timestamp. fn is_fjord_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) + self.fork(OpHardfork::Fjord).active_at_timestamp(timestamp) } - /// Returns `true` if [`Granite`](OptimismHardfork::Granite) is active at given block timestamp. + /// Returns `true` if [`Granite`](OpHardfork::Granite) is active at given block timestamp. fn is_granite_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OptimismHardfork::Granite).active_at_timestamp(timestamp) + self.fork(OpHardfork::Granite).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`Holocene`](OpHardfork::Holocene) is active at given block + /// timestamp. + fn is_holocene_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) + } + + /// Returns `true` if [`Isthmus`](OpHardfork::Isthmus) is active at given block + /// timestamp. 
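// Editor's illustrative sketch (not part of this diff): downstream code can take
// the `OpHardforks` trait as a bound instead of matching on concrete forks.
// `deposit_receipt_version` is a hypothetical helper; the `Some(1)` value matches
// the post-Canyon deposit receipts asserted in the executor tests earlier in
// this diff.
fn deposit_receipt_version<C: OpHardforks>(spec: &C, timestamp: u64) -> Option<u64> {
    // Canyon introduced `deposit_receipt_version = 1` on deposit receipts.
    spec.is_canyon_active_at_timestamp(timestamp).then_some(1)
}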
+ fn is_isthmus_active_at_timestamp(&self, timestamp: u64) -> bool { + self.fork(OpHardfork::Isthmus).active_at_timestamp(timestamp) } } diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 8e359e60265..b833342282a 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -13,13 +13,14 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-db.workspace = true reth-engine-local.workspace = true reth-primitives.workspace = true reth-payload-builder.workspace = true -reth-auto-seal-consensus.workspace = true +reth-payload-util.workspace = true +reth-payload-validator.workspace = true reth-basic-payload-builder.workspace = true reth-consensus.workspace = true -reth-rpc-types-compat.workspace = true reth-node-api.workspace = true reth-node-builder.workspace = true reth-tracing.workspace = true @@ -29,10 +30,9 @@ reth-network.workspace = true reth-evm.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-beacon-consensus.workspace = true -reth-discv5.workspace = true -reth-rpc-eth-types.workspace = true -reth-rpc-eth-api.workspace = true -reth-rpc.workspace = true +reth-trie-db.workspace = true +reth-rpc-server-types.workspace = true +reth-tasks = { workspace = true, optional = true } # op-reth reth-optimism-payload-builder.workspace = true @@ -41,6 +41,7 @@ reth-optimism-rpc.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-forks.workspace = true +reth-optimism-primitives = { workspace = true, features = ["serde"] } # revm with required optimism features revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } @@ -50,47 +51,86 @@ alloy-eips.workspace = true alloy-primitives.workspace = true op-alloy-rpc-types-engine.workspace = true alloy-rpc-types-engine.workspace = true - -# async -async-trait.workspace = true -reqwest = { workspace = true, features = ["rustls-tls-native-roots"] } -tracing.workspace = true +alloy-consensus.workspace = true # misc clap.workspace = true serde.workspace = true eyre.workspace = true parking_lot.workspace = true -thiserror.workspace = true # rpc -jsonrpsee.workspace = true -jsonrpsee-types.workspace = true serde_json.workspace = true +# test-utils dependencies +reth-e2e-test-utils = { workspace = true, optional = true } +alloy-genesis = { workspace = true, optional = true } +tokio = { workspace = true, optional = true } + [dev-dependencies] -reth.workspace = true +reth-optimism-node = { workspace = true, features = ["test-utils"] } reth-db.workspace = true -reth-e2e-test-utils.workspace = true +reth-node-core.workspace = true reth-node-builder = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } -tokio.workspace = true +reth-tasks.workspace = true + alloy-primitives.workspace = true -alloy-genesis.workspace = true op-alloy-consensus.workspace = true +alloy-signer-local.workspace = true +alloy-network.workspace = true +alloy-consensus.workspace = true +futures.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-optimism-evm/optimism", - "reth-optimism-payload-builder/optimism", - "reth-beacon-consensus/optimism", - "revm/optimism", - "reth-auto-seal-consensus/optimism", - "reth-optimism-rpc/optimism", - "reth-engine-local/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + 
"reth-optimism-evm/optimism", + "reth-optimism-payload-builder/optimism", + "reth-beacon-consensus/optimism", + "revm/optimism", + "reth-optimism-rpc/optimism", + "reth-engine-local/optimism", + "reth-optimism-consensus/optimism", + "reth-db/optimism", + "reth-optimism-node/optimism", + "reth-node-core/optimism", + "reth-optimism-primitives/optimism", +] +asm-keccak = [ + "reth-primitives/asm-keccak", + "alloy-primitives/asm-keccak", + "revm/asm-keccak", + "reth-optimism-node/asm-keccak", + "reth-node-core/asm-keccak" +] +js-tracer = [ + "reth-node-builder/js-tracer" +] +test-utils = [ + "reth-tasks", + "reth-e2e-test-utils", + "alloy-genesis", + "tokio", + "reth-node-builder/test-utils", + "reth-chainspec/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-network/test-utils", + "reth-payload-builder/test-utils", + "reth-primitives/test-utils", + "reth-revm/test-utils", + "reth-db/test-utils", + "reth-provider/test-utils", + "reth-transaction-pool/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils", + "reth-optimism-node/test-utils", + "reth-optimism-primitives/arbitrary", +] +reth-codec = [ + "reth-primitives/reth-codec", + "reth-optimism-primitives/reth-codec", ] -asm-keccak = ["reth-primitives/asm-keccak"] -test-utils = ["reth-node-builder/test-utils"] diff --git a/crates/optimism/node/src/args.rs b/crates/optimism/node/src/args.rs index 54be83dc510..b84e98d28b1 100644 --- a/crates/optimism/node/src/args.rs +++ b/crates/optimism/node/src/args.rs @@ -38,16 +38,23 @@ pub struct RollupArgs { #[arg(long = "rollup.discovery.v4", default_value = "false")] pub discovery_v4: bool, - /// Enable the engine2 experimental features on op-reth binary + /// Enable the experimental engine features on reth binary + /// + /// DEPRECATED: experimental engine is default now, use --engine.legacy to enable the legacy + /// functionality #[arg(long = "engine.experimental", default_value = "false")] pub experimental: bool, + /// Enable the legacy engine on reth binary + #[arg(long = "engine.legacy", default_value = "false")] + pub legacy: bool, + /// Configure persistence threshold for engine experimental. - #[arg(long = "engine.persistence-threshold", requires = "experimental", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] + #[arg(long = "engine.persistence-threshold", conflicts_with = "legacy", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] pub persistence_threshold: u64, /// Configure the target number of blocks to keep in memory. 
- #[arg(long = "engine.memory-block-buffer-target", requires = "experimental", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] + #[arg(long = "engine.memory-block-buffer-target", conflicts_with = "legacy", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] pub memory_block_buffer_target: u64, } @@ -60,6 +67,7 @@ impl Default for RollupArgs { compute_pending_block: false, discovery_v4: false, experimental: false, + legacy: false, persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD, memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, } diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index a83f4c696a1..1db50b72ee8 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -1,6 +1,7 @@ -use std::sync::Arc; - -use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1}; +use alloy_rpc_types_engine::{ + ExecutionPayload, ExecutionPayloadEnvelopeV2, ExecutionPayloadSidecar, ExecutionPayloadV1, + PayloadError, +}; use op_alloy_rpc_types_engine::{ OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpPayloadAttributes, }; @@ -11,59 +12,139 @@ use reth_node_api::{ EngineObjectValidationError, MessageValidationKind, PayloadOrAttributes, PayloadTypes, VersionSpecificValidationError, }, - validate_version_specific_fields, EngineTypes, EngineValidator, + validate_version_specific_fields, EngineTypes, EngineValidator, PayloadValidator, }; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_forks::OptimismHardfork; -use reth_optimism_payload_builder::{OptimismBuiltPayload, OptimismPayloadBuilderAttributes}; +use reth_optimism_forks::{OpHardfork, OpHardforks}; +use reth_optimism_payload_builder::{OpBuiltPayload, OpPayloadBuilderAttributes}; +use reth_payload_validator::ExecutionPayloadValidator; +use reth_primitives::{Block, SealedBlockFor}; +use std::sync::Arc; /// The types used in the optimism beacon consensus engine. 
#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] #[non_exhaustive] -pub struct OptimismEngineTypes { +pub struct OpEngineTypes { _marker: std::marker::PhantomData, } -impl PayloadTypes for OptimismEngineTypes { +impl PayloadTypes for OpEngineTypes { type BuiltPayload = T::BuiltPayload; type PayloadAttributes = T::PayloadAttributes; type PayloadBuilderAttributes = T::PayloadBuilderAttributes; } -impl EngineTypes for OptimismEngineTypes +impl EngineTypes for OpEngineTypes where T::BuiltPayload: TryInto + TryInto + TryInto + TryInto, { - type ExecutionPayloadV1 = ExecutionPayloadV1; - type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; - type ExecutionPayloadV3 = OpExecutionPayloadEnvelopeV3; - type ExecutionPayloadV4 = OpExecutionPayloadEnvelopeV4; + type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1; + type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; + type ExecutionPayloadEnvelopeV3 = OpExecutionPayloadEnvelopeV3; + type ExecutionPayloadEnvelopeV4 = OpExecutionPayloadEnvelopeV4; } -/// A default payload type for [`OptimismEngineTypes`] +/// A default payload type for [`OpEngineTypes`] #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] #[non_exhaustive] -pub struct OptimismPayloadTypes; +pub struct OpPayloadTypes; -impl PayloadTypes for OptimismPayloadTypes { - type BuiltPayload = OptimismBuiltPayload; +impl PayloadTypes for OpPayloadTypes { + type BuiltPayload = OpBuiltPayload; type PayloadAttributes = OpPayloadAttributes; - type PayloadBuilderAttributes = OptimismPayloadBuilderAttributes; + type PayloadBuilderAttributes = OpPayloadBuilderAttributes; } /// Validator for Optimism engine API. #[derive(Debug, Clone)] -pub struct OptimismEngineValidator { - chain_spec: Arc, +pub struct OpEngineValidator { + inner: ExecutionPayloadValidator, } -impl OptimismEngineValidator { +impl OpEngineValidator { /// Instantiates a new validator. pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } + Self { inner: ExecutionPayloadValidator::new(chain_spec) } + } + + /// Returns the chain spec used by the validator. 
+ #[inline] + fn chain_spec(&self) -> &OpChainSpec { + self.inner.chain_spec() + } +} + +impl PayloadValidator for OpEngineValidator { + type Block = Block; + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError> { + self.inner.ensure_well_formed_payload(payload, sidecar) + } +} + +impl EngineValidator for OpEngineValidator +where + Types: EngineTypes, +{ + fn validate_version_specific_fields( + &self, + version: EngineApiMessageVersion, + payload_or_attrs: PayloadOrAttributes<'_, OpPayloadAttributes>, + ) -> Result<(), EngineObjectValidationError> { + validate_withdrawals_presence( + self.chain_spec(), + version, + payload_or_attrs.message_validation_kind(), + payload_or_attrs.timestamp(), + payload_or_attrs.withdrawals().is_some(), + )?; + validate_parent_beacon_block_root_presence( + self.chain_spec(), + version, + payload_or_attrs.message_validation_kind(), + payload_or_attrs.timestamp(), + payload_or_attrs.parent_beacon_block_root().is_some(), + ) + } + + fn ensure_well_formed_attributes( + &self, + version: EngineApiMessageVersion, + attributes: &OpPayloadAttributes, + ) -> Result<(), EngineObjectValidationError> { + validate_version_specific_fields(self.chain_spec(), version, attributes.into())?; + + if attributes.gas_limit.is_none() { + return Err(EngineObjectValidationError::InvalidParams( + "MissingGasLimitInPayloadAttributes".to_string().into(), + )) + } + + if self + .chain_spec() + .is_holocene_active_at_timestamp(attributes.payload_attributes.timestamp) + { + let (elasticity, denominator) = + attributes.decode_eip_1559_params().ok_or_else(|| { + EngineObjectValidationError::InvalidParams( + "MissingEip1559ParamsInPayloadAttributes".to_string().into(), + ) + })?; + if elasticity != 0 && denominator == 0 { + return Err(EngineObjectValidationError::InvalidParams( + "Eip1559ParamsDenominatorZero".to_string().into(), + )) + } + } + + Ok(()) } } @@ -81,7 +162,7 @@ pub fn validate_withdrawals_presence( timestamp: u64, has_withdrawals: bool, ) -> Result<(), EngineObjectValidationError> { - let is_shanghai = chain_spec.fork(OptimismHardfork::Canyon).active_at_timestamp(timestamp); + let is_shanghai = chain_spec.fork(OpHardfork::Canyon).active_at_timestamp(timestamp); match version { EngineApiMessageVersion::V1 => { @@ -109,44 +190,115 @@ pub fn validate_withdrawals_presence( Ok(()) } -impl EngineValidator for OptimismEngineValidator -where - Types: EngineTypes, -{ - fn validate_version_specific_fields( - &self, - version: EngineApiMessageVersion, - payload_or_attrs: PayloadOrAttributes<'_, OpPayloadAttributes>, - ) -> Result<(), EngineObjectValidationError> { - validate_withdrawals_presence( - &self.chain_spec, - version, - payload_or_attrs.message_validation_kind(), - payload_or_attrs.timestamp(), - payload_or_attrs.withdrawals().is_some(), - )?; - validate_parent_beacon_block_root_presence( - &self.chain_spec, - version, - payload_or_attrs.message_validation_kind(), - payload_or_attrs.timestamp(), - payload_or_attrs.parent_beacon_block_root().is_some(), - ) - } +#[cfg(test)] +mod test { - fn ensure_well_formed_attributes( - &self, - version: EngineApiMessageVersion, - attributes: &OpPayloadAttributes, - ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, attributes.into())?; + use crate::engine; + use alloy_primitives::{b64, Address, B256, B64}; + use alloy_rpc_types_engine::PayloadAttributes; + use reth_optimism_chainspec::BASE_SEPOLIA; - if 
attributes.gas_limit.is_none() { - return Err(EngineObjectValidationError::InvalidParams( - "MissingGasLimitInPayloadAttributes".to_string().into(), - )) + use super::*; + + fn get_chainspec() -> Arc { + let hardforks = OpHardfork::base_sepolia(); + Arc::new(OpChainSpec { + inner: ChainSpec { + chain: BASE_SEPOLIA.inner.chain, + genesis: BASE_SEPOLIA.inner.genesis.clone(), + genesis_hash: BASE_SEPOLIA.inner.genesis_hash.clone(), + paris_block_and_final_difficulty: BASE_SEPOLIA + .inner + .paris_block_and_final_difficulty, + hardforks, + base_fee_params: BASE_SEPOLIA.inner.base_fee_params.clone(), + max_gas_limit: BASE_SEPOLIA.inner.max_gas_limit, + prune_delete_limit: 10000, + ..Default::default() + }, + }) + } + + const fn get_attributes(eip_1559_params: Option, timestamp: u64) -> OpPayloadAttributes { + OpPayloadAttributes { + gas_limit: Some(1000), + eip_1559_params, + transactions: None, + no_tx_pool: None, + payload_attributes: PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + target_blobs_per_block: None, + max_blobs_per_block: None, + }, } + } - Ok(()) + #[test] + fn test_well_formed_attributes_pre_holocene() { + let validator = OpEngineValidator::new(get_chainspec()); + let attributes = get_attributes(None, 1732633199); + + let result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(result.is_ok()); + } + + #[test] + fn test_well_formed_attributes_holocene_no_eip1559_params() { + let validator = OpEngineValidator::new(get_chainspec()); + let attributes = get_attributes(None, 1732633200); + + let result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(matches!(result, Err(EngineObjectValidationError::InvalidParams(_)))); + } + + #[test] + fn test_well_formed_attributes_holocene_eip1559_params_zero_denominator() { + let validator = OpEngineValidator::new(get_chainspec()); + let attributes = get_attributes(Some(b64!("0000000000000008")), 1732633200); + + let result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(matches!(result, Err(EngineObjectValidationError::InvalidParams(_)))); + } + + #[test] + fn test_well_formed_attributes_holocene_valid() { + let validator = OpEngineValidator::new(get_chainspec()); + let attributes = get_attributes(Some(b64!("0000000800000008")), 1732633200); + + let result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(result.is_ok()); + } + + #[test] + fn test_well_formed_attributes_holocene_valid_all_zero() { + let validator = OpEngineValidator::new(get_chainspec()); + let attributes = get_attributes(Some(b64!("0000000000000000")), 1732633200); + + let result = >::ensure_well_formed_attributes( + &validator, EngineApiMessageVersion::V3, &attributes + ); + assert!(result.is_ok()); } } diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs index 768f4d94efd..81db8b2b7fc 100644 --- a/crates/optimism/node/src/lib.rs +++ b/crates/optimism/node/src/lib.rs @@ -1,4 +1,7 @@ //! Standalone crate for Optimism-specific Reth configuration and builder types. +//! +//! # features +//! 
- `js-tracer`: Enable the `JavaScript` tracer for the `debug_trace` endpoints #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", @@ -15,15 +18,19 @@ pub mod args; /// Exports optimism-specific implementations of the [`EngineTypes`](reth_node_api::EngineTypes) /// trait. pub mod engine; -pub use engine::OptimismEngineTypes; +pub use engine::OpEngineTypes; pub mod node; -pub use node::OptimismNode; +pub use node::OpNode; pub mod txpool; +/// Helpers for running test node instances. +#[cfg(feature = "test-utils")] +pub mod utils; + pub use reth_optimism_payload_builder::{ - OptimismBuiltPayload, OptimismPayloadBuilder, OptimismPayloadBuilderAttributes, + OpBuiltPayload, OpPayloadBuilder, OpPayloadBuilderAttributes, }; pub use reth_optimism_evm::*; diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 648da85d0bb..e9e7e23bc9c 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -1,53 +1,135 @@ //! Optimism Node types config. -use std::sync::Arc; - +use crate::{ + args::RollupArgs, + engine::OpEngineValidator, + txpool::{OpTransactionPool, OpTransactionValidator}, + OpEngineTypes, +}; +use alloy_consensus::Header; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; -use reth_chainspec::{EthChainSpec, Hardforks}; -use reth_evm::ConfigureEvm; -use reth_network::{NetworkConfig, NetworkHandle, NetworkManager}; -use reth_node_api::{EngineValidator, FullNodeComponents, NodeAddOns}; +use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; +use reth_db::transaction::{DbTx, DbTxMut}; +use reth_evm::{execute::BasicBlockExecutorProvider, ConfigureEvm}; +use reth_network::{NetworkConfig, NetworkHandle, NetworkManager, PeersInfo}; +use reth_node_api::{AddOnsContext, EngineValidator, FullNodeComponents, NodeAddOns, TxTy}; use reth_node_builder::{ components::{ - ComponentsBuilder, ConsensusBuilder, EngineValidatorBuilder, ExecutorBuilder, - NetworkBuilder, PayloadServiceBuilder, PoolBuilder, PoolBuilderConfigOverrides, + ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NetworkBuilder, + PayloadServiceBuilder, PoolBuilder, PoolBuilderConfigOverrides, }, node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, - rpc::{RethRpcAddOns, RpcAddOns, RpcHandle}, + rpc::{EngineValidatorAddOn, EngineValidatorBuilder, RethRpcAddOns, RpcAddOns, RpcHandle}, BuilderContext, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, }; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_consensus::OptimismBeaconConsensus; -use reth_optimism_evm::{OpExecutorProvider, OptimismEvmConfig}; -use reth_optimism_rpc::OpEthApi; +use reth_optimism_consensus::OpBeaconConsensus; +use reth_optimism_evm::{OpEvmConfig, OpExecutionStrategyFactory}; +use reth_optimism_payload_builder::{builder::OpPayloadTransactions, config::OpDAConfig}; +use reth_optimism_primitives::OpPrimitives; +use reth_optimism_rpc::{ + miner::{MinerApiExtServer, OpMinerExtApi}, + witness::{DebugExecutionWitnessApiServer, OpDebugWitnessApi}, + OpEthApi, SequencerClient, +}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::Header; -use reth_provider::CanonStateSubscriptions; +use reth_primitives::{BlockBody, PooledTransactionsElement, TransactionSigned}; +use reth_provider::{ + providers::ChainStorage, BlockBodyReader, BlockBodyWriter, CanonStateSubscriptions, + ChainSpecProvider, DBProvider, EthStorage, ProviderResult, ReadBodyInput, +}; 
+use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ - blobstore::DiskFileBlobStore, CoinbaseTipOrdering, TransactionPool, + blobstore::DiskFileBlobStore, CoinbaseTipOrdering, PoolTransaction, TransactionPool, TransactionValidationTaskExecutor, }; +use reth_trie_db::MerklePatriciaTrie; +use std::sync::Arc; -use crate::{ - args::RollupArgs, - engine::OptimismEngineValidator, - txpool::{OpTransactionPool, OpTransactionValidator}, - OptimismEngineTypes, -}; +/// Storage implementation for Optimism. +#[derive(Debug, Default, Clone)] +pub struct OpStorage(EthStorage); + +impl> BlockBodyWriter for OpStorage { + fn write_block_bodies( + &self, + provider: &Provider, + bodies: Vec<(u64, Option)>, + ) -> ProviderResult<()> { + self.0.write_block_bodies(provider, bodies) + } + + fn remove_block_bodies_above( + &self, + provider: &Provider, + block: alloy_primitives::BlockNumber, + ) -> ProviderResult<()> { + self.0.remove_block_bodies_above(provider, block) + } +} + +impl> + BlockBodyReader for OpStorage +{ + type Block = reth_primitives::Block; + + fn read_block_bodies( + &self, + provider: &Provider, + inputs: Vec>, + ) -> ProviderResult> { + self.0.read_block_bodies(provider, inputs) + } +} + +impl ChainStorage for OpStorage { + fn reader( + &self, + ) -> impl reth_provider::ChainStorageReader, OpPrimitives> + where + TX: DbTx + 'static, + Types: reth_provider::providers::NodeTypesForProvider, + { + self + } + + fn writer( + &self, + ) -> impl reth_provider::ChainStorageWriter, OpPrimitives> + where + TX: DbTxMut + DbTx + 'static, + Types: NodeTypes, + { + self + } +} /// Type configuration for a regular Optimism node. #[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct OptimismNode { +pub struct OpNode { /// Additional Optimism args pub args: RollupArgs, + /// Data availability configuration for the OP builder. + /// + /// Used to throttle the size of the data availability payloads (configured by the batcher via + /// the `miner_` api). + /// + /// By default no throttling is applied. + pub da_config: OpDAConfig, } -impl OptimismNode { +impl OpNode { /// Creates a new instance of the Optimism node type. - pub const fn new(args: RollupArgs) -> Self { - Self { args } + pub fn new(args: RollupArgs) -> Self { + Self { args, da_config: OpDAConfig::default() } + } + + /// Configure the data availability configuration for the OP builder. + pub fn with_da_config(mut self, da_config: OpDAConfig) -> Self { + self.da_config = da_config; + self } /// Returns the components for the given [`RollupArgs`]. @@ -55,91 +137,116 @@ impl OptimismNode { args: RollupArgs, ) -> ComponentsBuilder< Node, - OptimismPoolBuilder, - OptimismPayloadBuilder, - OptimismNetworkBuilder, - OptimismExecutorBuilder, - OptimismConsensusBuilder, - OptimismEngineValidatorBuilder, + OpPoolBuilder, + OpPayloadBuilder, + OpNetworkBuilder, + OpExecutorBuilder, + OpConsensusBuilder, > where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, >, { let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. 
} = args; ComponentsBuilder::default() .node_types::() - .pool(OptimismPoolBuilder::default()) - .payload(OptimismPayloadBuilder::new(compute_pending_block)) - .network(OptimismNetworkBuilder { + .pool(OpPoolBuilder::default()) + .payload(OpPayloadBuilder::new(compute_pending_block)) + .network(OpNetworkBuilder { disable_txpool_gossip, disable_discovery_v4: !discovery_v4, }) - .executor(OptimismExecutorBuilder::default()) - .consensus(OptimismConsensusBuilder::default()) - .engine_validator(OptimismEngineValidatorBuilder::default()) + .executor(OpExecutorBuilder::default()) + .consensus(OpConsensusBuilder::default()) } } -impl Node for OptimismNode +impl Node for OpNode where N: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Storage = OpStorage, + >, >, { type ComponentsBuilder = ComponentsBuilder< N, - OptimismPoolBuilder, - OptimismPayloadBuilder, - OptimismNetworkBuilder, - OptimismExecutorBuilder, - OptimismConsensusBuilder, - OptimismEngineValidatorBuilder, + OpPoolBuilder, + OpPayloadBuilder, + OpNetworkBuilder, + OpExecutorBuilder, + OpConsensusBuilder, >; - type AddOns = OptimismAddOns< - NodeAdapter>::Components>, - >; + type AddOns = + OpAddOns>::Components>>; fn components_builder(&self) -> Self::ComponentsBuilder { - let Self { args } = self; - Self::components(args.clone()) + Self::components(self.args.clone()) } fn add_ons(&self) -> Self::AddOns { - OptimismAddOns::new(self.args.sequencer_http.clone()) + Self::AddOns::builder() + .with_sequencer(self.args.sequencer_http.clone()) + .with_da_config(self.da_config.clone()) + .build() } } -impl NodeTypes for OptimismNode { - type Primitives = (); +impl NodeTypes for OpNode { + type Primitives = OpPrimitives; type ChainSpec = OpChainSpec; + type StateCommitment = MerklePatriciaTrie; + type Storage = OpStorage; } -impl NodeTypesWithEngine for OptimismNode { - type Engine = OptimismEngineTypes; +impl NodeTypesWithEngine for OpNode { + type Engine = OpEngineTypes; } /// Add-ons w.r.t. optimism. #[derive(Debug)] -pub struct OptimismAddOns(pub RpcAddOns>); +pub struct OpAddOns { + /// Rpc add-ons responsible for launching the RPC servers and instantiating the RPC handlers + /// and eth-api. + pub rpc_add_ons: RpcAddOns, OpEngineValidatorBuilder>, + /// Data availability configuration for the OP builder. + pub da_config: OpDAConfig, +} -impl Default for OptimismAddOns { +impl>> Default for OpAddOns { fn default() -> Self { - Self::new(None) + Self::builder().build() } } -impl OptimismAddOns { - /// Create a new instance with the given `sequencer_http` URL. - pub fn new(sequencer_http: Option) -> Self { - Self(RpcAddOns::new(move |ctx| OpEthApi::new(ctx, sequencer_http))) +impl>> OpAddOns { + /// Build a [`OpAddOns`] using [`OpAddOnsBuilder`]. 
+ pub fn builder() -> OpAddOnsBuilder { + OpAddOnsBuilder::default() + } } -impl>> NodeAddOns - for OptimismAddOns +impl NodeAddOns for OpAddOns +where + N: FullNodeComponents< + Types: NodeTypesWithEngine< + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Storage = OpStorage, + Engine = OpEngineTypes, + >, + Pool: TransactionPool>, + >, + OpEngineValidator: EngineValidator<::Engine>, { type Handle = RpcHandle>; @@ -147,38 +254,138 @@ impl>> NodeAddOn self, ctx: reth_node_api::AddOnsContext<'_, N>, ) -> eyre::Result { - self.0.launch_add_ons(ctx).await + let Self { rpc_add_ons, da_config } = self; + // install additional OP-specific rpc methods + let debug_ext = OpDebugWitnessApi::new( + ctx.node.provider().clone(), + ctx.node.evm_config().clone(), + Box::new(ctx.node.task_executor().clone()), + ); + let miner_ext = OpMinerExtApi::new(da_config); + + rpc_add_ons + .launch_add_ons_with(ctx, move |modules, auth_modules| { + debug!(target: "reth::cli", "Installing debug payload witness rpc endpoint"); + modules.merge_if_module_configured(RethRpcModule::Debug, debug_ext.into_rpc())?; + + // extend the miner namespace if configured in the regular http server + modules.merge_if_module_configured( + RethRpcModule::Miner, + miner_ext.clone().into_rpc(), + )?; + + // install the miner extension in the authenticated server if configured + if modules.module_config().contains_any(&RethRpcModule::Miner) { + debug!(target: "reth::cli", "Installing miner DA rpc endpoint"); + auth_modules.merge_auth_methods(miner_ext.into_rpc())?; + } + + Ok(()) + }) + .await } } -impl>> RethRpcAddOns - for OptimismAddOns +impl RethRpcAddOns for OpAddOns +where + N: FullNodeComponents< + Types: NodeTypesWithEngine< + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Storage = OpStorage, + Engine = OpEngineTypes, + >, + Pool: TransactionPool>, + >, + OpEngineValidator: EngineValidator<::Engine>, { type EthApi = OpEthApi; fn hooks_mut(&mut self) -> &mut reth_node_builder::rpc::RpcHooks { - self.0.hooks_mut() + self.rpc_add_ons.hooks_mut() + } +} + +impl EngineValidatorAddOn for OpAddOns +where + N: FullNodeComponents< + Types: NodeTypesWithEngine< + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Engine = OpEngineTypes, + >, + >, +{ + type Validator = OpEngineValidator; + + async fn engine_validator(&self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { + OpEngineValidatorBuilder::default().build(ctx).await + } +} + +/// A builder for [`OpAddOns`]. +#[derive(Debug, Default, Clone)] +#[non_exhaustive] +pub struct OpAddOnsBuilder { + /// Sequencer client, configured to forward submitted transactions to sequencer of given OP + /// network. + sequencer_client: Option, + /// Data availability configuration for the OP builder. + da_config: Option, +} + +impl OpAddOnsBuilder { + /// With a [`SequencerClient`]. + pub fn with_sequencer(mut self, sequencer_client: Option) -> Self { + self.sequencer_client = sequencer_client.map(SequencerClient::new); + self + } + + /// Configure the data availability configuration for the OP builder. + pub fn with_da_config(mut self, da_config: OpDAConfig) -> Self { + self.da_config = Some(da_config); + self + } +} + +impl OpAddOnsBuilder { + /// Builds an instance of [`OpAddOns`].
+ pub fn build(self) -> OpAddOns + where + N: FullNodeComponents>, + { + let Self { sequencer_client, da_config } = self; + + OpAddOns { + rpc_add_ons: RpcAddOns::new( + move |ctx| OpEthApi::::builder().with_sequencer(sequencer_client).build(ctx), + Default::default(), + ), + da_config: da_config.unwrap_or_default(), + } } } /// A regular optimism evm and executor builder. #[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] -pub struct OptimismExecutorBuilder; +pub struct OpExecutorBuilder; -impl ExecutorBuilder for OptimismExecutorBuilder +impl ExecutorBuilder for OpExecutorBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { - type EVM = OptimismEvmConfig; - type Executor = OpExecutorProvider; + type EVM = OpEvmConfig; + type Executor = BasicBlockExecutorProvider; async fn build_evm( self, ctx: &BuilderContext, ) -> eyre::Result<(Self::EVM, Self::Executor)> { - let evm_config = OptimismEvmConfig::new(ctx.chain_spec()); - let executor = OpExecutorProvider::new(ctx.chain_spec(), evm_config.clone()); + let evm_config = OpEvmConfig::new(ctx.chain_spec()); + let strategy_factory = + OpExecutionStrategyFactory::new(ctx.chain_spec(), evm_config.clone()); + let executor = BasicBlockExecutorProvider::new(strategy_factory); Ok((evm_config, executor)) } @@ -189,14 +396,14 @@ where /// This contains various settings that can be configured and take precedence over the node's /// config. #[derive(Debug, Default, Clone)] -pub struct OptimismPoolBuilder { +pub struct OpPoolBuilder { /// Enforced overrides that are applied to the pool config. pub pool_config_overrides: PoolBuilderConfigOverrides, } -impl PoolBuilder for OptimismPoolBuilder +impl PoolBuilder for OpPoolBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type Pool = OpTransactionPool; @@ -208,6 +415,7 @@ where let validator = TransactionValidationTaskExecutor::eth_builder(Arc::new( ctx.chain_spec().inner.clone(), )) + .no_eip4844() .with_head_timestamp(ctx.head().timestamp) .kzg_settings(ctx.kzg_settings()?) .with_additional_tasks( @@ -271,7 +479,7 @@ where /// A basic optimism payload service builder #[derive(Debug, Default, Clone)] -pub struct OptimismPayloadBuilder { +pub struct OpPayloadBuilder { /// By default the pending block equals the latest block /// to save resources and not leak txs from the tx-pool, /// this flag enables computing of the pending block @@ -281,12 +489,30 @@ pub struct OptimismPayloadBuilder { /// will use the payload attributes from the latest block. Note /// that this flag is not yet functional. pub compute_pending_block: bool, + /// The type responsible for yielding the best transactions for the payload if mempool + /// transactions are allowed. + pub best_transactions: Txs, } -impl OptimismPayloadBuilder { +impl OpPayloadBuilder { /// Create a new instance with the given `compute_pending_block` flag. pub const fn new(compute_pending_block: bool) -> Self { - Self { compute_pending_block } + Self { compute_pending_block, best_transactions: () } + } +} + +impl OpPayloadBuilder +where + Txs: OpPayloadTransactions, +{ + /// Configures the type responsible for yielding the transactions that should be included in the + /// payload. + pub fn with_transactions( + self, + best_transactions: T, + ) -> OpPayloadBuilder { + let Self { compute_pending_block, .. } = self; + OpPayloadBuilder { compute_pending_block, best_transactions } } /// A helper method to initialize [`PayloadBuilderService`] with the given EVM config. 
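// Editor's illustrative sketch (not part of this diff): wiring the new DA
// configuration through the node type. `OpNode::new` and `with_da_config` come
// from this diff; the helper function and the default config value are
// hypothetical.
use reth_optimism_node::{args::RollupArgs, OpNode};
use reth_optimism_payload_builder::config::OpDAConfig;

fn op_node_with_da(args: RollupArgs) -> OpNode {
    // The batcher can later adjust the DA throttling at runtime via the `miner_` API.
    OpNode::new(args).with_da_config(OpDAConfig::default())
}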
@@ -295,17 +521,23 @@ impl OptimismPayloadBuilder { evm_config: Evm, ctx: &BuilderContext<Node>, pool: Pool, - ) -> eyre::Result<PayloadBuilderHandle<OptimismEngineTypes>> + ) -> eyre::Result<PayloadBuilderHandle<OpEngineTypes>> where Node: FullNodeTypes< - Types: NodeTypesWithEngine<Engine = OptimismEngineTypes, ChainSpec = OpChainSpec>, + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, >, - Pool: TransactionPool + Unpin + 'static, - Evm: ConfigureEvm<Header = Header>, + Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<Node::Types>>> + + Unpin + + 'static, + Evm: ConfigureEvm<Header = Header>
, { - let payload_builder = - reth_optimism_payload_builder::OptimismPayloadBuilder::new(evm_config) - .set_compute_pending_block(self.compute_pending_block); + let payload_builder = reth_optimism_payload_builder::OpPayloadBuilder::new(evm_config) + .with_transactions(self.best_transactions) + .set_compute_pending_block(self.compute_pending_block); let conf = ctx.payload_builder_config(); let payload_job_config = BasicPayloadJobGeneratorConfig::default() @@ -331,35 +563,42 @@ impl OptimismPayloadBuilder { } } -impl PayloadServiceBuilder for OptimismPayloadBuilder +impl PayloadServiceBuilder for OpPayloadBuilder where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, >, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, + Txs: OpPayloadTransactions, { async fn spawn_payload_service( self, ctx: &BuilderContext, pool: Pool, - ) -> eyre::Result> { - self.spawn(OptimismEvmConfig::new(ctx.chain_spec()), ctx, pool) + ) -> eyre::Result> { + self.spawn(OpEvmConfig::new(ctx.chain_spec()), ctx, pool) } } /// A basic optimism network builder. #[derive(Debug, Default, Clone)] -pub struct OptimismNetworkBuilder { +pub struct OpNetworkBuilder { /// Disable transaction pool gossip pub disable_txpool_gossip: bool, /// Disable discovery v4 pub disable_discovery_v4: bool, } -impl OptimismNetworkBuilder { +impl OpNetworkBuilder { /// Returns the [`NetworkConfig`] that contains the settings to launch the p2p network. /// - /// This applies the configured [`OptimismNetworkBuilder`] settings. + /// This applies the configured [`OpNetworkBuilder`] settings. pub fn network_config( &self, ctx: &BuilderContext, @@ -404,10 +643,16 @@ impl OptimismNetworkBuilder { } } -impl NetworkBuilder for OptimismNetworkBuilder +impl NetworkBuilder for OpNetworkBuilder where - Node: FullNodeTypes>, - Pool: TransactionPool + Unpin + 'static, + Node: FullNodeTypes>, + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus = TxTy, + Pooled = PooledTransactionsElement, + >, + > + Unpin + + 'static, { async fn build_network( self, @@ -417,6 +662,7 @@ where let network_config = self.network_config(ctx)?; let network = NetworkManager::builder(network_config).await?; let handle = ctx.start_network(network, pool); + info!(target: "reth::cli", enode=%handle.local_node_record(), "P2P networking initialized"); Ok(handle) } @@ -425,37 +671,36 @@ where /// A basic optimism consensus builder. #[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct OptimismConsensusBuilder; +pub struct OpConsensusBuilder; -impl ConsensusBuilder for OptimismConsensusBuilder +impl ConsensusBuilder for OpConsensusBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { - type Consensus = Arc; + type Consensus = Arc; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { - if ctx.is_dev() { - Ok(Arc::new(reth_auto_seal_consensus::AutoSealConsensus::new(ctx.chain_spec()))) - } else { - Ok(Arc::new(OptimismBeaconConsensus::new(ctx.chain_spec()))) - } + Ok(Arc::new(OpBeaconConsensus::new(ctx.chain_spec()))) } } -/// Builder for [`OptimismEngineValidator`]. +/// Builder for [`OpEngineValidator`]. 
#[derive(Debug, Default, Clone)] #[non_exhaustive] -pub struct OptimismEngineValidatorBuilder; +pub struct OpEngineValidatorBuilder; -impl EngineValidatorBuilder for OptimismEngineValidatorBuilder +impl EngineValidatorBuilder for OpEngineValidatorBuilder where - Types: NodeTypesWithEngine, - Node: FullNodeTypes, - OptimismEngineValidator: EngineValidator, + Types: NodeTypesWithEngine< + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + Engine = OpEngineTypes, + >, + Node: FullNodeComponents, { - type Validator = OptimismEngineValidator; + type Validator = OpEngineValidator; - async fn build_validator(self, ctx: &BuilderContext) -> eyre::Result { - Ok(OptimismEngineValidator::new(ctx.chain_spec())) + async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result { + Ok(OpEngineValidator::new(ctx.config.chain.clone())) } } diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 09aa76fefb8..a3e474a6076 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -3,7 +3,9 @@ use alloy_eips::eip2718::Encodable2718; use parking_lot::RwLock; use reth_chainspec::ChainSpec; use reth_optimism_evm::RethL1BlockInfo; -use reth_primitives::{Block, GotExpected, InvalidTransactionError, SealedBlock}; +use reth_primitives::{ + Block, GotExpected, InvalidTransactionError, SealedBlock, TransactionSigned, +}; use reth_provider::{BlockReaderIdExt, StateProviderFactory}; use reth_revm::L1BlockInfo; use reth_transaction_pool::{ @@ -67,14 +69,14 @@ impl OpTransactionValidator { impl OpTransactionValidator where - Client: StateProviderFactory + BlockReaderIdExt, - Tx: EthPoolTransaction, + Client: StateProviderFactory + BlockReaderIdExt, + Tx: EthPoolTransaction, { /// Create a new [`OpTransactionValidator`]. 
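// Editor's illustrative sketch (not part of this diff): the validator below
// charges the sender for the L1 data availability fee on top of the regular L2
// cost before admitting a transaction to the pool. A simplified, hypothetical
// form of that balance check:
use alloy_primitives::U256;

fn covers_op_costs(balance: U256, l2_cost: U256, l1_data_fee: U256) -> bool {
    // value + max gas cost (L2) plus the rollup data fee must be funded.
    balance >= l2_cost.saturating_add(l1_data_fee)
}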
pub fn new(inner: EthTransactionValidator) -> Self { let this = Self::with_block_info(inner, OpL1BlockInfo::default()); if let Ok(Some(block)) = - this.inner.client().block_by_number_or_tag(reth_primitives::BlockNumberOrTag::Latest) + this.inner.client().block_by_number_or_tag(alloy_eips::BlockNumberOrTag::Latest) { // genesis block has no txs, so we can't extract L1 info, we set the block info to empty // so that we will accept txs into the pool before the first block @@ -140,7 +142,8 @@ where let l1_block_info = self.block_info.l1_block_info.read().clone(); let mut encoded = Vec::with_capacity(valid_tx.transaction().encoded_length()); - valid_tx.transaction().clone().into_consensus().into().encode_2718(&mut encoded); + let tx = valid_tx.transaction().clone_into_consensus(); + tx.encode_2718(&mut encoded); let cost_addition = match l1_block_info.l1_tx_data_fee( &self.chain_spec(), @@ -192,8 +195,8 @@ where impl TransactionValidator for OpTransactionValidator where - Client: StateProviderFactory + BlockReaderIdExt, - Tx: EthPoolTransaction, + Client: StateProviderFactory + BlockReaderIdExt, + Tx: EthPoolTransaction, { type Transaction = Tx; @@ -231,11 +234,10 @@ pub struct OpL1BlockInfo { mod tests { use crate::txpool::OpTransactionValidator; use alloy_eips::eip2718::Encodable2718; - use alloy_primitives::{TxKind, U256}; + use alloy_primitives::{PrimitiveSignature as Signature, TxKind, U256}; use op_alloy_consensus::TxDeposit; - use reth::primitives::Signature; use reth_chainspec::MAINNET; - use reth_primitives::{Transaction, TransactionSigned, TransactionSignedEcRecovered}; + use reth_primitives::{RecoveredTx, Transaction, TransactionSigned}; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, @@ -263,9 +265,8 @@ mod tests { input: Default::default(), }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(deposit_tx, signature); - let signed_recovered = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, signer); + let signed_tx = TransactionSigned::new_unhashed(deposit_tx, signature); + let signed_recovered = RecoveredTx::from_signed_transaction(signed_tx, signer); let len = signed_recovered.encode_2718_len(); let pooled_tx = EthPooledTransaction::new(signed_recovered, len); let outcome = validator.validate_one(origin, pooled_tx); diff --git a/crates/optimism/node/src/utils.rs b/crates/optimism/node/src/utils.rs new file mode 100644 index 00000000000..147aaac59dc --- /dev/null +++ b/crates/optimism/node/src/utils.rs @@ -0,0 +1,69 @@ +use crate::{OpBuiltPayload, OpNode as OtherOpNode, OpPayloadBuilderAttributes}; +use alloy_genesis::Genesis; +use alloy_primitives::{Address, B256}; +use alloy_rpc_types_engine::PayloadAttributes; +use reth_e2e_test_utils::{transaction::TransactionTestContext, wallet::Wallet, NodeHelperType}; +use reth_optimism_chainspec::OpChainSpecBuilder; +use reth_payload_builder::EthPayloadBuilderAttributes; +use reth_tasks::TaskManager; +use std::sync::Arc; +use tokio::sync::Mutex; + +/// Optimism Node Helper type +pub(crate) type OpNode = NodeHelperType; + +/// Creates the initial setup with `num_nodes` of the node config, started and connected. 
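// Editor's illustrative sketch (not part of this diff): typical use of the
// `setup`/`advance_chain` helpers in this module, mirroring the e2e tests.
// The node count and chain length are hypothetical.
async fn demo() -> eyre::Result<()> {
    let (mut nodes, _tasks, wallet) = setup(3).await?;
    let wallet = std::sync::Arc::new(tokio::sync::Mutex::new(wallet));
    // Each produced block carries one L1 block info tx from the shared wallet.
    let _payloads = advance_chain(10, &mut nodes[0], wallet).await?;
    Ok(())
}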
+pub async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { + let genesis: Genesis = + serde_json::from_str(include_str!("../tests/assets/genesis.json")).unwrap(); + reth_e2e_test_utils::setup( + num_nodes, + Arc::new(OpChainSpecBuilder::base_mainnet().genesis(genesis).ecotone_activated().build()), + false, + optimism_payload_attributes, + ) + .await +} + +/// Advance the chain with sequential payloads returning them in the end. +pub async fn advance_chain( + length: usize, + node: &mut OpNode, + wallet: Arc>, +) -> eyre::Result> { + node.advance(length as u64, |_| { + let wallet = wallet.clone(); + Box::pin(async move { + let mut wallet = wallet.lock().await; + let tx_fut = TransactionTestContext::optimism_l1_block_info_tx( + wallet.chain_id, + wallet.inner.clone(), + wallet.inner_nonce, + ); + wallet.inner_nonce += 1; + tx_fut.await + }) + }) + .await +} + +/// Helper function to create a new eth payload attributes +pub fn optimism_payload_attributes(timestamp: u64) -> OpPayloadBuilderAttributes { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + target_blobs_per_block: None, + max_blobs_per_block: None, + }; + + OpPayloadBuilderAttributes { + payload_attributes: EthPayloadBuilderAttributes::new(B256::ZERO, attributes), + transactions: vec![], + no_tx_pool: false, + gas_limit: Some(30_000_000), + eip_1559_params: None, + } +} diff --git a/crates/optimism/node/tests/e2e/main.rs b/crates/optimism/node/tests/e2e/main.rs index 3438c766048..7f4b22ba7e0 100644 --- a/crates/optimism/node/tests/e2e/main.rs +++ b/crates/optimism/node/tests/e2e/main.rs @@ -3,7 +3,4 @@ #[cfg(feature = "optimism")] mod p2p; -#[cfg(feature = "optimism")] -mod utils; - const fn main() {} diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e/p2p.rs index ebd35cc8a5c..90623d9e65d 100644 --- a/crates/optimism/node/tests/e2e/p2p.rs +++ b/crates/optimism/node/tests/e2e/p2p.rs @@ -1,6 +1,6 @@ -use crate::utils::{advance_chain, setup}; use alloy_rpc_types_engine::PayloadStatusEnum; -use reth::blockchain_tree::error::BlockchainTreeError; +use futures::StreamExt; +use reth_optimism_node::utils::{advance_chain, setup}; use std::sync::Arc; use tokio::sync::Mutex; @@ -25,6 +25,19 @@ async fn can_sync() -> eyre::Result<()> { canonical_payload_chain.iter().map(|p| p.0.block().hash()).collect::>(); // On second node, sync optimistically up to block number 88a + second_node + .engine_api + .update_optimistic_forkchoice(canonical_chain[tip_index - reorg_depth - 1]) + .await?; + second_node + .wait_block( + (tip - reorg_depth - 1) as u64, + canonical_chain[tip_index - reorg_depth - 1], + true, + ) + .await?; + // We send FCU twice to ensure that pool receives canonical chain update on the second FCU + // This is required because notifications are not sent during backfill sync second_node .engine_api .update_optimistic_forkchoice(canonical_chain[tip_index - reorg_depth]) @@ -32,6 +45,7 @@ async fn can_sync() -> eyre::Result<()> { second_node .wait_block((tip - reorg_depth) as u64, canonical_chain[tip_index - reorg_depth], true) .await?; + second_node.engine_api.canonical_stream.next().await.unwrap(); // On third node, sync optimistically up to block number 90a third_node.engine_api.update_optimistic_forkchoice(canonical_chain[tip_index]).await?; @@ -51,7 +65,6 @@ async fn can_sync() -> eyre::Result<()> { side_payload_chain[0].0.clone(), 
side_payload_chain[0].1.clone(), PayloadStatusEnum::Valid, - Default::default(), ) .await; @@ -76,12 +89,11 @@ async fn can_sync() -> eyre::Result<()> { canonical_payload_chain[tip_index - reorg_depth + 1].0.clone(), canonical_payload_chain[tip_index - reorg_depth + 1].1.clone(), PayloadStatusEnum::Invalid { - validation_error: BlockchainTreeError::PendingBlockIsFinalized { - last_finalized: (tip - reorg_depth) as u64 + 1, - } - .to_string(), + validation_error: format!( + "block number is lower than the last finalized block number {}", + (tip - reorg_depth) as u64 + 1 + ), }, - Default::default(), ) .await; diff --git a/crates/optimism/node/tests/e2e/utils.rs b/crates/optimism/node/tests/e2e/utils.rs deleted file mode 100644 index 8ea8df380b0..00000000000 --- a/crates/optimism/node/tests/e2e/utils.rs +++ /dev/null @@ -1,70 +0,0 @@ -use alloy_genesis::Genesis; -use alloy_primitives::{Address, B256}; -use reth::{rpc::types::engine::PayloadAttributes, tasks::TaskManager}; -use reth_e2e_test_utils::{ - transaction::TransactionTestContext, wallet::Wallet, Adapter, NodeHelperType, -}; -use reth_optimism_chainspec::OpChainSpecBuilder; -use reth_optimism_node::{ - node::OptimismAddOns, OptimismBuiltPayload, OptimismNode, OptimismPayloadBuilderAttributes, -}; -use reth_payload_builder::EthPayloadBuilderAttributes; -use std::sync::Arc; -use tokio::sync::Mutex; - -/// Optimism Node Helper type -pub(crate) type OpNode = NodeHelperType>>; - -pub(crate) async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { - let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); - reth_e2e_test_utils::setup( - num_nodes, - Arc::new(OpChainSpecBuilder::base_mainnet().genesis(genesis).ecotone_activated().build()), - false, - ) - .await -} - -/// Advance the chain with sequential payloads returning them in the end. 
-pub(crate) async fn advance_chain( - length: usize, - node: &mut OpNode, - wallet: Arc>, -) -> eyre::Result> { - node.advance( - length as u64, - |_| { - let wallet = wallet.clone(); - Box::pin(async move { - let mut wallet = wallet.lock().await; - let tx_fut = TransactionTestContext::optimism_l1_block_info_tx( - wallet.chain_id, - wallet.inner.clone(), - wallet.inner_nonce, - ); - wallet.inner_nonce += 1; - tx_fut.await - }) - }, - optimism_payload_attributes, - ) - .await -} - -/// Helper function to create a new eth payload attributes -pub(crate) fn optimism_payload_attributes(timestamp: u64) -> OptimismPayloadBuilderAttributes { - let attributes = PayloadAttributes { - timestamp, - prev_randao: B256::ZERO, - suggested_fee_recipient: Address::ZERO, - withdrawals: Some(vec![]), - parent_beacon_block_root: Some(B256::ZERO), - }; - - OptimismPayloadBuilderAttributes { - payload_attributes: EthPayloadBuilderAttributes::new(B256::ZERO, attributes), - transactions: vec![], - no_tx_pool: false, - gas_limit: Some(30_000_000), - } -} diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index f1dde4c2c0a..875b282e0ad 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -2,20 +2,21 @@ use reth_db::test_utils::create_test_rw_db; use reth_node_api::FullNodeComponents; -use reth_node_builder::{NodeBuilder, NodeConfig}; +use reth_node_builder::{Node, NodeBuilder, NodeConfig}; use reth_optimism_chainspec::BASE_MAINNET; -use reth_optimism_node::{node::OptimismAddOns, OptimismNode}; +use reth_optimism_node::{args::RollupArgs, OpNode}; #[test] fn test_basic_setup() { // parse CLI -> config let config = NodeConfig::new(BASE_MAINNET.clone()); let db = create_test_rw_db(); + let args = RollupArgs::default(); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types::() - .with_components(OptimismNode::components(Default::default())) - .with_add_ons(OptimismAddOns::new(None)) + .with_types::() + .with_components(OpNode::components(args.clone())) + .with_add_ons(OpNode::new(args).add_ons()) .on_component_initialized(move |ctx| { let _provider = ctx.provider(); Ok(()) diff --git a/crates/optimism/node/tests/it/main.rs b/crates/optimism/node/tests/it/main.rs index b84dd7426c2..d0533fc4541 100644 --- a/crates/optimism/node/tests/it/main.rs +++ b/crates/optimism/node/tests/it/main.rs @@ -3,4 +3,7 @@ #[cfg(feature = "optimism")] mod builder; +#[cfg(feature = "optimism")] +mod priority; + const fn main() {} diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs new file mode 100644 index 00000000000..1b49ed684bf --- /dev/null +++ b/crates/optimism/node/tests/it/priority.rs @@ -0,0 +1,194 @@ +//! Node builder test that customizes priority of transactions in the block. 
+ +use alloy_consensus::TxEip1559; +use alloy_genesis::Genesis; +use alloy_network::TxSignerSync; +use alloy_primitives::{Address, ChainId, TxKind}; +use reth_chainspec::EthChainSpec; +use reth_db::test_utils::create_test_rw_db_with_path; +use reth_e2e_test_utils::{ + node::NodeTestContext, transaction::TransactionTestContext, wallet::Wallet, +}; +use reth_node_api::{FullNodeTypes, NodeTypesWithEngine}; +use reth_node_builder::{ + components::ComponentsBuilder, EngineNodeLauncher, NodeBuilder, NodeConfig, +}; +use reth_node_core::args::DatadirArgs; +use reth_optimism_chainspec::{OpChainSpec, OpChainSpecBuilder}; +use reth_optimism_node::{ + args::RollupArgs, + node::{ + OpAddOns, OpConsensusBuilder, OpExecutorBuilder, OpNetworkBuilder, OpPayloadBuilder, + OpPoolBuilder, + }, + utils::optimism_payload_attributes, + OpEngineTypes, OpNode, +}; +use reth_optimism_payload_builder::builder::OpPayloadTransactions; +use reth_optimism_primitives::OpPrimitives; +use reth_payload_util::{PayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}; +use reth_primitives::{RecoveredTx, SealedBlock, Transaction, TransactionSigned}; +use reth_provider::providers::BlockchainProvider2; +use reth_tasks::TaskManager; +use reth_transaction_pool::{pool::BestPayloadTransactions, PoolTransaction}; +use std::sync::Arc; +use tokio::sync::Mutex; + +#[derive(Clone, Debug)] +struct CustomTxPriority { + chain_id: ChainId, +} + +impl OpPayloadTransactions for CustomTxPriority { + fn best_transactions( + &self, + pool: Pool, + attr: reth_transaction_pool::BestTransactionsAttributes, + ) -> impl PayloadTransactions + where + Pool: reth_transaction_pool::TransactionPool< + Transaction: PoolTransaction, + >, + { + // Block composition: + // 1. Best transactions from the pool (up to 250k gas) + // 2. End-of-block transaction created by the node (up to 100k gas) + + // End of block transaction should send a 0-value transfer to a random address. + let sender = Wallet::default().inner; + let mut end_of_block_tx = TxEip1559 { + chain_id: self.chain_id, + nonce: 1, // it will be 2nd tx after L1 block info tx that uses the same sender + gas_limit: 21000, + max_fee_per_gas: 20e9 as u128, + to: TxKind::Call(Address::random()), + value: 0.try_into().unwrap(), + ..Default::default() + }; + let signature = sender.sign_transaction_sync(&mut end_of_block_tx).unwrap(); + let end_of_block_tx = RecoveredTx::from_signed_transaction( + TransactionSigned::new_unhashed(Transaction::Eip1559(end_of_block_tx), signature), + sender.address(), + ); + + PayloadTransactionsChain::new( + BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr)), + // Allow 250k gas for the transactions from the pool + Some(250_000), + PayloadTransactionsFixed::single(end_of_block_tx), + // Allow 100k gas for the end-of-block transaction + Some(100_000), + ) + } +} + +/// Builds the node with custom transaction priority service within default payload builder. +fn build_components( + chain_id: ChainId, +) -> ComponentsBuilder< + Node, + OpPoolBuilder, + OpPayloadBuilder, + OpNetworkBuilder, + OpExecutorBuilder, + OpConsensusBuilder, +> +where + Node: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = OpEngineTypes, + ChainSpec = OpChainSpec, + Primitives = OpPrimitives, + >, + >, +{ + let RollupArgs { disable_txpool_gossip, compute_pending_block, discovery_v4, .. 
} = + RollupArgs::default(); + ComponentsBuilder::default() + .node_types::() + .pool(OpPoolBuilder::default()) + .payload( + OpPayloadBuilder::new(compute_pending_block) + .with_transactions(CustomTxPriority { chain_id }), + ) + .network(OpNetworkBuilder { disable_txpool_gossip, disable_discovery_v4: !discovery_v4 }) + .executor(OpExecutorBuilder::default()) + .consensus(OpConsensusBuilder::default()) +} + +#[tokio::test] +async fn test_custom_block_priority_config() { + reth_tracing::init_test_tracing(); + + let genesis: Genesis = serde_json::from_str(include_str!("../assets/genesis.json")).unwrap(); + let chain_spec = + Arc::new(OpChainSpecBuilder::base_mainnet().genesis(genesis).ecotone_activated().build()); + + // This wallet is going to send: + // 1. L1 block info tx + // 2. End-of-block custom tx + let wallet = Arc::new(Mutex::new(Wallet::default().with_chain_id(chain_spec.chain().into()))); + + // Configure and launch the node. + let config = NodeConfig::new(chain_spec).with_datadir_args(DatadirArgs { + datadir: reth_db::test_utils::tempdir_path().into(), + ..Default::default() + }); + let db = create_test_rw_db_with_path( + config + .datadir + .datadir + .unwrap_or_chain_default(config.chain.chain(), config.datadir.clone()) + .db(), + ); + let tasks = TaskManager::current(); + let node_handle = NodeBuilder::new(config.clone()) + .with_database(db) + .with_types_and_provider::>() + .with_components(build_components(config.chain.chain_id())) + .with_add_ons(OpAddOns::default()) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + tasks.executor(), + builder.config.datadir(), + Default::default(), + ); + builder.launch_with(launcher) + }) + .await + .expect("Failed to launch node"); + + // Advance the chain with a single block. + let block_payloads = NodeTestContext::new(node_handle.node, optimism_payload_attributes) + .await + .unwrap() + .advance(1, |_| { + let wallet = wallet.clone(); + Box::pin(async move { + let mut wallet = wallet.lock().await; + let tx_fut = TransactionTestContext::optimism_l1_block_info_tx( + wallet.chain_id, + wallet.inner.clone(), + // This doesn't matter in the current test (because it's only one block), + // but make sure you're not reusing the nonce from end-of-block tx + // if they have the same signer. + wallet.inner_nonce * 2, + ); + wallet.inner_nonce += 1; + tx_fut.await + }) + }) + .await + .unwrap(); + assert_eq!(block_payloads.len(), 1); + let (block_payload, _) = block_payloads.first().unwrap(); + let block_payload: SealedBlock = block_payload.block().clone(); + assert_eq!(block_payload.body.transactions.len(), 2); // L1 block info tx + end-of-block custom tx + + // Check that last transaction in the block looks like a transfer to a random address. 
+ let end_of_block_tx = block_payload.body.transactions.last().unwrap(); + let end_of_block_tx = end_of_block_tx.transaction.as_eip1559().unwrap(); + assert_eq!(end_of_block_tx.nonce, 1); + assert_eq!(end_of_block_tx.gas_limit, 21_000); + assert!(end_of_block_tx.input.is_empty()); +} diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index e1d6fe47d29..1c4f855b6aa 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -15,16 +15,17 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives.workspace = true -reth-revm.workspace = true +reth-revm = { workspace = true, features = ["witness"] } reth-transaction-pool.workspace = true reth-provider.workspace = true reth-rpc-types-compat.workspace = true reth-evm.workspace = true reth-execution-types.workspace = true reth-payload-builder.workspace = true -reth-payload-primitives.workspace = true +reth-payload-builder-primitives.workspace = true +reth-payload-util.workspace = true +reth-payload-primitives = { workspace = true, features = ["op"] } reth-basic-payload-builder.workspace = true -reth-trie.workspace = true reth-chain-state.workspace = true # op-reth @@ -39,8 +40,10 @@ alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true op-alloy-rpc-types-engine.workspace = true -revm-primitives.workspace = true +op-alloy-consensus.workspace = true alloy-rpc-types-engine.workspace = true +alloy-rpc-types-debug.workspace = true +alloy-consensus.workspace = true # misc tracing.workspace = true @@ -49,8 +52,10 @@ sha2.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-optimism-evm/optimism", - "revm/optimism", -] + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-optimism-evm/optimism", + "revm/optimism", + "reth-execution-types/optimism", + "reth-optimism-consensus/optimism" +] \ No newline at end of file diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 0a8dcdb1244..27778da8f42 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -1,64 +1,98 @@ //! Optimism payload builder implementation. 
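The rewrite below makes the payload builder generic over a transaction-priority type (`Txs`) and a builder config. As a sketch of how the pieces introduced in this diff fit together, assuming the re-exports added in `lib.rs` below (`evm_config` and the limit values are placeholders, and `CustomTxPriority` is the type from the test above):

```rust
use alloy_primitives::ChainId;
use reth_optimism_payload_builder::{
    config::{OpBuilderConfig, OpDAConfig},
    OpPayloadBuilder,
};

fn configure_builder<EvmConfig>(
    evm_config: EvmConfig,
    chain_id: ChainId,
) -> OpPayloadBuilder<EvmConfig, CustomTxPriority> {
    // Shared, updatable DA limits; 0 means "no limit" (see config.rs in this diff).
    let da_config = OpDAConfig::default();
    da_config.set_max_da_size(100_000, 1_000_000);

    OpPayloadBuilder::with_builder_config(evm_config, OpBuilderConfig::new(da_config))
        .with_transactions(CustomTxPriority { chain_id })
}
```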
-use std::sync::Arc; - -use alloy_primitives::U256; +use crate::{ + config::OpBuilderConfig, + error::OpPayloadBuilderError, + payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, +}; +use alloy_consensus::{Header, Transaction, EMPTY_OMMER_ROOT_HASH}; +use alloy_eips::{eip4895::Withdrawals, merge::BEACON_NONCE}; +use alloy_primitives::{Address, Bytes, B256, U256}; +use alloy_rpc_types_debug::ExecutionWitness; +use alloy_rpc_types_engine::PayloadId; +use op_alloy_consensus::DepositTransaction; +use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_basic_payload_builder::*; use reth_chain_state::ExecutedBlock; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; -use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; +use reth_evm::{system_calls::SystemCaller, ConfigureEvm, NextBlockEnvAttributes}; use reth_execution_types::ExecutionOutcome; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_optimism_forks::OptimismHardfork; -use reth_payload_primitives::{PayloadBuilderAttributes, PayloadBuilderError}; +use reth_optimism_forks::OpHardforks; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::PayloadBuilderAttributes; +use reth_payload_util::PayloadTransactions; use reth_primitives::{ - constants::BEACON_NONCE, - proofs, - revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Block, BlockBody, Header, Receipt, TxType, EMPTY_OMMER_ROOT_HASH, + proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, BlockExt, Receipt, + SealedHeader, TransactionSigned, TxType, }; -use reth_provider::StateProviderFactory; -use reth_revm::database::StateProviderDatabase; +use reth_provider::{ + HashedPostStateProvider, ProviderError, StateProofProvider, StateProviderFactory, + StateRootProvider, +}; +use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord}; use reth_transaction_pool::{ - noop::NoopTransactionPool, BestTransactionsAttributes, TransactionPool, + noop::NoopTransactionPool, pool::BestPayloadTransactions, BestTransactionsAttributes, + PoolTransaction, TransactionPool, }; -use reth_trie::HashedPostState; use revm::{ db::{states::bundle_state::BundleRetention, State}, - primitives::{EVMError, EnvWithHandlerCfg, InvalidTransaction, ResultAndState}, - DatabaseCommit, + primitives::{ + BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, InvalidTransaction, + ResultAndState, TxEnv, + }, + Database, DatabaseCommit, }; -use revm_primitives::calc_excess_blob_gas; +use std::{fmt::Display, sync::Arc}; use tracing::{debug, trace, warn}; -use crate::{ - error::OptimismPayloadBuilderError, - payload::{OptimismBuiltPayload, OptimismPayloadBuilderAttributes}, -}; - /// Optimism's payload builder -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct OptimismPayloadBuilder { +#[derive(Debug, Clone)] +pub struct OpPayloadBuilder { /// The rollup's compute pending block configuration option. // TODO(clabby): Implement this feature. pub compute_pending_block: bool, /// The type responsible for creating the evm. pub evm_config: EvmConfig, + /// Settings for the builder, e.g. DA settings. + pub config: OpBuilderConfig, + /// The type responsible for yielding the best transactions for the payload if mempool + /// transactions are allowed. + pub best_transactions: Txs, } -impl OptimismPayloadBuilder { - /// `OptimismPayloadBuilder` constructor. 
- pub const fn new(evm_config: EvmConfig) -> Self { - Self { compute_pending_block: true, evm_config } +impl OpPayloadBuilder { + /// `OpPayloadBuilder` constructor. + /// + /// Configures the builder with the default settings. + pub fn new(evm_config: EvmConfig) -> Self { + Self::with_builder_config(evm_config, Default::default()) + } + + /// Configures the builder with the given [`OpBuilderConfig`]. + pub const fn with_builder_config(evm_config: EvmConfig, config: OpBuilderConfig) -> Self { + Self { compute_pending_block: true, evm_config, config, best_transactions: () } } +} +impl OpPayloadBuilder { /// Sets the rollup's compute pending block configuration option. pub const fn set_compute_pending_block(mut self, compute_pending_block: bool) -> Self { self.compute_pending_block = compute_pending_block; self } + /// Configures the type responsible for yielding the transactions that should be included in the + /// payload. + pub fn with_transactions( + self, + best_transactions: T, + ) -> OpPayloadBuilder { + let Self { compute_pending_block, evm_config, config, .. } = self; + OpPayloadBuilder { compute_pending_block, evm_config, best_transactions, config } + } + /// Enables the rollup's compute pending block configuration option. pub const fn compute_pending_block(self) -> Self { self.set_compute_pending_block(true) @@ -69,53 +103,143 @@ impl OptimismPayloadBuilder { self.compute_pending_block } } -impl OptimismPayloadBuilder +impl OpPayloadBuilder where - EvmConfig: ConfigureEvmEnv
<Header = Header>,
+    EvmConfig: ConfigureEvm<Header = Header>,
+    Txs: OpPayloadTransactions,
+{
+    /// Constructs an Optimism payload from the transactions sent via the
+    /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in
+    /// the payload attributes, the transaction pool will be ignored and the only transactions
+    /// included in the payload will be those sent through the attributes.
+    ///
+    /// Given build arguments including an Optimism client, transaction pool,
+    /// and configuration, this function creates a transaction payload. Returns
+    /// a result indicating success with the payload or an error in case of failure.
+    fn build_payload<Client, Pool>(
+        &self,
+        args: BuildArguments<Pool, Client, OpPayloadBuilderAttributes, OpBuiltPayload>,
+    ) -> Result<BuildOutcome<OpBuiltPayload>, PayloadBuilderError>
+    where
+        Client: StateProviderFactory + ChainSpecProvider<ChainSpec = OpChainSpec>,
+        Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TransactionSigned>>,
+    {
+        let (initialized_cfg, initialized_block_env) = self
+            .cfg_and_block_env(&args.config.attributes, &args.config.parent_header)
+            .map_err(PayloadBuilderError::other)?;
+
+        let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args;
+
+        let ctx = OpPayloadBuilderCtx {
+            evm_config: self.evm_config.clone(),
+            chain_spec: client.chain_spec(),
+            config,
+            initialized_cfg,
+            initialized_block_env,
+            cancel,
+            best_payload,
+        };
+
+        let builder = OpBuilder { pool, best: self.best_transactions.clone() };
+
+        let state_provider = client.state_by_block_hash(ctx.parent().hash())?;
+        let state = StateProviderDatabase::new(state_provider);
+
+        if ctx.attributes().no_tx_pool {
+            let db = State::builder().with_database(state).with_bundle_update().build();
+            builder.build(db, ctx)
+        } else {
+            // in sequencer mode we can reuse the cached reads from previous runs
+            let db = State::builder()
+                .with_database(cached_reads.as_db_mut(state))
+                .with_bundle_update()
+                .build();
+            builder.build(db, ctx)
+        }
+        .map(|out| out.with_cached_reads(cached_reads))
+    }
+}
+
+impl<EvmConfig> OpPayloadBuilder<EvmConfig>
+where
+    EvmConfig: ConfigureEvm<Header = Header>
, { /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload /// (that has the `parent` as its parent). pub fn cfg_and_block_env( &self, - config: &PayloadConfig, + attributes: &OpPayloadBuilderAttributes, parent: &Header, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), EvmConfig::Error> { let next_attributes = NextBlockEnvAttributes { - timestamp: config.attributes.timestamp(), - suggested_fee_recipient: config.attributes.suggested_fee_recipient(), - prev_randao: config.attributes.prev_randao(), + timestamp: attributes.timestamp(), + suggested_fee_recipient: attributes.suggested_fee_recipient(), + prev_randao: attributes.prev_randao(), }; self.evm_config.next_cfg_and_block_env(parent, next_attributes) } + + /// Computes the witness for the payload. + pub fn payload_witness( + &self, + client: &Client, + parent: SealedHeader, + attributes: OpPayloadAttributes, + ) -> Result + where + Client: StateProviderFactory + ChainSpecProvider, + { + let attributes = OpPayloadBuilderAttributes::try_new(parent.hash(), attributes, 3) + .map_err(PayloadBuilderError::other)?; + + let (initialized_cfg, initialized_block_env) = + self.cfg_and_block_env(&attributes, &parent).map_err(PayloadBuilderError::other)?; + + let config = PayloadConfig { + parent_header: Arc::new(parent), + attributes, + extra_data: Default::default(), + }; + let ctx = OpPayloadBuilderCtx { + evm_config: self.evm_config.clone(), + chain_spec: client.chain_spec(), + config, + initialized_cfg, + initialized_block_env, + cancel: Default::default(), + best_payload: Default::default(), + }; + + let state_provider = client.state_by_block_hash(ctx.parent().hash())?; + let state = StateProviderDatabase::new(state_provider); + let mut state = State::builder().with_database(state).with_bundle_update().build(); + + let builder = OpBuilder { pool: NoopTransactionPool::default(), best: () }; + builder.witness(&mut state, &ctx) + } } -/// Implementation of the [`PayloadBuilder`] trait for [`OptimismPayloadBuilder`]. -impl PayloadBuilder for OptimismPayloadBuilder +/// Implementation of the [`PayloadBuilder`] trait for [`OpPayloadBuilder`]. +impl PayloadBuilder for OpPayloadBuilder where Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, - EvmConfig: ConfigureEvm
<Header = Header>,
+    Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TransactionSigned>>,
+    EvmConfig: ConfigureEvm<Header = Header>
, + Txs: OpPayloadTransactions, { - type Attributes = OptimismPayloadBuilderAttributes; - type BuiltPayload = OptimismBuiltPayload; + type Attributes = OpPayloadBuilderAttributes; + type BuiltPayload = OpBuiltPayload; fn try_build( &self, - args: BuildArguments, - ) -> Result, PayloadBuilderError> { - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); - optimism_payload( - self.evm_config.clone(), - args, - cfg_env, - block_env, - self.compute_pending_block, - ) + args: BuildArguments, + ) -> Result, PayloadBuilderError> { + self.build_payload(args) } fn on_missing_payload( &self, - _args: BuildArguments, + _args: BuildArguments, ) -> MissingPayloadBehaviour { // we want to await the job that's already in progress because that should be returned as // is, there's no benefit in racing another job @@ -128,7 +252,7 @@ where &self, client: &Client, config: PayloadConfig, - ) -> Result { + ) -> Result { let args = BuildArguments { client, config, @@ -138,229 +262,625 @@ where cancel: Default::default(), best_payload: None, }; - let (cfg_env, block_env) = self.cfg_and_block_env(&args.config, &args.config.parent_block); - optimism_payload(self.evm_config.clone(), args, cfg_env, block_env, false)? - .into_payload() - .ok_or_else(|| PayloadBuilderError::MissingPayload) + self.build_payload(args)?.into_payload().ok_or_else(|| PayloadBuilderError::MissingPayload) } } -/// Constructs an Ethereum transaction payload from the transactions sent through the -/// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in -/// the payload attributes, the transaction pool will be ignored and the only transactions -/// included in the payload will be those sent through the attributes. +/// The type that builds the payload. +/// +/// Payload building for optimism is composed of several steps. +/// The first steps are mandatory and defined by the protocol. /// -/// Given build arguments including an Ethereum client, transaction pool, -/// and configuration, this function creates a transaction payload. Returns -/// a result indicating success with the payload or an error in case of failure. -#[inline] -pub(crate) fn optimism_payload( - evm_config: EvmConfig, - args: BuildArguments, - initialized_cfg: CfgEnvWithHandlerCfg, - initialized_block_env: BlockEnv, - _compute_pending_block: bool, -) -> Result, PayloadBuilderError> +/// 1. first all System calls are applied. +/// 2. After canyon the forced deployed `create2deployer` must be loaded +/// 3. all sequencer transactions are executed (part of the payload attributes) +/// +/// Depending on whether the node acts as a sequencer and is allowed to include additional +/// transactions (`no_tx_pool == false`): +/// 4. include additional transactions +/// +/// And finally +/// 5. build the block: compute all roots (txs, state) +#[derive(Debug)] +pub struct OpBuilder { + /// The transaction pool + pool: Pool, + /// Yields the best transaction to include if transactions from the mempool are allowed. + best: Txs, +} + +impl OpBuilder where - EvmConfig: ConfigureEvm
, - Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Pool: TransactionPool>, + Txs: OpPayloadTransactions, { - let BuildArguments { client, pool, mut cached_reads, config, cancel, best_payload } = args; + /// Executes the payload and returns the outcome. + pub fn execute( + self, + state: &mut State, + ctx: &OpPayloadBuilderCtx, + ) -> Result, PayloadBuilderError> + where + EvmConfig: ConfigureEvm
, + DB: Database, + { + let Self { pool, best } = self; + debug!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number, "building new payload"); + + // 1. apply eip-4788 pre block contract call + ctx.apply_pre_beacon_root_contract_call(state)?; + + // 2. ensure create2deployer is force deployed + ctx.ensure_create2_deployer(state)?; + + // 3. execute sequencer transactions + let mut info = ctx.execute_sequencer_transactions(state)?; + + // 4. if mem pool transactions are requested we execute them + if !ctx.attributes().no_tx_pool { + let best_txs = best.best_transactions(pool, ctx.best_transaction_attributes()); + if ctx.execute_best_transactions::<_, Pool>(&mut info, state, best_txs)?.is_some() { + return Ok(BuildOutcomeKind::Cancelled) + } + + // check if the new payload is even more valuable + if !ctx.is_better_payload(info.total_fees) { + // can skip building the block + return Ok(BuildOutcomeKind::Aborted { fees: info.total_fees }) + } + } - let chain_spec = client.chain_spec(); - let state_provider = client.state_by_block_hash(config.parent_block.hash())?; - let state = StateProviderDatabase::new(state_provider); - let mut db = - State::builder().with_database_ref(cached_reads.as_db(state)).with_bundle_update().build(); - let PayloadConfig { parent_block, attributes, extra_data } = config; + let withdrawals_root = ctx.commit_withdrawals(state)?; - debug!(target: "payload_builder", id=%attributes.payload_attributes.payload_id(), parent_hash = ?parent_block.hash(), parent_number = parent_block.number, "building new payload"); + // merge all transitions into bundle state, this would apply the withdrawal balance changes + // and 4788 contract call + state.merge_transitions(BundleRetention::Reverts); - let mut cumulative_gas_used = 0; - let block_gas_limit: u64 = attributes.gas_limit.unwrap_or_else(|| { - initialized_block_env.gas_limit.try_into().unwrap_or(chain_spec.max_gas_limit) - }); - let base_fee = initialized_block_env.basefee.to::(); + Ok(BuildOutcomeKind::Better { payload: ExecutedPayload { info, withdrawals_root } }) + } - let mut executed_txs = Vec::with_capacity(attributes.transactions.len()); - let mut executed_senders = Vec::with_capacity(attributes.transactions.len()); + /// Builds the payload on top of the state. + pub fn build( + self, + mut state: State, + ctx: OpPayloadBuilderCtx, + ) -> Result, PayloadBuilderError> + where + EvmConfig: ConfigureEvm
<Header = Header>,
+        DB: Database<Error = ProviderError> + AsRef<P>
, + P: StateRootProvider + HashedPostStateProvider, + { + let ExecutedPayload { info, withdrawals_root } = match self.execute(&mut state, &ctx)? { + BuildOutcomeKind::Better { payload } | BuildOutcomeKind::Freeze(payload) => payload, + BuildOutcomeKind::Cancelled => return Ok(BuildOutcomeKind::Cancelled), + BuildOutcomeKind::Aborted { fees } => return Ok(BuildOutcomeKind::Aborted { fees }), + }; - let mut best_txs = pool.best_transactions_with_attributes(BestTransactionsAttributes::new( - base_fee, - initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), - )); + let block_number = ctx.block_number(); + let execution_outcome = ExecutionOutcome::new( + state.take_bundle(), + vec![info.receipts].into(), + block_number, + Vec::new(), + ); + let receipts_root = execution_outcome + .generic_receipts_root_slow(block_number, |receipts| { + calculate_receipt_root_no_memo_optimism( + receipts, + &ctx.chain_spec, + ctx.attributes().timestamp(), + ) + }) + .expect("Number is in range"); + let logs_bloom = + execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); + + // // calculate the state root + let state_provider = state.database.as_ref(); + let hashed_state = state_provider.hashed_post_state(execution_outcome.state()); + let (state_root, trie_output) = { + state_provider.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { + warn!(target: "payload_builder", + parent_header=%ctx.parent().hash(), + %err, + "failed to calculate state root for payload" + ); + })? + }; - let mut total_fees = U256::ZERO; + // create the block header + let transactions_root = proofs::calculate_transaction_root(&info.executed_transactions); + + // OP doesn't support blobs/EIP-4844. + // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions + // Need [Some] or [None] based on hardfork to match block hash. 
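+        // (`ctx.blob_fields()` below returns `(Some(0), Some(0))` once Ecotone is active and
+        // `(None, None)` before it.)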
+ let (excess_blob_gas, blob_gas_used) = ctx.blob_fields(); + let extra_data = ctx.extra_data()?; + + let header = Header { + parent_hash: ctx.parent().hash(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: ctx.initialized_block_env.coinbase, + state_root, + transactions_root, + receipts_root, + withdrawals_root, + logs_bloom, + timestamp: ctx.attributes().payload_attributes.timestamp, + mix_hash: ctx.attributes().payload_attributes.prev_randao, + nonce: BEACON_NONCE.into(), + base_fee_per_gas: Some(ctx.base_fee()), + number: ctx.parent().number + 1, + gas_limit: ctx.block_gas_limit(), + difficulty: U256::ZERO, + gas_used: info.cumulative_gas_used, + extra_data, + parent_beacon_block_root: ctx.attributes().payload_attributes.parent_beacon_block_root, + blob_gas_used, + excess_blob_gas, + requests_hash: None, + target_blobs_per_block: None, + }; - let block_number = initialized_block_env.number.to::(); + // seal the block + let block = Block { + header, + body: BlockBody { + transactions: info.executed_transactions, + ommers: vec![], + withdrawals: ctx.withdrawals().cloned(), + }, + }; - let is_regolith = chain_spec.is_fork_active_at_timestamp( - OptimismHardfork::Regolith, - attributes.payload_attributes.timestamp, - ); + let sealed_block = Arc::new(block.seal_slow()); + debug!(target: "payload_builder", id=%ctx.attributes().payload_id(), sealed_block_header = ?sealed_block.header, "sealed built block"); - // apply eip-4788 pre block contract call - let mut system_caller = SystemCaller::new(evm_config.clone(), &chain_spec); + // create the executed block data + let executed = ExecutedBlock { + block: sealed_block.clone(), + senders: Arc::new(info.executed_senders), + execution_output: Arc::new(execution_outcome), + hashed_state: Arc::new(hashed_state), + trie: Arc::new(trie_output), + }; - system_caller - .pre_block_beacon_root_contract_call( - &mut db, - &initialized_cfg, - &initialized_block_env, - attributes.payload_attributes.parent_beacon_block_root, - ) - .map_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to apply beacon root contract call for payload" - ); - PayloadBuilderError::Internal(err.into()) - })?; - - // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism - // blocks will always have at least a single transaction in them (the L1 info transaction), - // so we can safely assume that this will always be triggered upon the transition and that - // the above check for empty blocks will never be hit on OP chains. - reth_optimism_evm::ensure_create2_deployer( - chain_spec.clone(), - attributes.payload_attributes.timestamp, - &mut db, - ) - .map_err(|err| { - warn!(target: "payload_builder", %err, "missing create2 deployer, skipping block."); - PayloadBuilderError::other(OptimismPayloadBuilderError::ForceCreate2DeployerFail) - })?; - - let mut receipts = Vec::with_capacity(attributes.transactions.len()); - for sequencer_tx in &attributes.transactions { - // Check if the job was cancelled, if so we can exit early. - if cancel.is_cancelled() { - return Ok(BuildOutcome::Cancelled) + let no_tx_pool = ctx.attributes().no_tx_pool; + + let payload = OpBuiltPayload::new( + ctx.payload_id(), + sealed_block, + info.total_fees, + ctx.chain_spec.clone(), + ctx.config.attributes, + Some(executed), + ); + + if no_tx_pool { + // if `no_tx_pool` is set only transactions from the payload attributes will be included + // in the payload. 
In other words, the payload is deterministic and we can + // freeze it once we've successfully built it. + Ok(BuildOutcomeKind::Freeze(payload)) + } else { + Ok(BuildOutcomeKind::Better { payload }) } + } + + /// Builds the payload and returns its [`ExecutionWitness`] based on the state after execution. + pub fn witness( + self, + state: &mut State, + ctx: &OpPayloadBuilderCtx, + ) -> Result + where + EvmConfig: ConfigureEvm
<Header = Header>,
+        DB: Database<Error = ProviderError> + AsRef<P>,
+        P: StateProofProvider,
+    {
+        let _ = self.execute(state, ctx)?;
+        let ExecutionWitnessRecord { hashed_state, codes, keys } =
+            ExecutionWitnessRecord::from_executed_state(state);
+        let state = state.database.as_ref().witness(Default::default(), hashed_state)?;
+        Ok(ExecutionWitness { state: state.into_iter().collect(), codes, keys })
+    }
+}
+
+/// A type that returns the [`PayloadTransactions`] that should be included in the payload.
+pub trait OpPayloadTransactions: Clone + Send + Sync + Unpin + 'static {
+    /// Returns an iterator that yields the transactions in the order they should get included
+    /// in the new payload.
+    fn best_transactions<
+        Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TransactionSigned>>,
+    >(
+        &self,
+        pool: Pool,
+        attr: BestTransactionsAttributes,
+    ) -> impl PayloadTransactions<Transaction = TransactionSigned>;
+}
-
-    // A sequencer's block should never contain blob transactions.
-    if sequencer_tx.value().is_eip4844() {
-        return Err(PayloadBuilderError::other(
-            OptimismPayloadBuilderError::BlobTransactionRejected,
-        ))
+impl OpPayloadTransactions for () {
+    fn best_transactions<
+        Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TransactionSigned>>,
+    >(
+        &self,
+        pool: Pool,
+        attr: BestTransactionsAttributes,
+    ) -> impl PayloadTransactions<Transaction = TransactionSigned> {
+        BestPayloadTransactions::new(pool.best_transactions_with_attributes(attr))
+    }
+}
+
+/// Holds the state after execution
+#[derive(Debug)]
+pub struct ExecutedPayload {
+    /// Tracked execution info
+    pub info: ExecutionInfo,
+    /// Withdrawal hash.
+    pub withdrawals_root: Option<B256>,
+}
+
+/// This acts as the container for executed transactions and its byproducts (receipts, gas used)
+#[derive(Default, Debug)]
+pub struct ExecutionInfo {
+    /// All executed transactions (unrecovered).
+    pub executed_transactions: Vec<TransactionSigned>,
+    /// The recovered senders for the executed transactions.
+    pub executed_senders: Vec<Address>
, + /// The transaction receipts + pub receipts: Vec>, + /// All gas used so far + pub cumulative_gas_used: u64, + /// Tracks fees from executed mempool transactions + pub total_fees: U256, +} + +impl ExecutionInfo { + /// Create a new instance with allocated slots. + pub fn with_capacity(capacity: usize) -> Self { + Self { + executed_transactions: Vec::with_capacity(capacity), + executed_senders: Vec::with_capacity(capacity), + receipts: Vec::with_capacity(capacity), + cumulative_gas_used: 0, + total_fees: U256::ZERO, } + } +} - // Convert the transaction to a [TransactionSignedEcRecovered]. This is - // purely for the purposes of utilizing the `evm_config.tx_env`` function. - // Deposit transactions do not have signatures, so if the tx is a deposit, this - // will just pull in its `from` address. - let sequencer_tx = sequencer_tx.value().clone().try_into_ecrecovered().map_err(|_| { - PayloadBuilderError::other(OptimismPayloadBuilderError::TransactionEcRecoverFailed) - })?; - - // Cache the depositor account prior to the state transition for the deposit nonce. - // - // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces - // were not introduced in Bedrock. In addition, regular transactions don't have deposit - // nonces, so we don't need to touch the DB for those. - let depositor = (is_regolith && sequencer_tx.is_deposit()) - .then(|| { - db.load_cache_account(sequencer_tx.signer()) - .map(|acc| acc.account_info().unwrap_or_default()) - }) - .transpose() - .map_err(|_| { - PayloadBuilderError::other(OptimismPayloadBuilderError::AccountLoadFailed( - sequencer_tx.signer(), - )) +/// Container type that holds all necessities to build a new payload. +#[derive(Debug)] +pub struct OpPayloadBuilderCtx { + /// The type that knows how to perform system calls and configure the evm. + pub evm_config: EvmConfig, + /// The chainspec + pub chain_spec: Arc, + /// How to build the payload. + pub config: PayloadConfig, + /// Evm Settings + pub initialized_cfg: CfgEnvWithHandlerCfg, + /// Block config + pub initialized_block_env: BlockEnv, + /// Marker to check whether the job has been cancelled. + pub cancel: Cancelled, + /// The currently best payload. + pub best_payload: Option, +} + +impl OpPayloadBuilderCtx { + /// Returns the parent block the payload will be build on. + pub fn parent(&self) -> &SealedHeader { + &self.config.parent_header + } + + /// Returns the builder attributes. + pub const fn attributes(&self) -> &OpPayloadBuilderAttributes { + &self.config.attributes + } + + /// Returns the withdrawals if shanghai is active. + pub fn withdrawals(&self) -> Option<&Withdrawals> { + self.chain_spec + .is_shanghai_active_at_timestamp(self.attributes().timestamp()) + .then(|| &self.attributes().payload_attributes.withdrawals) + } + + /// Returns the block gas limit to target. + pub fn block_gas_limit(&self) -> u64 { + self.attributes() + .gas_limit + .unwrap_or_else(|| self.initialized_block_env.gas_limit.saturating_to()) + } + + /// Returns the block number for the block. + pub fn block_number(&self) -> u64 { + self.initialized_block_env.number.to() + } + + /// Returns the current base fee + pub fn base_fee(&self) -> u64 { + self.initialized_block_env.basefee.to() + } + + /// Returns the current blob gas price. + pub fn get_blob_gasprice(&self) -> Option { + self.initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64) + } + + /// Returns the blob fields for the header. + /// + /// This will always return `Some(0)` after ecotone. 
+    pub fn blob_fields(&self) -> (Option<u64>, Option<u64>) {
+        // OP doesn't support blobs/EIP-4844.
+        // https://specs.optimism.io/protocol/exec-engine.html#ecotone-disable-blob-transactions
+        // Need [Some] or [None] based on hardfork to match block hash.
+        if self.is_ecotone_active() {
+            (Some(0), Some(0))
+        } else {
+            (None, None)
+        }
+    }
+
+    /// Returns the extra data for the block.
+    ///
+    /// After holocene this extracts the extra data from the payload attributes.
+    pub fn extra_data(&self) -> Result<Bytes, PayloadBuilderError> {
+        if self.is_holocene_active() {
+            self.attributes()
+                .get_holocene_extra_data(
+                    self.chain_spec.base_fee_params_at_timestamp(
+                        self.attributes().payload_attributes.timestamp,
+                    ),
+                )
+                .map_err(PayloadBuilderError::other)
+        } else {
+            Ok(self.config.extra_data.clone())
+        }
+    }
+
+    /// Returns the current fee settings for transactions from the mempool.
+    pub fn best_transaction_attributes(&self) -> BestTransactionsAttributes {
+        BestTransactionsAttributes::new(self.base_fee(), self.get_blob_gasprice())
+    }
+
+    /// Returns the unique id for this payload job.
+    pub fn payload_id(&self) -> PayloadId {
+        self.attributes().payload_id()
+    }
+
+    /// Returns true if regolith is active for the payload.
+    pub fn is_regolith_active(&self) -> bool {
+        self.chain_spec.is_regolith_active_at_timestamp(self.attributes().timestamp())
+    }
+
+    /// Returns true if ecotone is active for the payload.
+    pub fn is_ecotone_active(&self) -> bool {
+        self.chain_spec.is_ecotone_active_at_timestamp(self.attributes().timestamp())
+    }
+
+    /// Returns true if canyon is active for the payload.
+    pub fn is_canyon_active(&self) -> bool {
+        self.chain_spec.is_canyon_active_at_timestamp(self.attributes().timestamp())
+    }
+
+    /// Returns true if holocene is active for the payload.
+    pub fn is_holocene_active(&self) -> bool {
+        self.chain_spec.is_holocene_active_at_timestamp(self.attributes().timestamp())
+    }
+
+    /// Returns true if the fees are higher than the previous payload.
+    pub fn is_better_payload(&self, total_fees: U256) -> bool {
+        is_better_payload(self.best_payload.as_ref(), total_fees)
+    }
+
+    /// Commits the withdrawals from the payload attributes to the state.
+    pub fn commit_withdrawals<DB>(&self, db: &mut State<DB>) -> Result<Option<B256>, ProviderError>
+    where
+        DB: Database,
+    {
+        commit_withdrawals(
+            db,
+            &self.chain_spec,
+            self.attributes().payload_attributes.timestamp,
+            &self.attributes().payload_attributes.withdrawals,
+        )
+    }
+
+    /// Ensure that the create2deployer is force-deployed at the canyon transition. Optimism
+    /// blocks will always have at least a single transaction in them (the L1 info transaction),
+    /// so we can safely assume that this will always be triggered upon the transition and that
+    /// the above check for empty blocks will never be hit on OP chains.
+    pub fn ensure_create2_deployer<DB>(&self, db: &mut State<DB>) -> Result<(), PayloadBuilderError>
+    where
+        DB: Database,
+        DB::Error: Display,
+    {
+        reth_optimism_evm::ensure_create2_deployer(
+            self.chain_spec.clone(),
+            self.attributes().payload_attributes.timestamp,
+            db,
+        )
+        .map_err(|err| {
+            warn!(target: "payload_builder", %err, "missing create2 deployer, skipping block.");
+            PayloadBuilderError::other(OpPayloadBuilderError::ForceCreate2DeployerFail)
+        })
+    }
+}
+
+impl<EvmConfig> OpPayloadBuilderCtx<EvmConfig>
+where
+    EvmConfig: ConfigureEvm<Header = Header>
, +{ + /// apply eip-4788 pre block contract call + pub fn apply_pre_beacon_root_contract_call( + &self, + db: &mut DB, + ) -> Result<(), PayloadBuilderError> + where + DB: Database + DatabaseCommit, + DB::Error: Display, + { + SystemCaller::new(self.evm_config.clone(), self.chain_spec.clone()) + .pre_block_beacon_root_contract_call( + db, + &self.initialized_cfg, + &self.initialized_block_env, + self.attributes().payload_attributes.parent_beacon_block_root, + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_header=%self.parent().hash(), + %err, + "failed to apply beacon root contract call for payload" + ); + PayloadBuilderError::Internal(err.into()) })?; + Ok(()) + } + + /// Executes all sequencer transactions that are included in the payload attributes. + pub fn execute_sequencer_transactions( + &self, + db: &mut State, + ) -> Result + where + DB: Database, + { + let mut info = ExecutionInfo::with_capacity(self.attributes().transactions.len()); + let env = EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - evm_config.tx_env(sequencer_tx.as_signed(), sequencer_tx.signer()), + self.initialized_cfg.clone(), + self.initialized_block_env.clone(), + TxEnv::default(), ); + let mut evm = self.evm_config.evm_with_env(&mut *db, env); + + for sequencer_tx in &self.attributes().transactions { + // A sequencer's block should never contain blob transactions. + if sequencer_tx.value().is_eip4844() { + return Err(PayloadBuilderError::other( + OpPayloadBuilderError::BlobTransactionRejected, + )) + } - let mut evm = evm_config.evm_with_env(&mut db, env); + // Convert the transaction to a [RecoveredTx]. This is + // purely for the purposes of utilizing the `evm_config.tx_env`` function. + // Deposit transactions do not have signatures, so if the tx is a deposit, this + // will just pull in its `from` address. + let sequencer_tx = + sequencer_tx.value().clone().try_into_ecrecovered().map_err(|_| { + PayloadBuilderError::other(OpPayloadBuilderError::TransactionEcRecoverFailed) + })?; + + // Cache the depositor account prior to the state transition for the deposit nonce. + // + // Note that this *only* needs to be done post-regolith hardfork, as deposit nonces + // were not introduced in Bedrock. In addition, regular transactions don't have deposit + // nonces, so we don't need to touch the DB for those. 
+ let depositor = (self.is_regolith_active() && sequencer_tx.is_deposit()) + .then(|| { + evm.db_mut() + .load_cache_account(sequencer_tx.signer()) + .map(|acc| acc.account_info().unwrap_or_default()) + }) + .transpose() + .map_err(|_| { + PayloadBuilderError::other(OpPayloadBuilderError::AccountLoadFailed( + sequencer_tx.signer(), + )) + })?; + + *evm.tx_mut() = self.evm_config.tx_env(sequencer_tx.as_signed(), sequencer_tx.signer()); - let ResultAndState { result, state } = match evm.transact() { - Ok(res) => res, - Err(err) => { - match err { - EVMError::Transaction(err) => { - trace!(target: "payload_builder", %err, ?sequencer_tx, "Error in sequencer transaction, skipping."); - continue - } - err => { - // this is an error that we should treat as fatal for this attempt - return Err(PayloadBuilderError::EvmExecutionError(err)) + let ResultAndState { result, state } = match evm.transact() { + Ok(res) => res, + Err(err) => { + match err { + EVMError::Transaction(err) => { + trace!(target: "payload_builder", %err, ?sequencer_tx, "Error in sequencer transaction, skipping."); + continue + } + err => { + // this is an error that we should treat as fatal for this attempt + return Err(PayloadBuilderError::EvmExecutionError(err)) + } } } - } - }; + }; - // to release the db reference drop evm. - drop(evm); - // commit changes - db.commit(state); - - let gas_used = result.gas_used(); - - // add gas used by the transaction to cumulative gas used, before creating the receipt - cumulative_gas_used += gas_used; - - // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Some(Receipt { - tx_type: sequencer_tx.tx_type(), - success: result.is_success(), - cumulative_gas_used, - logs: result.into_logs().into_iter().map(Into::into).collect(), - deposit_nonce: depositor.map(|account| account.nonce), - // The deposit receipt version was introduced in Canyon to indicate an update to how - // receipt hashes should be computed when set. The state transition process - // ensures this is only set for post-Canyon deposit transactions. - deposit_receipt_version: chain_spec - .is_fork_active_at_timestamp( - OptimismHardfork::Canyon, - attributes.payload_attributes.timestamp, - ) - .then_some(1), - })); + // commit changes + evm.db_mut().commit(state); + + let gas_used = result.gas_used(); - // append sender and transaction to the respective lists - executed_senders.push(sequencer_tx.signer()); - executed_txs.push(sequencer_tx.into_signed()); + // add gas used by the transaction to cumulative gas used, before creating the receipt + info.cumulative_gas_used += gas_used; + + // Push transaction changeset and calculate header bloom filter for receipt. + info.receipts.push(Some(Receipt { + tx_type: sequencer_tx.tx_type(), + success: result.is_success(), + cumulative_gas_used: info.cumulative_gas_used, + logs: result.into_logs().into_iter().map(Into::into).collect(), + deposit_nonce: depositor.map(|account| account.nonce), + // The deposit receipt version was introduced in Canyon to indicate an update to how + // receipt hashes should be computed when set. The state transition process + // ensures this is only set for post-Canyon deposit transactions. 
+ deposit_receipt_version: self.is_canyon_active().then_some(1), + })); + + // append sender and transaction to the respective lists + info.executed_senders.push(sequencer_tx.signer()); + info.executed_transactions.push(sequencer_tx.into_signed()); + } + + Ok(info) } - if !attributes.no_tx_pool { - while let Some(pool_tx) = best_txs.next() { + /// Executes the given best transactions and updates the execution info. + /// + /// Returns `Ok(Some(())` if the job was cancelled. + pub fn execute_best_transactions( + &self, + info: &mut ExecutionInfo, + db: &mut State, + mut best_txs: impl PayloadTransactions, + ) -> Result, PayloadBuilderError> + where + DB: Database, + { + let block_gas_limit = self.block_gas_limit(); + let base_fee = self.base_fee(); + + let env = EnvWithHandlerCfg::new_with_cfg_env( + self.initialized_cfg.clone(), + self.initialized_block_env.clone(), + TxEnv::default(), + ); + let mut evm = self.evm_config.evm_with_env(&mut *db, env); + + while let Some(tx) = best_txs.next(()) { // ensure we still have capacity for this transaction - if cumulative_gas_used + pool_tx.gas_limit() > block_gas_limit { + if info.cumulative_gas_used + tx.gas_limit() > block_gas_limit { // we can't fit this transaction into the block, so we need to mark it as // invalid which also removes all dependent transaction from // the iterator before we can continue - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid(tx.signer(), tx.nonce()); continue } // A sequencer's block should never contain blob or deposit transactions from the pool. - if pool_tx.is_eip4844() || pool_tx.tx_type() == TxType::Deposit as u8 { - best_txs.mark_invalid(&pool_tx); + if tx.is_eip4844() || tx.tx_type() == TxType::Deposit as u8 { + best_txs.mark_invalid(tx.signer(), tx.nonce()); continue } // check if the job was cancelled, if so we can exit early - if cancel.is_cancelled() { - return Ok(BuildOutcome::Cancelled) + if self.cancel.is_cancelled() { + return Ok(Some(())) } - // convert tx to a signed transaction - let tx = pool_tx.to_recovered_transaction(); - let env = EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - evm_config.tx_env(tx.as_signed(), tx.signer()), - ); - - // Configure the environment for the block. - let mut evm = evm_config.evm_with_env(&mut db, env); + // Configure the environment for the tx. + *evm.tx_mut() = self.evm_config.tx_env(tx.as_signed(), tx.signer()); let ResultAndState { result, state } = match evm.transact() { Ok(res) => res, @@ -374,7 +894,7 @@ where // if the transaction is invalid, we can skip it and all of its // descendants trace!(target: "payload_builder", %err, ?tx, "skipping invalid transaction and its descendants"); - best_txs.mark_invalid(&pool_tx); + best_txs.mark_invalid(tx.signer(), tx.nonce()); } continue @@ -386,22 +906,21 @@ where } } }; - // drop evm so db is released. - drop(evm); + // commit changes - db.commit(state); + evm.db_mut().commit(state); let gas_used = result.gas_used(); // add gas used by the transaction to cumulative gas used, before creating the // receipt - cumulative_gas_used += gas_used; + info.cumulative_gas_used += gas_used; // Push transaction changeset and calculate header bloom filter for receipt. 
- receipts.push(Some(Receipt { + info.receipts.push(Some(Receipt { tx_type: tx.tx_type(), success: result.is_success(), - cumulative_gas_used, + cumulative_gas_used: info.cumulative_gas_used, logs: result.into_logs().into_iter().map(Into::into).collect(), deposit_nonce: None, deposit_receipt_version: None, @@ -409,135 +928,15 @@ where // update add to total fees let miner_fee = tx - .effective_tip_per_gas(Some(base_fee)) + .effective_tip_per_gas(base_fee) .expect("fee is always valid; execution succeeded"); - total_fees += U256::from(miner_fee) * U256::from(gas_used); + info.total_fees += U256::from(miner_fee) * U256::from(gas_used); // append sender and transaction to the respective lists - executed_senders.push(tx.signer()); - executed_txs.push(tx.into_signed()); + info.executed_senders.push(tx.signer()); + info.executed_transactions.push(tx.into_signed()); } - } - // check if we have a better block - if !is_better_payload(best_payload.as_ref(), total_fees) { - // can skip building the block - return Ok(BuildOutcome::Aborted { fees: total_fees, cached_reads }) - } - - let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( - &mut db, - &chain_spec, - attributes.payload_attributes.timestamp, - attributes.clone().payload_attributes.withdrawals, - )?; - - // merge all transitions into bundle state, this would apply the withdrawal balance changes - // and 4788 contract call - db.merge_transitions(BundleRetention::Reverts); - - let execution_outcome = ExecutionOutcome::new( - db.take_bundle(), - vec![receipts.clone()].into(), - block_number, - Vec::new(), - ); - let receipts_root = execution_outcome - .generic_receipts_root_slow(block_number, |receipts| { - calculate_receipt_root_no_memo_optimism(receipts, &chain_spec, attributes.timestamp()) - }) - .expect("Number is in range"); - let logs_bloom = execution_outcome.block_logs_bloom(block_number).expect("Number is in range"); - - // calculate the state root - let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state); - let (state_root, trie_output) = { - let state_provider = db.database.0.inner.borrow_mut(); - state_provider.db.state_root_with_updates(hashed_state.clone()).inspect_err(|err| { - warn!(target: "payload_builder", - parent_hash=%parent_block.hash(), - %err, - "failed to calculate state root for payload" - ); - })? - }; - - // create the block header - let transactions_root = proofs::calculate_transaction_root(&executed_txs); - - // initialize empty blob sidecars. There are no blob transactions on L2. 
- let blob_sidecars = Vec::new(); - let mut excess_blob_gas = None; - let mut blob_gas_used = None; - - // only determine cancun fields when active - if chain_spec.is_cancun_active_at_timestamp(attributes.payload_attributes.timestamp) { - excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_block.timestamp) { - let parent_excess_blob_gas = parent_block.excess_blob_gas.unwrap_or_default(); - let parent_blob_gas_used = parent_block.blob_gas_used.unwrap_or_default(); - Some(calc_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used)) - } else { - // for the first post-fork block, both parent.blob_gas_used and - // parent.excess_blob_gas are evaluated as 0 - Some(calc_excess_blob_gas(0, 0)) - }; - - blob_gas_used = Some(0); - } - - let header = Header { - parent_hash: parent_block.hash(), - ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: initialized_block_env.coinbase, - state_root, - transactions_root, - receipts_root, - withdrawals_root, - logs_bloom, - timestamp: attributes.payload_attributes.timestamp, - mix_hash: attributes.payload_attributes.prev_randao, - nonce: BEACON_NONCE.into(), - base_fee_per_gas: Some(base_fee), - number: parent_block.number + 1, - gas_limit: block_gas_limit, - difficulty: U256::ZERO, - gas_used: cumulative_gas_used, - extra_data, - parent_beacon_block_root: attributes.payload_attributes.parent_beacon_block_root, - blob_gas_used, - excess_blob_gas: excess_blob_gas.map(Into::into), - requests_root: None, - }; - - // seal the block - let block = Block { - header, - body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals, requests: None }, - }; - - let sealed_block = block.seal_slow(); - debug!(target: "payload_builder", ?sealed_block, "sealed built block"); - - // create the executed block data - let executed = ExecutedBlock { - block: Arc::new(sealed_block.clone()), - senders: Arc::new(executed_senders), - execution_output: Arc::new(execution_outcome), - hashed_state: Arc::new(hashed_state), - trie: Arc::new(trie_output), - }; - - let mut payload = OptimismBuiltPayload::new( - attributes.payload_attributes.id, - sealed_block, - total_fees, - chain_spec, - attributes, - Some(executed), - ); - - // extend the payload with the blob sidecars from the executed txs - payload.extend_sidecars(blob_sidecars); - - Ok(BuildOutcome::Better { payload, cached_reads }) + Ok(None) + } } diff --git a/crates/optimism/payload/src/config.rs b/crates/optimism/payload/src/config.rs new file mode 100644 index 00000000000..469bfc9fe31 --- /dev/null +++ b/crates/optimism/payload/src/config.rs @@ -0,0 +1,125 @@ +//! Additional configuration for the OP builder + +use std::sync::{atomic::AtomicU64, Arc}; + +/// Settings for the OP builder. +#[derive(Debug, Clone, Default)] +pub struct OpBuilderConfig { + /// Data availability configuration for the OP builder. + pub da_config: OpDAConfig, +} + +impl OpBuilderConfig { + /// Creates a new OP builder configuration with the given data availability configuration. + pub const fn new(da_config: OpDAConfig) -> Self { + Self { da_config } + } + + /// Returns the Data Availability configuration for the OP builder, if it has configured + /// constraints. + pub fn constrained_da_config(&self) -> Option<&OpDAConfig> { + if self.da_config.is_empty() { + None + } else { + Some(&self.da_config) + } + } +} + +/// Contains the Data Availability configuration for the OP builder. +/// +/// This type is shareable and can be used to update the DA configuration for the OP payload +/// builder. 
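+///
+/// A rough usage sketch (values are illustrative; `Clone` hands out a handle to the same
+/// `Arc`'d counters, and `0` clears a limit):
+///
+/// ```ignore
+/// let da = OpDAConfig::default();
+/// da.set_max_da_size(100_000, 1_000_000); // per-tx and per-block maxima
+/// assert_eq!(da.max_da_tx_size(), Some(100_000));
+///
+/// let handle = da.clone();
+/// handle.set_max_da_size(0, 0); // 0 means "no limit"
+/// assert_eq!(da.max_da_tx_size(), None);
+/// ```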
+#[derive(Debug, Clone, Default)] +pub struct OpDAConfig { + inner: Arc, +} + +impl OpDAConfig { + /// Creates a new Data Availability configuration with the given maximum sizes. + pub fn new(max_da_tx_size: u64, max_da_block_size: u64) -> Self { + let this = Self::default(); + this.set_max_da_size(max_da_tx_size, max_da_block_size); + this + } + + /// Returns whether the configuration is empty. + pub fn is_empty(&self) -> bool { + self.max_da_tx_size().is_none() && self.max_da_block_size().is_none() + } + + /// Returns the max allowed data availability size per transactions, if any. + pub fn max_da_tx_size(&self) -> Option { + let val = self.inner.max_da_tx_size.load(std::sync::atomic::Ordering::Relaxed); + if val == 0 { + None + } else { + Some(val) + } + } + + /// Returns the max allowed data availability size per block, if any. + pub fn max_da_block_size(&self) -> Option { + let val = self.inner.max_da_block_size.load(std::sync::atomic::Ordering::Relaxed); + if val == 0 { + None + } else { + Some(val) + } + } + + /// Sets the maximum data availability size currently allowed for inclusion. 0 means no maximum. + pub fn set_max_da_size(&self, max_da_tx_size: u64, max_da_block_size: u64) { + self.set_max_tx_size(max_da_tx_size); + self.set_max_block_size(max_da_block_size); + } + + /// Sets the maximum data availability size per transaction currently allowed for inclusion. 0 + /// means no maximum. + pub fn set_max_tx_size(&self, max_da_tx_size: u64) { + self.inner.max_da_tx_size.store(max_da_tx_size, std::sync::atomic::Ordering::Relaxed); + } + + /// Sets the maximum data availability size per block currently allowed for inclusion. 0 means + /// no maximum. + pub fn set_max_block_size(&self, max_da_block_size: u64) { + self.inner.max_da_block_size.store(max_da_block_size, std::sync::atomic::Ordering::Relaxed); + } +} + +#[derive(Debug, Default)] +struct OpDAConfigInner { + /// Don't include any transactions with data availability size larger than this in any built + /// block + /// + /// 0 means no limit. + max_da_tx_size: AtomicU64, + /// Maximum total data availability size for a block + /// + /// 0 means no limit. + max_da_block_size: AtomicU64, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_da() { + let da = OpDAConfig::default(); + assert_eq!(da.max_da_tx_size(), None); + assert_eq!(da.max_da_block_size(), None); + da.set_max_da_size(100, 200); + assert_eq!(da.max_da_tx_size(), Some(100)); + assert_eq!(da.max_da_block_size(), Some(200)); + da.set_max_da_size(0, 0); + assert_eq!(da.max_da_tx_size(), None); + assert_eq!(da.max_da_block_size(), None); + } + + #[test] + fn test_da_constrained() { + let config = OpBuilderConfig::default(); + assert!(config.constrained_da_config().is_none()); + } +} diff --git a/crates/optimism/payload/src/error.rs b/crates/optimism/payload/src/error.rs index 2016fdc6dd9..6b2a85e7a97 100644 --- a/crates/optimism/payload/src/error.rs +++ b/crates/optimism/payload/src/error.rs @@ -2,10 +2,10 @@ /// Optimism specific payload building errors. #[derive(Debug, thiserror::Error)] -pub enum OptimismPayloadBuilderError { +pub enum OpPayloadBuilderError { /// Thrown when a transaction fails to convert to a - /// [`reth_primitives::TransactionSignedEcRecovered`]. - #[error("failed to convert deposit transaction to TransactionSignedEcRecovered")] + /// [`reth_primitives::RecoveredTx`]. 
+ #[error("failed to convert deposit transaction to RecoveredTx")] TransactionEcRecoverFailed, /// Thrown when the L1 block info could not be parsed from the calldata of the /// first transaction supplied in the payload attributes. diff --git a/crates/optimism/payload/src/lib.rs b/crates/optimism/payload/src/lib.rs index c06b49c5376..53fad1118fd 100644 --- a/crates/optimism/payload/src/lib.rs +++ b/crates/optimism/payload/src/lib.rs @@ -12,7 +12,9 @@ #![cfg(feature = "optimism")] pub mod builder; -pub use builder::OptimismPayloadBuilder; +pub use builder::OpPayloadBuilder; pub mod error; pub mod payload; -pub use payload::{OpPayloadAttributes, OptimismBuiltPayload, OptimismPayloadBuilderAttributes}; +pub use payload::{OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes}; + +pub mod config; diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 122c2fde526..e243745cea6 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -1,11 +1,13 @@ //! Payload related types -//! Optimism builder support - -use alloy_eips::eip2718::Decodable2718; -use alloy_primitives::{Address, B256, U256}; +use alloy_eips::{ + eip1559::BaseFeeParams, eip2718::Decodable2718, eip4844::BlobTransactionSidecar, + eip4895::Withdrawals, eip7685::Requests, +}; +use alloy_primitives::{keccak256, Address, Bytes, B256, B64, U256}; use alloy_rlp::Encodable; use alloy_rpc_types_engine::{ExecutionPayloadEnvelopeV2, ExecutionPayloadV1, PayloadId}; +use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError}; /// Re-export for use in downstream arguments. pub use op_alloy_rpc_types_engine::OpPayloadAttributes; use op_alloy_rpc_types_engine::{OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4}; @@ -14,18 +16,15 @@ use reth_chainspec::EthereumHardforks; use reth_optimism_chainspec::OpChainSpec; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; -use reth_primitives::{ - transaction::WithEncoded, BlobTransactionSidecar, SealedBlock, TransactionSigned, Withdrawals, -}; +use reth_primitives::{transaction::WithEncoded, SealedBlock, TransactionSigned}; use reth_rpc_types_compat::engine::payload::{ - block_to_payload_v1, block_to_payload_v3, block_to_payload_v4, - convert_block_to_payload_field_v2, + block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; use std::sync::Arc; /// Optimism Payload Builder Attributes -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct OptimismPayloadBuilderAttributes { +#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub struct OpPayloadBuilderAttributes { /// Inner ethereum payload builder attributes pub payload_attributes: EthPayloadBuilderAttributes, /// `NoTxPool` option for the generated payload @@ -35,17 +34,35 @@ pub struct OptimismPayloadBuilderAttributes { pub transactions: Vec>, /// The gas limit for the generated payload pub gas_limit: Option, + /// EIP-1559 parameters for the generated payload + pub eip_1559_params: Option, } -impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { +impl OpPayloadBuilderAttributes { + /// Extracts the `eip1559` parameters for the payload. + pub fn get_holocene_extra_data( + &self, + default_base_fee_params: BaseFeeParams, + ) -> Result { + self.eip_1559_params + .map(|params| decode_holocene_extra_data(params, default_base_fee_params)) + .ok_or(EIP1559ParamError::NoEIP1559Params)? 
+ } +} + +impl PayloadBuilderAttributes for OpPayloadBuilderAttributes { type RpcPayloadAttributes = OpPayloadAttributes; type Error = alloy_rlp::Error; /// Creates a new payload builder for the given parent block and the attributes. /// /// Derives the unique [`PayloadId`] for the given parent and attributes - fn try_new(parent: B256, attributes: OpPayloadAttributes) -> Result { - let id = payload_id_optimism(&parent, &attributes); + fn try_new( + parent: B256, + attributes: OpPayloadAttributes, + version: u8, + ) -> Result { + let id = payload_id_optimism(&parent, &attributes, version); let transactions = attributes .transactions @@ -79,6 +96,7 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { no_tx_pool: attributes.no_tx_pool.unwrap_or_default(), transactions, gas_limit: attributes.gas_limit, + eip_1559_params: attributes.eip_1559_params, }) } @@ -113,11 +131,11 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { /// Contains the built payload. #[derive(Debug, Clone)] -pub struct OptimismBuiltPayload { +pub struct OpBuiltPayload { /// Identifier of the payload pub(crate) id: PayloadId, /// The built block - pub(crate) block: SealedBlock, + pub(crate) block: Arc, /// Block execution data for the payload, if any. pub(crate) executed_block: Option, /// The fees of the block @@ -128,19 +146,19 @@ pub struct OptimismBuiltPayload { /// The rollup's chainspec. pub(crate) chain_spec: Arc, /// The payload attributes. - pub(crate) attributes: OptimismPayloadBuilderAttributes, + pub(crate) attributes: OpPayloadBuilderAttributes, } // === impl BuiltPayload === -impl OptimismBuiltPayload { +impl OpBuiltPayload { /// Initializes the payload with the given initial block. pub const fn new( id: PayloadId, - block: SealedBlock, + block: Arc, fees: U256, chain_spec: Arc, - attributes: OptimismPayloadBuilderAttributes, + attributes: OpPayloadBuilderAttributes, executed_block: Option, ) -> Self { Self { id, block, executed_block, fees, sidecars: Vec::new(), chain_spec, attributes } @@ -152,7 +170,7 @@ impl OptimismBuiltPayload { } /// Returns the built block(sealed) - pub const fn block(&self) -> &SealedBlock { + pub fn block(&self) -> &SealedBlock { &self.block } @@ -167,7 +185,7 @@ impl OptimismBuiltPayload { } } -impl BuiltPayload for OptimismBuiltPayload { +impl BuiltPayload for OpBuiltPayload { fn block(&self) -> &SealedBlock { &self.block } @@ -179,9 +197,13 @@ impl BuiltPayload for OptimismBuiltPayload { fn executed_block(&self) -> Option { self.executed_block.clone() } + + fn requests(&self) -> Option { + None + } } -impl BuiltPayload for &OptimismBuiltPayload { +impl BuiltPayload for &OpBuiltPayload { fn block(&self) -> &SealedBlock { (**self).block() } @@ -193,27 +215,34 @@ impl BuiltPayload for &OptimismBuiltPayload { fn executed_block(&self) -> Option { self.executed_block.clone() } + + fn requests(&self) -> Option { + None + } } // V1 engine_getPayloadV1 response -impl From for ExecutionPayloadV1 { - fn from(value: OptimismBuiltPayload) -> Self { - block_to_payload_v1(value.block) +impl From for ExecutionPayloadV1 { + fn from(value: OpBuiltPayload) -> Self { + block_to_payload_v1(Arc::unwrap_or_clone(value.block)) } } // V2 engine_getPayloadV2 response -impl From for ExecutionPayloadEnvelopeV2 { - fn from(value: OptimismBuiltPayload) -> Self { - let OptimismBuiltPayload { block, fees, .. } = value; +impl From for ExecutionPayloadEnvelopeV2 { + fn from(value: OpBuiltPayload) -> Self { + let OpBuiltPayload { block, fees, .. 
} = value; - Self { block_value: fees, execution_payload: convert_block_to_payload_field_v2(block) } + Self { + block_value: fees, + execution_payload: convert_block_to_payload_field_v2(Arc::unwrap_or_clone(block)), + } } } -impl From for OpExecutionPayloadEnvelopeV3 { - fn from(value: OptimismBuiltPayload) -> Self { - let OptimismBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. } = value; +impl From for OpExecutionPayloadEnvelopeV3 { + fn from(value: OpBuiltPayload) -> Self { + let OpBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. } = value; let parent_beacon_block_root = if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp()) { @@ -222,7 +251,7 @@ impl From for OpExecutionPayloadEnvelopeV3 { B256::ZERO }; Self { - execution_payload: block_to_payload_v3(block), + execution_payload: block_to_payload_v3(Arc::unwrap_or_clone(block)), block_value: fees, // From the engine API spec: // @@ -238,9 +267,9 @@ impl From for OpExecutionPayloadEnvelopeV3 { } } } -impl From for OpExecutionPayloadEnvelopeV4 { - fn from(value: OptimismBuiltPayload) -> Self { - let OptimismBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. } = value; +impl From for OpExecutionPayloadEnvelopeV4 { + fn from(value: OpBuiltPayload) -> Self { + let OpBuiltPayload { block, fees, sidecars, chain_spec, attributes, .. } = value; let parent_beacon_block_root = if chain_spec.is_cancun_active_at_timestamp(attributes.timestamp()) { @@ -249,7 +278,7 @@ impl From for OpExecutionPayloadEnvelopeV4 { B256::ZERO }; Self { - execution_payload: block_to_payload_v4(block), + execution_payload: block_to_payload_v3(Arc::unwrap_or_clone(block)), block_value: fees, // From the engine API spec: // @@ -262,6 +291,7 @@ impl From for OpExecutionPayloadEnvelopeV4 { should_override_builder: false, blobs_bundle: sidecars.into_iter().map(Into::into).collect::>().into(), parent_beacon_block_root, + execution_requests: vec![], } } } @@ -269,7 +299,11 @@ impl From for OpExecutionPayloadEnvelopeV4 { /// Generates the payload id for the configured payload from the [`OpPayloadAttributes`]. /// /// Returns an 8-byte identifier by hashing the payload components with sha256 hash. 
-pub(crate) fn payload_id_optimism(parent: &B256, attributes: &OpPayloadAttributes) -> PayloadId { +pub(crate) fn payload_id_optimism( + parent: &B256, + attributes: &OpPayloadAttributes, + payload_version: u8, +) -> PayloadId { use sha2::Digest; let mut hasher = sha2::Sha256::new(); hasher.update(parent.as_slice()); @@ -287,15 +321,91 @@ pub(crate) fn payload_id_optimism(parent: &B256, attributes: &OpPayloadAttribute } let no_tx_pool = attributes.no_tx_pool.unwrap_or_default(); - hasher.update([no_tx_pool as u8]); - if let Some(txs) = &attributes.transactions { - txs.iter().for_each(|tx| hasher.update(tx)); + if no_tx_pool || attributes.transactions.as_ref().is_some_and(|txs| !txs.is_empty()) { + hasher.update([no_tx_pool as u8]); + let txs_len = attributes.transactions.as_ref().map(|txs| txs.len()).unwrap_or_default(); + hasher.update(&txs_len.to_be_bytes()[..]); + if let Some(txs) = &attributes.transactions { + for tx in txs { + // we have to just hash the bytes here because otherwise we would need to decode + // the transactions here which really isn't ideal + let tx_hash = keccak256(tx); + // maybe we can try just taking the hash and not decoding + hasher.update(tx_hash) + } + } } if let Some(gas_limit) = attributes.gas_limit { hasher.update(gas_limit.to_be_bytes()); } - let out = hasher.finalize(); + if let Some(eip_1559_params) = attributes.eip_1559_params { + hasher.update(eip_1559_params.as_slice()); + } + + let mut out = hasher.finalize(); + out[0] = payload_version; PayloadId::new(out.as_slice()[..8].try_into().expect("sufficient length")) } + +#[cfg(test)] +mod tests { + use super::*; + use crate::OpPayloadAttributes; + use alloy_primitives::{address, b256, bytes, FixedBytes}; + use alloy_rpc_types_engine::PayloadAttributes; + use reth_payload_primitives::EngineApiMessageVersion; + use std::str::FromStr; + + #[test] + fn test_payload_id_parity_op_geth() { + // INFO rollup_boost::server:received fork_choice_updated_v3 from builder and l2_client + // payload_id_builder="0x6ef26ca02318dcf9" payload_id_l2="0x03d2dae446d2a86a" + let expected = + PayloadId::new(FixedBytes::<8>::from_str("0x03d2dae446d2a86a").unwrap().into()); + let attrs = OpPayloadAttributes { + payload_attributes: PayloadAttributes { + timestamp: 1728933301, + prev_randao: b256!("9158595abbdab2c90635087619aa7042bbebe47642dfab3c9bfb934f6b082765"), + suggested_fee_recipient: address!("4200000000000000000000000000000000000011"), + withdrawals: Some([].into()), + parent_beacon_block_root: b256!("8fe0193b9bf83cb7e5a08538e494fecc23046aab9a497af3704f4afdae3250ff").into(), + target_blobs_per_block: None, + max_blobs_per_block: None, + }, + transactions: Some([bytes!("7ef8f8a0dc19cfa777d90980e4875d0a548a881baaa3f83f14d1bc0d3038bc329350e54194deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000f424000000000000000000000000300000000670d6d890000000000000125000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000014bf9181db6e381d4384bbf69c48b0ee0eed23c6ca26143c6d2544f9d39997a590000000000000000000000007f83d659683caf2767fd3c720981d51f5bc365bc")].into()), + no_tx_pool: None, + gas_limit: Some(30000000), + eip_1559_params: None, + }; + + // Reth's `PayloadId` should match op-geth's `PayloadId`. 
This fails + assert_eq!( + expected, + payload_id_optimism( + &b256!("3533bf30edaf9505d0810bf475cbe4e5f4b9889904b9845e83efdeab4e92eb1e"), + &attrs, + EngineApiMessageVersion::V3 as u8 + ) + ); + } + + #[test] + fn test_get_extra_data_post_holocene() { + let attributes = OpPayloadBuilderAttributes { + eip_1559_params: Some(B64::from_str("0x0000000800000008").unwrap()), + ..Default::default() + }; + let extra_data = attributes.get_holocene_extra_data(BaseFeeParams::new(80, 60)); + assert_eq!(extra_data.unwrap(), Bytes::copy_from_slice(&[0, 0, 0, 0, 8, 0, 0, 0, 8])); + } + + #[test] + fn test_get_extra_data_post_holocene_default() { + let attributes = + OpPayloadBuilderAttributes { eip_1559_params: Some(B64::ZERO), ..Default::default() }; + let extra_data = attributes.get_holocene_extra_data(BaseFeeParams::new(80, 60)); + assert_eq!(extra_data.unwrap(), Bytes::copy_from_slice(&[0, 0, 0, 0, 80, 0, 0, 0, 60])); + } +} diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 2054de7305b..ed8e9686fa7 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -12,7 +12,96 @@ description = "OP primitive types" workspace = true [dependencies] +# reth reth-primitives.workspace = true -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["op"] } +reth-codecs = { workspace = true, optional = true, features = ["op"] } + +# ethereum alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-rlp.workspace = true +alloy-eips.workspace = true +revm-primitives.workspace = true +secp256k1 = { workspace = true, optional = true } + +# op +op-alloy-consensus.workspace = true + +# codec +bytes = { workspace = true, optional = true } +serde = { workspace = true, optional = true } + +# misc +derive_more = { workspace = true, features = ["deref", "from", "into", "constructor"] } +rand = { workspace = true, optional = true } + +# test +arbitrary = { workspace = true, features = ["derive"], optional = true } +proptest = { workspace = true, optional = true } + +[dev-dependencies] +proptest-arbitrary-interop.workspace = true +reth-codecs = { workspace = true, features = ["test-utils", "op"] } +rstest.workspace = true +arbitrary.workspace = true +proptest.workspace = true + +[features] +default = ["std"] +std = [ + "reth-primitives-traits/std", + "reth-primitives/std", + "reth-codecs?/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-primitives/std", + "serde?/std", + "bytes?/std", + "derive_more/std", + "revm-primitives/std", + "secp256k1?/std", + "alloy-rlp/std", +] +reth-codec = [ + "dep:reth-codecs", + "std", + "rand", + "dep:proptest", + "dep:arbitrary", + "reth-primitives/reth-codec", + "reth-primitives-traits/reth-codec", + "reth-codecs?/op", + "reth-primitives/reth-codec", + "dep:bytes", +] +serde = [ + "dep:serde", + "reth-primitives-traits/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "bytes?/serde", + "reth-codecs?/serde", + "op-alloy-consensus/serde", + "rand?/serde", + "revm-primitives/serde", + "secp256k1?/serde", +] +arbitrary = [ + "dep:arbitrary", + "dep:secp256k1", + "reth-primitives-traits/arbitrary", + "reth-primitives/arbitrary", + "reth-codecs?/arbitrary", + "op-alloy-consensus/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "revm-primitives/arbitrary", + "rand", +] +optimism = [ + "revm-primitives/optimism", + "reth-primitives/optimism" +] diff --git 
a/crates/optimism/primitives/src/bedrock.rs b/crates/optimism/primitives/src/bedrock.rs index 1a347aecafe..3a345abe20a 100644 --- a/crates/optimism/primitives/src/bedrock.rs +++ b/crates/optimism/primitives/src/bedrock.rs @@ -1,9 +1,7 @@ //! OP mainnet bedrock related data. -use alloy_consensus::EMPTY_ROOT_HASH; +use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH, EMPTY_ROOT_HASH}; use alloy_primitives::{address, b256, bloom, bytes, B256, B64, U256}; -use reth_primitives::Header; -use reth_primitives_traits::constants::EMPTY_OMMER_ROOT_HASH; /// Transaction 0x9ed8f713b2cc6439657db52dcd2fdb9cc944915428f3c6e2a7703e242b259cb9 in block 985, /// replayed in blocks: @@ -86,7 +84,8 @@ pub const BEDROCK_HEADER: Header = Header { blob_gas_used: None, excess_blob_gas: None, parent_beacon_block_root: None, - requests_root: None, + requests_hash: None, + target_blobs_per_block: None, }; /// Bedrock total difficulty on Optimism Mainnet. diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index 659900b9adb..b1f029d20bc 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -6,5 +6,32 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "optimism")] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; pub mod bedrock; +pub mod transaction; + +pub use transaction::{signed::OpTransactionSigned, tx_type::OpTxType, OpTransaction}; + +/// Optimism primitive types. +pub type OpPrimitives = reth_primitives::EthPrimitives; + +// TODO: once we are ready for separating primitive types, introduce a separate `NodePrimitives` +// implementation used exclusively by legacy engine. +// +// #[derive(Debug, Default, Clone, PartialEq, Eq)] +// pub struct OpPrimitives; +// +// impl NodePrimitives for OpPrimitives { +// type Block = Block; +// type BlockHeader = Header; +// type BlockBody = BlockBody; +// type SignedTx = TransactionSigned; +// type TxType = OpTxType; +// type Receipt = Receipt; +// } diff --git a/crates/optimism/primitives/src/transaction/mod.rs b/crates/optimism/primitives/src/transaction/mod.rs new file mode 100644 index 00000000000..86ac822c744 --- /dev/null +++ b/crates/optimism/primitives/src/transaction/mod.rs @@ -0,0 +1,199 @@ +//! Wrapper of [`OpTypedTransaction`], that implements reth database encoding [`Compact`]. 
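The module below wraps `OpTypedTransaction` in a newtype so it can carry reth's database `Compact` encoding. A sketch of the intended round-trip, assuming (as the `from_compact` arms imply) that the identifier returned by `to_compact` selects the variant on decode:

```rust
use reth_codecs::Compact;

// Legacy is the default variant and fits the 2-bit compact identifier.
let tx = OpTransaction::default();
let mut buf = Vec::new();
let identifier = tx.to_compact(&mut buf);
assert_eq!(identifier, COMPACT_IDENTIFIER_LEGACY);

// Decoding consumes the buffer and reconstructs the same variant.
let (decoded, _rest) = OpTransaction::from_compact(&buf, identifier);
assert_eq!(decoded, tx);
```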
+ +pub mod signed; +pub mod tx_type; + +use alloy_primitives::{bytes, Bytes, TxKind, Uint, B256}; + +#[cfg(any(test, feature = "reth-codec"))] +use alloy_consensus::constants::EIP7702_TX_TYPE_ID; +use alloy_consensus::{SignableTransaction, TxLegacy}; +use alloy_eips::{eip2930::AccessList, eip7702::SignedAuthorization}; +use derive_more::{Constructor, Deref, From}; +use op_alloy_consensus::OpTypedTransaction; +#[cfg(any(test, feature = "reth-codec"))] +use op_alloy_consensus::DEPOSIT_TX_TYPE_ID; +#[cfg(any(test, feature = "reth-codec"))] +use reth_codecs::Compact; +#[cfg(any(test, feature = "reth-codec"))] +use reth_primitives::transaction::{ + COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, + COMPACT_IDENTIFIER_LEGACY, +}; +use reth_primitives_traits::InMemorySize; + +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, PartialEq, Eq, Deref, Hash, From, Constructor)] +/// Optimistic transaction. +pub struct OpTransaction(OpTypedTransaction); + +impl OpTransaction { + /// This encodes the transaction _without_ the signature, and is only suitable for creating a + /// hash intended for signing. + pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { + match self.deref() { + OpTypedTransaction::Legacy(tx) => tx.encode_for_signing(out), + OpTypedTransaction::Eip2930(tx) => tx.encode_for_signing(out), + OpTypedTransaction::Eip1559(tx) => tx.encode_for_signing(out), + OpTypedTransaction::Eip7702(tx) => tx.encode_for_signing(out), + OpTypedTransaction::Deposit(_) => {} + } + } +} + +impl Default for OpTransaction { + fn default() -> Self { + Self(OpTypedTransaction::Legacy(TxLegacy::default())) + } +} + +#[cfg(any(test, feature = "reth-codec"))] +impl Compact for OpTransaction { + fn to_compact(&self, out: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + match &self.0 { + OpTypedTransaction::Legacy(tx) => tx.to_compact(out), + OpTypedTransaction::Eip2930(tx) => tx.to_compact(out), + OpTypedTransaction::Eip1559(tx) => tx.to_compact(out), + OpTypedTransaction::Eip7702(tx) => tx.to_compact(out), + OpTypedTransaction::Deposit(tx) => tx.to_compact(out), + } + } + + fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { + use bytes::Buf; + + match identifier { + COMPACT_IDENTIFIER_LEGACY => { + let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Legacy(tx)), buf) + } + COMPACT_IDENTIFIER_EIP2930 => { + let (tx, buf) = + alloy_consensus::transaction::TxEip2930::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Eip2930(tx)), buf) + } + COMPACT_IDENTIFIER_EIP1559 => { + let (tx, buf) = + alloy_consensus::transaction::TxEip1559::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Eip1559(tx)), buf) + } + COMPACT_EXTENDED_IDENTIFIER_FLAG => { + // An identifier of 3 indicates that the transaction type did not fit into + // the backwards compatible 2 bit identifier, their transaction types are + // larger than 2 bits (eg. 4844 and Deposit Transactions). In this case, + // we need to read the concrete transaction type from the buffer by + // reading the full 8 bits (single byte) and match on this transaction type. 
+ let identifier = buf.get_u8(); + match identifier { + EIP7702_TX_TYPE_ID => { + let (tx, buf) = + alloy_consensus::transaction::TxEip7702::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Eip7702(tx)), buf) + } + DEPOSIT_TX_TYPE_ID => { + let (tx, buf) = op_alloy_consensus::TxDeposit::from_compact(buf, buf.len()); + (Self(OpTypedTransaction::Deposit(tx)), buf) + } + _ => unreachable!( + "Junk data in database: unknown Transaction variant: {identifier}" + ), + } + } + _ => unreachable!("Junk data in database: unknown Transaction variant: {identifier}"), + } + } +} + +impl alloy_consensus::Transaction for OpTransaction { + fn chain_id(&self) -> Option { + self.0.chain_id() + } + + fn nonce(&self) -> u64 { + self.0.nonce() + } + + fn gas_limit(&self) -> u64 { + self.0.gas_limit() + } + + fn gas_price(&self) -> Option { + self.0.gas_price() + } + + fn max_fee_per_gas(&self) -> u128 { + self.0.max_fee_per_gas() + } + + fn max_priority_fee_per_gas(&self) -> Option { + self.0.max_priority_fee_per_gas() + } + + fn max_fee_per_blob_gas(&self) -> Option { + self.0.max_fee_per_blob_gas() + } + + fn priority_fee_or_price(&self) -> u128 { + self.0.priority_fee_or_price() + } + + fn kind(&self) -> TxKind { + self.0.kind() + } + + fn is_create(&self) -> bool { + self.0.is_create() + } + + fn value(&self) -> Uint<256, 4> { + self.0.value() + } + + fn input(&self) -> &Bytes { + self.0.input() + } + + fn ty(&self) -> u8 { + self.0.ty() + } + + fn access_list(&self) -> Option<&AccessList> { + self.0.access_list() + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + self.0.blob_versioned_hashes() + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.0.authorization_list() + } + + fn is_dynamic_fee(&self) -> bool { + self.0.is_dynamic_fee() + } + + fn effective_gas_price(&self, base_fee: Option) -> u128 { + self.0.effective_gas_price(base_fee) + } + + fn effective_tip_per_gas(&self, base_fee: u64) -> Option { + self.0.effective_tip_per_gas(base_fee) + } +} + +impl InMemorySize for OpTransaction { + fn size(&self) -> usize { + match &self.0 { + OpTypedTransaction::Legacy(tx) => tx.size(), + OpTypedTransaction::Eip2930(tx) => tx.size(), + OpTypedTransaction::Eip1559(tx) => tx.size(), + OpTypedTransaction::Eip7702(tx) => tx.size(), + OpTypedTransaction::Deposit(tx) => tx.size(), + } + } +} diff --git a/crates/optimism/primitives/src/transaction/signed.rs b/crates/optimism/primitives/src/transaction/signed.rs new file mode 100644 index 00000000000..26581214e67 --- /dev/null +++ b/crates/optimism/primitives/src/transaction/signed.rs @@ -0,0 +1,477 @@ +//! A signed Optimism transaction. 
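`OpTransactionSigned` below caches its hash in a `OnceLock` and skips eager hashing for deposit transactions, which carry no real signature. A sketch of the lazy-hash behaviour, using only constructors defined in this file:

```rust
// The hash is not computed at construction time...
let signed = OpTransactionSigned::new_unhashed(
    OpTypedTransaction::Legacy(TxLegacy::default()),
    Signature::test_signature(),
);

// ...but on the first `tx_hash` call, after which it is cached.
let hash = *signed.tx_hash();
assert_eq!(hash, signed.recalculate_hash());
```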
+ +use crate::{OpTransaction, OpTxType}; +use alloc::vec::Vec; +use alloy_consensus::{ + transaction::RlpEcdsaTx, SignableTransaction, Transaction, TxEip1559, TxEip2930, TxEip7702, +}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, + eip2930::AccessList, + eip7702::SignedAuthorization, +}; +use alloy_primitives::{ + keccak256, Address, Bytes, PrimitiveSignature as Signature, TxHash, TxKind, Uint, B256, U256, +}; +use alloy_rlp::Header; +use core::{ + hash::{Hash, Hasher}, + mem, +}; +use derive_more::{AsRef, Deref}; +#[cfg(not(feature = "std"))] +use once_cell::sync::OnceCell as OnceLock; +use op_alloy_consensus::{OpTypedTransaction, TxDeposit}; +#[cfg(any(test, feature = "reth-codec"))] +use proptest as _; +use reth_primitives::{ + transaction::{recover_signer, recover_signer_unchecked}, + TransactionSigned, +}; +use reth_primitives_traits::{FillTxEnv, InMemorySize, SignedTransaction}; +use revm_primitives::{AuthorizationList, OptimismFields, TxEnv}; +#[cfg(feature = "std")] +use std::sync::OnceLock; + +/// Signed transaction. +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, Eq, AsRef, Deref)] +pub struct OpTransactionSigned { + /// Transaction hash + #[serde(skip)] + pub hash: OnceLock, + /// The transaction signature values + pub signature: Signature, + /// Raw transaction info + #[deref] + #[as_ref] + pub transaction: OpTransaction, +} + +impl OpTransactionSigned { + /// Calculates hash of given transaction and signature and returns new instance. + pub fn new(transaction: OpTypedTransaction, signature: Signature) -> Self { + let signed_tx = Self::new_unhashed(transaction, signature); + if !matches!(signed_tx.tx_type(), OpTxType::Deposit) { + signed_tx.hash.get_or_init(|| signed_tx.recalculate_hash()); + } + + signed_tx + } + + /// Creates a new signed transaction from the given transaction and signature without the hash. + /// + /// Note: this only calculates the hash on the first [`TransactionSigned::hash`] call. + pub fn new_unhashed(transaction: OpTypedTransaction, signature: Signature) -> Self { + Self { hash: Default::default(), signature, transaction: OpTransaction::new(transaction) } + } +} + +impl SignedTransaction for OpTransactionSigned { + type Type = OpTxType; + + fn tx_hash(&self) -> &TxHash { + self.hash.get_or_init(|| self.recalculate_hash()) + } + + fn signature(&self) -> &Signature { + &self.signature + } + + fn recover_signer(&self) -> Option
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + if let OpTypedTransaction::Deposit(TxDeposit { from, .. }) = *self.transaction { + return Some(from) + } + + let Self { transaction, signature, .. } = self; + let signature_hash = signature_hash(transaction); + recover_signer(signature, signature_hash) + } + + fn recover_signer_unchecked(&self) -> Option<Address>
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + if let OpTypedTransaction::Deposit(TxDeposit { from, .. }) = *self.transaction { + return Some(from) + } + + let Self { transaction, signature, .. } = self; + let signature_hash = signature_hash(transaction); + recover_signer_unchecked(signature, signature_hash) + } + + fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec<u8>) -> Option<Address>
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + if let OpTypedTransaction::Deposit(TxDeposit { from, .. }) = *self.transaction { + return Some(from) + } + self.encode_for_signing(buf); + let signature_hash = keccak256(buf); + recover_signer_unchecked(&self.signature, signature_hash) + } + + fn recalculate_hash(&self) -> B256 { + keccak256(self.encoded_2718()) + } +} + +impl FillTxEnv for OpTransactionSigned { + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { + let envelope = self.encoded_2718(); + + tx_env.caller = sender; + match self.transaction.deref() { + OpTypedTransaction::Legacy(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = tx.chain_id; + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clear(); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + OpTypedTransaction::Eip2930(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + OpTypedTransaction::Eip1559(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + OpTypedTransaction::Eip7702(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to.into(); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = + Some(AuthorizationList::Signed(tx.authorization_list.clone())); + } + OpTypedTransaction::Deposit(tx) => { + tx_env.access_list.clear(); + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::ZERO; + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = None; + tx_env.nonce = None; + tx_env.authorization_list = None; + + tx_env.optimism = OptimismFields { + source_hash: Some(tx.source_hash), + mint: tx.mint, + is_system_transaction: Some(tx.is_system_transaction), + enveloped_tx: Some(envelope.into()), + }; + return + } + } + + tx_env.optimism = OptimismFields { + source_hash: None, + mint: None, + is_system_transaction: Some(false), + enveloped_tx: Some(envelope.into()), + } + } +} + +impl InMemorySize for OpTransactionSigned { + #[inline] + fn size(&self) -> usize { + mem::size_of::() + self.transaction.size() + mem::size_of::() + 
} +} + +impl alloy_rlp::Encodable for OpTransactionSigned { + /// See [`alloy_rlp::Encodable`] impl for [`TransactionSigned`]. + fn encode(&self, out: &mut dyn alloy_rlp::bytes::BufMut) { + self.network_encode(out); + } + + fn length(&self) -> usize { + let mut payload_length = self.encode_2718_len(); + if !self.is_legacy() { + payload_length += Header { list: false, payload_length }.length(); + } + + payload_length + } +} + +impl alloy_rlp::Decodable for OpTransactionSigned { + /// See [`alloy_rlp::Decodable`] impl for [`TransactionSigned`]. + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + Self::network_decode(buf).map_err(Into::into) + } +} + +impl Encodable2718 for OpTransactionSigned { + fn type_flag(&self) -> Option { + match self.tx_type() { + op_alloy_consensus::OpTxType::Legacy => None, + tx_type => Some(tx_type as u8), + } + } + + fn encode_2718_len(&self) -> usize { + match self.transaction.deref() { + OpTypedTransaction::Legacy(legacy_tx) => { + legacy_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Eip2930(access_list_tx) => { + access_list_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Eip1559(dynamic_fee_tx) => { + dynamic_fee_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Eip7702(set_code_tx) => { + set_code_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(), + } + } + + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + let Self { transaction, signature, .. } = self; + + match transaction.deref() { + OpTypedTransaction::Legacy(legacy_tx) => { + // do nothing w/ with_header + legacy_tx.eip2718_encode(signature, out) + } + OpTypedTransaction::Eip2930(access_list_tx) => { + access_list_tx.eip2718_encode(signature, out) + } + OpTypedTransaction::Eip1559(dynamic_fee_tx) => { + dynamic_fee_tx.eip2718_encode(signature, out) + } + OpTypedTransaction::Eip7702(set_code_tx) => set_code_tx.eip2718_encode(signature, out), + OpTypedTransaction::Deposit(deposit_tx) => deposit_tx.encode_2718(out), + } + } +} + +impl Decodable2718 for OpTransactionSigned { + fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result { + match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? 
{ + op_alloy_consensus::OpTxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), + op_alloy_consensus::OpTxType::Eip2930 => { + let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); + let signed_tx = Self::new_unhashed(OpTypedTransaction::Eip2930(tx), signature); + signed_tx.hash.get_or_init(|| hash); + Ok(signed_tx) + } + op_alloy_consensus::OpTxType::Eip1559 => { + let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); + let signed_tx = Self::new_unhashed(OpTypedTransaction::Eip1559(tx), signature); + signed_tx.hash.get_or_init(|| hash); + Ok(signed_tx) + } + op_alloy_consensus::OpTxType::Eip7702 => { + let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); + let signed_tx = Self::new_unhashed(OpTypedTransaction::Eip7702(tx), signature); + signed_tx.hash.get_or_init(|| hash); + Ok(signed_tx) + } + op_alloy_consensus::OpTxType::Deposit => Ok(Self::new_unhashed( + OpTypedTransaction::Deposit(TxDeposit::rlp_decode(buf)?), + TxDeposit::signature(), + )), + } + } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { + let (transaction, hash, signature) = + TransactionSigned::decode_rlp_legacy_transaction_tuple(buf)?; + let signed_tx = Self::new_unhashed(OpTypedTransaction::Legacy(transaction), signature); + signed_tx.hash.get_or_init(|| hash); + + Ok(signed_tx) + } +} + +impl Transaction for OpTransactionSigned { + fn chain_id(&self) -> Option { + self.deref().chain_id() + } + + fn nonce(&self) -> u64 { + self.deref().nonce() + } + + fn gas_limit(&self) -> u64 { + self.deref().gas_limit() + } + + fn gas_price(&self) -> Option { + self.deref().gas_price() + } + + fn max_fee_per_gas(&self) -> u128 { + self.deref().max_fee_per_gas() + } + + fn max_priority_fee_per_gas(&self) -> Option { + self.deref().max_priority_fee_per_gas() + } + + fn max_fee_per_blob_gas(&self) -> Option { + self.deref().max_fee_per_blob_gas() + } + + fn priority_fee_or_price(&self) -> u128 { + self.deref().priority_fee_or_price() + } + + fn kind(&self) -> TxKind { + self.deref().kind() + } + + fn is_create(&self) -> bool { + self.deref().is_create() + } + + fn value(&self) -> Uint<256, 4> { + self.deref().value() + } + + fn input(&self) -> &Bytes { + self.deref().input() + } + + fn ty(&self) -> u8 { + self.deref().ty() + } + + fn access_list(&self) -> Option<&AccessList> { + self.deref().access_list() + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + self.deref().blob_versioned_hashes() + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.deref().authorization_list() + } + + fn is_dynamic_fee(&self) -> bool { + self.deref().is_dynamic_fee() + } + + fn effective_gas_price(&self, base_fee: Option) -> u128 { + self.deref().effective_gas_price(base_fee) + } + + fn effective_tip_per_gas(&self, base_fee: u64) -> Option { + self.deref().effective_tip_per_gas(base_fee) + } +} + +impl Default for OpTransactionSigned { + fn default() -> Self { + Self { + hash: Default::default(), + signature: Signature::test_signature(), + transaction: OpTransaction::new(OpTypedTransaction::Legacy(Default::default())), + } + } +} + +impl PartialEq for OpTransactionSigned { + fn eq(&self, other: &Self) -> bool { + self.signature == other.signature && + self.transaction == other.transaction && + self.tx_hash() == other.tx_hash() + } +} + +impl Hash for OpTransactionSigned { + fn hash(&self, state: &mut H) { + self.signature.hash(state); + self.transaction.hash(state); + } +} + +#[cfg(any(test, feature = "arbitrary"))] +impl<'a> 
arbitrary::Arbitrary<'a> for OpTransactionSigned { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + #[allow(unused_mut)] + let mut transaction = OpTypedTransaction::arbitrary(u)?; + + let secp = secp256k1::Secp256k1::new(); + let key_pair = secp256k1::Keypair::new(&secp, &mut rand::thread_rng()); + let signature = reth_primitives::transaction::util::secp256k1::sign_message( + B256::from_slice(&key_pair.secret_bytes()[..]), + signature_hash(&transaction), + ) + .unwrap(); + + // Both `Some(0)` and `None` values are encoded as empty string byte. This introduces + // ambiguity in roundtrip tests. Patch the mint value of deposit transaction here, so that + // it's `None` if zero. + if let OpTypedTransaction::Deposit(ref mut tx_deposit) = transaction { + if tx_deposit.mint == Some(0) { + tx_deposit.mint = None; + } + } + + let signature = if is_deposit(&transaction) { TxDeposit::signature() } else { signature }; + + Ok(Self::new(transaction, signature)) + } +} + +/// Calculates the signing hash for the transaction. +pub fn signature_hash(tx: &OpTypedTransaction) -> B256 { + match tx { + OpTypedTransaction::Legacy(tx) => tx.signature_hash(), + OpTypedTransaction::Eip2930(tx) => tx.signature_hash(), + OpTypedTransaction::Eip1559(tx) => tx.signature_hash(), + OpTypedTransaction::Eip7702(tx) => tx.signature_hash(), + OpTypedTransaction::Deposit(_) => B256::ZERO, + } +} + +/// Returns `true` if transaction is deposit transaction. +pub const fn is_deposit(tx: &OpTypedTransaction) -> bool { + matches!(tx, OpTypedTransaction::Deposit(_)) +} diff --git a/crates/optimism/primitives/src/transaction/tx_type.rs b/crates/optimism/primitives/src/transaction/tx_type.rs new file mode 100644 index 00000000000..8be5f3a3d5e --- /dev/null +++ b/crates/optimism/primitives/src/transaction/tx_type.rs @@ -0,0 +1,50 @@ +//! Optimism transaction type. 
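`OpTxType` below is re-exported from `op-alloy-consensus` rather than redefined, so only its `Compact` behaviour is tested here. For orientation, a sketch of the byte-level mapping, assuming op-alloy's `TryFrom<u8>` impl:

```rust
// Deposit transactions use the 0x7e EIP-2718 type byte defined by the OP stack.
assert_eq!(OpTxType::Deposit as u8, DEPOSIT_TX_TYPE_ID);
assert_eq!(OpTxType::try_from(DEPOSIT_TX_TYPE_ID).unwrap(), OpTxType::Deposit);
```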
+ +pub use op_alloy_consensus::OpTxType; + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::constants::EIP7702_TX_TYPE_ID; + use op_alloy_consensus::DEPOSIT_TX_TYPE_ID; + use reth_codecs::{txtype::*, Compact}; + use rstest::rstest; + + #[rstest] + #[case(OpTxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(OpTxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(OpTxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(OpTxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[case(OpTxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])] + fn test_txtype_to_compact( + #[case] tx_type: OpTxType, + #[case] expected_identifier: usize, + #[case] expected_buf: Vec, + ) { + let mut buf = vec![]; + let identifier = tx_type.to_compact(&mut buf); + + assert_eq!( + identifier, expected_identifier, + "Unexpected identifier for OpTxType {tx_type:?}", + ); + assert_eq!(buf, expected_buf, "Unexpected buffer for OpTxType {tx_type:?}",); + } + + #[rstest] + #[case(OpTxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(OpTxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(OpTxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(OpTxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + #[case(OpTxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID])] + fn test_txtype_from_compact( + #[case] expected_type: OpTxType, + #[case] identifier: usize, + #[case] buf: Vec, + ) { + let (actual_type, remaining_buf) = OpTxType::from_compact(&buf, identifier); + + assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}"); + assert!(remaining_buf.is_empty(), "Buffer not fully consumed for identifier {identifier}"); + } +} diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 90984998ac7..d4a0b1fce27 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -22,6 +22,7 @@ reth-rpc-server-types.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true reth-rpc.workspace = true +reth-rpc-api.workspace = true reth-node-api.workspace = true reth-network-api.workspace = true reth-node-builder.workspace = true @@ -31,16 +32,20 @@ reth-chainspec.workspace = true reth-optimism-chainspec.workspace = true reth-optimism-consensus.workspace = true reth-optimism-evm.workspace = true +reth-optimism-payload-builder.workspace = true +reth-optimism-primitives.workspace = true reth-optimism-forks.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true -alloy-rpc-types.workspace = true +alloy-rpc-types-debug.workspace = true alloy-consensus.workspace = true op-alloy-network.workspace = true op-alloy-rpc-types.workspace = true +op-alloy-rpc-types-engine.workspace = true +op-alloy-rpc-jsonrpsee.workspace = true op-alloy-consensus.workspace = true revm.workspace = true @@ -50,21 +55,24 @@ tokio.workspace = true reqwest = { workspace = true, features = ["rustls-tls-native-roots"] } # rpc +jsonrpsee-core.workspace = true jsonrpsee-types.workspace = true serde_json.workspace = true # misc thiserror.workspace = true tracing.workspace = true -derive_more.workspace = true [dev-dependencies] reth-optimism-chainspec.workspace = true [features] optimism = [ - "reth-optimism-evm/optimism", - "reth-primitives/optimism", - "reth-provider/optimism", - "revm/optimism", + 
"reth-optimism-evm/optimism", + "reth-primitives/optimism", + "reth-provider/optimism", + "revm/optimism", + "reth-optimism-consensus/optimism", + "reth-optimism-payload-builder/optimism", + "reth-optimism-primitives/optimism", ] diff --git a/crates/optimism/rpc/src/error.rs b/crates/optimism/rpc/src/error.rs index b4d349e1cc4..caafe798c81 100644 --- a/crates/optimism/rpc/src/error.rs +++ b/crates/optimism/rpc/src/error.rs @@ -1,12 +1,12 @@ //! RPC errors specific to OP. -use alloy_rpc_types::error::EthRpcErrorCode; +use alloy_rpc_types_eth::{error::EthRpcErrorCode, BlockError}; use jsonrpsee_types::error::INTERNAL_ERROR_CODE; -use reth_optimism_evm::OptimismBlockExecutionError; -use reth_primitives::revm_primitives::{InvalidTransaction, OptimismInvalidTransaction}; +use reth_optimism_evm::OpBlockExecutionError; use reth_rpc_eth_api::AsEthApiError; use reth_rpc_eth_types::EthApiError; use reth_rpc_server_types::result::{internal_rpc_err, rpc_err}; +use revm::primitives::{InvalidTransaction, OptimismInvalidTransaction}; /// Optimism specific errors, that extend [`EthApiError`]. #[derive(Debug, thiserror::Error)] @@ -16,7 +16,7 @@ pub enum OpEthApiError { Eth(#[from] EthApiError), /// EVM error originating from invalid optimism data. #[error(transparent)] - Evm(#[from] OptimismBlockExecutionError), + Evm(#[from] OpBlockExecutionError), /// Thrown when calculating L1 gas fee. #[error("failed to calculate l1 gas fee")] L1BlockFeeError, @@ -25,7 +25,7 @@ pub enum OpEthApiError { L1BlockGasError, /// Wrapper for [`revm_primitives::InvalidTransaction`](InvalidTransaction). #[error(transparent)] - InvalidTransaction(#[from] OptimismInvalidTransactionError), + InvalidTransaction(#[from] OpInvalidTransactionError), /// Sequencer client error. #[error(transparent)] Sequencer(#[from] SequencerClientError), @@ -55,7 +55,7 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { /// Optimism specific invalid transaction errors #[derive(thiserror::Error, Debug)] -pub enum OptimismInvalidTransactionError { +pub enum OpInvalidTransactionError { /// A deposit transaction was submitted as a system transaction post-regolith. #[error("no system transactions allowed after regolith")] DepositSystemTxPostRegolith, @@ -64,18 +64,18 @@ pub enum OptimismInvalidTransactionError { HaltedDepositPostRegolith, } -impl From for jsonrpsee_types::error::ErrorObject<'static> { - fn from(err: OptimismInvalidTransactionError) -> Self { +impl From for jsonrpsee_types::error::ErrorObject<'static> { + fn from(err: OpInvalidTransactionError) -> Self { match err { - OptimismInvalidTransactionError::DepositSystemTxPostRegolith | - OptimismInvalidTransactionError::HaltedDepositPostRegolith => { + OpInvalidTransactionError::DepositSystemTxPostRegolith | + OpInvalidTransactionError::HaltedDepositPostRegolith => { rpc_err(EthRpcErrorCode::TransactionRejected.code(), err.to_string(), None) } } } } -impl TryFrom for OptimismInvalidTransactionError { +impl TryFrom for OpInvalidTransactionError { type Error = InvalidTransaction; fn try_from(err: InvalidTransaction) -> Result { @@ -113,3 +113,9 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> ) } } + +impl From for OpEthApiError { + fn from(error: BlockError) -> Self { + Self::Eth(error.into()) + } +} diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index dfdd0960856..3899e0b7f5c 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -1,34 +1,30 @@ //! Loads and formats OP block RPC response. 
-use alloy_rpc_types::BlockId; +use alloy_consensus::BlockHeader; +use alloy_rpc_types_eth::BlockId; use op_alloy_network::Network; use op_alloy_rpc_types::OpTransactionReceipt; use reth_chainspec::ChainSpecProvider; -use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_node_api::BlockBody; use reth_optimism_chainspec::OpChainSpec; -use reth_primitives::TransactionMeta; -use reth_provider::{BlockReaderIdExt, HeaderProvider}; +use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; +use reth_provider::{BlockReader, HeaderProvider}; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, RpcReceipt, }; -use reth_rpc_eth_types::EthStateCache; -use crate::{OpEthApi, OpEthApiError, OpReceiptBuilder}; +use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError, OpReceiptBuilder}; impl EthBlocks for OpEthApi where Self: LoadBlock< Error = OpEthApiError, NetworkTypes: Network, + Provider: BlockReader, >, - N: FullNodeComponents>, + N: OpNodeCore + HeaderProvider>, { - #[inline] - fn provider(&self) -> impl HeaderProvider { - self.inner.provider() - } - async fn block_receipts( &self, block_id: BlockId, @@ -37,25 +33,24 @@ where Self: LoadReceipt, { if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? { - let block_number = block.number; - let base_fee = block.base_fee_per_gas; + let block_number = block.number(); + let base_fee = block.base_fee_per_gas(); let block_hash = block.hash(); - let excess_blob_gas = block.excess_blob_gas; - let timestamp = block.timestamp; - let block = block.unseal(); + let excess_blob_gas = block.excess_blob_gas(); + let timestamp = block.timestamp(); let l1_block_info = reth_optimism_evm::extract_l1_info(&block.body).map_err(OpEthApiError::from)?; return block .body - .transactions - .into_iter() + .transactions() + .iter() .zip(receipts.iter()) .enumerate() - .map(|(idx, (ref tx, receipt))| -> Result<_, _> { + .map(|(idx, (tx, receipt))| -> Result<_, _> { let meta = TransactionMeta { - tx_hash: tx.hash, + tx_hash: tx.hash(), index: idx as u64, block_hash, block_number, @@ -65,7 +60,7 @@ where }; Ok(OpReceiptBuilder::new( - &self.inner.provider().chain_spec(), + &self.inner.eth_api.provider().chain_spec(), tx, meta, receipt, @@ -85,15 +80,6 @@ where impl LoadBlock for OpEthApi where Self: LoadPendingBlock + SpawnBlocking, - N: FullNodeComponents, + N: OpNodeCore, { - #[inline] - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } } diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index f1c10e6f172..959d765e349 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,46 +1,45 @@ +use super::OpNodeCore; +use crate::{OpEthApi, OpEthApiError}; use alloy_primitives::{Bytes, TxKind, U256}; use alloy_rpc_types_eth::transaction::TransactionRequest; -use reth_chainspec::EthereumHardforks; use reth_evm::ConfigureEvm; -use reth_node_api::{FullNodeComponents, NodeTypes}; -use reth_primitives::{ - revm_primitives::{BlockEnv, OptimismFields, TxEnv}, - Header, -}; +use reth_provider::ProviderHeader; use reth_rpc_eth_api::{ - helpers::{Call, EthCall, LoadState, SpawnBlocking}, - FromEthApiError, IntoEthApiError, + helpers::{estimate::EstimateCall, Call, EthCall, LoadBlock, LoadState, SpawnBlocking}, + FromEthApiError, FullEthApiTypes, IntoEthApiError, }; use reth_rpc_eth_types::{revm_utils::CallFees, 
RpcInvalidTransactionError}; - -use crate::{OpEthApi, OpEthApiError}; +use revm::primitives::{BlockEnv, OptimismFields, TxEnv}; impl EthCall for OpEthApi +where + Self: EstimateCall + LoadBlock + FullEthApiTypes, + N: OpNodeCore, +{ +} + +impl EstimateCall for OpEthApi where Self: Call, - N: FullNodeComponents>, + Self::Error: From, + N: OpNodeCore, { } impl Call for OpEthApi where - Self: LoadState + SpawnBlocking, + Self: LoadState>> + SpawnBlocking, Self::Error: From, - N: FullNodeComponents, + N: OpNodeCore, { #[inline] fn call_gas_limit(&self) -> u64 { - self.inner.gas_cap() + self.inner.eth_api.gas_cap() } #[inline] fn max_simulate_blocks(&self) -> u64 { - self.inner.max_simulate_blocks() - } - - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() + self.inner.eth_api.max_simulate_blocks() } fn create_txn_env( @@ -49,7 +48,7 @@ where request: TransactionRequest, ) -> Result { // Ensure that if versioned hashes are set, they're not empty - if request.blob_versioned_hashes.as_ref().map_or(false, |hashes| hashes.is_empty()) { + if request.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) { return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into_eth_err()) } diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index d65dd8edd1d..4304a2a3741 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -8,21 +8,21 @@ mod call; mod pending_block; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; +use reth_node_api::NodePrimitives; +use reth_optimism_primitives::OpPrimitives; use std::{fmt, sync::Arc}; use alloy_primitives::U256; -use derive_more::Deref; use op_alloy_network::Optimism; -use reth_chainspec::EthereumHardforks; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeTypes}; use reth_node_builder::EthApiBuilderCtx; -use reth_primitives::Header; use reth_provider::{ - BlockIdReader, BlockNumReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, - StageCheckpointReader, StateProviderFactory, + BlockNumReader, BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, + EvmEnvProvider, NodePrimitivesProvider, ProviderBlock, ProviderHeader, ProviderReceipt, + ProviderTx, StageCheckpointReader, StateProviderFactory, }; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ @@ -30,7 +30,7 @@ use reth_rpc_eth_api::{ AddDevSigners, EthApiSpec, EthFees, EthSigner, EthState, LoadBlock, LoadFee, LoadState, SpawnBlocking, Trace, }, - EthApiTypes, + EthApiTypes, RpcNodeCore, RpcNodeCoreExt, }; use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; use reth_tasks::{ @@ -39,16 +39,20 @@ use reth_tasks::{ }; use reth_transaction_pool::TransactionPool; -use crate::{OpEthApiError, OpTxBuilder, SequencerClient}; +use crate::{OpEthApiError, SequencerClient}; /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. pub type EthApiNodeBackend = EthApiInner< - ::Provider, - ::Pool, - ::Network, - ::Evm, + ::Provider, + ::Pool, + ::Network, + ::Evm, >; +/// A helper trait with requirements for [`RpcNodeCore`] to be used in [`OpEthApi`]. +pub trait OpNodeCore: RpcNodeCore {} +impl OpNodeCore for T where T: RpcNodeCore {} + /// OP-Reth `Eth` API implementation. /// /// This type provides the functionality for handling `eth_` related requests. @@ -59,193 +63,272 @@ pub type EthApiNodeBackend = EthApiInner< /// /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. -#[derive(Clone, Deref)] -pub struct OpEthApi { +#[derive(Clone)] +pub struct OpEthApi { /// Gateway to node's core components. - #[deref] - inner: Arc>, - /// Sequencer client, configured to forward submitted transactions to sequencer of given OP - /// network. - sequencer_client: Option, + inner: Arc>, } -impl OpEthApi { - /// Creates a new instance for given context. 
- pub fn new(ctx: &EthApiBuilderCtx, sequencer_http: Option) -> Self { - let blocking_task_pool = - BlockingTaskPool::build().expect("failed to build blocking task pool"); - - let inner = EthApiInner::new( - ctx.provider.clone(), - ctx.pool.clone(), - ctx.network.clone(), - ctx.cache.clone(), - ctx.new_gas_price_oracle(), - ctx.config.rpc_gas_cap, - ctx.config.rpc_max_simulate_blocks, - ctx.config.eth_proof_window, - blocking_task_pool, - ctx.new_fee_history_cache(), - ctx.evm_config.clone(), - ctx.executor.clone(), - ctx.config.proof_permits, - ); - - Self { inner: Arc::new(inner), sequencer_client: sequencer_http.map(SequencerClient::new) } +impl OpEthApi +where + N: OpNodeCore< + Provider: BlockReaderIdExt + + ChainSpecProvider + + CanonStateSubscriptions + + Clone + + 'static, + >, +{ + /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. + pub const fn builder() -> OpEthApiBuilder { + OpEthApiBuilder::new() } } impl EthApiTypes for OpEthApi where Self: Send + Sync, - N: FullNodeComponents, + N: OpNodeCore, { type Error = OpEthApiError; type NetworkTypes = Optimism; - type TransactionCompat = OpTxBuilder; + type TransactionCompat = Self; + + fn tx_resp_builder(&self) -> &Self::TransactionCompat { + self + } } -impl EthApiSpec for OpEthApi +impl RpcNodeCore for OpEthApi where - Self: Send + Sync, - N: FullNodeComponents>, + N: OpNodeCore, { + type Provider = N::Provider; + type Pool = N::Pool; + type Evm = ::Evm; + type Network = ::Network; + type PayloadBuilder = (); + #[inline] - fn provider( - &self, - ) -> impl ChainSpecProvider + BlockNumReader + StageCheckpointReader - { - self.inner.provider() + fn pool(&self) -> &Self::Pool { + self.inner.eth_api.pool() } #[inline] - fn network(&self) -> impl NetworkInfo { - self.inner.network() + fn evm_config(&self) -> &Self::Evm { + self.inner.eth_api.evm_config() } #[inline] - fn starting_block(&self) -> U256 { - self.inner.starting_block() + fn network(&self) -> &Self::Network { + self.inner.eth_api.network() } #[inline] - fn signers(&self) -> &parking_lot::RwLock>> { - self.inner.signers() + fn payload_builder(&self) -> &Self::PayloadBuilder { + &() + } + + #[inline] + fn provider(&self) -> &Self::Provider { + self.inner.eth_api.provider() } } -impl SpawnBlocking for OpEthApi +impl RpcNodeCoreExt for OpEthApi where - Self: Send + Sync + Clone + 'static, - N: FullNodeComponents, + N: OpNodeCore, { #[inline] - fn io_task_spawner(&self) -> impl TaskSpawner { - self.inner.task_spawner() + fn cache(&self) -> &EthStateCache, ProviderReceipt> { + self.inner.eth_api.cache() } +} + +impl EthApiSpec for OpEthApi +where + N: OpNodeCore< + Provider: ChainSpecProvider + + BlockNumReader + + StageCheckpointReader, + Network: NetworkInfo, + >, +{ + type Transaction = ProviderTx; #[inline] - fn tracing_task_pool(&self) -> &BlockingTaskPool { - self.inner.blocking_task_pool() + fn starting_block(&self) -> U256 { + self.inner.eth_api.starting_block() } #[inline] - fn tracing_task_guard(&self) -> &BlockingTaskGuard { - self.inner.blocking_task_guard() + fn signers(&self) -> &parking_lot::RwLock>>>> { + self.inner.eth_api.signers() } } -impl LoadFee for OpEthApi +impl SpawnBlocking for OpEthApi where - Self: LoadBlock, - N: FullNodeComponents>, + Self: Send + Sync + Clone + 'static, + N: OpNodeCore, { #[inline] - fn provider( - &self, - ) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() + fn io_task_spawner(&self) -> impl TaskSpawner { + 
self.inner.eth_api.task_spawner() } #[inline] - fn gas_oracle(&self) -> &GasPriceOracle { - self.inner.gas_oracle() + fn tracing_task_pool(&self) -> &BlockingTaskPool { + self.inner.eth_api.blocking_task_pool() } #[inline] - fn fee_history_cache(&self) -> &FeeHistoryCache { - self.inner.fee_history_cache() + fn tracing_task_guard(&self) -> &BlockingTaskGuard { + self.inner.eth_api.blocking_task_guard() } } -impl LoadState for OpEthApi +impl LoadFee for OpEthApi where - Self: Send + Sync + Clone, - N: FullNodeComponents>, + Self: LoadBlock, + N: OpNodeCore< + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, + >, { #[inline] - fn provider( - &self, - ) -> impl StateProviderFactory + ChainSpecProvider { - self.inner.provider() + fn gas_oracle(&self) -> &GasPriceOracle { + self.inner.eth_api.gas_oracle() } #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() + fn fee_history_cache(&self) -> &FeeHistoryCache { + self.inner.eth_api.fee_history_cache() } +} - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } +impl LoadState for OpEthApi where + N: OpNodeCore< + Provider: StateProviderFactory + ChainSpecProvider, + Pool: TransactionPool, + > +{ } impl EthState for OpEthApi where Self: LoadState + SpawnBlocking, - N: FullNodeComponents, + N: OpNodeCore, { #[inline] fn max_proof_window(&self) -> u64 { - self.inner.eth_proof_window() + self.inner.eth_api.eth_proof_window() } } impl EthFees for OpEthApi where Self: LoadFee, - N: FullNodeComponents, + N: OpNodeCore, { } impl Trace for OpEthApi where - Self: LoadState, - N: FullNodeComponents, + Self: RpcNodeCore + + LoadState< + Evm: ConfigureEvm< + Header = ProviderHeader, + Transaction = ProviderTx, + >, + >, + N: OpNodeCore, { - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } } impl AddDevSigners for OpEthApi where - N: FullNodeComponents>, + N: OpNodeCore, { fn with_dev_accounts(&self) { - *self.signers().write() = DevSigner::random_signers(20) + *self.inner.eth_api.signers().write() = DevSigner::random_signers(20) } } -impl fmt::Debug for OpEthApi { +impl fmt::Debug for OpEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApi").finish_non_exhaustive() } } + +/// Container type `OpEthApi` +#[allow(missing_debug_implementations)] +struct OpEthApiInner { + /// Gateway to node's core components. + eth_api: EthApiNodeBackend, + /// Sequencer client, configured to forward submitted transactions to sequencer of given OP + /// network. + sequencer_client: Option, +} + +/// A type that knows how to build a [`OpEthApi`]. +#[derive(Debug, Default)] +pub struct OpEthApiBuilder { + /// Sequencer client, configured to forward submitted transactions to sequencer of given OP + /// network. + sequencer_client: Option, +} + +impl OpEthApiBuilder { + /// Creates a [`OpEthApiBuilder`] instance from [`EthApiBuilderCtx`]. + pub const fn new() -> Self { + Self { sequencer_client: None } + } + + /// With a [`SequencerClient`]. + pub fn with_sequencer(mut self, sequencer_client: Option) -> Self { + self.sequencer_client = sequencer_client; + self + } +} + +impl OpEthApiBuilder { + /// Builds an instance of [`OpEthApi`] + pub fn build(self, ctx: &EthApiBuilderCtx) -> OpEthApi + where + N: OpNodeCore< + Provider: BlockReaderIdExt< + Block = <::Primitives as NodePrimitives>::Block, + Receipt = <::Primitives as NodePrimitives>::Receipt, + > + ChainSpecProvider + + CanonStateSubscriptions + + Clone + + 'static, + >, + { + let blocking_task_pool = + BlockingTaskPool::build().expect("failed to build blocking task pool"); + + let eth_api = EthApiInner::new( + ctx.provider.clone(), + ctx.pool.clone(), + ctx.network.clone(), + ctx.cache.clone(), + ctx.new_gas_price_oracle(), + ctx.config.rpc_gas_cap, + ctx.config.rpc_max_simulate_blocks, + ctx.config.eth_proof_window, + blocking_task_pool, + ctx.new_fee_history_cache(), + ctx.evm_config.clone(), + ctx.executor.clone(), + ctx.config.proof_permits, + ); + + OpEthApi { + inner: Arc::new(OpEthApiInner { eth_api, sequencer_client: self.sequencer_client }), + } + } +} diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 5b716f39320..01c2264063e 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -1,54 +1,56 @@ //! Loads OP pending block for a RPC response. 
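The `Deref`-based construction is gone: `OpEthApi` is now built explicitly via `OpEthApiBuilder` and reaches its core components through `RpcNodeCore` delegation. A sketch of the new construction path, where `ctx` is the node's `EthApiBuilderCtx` and `sequencer_http: Option<String>` is a hypothetical CLI argument:

```rust
// Forward submitted transactions to the sequencer when an endpoint is given.
let op_eth_api = OpEthApi::builder()
    .with_sequencer(sequencer_http.map(SequencerClient::new))
    .build(&ctx);
```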
-use alloy_primitives::{BlockNumber, B256}; -use reth_chainspec::EthereumHardforks; +use crate::OpEthApi; +use alloy_consensus::{ + constants::EMPTY_WITHDRAWALS, proofs::calculate_transaction_root, Header, EMPTY_OMMER_ROOT_HASH, +}; +use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE, BlockNumberOrTag}; +use alloy_primitives::{B256, U256}; +use op_alloy_network::Network; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; -use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; -use reth_primitives::{ - revm_primitives::BlockEnv, BlockNumberOrTag, Header, Receipt, SealedBlockWithSenders, -}; +use reth_primitives::{logs_bloom, BlockBody, Receipt, SealedBlockWithSenders, TransactionSigned}; use reth_provider::{ - BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, - ReceiptProvider, StateProviderFactory, + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderBlock, + ProviderHeader, ProviderReceipt, ProviderTx, ReceiptProvider, StateProviderFactory, }; use reth_rpc_eth_api::{ helpers::{LoadPendingBlock, SpawnBlocking}, - FromEthApiError, + EthApiTypes, FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, PendingBlock}; -use reth_transaction_pool::TransactionPool; - -use crate::OpEthApi; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use revm::primitives::{BlockEnv, ExecutionResult}; impl LoadPendingBlock for OpEthApi where - Self: SpawnBlocking, - N: FullNodeComponents>, + Self: SpawnBlocking + + EthApiTypes< + NetworkTypes: Network< + HeaderResponse = alloy_rpc_types_eth::Header>, + >, + >, + N: RpcNodeCore< + Provider: BlockReaderIdExt< + Transaction = reth_primitives::TransactionSigned, + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, + Pool: TransactionPool>>, + Evm: ConfigureEvm
, + >, { #[inline] - fn provider( + fn pending_block( &self, - ) -> impl BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider - + StateProviderFactory { - self.inner.provider() - } - - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } - - #[inline] - fn pending_block(&self) -> &tokio::sync::Mutex> { - self.inner.pending_block() - } - - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() + ) -> &tokio::sync::Mutex< + Option, ProviderReceipt>>, + > { + self.inner.eth_api.pending_block() } /// Returns the locally built pending block @@ -78,20 +80,76 @@ where Ok(Some((block, receipts))) } - fn receipts_root( + fn assemble_block( &self, block_env: &BlockEnv, - execution_outcome: &ExecutionOutcome, - block_number: BlockNumber, - ) -> B256 { - execution_outcome - .generic_receipts_root_slow(block_number, |receipts| { - calculate_receipt_root_no_memo_optimism( - receipts, - self.provider().chain_spec().as_ref(), - block_env.timestamp.to::(), - ) - }) - .expect("Block is present") + parent_hash: B256, + state_root: B256, + transactions: Vec>, + receipts: &[ProviderReceipt], + ) -> reth_provider::ProviderBlock { + let chain_spec = self.provider().chain_spec(); + let timestamp = block_env.timestamp.to::(); + + let transactions_root = calculate_transaction_root(&transactions); + let receipts_root = calculate_receipt_root_no_memo_optimism( + &receipts.iter().collect::>(), + &chain_spec, + timestamp, + ); + + let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| &r.logs)); + let is_cancun = chain_spec.is_cancun_active_at_timestamp(timestamp); + let is_prague = chain_spec.is_prague_active_at_timestamp(timestamp); + let is_shanghai = chain_spec.is_shanghai_active_at_timestamp(timestamp); + + let header = Header { + parent_hash, + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: block_env.coinbase, + state_root, + transactions_root, + receipts_root, + withdrawals_root: (is_shanghai).then_some(EMPTY_WITHDRAWALS), + logs_bloom, + timestamp, + mix_hash: block_env.prevrandao.unwrap_or_default(), + nonce: BEACON_NONCE.into(), + base_fee_per_gas: Some(block_env.basefee.to::()), + number: block_env.number.to::(), + gas_limit: block_env.gas_limit.to::(), + difficulty: U256::ZERO, + gas_used: receipts.last().map(|r| r.cumulative_gas_used).unwrap_or_default(), + blob_gas_used: is_cancun.then(|| { + transactions.iter().map(|tx| tx.blob_gas_used().unwrap_or_default()).sum::() + }), + excess_blob_gas: block_env.get_blob_excess_gas().map(Into::into), + extra_data: Default::default(), + parent_beacon_block_root: is_cancun.then_some(B256::ZERO), + requests_hash: is_prague.then_some(EMPTY_REQUESTS_HASH), + target_blobs_per_block: None, + }; + + // seal the block + reth_primitives::Block { + header, + body: BlockBody { transactions, ommers: vec![], withdrawals: None }, + } + } + + fn assemble_receipt( + &self, + tx: &ProviderTx, + result: ExecutionResult, + cumulative_gas_used: u64, + ) -> reth_provider::ProviderReceipt { + #[allow(clippy::needless_update)] + Receipt { + tx_type: tx.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.into_logs().into_iter().map(Into::into).collect(), + ..Default::default() + } } } diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 200b626d8c3..2a4df1ada49 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -1,17 +1,19 @@ //! Loads and formats OP receipt RPC response. 
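The hunks below route all OP-specific receipt fields through `OpReceiptFieldsBuilder`. As a sketch of that flow, mirroring the `OpReceiptBuilder::new` body further down (`chain_spec`, `transaction`, `receipt`, `meta`, and `l1_block_info` are assumed to be in scope, inside a fallible function):

```rust
// Sketch of the field-builder flow used by `OpReceiptBuilder::new` below.
let op_fields: OpTransactionReceiptFields = OpReceiptFieldsBuilder::new(meta.timestamp)
    // L1 cost fields (fee, data gas, base fee and scalars) derived from the tx and L1 block info
    .l1_block_info(&chain_spec, &transaction, l1_block_info)?
    // deposit-only fields; these stay `None` for non-deposit transactions
    .deposit_nonce(receipt.deposit_nonce)
    .deposit_version(receipt.deposit_receipt_version)
    .build();
```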
use alloy_eips::eip2718::Encodable2718; -use alloy_rpc_types::{AnyReceiptEnvelope, Log, TransactionReceipt}; -use op_alloy_consensus::{OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope}; -use op_alloy_rpc_types::{receipt::L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; +use alloy_rpc_types_eth::{Log, TransactionReceipt}; +use op_alloy_consensus::{ + DepositTransaction, OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope, +}; +use op_alloy_rpc_types::{L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_evm::RethL1BlockInfo; -use reth_optimism_forks::OptimismHardforks; +use reth_optimism_forks::OpHardforks; use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; -use reth_provider::ChainSpecProvider; +use reth_provider::{ChainSpecProvider, ReceiptProvider, TransactionsProvider}; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; use crate::{OpEthApi, OpEthApiError}; @@ -19,19 +21,19 @@ impl LoadReceipt for OpEthApi where Self: Send + Sync, N: FullNodeComponents>, + Self::Provider: + TransactionsProvider + ReceiptProvider, { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - async fn build_transaction_receipt( &self, tx: TransactionSigned, meta: TransactionMeta, receipt: Receipt, ) -> Result, Self::Error> { - let (block, receipts) = LoadReceipt::cache(self) + let (block, receipts) = self + .inner + .eth_api + .cache() .get_block_and_receipts(meta.block_hash) .await .map_err(Self::Error::from_eth_err)? @@ -43,7 +45,7 @@ where reth_optimism_evm::extract_l1_info(&block.body).map_err(OpEthApiError::from)?; Ok(OpReceiptBuilder::new( - &self.inner.provider().chain_spec(), + &self.inner.eth_api.provider().chain_spec(), &tx, meta, &receipt, @@ -56,10 +58,10 @@ where /// L1 fee and data gas for a non-deposit transaction, or deposit nonce and receipt version for a /// deposit transaction. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Clone)] pub struct OpReceiptFieldsBuilder { /// Block timestamp. - pub l1_block_timestamp: u64, + pub block_timestamp: u64, /// The L1 fee for transaction. pub l1_fee: Option, /// L1 gas used by transaction. @@ -86,8 +88,19 @@ pub struct OpReceiptFieldsBuilder { impl OpReceiptFieldsBuilder { /// Returns a new builder. - pub fn new(block_timestamp: u64) -> Self { - Self { l1_block_timestamp: block_timestamp, ..Default::default() } + pub const fn new(block_timestamp: u64) -> Self { + Self { + block_timestamp, + l1_fee: None, + l1_data_gas: None, + l1_fee_scalar: None, + l1_base_fee: None, + deposit_nonce: None, + deposit_receipt_version: None, + l1_base_fee_scalar: None, + l1_blob_base_fee: None, + l1_blob_base_fee_scalar: None, + } } /// Applies [`L1BlockInfo`](revm::L1BlockInfo). @@ -98,7 +111,7 @@ impl OpReceiptFieldsBuilder { l1_block_info: revm::L1BlockInfo, ) -> Result { let raw_tx = tx.encoded_2718(); - let timestamp = self.l1_block_timestamp; + let timestamp = self.block_timestamp; self.l1_fee = Some( l1_block_info @@ -142,7 +155,7 @@ impl OpReceiptFieldsBuilder { /// Builds the [`OpTransactionReceiptFields`] object. 
pub const fn build(self) -> OpTransactionReceiptFields { let Self { - l1_block_timestamp: _, // used to compute other fields + block_timestamp: _, // used to compute other fields l1_fee, l1_data_gas: l1_gas_used, l1_fee_scalar, @@ -174,9 +187,7 @@ impl OpReceiptFieldsBuilder { #[derive(Debug)] pub struct OpReceiptBuilder { /// Core receipt; it has all the fields of an L1 receipt and is the basis for the OP receipt. - pub core_receipt: TransactionReceipt>, - /// Transaction type. - pub tx_type: TxType, + pub core_receipt: TransactionReceipt>, /// Additional OP receipt fields. pub op_receipt_fields: OpTransactionReceiptFields, } @@ -191,83 +202,46 @@ impl OpReceiptBuilder { all_receipts: &[Receipt], l1_block_info: revm::L1BlockInfo, ) -> Result { - let ReceiptBuilder { base: core_receipt, .. } = - ReceiptBuilder::new(transaction, meta, receipt, all_receipts) - .map_err(OpEthApiError::Eth)?; - - let tx_type = transaction.tx_type(); - - let op_receipt_fields = OpReceiptFieldsBuilder::default() + let timestamp = meta.timestamp; + let core_receipt = + build_receipt(transaction, meta, receipt, all_receipts, |receipt_with_bloom| { + match receipt.tx_type { + TxType::Legacy => OpReceiptEnvelope::::Legacy(receipt_with_bloom), + TxType::Eip2930 => OpReceiptEnvelope::::Eip2930(receipt_with_bloom), + TxType::Eip1559 => OpReceiptEnvelope::::Eip1559(receipt_with_bloom), + TxType::Eip4844 => { + // TODO: unreachable + OpReceiptEnvelope::::Eip1559(receipt_with_bloom) + } + TxType::Eip7702 => OpReceiptEnvelope::::Eip7702(receipt_with_bloom), + TxType::Deposit => { + OpReceiptEnvelope::::Deposit(OpDepositReceiptWithBloom:: { + receipt: OpDepositReceipt:: { + inner: receipt_with_bloom.receipt, + deposit_nonce: receipt.deposit_nonce, + deposit_receipt_version: receipt.deposit_receipt_version, + }, + logs_bloom: receipt_with_bloom.logs_bloom, + }) + } + } + })?; + + let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp) + .l1_block_info(chain_spec, transaction, l1_block_info)? + .deposit_nonce(receipt.deposit_nonce) + .deposit_version(receipt.deposit_receipt_version) + .build(); - Ok(Self { core_receipt, tx_type, op_receipt_fields }) + Ok(Self { core_receipt, op_receipt_fields }) } /// Builds [`OpTransactionReceipt`] by combining core (L1) receipt fields and additional OP /// receipt fields. pub fn build(self) -> OpTransactionReceipt { - let Self { core_receipt, tx_type, op_receipt_fields } = self; - - let OpTransactionReceiptFields { l1_block_info, deposit_nonce, deposit_receipt_version } = - op_receipt_fields; - - let TransactionReceipt { - inner: AnyReceiptEnvelope { inner: receipt_with_bloom, .. 
}, - transaction_hash, - transaction_index, - block_hash, - block_number, - gas_used, - effective_gas_price, - blob_gas_used, - blob_gas_price, - from, - to, - contract_address, - state_root, - authorization_list, - } = core_receipt; - - let inner = match tx_type { - TxType::Legacy => OpReceiptEnvelope::::Legacy(receipt_with_bloom), - TxType::Eip2930 => OpReceiptEnvelope::::Eip2930(receipt_with_bloom), - TxType::Eip1559 => OpReceiptEnvelope::::Eip1559(receipt_with_bloom), - TxType::Eip4844 => { - // TODO: unreachable - OpReceiptEnvelope::::Eip1559(receipt_with_bloom) - } - TxType::Eip7702 => OpReceiptEnvelope::::Eip7702(receipt_with_bloom), - TxType::Deposit => { - OpReceiptEnvelope::::Deposit(OpDepositReceiptWithBloom:: { - receipt: OpDepositReceipt:: { - inner: receipt_with_bloom.receipt, - deposit_nonce, - deposit_receipt_version, - }, - logs_bloom: receipt_with_bloom.logs_bloom, - }) - } - }; + let Self { core_receipt: inner, op_receipt_fields } = self; - let inner = TransactionReceipt::> { - inner, - transaction_hash, - transaction_index, - block_hash, - block_number, - gas_used, - effective_gas_price, - blob_gas_used, - blob_gas_price, - from, - to, - contract_address, - state_root, - authorization_list, - }; + let OpTransactionReceiptFields { l1_block_info, .. } = op_receipt_fields; OpTransactionReceipt { inner, l1_block_info } } @@ -275,13 +249,12 @@ impl OpReceiptBuilder { #[cfg(test)] mod test { + use super::*; use alloy_primitives::hex; use op_alloy_network::eip2718::Decodable2718; - use reth_optimism_chainspec::OP_MAINNET; + use reth_optimism_chainspec::{BASE_MAINNET, OP_MAINNET}; use reth_primitives::{Block, BlockBody}; - use super::*; - /// OP Mainnet transaction at index 0 in block 124665056. /// /// @@ -384,4 +357,46 @@ mod test { "incorrect l1 blob base fee scalar" ); } + + // + #[test] + fn base_receipt_gas_fields() { + // https://basescan.org/tx/0x510fd4c47d78ba9f97c91b0f2ace954d5384c169c9545a77a373cf3ef8254e6e + let system = hex!("7ef8f8a0389e292420bcbf9330741f72074e39562a09ff5a00fd22e4e9eee7e34b81bca494deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e20000008dd00101c120000000000000004000000006721035b00000000014189960000000000000000000000000000000000000000000000000000000349b4dcdc000000000000000000000000000000000000000000000000000000004ef9325cc5991ce750960f636ca2ffbb6e209bb3ba91412f21dd78c14ff154d1930f1f9a0000000000000000000000005050f69a9786f081509234f1a7f4684b5e5b76c9"); + let tx_0 = TransactionSigned::decode_2718(&mut &system[..]).unwrap(); + + let block = Block { + body: BlockBody { transactions: vec![tx_0], ..Default::default() }, + ..Default::default() + }; + let l1_block_info = + reth_optimism_evm::extract_l1_info(&block.body).expect("should extract l1 info"); + + // https://basescan.org/tx/0xf9420cbaf66a2dda75a015488d37262cbfd4abd0aad7bb2be8a63e14b1fa7a94 + let tx = hex!("02f86c8221058034839a4ae283021528942f16386bb37709016023232523ff6d9daf444be380841249c58bc080a001b927eda2af9b00b52a57be0885e0303c39dd2831732e14051c2336470fd468a0681bf120baf562915841a48601c2b54a6742511e535cf8f71c95115af7ff63bd"); + let tx_1 = TransactionSigned::decode_2718(&mut &tx[..]).unwrap(); + + let receipt_meta = OpReceiptFieldsBuilder::new(1730216981) + .l1_block_info(&BASE_MAINNET, &tx_1, l1_block_info) + .expect("should parse revm l1 info") + .build(); + + let L1BlockInfo { + l1_gas_price, + l1_gas_used, + l1_fee, + l1_fee_scalar, + l1_base_fee_scalar, + l1_blob_base_fee, + l1_blob_base_fee_scalar, + } = receipt_meta.l1_block_info; + + 
assert_eq!(l1_gas_price, Some(14121491676), "incorrect l1 base fee (former gas price)"); + assert_eq!(l1_gas_used, Some(1600), "incorrect l1 gas used"); + assert_eq!(l1_fee, Some(191150293412), "incorrect l1 fee"); + assert!(l1_fee_scalar.is_none(), "incorrect l1 fee scalar"); + assert_eq!(l1_base_fee_scalar, Some(2269), "incorrect l1 base fee scalar"); + assert_eq!(l1_blob_base_fee, Some(1324954204), "incorrect l1 blob base fee"); + assert_eq!(l1_blob_base_fee_scalar, Some(1055762), "incorrect l1 blob base fee scalar"); + } } diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index b7575c24416..468b46d97eb 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,42 +1,39 @@ //! Loads and formats OP transaction RPC response. -use alloy_consensus::Transaction as _; -use alloy_primitives::{Bytes, B256}; -use alloy_rpc_types::TransactionInfo; +use alloy_consensus::{Signed, Transaction as _}; +use alloy_primitives::{Bytes, PrimitiveSignature as Signature, Sealable, Sealed, B256}; +use alloy_rpc_types_eth::TransactionInfo; +use op_alloy_consensus::OpTxEnvelope; use op_alloy_rpc_types::Transaction; use reth_node_api::FullNodeComponents; -use reth_primitives::TransactionSignedEcRecovered; -use reth_provider::{BlockReaderIdExt, TransactionsProvider}; -use reth_rpc::eth::EthTxBuilder; +use reth_primitives::{RecoveredTx, TransactionSigned}; +use reth_provider::{ + BlockReader, BlockReaderIdExt, ProviderTx, ReceiptProvider, TransactionsProvider, +}; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - FromEthApiError, FullEthApiTypes, TransactionCompat, + FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, TransactionCompat, }; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthStateCache}; +use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; -use crate::{OpEthApi, SequencerClient}; +use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError, SequencerClient}; impl EthTransactions for OpEthApi where - Self: LoadTransaction, - N: FullNodeComponents, + Self: LoadTransaction, + N: OpNodeCore>>, { - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - - fn signers(&self) -> &parking_lot::RwLock>> { - self.inner.signers() + fn signers(&self) -> &parking_lot::RwLock>>>> { + self.inner.eth_api.signers() } /// Decodes and recovers the transaction and submits it to the pool. /// /// Returns the hash of the transaction. async fn send_raw_transaction(&self, tx: Bytes) -> Result { - let recovered = recover_raw_transaction(tx.clone())?; - let pool_transaction = - ::Transaction::from_pooled(recovered.into()); + let recovered = recover_raw_transaction(&tx)?; + let pool_transaction = ::Transaction::from_pooled(recovered); // On optimism, transactions are forwarded directly to the sequencer to be included in // blocks that it builds. 
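The next hunk trims the `LoadTransaction` impl down to trait bounds. The forwarding behavior referenced in the comment above lives in the body of `send_raw_transaction` and looks roughly like this (a sketch: the exact `SequencerClient` method and error handling are assumptions, not shown in this diff):

```rust
// Sketch of the forwarding branch inside `send_raw_transaction`.
if let Some(client) = self.raw_tx_forwarder() {
    tracing::debug!(target: "rpc::eth", "forwarding raw transaction to the sequencer");
    // A failed forward is logged rather than failing the local submission.
    if let Err(err) = client.forward_raw_transaction(&tx).await {
        tracing::warn!(target: "rpc::eth", %err, "failed to forward raw transaction");
    }
}
```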
@@ -60,66 +57,132 @@ where impl LoadTransaction for OpEthApi where - Self: SpawnBlocking + FullEthApiTypes, - N: FullNodeComponents, + Self: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt, + N: OpNodeCore, + Self::Pool: TransactionPool, { - type Pool = N::Pool; - - fn provider(&self) -> impl TransactionsProvider { - self.inner.provider() - } - - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - fn pool(&self) -> &Self::Pool { - self.inner.pool() - } } impl OpEthApi where - N: FullNodeComponents, + N: OpNodeCore, { /// Returns the [`SequencerClient`] if one is set. pub fn raw_tx_forwarder(&self) -> Option { - self.sequencer_client.clone() + self.inner.sequencer_client.clone() } } -/// Builds OP transaction response type. -#[derive(Clone, Debug, Copy)] -pub struct OpTxBuilder; - -impl TransactionCompat for OpTxBuilder { +impl TransactionCompat for OpEthApi +where + N: FullNodeComponents>, +{ type Transaction = Transaction; - - fn fill(tx: TransactionSignedEcRecovered, tx_info: TransactionInfo) -> Self::Transaction { - let signed_tx = tx.clone().into_signed(); - - let mut inner = EthTxBuilder::fill(tx, tx_info).inner; - - if signed_tx.is_deposit() { - inner.gas_price = Some(signed_tx.max_fee_per_gas()) - } - - Transaction { - inner, - source_hash: signed_tx.source_hash(), - mint: signed_tx.mint(), - // only include is_system_tx if true: - is_system_tx: (signed_tx.is_deposit() && signed_tx.is_system_transaction()) - .then_some(true), - deposit_receipt_version: None, // todo: how to fill this field? - } + type Error = OpEthApiError; + + fn fill( + &self, + tx: RecoveredTx, + tx_info: TransactionInfo, + ) -> Result { + let from = tx.signer(); + let hash = tx.hash(); + let TransactionSigned { transaction, signature, .. } = tx.into_signed(); + let mut deposit_receipt_version = None; + let mut deposit_nonce = None; + + let inner = match transaction { + reth_primitives::Transaction::Legacy(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip2930(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip1559(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip4844(_) => unreachable!(), + reth_primitives::Transaction::Eip7702(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Deposit(tx) => { + self.inner + .eth_api + .provider() + .receipt_by_hash(hash) + .map_err(Self::Error::from_eth_err)? + .inspect(|receipt| { + deposit_receipt_version = receipt.deposit_receipt_version; + deposit_nonce = receipt.deposit_nonce; + }); + + OpTxEnvelope::Deposit(tx.seal_unchecked(hash)) + } + }; + + let TransactionInfo { + block_hash, block_number, index: transaction_index, base_fee, .. 
+ } = tx_info; + + let effective_gas_price = if inner.is_deposit() { + // For deposits, we must always set the `gasPrice` field to 0 in rpc + // deposit tx don't have a gas price field, but serde of `Transaction` will take care of + // it + 0 + } else { + base_fee + .map(|base_fee| { + inner.effective_tip_per_gas(base_fee as u64).unwrap_or_default() + base_fee + }) + .unwrap_or_else(|| inner.max_fee_per_gas()) + }; + + Ok(Transaction { + inner: alloy_rpc_types_eth::Transaction { + inner, + block_hash, + block_number, + transaction_index, + from, + effective_gas_price: Some(effective_gas_price), + }, + deposit_nonce, + deposit_receipt_version, + }) } - fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { - tx.inner.input = tx.inner.input.slice(..4); + fn build_simulate_v1_transaction( + &self, + request: alloy_rpc_types_eth::TransactionRequest, + ) -> Result { + let Ok(tx) = request.build_typed_tx() else { + return Err(OpEthApiError::Eth(EthApiError::TransactionConversionError)) + }; + + // Create an empty signature for the transaction. + let signature = Signature::new(Default::default(), Default::default(), false); + Ok(TransactionSigned::new_unhashed(tx.into(), signature)) } - fn tx_type(tx: &Self::Transaction) -> u8 { - tx.inner.transaction_type.unwrap_or_default() + fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { + let input = match &mut tx.inner.inner { + OpTxEnvelope::Eip1559(tx) => &mut tx.tx_mut().input, + OpTxEnvelope::Eip2930(tx) => &mut tx.tx_mut().input, + OpTxEnvelope::Legacy(tx) => &mut tx.tx_mut().input, + OpTxEnvelope::Eip7702(tx) => &mut tx.tx_mut().input, + OpTxEnvelope::Deposit(tx) => { + let (mut deposit, hash) = std::mem::replace( + tx, + Sealed::new_unchecked(Default::default(), Default::default()), + ) + .split(); + deposit.input = deposit.input.slice(..4); + let mut deposit = deposit.seal_unchecked(hash); + std::mem::swap(tx, &mut deposit); + return + } + _ => return, + }; + *input = input.slice(..4); } } diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs index e3fef7adb5b..b76058ce531 100644 --- a/crates/optimism/rpc/src/lib.rs +++ b/crates/optimism/rpc/src/lib.rs @@ -12,8 +12,10 @@ pub mod error; pub mod eth; +pub mod miner; pub mod sequencer; +pub mod witness; -pub use error::{OpEthApiError, OptimismInvalidTransactionError, SequencerClientError}; -pub use eth::{transaction::OpTxBuilder, OpEthApi, OpReceiptBuilder}; +pub use error::{OpEthApiError, OpInvalidTransactionError, SequencerClientError}; +pub use eth::{OpEthApi, OpReceiptBuilder}; pub use sequencer::SequencerClient; diff --git a/crates/optimism/rpc/src/miner.rs b/crates/optimism/rpc/src/miner.rs new file mode 100644 index 00000000000..bfdee467647 --- /dev/null +++ b/crates/optimism/rpc/src/miner.rs @@ -0,0 +1,32 @@ +//! Miner API extension for OP. + +use alloy_primitives::U64; +use jsonrpsee_core::{async_trait, RpcResult}; +pub use op_alloy_rpc_jsonrpsee::traits::MinerApiExtServer; +use reth_optimism_payload_builder::config::OpDAConfig; +use tracing::debug; + +/// Miner API extension for OP, exposes settings for the data availability configuration via the +/// `miner_` API. +#[derive(Debug, Clone)] +pub struct OpMinerExtApi { + da_config: OpDAConfig, +} + +impl OpMinerExtApi { + /// Instantiate the miner API extension with the given, sharable data availability + /// configuration. 
+ pub const fn new(da_config: OpDAConfig) -> Self { + Self { da_config } + } +} + +#[async_trait] +impl MinerApiExtServer for OpMinerExtApi { + /// Handler for `miner_setMaxDASize` RPC method. + async fn set_max_da_size(&self, max_tx_size: U64, max_block_size: U64) -> RpcResult<()> { + debug!(target: "rpc", "Setting max DA size: tx={}, block={}", max_tx_size, max_block_size); + self.da_config.set_max_da_size(max_tx_size.to(), max_block_size.to()); + Ok(()) + } +} diff --git a/crates/optimism/rpc/src/witness.rs b/crates/optimism/rpc/src/witness.rs new file mode 100644 index 00000000000..d533bb187d9 --- /dev/null +++ b/crates/optimism/rpc/src/witness.rs @@ -0,0 +1,102 @@ +//! Support for optimism specific witness RPCs. + +use alloy_consensus::Header; +use alloy_primitives::B256; +use alloy_rpc_types_debug::ExecutionWitness; +use jsonrpsee_core::{async_trait, RpcResult}; +use op_alloy_rpc_types_engine::OpPayloadAttributes; +use reth_chainspec::ChainSpecProvider; +use reth_evm::ConfigureEvm; +use reth_optimism_chainspec::OpChainSpec; +use reth_optimism_payload_builder::OpPayloadBuilder; +use reth_primitives::{SealedHeader, TransactionSigned}; +use reth_provider::{BlockReaderIdExt, ProviderError, ProviderResult, StateProviderFactory}; +pub use reth_rpc_api::DebugExecutionWitnessApiServer; +use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; +use reth_tasks::TaskSpawner; +use std::{fmt::Debug, sync::Arc}; +use tokio::sync::{oneshot, Semaphore}; + +/// An extension to the `debug_` namespace of the RPC API. +pub struct OpDebugWitnessApi { + inner: Arc>, +} + +impl OpDebugWitnessApi { + /// Creates a new instance of the `OpDebugWitnessApi`. + pub fn new( + provider: Provider, + evm_config: EvmConfig, + task_spawner: Box, + ) -> Self { + let builder = OpPayloadBuilder::new(evm_config); + let semaphore = Arc::new(Semaphore::new(3)); + let inner = OpDebugWitnessApiInner { provider, builder, task_spawner, semaphore }; + Self { inner: Arc::new(inner) } + } +} + +impl OpDebugWitnessApi +where + Provider: BlockReaderIdExt
, +{ + /// Fetches the parent header by hash. + fn parent_header(&self, parent_block_hash: B256) -> ProviderResult { + self.inner + .provider + .sealed_header_by_hash(parent_block_hash)? + .ok_or_else(|| ProviderError::HeaderNotFound(parent_block_hash.into())) + } +} + +#[async_trait] +impl DebugExecutionWitnessApiServer + for OpDebugWitnessApi +where + Provider: BlockReaderIdExt
+ + StateProviderFactory + + ChainSpecProvider + + Clone + + 'static, + EvmConfig: ConfigureEvm
+ 'static, +{ + async fn execute_payload( + &self, + parent_block_hash: B256, + attributes: OpPayloadAttributes, + ) -> RpcResult { + let _permit = self.inner.semaphore.acquire().await; + + let parent_header = self.parent_header(parent_block_hash).to_rpc_result()?; + + let (tx, rx) = oneshot::channel(); + let this = self.clone(); + self.inner.task_spawner.spawn_blocking(Box::pin(async move { + let res = + this.inner.builder.payload_witness(&this.inner.provider, parent_header, attributes); + let _ = tx.send(res); + })); + + rx.await + .map_err(|err| internal_rpc_err(err.to_string()))? + .map_err(|err| internal_rpc_err(err.to_string())) + } +} + +impl Clone for OpDebugWitnessApi { + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} +impl Debug for OpDebugWitnessApi { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("OpDebugWitnessApi").finish_non_exhaustive() + } +} + +struct OpDebugWitnessApiInner { + provider: Provider, + builder: OpPayloadBuilder, + task_spawner: Box, + semaphore: Arc, +} diff --git a/crates/optimism/storage/Cargo.toml b/crates/optimism/storage/Cargo.toml index 107b64db3de..b72e9c287df 100644 --- a/crates/optimism/storage/Cargo.toml +++ b/crates/optimism/storage/Cargo.toml @@ -20,4 +20,8 @@ reth-prune-types.workspace = true reth-stages-types.workspace = true [features] -optimism = ["reth-primitives/optimism"] \ No newline at end of file +optimism = [ + "reth-primitives/optimism", + "reth-codecs/op", + "reth-db-api/optimism" +] diff --git a/crates/optimism/storage/src/lib.rs b/crates/optimism/storage/src/lib.rs index d435ed1d884..0db8f4e20a9 100644 --- a/crates/optimism/storage/src/lib.rs +++ b/crates/optimism/storage/src/lib.rs @@ -13,10 +13,10 @@ mod tests { use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; use reth_db_api::models::{ - CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockOmmers, + CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockWithdrawals, }; - use reth_primitives::{Account, Receipt, ReceiptWithBloom, Requests, Withdrawals}; + use reth_primitives::{Account, Receipt}; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, @@ -40,14 +40,11 @@ mod tests { assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); assert_eq!(Receipt::bitflag_encoded_bytes(), 2); - assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); // In case of failure, refer to the documentation of the // [`validate_bitflag_backwards_compat`] macro for detailed instructions on handling @@ -66,14 +63,10 @@ mod tests { validate_bitflag_backwards_compat!(PruneMode, UnusedBits::Zero); validate_bitflag_backwards_compat!(PruneSegment, UnusedBits::Zero); validate_bitflag_backwards_compat!(Receipt, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(ReceiptWithBloom, UnusedBits::Zero); validate_bitflag_backwards_compat!(StageCheckpoint, 
UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); - validate_bitflag_backwards_compat!(StoredBlockOmmers, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(Withdrawals, UnusedBits::Zero); - validate_bitflag_backwards_compat!(Requests, UnusedBits::Zero); } } diff --git a/crates/payload/basic/Cargo.toml b/crates/payload/basic/Cargo.toml index 939eb5b54b7..0315f73cae4 100644 --- a/crates/payload/basic/Cargo.toml +++ b/crates/payload/basic/Cargo.toml @@ -15,17 +15,22 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives.workspace = true -reth-revm.workspace = true +reth-primitives-traits.workspace = true reth-transaction-pool.workspace = true reth-provider.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true +reth-evm.workspace = true +reth-revm.workspace=true # ethereum alloy-rlp.workspace = true alloy-primitives.workspace = true revm.workspace = true +alloy-consensus.workspace = true +alloy-eips.workspace = true # async tokio = { workspace = true, features = ["sync", "time"] } diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index f9487ec784c..8e9c06865d0 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -9,22 +9,20 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use crate::metrics::PayloadBuilderMetrics; +use alloy_consensus::constants::EMPTY_WITHDRAWALS; +use alloy_eips::{eip4895::Withdrawals, merge::SLOT_DURATION}; use alloy_primitives::{Bytes, B256, U256}; use futures_core::ready; use futures_util::FutureExt; -use reth_chainspec::{ChainSpec, EthereumHardforks}; -use reth_payload_builder::{ - database::CachedReads, KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator, -}; -use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError}; -use reth_primitives::{ - constants::{EMPTY_WITHDRAWALS, RETH_CLIENT_VERSION, SLOT_DURATION}, - proofs, BlockNumberOrTag, SealedBlock, Withdrawals, -}; -use reth_provider::{ - BlockReaderIdExt, BlockSource, CanonStateNotification, ProviderError, StateProviderFactory, -}; -use reth_revm::state_change::post_block_withdrawals_balance_increments; +use reth_chainspec::EthereumHardforks; +use reth_evm::state_change::post_block_withdrawals_balance_increments; +use reth_payload_builder::{KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator}; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadKind}; +use reth_primitives::{proofs, SealedHeader}; +use reth_primitives_traits::constants::RETH_CLIENT_VERSION; +use reth_provider::{BlockReaderIdExt, CanonStateNotification, StateProviderFactory}; +use reth_revm::cached::CachedReads; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; use revm::{Database, State}; @@ -44,6 +42,9 @@ use tokio::{ use tracing::{debug, trace, warn}; mod metrics; +mod stack; + +pub use stack::PayloadBuilderStack; /// The [`PayloadJobGenerator`] that creates [`BasicPayloadJob`]s. 
#[derive(Debug)] @@ -92,10 +93,11 @@ impl BasicPayloadJobGenerator Client software SHOULD stop the updating process when either a call to engine_getPayload - // > with the build process's payloadId is made or SECONDS_PER_SLOT (12s in the Mainnet - // > configuration) have passed since the point in time identified by the timestamp parameter. - // See also + /// > Client software SHOULD stop the updating process when either a call to engine_getPayload + /// > with the build process's payloadId is made or SECONDS_PER_SLOT (12s in the Mainnet + /// > configuration) have passed since the point in time identified by the timestamp parameter. + /// + /// See also #[inline] fn max_job_duration(&self, unix_timestamp: u64) -> Duration { let duration_until_timestamp = duration_until(unix_timestamp); @@ -118,7 +120,7 @@ impl BasicPayloadJobGenerator Option { self.pre_cached.as_ref().filter(|pc| pc.block == parent).map(|pc| pc.cached.clone()) @@ -130,7 +132,11 @@ impl BasicPayloadJobGenerator PayloadJobGenerator for BasicPayloadJobGenerator where - Client: StateProviderFactory + BlockReaderIdExt + Clone + Unpin + 'static, + Client: StateProviderFactory + + BlockReaderIdExt
+ + Clone + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + Unpin + 'static, Builder: PayloadBuilder + Unpin + 'static, @@ -143,29 +149,30 @@ where &self, attributes: ::PayloadAttributes, ) -> Result { - let parent_block = if attributes.parent().is_zero() { - // use latest block if parent is zero: genesis block + let parent_header = if attributes.parent().is_zero() { + // Use latest header for genesis block case self.client - .block_by_number_or_tag(BlockNumberOrTag::Latest)? - .ok_or_else(|| PayloadBuilderError::MissingParentBlock(attributes.parent()))? - .seal_slow() + .latest_header() + .map_err(PayloadBuilderError::from)? + .ok_or_else(|| PayloadBuilderError::MissingParentHeader(B256::ZERO))? } else { - let block = self - .client - .find_block_by_hash(attributes.parent(), BlockSource::Any)? - .ok_or_else(|| PayloadBuilderError::MissingParentBlock(attributes.parent()))?; - - // we already know the hash, so we can seal it - block.seal(attributes.parent()) + // Fetch specific header by hash + self.client + .sealed_header_by_hash(attributes.parent()) + .map_err(PayloadBuilderError::from)? + .ok_or_else(|| PayloadBuilderError::MissingParentHeader(attributes.parent()))? }; - let config = - PayloadConfig::new(Arc::new(parent_block), self.config.extradata.clone(), attributes); + let config = PayloadConfig::new( + Arc::new(parent_header.clone()), + self.config.extradata.clone(), + attributes, + ); let until = self.job_deadline(config.attributes.timestamp()); let deadline = Box::pin(tokio::time::sleep_until(until)); - let cached_reads = self.maybe_pre_cached(config.parent_block.hash()); + let cached_reads = self.maybe_pre_cached(parent_header.hash()); let mut job = BasicPayloadJob { config, @@ -175,7 +182,7 @@ where deadline, // ticks immediately interval: tokio::time::interval(self.config.interval), - best_payload: None, + best_payload: PayloadState::Missing, pending_block: None, cached_reads, payload_task_guard: self.payload_task_guard.clone(), @@ -321,8 +328,8 @@ where deadline: Pin>, /// The interval at which the job should build a new payload after the last. interval: Interval, - /// The best payload so far. - best_payload: Option, + /// The best payload so far and its state. + best_payload: PayloadState, /// Receiver for the block that is currently being built. pending_block: Option>, /// Restricts how many generator tasks can be executed at once. 
@@ -359,7 +366,7 @@ where let _cancel = cancel.clone(); let guard = self.payload_task_guard.clone(); let payload_config = self.config.clone(); - let best_payload = self.best_payload.clone(); + let best_payload = self.best_payload.payload().cloned(); self.metrics.inc_initiated_payload_builds(); let cached_reads = self.cached_reads.take().unwrap_or_default(); let builder = self.builder.clone(); @@ -404,8 +411,9 @@ where // check if the interval is reached while this.interval.poll_tick(cx).is_ready() { - // start a new job if there is no pending block and we haven't reached the deadline - if this.pending_block.is_none() { + // start a new job if there is no pending block, we haven't reached the deadline, + // and the payload isn't frozen + if this.pending_block.is_none() && !this.best_payload.is_frozen() { this.spawn_build_job(); } } @@ -417,7 +425,11 @@ where BuildOutcome::Better { payload, cached_reads } => { this.cached_reads = Some(cached_reads); debug!(target: "payload_builder", value = %payload.fees(), "built better payload"); - this.best_payload = Some(payload); + this.best_payload = PayloadState::Best(payload); + } + BuildOutcome::Freeze(payload) => { + debug!(target: "payload_builder", "payload frozen, no further building will occur"); + this.best_payload = PayloadState::Frozen(payload); } BuildOutcome::Aborted { fees, cached_reads } => { this.cached_reads = Some(cached_reads); @@ -456,26 +468,29 @@ where type BuiltPayload = Builder::BuiltPayload; fn best_payload(&self) -> Result { - if let Some(ref payload) = self.best_payload { - return Ok(payload.clone()) + if let Some(payload) = self.best_payload.payload() { + Ok(payload.clone()) + } else { + // No payload has been built yet, but we need to return something that the CL then + // can deliver, so we need to return an empty payload. + // + // Note: it is assumed that this is unlikely to happen, as the payload job is + // started right away and the first full block should have been + // built by the time CL is requesting the payload. + self.metrics.inc_requested_empty_payload(); + self.builder.build_empty_payload(&self.client, self.config.clone()) } - // No payload has been built yet, but we need to return something that the CL then can - // deliver, so we need to return an empty payload. - // - // Note: it is assumed that this is unlikely to happen, as the payload job is started right - // away and the first full block should have been built by the time CL is requesting the - // payload. - self.metrics.inc_requested_empty_payload(); - self.builder.build_empty_payload(&self.client, self.config.clone()) } fn payload_attributes(&self) -> Result { Ok(self.config.attributes.clone()) } - fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { - let best_payload = self.best_payload.take(); - + fn resolve_kind( + &mut self, + kind: PayloadKind, + ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { + let best_payload = self.best_payload.payload().cloned(); if best_payload.is_none() && self.pending_block.is_none() { // ensure we have a job scheduled if we don't have a best payload yet and none is active self.spawn_build_job(); @@ -529,12 +544,44 @@ where }; } - let fut = ResolveBestPayload { best_payload, maybe_better, empty_payload }; + let fut = ResolveBestPayload { + best_payload, + maybe_better, + empty_payload: empty_payload.filter(|_| kind != PayloadKind::WaitForPending), + }; (fut, KeepPayloadJobAlive::No) } } +/// Represents the current state of a payload being built. 
+#[derive(Debug, Clone)] +pub enum PayloadState<P> { + /// No payload has been built yet. + Missing, + /// The best payload built so far, which may still be improved upon. + Best(P), + /// The payload is frozen and no further building should occur. + /// + /// Contains the final payload `P` that should be used. + Frozen(P), +} + +impl<P> PayloadState<P>
{ + /// Checks if the payload is frozen. + pub const fn is_frozen(&self) -> bool { + matches!(self, Self::Frozen(_)) + } + + /// Returns the payload if it exists (either Best or Frozen). + pub const fn payload(&self) -> Option<&P> { + match self { + Self::Missing => None, + Self::Best(p) | Self::Frozen(p) => Some(p), + } + } +} + /// The future that returns the best payload to be served to the consensus layer. /// /// This returns the payload that's supposed to be sent to the CL. @@ -573,7 +620,9 @@ where if let Some(fut) = Pin::new(&mut this.maybe_better).as_pin_mut() { if let Poll::Ready(res) = fut.poll(cx) { this.maybe_better = None; - if let Ok(BuildOutcome::Better { payload, .. }) = res { + if let Ok(Some(payload)) = res.map(|out| out.into_payload()) + .inspect_err(|err| warn!(target: "payload_builder", %err, "failed to resolve pending payload")) + { debug!(target: "payload_builder", "resolving better payload"); return Poll::Ready(Ok(payload)) } @@ -662,8 +711,8 @@ impl Drop for Cancelled { /// Static config for how to build a payload. #[derive(Clone, Debug)] pub struct PayloadConfig { - /// The parent block. - pub parent_block: Arc, + /// The parent header. + pub parent_header: Arc, /// Block extra data. pub extra_data: Bytes, /// Requested attributes for the payload. @@ -683,11 +732,11 @@ where { /// Create new payload config. pub const fn new( - parent_block: Arc, + parent_header: Arc, extra_data: Bytes, attributes: Attributes, ) -> Self { - Self { parent_block, extra_data, attributes } + Self { parent_header, extra_data, attributes } } /// Returns the payload id. @@ -715,13 +764,16 @@ pub enum BuildOutcome { }, /// Build job was cancelled Cancelled, + + /// The payload is final and no further building should occur + Freeze(Payload), } impl BuildOutcome { /// Consumes the type and returns the payload if the outcome is `Better` or `Freeze`. pub fn into_payload(self) -> Option { match self { - Self::Better { payload, .. } => Some(payload), + Self::Better { payload, .. } | Self::Freeze(payload) => Some(payload), _ => None, } } @@ -740,6 +792,52 @@ impl BuildOutcome { pub const fn is_cancelled(&self) -> bool { matches!(self, Self::Cancelled) } + + /// Applies a function to the current payload. + pub(crate) fn map_payload<F, P>(self, f: F) -> BuildOutcome<P>
+ where + F: FnOnce(Payload) -> P, + { + match self { + Self::Better { payload, cached_reads } => { + BuildOutcome::Better { payload: f(payload), cached_reads } + } + Self::Aborted { fees, cached_reads } => BuildOutcome::Aborted { fees, cached_reads }, + Self::Cancelled => BuildOutcome::Cancelled, + Self::Freeze(payload) => BuildOutcome::Freeze(f(payload)), + } + } +} + +/// The possible outcomes of a payload building attempt without reused [`CachedReads`] +#[derive(Debug)] +pub enum BuildOutcomeKind { + /// Successfully built a better block. + Better { + /// The new payload that was built. + payload: Payload, + }, + /// Aborted payload building because resulted in worse block wrt. fees. + Aborted { + /// The total fees associated with the attempted payload. + fees: U256, + }, + /// Build job was cancelled + Cancelled, + /// The payload is final and no further building should occur + Freeze(Payload), +} + +impl BuildOutcomeKind { + /// Attaches the [`CachedReads`] to the outcome. + pub fn with_cached_reads(self, cached_reads: CachedReads) -> BuildOutcome { + match self { + Self::Better { payload } => BuildOutcome::Better { payload, cached_reads }, + Self::Aborted { fees } => BuildOutcome::Aborted { fees, cached_reads }, + Self::Cancelled => BuildOutcome::Cancelled, + Self::Freeze(payload) => BuildOutcome::Freeze(payload), + } + } } /// A collection of arguments used for building payloads. @@ -789,6 +887,21 @@ impl BuildArguments(self, f: F) -> BuildArguments + where + F: FnOnce(Pool) -> P, + { + BuildArguments { + client: self.client, + pool: f(self.pool), + cached_reads: self.cached_reads, + config: self.config, + cancel: self.cancel, + best_payload: self.best_payload, + } + } } /// A trait for building payloads that encapsulate Ethereum transactions. @@ -870,69 +983,42 @@ impl Default for MissingPayloadBehaviour { } } -/// Represents the outcome of committing withdrawals to the runtime database and post state. -/// Pre-shanghai these are `None` values. -#[derive(Default, Debug)] -pub struct WithdrawalsOutcome { - /// committed withdrawals, if any. - pub withdrawals: Option, - /// withdrawals root if any. - pub withdrawals_root: Option, -} - -impl WithdrawalsOutcome { - /// No withdrawals pre shanghai - pub const fn pre_shanghai() -> Self { - Self { withdrawals: None, withdrawals_root: None } - } - - /// No withdrawals - pub fn empty() -> Self { - Self { - withdrawals: Some(Withdrawals::default()), - withdrawals_root: Some(EMPTY_WITHDRAWALS), - } - } -} - /// Executes the withdrawals and commits them to the _runtime_ Database and `BundleState`. /// /// Returns the withdrawals root. 
/// /// Returns `None` values pre shanghai -pub fn commit_withdrawals>( +pub fn commit_withdrawals( db: &mut State, chain_spec: &ChainSpec, timestamp: u64, - withdrawals: Withdrawals, -) -> Result { + withdrawals: &Withdrawals, +) -> Result, DB::Error> +where + DB: Database, + ChainSpec: EthereumHardforks, +{ if !chain_spec.is_shanghai_active_at_timestamp(timestamp) { - return Ok(WithdrawalsOutcome::pre_shanghai()) + return Ok(None) } if withdrawals.is_empty() { - return Ok(WithdrawalsOutcome::empty()) + return Ok(Some(EMPTY_WITHDRAWALS)) } let balance_increments = - post_block_withdrawals_balance_increments(chain_spec, timestamp, &withdrawals); + post_block_withdrawals_balance_increments(chain_spec, timestamp, withdrawals); db.increment_balances(balance_increments)?; - let withdrawals_root = proofs::calculate_withdrawals_root(&withdrawals); - - // calculate withdrawals root - Ok(WithdrawalsOutcome { - withdrawals: Some(withdrawals), - withdrawals_root: Some(withdrawals_root), - }) + Ok(Some(proofs::calculate_withdrawals_root(withdrawals))) } /// Checks if the new payload is better than the current best. /// /// This compares the total fees of the blocks, higher is better. #[inline(always)] -pub fn is_better_payload(best_payload: Option, new_fees: U256) -> bool { +pub fn is_better_payload(best_payload: Option<&T>, new_fees: U256) -> bool { if let Some(best_payload) = best_payload { new_fees > best_payload.fees() } else { diff --git a/crates/payload/basic/src/stack.rs b/crates/payload/basic/src/stack.rs new file mode 100644 index 00000000000..45a3f3b4244 --- /dev/null +++ b/crates/payload/basic/src/stack.rs @@ -0,0 +1,271 @@ +use crate::{ + BuildArguments, BuildOutcome, PayloadBuilder, PayloadBuilderAttributes, PayloadBuilderError, + PayloadConfig, +}; + +use alloy_eips::eip4895::Withdrawals; +use alloy_primitives::{Address, B256, U256}; +use reth_payload_builder::PayloadId; +use reth_payload_primitives::BuiltPayload; +use reth_primitives::SealedBlock; + +use alloy_eips::eip7685::Requests; +use std::{error::Error, fmt}; + +/// hand rolled Either enum to handle two builder types +#[derive(Debug, Clone)] +pub enum Either { + /// left variant + Left(L), + /// right variant + Right(R), +} + +impl fmt::Display for Either +where + L: fmt::Display, + R: fmt::Display, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Left(l) => write!(f, "Left: {}", l), + Self::Right(r) => write!(f, "Right: {}", r), + } + } +} + +impl Error for Either +where + L: Error + 'static, + R: Error + 'static, +{ + fn source(&self) -> Option<&(dyn Error + 'static)> { + match self { + Self::Left(l) => Some(l), + Self::Right(r) => Some(r), + } + } +} + +impl PayloadBuilderAttributes for Either +where + L: PayloadBuilderAttributes, + R: PayloadBuilderAttributes, + L::Error: Error + 'static, + R::Error: Error + 'static, +{ + type RpcPayloadAttributes = Either; + type Error = Either; + + fn try_new( + parent: B256, + rpc_payload_attributes: Self::RpcPayloadAttributes, + version: u8, + ) -> Result { + match rpc_payload_attributes { + Either::Left(attr) => { + L::try_new(parent, attr, version).map(Either::Left).map_err(Either::Left) + } + Either::Right(attr) => { + R::try_new(parent, attr, version).map(Either::Right).map_err(Either::Right) + } + } + } + + fn payload_id(&self) -> PayloadId { + match self { + Self::Left(l) => l.payload_id(), + Self::Right(r) => r.payload_id(), + } + } + + fn parent(&self) -> B256 { + match self { + Self::Left(l) => l.parent(), + Self::Right(r) => r.parent(), + } 
+ } + + fn timestamp(&self) -> u64 { + match self { + Self::Left(l) => l.timestamp(), + Self::Right(r) => r.timestamp(), + } + } + + fn parent_beacon_block_root(&self) -> Option { + match self { + Self::Left(l) => l.parent_beacon_block_root(), + Self::Right(r) => r.parent_beacon_block_root(), + } + } + + fn suggested_fee_recipient(&self) -> Address { + match self { + Self::Left(l) => l.suggested_fee_recipient(), + Self::Right(r) => r.suggested_fee_recipient(), + } + } + + fn prev_randao(&self) -> B256 { + match self { + Self::Left(l) => l.prev_randao(), + Self::Right(r) => r.prev_randao(), + } + } + + fn withdrawals(&self) -> &Withdrawals { + match self { + Self::Left(l) => l.withdrawals(), + Self::Right(r) => r.withdrawals(), + } + } +} + +/// this structure enables the chaining of multiple `PayloadBuilder` implementations, +/// creating a hierarchical fallback system. It's designed to be nestable, allowing +/// for complex builder arrangements like `Stack, C>` with different +#[derive(Debug)] +pub struct PayloadBuilderStack { + left: L, + right: R, +} + +impl PayloadBuilderStack { + /// Creates a new `PayloadBuilderStack` with the given left and right builders. + pub const fn new(left: L, right: R) -> Self { + Self { left, right } + } +} + +impl Clone for PayloadBuilderStack +where + L: Clone, + R: Clone, +{ + fn clone(&self) -> Self { + Self::new(self.left.clone(), self.right.clone()) + } +} + +impl BuiltPayload for Either +where + L: BuiltPayload, + R: BuiltPayload, +{ + fn block(&self) -> &SealedBlock { + match self { + Self::Left(l) => l.block(), + Self::Right(r) => r.block(), + } + } + + fn fees(&self) -> U256 { + match self { + Self::Left(l) => l.fees(), + Self::Right(r) => r.fees(), + } + } + + fn requests(&self) -> Option { + match self { + Self::Left(l) => l.requests(), + Self::Right(r) => r.requests(), + } + } +} + +impl PayloadBuilder for PayloadBuilderStack +where + L: PayloadBuilder + Unpin + 'static, + R: PayloadBuilder + Unpin + 'static, + Client: Clone, + Pool: Clone, + L::Attributes: Unpin + Clone, + R::Attributes: Unpin + Clone, + L::BuiltPayload: Unpin + Clone, + R::BuiltPayload: Unpin + Clone, + <>::Attributes as PayloadBuilderAttributes>::Error: 'static, + <>::Attributes as PayloadBuilderAttributes>::Error: 'static, +{ + type Attributes = Either; + type BuiltPayload = Either; + + fn try_build( + &self, + args: BuildArguments, + ) -> Result, PayloadBuilderError> { + match args.config.attributes { + Either::Left(ref left_attr) => { + let left_args: BuildArguments = + BuildArguments { + client: args.client.clone(), + pool: args.pool.clone(), + cached_reads: args.cached_reads.clone(), + config: PayloadConfig { + parent_header: args.config.parent_header.clone(), + extra_data: args.config.extra_data.clone(), + attributes: left_attr.clone(), + }, + cancel: args.cancel.clone(), + best_payload: args.best_payload.clone().and_then(|payload| { + if let Either::Left(p) = payload { + Some(p) + } else { + None + } + }), + }; + + self.left.try_build(left_args).map(|out| out.map_payload(Either::Left)) + } + Either::Right(ref right_attr) => { + let right_args = BuildArguments { + client: args.client.clone(), + pool: args.pool.clone(), + cached_reads: args.cached_reads.clone(), + config: PayloadConfig { + parent_header: args.config.parent_header.clone(), + extra_data: args.config.extra_data.clone(), + attributes: right_attr.clone(), + }, + cancel: args.cancel.clone(), + best_payload: args.best_payload.clone().and_then(|payload| { + if let Either::Right(p) = payload { + Some(p) + } else 
{ + None + } + }), + }; + + self.right.try_build(right_args).map(|out| out.map_payload(Either::Right)) + } + } + } + + fn build_empty_payload( + &self, + client: &Client, + config: PayloadConfig, + ) -> Result { + match config.attributes { + Either::Left(left_attr) => { + let left_config = PayloadConfig { + attributes: left_attr, + parent_header: config.parent_header.clone(), + extra_data: config.extra_data.clone(), + }; + self.left.build_empty_payload(client, left_config).map(Either::Left) + } + Either::Right(right_attr) => { + let right_config = PayloadConfig { + parent_header: config.parent_header.clone(), + extra_data: config.extra_data.clone(), + attributes: right_attr, + }; + self.right.build_empty_payload(client, right_config).map(Either::Right) + } + } + } +} diff --git a/crates/payload/builder-primitives/Cargo.toml b/crates/payload/builder-primitives/Cargo.toml new file mode 100644 index 00000000000..6d89ea89d03 --- /dev/null +++ b/crates/payload/builder-primitives/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "reth-payload-builder-primitives" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-payload-primitives.workspace = true + +# alloy +alloy-rpc-types-engine = { workspace = true, features = ["serde"] } + +# async +async-trait.workspace = true +pin-project.workspace = true +tokio = { workspace = true, features = ["sync"] } +tokio-stream.workspace = true + +# misc +tracing.workspace = true diff --git a/crates/payload/primitives/src/events.rs b/crates/payload/builder-primitives/src/events.rs similarity index 98% rename from crates/payload/primitives/src/events.rs rename to crates/payload/builder-primitives/src/events.rs index 3fb3813adb1..d51f13f7c4c 100644 --- a/crates/payload/primitives/src/events.rs +++ b/crates/payload/builder-primitives/src/events.rs @@ -1,4 +1,4 @@ -use crate::PayloadTypes; +use reth_payload_primitives::PayloadTypes; use std::{ pin::Pin, task::{ready, Context, Poll}, diff --git a/crates/payload/builder-primitives/src/lib.rs b/crates/payload/builder-primitives/src/lib.rs new file mode 100644 index 00000000000..af7ad736d44 --- /dev/null +++ b/crates/payload/builder-primitives/src/lib.rs @@ -0,0 +1,18 @@ +//! This crate defines abstractions to create and update payloads (blocks) + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +mod events; +pub use crate::events::{Events, PayloadEvents}; + +/// Contains the payload builder trait to abstract over payload attributes. 
+mod traits; +pub use traits::{PayloadBuilder, PayloadStoreExt}; + +pub use reth_payload_primitives::PayloadBuilderError; diff --git a/crates/payload/builder-primitives/src/traits.rs b/crates/payload/builder-primitives/src/traits.rs new file mode 100644 index 00000000000..b5e8910b6c2 --- /dev/null +++ b/crates/payload/builder-primitives/src/traits.rs @@ -0,0 +1,111 @@ +use crate::{PayloadBuilderError, PayloadEvents}; +use alloy_rpc_types_engine::PayloadId; +use reth_payload_primitives::{PayloadKind, PayloadTypes}; +use std::fmt::Debug; +use tokio::sync::oneshot; + +/// A helper trait for internal usage to retrieve and resolve payloads. +#[async_trait::async_trait] +pub trait PayloadStoreExt: Debug + Send + Sync + Unpin { + /// Resolves the payload job and returns the best payload that has been built so far. + async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option>; + + /// Resolves the payload job as fast as possible and returns the best payload that has been + /// built so far. + async fn resolve(&self, id: PayloadId) -> Option> { + self.resolve_kind(id, PayloadKind::Earliest).await + } + + /// Returns the best payload for the given identifier. + async fn best_payload( + &self, + id: PayloadId, + ) -> Option>; + + /// Returns the payload attributes associated with the given identifier. + async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option>; +} + +#[async_trait::async_trait] +impl PayloadStoreExt for P +where + P: PayloadBuilder, +{ + async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option> { + Some(PayloadBuilder::resolve_kind(self, id, kind).await?.map_err(Into::into)) + } + + async fn best_payload( + &self, + id: PayloadId, + ) -> Option> { + Some(PayloadBuilder::best_payload(self, id).await?.map_err(Into::into)) + } + + async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option> { + Some(PayloadBuilder::payload_attributes(self, id).await?.map_err(Into::into)) + } +} + +/// A type that can request, subscribe to and resolve payloads. +#[async_trait::async_trait] +pub trait PayloadBuilder: Debug + Send + Sync + Unpin { + /// The Payload type for the builder. + type PayloadType: PayloadTypes; + /// The error type returned by the builder. + type Error: Into; + + /// Sends a message to the service to start building a new payload for the given payload + /// attributes. + /// + /// Returns a receiver that will receive the payload id. + fn send_new_payload( + &self, + attr: ::PayloadBuilderAttributes, + ) -> oneshot::Receiver>; + + /// Returns the best payload for the given identifier. + async fn best_payload( + &self, + id: PayloadId, + ) -> Option::BuiltPayload, Self::Error>>; + + /// Resolves the payload job and returns the best payload that has been built so far. + async fn resolve_kind( + &self, + id: PayloadId, + kind: PayloadKind, + ) -> Option::BuiltPayload, Self::Error>>; + + /// Resolves the payload job as fast as possible and returns the best payload that has been + /// built so far. + async fn resolve( + &self, + id: PayloadId, + ) -> Option::BuiltPayload, Self::Error>> { + self.resolve_kind(id, PayloadKind::Earliest).await + } + + /// Sends a message to the service to subscribe to payload events. + /// Returns a receiver that will receive them. + async fn subscribe(&self) -> Result, Self::Error>; + + /// Returns the payload attributes associated with the given identifier. 
+ async fn payload_attributes( + &self, + id: PayloadId, + ) -> Option::PayloadBuilderAttributes, Self::Error>>; +} diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 71f63ce34c2..78814da5066 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -13,15 +13,15 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true -reth-provider.workspace = true +reth-primitives = { workspace = true, optional = true } +reth-chain-state.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-ethereum-engine-primitives.workspace = true -reth-chain-state = { workspace = true, optional = true } # alloy +alloy-primitives = { workspace = true, optional = true } alloy-rpc-types = { workspace = true, features = ["engine"] } -alloy-primitives.workspace = true # async async-trait.workspace = true @@ -37,7 +37,15 @@ metrics.workspace = true tracing.workspace = true [dev-dependencies] +reth-primitives.workspace = true +alloy-primitives.workspace = true revm.workspace = true +alloy-consensus.workspace = true [features] -test-utils = ["reth-chain-state"] +test-utils = [ + "alloy-primitives", + "reth-chain-state/test-utils", + "reth-primitives/test-utils", + "revm/test-utils", +] diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index 70b4296da4e..b6191ea7fd1 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -26,10 +26,12 @@ //! ``` //! use std::future::Future; //! use std::pin::Pin; +//! use std::sync::Arc; //! use std::task::{Context, Poll}; +//! use alloy_consensus::Header; //! use alloy_primitives::U256; -//! use reth_payload_builder::{EthBuiltPayload, PayloadBuilderError, KeepPayloadJobAlive, EthPayloadBuilderAttributes, PayloadJob, PayloadJobGenerator}; -//! use reth_primitives::{Block, Header}; +//! use reth_payload_builder::{EthBuiltPayload, PayloadBuilderError, KeepPayloadJobAlive, EthPayloadBuilderAttributes, PayloadJob, PayloadJobGenerator, PayloadKind}; +//! use reth_primitives::{Block, BlockExt}; //! //! /// The generator type that creates new jobs that builds empty blocks. //! pub struct EmptyBlockPayloadJobGenerator; @@ -56,7 +58,7 @@ //! //! fn best_payload(&self) -> Result { //! // NOTE: some fields are omitted here for brevity -//! let payload = Block { +//! let block = Block { //! header: Header { //! parent_hash: self.attributes.parent, //! timestamp: self.attributes.timestamp, @@ -65,7 +67,7 @@ //! }, //! ..Default::default() //! }; -//! let payload = EthBuiltPayload::new(self.attributes.id, payload.seal_slow(), U256::ZERO, None); +//! let payload = EthBuiltPayload::new(self.attributes.id, Arc::new(block.seal_slow()), U256::ZERO, None, None); //! Ok(payload) //! } //! @@ -73,7 +75,7 @@ //! Ok(self.attributes.clone()) //! } //! -//! fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { +//! fn resolve_kind(&mut self, _kind: PayloadKind) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { //! let payload = self.best_payload(); //! (futures_util::future::ready(payload), KeepPayloadJobAlive::No) //! 
}
@@ -101,7 +103,6 @@
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-pub mod database;
mod metrics;
mod service;
mod traits;
@@ -112,7 +113,8 @@
pub mod noop;
pub mod test_utils;
pub use alloy_rpc_types::engine::PayloadId;
-pub use reth_payload_primitives::PayloadBuilderError;
+pub use reth_payload_builder_primitives::PayloadBuilderError;
+pub use reth_payload_primitives::PayloadKind;
pub use service::{
    PayloadBuilderHandle, PayloadBuilderService, PayloadServiceCommand, PayloadStore,
};
diff --git a/crates/payload/builder/src/noop.rs b/crates/payload/builder/src/noop.rs
index 06da7dcfada..cbf21f1cebf 100644
--- a/crates/payload/builder/src/noop.rs
+++ b/crates/payload/builder/src/noop.rs
@@ -51,7 +51,7 @@ where
                }
                PayloadServiceCommand::BestPayload(_, tx) => tx.send(None).ok(),
                PayloadServiceCommand::PayloadAttributes(_, tx) => tx.send(None).ok(),
-               PayloadServiceCommand::Resolve(_, tx) => tx.send(None).ok(),
+               PayloadServiceCommand::Resolve(_, _, tx) => tx.send(None).ok(),
                PayloadServiceCommand::Subscribe(_) => None,
            };
        }
diff --git a/crates/payload/builder/src/service.rs b/crates/payload/builder/src/service.rs
index 1ebf6770c99..af11ba75ce6 100644
--- a/crates/payload/builder/src/service.rs
+++ b/crates/payload/builder/src/service.rs
@@ -9,15 +9,16 @@ use crate::{
};
use alloy_rpc_types::engine::PayloadId;
use futures_util::{future::FutureExt, Stream, StreamExt};
-use reth_payload_primitives::{
-    BuiltPayload, Events, PayloadBuilder, PayloadBuilderAttributes, PayloadBuilderError,
-    PayloadEvents, PayloadTypes,
+use reth_chain_state::CanonStateNotification;
+use reth_payload_builder_primitives::{
+    Events, PayloadBuilder, PayloadBuilderError, PayloadEvents, PayloadStoreExt,
};
-use reth_provider::CanonStateNotification;
+use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadKind, PayloadTypes};
use std::{
    fmt,
    future::Future,
    pin::Pin,
+   sync::Arc,
    task::{Context, Poll},
};
use tokio::sync::{
@@ -30,13 +31,14 @@ use tracing::{debug, info, trace, warn};
type PayloadFuture<P> = Pin<Box<dyn Future<Output = Result<P, PayloadBuilderError>> + Send + Sync>>;
/// A communication channel to the [`PayloadBuilderService`] that can retrieve payloads.
+///
+/// This type is intended to be used to retrieve payloads from the service (e.g. from the engine
+/// API).
#[derive(Debug)]
pub struct PayloadStore<T: PayloadTypes> {
-    inner: PayloadBuilderHandle<T>,
+    inner: Arc<dyn PayloadStoreExt<T>>,
}
-// === impl PayloadStore ===
-
impl<T> PayloadStore<T>
where
    T: PayloadTypes,
@@ -45,11 +47,20 @@ where
    ///
    /// Note: depending on the installed [`PayloadJobGenerator`], this may or may not terminate the
    /// job, see [`PayloadJob::resolve`].
+    pub async fn resolve_kind(
+        &self,
+        id: PayloadId,
+        kind: PayloadKind,
+    ) -> Option<Result<T::BuiltPayload, PayloadBuilderError>> {
+        self.inner.resolve_kind(id, kind).await
+    }
+
+    /// Resolves the payload job and returns the best payload that has been built so far.
    pub async fn resolve(
        &self,
        id: PayloadId,
    ) -> Option<Result<T::BuiltPayload, PayloadBuilderError>> {
-        self.inner.resolve(id).await
+        self.resolve_kind(id, PayloadKind::Earliest).await
    }
    /// Returns the best payload for the given identifier.
@@ -73,12 +84,16 @@ where
    }
}
-impl<T> Clone for PayloadStore<T>
+impl<T> PayloadStore<T>
where
    T: PayloadTypes,
{
-    fn clone(&self) -> Self {
-        Self { inner: self.inner.clone() }
+    /// Create a new instance
+    pub fn new<P>(inner: P) -> Self
+    where
+        P: PayloadStoreExt<T> + 'static,
+    {
+        Self { inner: Arc::new(inner) }
    }
}
@@ -87,7 +102,7 @@ where
    T: PayloadTypes,
{
    fn from(inner: PayloadBuilderHandle<T>) -> Self {
-        Self { inner }
+        Self::new(inner)
    }
}
@@ -110,16 +125,13 @@ where
    type PayloadType = T;
    type Error = PayloadBuilderError;
-    async fn send_and_resolve_payload(
+    fn send_new_payload(
        &self,
        attr: <T as PayloadTypes>::PayloadBuilderAttributes,
-    ) -> Result<PayloadFuture<<T as PayloadTypes>::BuiltPayload>, Self::Error> {
-        let rx = self.send_new_payload(attr);
-        let id = rx.await??;
-
+    ) -> Receiver<Result<PayloadId, PayloadBuilderError>> {
        let (tx, rx) = oneshot::channel();
-        let _ = self.to_service.send(PayloadServiceCommand::Resolve(id, tx));
-        rx.await?.ok_or(PayloadBuilderError::MissingPayload)
+        let _ = self.to_service.send(PayloadServiceCommand::BuildNewPayload(attr, tx));
+        rx
    }
    /// Note: this does not resolve the job if it's still in progress.
@@ -132,21 +144,17 @@ where
        rx.await.ok()?
    }
-    fn send_new_payload(
+    async fn resolve_kind(
        &self,
-        attr: <T as PayloadTypes>::PayloadBuilderAttributes,
-    ) -> Receiver<Result<PayloadId, PayloadBuilderError>> {
+        id: PayloadId,
+        kind: PayloadKind,
+    ) -> Option<Result<T::BuiltPayload, PayloadBuilderError>> {
        let (tx, rx) = oneshot::channel();
-        let _ = self.to_service.send(PayloadServiceCommand::BuildNewPayload(attr, tx));
-        rx
-    }
-
-    /// Note: if there's already payload in progress with same identifier, it will be returned.
-    async fn new_payload(
-        &self,
-        attr: <T as PayloadTypes>::PayloadBuilderAttributes,
-    ) -> Result<PayloadId, PayloadBuilderError> {
-        self.send_new_payload(attr).await?
+        self.to_service.send(PayloadServiceCommand::Resolve(id, kind, tx)).ok()?;
+        match rx.await.transpose()? {
+            Ok(fut) => Some(fut.await),
+            Err(e) => Some(Err(e.into())),
+        }
    }
    async fn subscribe(&self) -> Result<PayloadEvents<T>, Self::Error> {
@@ -154,6 +162,18 @@ where
        let _ = self.to_service.send(PayloadServiceCommand::Subscribe(tx));
        Ok(PayloadEvents { receiver: rx.await? })
    }
+
+    /// Returns the payload attributes associated with the given identifier.
+    ///
+    /// Note: this returns the attributes of the payload and does not resolve the job.
+    async fn payload_attributes(
+        &self,
+        id: PayloadId,
+    ) -> Option<Result<T::PayloadBuilderAttributes, PayloadBuilderError>> {
+        let (tx, rx) = oneshot::channel();
+        self.to_service.send(PayloadServiceCommand::PayloadAttributes(id, tx)).ok()?;
+        rx.await.ok()?
+    }
}
impl<T> PayloadBuilderHandle<T>
@@ -167,31 +187,6 @@ where
    pub const fn new(to_service: mpsc::UnboundedSender<PayloadServiceCommand<T>>) -> Self {
        Self { to_service }
    }
-
-    /// Resolves the payload job and returns the best payload that has been built so far.
-    ///
-    /// Note: depending on the installed [`PayloadJobGenerator`], this may or may not terminate the
-    /// job, see [`PayloadJob::resolve`].
-    async fn resolve(&self, id: PayloadId) -> Option<Result<T::BuiltPayload, PayloadBuilderError>> {
-        let (tx, rx) = oneshot::channel();
-        self.to_service.send(PayloadServiceCommand::Resolve(id, tx)).ok()?;
-        match rx.await.transpose()? {
-            Ok(fut) => Some(fut.await),
-            Err(e) => Some(Err(e.into())),
-        }
-    }
-
-    /// Returns the payload attributes associated with the given identifier.
-    ///
-    /// Note: this returns the attributes of the payload and does not resolve the job.
-    async fn payload_attributes(
-        &self,
-        id: PayloadId,
-    ) -> Option<Result<T::PayloadBuilderAttributes, PayloadBuilderError>> {
-        let (tx, rx) = oneshot::channel();
-        self.to_service.send(PayloadServiceCommand::PayloadAttributes(id, tx)).ok()?;
-        rx.await.ok()?
-    }
}
impl<T> Clone for PayloadBuilderHandle<T>
@@ -296,11 +291,15 @@ where
    /// Returns the best payload for the given identifier that has been built so far and terminates
    /// the job if requested.
- fn resolve(&mut self, id: PayloadId) -> Option> { + fn resolve( + &mut self, + id: PayloadId, + kind: PayloadKind, + ) -> Option> { trace!(%id, "resolving payload job"); let job = self.payload_jobs.iter().position(|(_, job_id)| *job_id == id)?; - let (fut, keep_alive) = self.payload_jobs[job].0.resolve(); + let (fut, keep_alive) = self.payload_jobs[job].0.resolve_kind(kind); if keep_alive == KeepPayloadJobAlive::No { let (_, id) = self.payload_jobs.swap_remove(job); @@ -437,8 +436,8 @@ where let attributes = this.payload_attributes(id); let _ = tx.send(attributes); } - PayloadServiceCommand::Resolve(id, tx) => { - let _ = tx.send(this.resolve(id)); + PayloadServiceCommand::Resolve(id, strategy, tx) => { + let _ = tx.send(this.resolve(id, strategy)); } PayloadServiceCommand::Subscribe(tx) => { let new_rx = this.payload_events.subscribe(); @@ -469,7 +468,11 @@ pub enum PayloadServiceCommand { oneshot::Sender>>, ), /// Resolve the payload and return the payload - Resolve(PayloadId, oneshot::Sender>>), + Resolve( + PayloadId, + /* kind: */ PayloadKind, + oneshot::Sender>>, + ), /// Payload service events Subscribe(oneshot::Sender>>), } @@ -489,7 +492,7 @@ where Self::PayloadAttributes(f0, f1) => { f.debug_tuple("PayloadAttributes").field(&f0).field(&f1).finish() } - Self::Resolve(f0, _f1) => f.debug_tuple("Resolve").field(&f0).finish(), + Self::Resolve(f0, f1, _f2) => f.debug_tuple("Resolve").field(&f0).field(&f1).finish(), Self::Subscribe(f0) => f.debug_tuple("Subscribe").field(&f0).finish(), } } diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 55b9b84f45e..4690ca14f0d 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -6,13 +6,14 @@ use crate::{ }; use alloy_primitives::U256; -use reth_chain_state::ExecutedBlock; -use reth_payload_primitives::{PayloadBuilderError, PayloadTypes}; -use reth_primitives::Block; -use reth_provider::CanonStateNotification; +use reth_chain_state::{CanonStateNotification, ExecutedBlock}; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::{PayloadKind, PayloadTypes}; +use reth_primitives::{Block, BlockExt}; use std::{ future::Future, pin::Pin, + sync::Arc, task::{Context, Poll}, }; @@ -86,9 +87,10 @@ impl PayloadJob for TestPayloadJob { fn best_payload(&self) -> Result { Ok(EthBuiltPayload::new( self.attr.payload_id(), - Block::default().seal_slow(), + Arc::new(Block::default().seal_slow()), U256::ZERO, Some(ExecutedBlock::default()), + Some(Default::default()), )) } @@ -96,7 +98,10 @@ impl PayloadJob for TestPayloadJob { Ok(self.attr.clone()) } - fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { + fn resolve_kind( + &mut self, + _kind: PayloadKind, + ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { let fut = futures_util::future::ready(self.best_payload()); (fut, KeepPayloadJobAlive::No) } diff --git a/crates/payload/builder/src/traits.rs b/crates/payload/builder/src/traits.rs index 8d448eeff5a..d9d54ccd0e4 100644 --- a/crates/payload/builder/src/traits.rs +++ b/crates/payload/builder/src/traits.rs @@ -1,7 +1,8 @@ //! Trait abstractions used by the payload crate. 
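Tying the service changes above together: from the caller's side the kind-aware resolution is a single method on `PayloadStore`. A hedged sketch (the `get_payload_fast` helper is hypothetical, not part of this diff):

use alloy_rpc_types::engine::PayloadId;
use reth_payload_builder::{PayloadKind, PayloadStore};
use reth_payload_primitives::PayloadTypes;

/// Sketch: resolve a payload, preferring speed over completeness.
async fn get_payload_fast<T: PayloadTypes>(
    store: &PayloadStore<T>,
    id: PayloadId,
) -> Option<T::BuiltPayload> {
    // `Earliest` may race an empty payload against the in-progress job;
    // `WaitForPending` would instead wait for a built payload.
    store.resolve_kind(id, PayloadKind::Earliest).await?.ok()
}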
-use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadBuilderError}; -use reth_provider::CanonStateNotification; +use reth_chain_state::CanonStateNotification; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes, PayloadKind}; use std::future::Future; /// A type that can build a payload. @@ -53,7 +54,21 @@ pub trait PayloadJob: Future> + Send + /// If this returns [`KeepPayloadJobAlive::Yes`], then the [`PayloadJob`] will be polled /// once more. If this returns [`KeepPayloadJobAlive::No`] then the [`PayloadJob`] will be /// dropped after this call. - fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive); + /// + /// The [`PayloadKind`] determines how the payload should be resolved in the + /// `ResolvePayloadFuture`. [`PayloadKind::Earliest`] should return the earliest available + /// payload (as fast as possible), e.g. racing an empty payload job against a pending job if + /// there's no payload available yet. [`PayloadKind::WaitForPending`] is allowed to wait + /// until a built payload is available. + fn resolve_kind( + &mut self, + kind: PayloadKind, + ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive); + + /// Resolves the payload as fast as possible. + fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { + self.resolve_kind(PayloadKind::Earliest) + } } /// Whether the payload job should be kept alive or terminated after the payload was requested by diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml index 27418ccd899..d4070b4688e 100644 --- a/crates/payload/primitives/Cargo.toml +++ b/crates/payload/primitives/Cargo.toml @@ -16,21 +16,20 @@ workspace = true reth-chainspec.workspace = true reth-errors.workspace = true reth-primitives.workspace = true -reth-transaction-pool.workspace = true reth-chain-state.workspace = true +revm-primitives.workspace = true + # alloy +alloy-eips.workspace = true alloy-primitives.workspace = true -alloy-rpc-types = { workspace = true, features = ["engine"] } -op-alloy-rpc-types-engine.workspace = true - -# async -async-trait.workspace = true -tokio = { workspace = true, features = ["sync"] } -tokio-stream.workspace = true -pin-project.workspace = true +alloy-rpc-types-engine = { workspace = true, features = ["serde"] } +op-alloy-rpc-types-engine = { workspace = true, optional = true } # misc serde.workspace = true thiserror.workspace = true -tracing.workspace = true +tokio = { workspace = true, default-features = false, features = ["sync"] } + +[features] +op = ["dep:op-alloy-rpc-types-engine"] \ No newline at end of file diff --git a/crates/payload/primitives/src/error.rs b/crates/payload/primitives/src/error.rs index 00df9e8d290..ffe4e027e96 100644 --- a/crates/payload/primitives/src/error.rs +++ b/crates/payload/primitives/src/error.rs @@ -1,14 +1,17 @@ //! Error types emitted by types or implementations of this crate. use alloy_primitives::B256; +use alloy_rpc_types_engine::ForkchoiceUpdateError; use reth_errors::{ProviderError, RethError}; -use reth_primitives::revm_primitives::EVMError; -use reth_transaction_pool::BlobStoreError; +use revm_primitives::EVMError; use tokio::sync::oneshot; /// Possible error variants during payload building. 
#[derive(Debug, thiserror::Error)]
pub enum PayloadBuilderError {
+    /// Thrown when the parent header cannot be found
+    #[error("missing parent header: {0}")]
+    MissingParentHeader(B256),
    /// Thrown when the parent block is missing.
    #[error("missing parent block {0}")]
    MissingParentBlock(B256),
@@ -18,21 +21,12 @@ pub enum PayloadBuilderError {
    /// If there's no payload to resolve.
    #[error("missing payload")]
    MissingPayload,
-    /// Build cancelled
-    #[error("build outcome cancelled")]
-    BuildOutcomeCancelled,
-    /// Error occurring in the blob store.
-    #[error(transparent)]
-    BlobStore(#[from] BlobStoreError),
    /// Other internal error
    #[error(transparent)]
    Internal(#[from] RethError),
    /// Unrecoverable error during evm execution.
    #[error("evm execution error: {0}")]
    EvmExecutionError(EVMError<ProviderError>),
-    /// Thrown if the payload requests withdrawals before Shanghai activation.
-    #[error("withdrawals set before Shanghai activation")]
-    WithdrawalsBeforeShanghai,
    /// Any other payload building errors.
    #[error(transparent)]
    Other(Box<dyn core::error::Error + Send + Sync>),
@@ -60,7 +54,7 @@ impl From<oneshot::error::RecvError> for PayloadBuilderError {
    }
}
-/// Thrown when the payload or attributes are known to be invalid before processing.
+/// Thrown when the payload or attributes are known to be invalid __before__ processing.
///
/// This is used mainly for
/// [`validate_version_specific_fields`](crate::validate_version_specific_fields), which validates
@@ -122,3 +116,20 @@ impl EngineObjectValidationError {
        Self::InvalidParams(Box::new(error))
    }
}
+
+/// Thrown when validating the correctness of a payload attributes object.
+#[derive(thiserror::Error, Debug)]
+pub enum InvalidPayloadAttributesError {
+    /// Thrown if the timestamp of the payload attributes is invalid according to the engine specs.
+    #[error("parent beacon block root not supported before V3")]
+    InvalidTimestamp,
+    /// Another type of error that is not covered by the above variants.
+    #[error("Invalid params: {0}")]
+    InvalidParams(#[from] Box<dyn core::error::Error + Send + Sync>),
+}
+
+impl From<InvalidPayloadAttributesError> for ForkchoiceUpdateError {
+    fn from(_: InvalidPayloadAttributesError) -> Self {
+        Self::UpdatedInvalidPayloadAttributes
+    }
+}
diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs
index 5d100405135..523e6fb057a 100644
--- a/crates/payload/primitives/src/lib.rs
+++ b/crates/payload/primitives/src/lib.rs
@@ -9,24 +9,22 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
mod error;
-
-pub use error::{EngineObjectValidationError, PayloadBuilderError, VersionSpecificValidationError};
-
-mod events;
-pub use crate::events::{Events, PayloadEvents};
+pub use error::{
+    EngineObjectValidationError, InvalidPayloadAttributesError, PayloadBuilderError,
+    VersionSpecificValidationError,
+};
/// Contains traits to abstract over payload attributes types and default implementations of the
/// [`PayloadAttributes`] trait for ethereum mainnet and optimism types.
mod traits;
pub use traits::{
-    BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, PayloadBuilder,
-    PayloadBuilderAttributes,
+    BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, PayloadBuilderAttributes,
};
mod payload;
pub use payload::PayloadOrAttributes;
-use reth_chainspec::{ChainSpec, EthereumHardforks};
+use reth_chainspec::EthereumHardforks;
/// The types that are used by the engine API.
pub trait PayloadTypes: Send + Sync + Unpin + core::fmt::Debug + Clone + 'static {
    /// The built payload type.
@@ -125,8 +123,8 @@ pub fn validate_payload_timestamp(
/// Validates the presence of the `withdrawals` field according to the payload timestamp.
/// After Shanghai, withdrawals field must be [Some].
/// Before Shanghai, withdrawals field must be [None];
-pub fn validate_withdrawals_presence(
-    chain_spec: &ChainSpec,
+pub fn validate_withdrawals_presence<T: EthereumHardforks>(
+    chain_spec: &T,
    version: EngineApiMessageVersion,
    message_validation_kind: MessageValidationKind,
    timestamp: u64,
@@ -210,8 +208,8 @@ pub fn validate_withdrawals_presence(
/// `MessageValidationKind::Payload`, then the error code will be `-32602: Invalid params`. If the
/// parameter is `MessageValidationKind::PayloadAttributes`, then the error code will be `-38003:
/// Invalid payload attributes`.
-pub fn validate_parent_beacon_block_root_presence(
-    chain_spec: &ChainSpec,
+pub fn validate_parent_beacon_block_root_presence<T: EthereumHardforks>(
+    chain_spec: &T,
    version: EngineApiMessageVersion,
    validation_kind: MessageValidationKind,
    timestamp: u64,
@@ -298,13 +296,14 @@ impl MessageValidationKind {
/// either an execution payload, or payload attributes.
///
/// The version is provided by the [`EngineApiMessageVersion`] argument.
-pub fn validate_version_specific_fields<Type>(
-    chain_spec: &ChainSpec,
+pub fn validate_version_specific_fields<Type, T>(
+    chain_spec: &T,
    version: EngineApiMessageVersion,
    payload_or_attrs: PayloadOrAttributes<'_, Type>,
) -> Result<(), EngineObjectValidationError>
where
    Type: PayloadAttributes,
+   T: EthereumHardforks,
{
    validate_withdrawals_presence(
        chain_spec,
@@ -323,22 +322,45 @@ where
}
/// The version of Engine API message.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]
pub enum EngineApiMessageVersion {
    /// Version 1
-   V1,
+   V1 = 1,
    /// Version 2
    ///
    /// Added in the Shanghai hardfork.
-   V2,
+   V2 = 2,
    /// Version 3
    ///
    /// Added in the Cancun hardfork.
-   V3,
+   #[default]
+   V3 = 3,
    /// Version 4
    ///
    /// Added in the Prague hardfork.
-   V4,
+   V4 = 4,
+}
+
+/// Determines how we should choose the payload to return.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
+pub enum PayloadKind {
+    /// Returns the next best available payload (the earliest available payload).
+    /// This does not wait for a pending job to finish if there's no best payload yet and
+    /// is allowed to race various payload jobs (empty, pending best) against each other and
+    /// returns whichever job finishes faster.
+    ///
+    /// This should be used when it's more important to return a valid payload as fast as possible.
+    /// For example, the engine API timeout for `engine_getPayload` is 1s and clients should rather
+    /// return an empty payload than wait indefinitely for the pending payload job to finish and
+    /// risk missing the deadline.
+    #[default]
+    Earliest,
+    /// Only returns once we have at least one built payload.
+    ///
+    /// Compared to [`PayloadKind::Earliest`] this does not race an empty payload job against the
+    /// already in progress one, and returns the best available built payload or awaits the job in
+    /// progress.
+    WaitForPending,
}
#[cfg(test)]
diff --git a/crates/payload/primitives/src/payload.rs b/crates/payload/primitives/src/payload.rs
index fc685559e08..bcf48cea834 100644
--- a/crates/payload/primitives/src/payload.rs
+++ b/crates/payload/primitives/src/payload.rs
@@ -1,6 +1,7 @@
use crate::{MessageValidationKind, PayloadAttributes};
+use alloy_eips::eip4895::Withdrawal;
use alloy_primitives::B256;
-use alloy_rpc_types::engine::ExecutionPayload;
+use alloy_rpc_types_engine::ExecutionPayload;
/// Either an [`ExecutionPayload`] or a type that implements the [`PayloadAttributes`] trait.
///
@@ -39,7 +40,7 @@ where
    Attributes: PayloadAttributes,
{
    /// Return the withdrawals for the payload or attributes.
-    pub fn withdrawals(&self) -> Option<&Vec<Withdrawal>> {
+    pub fn withdrawals(&self) -> Option<&Vec<Withdrawal>> {
        match self {
            Self::ExecutionPayload { payload, .. } => payload.withdrawals(),
            Self::PayloadAttributes(attributes) => attributes.withdrawals(),
diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs
index 494ed68aa4e..8d5c429e6c6 100644
--- a/crates/payload/primitives/src/traits.rs
+++ b/crates/payload/primitives/src/traits.rs
@@ -1,60 +1,11 @@
-use crate::{PayloadBuilderError, PayloadEvents, PayloadTypes};
-use alloy_primitives::{Address, B256, U256};
-use alloy_rpc_types::{
-    engine::{PayloadAttributes as EthPayloadAttributes, PayloadId},
-    Withdrawal,
+use alloy_eips::{
+    eip4895::{Withdrawal, Withdrawals},
+    eip7685::Requests,
};
-use op_alloy_rpc_types_engine::OpPayloadAttributes;
+use alloy_primitives::{Address, B256, U256};
+use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId};
use reth_chain_state::ExecutedBlock;
-use reth_primitives::{SealedBlock, Withdrawals};
-use std::{future::Future, pin::Pin};
-use tokio::sync::oneshot;
-
-pub(crate) type PayloadFuture<P> =
-    Pin<Box<dyn Future<Output = Result<P, PayloadBuilderError>> + Send + Sync>>;
-
-/// A type that can request, subscribe to and resolve payloads.
-#[async_trait::async_trait]
-pub trait PayloadBuilder: Send + Unpin {
-    /// The Payload type for the builder.
-    type PayloadType: PayloadTypes;
-    /// The error type returned by the builder.
-    type Error;
-
-    /// Sends a message to the service to start building a new payload for the given payload
-    /// attributes and returns a future that resolves to the payload.
-    async fn send_and_resolve_payload(
-        &self,
-        attr: <Self::PayloadType as PayloadTypes>::PayloadBuilderAttributes,
-    ) -> Result<PayloadFuture<<Self::PayloadType as PayloadTypes>::BuiltPayload>, Self::Error>;
-
-    /// Returns the best payload for the given identifier.
-    async fn best_payload(
-        &self,
-        id: PayloadId,
-    ) -> Option<Result<<Self::PayloadType as PayloadTypes>::BuiltPayload, Self::Error>>;
-
-    /// Sends a message to the service to start building a new payload for the given payload.
-    ///
-    /// This is the same as [`PayloadBuilder::new_payload`] but does not wait for the result
-    /// and returns the receiver instead
-    fn send_new_payload(
-        &self,
-        attr: <Self::PayloadType as PayloadTypes>::PayloadBuilderAttributes,
-    ) -> oneshot::Receiver<Result<PayloadId, Self::Error>>;
-
-    /// Starts building a new payload for the given payload attributes.
-    ///
-    /// Returns the identifier of the payload.
-    async fn new_payload(
-        &self,
-        attr: <Self::PayloadType as PayloadTypes>::PayloadBuilderAttributes,
-    ) -> Result<PayloadId, Self::Error>;
-
-    /// Sends a message to the service to subscribe to payload events.
-    /// Returns a receiver that will receive them.
-    async fn subscribe(&self) -> Result<PayloadEvents<Self::PayloadType>, Self::Error>;
-}
+use reth_primitives::SealedBlock;
/// Represents a built payload type that contains a built [`SealedBlock`] and can be converted into
/// engine API execution payloads.
@@ -69,6 +20,9 @@ pub trait BuiltPayload: Send + Sync + std::fmt::Debug {
    fn executed_block(&self) -> Option<ExecutedBlock> {
        None
    }
+
+    /// Returns the EIP-7685 requests for the payload if any.
+    fn requests(&self) -> Option<Requests>;
}
/// This can be implemented by types that describe a currently running payload job.
@@ -84,10 +38,11 @@ pub trait PayloadBuilderAttributes: Send + Sync + std::fmt::Debug {
    /// Creates a new payload builder for the given parent block and the attributes.
    ///
-    /// Derives the unique [`PayloadId`] for the given parent and attributes
+    /// Derives the unique [`PayloadId`] for the given parent, attributes and version.
    fn try_new(
        parent: B256,
        rpc_payload_attributes: Self::RpcPayloadAttributes,
+        version: u8,
    ) -> Result<Self, Self::Error>
    where
        Self: Sized;
@@ -145,7 +100,8 @@ impl PayloadAttributes for EthPayloadAttributes {
    }
}
-impl PayloadAttributes for OpPayloadAttributes {
+#[cfg(feature = "op")]
+impl PayloadAttributes for op_alloy_rpc_types_engine::OpPayloadAttributes {
    fn timestamp(&self) -> u64 {
        self.payload_attributes.timestamp
    }
diff --git a/crates/payload/util/Cargo.toml b/crates/payload/util/Cargo.toml
new file mode 100644
index 00000000000..2da8dc66028
--- /dev/null
+++ b/crates/payload/util/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "reth-payload-util"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+description = "reth payload utilities"
+
+[lints]
+workspace = true
+
+[dependencies]
+# reth
+reth-primitives.workspace = true
+
+# alloy
+alloy-primitives.workspace = true
+alloy-consensus.workspace = true
\ No newline at end of file
diff --git a/crates/payload/util/src/lib.rs b/crates/payload/util/src/lib.rs
new file mode 100644
index 00000000000..5ad0e83507b
--- /dev/null
+++ b/crates/payload/util/src/lib.rs
@@ -0,0 +1,15 @@
+//! payload utils.
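For orientation, a sketch of how the two iterators added in this crate are meant to compose (the gas budget, the `ordered_txs` helper, and the transaction values are hypothetical; `RecoveredTx` is used with its default transaction type):

use reth_payload_util::{PayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed};
use reth_primitives::RecoveredTx;

/// Sketch: sequencer transactions first (capped at 100k gas), then the rest.
fn ordered_txs(
    sequencer_tx: RecoveredTx,
    rest: Vec<RecoveredTx>,
) -> impl PayloadTransactions {
    PayloadTransactionsChain::new(
        PayloadTransactionsFixed::single(sequencer_tx),
        Some(100_000), // gas budget for the `before` segment
        PayloadTransactionsFixed::new(rest),
        None, // no explicit budget for the `after` segment
    )
}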
+
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
+    html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
+    issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
+)]
+#![cfg_attr(not(test), warn(unused_crate_dependencies))]
+#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+
+mod traits;
+mod transaction;
+
+pub use traits::PayloadTransactions;
+pub use transaction::{PayloadTransactionsChain, PayloadTransactionsFixed};
diff --git a/crates/payload/util/src/traits.rs b/crates/payload/util/src/traits.rs
new file mode 100644
index 00000000000..e9bb7e03704
--- /dev/null
+++ b/crates/payload/util/src/traits.rs
@@ -0,0 +1,23 @@
+use alloy_primitives::Address;
+use reth_primitives::RecoveredTx;
+
+/// Iterator that returns transactions for the block building process in the order they should be
+/// included in the block.
+///
+/// Can include transactions from the pool and other sources (alternative pools,
+/// sequencer-originated transactions, etc.).
+pub trait PayloadTransactions {
+    /// The transaction type this iterator yields.
+    type Transaction;
+
+    /// Returns the next transaction to include in the block.
+    fn next(
+        &mut self,
+        // In the future, `ctx` can include access to state for block building purposes.
+        ctx: (),
+    ) -> Option<RecoveredTx<Self::Transaction>>;
+
+    /// Exclude descendants of the transaction with given sender and nonce from the iterator,
+    /// because this transaction won't be included in the block.
+    fn mark_invalid(&mut self, sender: Address, nonce: u64);
+}
diff --git a/crates/payload/util/src/transaction.rs b/crates/payload/util/src/transaction.rs
new file mode 100644
index 00000000000..71387946aef
--- /dev/null
+++ b/crates/payload/util/src/transaction.rs
@@ -0,0 +1,132 @@
+use crate::PayloadTransactions;
+use alloy_consensus::Transaction;
+use alloy_primitives::Address;
+use reth_primitives::RecoveredTx;
+
+/// An implementation of [`crate::traits::PayloadTransactions`] that yields
+/// a pre-defined set of transactions.
+///
+/// This is useful to put a sequencer-specified set of transactions into the block
+/// and compose it with the rest of the transactions.
+#[derive(Debug)]
+pub struct PayloadTransactionsFixed<T> {
+    transactions: Vec<T>,
+    index: usize,
+}
+
+impl<T> PayloadTransactionsFixed<T> {
+    /// Constructs a new [`PayloadTransactionsFixed`].
+    pub fn new(transactions: Vec<T>) -> Self {
+        Self { transactions, index: Default::default() }
+    }
+
+    /// Constructs a new [`PayloadTransactionsFixed`] with a single transaction.
+    pub fn single(transaction: T) -> Self {
+        Self { transactions: vec![transaction], index: Default::default() }
+    }
+}
+
+impl<T: Clone> PayloadTransactions for PayloadTransactionsFixed<RecoveredTx<T>> {
+    type Transaction = T;
+
+    fn next(&mut self, _ctx: ()) -> Option<RecoveredTx<T>> {
+        (self.index < self.transactions.len()).then(|| {
+            let tx = self.transactions[self.index].clone();
+            self.index += 1;
+            tx
+        })
+    }
+
+    fn mark_invalid(&mut self, _sender: Address, _nonce: u64) {}
+}
+
+/// Wrapper over [`crate::traits::PayloadTransactions`] that combines transactions from multiple
+/// `PayloadTransactions` iterators and keeps track of the gas used by both iterators.
+///
+/// We can't use [`Iterator::chain`], because:
+/// (a) we need to propagate the `mark_invalid` and `no_updates`
+/// (b) we need to keep track of the gas
+///
+/// Note that [`PayloadTransactionsChain`] fully drains the first iterator
+/// before moving to the second one.
+///
+/// If the `before` iterator has transactions that do not fit into the block,
+/// a `mark_invalid` call is propagated to the `after` iterator for each of them.
+#[derive(Debug)]
+pub struct PayloadTransactionsChain<B, A> {
+    /// Iterator that will be used first
+    before: B,
+    /// Allowed gas for the transactions from `before` iterator. If `None`, no gas limit is
+    /// enforced.
+    before_max_gas: Option<u64>,
+    /// Gas used by the transactions from `before` iterator
+    before_gas: u64,
+    /// Iterator that will be used after `before` iterator
+    after: A,
+    /// Allowed gas for the transactions from `after` iterator. If `None`, no gas limit is
+    /// enforced.
+    after_max_gas: Option<u64>,
+    /// Gas used by the transactions from `after` iterator
+    after_gas: u64,
+}
+
+impl<B, A> PayloadTransactionsChain<B, A> {
+    /// Constructs a new [`PayloadTransactionsChain`].
+    pub fn new(
+        before: B,
+        before_max_gas: Option<u64>,
+        after: A,
+        after_max_gas: Option<u64>,
+    ) -> Self {
+        Self {
+            before,
+            before_max_gas,
+            before_gas: Default::default(),
+            after,
+            after_max_gas,
+            after_gas: Default::default(),
+        }
+    }
+}
+
+impl<A, B> PayloadTransactions for PayloadTransactionsChain<B, A>
+where
+    A: PayloadTransactions<Transaction: Transaction>,
+    B: PayloadTransactions<Transaction = A::Transaction>,
+{
+    type Transaction = A::Transaction;
+
+    fn next(&mut self, ctx: ()) -> Option<RecoveredTx<Self::Transaction>> {
+        while let Some(tx) = self.before.next(ctx) {
+            if let Some(before_max_gas) = self.before_max_gas {
+                if self.before_gas + tx.as_signed().gas_limit() <= before_max_gas {
+                    self.before_gas += tx.as_signed().gas_limit();
+                    return Some(tx);
+                }
+                self.before.mark_invalid(tx.signer(), tx.as_signed().nonce());
+                self.after.mark_invalid(tx.signer(), tx.as_signed().nonce());
+            } else {
+                return Some(tx);
+            }
+        }
+
+        while let Some(tx) = self.after.next(ctx) {
+            if let Some(after_max_gas) = self.after_max_gas {
+                if self.after_gas + tx.as_signed().gas_limit() <= after_max_gas {
+                    self.after_gas += tx.as_signed().gas_limit();
+                    return Some(tx);
+                }
+                self.after.mark_invalid(tx.signer(), tx.as_signed().nonce());
+            } else {
+                return Some(tx);
+            }
+        }
+
+        None
+    }
+
+    fn mark_invalid(&mut self, sender: Address, nonce: u64) {
+        self.before.mark_invalid(sender, nonce);
+        self.after.mark_invalid(sender, nonce);
+    }
+}
diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs
index 55002b0a98b..0a872a68ddf 100644
--- a/crates/payload/validator/src/lib.rs
+++ b/crates/payload/validator/src/lib.rs
@@ -8,9 +8,11 @@
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-use alloy_rpc_types::engine::{ExecutionPayload, MaybeCancunPayloadFields, PayloadError};
+use alloy_rpc_types::engine::{
+    ExecutionPayload, ExecutionPayloadSidecar, MaybeCancunPayloadFields, PayloadError,
+};
use reth_chainspec::EthereumHardforks;
-use reth_primitives::SealedBlock;
+use reth_primitives::{BlockExt, SealedBlock};
use reth_rpc_types_compat::engine::payload::try_into_block;
use std::sync::Arc;
@@ -21,7 +23,7 @@ pub struct ExecutionPayloadValidator<ChainSpec> {
    chain_spec: Arc<ChainSpec>,
}
-impl<ChainSpec: EthereumHardforks> ExecutionPayloadValidator<ChainSpec> {
+impl<ChainSpec> ExecutionPayloadValidator<ChainSpec> {
    /// Create a new validator.
    pub const fn new(chain_spec: Arc<ChainSpec>) -> Self {
        Self { chain_spec }
@@ -29,10 +31,12 @@ impl<ChainSpec: EthereumHardforks> ExecutionPayloadValidator<ChainSpec> {
    /// Returns the chain spec used by the validator.
    #[inline]
-    pub fn chain_spec(&self) -> &ChainSpec {
+    pub const fn chain_spec(&self) -> &Arc<ChainSpec> {
        &self.chain_spec
    }
+}
+impl<ChainSpec: EthereumHardforks> ExecutionPayloadValidator<ChainSpec> {
    /// Returns true if the Cancun hardfork is active at the given timestamp.
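On the consuming side the validator change is mechanical: the Cancun fields now arrive bundled in the sidecar. A sketch of the call site (the `check_payload` wrapper is hypothetical, not part of this diff):

use alloy_rpc_types::engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError};
use reth_chainspec::ChainSpec;
use reth_payload_validator::ExecutionPayloadValidator;
use reth_primitives::SealedBlock;

/// Sketch: validate an incoming payload together with its sidecar.
fn check_payload(
    validator: &ExecutionPayloadValidator<ChainSpec>,
    payload: ExecutionPayload,
    sidecar: ExecutionPayloadSidecar,
) -> Result<SealedBlock, PayloadError> {
    // Hash, fork-specific field, and blob-versioned-hash checks happen inside.
    validator.ensure_well_formed_payload(payload, sidecar)
}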
#[inline] fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool { @@ -111,13 +115,12 @@ impl ExecutionPayloadValidator { pub fn ensure_well_formed_payload( &self, payload: ExecutionPayload, - cancun_fields: MaybeCancunPayloadFields, + sidecar: ExecutionPayloadSidecar, ) -> Result { let expected_hash = payload.block_hash(); // First parse the block - let sealed_block = - try_into_block(payload, cancun_fields.parent_beacon_block_root())?.seal_slow(); + let sealed_block = try_into_block(payload, &sidecar)?.seal_slow(); // Ensure the hash included in the payload matches the block hash if expected_hash != sealed_block.hash() { @@ -136,7 +139,7 @@ impl ExecutionPayloadValidator { // cancun active but excess blob gas not present return Err(PayloadError::PostCancunBlockWithoutExcessBlobGas) } - if cancun_fields.as_ref().is_none() { + if sidecar.cancun().is_none() { // cancun active but cancun fields not present return Err(PayloadError::PostCancunWithoutCancunFields) } @@ -153,7 +156,7 @@ impl ExecutionPayloadValidator { // cancun not active but excess blob gas present return Err(PayloadError::PreCancunBlockWithExcessBlobGas) } - if cancun_fields.as_ref().is_some() { + if sidecar.cancun().is_some() { // cancun not active but cancun fields present return Err(PayloadError::PreCancunWithCancunFields) } @@ -162,7 +165,7 @@ impl ExecutionPayloadValidator { let shanghai_active = self.is_shanghai_active_at_timestamp(sealed_block.timestamp); if !shanghai_active && sealed_block.body.withdrawals.is_some() { // shanghai not active but withdrawals present - return Err(PayloadError::PreShanghaiBlockWithWitdrawals) + return Err(PayloadError::PreShanghaiBlockWithWithdrawals) } if !self.is_prague_active_at_timestamp(sealed_block.timestamp) && @@ -172,7 +175,10 @@ impl ExecutionPayloadValidator { } // EIP-4844 checks - self.ensure_matching_blob_versioned_hashes(&sealed_block, &cancun_fields)?; + self.ensure_matching_blob_versioned_hashes( + &sealed_block, + &sidecar.cancun().cloned().into(), + )?; Ok(sealed_block) } diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index b34987327ee..459fdbde1a7 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -12,26 +12,30 @@ description = "Common types in reth." 
workspace = true [dependencies] -reth-codecs.workspace = true +# reth +reth-codecs = { workspace = true, optional = true } -alloy-consensus = { workspace = true, features = ["serde"] } +# ethereum +alloy-consensus.workspace = true alloy-eips.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true +revm-primitives.workspace = true -revm-primitives = { workspace = true, features = ["serde"] } +# op +op-alloy-consensus = { workspace = true, optional = true } # misc -byteorder = "1" +byteorder = { workspace = true, optional = true } +bytes.workspace = true derive_more.workspace = true -roaring = "0.10.2" serde_with = { workspace = true, optional = true } +auto_impl.workspace = true # required by reth-codecs -bytes.workspace = true -modular-bitfield.workspace = true -serde.workspace = true +modular-bitfield = { workspace = true, optional = true } +serde = { workspace = true, optional = true} # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } @@ -39,29 +43,72 @@ proptest = { workspace = true, optional = true } proptest-arbitrary-interop = { workspace = true, optional = true } [dev-dependencies] -reth-testing-utils.workspace = true - alloy-primitives = { workspace = true, features = ["arbitrary"] } alloy-consensus = { workspace = true, features = ["arbitrary"] } -arbitrary = { workspace = true, features = ["derive"] } bincode.workspace = true proptest-arbitrary-interop.workspace = true proptest.workspace = true rand.workspace = true serde_json.workspace = true test-fuzz.workspace = true +modular-bitfield.workspace = true +serde.workspace = true [features] default = ["std"] -std = [] -test-utils = ["arbitrary"] +std = [ + "alloy-consensus/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "revm-primitives/std", + "serde?/std", + "serde_with?/std", + "alloy-rlp/std", + "bytes/std", + "derive_more/std" +] +test-utils = [ + "arbitrary", + "reth-codecs?/test-utils" +] arbitrary = [ - "std", - "alloy-consensus/arbitrary", - "alloy-primitives/arbitrary", - "dep:arbitrary", - "dep:proptest", - "dep:proptest-arbitrary-interop", + "std", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary", + "dep:arbitrary", + "dep:proptest", + "dep:proptest-arbitrary-interop", + "alloy-eips/arbitrary", + "revm-primitives/arbitrary", + "reth-codecs?/arbitrary", + "op-alloy-consensus?/arbitrary" +] +serde-bincode-compat = [ + "serde", + "serde_with", + "alloy-consensus/serde-bincode-compat", + "alloy-eips/serde-bincode-compat", + "op-alloy-consensus?/serde-bincode-compat" +] +serde = [ + "dep:serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bytes/serde", + "rand/serde", + "reth-codecs?/serde", + "revm-primitives/serde", + "revm-primitives/serde", + "op-alloy-consensus?/serde" +] +reth-codec = [ + "dep:reth-codecs", + "dep:modular-bitfield", + "dep:byteorder", +] +op = [ + "dep:op-alloy-consensus", ] -serde-bincode-compat = ["serde_with", "alloy-consensus/serde-bincode-compat"] \ No newline at end of file diff --git a/crates/primitives-traits/src/account.rs b/crates/primitives-traits/src/account.rs index 063504b2a0e..17f7f6f58ca 100644 --- a/crates/primitives-traits/src/account.rs +++ b/crates/primitives-traits/src/account.rs @@ -1,32 +1,34 @@ use alloy_consensus::constants::KECCAK_EMPTY; use alloy_genesis::GenesisAccount; use alloy_primitives::{keccak256, Bytes, B256, U256}; -use byteorder::{BigEndian, ReadBytesExt}; -use bytes::Buf; use derive_more::Deref; 
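The dependency shuffle above exists so that derives can be gated per feature. The pattern, reduced to a hypothetical standalone type (`MyValue` is illustrative, not from this diff):

/// Sketch: serde and Compact impls only materialize when the matching
/// feature is enabled, keeping no_std / minimal builds lean.
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
pub struct MyValue {
    pub nonce: u64,
}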
-use reth_codecs::{add_arbitrary_tests, Compact}; -use revm_primitives::{AccountInfo, Bytecode as RevmBytecode, BytecodeDecodeError, JumpTable}; -use serde::{Deserialize, Serialize}; +use revm_primitives::{AccountInfo, Bytecode as RevmBytecode, BytecodeDecodeError}; -/// Identifier for [`LegacyRaw`](RevmBytecode::LegacyRaw). -const LEGACY_RAW_BYTECODE_ID: u8 = 0; +#[cfg(any(test, feature = "reth-codec"))] +/// Identifiers used in [`Compact`](reth_codecs::Compact) encoding of [`Bytecode`]. +pub mod compact_ids { + /// Identifier for [`LegacyRaw`](revm_primitives::Bytecode::LegacyRaw). + pub const LEGACY_RAW_BYTECODE_ID: u8 = 0; -/// Identifier for removed bytecode variant. -const REMOVED_BYTECODE_ID: u8 = 1; + /// Identifier for removed bytecode variant. + pub const REMOVED_BYTECODE_ID: u8 = 1; -/// Identifier for [`LegacyAnalyzed`](RevmBytecode::LegacyAnalyzed). -const LEGACY_ANALYZED_BYTECODE_ID: u8 = 2; + /// Identifier for [`LegacyAnalyzed`](revm_primitives::Bytecode::LegacyAnalyzed). + pub const LEGACY_ANALYZED_BYTECODE_ID: u8 = 2; -/// Identifier for [`Eof`](RevmBytecode::Eof). -const EOF_BYTECODE_ID: u8 = 3; + /// Identifier for [`Eof`](revm_primitives::Bytecode::Eof). + pub const EOF_BYTECODE_ID: u8 = 3; -/// Identifier for [`Eip7702`](RevmBytecode::Eip7702). -const EIP7702_BYTECODE_ID: u8 = 4; + /// Identifier for [`Eip7702`](revm_primitives::Bytecode::Eip7702). + pub const EIP7702_BYTECODE_ID: u8 = 4; +} /// An Ethereum account. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Default, Serialize, Deserialize, Compact)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct Account { /// Account nonce. pub nonce: u64, @@ -47,7 +49,7 @@ impl Account { pub fn is_empty(&self) -> bool { self.nonce == 0 && self.balance.is_zero() && - self.bytecode_hash.map_or(true, |hash| hash == KECCAK_EMPTY) + self.bytecode_hash.is_none_or(|hash| hash == KECCAK_EMPTY) } /// Returns an account bytecode's hash. @@ -60,7 +62,8 @@ impl Account { /// Bytecode for an account. /// /// A wrapper around [`revm::primitives::Bytecode`][RevmBytecode] with encoding/decoding support. -#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, Deref)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[derive(Debug, Clone, Default, PartialEq, Eq, Deref)] pub struct Bytecode(pub RevmBytecode); impl Bytecode { @@ -84,11 +87,17 @@ impl Bytecode { } } -impl Compact for Bytecode { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for Bytecode { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, { + use compact_ids::{ + EIP7702_BYTECODE_ID, EOF_BYTECODE_ID, LEGACY_ANALYZED_BYTECODE_ID, + LEGACY_RAW_BYTECODE_ID, + }; + let bytecode = match &self.0 { RevmBytecode::LegacyRaw(bytes) => bytes, RevmBytecode::LegacyAnalyzed(analyzed) => analyzed.bytecode(), @@ -127,7 +136,12 @@ impl Compact for Bytecode { // A panic will be triggered if a bytecode variant of 1 or greater than 2 is passed from the // database. 
fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { - let len = buf.read_u32::().expect("could not read bytecode length"); + use byteorder::ReadBytesExt; + use bytes::Buf; + + use compact_ids::*; + + let len = buf.read_u32::().expect("could not read bytecode length"); let bytes = Bytes::from(buf.copy_to_bytes(len as usize)); let variant = buf.read_u8().expect("could not read bytecode variant"); let decoded = match variant { @@ -138,8 +152,8 @@ impl Compact for Bytecode { LEGACY_ANALYZED_BYTECODE_ID => Self(unsafe { RevmBytecode::new_analyzed( bytes, - buf.read_u64::().unwrap() as usize, - JumpTable::from_slice(buf), + buf.read_u64::().unwrap() as usize, + revm_primitives::JumpTable::from_slice(buf), ) }), EOF_BYTECODE_ID | EIP7702_BYTECODE_ID => { @@ -164,11 +178,20 @@ impl From<&GenesisAccount> for Account { impl From for Account { fn from(revm_acc: AccountInfo) -> Self { - let code_hash = revm_acc.code_hash; Self { balance: revm_acc.balance, nonce: revm_acc.nonce, - bytecode_hash: (code_hash != KECCAK_EMPTY).then_some(code_hash), + bytecode_hash: (!revm_acc.is_empty_code_hash()).then_some(revm_acc.code_hash), + } + } +} + +impl From<&AccountInfo> for Account { + fn from(revm_acc: &AccountInfo) -> Self { + Self { + balance: revm_acc.balance, + nonce: revm_acc.nonce, + bytecode_hash: (!revm_acc.is_empty_code_hash()).then_some(revm_acc.code_hash), } } } @@ -186,9 +209,11 @@ impl From for AccountInfo { #[cfg(test)] mod tests { - use super::*; use alloy_primitives::{hex_literal::hex, B256, U256}; - use revm_primitives::LegacyAnalyzedBytecode; + use reth_codecs::Compact; + use revm_primitives::{JumpTable, LegacyAnalyzedBytecode}; + + use super::*; #[test] fn test_account() { @@ -256,4 +281,50 @@ mod tests { assert_eq!(decoded, bytecode); assert!(remainder.is_empty()); } + + #[test] + fn test_account_has_bytecode() { + // Account with no bytecode (None) + let acc_no_bytecode = Account { nonce: 1, balance: U256::from(1000), bytecode_hash: None }; + assert!(!acc_no_bytecode.has_bytecode(), "Account should not have bytecode"); + + // Account with bytecode hash set to KECCAK_EMPTY (should have bytecode) + let acc_empty_bytecode = + Account { nonce: 1, balance: U256::from(1000), bytecode_hash: Some(KECCAK_EMPTY) }; + assert!(acc_empty_bytecode.has_bytecode(), "Account should have bytecode"); + + // Account with a non-empty bytecode hash + let acc_with_bytecode = Account { + nonce: 1, + balance: U256::from(1000), + bytecode_hash: Some(B256::from_slice(&[0x11u8; 32])), + }; + assert!(acc_with_bytecode.has_bytecode(), "Account should have bytecode"); + } + + #[test] + fn test_account_get_bytecode_hash() { + // Account with no bytecode (should return KECCAK_EMPTY) + let acc_no_bytecode = Account { nonce: 0, balance: U256::ZERO, bytecode_hash: None }; + assert_eq!(acc_no_bytecode.get_bytecode_hash(), KECCAK_EMPTY, "Should return KECCAK_EMPTY"); + + // Account with bytecode hash set to KECCAK_EMPTY + let acc_empty_bytecode = + Account { nonce: 1, balance: U256::from(1000), bytecode_hash: Some(KECCAK_EMPTY) }; + assert_eq!( + acc_empty_bytecode.get_bytecode_hash(), + KECCAK_EMPTY, + "Should return KECCAK_EMPTY" + ); + + // Account with a valid bytecode hash + let bytecode_hash = B256::from_slice(&[0x11u8; 32]); + let acc_with_bytecode = + Account { nonce: 1, balance: U256::from(1000), bytecode_hash: Some(bytecode_hash) }; + assert_eq!( + acc_with_bytecode.get_bytecode_hash(), + bytecode_hash, + "Should return the bytecode hash" + ); + } } diff --git a/crates/primitives-traits/src/block.rs 
b/crates/primitives-traits/src/block.rs
deleted file mode 100644
index 02f581801c9..00000000000
--- a/crates/primitives-traits/src/block.rs
+++ /dev/null
@@ -1,99 +0,0 @@
-//! Block abstraction.
-
-pub mod body;
-
-use alloc::fmt;
-use core::ops;
-
-use alloy_consensus::BlockHeader;
-use alloy_primitives::{Address, Sealable, B256};
-
-use crate::{traits::BlockBody, BlockWithSenders, SealedBlock, SealedHeader};
-
-/// Abstraction of block data type.
-pub trait Block:
-    fmt::Debug
-    + Clone
-    + PartialEq
-    + Eq
-    + Default
-    + serde::Serialize
-    + for<'a> serde::Deserialize<'a>
-    + From<(Self::Header, Self::Body)>
-    + Into<(Self::Header, Self::Body)>
-{
-    /// Header part of the block.
-    type Header: BlockHeader + Sealable;
-
-    /// The block's body contains the transactions in the block.
-    type Body: BlockBody;
-
-    /// A block and block hash.
-    type SealedBlock;
-
-    /// A block and addresses of senders of transactions in it.
-    type BlockWithSenders;
-
-    /// Returns reference to [`BlockHeader`] type.
-    fn header(&self) -> &Self::Header;
-
-    /// Returns reference to [`BlockBody`] type.
-    fn body(&self) -> &Self::Body;
-
-    /// Calculate the header hash and seal the block so that it can't be changed.
-    // todo: can be default impl if sealed block type is made generic over header and body and
-    // migrated to alloy
-    fn seal_slow(self) -> Self::SealedBlock;
-
-    /// Seal the block with a known hash.
-    ///
-    /// WARNING: This method does not perform validation whether the hash is correct.
-    // todo: can be default impl if sealed block type is made generic over header and body and
-    // migrated to alloy
-    fn seal(self, hash: B256) -> Self::SealedBlock;
-
-    /// Expensive operation that recovers transaction signer. See
-    /// [`SealedBlockWithSenders`](reth_primitives::SealedBlockWithSenders).
-    fn senders(&self) -> Option<Vec<Address>> {
-        self.body().recover_signers()
-    }
-
-    /// Transform into a [`BlockWithSenders`].
-    ///
-    /// # Panics
-    ///
-    /// If the number of senders does not match the number of transactions in the block
-    /// and the signer recovery for one of the transactions fails.
-    ///
-    /// Note: this is expected to be called with blocks read from disk.
-    #[track_caller]
-    fn with_senders_unchecked(self, senders: Vec<Address>) -> Self::BlockWithSenders {
-        self.try_with_senders_unchecked(senders).expect("stored block is valid")
-    }
-
-    /// Transform into a [`BlockWithSenders`] using the given senders.
-    ///
-    /// If the number of senders does not match the number of transactions in the block, this falls
-    /// back to manual recovery, but _without ensuring that the signature has a low `s` value_.
-    /// See also [`TransactionSigned::recover_signer_unchecked`]
-    ///
-    /// Returns an error if a signature is invalid.
-    // todo: can be default impl if block with senders type is made generic over block and migrated
-    // to alloy
-    #[track_caller]
-    fn try_with_senders_unchecked(
-        self,
-        senders: Vec<Address>,
-    ) -> Result<Self::BlockWithSenders, Self>;
-
-    /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained
-    /// transactions.
-    ///
-    /// Returns `None` if a transaction is invalid.
-    // todo: can be default impl if sealed block type is made generic over header and body and
-    // migrated to alloy
-    fn with_recovered_senders(self) -> Option<Self::BlockWithSenders>;
-
-    /// Calculates a heuristic for the in-memory size of the [`Block`].
-    fn size(&self) -> usize;
-}
diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs
index 03246c68b45..20f1cb9c159 100644
--- a/crates/primitives-traits/src/block/body.rs
+++ b/crates/primitives-traits/src/block/body.rs
@@ -1,159 +1,103 @@
//! Block body abstraction.
-use alloc::fmt;
-use core::ops;
+use crate::{
+    BlockHeader, FullSignedTx, InMemorySize, MaybeArbitrary, MaybeSerde, MaybeSerdeBincodeCompat,
+    SignedTransaction,
+};
+use alloc::{fmt, vec::Vec};
+use alloy_consensus::Transaction;
+use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals};
+use alloy_primitives::{Bytes, B256};
-use alloy_consensus::{BlockHeader,Request, Transaction, TxType};
-use alloy_primitives::{Address, B256};
-use alloy_eips::eip1559::Withdrawal;
+/// Helper trait that unifies all behaviour required by transaction to support full node operations.
+pub trait FullBlockBody: BlockBody<Transaction: FullSignedTx> {}
-use crate::Block;
+impl<T> FullBlockBody for T where T: BlockBody<Transaction: FullSignedTx> {}
/// Abstraction for block's body.
pub trait BlockBody:
-    Clone
+    Send
+    + Sync
+    + Unpin
+    + Clone
+    + Default
    + fmt::Debug
    + PartialEq
    + Eq
-    + Default
-    + serde::Serialize
-    + for<'de> serde::Deserialize<'de>
    + alloy_rlp::Encodable
    + alloy_rlp::Decodable
+    + InMemorySize
+    + MaybeSerde
+    + MaybeArbitrary
+    + MaybeSerdeBincodeCompat
+    + 'static
{
    /// Ordered list of signed transactions as committed in block.
-    // todo: requires trait for signed transaction
-    type SignedTransaction: Transaction;
-
-    /// Header type (uncle blocks).
-    type Header: BlockHeader;
-
-    /// Withdrawals in block.
-    type Withdrawals: Iterator<Item = Withdrawal>;
+    type Transaction: SignedTransaction;
-    /// Requests in block.
-    type Requests: Iterator<Item = Request>;
+    /// Ommer header type.
+    type OmmerHeader: BlockHeader;
    /// Returns reference to transactions in block.
-    fn transactions(&self) -> &[Self::SignedTransaction];
+    fn transactions(&self) -> &[Self::Transaction];
-    /// Returns [`Withdrawals`] in the block, if any.
-    // todo: branch out into extension trait
-    fn withdrawals(&self) -> Option<&Self::Withdrawals>;
-
-    /// Returns reference to uncle block headers.
-    fn ommers(&self) -> &[Self::Header];
-
-    /// Returns [`Request`] in block, if any.
-    fn requests(&self) -> Option<&Self::Requests>;
-
-    /// Create a [`Block`] from the body and its header.
-    fn into_block<T: Block<Header = Self::Header, Body = Self>>(self, header: Self::Header) -> T {
-        T::from((header, self))
-    }
+    /// Consume the block body and return a [`Vec`] of transactions.
+    fn into_transactions(self) -> Vec<Self::Transaction>;
    /// Calculate the transaction root for the block body.
-    fn calculate_tx_root(&self) -> B256;
-
-    /// Calculate the ommers root for the block body.
-    fn calculate_ommers_root(&self) -> B256;
-
-    /// Calculate the withdrawals root for the block body, if withdrawals exist. If there are no
-    /// withdrawals, this will return `None`.
-    // todo: can be default impl if `calculate_withdrawals_root` made into a method on
-    // `Withdrawals` and `Withdrawals` moved to alloy
-    fn calculate_withdrawals_root(&self) -> Option<B256>;
-
-    /// Calculate the requests root for the block body, if requests exist.
If there are no - /// requests, this will return `None`. - // todo: can be default impl if `calculate_requests_root` made into a method on - // `Requests` and `Requests` moved to alloy - fn calculate_requests_root(&self) -> Option; - - /// Recover signer addresses for all transactions in the block body. - fn recover_signers(&self) -> Option>; - - /// Returns whether or not the block body contains any blob transactions. - fn has_blob_transactions(&self) -> bool { - self.transactions().iter().any(|tx| tx.ty() as u8 == TxType::Eip4844 as u8) - } - - /// Returns whether or not the block body contains any EIP-7702 transactions. - fn has_eip7702_transactions(&self) -> bool { - self.transactions().iter().any(|tx| tx.ty() as u8 == TxType::Eip7702 as u8) - } - - /// Returns an iterator over all blob transactions of the block - fn blob_transactions_iter(&self) -> impl Iterator + '_ { - self.transactions().iter().filter(|tx| tx.ty() as u8 == TxType::Eip4844 as u8) - } - - /// Returns only the blob transactions, if any, from the block body. - fn blob_transactions(&self) -> Vec<&Self::SignedTransaction> { - self.blob_transactions_iter().collect() - } - - /// Returns an iterator over all blob versioned hashes from the block body. - fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_; - - /// Returns all blob versioned hashes from the block body. - fn blob_versioned_hashes(&self) -> Vec<&B256> { - self.blob_versioned_hashes_iter().collect() - } - - /// Calculates a heuristic for the in-memory size of the [`BlockBody`]. - fn size(&self) -> usize; -} - -impl BlockBody for T -where - T: ops::Deref - + Clone - + fmt::Debug - + PartialEq - + Eq - + Default - + serde::Serialize - + for<'de> serde::Deserialize<'de> - + alloy_rlp::Encodable - + alloy_rlp::Decodable, -{ - type Header = ::Header; - type SignedTransaction = ::SignedTransaction; - - fn transactions(&self) -> &Vec { - self.deref().transactions() + fn calculate_tx_root(&self) -> B256 { + alloy_consensus::proofs::calculate_transaction_root(self.transactions()) } - fn withdrawals(&self) -> Option<&Withdrawals> { - self.deref().withdrawals() - } + /// Returns block withdrawals if any. + fn withdrawals(&self) -> Option<&Withdrawals>; - fn ommers(&self) -> &Vec { - self.deref().ommers() + /// Calculate the withdrawals root for the block body. + /// + /// Returns `None` if there are no withdrawals in the block. + fn calculate_withdrawals_root(&self) -> Option { + self.withdrawals().map(|withdrawals| { + alloy_consensus::proofs::calculate_withdrawals_root(withdrawals.as_slice()) + }) } - fn requests(&self) -> Option<&Requests> { - self.deref().requests() - } + /// Returns block ommers if any. + fn ommers(&self) -> Option<&[Self::OmmerHeader]>; - fn calculate_tx_root(&self) -> B256 { - self.deref().calculate_tx_root() + /// Calculate the ommers root for the block body. + /// + /// Returns `None` if there are no ommers in the block. + fn calculate_ommers_root(&self) -> Option { + self.ommers().map(alloy_consensus::proofs::calculate_ommers_root) } - fn calculate_ommers_root(&self) -> B256 { - self.deref().calculate_ommers_root() + /// Calculates the total blob gas used by _all_ EIP-4844 transactions in the block. + fn blob_gas_used(&self) -> u64 { + self.transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() } - fn recover_signers(&self) -> Option> { - self.deref().recover_signers() + /// Returns an iterator over all blob versioned hashes in the block body. 
+ fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { + self.transactions().iter().filter_map(|tx| tx.blob_versioned_hashes()).flatten() } - fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { - self.deref().blob_versioned_hashes_iter() + /// Returns an iterator over the encoded 2718 transactions. + /// + /// This is also known as `raw transactions`. + /// + /// See also [`Encodable2718`]. + #[doc(alias = "raw_transactions_iter")] + fn encoded_2718_transactions_iter(&self) -> impl Iterator> + '_ { + self.transactions().iter().map(|tx| tx.encoded_2718()) } - fn size(&self) -> usize { - self.deref().size() + /// Returns a vector of encoded 2718 transactions. + /// + /// This is also known as `raw transactions`. + /// + /// See also [`Encodable2718`]. + #[doc(alias = "raw_transactions")] + fn encoded_2718_transactions(&self) -> Vec { + self.encoded_2718_transactions_iter().map(Into::into).collect() } } diff --git a/crates/primitives-traits/src/block/header.rs b/crates/primitives-traits/src/block/header.rs new file mode 100644 index 00000000000..42d0153b19c --- /dev/null +++ b/crates/primitives-traits/src/block/header.rs @@ -0,0 +1,38 @@ +//! Block header data primitive. + +use core::fmt; + +use alloy_primitives::Sealable; + +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde, MaybeSerdeBincodeCompat}; + +/// Helper trait that unifies all behaviour required by block header to support full node +/// operations. +pub trait FullBlockHeader: BlockHeader + MaybeCompact {} + +impl FullBlockHeader for T where T: BlockHeader + MaybeCompact {} + +/// Abstraction of a block header. +pub trait BlockHeader: + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + alloy_rlp::Encodable + + alloy_rlp::Decodable + + alloy_consensus::BlockHeader + + Sealable + + InMemorySize + + MaybeSerde + + MaybeArbitrary + + MaybeSerdeBincodeCompat + + AsRef + + 'static +{ +} + +impl BlockHeader for alloy_consensus::Header {} diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 02f581801c9..1994075b922 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -1,99 +1,66 @@ //! Block abstraction. pub mod body; +pub mod header; use alloc::fmt; -use core::ops; +use alloy_rlp::{Decodable, Encodable}; -use alloy_consensus::BlockHeader; -use alloy_primitives::{Address, Sealable, B256}; +use crate::{ + BlockBody, BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeArbitrary, + MaybeSerde, +}; -use crate::{traits::BlockBody, BlockWithSenders, SealedBlock, SealedHeader}; +/// Helper trait that unifies all behaviour required by block to support full node operations. +pub trait FullBlock: + Block + alloy_rlp::Encodable + alloy_rlp::Decodable +{ +} + +impl FullBlock for T where + T: Block + + alloy_rlp::Encodable + + alloy_rlp::Decodable +{ +} + +/// Helper trait to access [`BlockBody::Transaction`] given a [`Block`]. +pub type BlockTx = <::Body as BlockBody>::Transaction; /// Abstraction of block data type. 
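A tiny sketch of the narrowed `Block` surface this module now targets; `roundtrip` is an invented helper that relies only on the `new` and `split` methods introduced below.

```rust
use reth_primitives_traits::Block;

// Symmetric construction and destruction, generic over any block type.
fn roundtrip<B: Block>(block: B) -> B {
    let (header, body) = block.split();
    B::new(header, body)
}
```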
+// todo: make sealable super-trait, depends on +// todo: make with senders extension trait, so block can be impl by block type already containing +// senders pub trait Block: - fmt::Debug + Send + + Sync + + Unpin + Clone + + Default + + fmt::Debug + PartialEq + Eq - + Default - + serde::Serialize - + for<'a> serde::Deserialize<'a> - + From<(Self::Header, Self::Body)> - + Into<(Self::Header, Self::Body)> + + InMemorySize + + MaybeSerde + + MaybeArbitrary + + Encodable + + Decodable { /// Header part of the block. - type Header: BlockHeader + Sealable; + type Header: BlockHeader; /// The block's body contains the transactions in the block. - type Body: BlockBody; + type Body: BlockBody; - /// A block and block hash. - type SealedBlock; + /// Create new block instance. + fn new(header: Self::Header, body: Self::Body) -> Self; - /// A block and addresses of senders of transactions in it. - type BlockWithSenders; - - /// Returns reference to [`BlockHeader`] type. + /// Returns reference to block header. fn header(&self) -> &Self::Header; - /// Returns reference to [`BlockBody`] type. + /// Returns reference to block body. fn body(&self) -> &Self::Body; - /// Calculate the header hash and seal the block so that it can't be changed. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn seal_slow(self) -> Self::SealedBlock; - - /// Seal the block with a known hash. - /// - /// WARNING: This method does not perform validation whether the hash is correct. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn seal(self, hash: B256) -> Self::SealedBlock; - - /// Expensive operation that recovers transaction signer. See - /// [`SealedBlockWithSenders`](reth_primitives::SealedBlockWithSenders). - fn senders(&self) -> Option> { - self.body().recover_signers() - } - - /// Transform into a [`BlockWithSenders`]. - /// - /// # Panics - /// - /// If the number of senders does not match the number of transactions in the block - /// and the signer recovery for one of the transactions fails. - /// - /// Note: this is expected to be called with blocks read from disk. - #[track_caller] - fn with_senders_unchecked(self, senders: Vec
<Address>) -> Self::BlockWithSenders {
-        self.try_with_senders_unchecked(senders).expect("stored block is valid")
-    }
-
-    /// Transform into a [`BlockWithSenders`] using the given senders.
-    ///
-    /// If the number of senders does not match the number of transactions in the block, this
-    /// falls back to manual recovery, but _without ensuring that the signature has a low `s`
-    /// value_. See also [`TransactionSigned::recover_signer_unchecked`]
-    ///
-    /// Returns an error if a signature is invalid.
-    // todo: can be default impl if block with senders type is made generic over block and migrated
-    // to alloy
-    #[track_caller]
-    fn try_with_senders_unchecked(
-        self,
-        senders: Vec<Address>
, - ) -> Result; - - /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained - /// transactions. - /// - /// Returns `None` if a transaction is invalid. - // todo: can be default impl if sealed block type is made generic over header and body and - // migrated to alloy - fn with_recovered_senders(self) -> Option; - - /// Calculates a heuristic for the in-memory size of the [`Block`]. - fn size(&self) -> usize; + /// Splits the block into its header and body. + fn split(self) -> (Self::Header, Self::Body); } diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index d40abdd64ba..e927ed3a7df 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -1,9 +1,5 @@ //! Ethereum protocol-related constants -use alloy_consensus::EMPTY_ROOT_HASH; -use alloy_primitives::{address, b256, Address, B256, U256}; -use core::time::Duration; - /// Gas units, for example [`GIGAGAS`]. pub mod gas_units; pub use gas_units::{GIGAGAS, KILOGAS, MEGAGAS}; @@ -11,133 +7,9 @@ pub use gas_units::{GIGAGAS, KILOGAS, MEGAGAS}; /// The client version: `reth/v{major}.{minor}.{patch}` pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION")); -/// The first four bytes of the call data for a function call specifies the function to be called. -pub const SELECTOR_LEN: usize = 4; - -/// Maximum extra data size in a block after genesis -pub const MAXIMUM_EXTRA_DATA_SIZE: usize = 32; - -/// An EPOCH is a series of 32 slots. -pub const EPOCH_SLOTS: u64 = 32; - -/// The duration of a slot in seconds. -/// -/// This is the time period of 12 seconds in which a randomly chosen validator has time to propose a -/// block. -pub const SLOT_DURATION: Duration = Duration::from_secs(12); - -/// An EPOCH is a series of 32 slots (~6.4min). -pub const EPOCH_DURATION: Duration = Duration::from_secs(12 * EPOCH_SLOTS); - -/// The default block nonce in the beacon consensus -pub const BEACON_NONCE: u64 = 0u64; - -/// The default Ethereum block gas limit. -pub const ETHEREUM_BLOCK_GAS_LIMIT: u64 = 30_000_000; - -/// The minimum tx fee below which the txpool will reject the transaction. -/// -/// Configured to `7` WEI which is the lowest possible value of base fee under mainnet EIP-1559 -/// parameters. `BASE_FEE_MAX_CHANGE_DENOMINATOR` -/// is `8`, or 12.5%. Once the base fee has dropped to `7` WEI it cannot decrease further because -/// 12.5% of 7 is less than 1. -/// -/// Note that min base fee under different 1559 parameterizations may differ, but there's no -/// significant harm in leaving this setting as is. -pub const MIN_PROTOCOL_BASE_FEE: u64 = 7; - -/// Same as [`MIN_PROTOCOL_BASE_FEE`] but as a U256. -pub const MIN_PROTOCOL_BASE_FEE_U256: U256 = U256::from_limbs([7u64, 0, 0, 0]); - -/// Initial base fee as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) -pub const EIP1559_INITIAL_BASE_FEE: u64 = 1_000_000_000; - -/// Base fee max change denominator as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) -pub const EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u64 = 8; - -/// Elasticity multiplier as defined in [EIP-1559](https://eips.ethereum.org/EIPS/eip-1559) -pub const EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u64 = 2; - /// Minimum gas limit allowed for transactions. 
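The 7-wei floor described in the doc comment above comes from integer division; a self-contained illustration (the numbers mirror the mainnet EIP-1559 parameters):

```rust
// Under mainnet EIP-1559 parameters the base fee may drop by at most
// base_fee / 8 (12.5%) per block, computed with integer division.
fn main() {
    const DENOMINATOR: u64 = 8;
    // At 8 wei the fee can still fall by one...
    assert_eq!(8u64 / DENOMINATOR, 1);
    // ...but 7 / 8 == 0, so from 7 wei no further decrease is possible.
    assert_eq!(7u64 / DENOMINATOR, 0);
}
```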
pub const MINIMUM_GAS_LIMIT: u64 = 5000; -/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -pub const OP_MAINNET_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50; - -/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism Canyon -/// hardfork. -pub const OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250; - -/// Base fee max change denominator for Optimism Mainnet as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -pub const OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6; - -/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -pub const OP_SEPOLIA_EIP1559_DEFAULT_BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 50; - -/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism Canyon -/// hardfork. -pub const OP_SEPOLIA_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON: u128 = 250; - -/// Base fee max change denominator for Optimism Sepolia as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -pub const OP_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 6; - -/// Base fee max change denominator for Base Sepolia as defined in the Optimism -/// [transaction costs](https://community.optimism.io/docs/developers/build/differences/#transaction-costs) doc. -pub const BASE_SEPOLIA_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER: u128 = 10; - -/// Multiplier for converting gwei to wei. -pub const GWEI_TO_WEI: u64 = 1_000_000_000; - -/// Multiplier for converting finney (milliether) to wei. -pub const FINNEY_TO_WEI: u128 = (GWEI_TO_WEI as u128) * 1_000_000; - -/// Multiplier for converting ether to wei. 
-pub const ETH_TO_WEI: u128 = FINNEY_TO_WEI * 1000; - -/// The Ethereum mainnet genesis hash: -/// `0x0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3` -pub const MAINNET_GENESIS_HASH: B256 = - b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); - -/// Sepolia genesis hash: `0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9` -pub const SEPOLIA_GENESIS_HASH: B256 = - b256!("25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9"); - -/// Holesky genesis hash: `0xb5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4` -pub const HOLESKY_GENESIS_HASH: B256 = - b256!("b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4"); - -/// Testnet genesis hash: `0x2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c` -pub const DEV_GENESIS_HASH: B256 = - b256!("2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c"); - -/// Keccak256 over empty array: `0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470` -pub const KECCAK_EMPTY: B256 = - b256!("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"); - -/// Ommer root of empty list: `0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347` -pub const EMPTY_OMMER_ROOT_HASH: B256 = - b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"); - -/// From address from Optimism system txs: `0xdeaddeaddeaddeaddeaddeaddeaddeaddead0001` -pub const OP_SYSTEM_TX_FROM_ADDR: Address = address!("deaddeaddeaddeaddeaddeaddeaddeaddead0001"); - -/// To address from Optimism system txs: `0x4200000000000000000000000000000000000015` -pub const OP_SYSTEM_TX_TO_ADDR: Address = address!("4200000000000000000000000000000000000015"); - -/// Transactions root of empty receipts set. -pub const EMPTY_RECEIPTS: B256 = EMPTY_ROOT_HASH; - -/// Transactions root of empty transactions set. -pub const EMPTY_TRANSACTIONS: B256 = EMPTY_ROOT_HASH; - -/// Withdrawals root of empty withdrawals set. -pub const EMPTY_WITHDRAWALS: B256 = EMPTY_ROOT_HASH; - /// The number of blocks to unwind during a reorg that already became a part of canonical chain. /// /// In reality, the node can end up in this particular situation very rarely. It would happen only @@ -147,22 +19,3 @@ pub const EMPTY_WITHDRAWALS: B256 = EMPTY_ROOT_HASH; /// Unwind depth of `3` blocks significantly reduces the chance that the reorged block is kept in /// the database. pub const BEACON_CONSENSUS_REORG_UNWIND_DEPTH: u64 = 3; - -/// Max seconds from current time allowed for blocks, before they're considered future blocks. -/// -/// This is only used when checking whether or not the timestamp for pre-merge blocks is in the -/// future. -/// -/// See: -/// -pub const ALLOWED_FUTURE_BLOCK_TIME_SECONDS: u64 = 15; - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn min_protocol_sanity() { - assert_eq!(MIN_PROTOCOL_BASE_FEE_U256.to::(), MIN_PROTOCOL_BASE_FEE); - } -} diff --git a/crates/primitives-traits/src/encoded.rs b/crates/primitives-traits/src/encoded.rs new file mode 100644 index 00000000000..885031af1b6 --- /dev/null +++ b/crates/primitives-traits/src/encoded.rs @@ -0,0 +1,63 @@ +use alloy_eips::eip2718::Encodable2718; +use alloy_primitives::Bytes; + +/// Generic wrapper with encoded Bytes, such as transaction data. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct WithEncoded(Bytes, pub T); + +impl From<(Bytes, T)> for WithEncoded { + fn from(value: (Bytes, T)) -> Self { + Self(value.0, value.1) + } +} + +impl WithEncoded { + /// Wraps the value with the bytes. + pub const fn new(bytes: Bytes, value: T) -> Self { + Self(bytes, value) + } + + /// Get the encoded bytes + pub const fn encoded_bytes(&self) -> &Bytes { + &self.0 + } + + /// Get the underlying value + pub const fn value(&self) -> &T { + &self.1 + } + + /// Returns ownership of the underlying value. + pub fn into_value(self) -> T { + self.1 + } + + /// Transform the value + pub fn transform>(self) -> WithEncoded { + WithEncoded(self.0, self.1.into()) + } + + /// Split the wrapper into [`Bytes`] and value tuple + pub fn split(self) -> (Bytes, T) { + (self.0, self.1) + } + + /// Maps the inner value to a new value using the given function. + pub fn map U>(self, op: F) -> WithEncoded { + WithEncoded(self.0, op(self.1)) + } +} + +impl WithEncoded { + /// Wraps the value with the [`Encodable2718::encoded_2718`] bytes. + pub fn from_2718_encodable(value: T) -> Self { + Self(value.encoded_2718().into(), value) + } +} + +impl WithEncoded> { + /// returns `None` if the inner value is `None`, otherwise returns `Some(WithEncoded)`. + pub fn transpose(self) -> Option> { + self.1.map(|v| WithEncoded(self.0, v)) + } +} diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index fa9c3324535..abcdf4ee0cc 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ b/crates/primitives-traits/src/header/mod.rs @@ -1,5 +1,5 @@ mod sealed; -pub use sealed::SealedHeader; +pub use sealed::{Header, SealedHeader}; mod error; pub use error::HeaderError; @@ -7,73 +7,8 @@ pub use error::HeaderError; #[cfg(any(test, feature = "test-utils", feature = "arbitrary"))] pub mod test_utils; -pub use alloy_consensus::Header; - -use alloy_primitives::{Address, BlockNumber, B256, U256}; - /// Bincode-compatible header type serde implementations. 
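A usage sketch for the new wrapper (bytes and values invented), assuming the `WithEncoded` re-export this PR adds to the crate root: the point is to keep the raw encoding attached to the decoded value so it never has to be recomputed downstream.

```rust
use alloy_primitives::Bytes;
use reth_primitives_traits::WithEncoded;

fn main() {
    let with_encoded = WithEncoded::new(Bytes::from_static(b"\x01\x02"), 42u64);
    assert_eq!(with_encoded.encoded_bytes().len(), 2);
    assert_eq!(*with_encoded.value(), 42);

    // `map` transforms the inner value while keeping the bytes attached.
    let mapped: WithEncoded<String> = with_encoded.map(|v| v.to_string());
    let (bytes, value) = mapped.split();
    assert_eq!(value, "42");
    assert_eq!(bytes, Bytes::from_static(b"\x01\x02"));
}
```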
#[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { pub use super::sealed::serde_bincode_compat::SealedHeader; } - -/// Trait for extracting specific Ethereum block data from a header -pub trait BlockHeader { - /// Retrieves the beneficiary (miner) of the block - fn beneficiary(&self) -> Address; - - /// Retrieves the difficulty of the block - fn difficulty(&self) -> U256; - - /// Retrieves the block number - fn number(&self) -> BlockNumber; - - /// Retrieves the gas limit of the block - fn gas_limit(&self) -> u64; - - /// Retrieves the timestamp of the block - fn timestamp(&self) -> u64; - - /// Retrieves the mix hash of the block - fn mix_hash(&self) -> B256; - - /// Retrieves the base fee per gas of the block, if available - fn base_fee_per_gas(&self) -> Option; - - /// Retrieves the excess blob gas of the block, if available - fn excess_blob_gas(&self) -> Option; -} - -impl BlockHeader for Header { - fn beneficiary(&self) -> Address { - self.beneficiary - } - - fn difficulty(&self) -> U256 { - self.difficulty - } - - fn number(&self) -> BlockNumber { - self.number - } - - fn gas_limit(&self) -> u64 { - self.gas_limit - } - - fn timestamp(&self) -> u64 { - self.timestamp - } - - fn mix_hash(&self) -> B256 { - self.mix_hash - } - - fn base_fee_per_gas(&self) -> Option { - self.base_fee_per_gas - } - - fn excess_blob_gas(&self) -> Option { - self.excess_blob_gas - } -} diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 7119a37e742..61b021a0879 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,19 +1,18 @@ -use super::Header; -use alloy_eips::BlockNumHash; +use crate::InMemorySize; +pub use alloy_consensus::Header; +use alloy_consensus::Sealed; +use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; use alloy_primitives::{keccak256, BlockHash, Sealable}; -#[cfg(any(test, feature = "test-utils"))] -use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; use core::mem; use derive_more::{AsRef, Deref}; -use reth_codecs::add_arbitrary_tests; -use serde::{Deserialize, Serialize}; /// A [`Header`] that is sealed at a precalculated hash, use [`SealedHeader::unseal()`] if you want /// to modify header. -#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] -#[add_arbitrary_tests(rlp)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] pub struct SealedHeader { /// Locked Header hash. hash: BlockHash, @@ -29,12 +28,10 @@ impl SealedHeader { pub const fn new(header: H, hash: BlockHash) -> Self { Self { header, hash } } -} -impl SealedHeader { /// Returns the sealed Header fields. #[inline] - pub const fn header(&self) -> &Header { + pub const fn header(&self) -> &H { &self.header } @@ -45,32 +42,47 @@ impl SealedHeader { } /// Extract raw header that can be modified. - pub fn unseal(self) -> Header { + pub fn unseal(self) -> H { self.header } /// This is the inverse of [`Header::seal_slow`] which returns the raw header and hash. - pub fn split(self) -> (Header, BlockHash) { + pub fn split(self) -> (H, BlockHash) { (self.header, self.hash) } +} +impl SealedHeader { + /// Hashes the header and creates a sealed header. 
+ pub fn seal(header: H) -> Self { + let hash = header.hash_slow(); + Self::new(header, hash) + } +} + +impl SealedHeader { /// Return the number hash tuple. pub fn num_hash(&self) -> BlockNumHash { - BlockNumHash::new(self.number, self.hash) + BlockNumHash::new(self.number(), self.hash) } + /// Return a [`BlockWithParent`] for this header. + pub fn block_with_parent(&self) -> BlockWithParent { + BlockWithParent { parent: self.parent_hash(), block: self.num_hash() } + } +} + +impl InMemorySize for SealedHeader { /// Calculates a heuristic for the in-memory size of the [`SealedHeader`]. #[inline] - pub fn size(&self) -> usize { + fn size(&self) -> usize { self.header.size() + mem::size_of::() } } -impl Default for SealedHeader { +impl Default for SealedHeader { fn default() -> Self { - let sealed = Header::default().seal_slow(); - let (header, hash) = sealed.into_parts(); - Self { header, hash } + Self::seal(H::default()) } } @@ -117,40 +129,48 @@ impl SealedHeader { } /// Updates the block number. - pub fn set_block_number(&mut self, number: BlockNumber) { + pub fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) { self.header.number = number; } /// Updates the block state root. - pub fn set_state_root(&mut self, state_root: B256) { + pub fn set_state_root(&mut self, state_root: alloy_primitives::B256) { self.header.state_root = state_root; } /// Updates the block difficulty. - pub fn set_difficulty(&mut self, difficulty: U256) { + pub fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) { self.header.difficulty = difficulty; } } +impl From> for Sealed { + fn from(value: SealedHeader) -> Self { + Self::new_unchecked(value.header, value.hash) + } +} + #[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for SealedHeader { +impl<'a, H> arbitrary::Arbitrary<'a> for SealedHeader +where + H: for<'b> arbitrary::Arbitrary<'b> + Sealable, +{ fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let header = Header::arbitrary(u)?; + let header = H::arbitrary(u)?; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Self::new(header, seal)) + Ok(Self::seal(header)) } } /// Bincode-compatible [`SealedHeader`] serde implementation. #[cfg(feature = "serde-bincode-compat")] pub(super) mod serde_bincode_compat { - use alloy_consensus::serde_bincode_compat::Header; use alloy_primitives::BlockHash; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; + use crate::serde_bincode_compat::SerdeBincodeCompat; + /// Bincode-compatible [`super::SealedHeader`] serde implementation. 
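How the now-generic sealing flow is expected to be used, sketched with alloy's `Header`; the assertions rely only on the accessors visible in this diff plus the pre-existing `hash()` getter.

```rust
use alloy_consensus::Header;
use reth_primitives_traits::SealedHeader;

// Seal once, then reuse the cached hash instead of re-hashing the header.
fn main() {
    let header = Header { number: 100, ..Default::default() };
    let sealed = SealedHeader::seal(header);

    let num_hash = sealed.num_hash();
    assert_eq!(num_hash.number, 100);
    assert_eq!(num_hash.hash, sealed.hash());

    // `block_with_parent` additionally carries the parent hash.
    assert_eq!(sealed.block_with_parent().parent, sealed.parent_hash);
}
```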
/// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: @@ -166,20 +186,21 @@ pub(super) mod serde_bincode_compat { /// header: SealedHeader, /// } /// ``` - #[derive(Debug, Serialize, Deserialize)] - pub struct SealedHeader<'a> { + #[derive(derive_more::Debug, Serialize, Deserialize)] + #[debug(bound(H::BincodeRepr<'a>: core::fmt::Debug))] + pub struct SealedHeader<'a, H: SerdeBincodeCompat = super::Header> { hash: BlockHash, - header: Header<'a>, + header: H::BincodeRepr<'a>, } - impl<'a> From<&'a super::SealedHeader> for SealedHeader<'a> { - fn from(value: &'a super::SealedHeader) -> Self { - Self { hash: value.hash, header: Header::from(&value.header) } + impl<'a, H: SerdeBincodeCompat> From<&'a super::SealedHeader> for SealedHeader<'a, H> { + fn from(value: &'a super::SealedHeader) -> Self { + Self { hash: value.hash, header: (&value.header).into() } } } - impl<'a> From> for super::SealedHeader { - fn from(value: SealedHeader<'a>) -> Self { + impl<'a, H: SerdeBincodeCompat> From> for super::SealedHeader { + fn from(value: SealedHeader<'a, H>) -> Self { Self { hash: value.hash, header: value.header.into() } } } @@ -202,13 +223,14 @@ pub(super) mod serde_bincode_compat { } } + impl SerdeBincodeCompat for super::SealedHeader { + type BincodeRepr<'a> = SealedHeader<'a, H>; + } #[cfg(test)] mod tests { use super::super::{serde_bincode_compat, SealedHeader}; - use arbitrary::Arbitrary; use rand::Rng; - use reth_testing_utils::generators; use serde::{Deserialize, Serialize}; use serde_with::serde_as; @@ -222,7 +244,7 @@ pub(super) mod serde_bincode_compat { } let mut bytes = [0u8; 1024]; - generators::rng().fill(bytes.as_mut_slice()); + rand::thread_rng().fill(&mut bytes[..]); let data = Data { transaction: SealedHeader::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) .unwrap(), diff --git a/crates/primitives-traits/src/header/test_utils.rs b/crates/primitives-traits/src/header/test_utils.rs index ef5c0d02536..0e79f6cb462 100644 --- a/crates/primitives-traits/src/header/test_utils.rs +++ b/crates/primitives-traits/src/header/test_utils.rs @@ -1,6 +1,6 @@ //! Test utilities to generate random valid headers. -use crate::Header; +use alloy_consensus::Header; use alloy_primitives::B256; use proptest::{arbitrary::any, prop_compose}; use proptest_arbitrary_interop::arb; @@ -37,7 +37,7 @@ pub const fn generate_valid_header( } // Placeholder for future EIP adjustments - header.requests_root = None; + header.requests_hash = None; header } diff --git a/crates/primitives-traits/src/integer_list.rs b/crates/primitives-traits/src/integer_list.rs deleted file mode 100644 index 570c96c9fda..00000000000 --- a/crates/primitives-traits/src/integer_list.rs +++ /dev/null @@ -1,190 +0,0 @@ -use alloc::vec::Vec; -use bytes::BufMut; -use core::fmt; -use derive_more::Deref; -use roaring::RoaringTreemap; -use serde::{ - de::{SeqAccess, Visitor}, - ser::SerializeSeq, - Deserialize, Deserializer, Serialize, Serializer, -}; - -/// Uses Roaring Bitmaps to hold a list of integers. It provides really good compression with the -/// capability to access its elements without decoding it. -#[derive(Clone, PartialEq, Default, Deref)] -pub struct IntegerList(pub RoaringTreemap); - -impl fmt::Debug for IntegerList { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("IntegerList")?; - f.debug_list().entries(self.0.iter()).finish() - } -} - -impl IntegerList { - /// Creates a new empty `IntegerList`. 
- pub fn empty() -> Self { - Self(RoaringTreemap::new()) - } - - /// Creates an `IntegerList` from a list of integers. - /// - /// Returns an error if the list is not pre-sorted. - pub fn new(list: impl IntoIterator) -> Result { - RoaringTreemap::from_sorted_iter(list) - .map(Self) - .map_err(|_| IntegerListError::UnsortedInput) - } - - // Creates an IntegerList from a pre-sorted list of integers. - /// - /// # Panics - /// - /// Panics if the list is not pre-sorted. - #[inline] - #[track_caller] - pub fn new_pre_sorted(list: impl IntoIterator) -> Self { - Self::new(list).expect("IntegerList must be pre-sorted and non-empty") - } - - /// Appends a list of integers to the current list. - pub fn append(&mut self, list: impl IntoIterator) -> Result { - self.0.append(list).map_err(|_| IntegerListError::UnsortedInput) - } - - /// Pushes a new integer to the list. - pub fn push(&mut self, value: u64) -> Result<(), IntegerListError> { - if self.0.push(value) { - Ok(()) - } else { - Err(IntegerListError::UnsortedInput) - } - } - - /// Clears the list. - pub fn clear(&mut self) { - self.0.clear(); - } - - /// Serializes a [`IntegerList`] into a sequence of bytes. - pub fn to_bytes(&self) -> Vec { - let mut vec = Vec::with_capacity(self.0.serialized_size()); - self.0.serialize_into(&mut vec).expect("not able to encode IntegerList"); - vec - } - - /// Serializes a [`IntegerList`] into a sequence of bytes. - pub fn to_mut_bytes(&self, buf: &mut B) { - self.0.serialize_into(buf.writer()).unwrap(); - } - - /// Deserializes a sequence of bytes into a proper [`IntegerList`]. - pub fn from_bytes(data: &[u8]) -> Result { - Ok(Self( - RoaringTreemap::deserialize_from(data) - .map_err(|_| IntegerListError::FailedToDeserialize)?, - )) - } -} - -impl Serialize for IntegerList { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let mut seq = serializer.serialize_seq(Some(self.len() as usize))?; - for e in &self.0 { - seq.serialize_element(&e)?; - } - seq.end() - } -} - -struct IntegerListVisitor; -impl<'de> Visitor<'de> for IntegerListVisitor { - type Value = IntegerList; - - fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("a usize array") - } - - fn visit_seq(self, mut seq: E) -> Result - where - E: SeqAccess<'de>, - { - let mut list = IntegerList::empty(); - while let Some(item) = seq.next_element()? { - list.push(item).map_err(serde::de::Error::custom)?; - } - Ok(list) - } -} - -impl<'de> Deserialize<'de> for IntegerList { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_byte_buf(IntegerListVisitor) - } -} - -#[cfg(any(test, feature = "arbitrary"))] -use arbitrary::{Arbitrary, Unstructured}; - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> Arbitrary<'a> for IntegerList { - fn arbitrary(u: &mut Unstructured<'a>) -> Result { - let mut nums: Vec = Vec::arbitrary(u)?; - nums.sort_unstable(); - Self::new(nums).map_err(|_| arbitrary::Error::IncorrectFormat) - } -} - -/// Primitives error type. -#[derive(Debug, derive_more::Display, derive_more::Error)] -pub enum IntegerListError { - /// The provided input is unsorted. - #[display("the provided input is unsorted")] - UnsortedInput, - /// Failed to deserialize data into type. 
- #[display("failed to deserialize data into type")] - FailedToDeserialize, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn empty_list() { - assert_eq!(IntegerList::empty().len(), 0); - assert_eq!(IntegerList::new_pre_sorted(std::iter::empty()).len(), 0); - } - - #[test] - fn test_integer_list() { - let original_list = [1, 2, 3]; - let ef_list = IntegerList::new(original_list).unwrap(); - assert_eq!(ef_list.iter().collect::>(), original_list); - } - - #[test] - fn test_integer_list_serialization() { - let original_list = [1, 2, 3]; - let ef_list = IntegerList::new(original_list).unwrap(); - - let blist = ef_list.to_bytes(); - assert_eq!(IntegerList::from_bytes(&blist).unwrap(), ef_list) - } - - #[test] - fn serde_serialize_deserialize() { - let original_list = [1, 2, 3]; - let ef_list = IntegerList::new(original_list).unwrap(); - - let serde_out = serde_json::to_string(&ef_list).unwrap(); - let serde_ef_list = serde_json::from_str::(&serde_out).unwrap(); - assert_eq!(serde_ef_list, ef_list); - } -} diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index dd68607f591..04d02be0b7d 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -21,25 +21,32 @@ pub mod account; pub use account::{Account, Bytecode}; pub mod receipt; -pub use receipt::Receipt; +pub use receipt::{FullReceipt, Receipt}; pub mod transaction; -pub use transaction::Transaction; +pub use transaction::{ + execute::FillTxEnv, + signed::{FullSignedTx, SignedTransaction}, + tx_type::{FullTxType, TxType}, + FullTransaction, Transaction, +}; -mod integer_list; -pub use integer_list::{IntegerList, IntegerListError}; - -pub mod request; -pub use request::{Request, Requests}; +pub mod block; +pub use block::{ + body::{BlockBody, FullBlockBody}, + header::{BlockHeader, FullBlockHeader}, + Block, FullBlock, +}; +mod encoded; mod withdrawal; -pub use withdrawal::{Withdrawal, Withdrawals}; +pub use encoded::WithEncoded; mod error; pub use error::{GotExpected, GotExpectedBoxed}; mod log; -pub use log::{logs_bloom, Log, LogData}; +pub use alloy_primitives::{logs_bloom, Log, LogData}; mod storage; pub use storage::StorageEntry; @@ -48,7 +55,7 @@ pub use storage::StorageEntry; pub mod header; #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] pub use header::test_utils; -pub use header::{BlockHeader, Header, HeaderError, SealedHeader}; +pub use header::{Header, HeaderError, SealedHeader}; /// Bincode-compatible serde implementations for common abstracted types in Reth. /// @@ -58,6 +65,64 @@ pub use header::{BlockHeader, Header, HeaderError, SealedHeader}; /// /// Read more: #[cfg(feature = "serde-bincode-compat")] -pub mod serde_bincode_compat { - pub use super::header::{serde_bincode_compat as header, serde_bincode_compat::*}; -} +pub mod serde_bincode_compat; + +/// Heuristic size trait +pub mod size; +pub use size::InMemorySize; + +/// Node traits +pub mod node; +pub use node::{BodyTy, FullNodePrimitives, HeaderTy, NodePrimitives, ReceiptTy}; + +/// Helper trait that requires arbitrary implementation if the feature is enabled. +#[cfg(any(feature = "test-utils", feature = "arbitrary"))] +pub trait MaybeArbitrary: for<'a> arbitrary::Arbitrary<'a> {} +/// Helper trait that requires arbitrary implementation if the feature is enabled. 
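The `Maybe*` trick in miniature, with an invented `MyStoredValue` type: one bound expresses "serde if and only if the feature is on", and the blanket impls keep it satisfied in both builds (this assumes the consuming crate's `serde` feature is wired through to this crate's `serde` feature).

```rust
use reth_primitives_traits::MaybeSerde;

// `MyStoredValue` is a made-up example type. The derive is gated exactly like
// the trait bound, so both configurations compile.
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
struct MyStoredValue {
    nonce: u64,
}

// One bound, valid with and without the `serde` feature: when the feature is
// on, the blanket impl demands `Serialize + DeserializeOwned`; when it is
// off, the bound is vacuous.
fn store<T: MaybeSerde>(_value: &T) {}

fn main() {
    store(&MyStoredValue { nonce: 1 });
}
```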
+#[cfg(not(any(feature = "test-utils", feature = "arbitrary")))] +pub trait MaybeArbitrary {} + +#[cfg(any(feature = "test-utils", feature = "arbitrary"))] +impl MaybeArbitrary for T where T: for<'a> arbitrary::Arbitrary<'a> {} +#[cfg(not(any(feature = "test-utils", feature = "arbitrary")))] +impl MaybeArbitrary for T {} + +/// Helper trait that requires de-/serialize implementation since `serde` feature is enabled. +#[cfg(feature = "serde")] +pub trait MaybeSerde: serde::Serialize + for<'de> serde::Deserialize<'de> {} +/// Noop. Helper trait that would require de-/serialize implementation if `serde` feature were +/// enabled. +#[cfg(not(feature = "serde"))] +pub trait MaybeSerde {} + +#[cfg(feature = "serde")] +impl MaybeSerde for T where T: serde::Serialize + for<'de> serde::Deserialize<'de> {} +#[cfg(not(feature = "serde"))] +impl MaybeSerde for T {} + +/// Helper trait that requires database encoding implementation since `reth-codec` feature is +/// enabled. +#[cfg(feature = "reth-codec")] +pub trait MaybeCompact: reth_codecs::Compact {} +/// Noop. Helper trait that would require database encoding implementation if `reth-codec` feature +/// were enabled. +#[cfg(not(feature = "reth-codec"))] +pub trait MaybeCompact {} + +#[cfg(feature = "reth-codec")] +impl MaybeCompact for T where T: reth_codecs::Compact {} +#[cfg(not(feature = "reth-codec"))] +impl MaybeCompact for T {} + +/// Helper trait that requires serde bincode compatibility implementation. +#[cfg(feature = "serde-bincode-compat")] +pub trait MaybeSerdeBincodeCompat: crate::serde_bincode_compat::SerdeBincodeCompat {} +/// Noop. Helper trait that would require serde bincode compatibility implementation if +/// `serde-bincode-compat` feature were enabled. +#[cfg(not(feature = "serde-bincode-compat"))] +pub trait MaybeSerdeBincodeCompat {} + +#[cfg(feature = "serde-bincode-compat")] +impl MaybeSerdeBincodeCompat for T where T: crate::serde_bincode_compat::SerdeBincodeCompat {} +#[cfg(not(feature = "serde-bincode-compat"))] +impl MaybeSerdeBincodeCompat for T {} diff --git a/crates/primitives-traits/src/log.rs b/crates/primitives-traits/src/log.rs index 6e6b4733518..0b445aeeba9 100644 --- a/crates/primitives-traits/src/log.rs +++ b/crates/primitives-traits/src/log.rs @@ -1,18 +1,3 @@ -use alloy_primitives::Bloom; -pub use alloy_primitives::{Log, LogData}; - -/// Calculate receipt logs bloom. -pub fn logs_bloom<'a>(logs: impl IntoIterator) -> Bloom { - let mut bloom = Bloom::ZERO; - for log in logs { - bloom.m3_2048(log.address.as_slice()); - for topic in log.topics() { - bloom.m3_2048(topic.as_slice()); - } - } - bloom -} - #[cfg(test)] mod tests { use alloy_primitives::{Address, Bytes, Log as AlloyLog, B256}; diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs new file mode 100644 index 00000000000..5b3691d2fdf --- /dev/null +++ b/crates/primitives-traits/src/node.rs @@ -0,0 +1,73 @@ +use crate::{ + Block, BlockBody, BlockHeader, FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, + FullSignedTx, FullTxType, Receipt, SignedTransaction, TxType, +}; +use core::fmt; + +/// Configures all the primitive types of the node. +pub trait NodePrimitives: + Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static +{ + /// Block primitive. + type Block: Block
<Header = Self::BlockHeader, Body = Self::BlockBody>;
+    /// Block header primitive.
+    type BlockHeader: BlockHeader;
+    /// Block body primitive.
+    type BlockBody: BlockBody;
+    /// Signed version of the transaction type.
+    type SignedTx: SignedTransaction + 'static;
+    /// Transaction envelope type ID.
+    type TxType: TxType + 'static;
+    /// A receipt.
+    type Receipt: Receipt;
+}
+/// Helper trait that sets trait bounds on [`NodePrimitives`].
+pub trait FullNodePrimitives
+where
+    Self: NodePrimitives<
+            Block: FullBlock<Header = Self::BlockHeader, Body = Self::BlockBody>,
+            BlockHeader: FullBlockHeader,
+            BlockBody: FullBlockBody,
+            SignedTx: FullSignedTx,
+            TxType: FullTxType,
+            Receipt: FullReceipt,
+        > + Send
+        + Sync
+        + Unpin
+        + Clone
+        + Default
+        + fmt::Debug
+        + PartialEq
+        + Eq
+        + 'static,
+{
+}
+
+impl<T> FullNodePrimitives for T where
+    T: NodePrimitives<
+            Block: FullBlock<Header = T::BlockHeader, Body = T::BlockBody>
, + BlockHeader: FullBlockHeader, + BlockBody: FullBlockBody, + SignedTx: FullSignedTx, + TxType: FullTxType, + Receipt: FullReceipt, + > + Send + + Sync + + Unpin + + Clone + + Default + + fmt::Debug + + PartialEq + + Eq + + 'static +{ +} + +/// Helper adapter type for accessing [`NodePrimitives`] block header types. +pub type HeaderTy = ::BlockHeader; + +/// Helper adapter type for accessing [`NodePrimitives`] block body types. +pub type BodyTy = ::BlockBody; + +/// Helper adapter type for accessing [`NodePrimitives`] receipt types. +pub type ReceiptTy = ::Receipt; diff --git a/crates/primitives-traits/src/receipt.rs b/crates/primitives-traits/src/receipt.rs index e2d19e4d4ff..1b5d2b698c8 100644 --- a/crates/primitives-traits/src/receipt.rs +++ b/crates/primitives-traits/src/receipt.rs @@ -1,29 +1,55 @@ //! Receipt abstraction -use alloc::fmt; +use alloc::vec::Vec; +use core::fmt; -use alloy_consensus::TxReceipt; -use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; +use alloy_consensus::{ + Eip2718EncodableReceipt, RlpDecodableReceipt, RlpEncodableReceipt, TxReceipt, Typed2718, +}; +use alloy_primitives::B256; + +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; /// Helper trait that unifies all behaviour required by receipt to support full node operations. -pub trait FullReceipt: Receipt + Compact {} +pub trait FullReceipt: Receipt + MaybeCompact {} -impl FullReceipt for T where T: Receipt + Compact {} +impl FullReceipt for T where T: ReceiptExt + MaybeCompact {} /// Abstraction of a receipt. +#[auto_impl::auto_impl(&, Arc)] pub trait Receipt: - TxReceipt + Send + + Sync + + Unpin + Clone - + fmt::Debug - + PartialEq - + Eq + Default - + alloy_rlp::Encodable - + alloy_rlp::Decodable - + Serialize - + for<'de> Deserialize<'de> + + fmt::Debug + + TxReceipt + + RlpEncodableReceipt + + RlpDecodableReceipt + + Eip2718EncodableReceipt + + Typed2718 + + MaybeSerde + + InMemorySize + + MaybeArbitrary +{ +} + +/// Extension if [`Receipt`] used in block execution. +pub trait ReceiptExt: Receipt { + /// Calculates the receipts root of the given receipts. + fn receipts_root(receipts: &[&Self]) -> B256; +} + +/// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). +pub fn gas_spent_by_transactions(receipts: I) -> Vec<(u64, u64)> +where + I: IntoIterator, + T: TxReceipt, { - /// Returns transaction type. - fn tx_type(&self) -> u8; + receipts + .into_iter() + .enumerate() + .map(|(id, receipt)| (id as u64, receipt.cumulative_gas_used() as u64)) + .collect() } diff --git a/crates/primitives-traits/src/request.rs b/crates/primitives-traits/src/request.rs deleted file mode 100644 index c08af3fd622..00000000000 --- a/crates/primitives-traits/src/request.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! EIP-7685 requests. - -use alloc::vec::Vec; -pub use alloy_consensus::Request; -use alloy_eips::eip7685::{Decodable7685, Encodable7685}; -use alloy_rlp::{Decodable, Encodable}; -use derive_more::{Deref, DerefMut, From, IntoIterator}; -use reth_codecs::{add_arbitrary_tests, Compact}; -use revm_primitives::Bytes; -use serde::{Deserialize, Serialize}; - -/// A list of EIP-7685 requests. 
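A sketch of the free helper's semantics, using alloy's plain `Receipt` as a stand-in `TxReceipt` implementation; note the second tuple element is the cumulative gas up to each transaction index, not per-transaction gas (per-transaction gas is the difference of neighbours).

```rust
use alloy_consensus::Receipt as AlloyReceipt;
use reth_primitives_traits::receipt::gas_spent_by_transactions;

fn main() {
    let receipts = vec![
        AlloyReceipt { cumulative_gas_used: 21_000, ..Default::default() },
        AlloyReceipt { cumulative_gas_used: 63_000, ..Default::default() },
    ];
    // (transaction index, cumulative gas used), straight from each receipt.
    assert_eq!(gas_spent_by_transactions(receipts), vec![(0, 21_000), (1, 63_000)]);
}
```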
-#[derive( - Debug, - Clone, - PartialEq, - Eq, - Default, - Hash, - Deref, - DerefMut, - From, - IntoIterator, - Serialize, - Deserialize, - Compact, -)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] -pub struct Requests(pub Vec); - -impl Encodable for Requests { - fn encode(&self, out: &mut dyn bytes::BufMut) { - let mut h = alloy_rlp::Header { list: true, payload_length: 0 }; - - let mut encoded = Vec::new(); - for req in &self.0 { - let encoded_req = req.encoded_7685(); - h.payload_length += encoded_req.len(); - encoded.push(Bytes::from(encoded_req)); - } - - h.encode(out); - for req in encoded { - req.encode(out); - } - } -} - -impl Decodable for Requests { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Ok( as Decodable>::decode(buf)? - .into_iter() - .map(|bytes| Request::decode_7685(&mut bytes.as_ref())) - .collect::, alloy_eips::eip7685::Eip7685Error>>() - .map(Self)?) - } -} diff --git a/crates/primitives-traits/src/serde_bincode_compat.rs b/crates/primitives-traits/src/serde_bincode_compat.rs new file mode 100644 index 00000000000..a1f7d42569e --- /dev/null +++ b/crates/primitives-traits/src/serde_bincode_compat.rs @@ -0,0 +1,14 @@ +use core::fmt::Debug; + +pub use super::header::{serde_bincode_compat as header, serde_bincode_compat::*}; +use serde::{de::DeserializeOwned, Serialize}; + +/// Trait for types that can be serialized and deserialized using bincode. +pub trait SerdeBincodeCompat: Sized + 'static { + /// Serde representation of the type for bincode serialization. + type BincodeRepr<'a>: Debug + Serialize + DeserializeOwned + From<&'a Self> + Into; +} + +impl SerdeBincodeCompat for alloy_consensus::Header { + type BincodeRepr<'a> = alloy_consensus::serde_bincode_compat::Header<'a>; +} diff --git a/crates/primitives-traits/src/size.rs b/crates/primitives-traits/src/size.rs new file mode 100644 index 00000000000..a1978ff379e --- /dev/null +++ b/crates/primitives-traits/src/size.rs @@ -0,0 +1,69 @@ +use alloy_consensus::{Header, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, TxType}; +use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; + +/// Trait for calculating a heuristic for the in-memory size of a struct. +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait InMemorySize { + /// Returns a heuristic for the in-memory size of a struct. + fn size(&self) -> usize; +} + +impl InMemorySize for alloy_consensus::Signed { + fn size(&self) -> usize { + T::size(self.tx()) + self.signature().size() + self.hash().size() + } +} + +/// Implement `InMemorySize` for a type with `size_of` +macro_rules! impl_in_mem_size_size_of { + ($($ty:ty),*) => { + $( + impl InMemorySize for $ty { + #[inline] + fn size(&self) -> usize { + core::mem::size_of::() + } + } + )* + }; +} + +impl_in_mem_size_size_of!(Signature, TxHash, TxType); + +/// Implement `InMemorySize` for a type with a native `size` method. +macro_rules! impl_in_mem_size { + ($($ty:ty),*) => { + $( + impl InMemorySize for $ty { + #[inline] + fn size(&self) -> usize { + Self::size(self) + } + } + )* + }; +} + +impl_in_mem_size!(Header, TxLegacy, TxEip2930, TxEip1559, TxEip7702, TxEip4844); + +#[cfg(feature = "op")] +impl_in_mem_size_size_of!(op_alloy_consensus::OpTxType); + +#[cfg(test)] +mod tests { + use super::*; + + // ensures we don't have any recursion in the `InMemorySize` impls + #[test] + fn no_in_memory_no_recursion() { + fn assert_no_recursion() { + let _ = T::default().size(); + } + assert_no_recursion::
(); + assert_no_recursion::(); + assert_no_recursion::(); + assert_no_recursion::(); + assert_no_recursion::(); + assert_no_recursion::(); + } +} diff --git a/crates/primitives-traits/src/storage.rs b/crates/primitives-traits/src/storage.rs index 39b6155ee28..c6b9b1e11c7 100644 --- a/crates/primitives-traits/src/storage.rs +++ b/crates/primitives-traits/src/storage.rs @@ -1,13 +1,12 @@ use alloy_primitives::{B256, U256}; -use reth_codecs::{add_arbitrary_tests, Compact}; -use serde::{Deserialize, Serialize}; /// Account storage entry. /// /// `key` is the subkey when used as a value in the `StorageChangeSets` table. -#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)] +#[derive(Debug, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct StorageEntry { /// Storage key. pub key: B256, @@ -31,7 +30,8 @@ impl From<(B256, U256)> for StorageEntry { // NOTE: Removing reth_codec and manually encode subkey // and compress second part of the value. If we have compression // over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey -impl Compact for StorageEntry { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for StorageEntry { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, diff --git a/crates/primitives-traits/src/transaction.rs b/crates/primitives-traits/src/transaction.rs deleted file mode 100644 index 93645ead82e..00000000000 --- a/crates/primitives-traits/src/transaction.rs +++ /dev/null @@ -1,26 +0,0 @@ -//! Transaction abstraction - -use alloc::fmt; - -use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; - -/// Helper trait that unifies all behaviour required by transaction to support full node operations. -pub trait FullTransaction: Transaction + Compact {} - -impl FullTransaction for T where T: Transaction + Compact {} - -/// Abstraction of a transaction. -pub trait Transaction: - alloy_consensus::Transaction - + Clone - + fmt::Debug - + PartialEq - + Eq - + Default - + alloy_rlp::Encodable - + alloy_rlp::Decodable - + Serialize - + for<'de> Deserialize<'de> -{ -} diff --git a/crates/primitives-traits/src/transaction/execute.rs b/crates/primitives-traits/src/transaction/execute.rs new file mode 100644 index 00000000000..c7350f1941b --- /dev/null +++ b/crates/primitives-traits/src/transaction/execute.rs @@ -0,0 +1,10 @@ +//! Abstraction of an executable transaction. + +use alloy_primitives::Address; +use revm_primitives::TxEnv; + +/// Loads transaction into execution environment. +pub trait FillTxEnv { + /// Fills [`TxEnv`] with an [`Address`] and transaction. + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address); +} diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs new file mode 100644 index 00000000000..3a0871c99a4 --- /dev/null +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -0,0 +1,80 @@ +//! 
Transaction abstraction + +pub mod execute; +pub mod signed; +pub mod tx_type; + +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde}; +use core::{fmt, hash::Hash}; + +use alloy_consensus::constants::{ + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, +}; + +/// Helper trait that unifies all behaviour required by transaction to support full node operations. +pub trait FullTransaction: Transaction + MaybeCompact {} + +impl FullTransaction for T where T: Transaction + MaybeCompact {} + +/// Abstraction of a transaction. +pub trait Transaction: + Send + + Sync + + Unpin + + Clone + + fmt::Debug + + Eq + + PartialEq + + Hash + + alloy_consensus::Transaction + + InMemorySize + + MaybeSerde + + MaybeArbitrary +{ + /// Returns true if the transaction is a legacy transaction. + #[inline] + fn is_legacy(&self) -> bool { + self.ty() == LEGACY_TX_TYPE_ID + } + + /// Returns true if the transaction is an EIP-2930 transaction. + #[inline] + fn is_eip2930(&self) -> bool { + self.ty() == EIP2930_TX_TYPE_ID + } + + /// Returns true if the transaction is an EIP-1559 transaction. + #[inline] + fn is_eip1559(&self) -> bool { + self.ty() == EIP1559_TX_TYPE_ID + } + + /// Returns true if the transaction is an EIP-4844 transaction. + #[inline] + fn is_eip4844(&self) -> bool { + self.ty() == EIP4844_TX_TYPE_ID + } + + /// Returns true if the transaction is an EIP-7702 transaction. + #[inline] + fn is_eip7702(&self) -> bool { + self.ty() == EIP7702_TX_TYPE_ID + } +} + +impl Transaction for T where + T: Send + + Sync + + Unpin + + Clone + + fmt::Debug + + Eq + + PartialEq + + Hash + + alloy_consensus::Transaction + + InMemorySize + + MaybeSerde + + MaybeArbitrary +{ +} diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs new file mode 100644 index 00000000000..5e0a91b4da2 --- /dev/null +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -0,0 +1,77 @@ +//! API of a signed transaction. + +use crate::{FillTxEnv, InMemorySize, MaybeArbitrary, MaybeCompact, MaybeSerde, TxType}; +use alloc::{fmt, vec::Vec}; +use alloy_eips::eip2718::{Decodable2718, Encodable2718}; +use alloy_primitives::{keccak256, Address, PrimitiveSignature, TxHash, B256}; +use core::hash::Hash; + +/// Helper trait that unifies all behaviour required by block to support full node operations. +pub trait FullSignedTx: SignedTransaction + FillTxEnv + MaybeCompact {} + +impl FullSignedTx for T where T: SignedTransaction + FillTxEnv + MaybeCompact {} + +/// A signed transaction. +#[auto_impl::auto_impl(&, Arc)] +pub trait SignedTransaction: + Send + + Sync + + Unpin + + Clone + + fmt::Debug + + PartialEq + + Eq + + Hash + + alloy_rlp::Encodable + + alloy_rlp::Decodable + + Encodable2718 + + Decodable2718 + + alloy_consensus::Transaction + + MaybeSerde + + MaybeArbitrary + + InMemorySize +{ + /// Transaction envelope type ID. + type Type: TxType; + + /// Returns the transaction type. + fn tx_type(&self) -> Self::Type { + Self::Type::try_from(self.ty()).expect("should decode tx type id") + } + + /// Returns reference to transaction hash. + fn tx_hash(&self) -> &TxHash; + + /// Returns reference to signature. + fn signature(&self) -> &PrimitiveSignature; + + /// Recover signer from signature and hash. + /// + /// Returns `None` if the transaction's signature is invalid following [EIP-2](https://eips.ethereum.org/EIPS/eip-2), see also `reth_primitives::transaction::recover_signer`. 
+    ///
+    /// Note:
+    ///
+    /// This can fail for some early ethereum mainnet transactions pre EIP-2, use
+    /// [`Self::recover_signer_unchecked`] if you want to recover the signer without ensuring that
+    /// the signature has a low `s` value.
+    fn recover_signer(&self) -> Option<Address>;
+
+    /// Recover signer from signature and hash _without ensuring that the signature has a low `s`
+    /// value_.
+    ///
+    /// Returns `None` if the transaction's signature is invalid, see also
+    /// `reth_primitives::transaction::recover_signer_unchecked`.
+    fn recover_signer_unchecked(&self) -> Option<Address> {
+        self.recover_signer_unchecked_with_buf(&mut Vec::new())
+    }
+
+    /// Same as [`Self::recover_signer_unchecked`] but receives a buffer to operate on. This is
+    /// used during batch recovery to avoid allocating a new buffer for each transaction.
+    fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec<u8>) -> Option<Address>
; + + /// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with + /// tx type. + fn recalculate_hash(&self) -> B256 { + keccak256(self.encoded_2718()) + } +} diff --git a/crates/primitives-traits/src/transaction/tx_type.rs b/crates/primitives-traits/src/transaction/tx_type.rs new file mode 100644 index 00000000000..c60cd9cb3af --- /dev/null +++ b/crates/primitives-traits/src/transaction/tx_type.rs @@ -0,0 +1,52 @@ +//! Abstraction of transaction envelope type ID. + +use crate::{InMemorySize, MaybeArbitrary, MaybeCompact}; +use alloy_consensus::Typed2718; +use alloy_primitives::{U64, U8}; +use core::fmt; + +/// Helper trait that unifies all behaviour required by transaction type ID to support full node +/// operations. +pub trait FullTxType: TxType + MaybeCompact {} + +impl FullTxType for T where T: TxType + MaybeCompact {} + +/// Trait representing the behavior of a transaction type. +pub trait TxType: + Send + + Sync + + Unpin + + Clone + + Copy + + Default + + fmt::Debug + + fmt::Display + + PartialEq + + Eq + + PartialEq + + Into + + Into + + TryFrom + + TryFrom + + TryFrom + + alloy_rlp::Encodable + + alloy_rlp::Decodable + + Typed2718 + + InMemorySize + + MaybeArbitrary +{ + /// Returns whether this transaction type can be __broadcasted__ as full transaction over the + /// network. + /// + /// Some transactions are not broadcastable as objects and only allowed to be broadcasted as + /// hashes, e.g. because they missing context (e.g. blob sidecar). + fn is_broadcastable_in_full(&self) -> bool { + // EIP-4844 transactions are not broadcastable in full, only hashes are allowed. + !self.is_eip4844() + } +} + +#[cfg(feature = "op")] +impl TxType for op_alloy_consensus::OpTxType {} + +impl TxType for alloy_consensus::TxType {} diff --git a/crates/primitives-traits/src/withdrawal.rs b/crates/primitives-traits/src/withdrawal.rs index 995e60292c6..0849ab6202e 100644 --- a/crates/primitives-traits/src/withdrawal.rs +++ b/crates/primitives-traits/src/withdrawal.rs @@ -1,97 +1,14 @@ //! [EIP-4895](https://eips.ethereum.org/EIPS/eip-4895) Withdrawal types. -use alloc::vec::Vec; -use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; -use derive_more::{AsRef, Deref, DerefMut, From, IntoIterator}; -use reth_codecs::{add_arbitrary_tests, Compact}; - -/// Re-export from `alloy_eips`. -#[doc(inline)] -pub use alloy_eips::eip4895::Withdrawal; -use serde::{Deserialize, Serialize}; - -/// Represents a collection of Withdrawals. -#[derive( - Debug, - Clone, - PartialEq, - Eq, - Default, - Hash, - From, - AsRef, - Deref, - DerefMut, - IntoIterator, - RlpEncodableWrapper, - RlpDecodableWrapper, - Serialize, - Deserialize, - Compact, -)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] -#[as_ref(forward)] -pub struct Withdrawals(Vec); - -impl Withdrawals { - /// Create a new Withdrawals instance. - pub const fn new(withdrawals: Vec) -> Self { - Self(withdrawals) - } - - /// Calculate the total size, including capacity, of the Withdrawals. - #[inline] - pub fn total_size(&self) -> usize { - self.capacity() * core::mem::size_of::() - } - - /// Calculate a heuristic for the in-memory size of the [Withdrawals]. - #[inline] - pub fn size(&self) -> usize { - self.len() * core::mem::size_of::() - } - - /// Get an iterator over the Withdrawals. - pub fn iter(&self) -> core::slice::Iter<'_, Withdrawal> { - self.0.iter() - } - - /// Get a mutable iterator over the Withdrawals. 
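The default method in action, sketched against the `alloy_consensus::TxType` impl declared above; EIP-4844 is the only envelope excluded from full broadcast because its blob sidecar does not travel with announcements.

```rust
// Bring the new trait's default method into scope anonymously to avoid
// clashing with the concrete `alloy_consensus::TxType` enum.
use reth_primitives_traits::TxType as _;

fn main() {
    use alloy_consensus::TxType;

    assert!(TxType::Legacy.is_broadcastable_in_full());
    assert!(TxType::Eip1559.is_broadcastable_in_full());
    // Blob transactions may only be announced by hash.
    assert!(!TxType::Eip4844.is_broadcastable_in_full());
}
```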
- pub fn iter_mut(&mut self) -> core::slice::IterMut<'_, Withdrawal> { - self.0.iter_mut() - } - - /// Convert [Self] into raw vec of withdrawals. - pub fn into_inner(self) -> Vec { - self.0 - } -} - -impl<'a> IntoIterator for &'a Withdrawals { - type Item = &'a Withdrawal; - type IntoIter = core::slice::Iter<'a, Withdrawal>; - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a> IntoIterator for &'a mut Withdrawals { - type Item = &'a mut Withdrawal; - type IntoIter = core::slice::IterMut<'a, Withdrawal>; - - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - #[cfg(test)] mod tests { - use super::*; + use alloy_eips::eip4895::Withdrawal; use alloy_primitives::Address; use alloy_rlp::{RlpDecodable, RlpEncodable}; use proptest::proptest; use proptest_arbitrary_interop::arb; + use reth_codecs::{add_arbitrary_tests, Compact}; + use serde::{Deserialize, Serialize}; /// This type is kept for compatibility tests after the codec support was added to alloy-eips /// Withdrawal type natively diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 8596f8d766c..109b20ec2bc 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -13,35 +13,35 @@ workspace = true [dependencies] # reth -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde"] } reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true -reth-trie-common.workspace = true revm-primitives = { workspace = true, features = ["serde"] } reth-codecs = { workspace = true, optional = true } - -# op-reth -reth-optimism-chainspec = { workspace = true, optional = true } +reth-zstd-compressors = { workspace = true, optional = true } # ethereum alloy-consensus.workspace = true +alloy-network = { workspace = true, optional = true } alloy-primitives = { workspace = true, features = ["rand", "rlp"] } alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-rpc-types = { workspace = true, optional = true } alloy-serde = { workspace = true, optional = true } alloy-eips = { workspace = true, features = ["serde"] } +alloy-trie = { workspace = true, features = ["serde"] } # optimism op-alloy-rpc-types = { workspace = true, optional = true } op-alloy-consensus = { workspace = true, features = [ - "arbitrary", + "arbitrary", + "serde", ], optional = true } # crypto secp256k1 = { workspace = true, features = [ - "global-context", - "recovery", - "rand", + "global-context", + "recovery", + "rand", ], optional = true } k256.workspace = true # for eip-4844 @@ -56,18 +56,17 @@ rand = { workspace = true, optional = true } rayon.workspace = true serde.workspace = true serde_with = { workspace = true, optional = true } -zstd = { workspace = true, features = ["experimental"], optional = true } # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } -proptest = { workspace = true, optional = true } [dev-dependencies] # eth -reth-chainspec.workspace = true -reth-codecs.workspace = true +reth-chainspec = { workspace = true, features = ["arbitrary"] } +reth-codecs = { workspace = true, features = ["test-utils"] } reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-testing-utils.workspace = true +reth-trie-common = { workspace = true, features = ["arbitrary"] } revm-primitives = { workspace = true, features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } @@ -76,60 +75,101 @@ alloy-genesis.workspace = true arbitrary = { workspace = 
true, features = ["derive"] } assert_matches.workspace = true bincode.workspace = true -modular-bitfield.workspace = true proptest-arbitrary-interop.workspace = true proptest.workspace = true rand.workspace = true serde_json.workspace = true test-fuzz.workspace = true +rstest.workspace = true criterion.workspace = true pprof = { workspace = true, features = [ - "flamegraph", - "frame-pointer", - "criterion", + "flamegraph", + "frame-pointer", + "criterion", ] } [features] default = ["c-kzg", "alloy-compat", "std", "reth-codec", "secp256k1"] -std = ["reth-primitives-traits/std"] -reth-codec = ["dep:reth-codecs", "dep:zstd", "dep:modular-bitfield", "std"] -asm-keccak = ["alloy-primitives/asm-keccak"] +std = [ + "reth-primitives-traits/std", + "alloy-consensus/std", + "alloy-eips/std", + "alloy-genesis/std", + "alloy-primitives/std", + "alloy-serde?/std", + "k256/std", + "once_cell/std", + "revm-primitives/std", + "secp256k1?/std", + "serde/std", + "alloy-trie/std", + "serde_with?/std", + "alloy-rlp/std", + "reth-ethereum-forks/std", + "bytes/std", + "derive_more/std", + "reth-zstd-compressors?/std" +] +reth-codec = [ + "dep:reth-codecs", + "dep:reth-zstd-compressors", + "dep:modular-bitfield", "std", + "reth-primitives-traits/reth-codec", +] +asm-keccak = ["alloy-primitives/asm-keccak", "revm-primitives/asm-keccak"] arbitrary = [ - "dep:arbitrary", - "dep:proptest", - "alloy-eips/arbitrary", - "rand", - "reth-codec", - "reth-ethereum-forks/arbitrary", - "reth-primitives-traits/arbitrary", - "revm-primitives/arbitrary", - "secp256k1", + "dep:arbitrary", + "alloy-eips/arbitrary", + "rand", + "reth-codec", + "reth-ethereum-forks/arbitrary", + "reth-primitives-traits/arbitrary", + "revm-primitives/arbitrary", + "secp256k1", + "reth-chainspec/arbitrary", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary", + "alloy-rpc-types?/arbitrary", + "alloy-serde?/arbitrary", + "op-alloy-consensus?/arbitrary", + "op-alloy-rpc-types?/arbitrary", + "reth-codecs?/arbitrary", + "alloy-trie/arbitrary", + "reth-trie-common/arbitrary" ] secp256k1 = ["dep:secp256k1"] c-kzg = [ - "dep:c-kzg", - "alloy-consensus/kzg", - "alloy-eips/kzg", - "revm-primitives/c-kzg", + "dep:c-kzg", + "alloy-consensus/kzg", + "alloy-eips/kzg", + "revm-primitives/c-kzg", ] optimism = [ - "dep:op-alloy-consensus", - "dep:reth-optimism-chainspec", - "reth-codecs?/optimism", - "revm-primitives/optimism", + "dep:op-alloy-consensus", + "reth-codecs?/op", + "revm-primitives/optimism", ] alloy-compat = [ - "dep:alloy-rpc-types", - "dep:alloy-serde", - "dep:op-alloy-rpc-types", + "dep:alloy-rpc-types", + "dep:alloy-serde", + "dep:op-alloy-rpc-types", + "dep:alloy-network", +] +test-utils = [ + "reth-primitives-traits/test-utils", + "reth-chainspec/test-utils", + "reth-codecs?/test-utils", + "reth-trie-common/test-utils", + "arbitrary", ] -test-utils = ["reth-primitives-traits/test-utils"] serde-bincode-compat = [ - "alloy-consensus/serde-bincode-compat", - "op-alloy-consensus?/serde-bincode-compat", - "reth-primitives-traits/serde-bincode-compat", - "serde_with", + "serde_with", + "alloy-eips/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat", + "op-alloy-consensus?/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat", + "reth-trie-common/serde-bincode-compat", ] [[bench]] diff --git a/crates/primitives/benches/recover_ecdsa_crit.rs b/crates/primitives/benches/recover_ecdsa_crit.rs index 8e8e279b2a4..9273d71f6f5 100644 --- a/crates/primitives/benches/recover_ecdsa_crit.rs +++ 
b/crates/primitives/benches/recover_ecdsa_crit.rs
@@ -4,6 +4,7 @@ use alloy_rlp::Decodable;
 use criterion::{criterion_group, criterion_main, Criterion};
 use pprof::criterion::{Output, PProfProfiler};
 use reth_primitives::TransactionSigned;
+use reth_primitives_traits::SignedTransaction;
 
 /// Benchmarks the recovery of the public key from the ECDSA message using criterion.
 pub fn criterion_benchmark(c: &mut Criterion) {
diff --git a/crates/primitives/benches/validate_blob_tx.rs b/crates/primitives/benches/validate_blob_tx.rs
index 50498a9420f..453381366e1 100644
--- a/crates/primitives/benches/validate_blob_tx.rs
+++ b/crates/primitives/benches/validate_blob_tx.rs
@@ -1,7 +1,9 @@
 #![allow(missing_docs)]
 use alloy_consensus::TxEip4844;
-use alloy_eips::eip4844::{env_settings::EnvKzgSettings, MAX_BLOBS_PER_BLOCK};
+use alloy_eips::eip4844::{
+    env_settings::EnvKzgSettings, BlobTransactionSidecar, MAX_BLOBS_PER_BLOCK,
+};
 use alloy_primitives::hex;
 use criterion::{
     criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion,
@@ -12,7 +14,6 @@ use proptest::{
     test_runner::{RngAlgorithm, TestRng, TestRunner},
 };
 use proptest_arbitrary_interop::arb;
-use reth_primitives::BlobTransactionSidecar;
 
 // constant seed to use for the rng
 const SEED: [u8; 32] = hex!("1337133713371337133713371337133713371337133713371337133713371337");
diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs
index c9bdfad89f5..06451c30b9e 100644
--- a/crates/primitives/src/alloy_compat.rs
+++ b/crates/primitives/src/alloy_compat.rs
@@ -1,24 +1,20 @@
 //! Common conversions from alloy types.
 
-use crate::{
-    constants::EMPTY_TRANSACTIONS, transaction::extract_chain_id, Block, BlockBody, Signature,
-    Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, TxType,
-};
+use crate::{Block, BlockBody, Transaction, TransactionSigned};
 use alloc::{string::ToString, vec::Vec};
-use alloy_consensus::{Transaction as _, TxEip1559, TxEip2930, TxEip4844, TxLegacy};
-use alloy_primitives::{Parity, TxKind};
-use alloy_rlp::Error as RlpError;
+use alloy_consensus::{constants::EMPTY_TRANSACTIONS, Header, TxEnvelope};
+use alloy_network::{AnyHeader, AnyRpcBlock, AnyRpcTransaction, AnyTxEnvelope};
 use alloy_serde::WithOtherFields;
 use op_alloy_rpc_types as _;
 
-impl TryFrom<alloy_rpc_types::Block<WithOtherFields<alloy_rpc_types::Transaction>>> for Block {
+impl TryFrom<AnyRpcBlock> for Block {
     type Error = alloy_rpc_types::ConversionError;
 
-    fn try_from(
-        block: alloy_rpc_types::Block<WithOtherFields<alloy_rpc_types::Transaction>>,
-    ) -> Result<Self, Self::Error> {
+    fn try_from(block: AnyRpcBlock) -> Result<Self, Self::Error> {
         use alloy_rpc_types::ConversionError;
 
+        let block = block.inner;
+
         let transactions = {
             let transactions: Result<Vec<TransactionSigned>, ConversionError> = match block
                 .transactions
@@ -33,244 +29,136 @@ impl TryFrom<WithOtherFields<alloy_rpc_types::Transaction>> for Transaction {
+impl TryFrom<AnyRpcTransaction> for TransactionSigned {
     type Error = alloy_rpc_types::ConversionError;
 
-    fn try_from(tx: WithOtherFields<alloy_rpc_types::Transaction>) -> Result<Self, Self::Error> {
-        use alloy_eips::eip2718::Eip2718Error;
+    fn try_from(tx: AnyRpcTransaction) -> Result<Self, Self::Error> {
         use alloy_rpc_types::ConversionError;
 
-        #[cfg(feature = "optimism")]
-        let WithOtherFields { inner: tx, other } = tx;
-        #[cfg(not(feature = "optimism"))]
         let WithOtherFields { inner: tx, other: _ } = tx;
 
-        match tx.transaction_type.map(TryInto::try_into).transpose().map_err(|_| {
-            ConversionError::Eip2718Error(Eip2718Error::UnexpectedType(
-                tx.transaction_type.unwrap(),
-            ))
-        })?
{ - None | Some(TxType::Legacy) => { - // legacy - if tx.max_fee_per_gas.is_some() || tx.max_priority_fee_per_gas.is_some() { - return Err(ConversionError::Eip2718Error( - RlpError::Custom("EIP-1559 fields are present in a legacy transaction") - .into(), - )) - } - - // extract the chain id if possible - let chain_id = match tx.chain_id { - Some(chain_id) => Some(chain_id), - None => { - if let Some(signature) = tx.signature { - // TODO: make this error conversion better. This is needed because - // sometimes rpc providers return legacy transactions without a chain id - // explicitly in the response, however those transactions may also have - // a chain id in the signature from eip155 - extract_chain_id(signature.v.to()) - .map_err(|err| ConversionError::Eip2718Error(err.into()))? - .1 - } else { - return Err(ConversionError::MissingChainId) - } - } - }; - - Ok(Self::Legacy(TxLegacy { - chain_id, - nonce: tx.nonce, - gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, - gas_limit: tx.gas, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - input: tx.input, - })) + let (transaction, signature, hash) = match tx.inner { + AnyTxEnvelope::Ethereum(TxEnvelope::Legacy(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Legacy(tx), signature, hash) } - Some(TxType::Eip2930) => { - // eip2930 - Ok(Self::Eip2930(TxEip2930 { - chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - gas_limit: tx.gas, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - input: tx.input, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - gas_price: tx.gas_price.ok_or(ConversionError::MissingGasPrice)?, - })) + AnyTxEnvelope::Ethereum(TxEnvelope::Eip2930(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip2930(tx), signature, hash) } - Some(TxType::Eip1559) => { - // EIP-1559 - Ok(Self::Eip1559(TxEip1559 { - chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - max_priority_fee_per_gas: tx - .max_priority_fee_per_gas - .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, - max_fee_per_gas: tx - .max_fee_per_gas - .ok_or(ConversionError::MissingMaxFeePerGas)?, - gas_limit: tx.gas, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - input: tx.input, - })) + AnyTxEnvelope::Ethereum(TxEnvelope::Eip1559(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip1559(tx), signature, hash) } - Some(TxType::Eip4844) => { - // EIP-4844 - Ok(Self::Eip4844(TxEip4844 { - chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - max_priority_fee_per_gas: tx - .max_priority_fee_per_gas - .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, - max_fee_per_gas: tx - .max_fee_per_gas - .ok_or(ConversionError::MissingMaxFeePerGas)?, - gas_limit: tx.gas, - to: tx.to.unwrap_or_default(), - value: tx.value, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - input: tx.input, - blob_versioned_hashes: tx - .blob_versioned_hashes - .ok_or(ConversionError::MissingBlobVersionedHashes)?, - max_fee_per_blob_gas: tx - .max_fee_per_blob_gas - .ok_or(ConversionError::MissingMaxFeePerBlobGas)?, - })) + AnyTxEnvelope::Ethereum(TxEnvelope::Eip4844(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip4844(tx.into()), signature, hash) } - Some(TxType::Eip7702) => { - // this is currently 
unsupported as it is not present in alloy due to missing rpc - // specs - Err(ConversionError::Custom("Unimplemented".to_string())) - /* - // EIP-7702 - Ok(Transaction::Eip7702(TxEip7702 { - chain_id: tx.chain_id.ok_or(ConversionError::MissingChainId)?, - nonce: tx.nonce, - max_priority_fee_per_gas: tx - .max_priority_fee_per_gas - .ok_or(ConversionError::MissingMaxPriorityFeePerGas)?, - max_fee_per_gas: tx - .max_fee_per_gas - .ok_or(ConversionError::MissingMaxFeePerGas)?, - gas_limit: tx - .gas - .try_into() - .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - to: tx.to.map_or(TxKind::Create, TxKind::Call), - value: tx.value, - access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, - authorization_list: tx - .authorization_list - .ok_or(ConversionError::MissingAuthorizationList)?, - input: tx.input, - }))*/ + AnyTxEnvelope::Ethereum(TxEnvelope::Eip7702(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip7702(tx), signature, hash) } #[cfg(feature = "optimism")] - Some(TxType::Deposit) => { - let fields = other - .deserialize_into::() - .map_err(|e| ConversionError::Custom(e.to_string()))?; - Ok(Self::Deposit(op_alloy_consensus::TxDeposit { - source_hash: fields - .source_hash - .ok_or_else(|| ConversionError::Custom("MissingSourceHash".to_string()))?, - from: tx.from, - to: TxKind::from(tx.to), - mint: fields.mint.filter(|n| *n != 0), - value: tx.value, - gas_limit: tx.gas, - is_system_transaction: fields.is_system_tx.unwrap_or(false), - input: tx.input, - })) - } - } - } -} + AnyTxEnvelope::Unknown(alloy_network::UnknownTxEnvelope { hash, inner }) => { + use alloy_consensus::Transaction as _; -impl TryFrom> for TransactionSigned { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(tx: WithOtherFields) -> Result { - use alloy_rpc_types::ConversionError; - - let signature = tx.signature.ok_or(ConversionError::MissingSignature)?; - let transaction: Transaction = tx.try_into()?; - let y_parity = if let Some(y_parity) = signature.y_parity { - y_parity.0 - } else { - match transaction.tx_type() { - // If the transaction type is Legacy, adjust the v component of the - // signature according to the Ethereum specification - TxType::Legacy => { - extract_chain_id(signature.v.to()) - .map_err(|_| ConversionError::InvalidSignature)? 
- .0 + if inner.ty() == crate::TxType::Deposit { + let fields: op_alloy_rpc_types::OpTransactionFields = inner + .fields + .clone() + .deserialize_into::() + .map_err(|e| ConversionError::Custom(e.to_string()))?; + ( + Transaction::Deposit(op_alloy_consensus::TxDeposit { + source_hash: fields.source_hash.ok_or_else(|| { + ConversionError::Custom("MissingSourceHash".to_string()) + })?, + from: tx.from, + to: revm_primitives::TxKind::from(inner.to()), + mint: fields.mint.filter(|n| *n != 0), + value: inner.value(), + gas_limit: inner.gas_limit(), + is_system_transaction: fields.is_system_tx.unwrap_or(false), + input: inner.input().clone(), + }), + op_alloy_consensus::TxDeposit::signature(), + hash, + ) + } else { + return Err(ConversionError::Custom("unknown transaction type".to_string())) } - _ => !signature.v.is_zero(), } + _ => return Err(ConversionError::Custom("unknown transaction type".to_string())), }; - let mut parity = Parity::Parity(y_parity); - - if matches!(transaction.tx_type(), TxType::Legacy) { - if let Some(chain_id) = transaction.chain_id() { - parity = parity.with_chain_id(chain_id) - } - } - - Ok(Self::from_transaction_and_signature( - transaction, - Signature::new(signature.r, signature.s, parity), - )) - } -} - -impl TryFrom> for TransactionSignedEcRecovered { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(tx: WithOtherFields) -> Result { - use alloy_rpc_types::ConversionError; - - let transaction: TransactionSigned = tx.try_into()?; - - transaction.try_into_ecrecovered().map_err(|_| ConversionError::InvalidSignature) - } -} - -impl TryFrom> for TransactionSignedNoHash { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(tx: WithOtherFields) -> Result { - Ok(Self { - signature: tx.signature.ok_or(Self::Error::MissingSignature)?.try_into()?, - transaction: tx.try_into()?, - }) + Ok(Self { transaction, signature, hash: hash.into() }) } } @@ -279,7 +167,7 @@ impl TryFrom> for TransactionSigne mod tests { use super::*; use alloy_primitives::{address, Address, B256, U256}; - use alloy_rpc_types::Transaction as AlloyTransaction; + use revm_primitives::TxKind; #[test] fn optimism_deposit_tx_conversion_no_mint() { @@ -303,10 +191,11 @@ mod tests { "v": "0x0", "value": "0x0" }"#; - let alloy_tx: WithOtherFields = + let alloy_tx: WithOtherFields> = serde_json::from_str(input).expect("failed to deserialize"); - let reth_tx: Transaction = alloy_tx.try_into().expect("failed to convert"); + let TransactionSigned { transaction: reth_tx, .. } = + alloy_tx.try_into().expect("failed to convert"); if let Transaction::Deposit(deposit_tx) = reth_tx { assert_eq!( deposit_tx.source_hash, @@ -353,10 +242,11 @@ mod tests { "v": "0x0", "value": "0x239c2e16a5ca590000" }"#; - let alloy_tx: WithOtherFields = + let alloy_tx: WithOtherFields> = serde_json::from_str(input).expect("failed to deserialize"); - let reth_tx: Transaction = alloy_tx.try_into().expect("failed to convert"); + let TransactionSigned { transaction: reth_tx, .. 
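The rewritten conversion means callers now deserialize into alloy's catch-all `AnyRpcTransaction` and convert from there, instead of matching on the `transaction_type` field by field. A hedged sketch of a call site, where the JSON source is an assumption:

```rust
use alloy_network::AnyRpcTransaction;
use reth_primitives::TransactionSigned;

/// `json` is assumed to hold the `result` of an `eth_getTransactionByHash` response.
fn tx_from_rpc_json(json: &str) -> Result<TransactionSigned, Box<dyn std::error::Error>> {
    let rpc_tx: AnyRpcTransaction = serde_json::from_str(json)?;
    // Uses the `TryFrom<AnyRpcTransaction>` impl above: the inner envelope is split
    // into (transaction, signature, hash) via `into_parts`, so no re-hashing occurs.
    Ok(TransactionSigned::try_from(rpc_tx)?)
}
```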
} = + alloy_tx.try_into().expect("failed to convert"); if let Transaction::Deposit(deposit_tx) = reth_tx { assert_eq!( diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index de0817fb025..0ee6f860b58 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,31 +1,18 @@ use crate::{ - GotExpected, Header, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, Withdrawals, + traits::BlockExt, transaction::SignedTransactionIntoRecoveredExt, BlockBodyTxExt, GotExpected, + RecoveredTx, SealedHeader, TransactionSigned, }; use alloc::vec::Vec; -pub use alloy_eips::eip1898::{ - BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, ForkBlock, RpcBlockHash, -}; -use alloy_eips::eip2718::Encodable2718; -use alloy_primitives::{Address, Bytes, Sealable, B256}; +use alloy_consensus::Header; +use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; +use alloy_primitives::{Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] -use proptest::prelude::prop_compose; -#[cfg(any(test, feature = "arbitrary"))] pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy}; -use reth_primitives_traits::Requests; +use reth_primitives_traits::{BlockBody as _, InMemorySize, SignedTransaction, Transaction}; use serde::{Deserialize, Serialize}; -// HACK(onbjerg): we need this to always set `requests` to `None` since we might otherwise generate -// a block with `None` withdrawals and `Some` requests, in which case we end up trying to decode the -// requests as withdrawals -#[cfg(any(feature = "arbitrary", test))] -prop_compose! { - pub fn empty_requests_strategy()(_ in 0..1) -> Option { - None - } -} - /// Ethereum full block. /// /// Withdrawals can be optionally included at the end of the RLP encoded message. @@ -39,73 +26,31 @@ pub struct Block { pub body: BlockBody, } -impl Block { - /// Calculate the header hash and seal the block so that it can't be changed. - pub fn seal_slow(self) -> SealedBlock { - let sealed = self.header.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedBlock { header: SealedHeader::new(header, seal), body: self.body } - } +impl reth_primitives_traits::Block for Block { + type Header = Header; + type Body = BlockBody; - /// Seal the block with a known hash. - /// - /// WARNING: This method does not perform validation whether the hash is correct. - pub fn seal(self, hash: B256) -> SealedBlock { - SealedBlock { header: SealedHeader::new(self.header, hash), body: self.body } + fn new(header: Self::Header, body: Self::Body) -> Self { + Self { header, body } } - /// Expensive operation that recovers transaction signer. See [`SealedBlockWithSenders`]. - pub fn senders(&self) -> Option> { - self.body.recover_signers() + fn header(&self) -> &Self::Header { + &self.header } - /// Transform into a [`BlockWithSenders`]. - /// - /// # Panics - /// - /// If the number of senders does not match the number of transactions in the block - /// and the signer recovery for one of the transactions fails. - /// - /// Note: this is expected to be called with blocks read from disk. - #[track_caller] - pub fn with_senders_unchecked(self, senders: Vec
) -> BlockWithSenders { - self.try_with_senders_unchecked(senders).expect("stored block is valid") + fn body(&self) -> &Self::Body { + &self.body } - /// Transform into a [`BlockWithSenders`] using the given senders. - /// - /// If the number of senders does not match the number of transactions in the block, this falls - /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. - /// See also [`TransactionSigned::recover_signer_unchecked`] - /// - /// Returns an error if a signature is invalid. - #[track_caller] - pub fn try_with_senders_unchecked( - self, - senders: Vec
, - ) -> Result { - let senders = if self.body.transactions.len() == senders.len() { - senders - } else { - let Some(senders) = self.body.recover_signers() else { return Err(self) }; - senders - }; - - Ok(BlockWithSenders { block: self, senders }) - } - - /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained - /// transactions. - /// - /// Returns `None` if a transaction is invalid. - pub fn with_recovered_senders(self) -> Option { - let senders = self.senders()?; - Some(BlockWithSenders { block: self, senders }) + fn split(self) -> (Self::Header, Self::Body) { + (self.header, self.body) } +} +impl InMemorySize for Block { /// Calculates a heuristic for the in-memory size of the [`Block`]. #[inline] - pub fn size(&self) -> usize { + fn size(&self) -> usize { self.header.size() + self.body.size() } } @@ -122,7 +67,6 @@ mod block_rlp { transactions: Vec, ommers: Vec
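With the inherent constructors replaced by the `reth_primitives_traits::Block` impl above, construction and deconstruction go through the trait. A small sketch, assuming default header and body values:

```rust
use reth_primitives_traits::Block as _;

fn main() {
    let block = reth_primitives::Block::new(
        alloy_consensus::Header::default(),
        reth_primitives::BlockBody::default(),
    );
    // `split` is the inverse of `new`.
    let (header, body) = block.split();
    assert_eq!(header, alloy_consensus::Header::default());
    assert!(body.transactions.is_empty());
}
```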
, withdrawals: Option, - requests: Option, } #[derive(RlpEncodable)] @@ -132,75 +76,59 @@ mod block_rlp { transactions: &'a Vec, ommers: &'a Vec
, withdrawals: Option<&'a Withdrawals>, - requests: Option<&'a Requests>, } impl<'a> From<&'a Block> for HelperRef<'a, Header> { fn from(block: &'a Block) -> Self { - let Block { header, body: BlockBody { transactions, ommers, withdrawals, requests } } = - block; - Self { - header, - transactions, - ommers, - withdrawals: withdrawals.as_ref(), - requests: requests.as_ref(), - } + let Block { header, body: BlockBody { transactions, ommers, withdrawals } } = block; + Self { header, transactions, ommers, withdrawals: withdrawals.as_ref() } } } impl<'a> From<&'a SealedBlock> for HelperRef<'a, SealedHeader> { fn from(block: &'a SealedBlock) -> Self { - let SealedBlock { - header, - body: BlockBody { transactions, ommers, withdrawals, requests }, - } = block; - Self { - header, - transactions, - ommers, - withdrawals: withdrawals.as_ref(), - requests: requests.as_ref(), - } + let SealedBlock { header, body: BlockBody { transactions, ommers, withdrawals } } = + block; + Self { header, transactions, ommers, withdrawals: withdrawals.as_ref() } } } impl Decodable for Block { fn decode(b: &mut &[u8]) -> alloy_rlp::Result { - let Helper { header, transactions, ommers, withdrawals, requests } = Helper::decode(b)?; - Ok(Self { header, body: BlockBody { transactions, ommers, withdrawals, requests } }) + let Helper { header, transactions, ommers, withdrawals } = Helper::decode(b)?; + Ok(Self { header, body: BlockBody { transactions, ommers, withdrawals } }) } } impl Decodable for SealedBlock { fn decode(b: &mut &[u8]) -> alloy_rlp::Result { - let Helper { header, transactions, ommers, withdrawals, requests } = Helper::decode(b)?; - Ok(Self { header, body: BlockBody { transactions, ommers, withdrawals, requests } }) + let Helper { header, transactions, ommers, withdrawals } = Helper::decode(b)?; + Ok(Self { header, body: BlockBody { transactions, ommers, withdrawals } }) } } impl Encodable for Block { - fn length(&self) -> usize { - let helper: HelperRef<'_, _> = self.into(); - helper.length() - } - fn encode(&self, out: &mut dyn bytes::BufMut) { let helper: HelperRef<'_, _> = self.into(); helper.encode(out) } - } - impl Encodable for SealedBlock { fn length(&self) -> usize { let helper: HelperRef<'_, _> = self.into(); helper.length() } + } + impl Encodable for SealedBlock { fn encode(&self, out: &mut dyn bytes::BufMut) { let helper: HelperRef<'_, _> = self.into(); helper.encode(out) } + + fn length(&self) -> usize { + let helper: HelperRef<'_, _> = self.into(); + helper.length() + } } } @@ -217,52 +145,51 @@ impl<'a> arbitrary::Arbitrary<'a> for Block { Ok(Self { header: u.arbitrary()?, - body: BlockBody { - transactions, - ommers, - // for now just generate empty requests, see HACK above - requests: u.arbitrary()?, - withdrawals: u.arbitrary()?, - }, + body: BlockBody { transactions, ommers, withdrawals: u.arbitrary()? }, }) } } /// Sealed block with senders recovered from transactions. #[derive(Debug, Clone, PartialEq, Eq, Default, Deref, DerefMut)] -pub struct BlockWithSenders { +pub struct BlockWithSenders { /// Block #[deref] #[deref_mut] - pub block: Block, + pub block: B, /// List of senders that match the transactions in the block pub senders: Vec
, } -impl BlockWithSenders { +impl BlockWithSenders { + /// New block with senders + pub const fn new_unchecked(block: B, senders: Vec
<Address>) -> Self {
+        Self { block, senders }
+    }
+
     /// New block with senders. Returns `None` if the number of senders does not match the number
     /// of transactions in the block.
-    pub fn new(block: Block, senders: Vec<Address>
) -> Option { - (block.body.transactions.len() == senders.len()).then_some(Self { block, senders }) + pub fn new(block: B, senders: Vec
) -> Option { + (block.body().transactions().len() == senders.len()).then_some(Self { block, senders }) } /// Seal the block with a known hash. /// /// WARNING: This method does not perform validation whether the hash is correct. #[inline] - pub fn seal(self, hash: B256) -> SealedBlockWithSenders { + pub fn seal(self, hash: B256) -> SealedBlockWithSenders { let Self { block, senders } = self; - SealedBlockWithSenders { block: block.seal(hash), senders } + SealedBlockWithSenders:: { block: block.seal(hash), senders } } /// Calculate the header hash and seal the block with senders so that it can't be changed. #[inline] - pub fn seal_slow(self) -> SealedBlockWithSenders { + pub fn seal_slow(self) -> SealedBlockWithSenders { SealedBlockWithSenders { block: self.block.seal_slow(), senders: self.senders } } /// Split Structure to its components #[inline] - pub fn into_components(self) -> (Block, Vec
) { + pub fn into_components(self) -> (B, Vec
) { (self.block, self.senders) } @@ -270,18 +197,23 @@ impl BlockWithSenders { #[inline] pub fn transactions_with_sender( &self, - ) -> impl Iterator + '_ { - self.senders.iter().zip(self.block.body.transactions()) + ) -> impl Iterator::Transaction)> + + '_ { + self.senders.iter().zip(self.block.body().transactions()) } /// Returns an iterator over all transactions in the chain. #[inline] pub fn into_transactions_ecrecovered( self, - ) -> impl Iterator { + ) -> impl Iterator::Transaction>> + where + ::Transaction: SignedTransaction, + { self.block - .body - .transactions + .split() + .1 + .into_transactions() .into_iter() .zip(self.senders) .map(|(tx, sender)| tx.with_signer(sender)) @@ -289,30 +221,31 @@ impl BlockWithSenders { /// Consumes the block and returns the transactions of the block. #[inline] - pub fn into_transactions(self) -> Vec { - self.block.body.transactions + pub fn into_transactions( + self, + ) -> Vec<::Transaction> { + self.block.split().1.into_transactions() } } /// Sealed Ethereum full block. /// /// Withdrawals can be optionally included at the end of the RLP encoded message. -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp, 32))] -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Deref, DerefMut)] -pub struct SealedBlock { +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Deref, DerefMut)] +pub struct SealedBlock { /// Locked block header. #[deref] #[deref_mut] - pub header: SealedHeader, + pub header: SealedHeader, /// Block body. - pub body: BlockBody, + pub body: B, } -impl SealedBlock { +impl SealedBlock { /// Create a new sealed block instance using the sealed header and block body. #[inline] - pub const fn new(header: SealedHeader, body: BlockBody) -> Self { + pub const fn new(header: SealedHeader, body: B) -> Self { Self { header, body } } @@ -322,56 +255,106 @@ impl SealedBlock { self.header.hash() } - /// Splits the sealed block into underlying components - #[inline] - pub fn split(self) -> (SealedHeader, Vec, Vec
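`BlockWithSenders` is now generic over the block type, and only the checked constructor enforces the sender/transaction length invariant. A sketch of both entry points:

```rust
use reth_primitives::{Block, BlockWithSenders};

fn main() {
    let block = Block::default();
    // `new` returns `None` when the sender count does not match the transaction
    // count; an empty block paired with an empty sender list is accepted.
    assert!(BlockWithSenders::new(block.clone(), vec![]).is_some());
    // `new_unchecked` skips the check and is meant for callers that already
    // validated the invariant, e.g. blocks read back from disk.
    let _unchecked = BlockWithSenders::new_unchecked(block, vec![]);
}
```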
) { - (self.header, self.body.transactions, self.body.ommers) - } - /// Splits the [`BlockBody`] and [`SealedHeader`] into separate components #[inline] - pub fn split_header_body(self) -> (SealedHeader, BlockBody) { + pub fn split_header_body(self) -> (SealedHeader, B) { (self.header, self.body) } +} - /// Returns an iterator over all blob transactions of the block +impl SealedBlock { + /// Returns whether or not the block contains any blob transactions. #[inline] - pub fn blob_transactions_iter(&self) -> impl Iterator + '_ { - self.body.blob_transactions_iter() + pub fn has_blob_transactions(&self) -> bool { + self.body.has_blob_transactions() } - /// Returns only the blob transactions, if any, from the block body. + /// Returns whether or not the block contains any eip-7702 transactions. #[inline] - pub fn blob_transactions(&self) -> Vec<&TransactionSigned> { - self.blob_transactions_iter().collect() + pub fn has_eip7702_transactions(&self) -> bool { + self.body.has_eip7702_transactions() } +} +impl SealedBlock +where + B: reth_primitives_traits::BlockBody, +{ /// Returns an iterator over all blob versioned hashes from the block body. #[inline] pub fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { - self.blob_transactions_iter() - .filter_map(|tx| tx.as_eip4844().map(|blob_tx| &blob_tx.blob_versioned_hashes)) - .flatten() + self.body.blob_versioned_hashes_iter() } +} - /// Returns all blob versioned hashes from the block body. +impl SealedBlock +where + H: alloy_consensus::BlockHeader, + B: reth_primitives_traits::BlockBody, +{ + /// Ensures that the transaction root in the block header is valid. + /// + /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure + /// populated with each transaction in the transactions list portion of the block. + /// + /// # Returns + /// + /// Returns `Ok(())` if the calculated transaction root matches the one stored in the header, + /// indicating that the transactions in the block are correctly represented in the trie. + /// + /// Returns `Err(error)` if the transaction root validation fails, providing a `GotExpected` + /// error containing the calculated and expected roots. + pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected> + where + B::Transaction: Encodable2718, + { + let calculated_root = self.body.calculate_tx_root(); + + if self.header.transactions_root() != calculated_root { + return Err(GotExpected { + got: calculated_root, + expected: self.header.transactions_root(), + }) + } + + Ok(()) + } +} + +impl SealedBlock +where + H: reth_primitives_traits::BlockHeader, + B: reth_primitives_traits::BlockBody, +{ + /// Splits the sealed block into underlying components #[inline] - pub fn blob_versioned_hashes(&self) -> Vec<&B256> { - self.blob_versioned_hashes_iter().collect() + pub fn split(self) -> (SealedHeader, B) { + (self.header, self.body) } /// Expensive operation that recovers transaction signer. See [`SealedBlockWithSenders`]. - pub fn senders(&self) -> Option> { + pub fn senders(&self) -> Option> + where + B::Transaction: SignedTransaction, + { self.body.recover_signers() } /// Seal sealed block with recovered transaction senders. - pub fn seal_with_senders(self) -> Option { + pub fn seal_with_senders(self) -> Option> + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { self.try_seal_with_senders().ok() } /// Seal sealed block with recovered transaction senders. - pub fn try_seal_with_senders(self) -> Result { + pub fn try_seal_with_senders(self) -> Result, Self> + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { match self.senders() { Some(senders) => Ok(SealedBlockWithSenders { block: self, senders }), None => Err(self), @@ -385,7 +368,11 @@ impl SealedBlock { /// If the number of senders does not match the number of transactions in the block /// and the signer recovery for one of the transactions fails. #[track_caller] - pub fn with_senders_unchecked(self, senders: Vec
) -> SealedBlockWithSenders { + pub fn with_senders_unchecked(self, senders: Vec
) -> SealedBlockWithSenders + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { self.try_with_senders_unchecked(senders).expect("stored block is valid") } @@ -397,14 +384,18 @@ impl SealedBlock { /// /// Returns an error if a signature is invalid. #[track_caller] - pub fn try_with_senders_unchecked( + pub fn try_with_senders_unchecked( self, senders: Vec
, - ) -> Result { - let senders = if self.body.transactions.len() == senders.len() { + ) -> Result, Self> + where + B::Transaction: SignedTransaction, + T: reth_primitives_traits::Block
, + { + let senders = if self.body.transactions().len() == senders.len() { senders } else { - let Some(senders) = self.body.recover_signers() else { return Err(self) }; + let Some(senders) = self.body.recover_signers_unchecked() else { return Err(self) }; senders }; @@ -412,62 +403,28 @@ impl SealedBlock { } /// Unseal the block - pub fn unseal(self) -> Block { - Block { header: self.header.unseal(), body: self.body } - } - - /// Calculates a heuristic for the in-memory size of the [`SealedBlock`]. - #[inline] - pub fn size(&self) -> usize { - self.header.size() + self.body.size() - } - - /// Calculates the total gas used by blob transactions in the sealed block. - pub fn blob_gas_used(&self) -> u64 { - self.blob_transactions().iter().filter_map(|tx| tx.blob_gas_used()).sum() + pub fn unseal(self) -> Block + where + Block: reth_primitives_traits::Block
<Header = H, Body = B>,
+    {
+        Block::new(self.header.unseal(), self.body)
     }
 
-    /// Returns whether or not the block contains any blob transactions.
-    #[inline]
-    pub fn has_blob_transactions(&self) -> bool {
-        self.body.has_blob_transactions()
-    }
-
-    /// Returns whether or not the block contains any eip-7702 transactions.
-    #[inline]
-    pub fn has_eip7702_transactions(&self) -> bool {
-        self.body.has_eip7702_transactions()
-    }
-
-    /// Ensures that the transaction root in the block header is valid.
-    ///
-    /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure
-    /// populated with each transaction in the transactions list portion of the block.
+    /// Returns a vector of encoded 2718 transactions.
     ///
-    /// # Returns
+    /// This is also known as `raw transactions`.
     ///
-    /// Returns `Ok(())` if the calculated transaction root matches the one stored in the header,
-    /// indicating that the transactions in the block are correctly represented in the trie.
-    ///
-    /// Returns `Err(error)` if the transaction root validation fails, providing a `GotExpected`
-    /// error containing the calculated and expected roots.
-    pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected> {
-        let calculated_root = self.body.calculate_tx_root();
-
-        if self.header.transactions_root != calculated_root {
-            return Err(GotExpected {
-                got: calculated_root,
-                expected: self.header.transactions_root,
-            })
-        }
-
-        Ok(())
+    /// See also [`Encodable2718`].
+    #[doc(alias = "raw_transactions")]
+    pub fn encoded_2718_transactions(&self) -> Vec<Bytes> {
+        self.body.encoded_2718_transactions()
     }
 }
 
-    /// Returns a vector of transactions RLP encoded with
-    /// [`alloy_eips::eip2718::Encodable2718::encoded_2718`].
-    pub fn raw_transactions(&self) -> Vec<Bytes> {
-        self.body.transactions().map(|tx| tx.encoded_2718().into()).collect()
+impl<H: InMemorySize, B: InMemorySize> InMemorySize for SealedBlock<H, B> {
+    #[inline]
+    fn size(&self) -> usize {
+        self.header.size() + self.body.size()
     }
 }
 
@@ -477,39 +434,76 @@ impl From<SealedBlock> for Block {
     }
 }
 
+impl<H, B> Default for SealedBlock<H, B>
+where
+    SealedHeader<H>: Default,
+    B: Default,
+{
+    fn default() -> Self {
+        Self { header: Default::default(), body: Default::default() }
+    }
+}
+
+#[cfg(any(test, feature = "arbitrary"))]
+impl<'a, H, B> arbitrary::Arbitrary<'a> for SealedBlock<H, B>
+where
+    SealedHeader<H>: arbitrary::Arbitrary<'a>,
+    B: arbitrary::Arbitrary<'a>,
+{
+    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
+        Ok(Self { header: u.arbitrary()?, body: u.arbitrary()? })
+    }
+}
+
+/// A helper type alias for constructing [`SealedBlock`] from a [`reth_primitives_traits::Block`].
+pub type SealedBlockFor<B> = SealedBlock<
+    <B as reth_primitives_traits::Block>::Header,
+    <B as reth_primitives_traits::Block>::Body,
+>;
+
 /// Sealed block with senders recovered from transactions.
-#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Deref, DerefMut)]
-pub struct SealedBlockWithSenders {
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Deref, DerefMut)]
+pub struct SealedBlockWithSenders<B: reth_primitives_traits::Block = Block> {
     /// Sealed block
     #[deref]
    #[deref_mut]
-    pub block: SealedBlock,
+    #[serde(bound = "SealedBlock<B::Header, B::Body>: Serialize + serde::de::DeserializeOwned")]
+    pub block: SealedBlock<B::Header, B::Body>,
     /// List of senders that match transactions from block.
     pub senders: Vec<Address>
,
 }
 
-impl SealedBlockWithSenders {
+impl<B: reth_primitives_traits::Block> Default for SealedBlockWithSenders<B> {
+    fn default() -> Self {
+        Self { block: SealedBlock::default(), senders: Default::default() }
+    }
+}
+
+impl<B: reth_primitives_traits::Block> SealedBlockWithSenders<B> {
     /// New sealed block with senders. Returns `None` if the number of senders does not match the
     /// number of transactions in the block.
-    pub fn new(block: SealedBlock, senders: Vec<Address>
) -> Option { - (block.body.transactions.len() == senders.len()).then_some(Self { block, senders }) + pub fn new(block: SealedBlock, senders: Vec
) -> Option { + (block.body.transactions().len() == senders.len()).then_some(Self { block, senders }) } +} +impl SealedBlockWithSenders { /// Split Structure to its components #[inline] - pub fn into_components(self) -> (SealedBlock, Vec
) { + pub fn into_components(self) -> (SealedBlock, Vec
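Sealing and sender recovery compose the same way through the generic types. A hedged sketch, assuming the `seal_slow` extension from the new `BlockExt` trait is re-exported from the crate root:

```rust
use reth_primitives::{Block, BlockExt}; // the `BlockExt` re-export is an assumption

fn main() {
    let sealed = Block::default().seal_slow();
    // An empty block trivially recovers an empty sender list, so this cannot fail.
    let with_senders = sealed.try_seal_with_senders::<Block>().expect("no transactions");
    assert!(with_senders.senders.is_empty());
}
```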
) { (self.block, self.senders) } /// Returns the unsealed [`BlockWithSenders`] #[inline] - pub fn unseal(self) -> BlockWithSenders { - let Self { block, senders } = self; - BlockWithSenders { block: block.unseal(), senders } + pub fn unseal(self) -> BlockWithSenders { + let (block, senders) = self.into_components(); + let (header, body) = block.split(); + let header = header.unseal(); + BlockWithSenders::new_unchecked(B::new(header, body), senders) } /// Returns an iterator over all transactions in the block. #[inline] - pub fn transactions(&self) -> impl Iterator + '_ { + pub fn transactions(&self) -> &[::Transaction] { self.block.body.transactions() } @@ -517,24 +511,30 @@ impl SealedBlockWithSenders { #[inline] pub fn transactions_with_sender( &self, - ) -> impl Iterator + '_ { + ) -> impl Iterator::Transaction)> + + '_ { self.senders.iter().zip(self.block.body.transactions()) } /// Consumes the block and returns the transactions of the block. #[inline] - pub fn into_transactions(self) -> Vec { - self.block.body.transactions + pub fn into_transactions( + self, + ) -> Vec<::Transaction> { + self.block.body.into_transactions() } /// Returns an iterator over all transactions in the chain. #[inline] pub fn into_transactions_ecrecovered( self, - ) -> impl Iterator { + ) -> impl Iterator::Transaction>> + where + ::Transaction: SignedTransaction, + { self.block .body - .transactions + .into_transactions() .into_iter() .zip(self.senders) .map(|(tx, sender)| tx.with_signer(sender)) @@ -544,7 +544,7 @@ impl SealedBlockWithSenders { #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for SealedBlockWithSenders { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let block = SealedBlock::arbitrary(u)?; + let block: SealedBlock = SealedBlock::arbitrary(u)?; let senders = block .body @@ -565,15 +565,13 @@ impl<'a> arbitrary::Arbitrary<'a> for SealedBlockWithSenders { Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize, RlpEncodable, RlpDecodable, )] #[rlp(trailing)] -pub struct BlockBody { +pub struct BlockBody { /// Transactions in the block - pub transactions: Vec, + pub transactions: Vec, /// Uncle headers for the given block pub ommers: Vec
, /// Withdrawals in the block. pub withdrawals: Option, - /// Requests in the block. - pub requests: Option, } impl BlockBody { @@ -582,11 +580,22 @@ impl BlockBody { Block { header, body: self } } - /// Calculate the transaction root for the block body. - pub fn calculate_tx_root(&self) -> B256 { - crate::proofs::calculate_transaction_root(&self.transactions) + /// Returns an iterator over all blob versioned hashes from the block body. + #[inline] + pub fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { + self.blob_transactions_iter() + .filter_map(|tx| tx.as_eip4844().map(|blob_tx| &blob_tx.blob_versioned_hashes)) + .flatten() + } + + /// Returns all blob versioned hashes from the block body. + #[inline] + pub fn blob_versioned_hashes(&self) -> Vec<&B256> { + self.blob_versioned_hashes_iter().collect() } +} +impl BlockBody { /// Calculate the ommers root for the block body. pub fn calculate_ommers_root(&self) -> B256 { crate::proofs::calculate_ommers_root(&self.ommers) @@ -597,18 +606,9 @@ impl BlockBody { pub fn calculate_withdrawals_root(&self) -> Option { self.withdrawals.as_ref().map(|w| crate::proofs::calculate_withdrawals_root(w)) } +} - /// Calculate the requests root for the block body, if requests exist. If there are no - /// requests, this will return `None`. - pub fn calculate_requests_root(&self) -> Option { - self.requests.as_ref().map(|r| crate::proofs::calculate_requests_root(&r.0)) - } - - /// Recover signer addresses for all transactions in the block body. - pub fn recover_signers(&self) -> Option> { - TransactionSigned::recover_signers(&self.transactions, self.transactions.len()) - } - +impl BlockBody { /// Returns whether or not the block body contains any blob transactions. #[inline] pub fn has_blob_transactions(&self) -> bool { @@ -623,41 +623,23 @@ impl BlockBody { /// Returns an iterator over all blob transactions of the block #[inline] - pub fn blob_transactions_iter(&self) -> impl Iterator + '_ { + pub fn blob_transactions_iter(&self) -> impl Iterator + '_ { self.transactions.iter().filter(|tx| tx.is_eip4844()) } /// Returns only the blob transactions, if any, from the block body. #[inline] - pub fn blob_transactions(&self) -> Vec<&TransactionSigned> { + pub fn blob_transactions(&self) -> Vec<&T> { self.blob_transactions_iter().collect() } +} - /// Returns an iterator over all blob versioned hashes from the block body. - #[inline] - pub fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { - self.blob_transactions_iter() - .filter_map(|tx| tx.as_eip4844().map(|blob_tx| &blob_tx.blob_versioned_hashes)) - .flatten() - } - - /// Returns all blob versioned hashes from the block body. - #[inline] - pub fn blob_versioned_hashes(&self) -> Vec<&B256> { - self.blob_versioned_hashes_iter().collect() - } - - /// Returns an iterator over all transactions. - #[inline] - pub fn transactions(&self) -> impl Iterator + '_ { - self.transactions.iter() - } - +impl InMemorySize for BlockBody { /// Calculates a heuristic for the in-memory size of the [`BlockBody`]. #[inline] - pub fn size(&self) -> usize { - self.transactions.iter().map(TransactionSigned::size).sum::() + - self.transactions.capacity() * core::mem::size_of::() + + fn size(&self) -> usize { + self.transactions.iter().map(T::size).sum::() + + self.transactions.capacity() * core::mem::size_of::() + self.ommers.iter().map(Header::size).sum::() + self.ommers.capacity() * core::mem::size_of::
() + self.withdrawals @@ -666,13 +648,33 @@ impl BlockBody { } } +impl reth_primitives_traits::BlockBody for BlockBody { + type Transaction = TransactionSigned; + type OmmerHeader = Header; + + fn transactions(&self) -> &[Self::Transaction] { + &self.transactions + } + + fn into_transactions(self) -> Vec { + self.transactions + } + + fn withdrawals(&self) -> Option<&Withdrawals> { + self.withdrawals.as_ref() + } + + fn ommers(&self) -> Option<&[Self::OmmerHeader]> { + Some(&self.ommers) + } +} + impl From for BlockBody { fn from(block: Block) -> Self { Self { transactions: block.body.transactions, ommers: block.body.ommers, withdrawals: block.body.withdrawals, - requests: block.body.requests, } } } @@ -694,8 +696,7 @@ impl<'a> arbitrary::Arbitrary<'a> for BlockBody { }) .collect::>>()?; - // for now just generate empty requests, see HACK above - Ok(Self { transactions, ommers, requests: None, withdrawals: u.arbitrary()? }) + Ok(Self { transactions, ommers, withdrawals: u.arbitrary()? }) } } @@ -704,8 +705,9 @@ impl<'a> arbitrary::Arbitrary<'a> for BlockBody { pub(super) mod serde_bincode_compat { use alloc::{borrow::Cow, vec::Vec}; use alloy_consensus::serde_bincode_compat::Header; + use alloy_eips::eip4895::Withdrawals; use alloy_primitives::Address; - use reth_primitives_traits::{serde_bincode_compat::SealedHeader, Requests, Withdrawals}; + use reth_primitives_traits::serde_bincode_compat::{SealedHeader, SerdeBincodeCompat}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -731,7 +733,6 @@ pub(super) mod serde_bincode_compat { transactions: Vec>, ommers: Vec>, withdrawals: Cow<'a, Option>, - requests: Cow<'a, Option>, } impl<'a> From<&'a super::BlockBody> for BlockBody<'a> { @@ -740,7 +741,6 @@ pub(super) mod serde_bincode_compat { transactions: value.transactions.iter().map(Into::into).collect(), ommers: value.ommers.iter().map(Into::into).collect(), withdrawals: Cow::Borrowed(&value.withdrawals), - requests: Cow::Borrowed(&value.requests), } } } @@ -751,7 +751,6 @@ pub(super) mod serde_bincode_compat { transactions: value.transactions.into_iter().map(Into::into).collect(), ommers: value.ommers.into_iter().map(Into::into).collect(), withdrawals: value.withdrawals.into_owned(), - requests: value.requests.into_owned(), } } } @@ -774,6 +773,10 @@ pub(super) mod serde_bincode_compat { } } + impl SerdeBincodeCompat for super::BlockBody { + type BincodeRepr<'a> = BlockBody<'a>; + } + /// Bincode-compatible [`super::SealedBlock`] serde implementation. 
    ///
     /// Intended to be used with the [`serde_with::serde_as`] macro in the following way:
@@ -790,19 +793,34 @@
     /// }
     /// ```
     #[derive(Debug, Serialize, Deserialize)]
-    pub struct SealedBlock<'a> {
-        header: SealedHeader<'a>,
-        body: BlockBody<'a>,
-    }
-
-    impl<'a> From<&'a super::SealedBlock> for SealedBlock<'a> {
-        fn from(value: &'a super::SealedBlock) -> Self {
-            Self { header: SealedHeader::from(&value.header), body: BlockBody::from(&value.body) }
+    pub struct SealedBlock<'a, H = super::Header, B = super::BlockBody>
+    where
+        H: SerdeBincodeCompat,
+        B: SerdeBincodeCompat,
+    {
+        header: SealedHeader<'a, H>,
+        body: B::BincodeRepr<'a>,
+    }
+
+    impl<'a, H, B> From<&'a super::SealedBlock<H, B>> for SealedBlock<'a, H, B>
+    where
+        H: SerdeBincodeCompat,
+        B: SerdeBincodeCompat,
+    {
+        fn from(value: &'a super::SealedBlock<H, B>) -> Self {
+            Self {
+                header: SealedHeader::from(&value.header),
+                body: B::BincodeRepr::from(&value.body),
+            }
         }
     }
 
-    impl<'a> From<SealedBlock<'a>> for super::SealedBlock {
-        fn from(value: SealedBlock<'a>) -> Self {
+    impl<'a, H, B> From<SealedBlock<'a, H, B>> for super::SealedBlock<H, B>
+    where
+        H: SerdeBincodeCompat,
+        B: SerdeBincodeCompat,
+    {
+        fn from(value: SealedBlock<'a, H, B>) -> Self {
             Self { header: value.header.into(), body: value.body.into() }
         }
     }
@@ -841,19 +859,28 @@
     /// }
     /// ```
     #[derive(Debug, Serialize, Deserialize)]
-    pub struct SealedBlockWithSenders<'a> {
-        block: SealedBlock<'a>,
+    pub struct SealedBlockWithSenders<'a, B = super::Block>
+    where
+        B: reth_primitives_traits::Block,
+    {
+        block: SealedBlock<'a, B::Header, B::Body>,
         senders: Cow<'a, Vec<Address>
>, } - impl<'a> From<&'a super::SealedBlockWithSenders> for SealedBlockWithSenders<'a> { - fn from(value: &'a super::SealedBlockWithSenders) -> Self { + impl<'a, B> From<&'a super::SealedBlockWithSenders> for SealedBlockWithSenders<'a, B> + where + B: reth_primitives_traits::Block, + { + fn from(value: &'a super::SealedBlockWithSenders) -> Self { Self { block: SealedBlock::from(&value.block), senders: Cow::Borrowed(&value.senders) } } } - impl<'a> From> for super::SealedBlockWithSenders { - fn from(value: SealedBlockWithSenders<'a>) -> Self { + impl<'a, B> From> for super::SealedBlockWithSenders + where + B: reth_primitives_traits::Block, + { + fn from(value: SealedBlockWithSenders<'a, B>) -> Self { Self { block: value.block.into(), senders: value.senders.into_owned() } } } @@ -955,12 +982,20 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { - use super::{BlockNumberOrTag::*, *}; - use alloy_eips::eip1898::HexStringMissingPrefixError; + use super::*; + use alloy_eips::{ + eip1898::HexStringMissingPrefixError, BlockId, BlockNumberOrTag, BlockNumberOrTag::*, + RpcBlockHash, + }; use alloy_primitives::hex_literal::hex; use alloy_rlp::{Decodable, Encodable}; use std::str::FromStr; + const fn _traits() { + const fn assert_block() {} + assert_block::(); + } + /// Check parsing according to EIP-1898. #[test] fn can_parse_blockid_u64() { @@ -1124,19 +1159,28 @@ mod tests { Some(BlockWithSenders { block: block.clone(), senders: vec![sender] }) ); let sealed = block.seal_slow(); - assert_eq!(SealedBlockWithSenders::new(sealed.clone(), vec![]), None); + assert_eq!(SealedBlockWithSenders::::new(sealed.clone(), vec![]), None); assert_eq!( - SealedBlockWithSenders::new(sealed.clone(), vec![sender]), + SealedBlockWithSenders::::new(sealed.clone(), vec![sender]), Some(SealedBlockWithSenders { block: sealed, senders: vec![sender] }) ); } #[test] fn test_default_seal() { - let block = SealedBlock::default(); + let block: SealedBlock = SealedBlock::default(); let sealed = block.hash(); - let block = block.unseal(); + let block: Block = block.unseal(); let block = block.seal_slow(); assert_eq!(sealed, block.hash()); } + + #[test] + fn empty_block_rlp() { + let body: BlockBody = BlockBody::default(); + let mut buf = Vec::new(); + body.encode(&mut buf); + let decoded = BlockBody::decode(&mut buf.as_slice()).unwrap(); + assert_eq!(body, decoded); + } } diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs deleted file mode 100644 index 14e892adfbe..00000000000 --- a/crates/primitives/src/constants/eip4844.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) protocol constants and utils for shard Blob Transactions. - -pub use alloy_eips::eip4844::{ - BLOB_GASPRICE_UPDATE_FRACTION, BLOB_TX_MIN_BLOB_GASPRICE, DATA_GAS_PER_BLOB, - FIELD_ELEMENTS_PER_BLOB, FIELD_ELEMENT_BYTES, MAX_BLOBS_PER_BLOCK, MAX_DATA_GAS_PER_BLOCK, - TARGET_BLOBS_PER_BLOCK, TARGET_DATA_GAS_PER_BLOCK, VERSIONED_HASH_VERSION_KZG, -}; diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs deleted file mode 100644 index fd1dc158624..00000000000 --- a/crates/primitives/src/constants/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -//! Ethereum protocol-related constants - -pub use reth_primitives_traits::constants::*; - -/// [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) constants. 
-pub mod eip4844; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index ec65cbf20e5..18fe1498b8a 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -21,12 +21,12 @@ extern crate alloc; +mod traits; +pub use traits::*; + #[cfg(feature = "alloy-compat")] mod alloy_compat; mod block; -#[cfg(feature = "reth-codec")] -mod compression; -pub mod constants; pub mod proofs; mod receipt; pub use reth_static_file_types as static_file; @@ -34,43 +34,26 @@ pub mod transaction; #[cfg(any(test, feature = "arbitrary"))] pub use block::{generate_valid_header, valid_header_strategy}; pub use block::{ - Block, BlockBody, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, BlockWithSenders, - ForkBlock, RpcBlockHash, SealedBlock, SealedBlockWithSenders, -}; -#[cfg(feature = "reth-codec")] -pub use compression::*; -pub use constants::{ - DEV_GENESIS_HASH, EMPTY_OMMER_ROOT_HASH, HOLESKY_GENESIS_HASH, KECCAK_EMPTY, - MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, -}; -pub use receipt::{ - gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, + Block, BlockBody, BlockWithSenders, SealedBlock, SealedBlockFor, SealedBlockWithSenders, }; +pub use receipt::{gas_spent_by_transactions, Receipt, Receipts}; pub use reth_primitives_traits::{ logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, - LogData, Request, Requests, SealedHeader, StorageEntry, Withdrawal, Withdrawals, + LogData, NodePrimitives, SealedHeader, StorageEntry, }; pub use static_file::StaticFileSegment; -pub use transaction::{ - BlobTransaction, BlobTransactionSidecar, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, -}; - -#[cfg(feature = "c-kzg")] -pub use transaction::BlobTransactionValidationError; - pub use transaction::{ util::secp256k1::{public_key_to_address, recover_signer_unchecked, sign_message}, - InvalidTransactionError, Signature, Transaction, TransactionMeta, TransactionSigned, - TransactionSignedEcRecovered, TransactionSignedNoHash, TxHashOrNumber, TxType, - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, + BlobTransaction, InvalidTransactionError, PooledTransactionsElement, + PooledTransactionsElementEcRecovered, RecoveredTx, Transaction, TransactionMeta, + TransactionSigned, TransactionSignedEcRecovered, TxType, }; +pub use alloy_consensus::ReceiptWithBloom; + // Re-exports pub use reth_ethereum_forks::*; -pub use revm_primitives::{self, JumpTable}; #[cfg(any(test, feature = "arbitrary"))] pub use arbitrary; @@ -92,3 +75,17 @@ pub mod serde_bincode_compat { transaction::{serde_bincode_compat as transaction, serde_bincode_compat::*}, }; } + +/// Temp helper struct for integrating [`NodePrimitives`]. +#[derive(Debug, Clone, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +#[non_exhaustive] +pub struct EthPrimitives; + +impl reth_primitives_traits::NodePrimitives for EthPrimitives { + type Block = crate::Block; + type BlockHeader = alloy_consensus::Header; + type BlockBody = crate::BlockBody; + type SignedTx = crate::TransactionSigned; + type TxType = crate::TxType; + type Receipt = crate::Receipt; +} diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index dc814804ec8..4711da0934c 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,65 +1,31 @@ //! Helper function for calculating Merkle proofs and hashes. 
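`EthPrimitives` is the Ethereum instantiation of `NodePrimitives`: downstream code is written once, generic over `N: NodePrimitives`, and wired up per network. A hedged sketch of the pattern, using a hypothetical helper:

```rust
use reth_primitives::EthPrimitives;
use reth_primitives_traits::NodePrimitives;

// Hypothetical helper that only needs to name the node's block type.
fn block_type_name<N: NodePrimitives>() -> &'static str {
    core::any::type_name::<N::Block>()
}

fn main() {
    // For the Ethereum wiring this resolves to `reth_primitives::Block`.
    println!("{}", block_type_name::<EthPrimitives>());
}
```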
-use crate::{ - constants::EMPTY_OMMER_ROOT_HASH, Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, - Request, TransactionSigned, Withdrawal, -}; -use alloc::vec::Vec; -use alloy_eips::{eip2718::Encodable2718, eip7685::Encodable7685}; -use alloy_primitives::{keccak256, B256}; -use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder}; +use crate::Receipt; +use alloy_eips::eip2718::Encodable2718; +use alloy_primitives::B256; +use alloy_trie::root::ordered_trie_root_with_encoder; + +pub use alloy_consensus::proofs::calculate_receipt_root; /// Calculate a transaction root. /// /// `(rlp(index), encoded(tx))` pairs. -pub fn calculate_transaction_root(transactions: &[T]) -> B256 -where - T: AsRef, -{ - ordered_trie_root_with_encoder(transactions, |tx: &T, buf| tx.as_ref().encode_2718(buf)) -} +#[doc(inline)] +pub use alloy_consensus::proofs::calculate_transaction_root; /// Calculates the root hash of the withdrawals. -pub fn calculate_withdrawals_root(withdrawals: &[Withdrawal]) -> B256 { - ordered_trie_root(withdrawals) -} - -/// Calculates the receipt root for a header. -pub fn calculate_receipt_root(receipts: &[ReceiptWithBloom]) -> B256 { - ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) -} - -/// Calculate [EIP-7685](https://eips.ethereum.org/EIPS/eip-7685) requests root. -/// -/// NOTE: The requests are encoded as `id + request` -pub fn calculate_requests_root(requests: &[Request]) -> B256 { - ordered_trie_root_with_encoder(requests, |item, buf| item.encode_7685(buf)) -} +#[doc(inline)] +pub use alloy_consensus::proofs::calculate_withdrawals_root; -/// Calculates the receipt root for a header. -pub fn calculate_receipt_root_ref(receipts: &[ReceiptWithBloomRef<'_>]) -> B256 { - ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) -} +/// Calculates the root hash for ommer/uncle headers. +#[doc(inline)] +pub use alloy_consensus::proofs::calculate_ommers_root; /// Calculates the receipt root for a header for the reference type of [Receipt]. /// /// NOTE: Prefer [`calculate_receipt_root`] if you have log blooms memoized. pub fn calculate_receipt_root_no_memo(receipts: &[&Receipt]) -> B256 { - ordered_trie_root_with_encoder(receipts, |r, buf| { - ReceiptWithBloomRef::from(*r).encode_inner(buf, false) - }) -} - -/// Calculates the root hash for ommer/uncle headers. 
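Since the root helpers are now thin re-exports of the alloy implementations, existing call sites keep compiling unchanged. A quick sanity sketch: an empty transaction list must hash to the canonical empty trie root.

```rust
use alloy_consensus::constants::EMPTY_ROOT_HASH;
use reth_primitives::{proofs::calculate_transaction_root, TransactionSigned};

fn main() {
    // The ordered trie over zero items collapses to the empty root node hash.
    let txs: Vec<TransactionSigned> = Vec::new();
    assert_eq!(calculate_transaction_root(&txs), EMPTY_ROOT_HASH);
}
```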
-pub fn calculate_ommers_root(ommers: &[Header]) -> B256 { - // Check if `ommers` list is empty - if ommers.is_empty() { - return EMPTY_OMMER_ROOT_HASH - } - // RLP Encode - let mut ommers_rlp = Vec::new(); - alloy_rlp::encode_list(ommers, &mut ommers_rlp); - keccak256(ommers_rlp) + ordered_trie_root_with_encoder(receipts, |r, buf| r.with_bloom_ref().encode_2718(buf)) } #[cfg(test)] @@ -92,6 +58,8 @@ mod tests { #[cfg(not(feature = "optimism"))] #[test] fn check_receipt_root_optimism() { + use alloy_consensus::ReceiptWithBloom; + let logs = vec![Log { address: Address::ZERO, data: LogData::new_unchecked(vec![], Default::default()), @@ -104,7 +72,7 @@ mod tests { cumulative_gas_used: 102068, logs, }, - bloom, + logs_bloom: bloom, }; let receipt = vec![receipt]; let root = calculate_receipt_root(&receipt); diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index cfd831ed0f7..62c664e22a4 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,24 +1,29 @@ -#[cfg(feature = "reth-codec")] -use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; -use crate::{ - logs_bloom, TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, - EIP7702_TX_TYPE_ID, -}; use alloc::{vec, vec::Vec}; -use alloy_primitives::{Bloom, Bytes, Log, B256}; -use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable}; -use bytes::{Buf, BufMut}; -use core::{cmp::Ordering, ops::Deref}; +use reth_primitives_traits::InMemorySize; + +use alloy_consensus::{ + Eip2718EncodableReceipt, Eip658Value, ReceiptWithBloom, RlpDecodableReceipt, + RlpEncodableReceipt, TxReceipt, Typed2718, +}; +use alloy_primitives::{Bloom, Log, B256}; +use alloy_rlp::{Decodable, Encodable, Header, RlpDecodable, RlpEncodable}; +use bytes::BufMut; use derive_more::{DerefMut, From, IntoIterator}; -#[cfg(feature = "reth-codec")] -use reth_codecs::{Compact, CompactZstd}; +use reth_primitives_traits::receipt::ReceiptExt; use serde::{Deserialize, Serialize}; +use crate::TxType; +#[cfg(feature = "reth-codec")] +use reth_zstd_compressors::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; + +/// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). +pub use reth_primitives_traits::receipt::gas_spent_by_transactions; + /// Receipt containing result of transaction execution. #[derive( Clone, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable, Serialize, Deserialize, )] -#[cfg_attr(any(test, feature = "reth-codec"), derive(CompactZstd))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::CompactZstd))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests)] #[rlp(trailing)] pub struct Receipt { @@ -49,191 +54,72 @@ impl Receipt { /// Calculates [`Log`]'s bloom filter. this is slow operation and [`ReceiptWithBloom`] can /// be used to cache this value. pub fn bloom_slow(&self) -> Bloom { - logs_bloom(self.logs.iter()) + alloy_primitives::logs_bloom(self.logs.iter()) } /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloom`] container /// type. - pub fn with_bloom(self) -> ReceiptWithBloom { + pub fn with_bloom(self) -> ReceiptWithBloom { self.into() } - /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloomRef`] + /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloom`] /// container type. 
- pub fn with_bloom_ref(&self) -> ReceiptWithBloomRef<'_> { + pub fn with_bloom_ref(&self) -> ReceiptWithBloom<&Self> { self.into() } -} - -/// A collection of receipts organized as a two-dimensional vector. -#[derive( - Clone, - Debug, - PartialEq, - Eq, - Default, - Serialize, - Deserialize, - From, - derive_more::Deref, - DerefMut, - IntoIterator, -)] -pub struct Receipts { - /// A two-dimensional vector of optional `Receipt` instances. - pub receipt_vec: Vec>>, -} - -impl Receipts { - /// Returns the length of the `Receipts` vector. - pub fn len(&self) -> usize { - self.receipt_vec.len() - } - /// Returns `true` if the `Receipts` vector is empty. - pub fn is_empty(&self) -> bool { - self.receipt_vec.is_empty() - } - - /// Push a new vector of receipts into the `Receipts` collection. - pub fn push(&mut self, receipts: Vec>) { - self.receipt_vec.push(receipts); - } - - /// Retrieves all recorded receipts from index and calculates the root using the given closure. - pub fn root_slow(&self, index: usize, f: impl FnOnce(&[&Receipt]) -> B256) -> Option { - let receipts = - self.receipt_vec[index].iter().map(Option::as_ref).collect::>>()?; - Some(f(receipts.as_slice())) - } -} + /// Returns length of RLP-encoded receipt fields with the given [`Bloom`] without an RLP header. + pub fn rlp_encoded_fields_length(&self, bloom: &Bloom) -> usize { + let len = self.success.length() + + self.cumulative_gas_used.length() + + bloom.length() + + self.logs.length(); -impl From> for Receipts { - fn from(block_receipts: Vec) -> Self { - Self { receipt_vec: vec![block_receipts.into_iter().map(Option::Some).collect()] } - } -} - -impl FromIterator>> for Receipts { - fn from_iter>>>(iter: I) -> Self { - iter.into_iter().collect::>().into() - } -} - -impl From for ReceiptWithBloom { - fn from(receipt: Receipt) -> Self { - let bloom = receipt.bloom_slow(); - Self { receipt, bloom } - } -} - -/// [`Receipt`] with calculated bloom filter. -#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[cfg_attr(any(test, feature = "reth-codec"), derive(Compact))] -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] -pub struct ReceiptWithBloom { - /// Bloom filter build from logs. - pub bloom: Bloom, - /// Main receipt body - pub receipt: Receipt, -} - -impl ReceiptWithBloom { - /// Create new [`ReceiptWithBloom`] - pub const fn new(receipt: Receipt, bloom: Bloom) -> Self { - Self { receipt, bloom } - } + #[cfg(feature = "optimism")] + if self.tx_type == TxType::Deposit { + let mut len = len; - /// Consume the structure, returning only the receipt - pub fn into_receipt(self) -> Receipt { - self.receipt - } + if let Some(deposit_nonce) = self.deposit_nonce { + len += deposit_nonce.length(); + } + if let Some(deposit_receipt_version) = self.deposit_receipt_version { + len += deposit_receipt_version.length(); + } - /// Consume the structure, returning the receipt and the bloom filter - pub fn into_components(self) -> (Receipt, Bloom) { - (self.receipt, self.bloom) - } + return len + } - #[inline] - const fn as_encoder(&self) -> ReceiptWithBloomEncoder<'_> { - ReceiptWithBloomEncoder { receipt: &self.receipt, bloom: &self.bloom } + len } -} - -/// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). 
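// Hedged sketch of the bloom memoization used above: `bloom_slow` re-derives
// the filter from the logs on every call, while `with_bloom_ref` computes it
// once and caches it in the borrowing `alloy_consensus::ReceiptWithBloom`
// container for repeated encoding work (e.g. receipt-root calculation).
use alloy_consensus::ReceiptWithBloom;

fn bloom_memoization_sketch(receipt: &Receipt) {
    // Recomputed from the logs each time: fine for a one-off query.
    let _slow = receipt.bloom_slow();
    // Computed once, then reused by every subsequent encoding.
    let _cached: ReceiptWithBloom<&Receipt> = receipt.with_bloom_ref();
}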
-pub fn gas_spent_by_transactions>( - receipts: impl IntoIterator, -) -> Vec<(u64, u64)> { - receipts - .into_iter() - .enumerate() - .map(|(id, receipt)| (id as u64, receipt.deref().cumulative_gas_used)) - .collect() -} -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for Receipt { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let tx_type = TxType::arbitrary(u)?; - let success = bool::arbitrary(u)?; - let cumulative_gas_used = u64::arbitrary(u)?; - let logs = Vec::::arbitrary(u)?; + /// RLP-encodes receipt fields with the given [`Bloom`] without an RLP header. + pub fn rlp_encode_fields(&self, bloom: &Bloom, out: &mut dyn BufMut) { + self.success.encode(out); + self.cumulative_gas_used.encode(out); + bloom.encode(out); + self.logs.encode(out); - // Only receipts for deposit transactions may contain a deposit nonce #[cfg(feature = "optimism")] - let (deposit_nonce, deposit_receipt_version) = if tx_type == TxType::Deposit { - let deposit_nonce = Option::::arbitrary(u)?; - let deposit_nonce_version = - deposit_nonce.map(|_| Option::::arbitrary(u)).transpose()?.flatten(); - (deposit_nonce, deposit_nonce_version) - } else { - (None, None) - }; - - Ok(Self { - tx_type, - success, - cumulative_gas_used, - logs, - #[cfg(feature = "optimism")] - deposit_nonce, - #[cfg(feature = "optimism")] - deposit_receipt_version, - }) - } -} - -impl ReceiptWithBloom { - /// Returns the enveloped encoded receipt. - /// - /// See also [`ReceiptWithBloom::encode_enveloped`] - pub fn envelope_encoded(&self) -> Bytes { - let mut buf = Vec::new(); - self.encode_enveloped(&mut buf); - buf.into() - } - - /// Encodes the receipt into its "raw" format. - /// This format is also referred to as "binary" encoding. - /// - /// For legacy receipts, it encodes the RLP of the receipt into the buffer: - /// `rlp([status, cumulativeGasUsed, logsBloom, logs])` as per EIP-2718. - /// For EIP-2718 typed transactions, it encodes the type of the transaction followed by the rlp - /// of the receipt: - /// - EIP-1559, 2930 and 4844 transactions: `tx-type || rlp([status, cumulativeGasUsed, - /// logsBloom, logs])` - pub fn encode_enveloped(&self, out: &mut dyn bytes::BufMut) { - self.encode_inner(out, false) + if self.tx_type == TxType::Deposit { + if let Some(nonce) = self.deposit_nonce { + nonce.encode(out); + } + if let Some(version) = self.deposit_receipt_version { + version.encode(out); + } + } } - /// Encode receipt with or without the header data. - pub fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { - self.as_encoder().encode_inner(out, with_header) + /// Returns RLP header for inner encoding. 
+ pub fn rlp_header_inner(&self, bloom: &Bloom) -> Header { + Header { list: true, payload_length: self.rlp_encoded_fields_length(bloom) } } - /// Decodes the receipt payload - fn decode_receipt(buf: &mut &[u8], tx_type: TxType) -> alloy_rlp::Result { + fn decode_receipt_with_bloom( + buf: &mut &[u8], + tx_type: TxType, + ) -> alloy_rlp::Result> { let b = &mut &**buf; let rlp_head = alloy_rlp::Header::decode(b)?; if !rlp_head.list { @@ -241,21 +127,20 @@ impl ReceiptWithBloom { } let started_len = b.len(); - let success = alloy_rlp::Decodable::decode(b)?; - let cumulative_gas_used = alloy_rlp::Decodable::decode(b)?; + let success = Decodable::decode(b)?; + let cumulative_gas_used = Decodable::decode(b)?; let bloom = Decodable::decode(b)?; - let logs = alloy_rlp::Decodable::decode(b)?; + let logs = Decodable::decode(b)?; let receipt = match tx_type { #[cfg(feature = "optimism")] TxType::Deposit => { let remaining = |b: &[u8]| rlp_head.payload_length - (started_len - b.len()) > 0; - let deposit_nonce = - remaining(b).then(|| alloy_rlp::Decodable::decode(b)).transpose()?; + let deposit_nonce = remaining(b).then(|| Decodable::decode(b)).transpose()?; let deposit_receipt_version = - remaining(b).then(|| alloy_rlp::Decodable::decode(b)).transpose()?; + remaining(b).then(|| Decodable::decode(b)).transpose()?; - Receipt { + Self { tx_type, success, cumulative_gas_used, @@ -264,7 +149,7 @@ impl ReceiptWithBloom { deposit_receipt_version, } } - _ => Receipt { + _ => Self { tx_type, success, cumulative_gas_used, @@ -276,7 +161,7 @@ impl ReceiptWithBloom { }, }; - let this = Self { receipt, bloom }; + let this = ReceiptWithBloom { receipt, logs_bloom: bloom }; let consumed = started_len - b.len(); if consumed != rlp_head.payload_length { return Err(alloy_rlp::Error::ListLengthMismatch { @@ -289,219 +174,234 @@ impl ReceiptWithBloom { } } -impl Encodable for ReceiptWithBloom { - fn encode(&self, out: &mut dyn BufMut) { - self.encode_inner(out, true) +impl Eip2718EncodableReceipt for Receipt { + fn eip2718_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { + self.rlp_header_inner(bloom).length_with_payload() + + !matches!(self.tx_type, TxType::Legacy) as usize // account for type prefix } - fn length(&self) -> usize { - self.as_encoder().length() + + fn eip2718_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { + if !matches!(self.tx_type, TxType::Legacy) { + out.put_u8(self.tx_type as u8); + } + self.rlp_header_inner(bloom).encode(out); + self.rlp_encode_fields(bloom, out); } } -impl Decodable for ReceiptWithBloom { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - // a receipt is either encoded as a string (non legacy) or a list (legacy). - // We should not consume the buffer if we are decoding a legacy receipt, so let's - // check if the first byte is between 0x80 and 0xbf. 
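// Sketch of the two-step inner encoding these helpers compose (alloy-rlp
// assumed in scope, as in this module's imports): emit a list header sized by
// `rlp_encoded_fields_length`, then the fields, so the declared payload
// length always matches the bytes actually written.
use alloy_primitives::Bloom;
use alloy_rlp::Encodable;

fn encode_inner_sketch(receipt: &Receipt, bloom: &Bloom, out: &mut Vec<u8>) {
    receipt.rlp_header_inner(bloom).encode(out);
    receipt.rlp_encode_fields(bloom, out);
}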
- let rlp_type = *buf - .first() - .ok_or(alloy_rlp::Error::Custom("cannot decode a receipt from empty bytes"))?; - - match rlp_type.cmp(&alloy_rlp::EMPTY_LIST_CODE) { - Ordering::Less => { - // strip out the string header - let _header = alloy_rlp::Header::decode(buf)?; - let receipt_type = *buf.first().ok_or(alloy_rlp::Error::Custom( - "typed receipt cannot be decoded from an empty slice", - ))?; - match receipt_type { - EIP2930_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Eip2930) - } - EIP1559_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Eip1559) - } - EIP4844_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Eip4844) - } - EIP7702_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Eip7702) - } - #[cfg(feature = "optimism")] - crate::transaction::DEPOSIT_TX_TYPE_ID => { - buf.advance(1); - Self::decode_receipt(buf, TxType::Deposit) - } - _ => Err(alloy_rlp::Error::Custom("invalid receipt type")), - } - } - Ordering::Equal => { - Err(alloy_rlp::Error::Custom("an empty list is not a valid receipt encoding")) +impl RlpEncodableReceipt for Receipt { + fn rlp_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { + let mut len = self.eip2718_encoded_length_with_bloom(bloom); + if !matches!(self.tx_type, TxType::Legacy) { + len += Header { + list: false, + payload_length: self.eip2718_encoded_length_with_bloom(bloom), } - Ordering::Greater => Self::decode_receipt(buf, TxType::Legacy), + .length(); + } + + len + } + + fn rlp_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { + if !matches!(self.tx_type, TxType::Legacy) { + Header { list: false, payload_length: self.eip2718_encoded_length_with_bloom(bloom) } + .encode(out); } + self.eip2718_encode_with_bloom(bloom, out); } } -/// [`Receipt`] reference type with calculated bloom filter. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ReceiptWithBloomRef<'a> { - /// Bloom filter build from logs. - pub bloom: Bloom, - /// Main receipt body - pub receipt: &'a Receipt, +impl RlpDecodableReceipt for Receipt { + fn rlp_decode_with_bloom(buf: &mut &[u8]) -> alloy_rlp::Result> { + let header_buf = &mut &**buf; + let header = Header::decode(header_buf)?; + + if header.list { + return Self::decode_receipt_with_bloom(buf, TxType::Legacy); + } + + *buf = *header_buf; + + let remaining = buf.len(); + let tx_type = TxType::decode(buf)?; + let this = Self::decode_receipt_with_bloom(buf, tx_type)?; + + if buf.len() + header.payload_length != remaining { + return Err(alloy_rlp::Error::UnexpectedLength); + } + + Ok(this) + } } -impl<'a> ReceiptWithBloomRef<'a> { - /// Create new [`ReceiptWithBloomRef`] - pub const fn new(receipt: &'a Receipt, bloom: Bloom) -> Self { - Self { receipt, bloom } +impl TxReceipt for Receipt { + type Log = Log; + + fn status_or_post_state(&self) -> Eip658Value { + self.success.into() } - /// Encode receipt with or without the header data. 
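// Sketch of the dispatch rule implemented by `rlp_decode_with_bloom` above:
// a leading RLP *list* header marks a legacy receipt, while a *string*
// header wraps `tx_type || rlp(fields)` for typed receipts.
fn is_legacy_receipt_envelope(buf: &[u8]) -> alloy_rlp::Result<bool> {
    let mut peek = buf;
    // Peek at the header without consuming the caller's buffer.
    Ok(alloy_rlp::Header::decode(&mut peek)?.list)
}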
- pub fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { - self.as_encoder().encode_inner(out, with_header) + fn status(&self) -> bool { + self.success } - #[inline] - const fn as_encoder(&self) -> ReceiptWithBloomEncoder<'_> { - ReceiptWithBloomEncoder { receipt: self.receipt, bloom: &self.bloom } + fn bloom(&self) -> Bloom { + alloy_primitives::logs_bloom(self.logs.iter()) } -} -impl Encodable for ReceiptWithBloomRef<'_> { - fn encode(&self, out: &mut dyn BufMut) { - self.as_encoder().encode_inner(out, true) + fn cumulative_gas_used(&self) -> u128 { + self.cumulative_gas_used as u128 } - fn length(&self) -> usize { - self.as_encoder().length() + + fn logs(&self) -> &[Log] { + &self.logs } } -impl<'a> From<&'a Receipt> for ReceiptWithBloomRef<'a> { - fn from(receipt: &'a Receipt) -> Self { - let bloom = receipt.bloom_slow(); - ReceiptWithBloomRef { receipt, bloom } +impl Typed2718 for Receipt { + fn ty(&self) -> u8 { + self.tx_type as u8 } } -struct ReceiptWithBloomEncoder<'a> { - bloom: &'a Bloom, - receipt: &'a Receipt, -} +impl reth_primitives_traits::Receipt for Receipt {} -impl ReceiptWithBloomEncoder<'_> { - /// Returns the rlp header for the receipt payload. - fn receipt_rlp_header(&self) -> alloy_rlp::Header { - let mut rlp_head = alloy_rlp::Header { list: true, payload_length: 0 }; +impl ReceiptExt for Receipt { + fn receipts_root(_receipts: &[&Self]) -> B256 { + #[cfg(feature = "optimism")] + panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); + #[cfg(not(feature = "optimism"))] + crate::proofs::calculate_receipt_root_no_memo(_receipts) + } +} - rlp_head.payload_length += self.receipt.success.length(); - rlp_head.payload_length += self.receipt.cumulative_gas_used.length(); - rlp_head.payload_length += self.bloom.length(); - rlp_head.payload_length += self.receipt.logs.length(); +impl InMemorySize for Receipt { + /// Calculates a heuristic for the in-memory size of the [Receipt]. + #[inline] + fn size(&self) -> usize { + let total_size = self.tx_type.size() + + core::mem::size_of::() + + core::mem::size_of::() + + self.logs.capacity() * core::mem::size_of::(); #[cfg(feature = "optimism")] - if self.receipt.tx_type == TxType::Deposit { - if let Some(deposit_nonce) = self.receipt.deposit_nonce { - rlp_head.payload_length += deposit_nonce.length(); - } - if let Some(deposit_receipt_version) = self.receipt.deposit_receipt_version { - rlp_head.payload_length += deposit_receipt_version.length(); - } - } - - rlp_head + return total_size + 2 * core::mem::size_of::>(); + #[cfg(not(feature = "optimism"))] + total_size } +} - /// Encodes the receipt data. - fn encode_fields(&self, out: &mut dyn BufMut) { - self.receipt_rlp_header().encode(out); - self.receipt.success.encode(out); - self.receipt.cumulative_gas_used.encode(out); - self.bloom.encode(out); - self.receipt.logs.encode(out); - #[cfg(feature = "optimism")] - if self.receipt.tx_type == TxType::Deposit { - if let Some(deposit_nonce) = self.receipt.deposit_nonce { - deposit_nonce.encode(out) - } - if let Some(deposit_receipt_version) = self.receipt.deposit_receipt_version { - deposit_receipt_version.encode(out) - } - } +/// A collection of receipts organized as a two-dimensional vector. +#[derive( + Clone, + Debug, + PartialEq, + Eq, + Serialize, + Deserialize, + From, + derive_more::Deref, + DerefMut, + IntoIterator, +)] +pub struct Receipts { + /// A two-dimensional vector of optional `Receipt` instances. 
+ pub receipt_vec: Vec>>, +} + +impl Receipts { + /// Returns the length of the `Receipts` vector. + pub fn len(&self) -> usize { + self.receipt_vec.len() } - /// Encode receipt with or without the header data. - fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { - if matches!(self.receipt.tx_type, TxType::Legacy) { - self.encode_fields(out); - return - } + /// Returns true if the `Receipts` vector is empty. + pub fn is_empty(&self) -> bool { + self.receipt_vec.is_empty() + } - let mut payload = Vec::new(); - self.encode_fields(&mut payload); + /// Push a new vector of receipts into the `Receipts` collection. + pub fn push(&mut self, receipts: Vec>) { + self.receipt_vec.push(receipts); + } - if with_header { - let payload_length = payload.len() + 1; - let header = alloy_rlp::Header { list: false, payload_length }; - header.encode(out); - } + /// Retrieves all recorded receipts from index and calculates the root using the given closure. + pub fn root_slow(&self, index: usize, f: impl FnOnce(&[&T]) -> B256) -> Option { + let receipts = + self.receipt_vec[index].iter().map(Option::as_ref).collect::>>()?; + Some(f(receipts.as_slice())) + } +} - match self.receipt.tx_type { - TxType::Legacy => unreachable!("legacy already handled"), +impl From> for Receipts { + fn from(block_receipts: Vec) -> Self { + Self { receipt_vec: vec![block_receipts.into_iter().map(Option::Some).collect()] } + } +} - TxType::Eip2930 => { - out.put_u8(EIP2930_TX_TYPE_ID); - } - TxType::Eip1559 => { - out.put_u8(EIP1559_TX_TYPE_ID); - } - TxType::Eip4844 => { - out.put_u8(EIP4844_TX_TYPE_ID); - } - TxType::Eip7702 => { - out.put_u8(EIP7702_TX_TYPE_ID); - } - #[cfg(feature = "optimism")] - TxType::Deposit => { - out.put_u8(crate::transaction::DEPOSIT_TX_TYPE_ID); - } - } - out.put_slice(payload.as_ref()); +impl FromIterator>> for Receipts { + fn from_iter>>>(iter: I) -> Self { + iter.into_iter().collect::>().into() } +} - /// Returns the length of the receipt data. 
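// Hedged usage sketch for the now-generic `Receipts<T>` above: one inner
// vector per block, with `Option<T>` entries so pruned receipts keep their
// position and index.
fn receipts_usage_sketch() {
    // `From<Vec<T>>` wraps a single block's receipts, all present.
    let mut receipts: Receipts<Receipt> = vec![Receipt::default()].into();
    // A later block whose only receipt has been pruned.
    receipts.push(vec![None]);
    assert_eq!(receipts.len(), 2);
    // Fully present block: the root closure runs.
    assert!(receipts.root_slow(0, |_| alloy_primitives::B256::ZERO).is_some());
    // Pruned entry: the per-block root is unavailable.
    assert!(receipts.root_slow(1, |_| alloy_primitives::B256::ZERO).is_none());
}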
- fn receipt_length(&self) -> usize { - let rlp_head = self.receipt_rlp_header(); - length_of_length(rlp_head.payload_length) + rlp_head.payload_length +impl Default for Receipts { + fn default() -> Self { + Self { receipt_vec: Vec::new() } } } -impl Encodable for ReceiptWithBloomEncoder<'_> { - fn encode(&self, out: &mut dyn BufMut) { - self.encode_inner(out, true) - } - fn length(&self) -> usize { - let mut payload_len = self.receipt_length(); - // account for eip-2718 type prefix and set the list - if !matches!(self.receipt.tx_type, TxType::Legacy) { - payload_len += 1; - // we include a string header for typed receipts, so include the length here - payload_len += length_of_length(payload_len); - } +#[cfg(any(test, feature = "arbitrary"))] +impl<'a> arbitrary::Arbitrary<'a> for Receipt { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + let tx_type = TxType::arbitrary(u)?; + let success = bool::arbitrary(u)?; + let cumulative_gas_used = u64::arbitrary(u)?; + let logs = Vec::::arbitrary(u)?; - payload_len + // Only receipts for deposit transactions may contain a deposit nonce + #[cfg(feature = "optimism")] + let (deposit_nonce, deposit_receipt_version) = if tx_type == TxType::Deposit { + let deposit_nonce = Option::::arbitrary(u)?; + let deposit_nonce_version = + deposit_nonce.map(|_| Option::::arbitrary(u)).transpose()?.flatten(); + (deposit_nonce, deposit_nonce_version) + } else { + (None, None) + }; + + Ok(Self { + tx_type, + success, + cumulative_gas_used, + logs, + #[cfg(feature = "optimism")] + deposit_nonce, + #[cfg(feature = "optimism")] + deposit_receipt_version, + }) } } #[cfg(test)] mod tests { use super::*; - use alloy_primitives::{address, b256, bytes, hex_literal::hex}; + use alloy_eips::eip2718::Encodable2718; + use alloy_primitives::{address, b256, bytes, hex_literal::hex, Bytes}; + use reth_codecs::Compact; + + #[test] + fn test_decode_receipt() { + #[cfg(not(feature = "optimism"))] + reth_codecs::test_utils::test_decode::(&hex!( + "c428b52ffd23fc42696156b10200f034792b6a94c3850215c2fef7aea361a0c31b79d9a32652eefc0d4e2e730036061cff7344b6fc6132b50cda0ed810a991ae58ef013150c12b2522533cb3b3a8b19b7786a8b5ff1d3cdc84225e22b02def168c8858df" + )); + #[cfg(feature = "optimism")] + reth_codecs::test_utils::test_decode::(&hex!( + "c30328b52ffd23fc426961a00105007eb0042307705a97e503562eacf2b95060cce9de6de68386b6c155b73a9650021a49e2f8baad17f30faff5899d785c4c0873e45bc268bcf07560106424570d11f9a59e8f3db1efa4ceec680123712275f10d92c3411e1caaa11c7c5d591bc11487168e09934a9986848136da1b583babf3a7188e3aed007a1520f1cf4c1ca7d3482c6c28d37c298613c70a76940008816c4c95644579fd08471dc34732fd0f24" + )); + } // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 #[test] @@ -527,7 +427,7 @@ mod tests { #[cfg(feature = "optimism")] deposit_receipt_version: None, }, - bloom: [0; 256].into(), + logs_bloom: [0; 256].into(), }; receipt.encode(&mut data); @@ -561,7 +461,7 @@ mod tests { #[cfg(feature = "optimism")] deposit_receipt_version: None, }, - bloom: [0; 256].into(), + logs_bloom: [0; 256].into(), }; let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); @@ -571,7 +471,7 @@ mod tests { #[cfg(feature = "optimism")] #[test] fn decode_deposit_receipt_regolith_roundtrip() { - let data = 
hex!("7ef9010c0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf"); + let data = hex!("b901107ef9010c0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf"); // Deposit Receipt (post-regolith) let expected = ReceiptWithBloom { @@ -583,21 +483,21 @@ mod tests { deposit_nonce: Some(4012991), deposit_receipt_version: None, }, - bloom: [0; 256].into(), + logs_bloom: [0; 256].into(), }; let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); assert_eq!(receipt, expected); let mut buf = Vec::with_capacity(data.len()); - receipt.encode_inner(&mut buf, false); + receipt.encode(&mut buf); assert_eq!(buf, &data[..]); } #[cfg(feature = "optimism")] #[test] fn decode_deposit_receipt_canyon_roundtrip() { - let data = hex!("7ef9010d0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf01"); + let data = hex!("b901117ef9010d0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf01"); // Deposit Receipt (post-regolith) let expected = ReceiptWithBloom { @@ -609,14 +509,14 @@ mod tests { deposit_nonce: Some(4012991), deposit_receipt_version: Some(1), }, - bloom: [0; 256].into(), + logs_bloom: [0; 256].into(), }; let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); assert_eq!(receipt, expected); let mut buf = Vec::with_capacity(data.len()); - expected.encode_inner(&mut buf, false); + expected.encode(&mut buf); assert_eq!(buf, &data[..]); } @@ -649,4 +549,50 @@ mod tests { let (decoded, _) = Receipt::from_compact(&data[..], data.len()); assert_eq!(decoded, receipt); } + + #[test] + fn test_encode_2718_length() { + let receipt = ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Eip1559, 
+ success: true, + cumulative_gas_used: 21000, + logs: vec![], + #[cfg(feature = "optimism")] + deposit_nonce: None, + #[cfg(feature = "optimism")] + deposit_receipt_version: None, + }, + logs_bloom: Bloom::default(), + }; + + let encoded = receipt.encoded_2718(); + assert_eq!( + encoded.len(), + receipt.encode_2718_len(), + "Encoded length should match the actual encoded data length" + ); + + // Test for legacy receipt as well + let legacy_receipt = ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Legacy, + success: true, + cumulative_gas_used: 21000, + logs: vec![], + #[cfg(feature = "optimism")] + deposit_nonce: None, + #[cfg(feature = "optimism")] + deposit_receipt_version: None, + }, + logs_bloom: Bloom::default(), + }; + + let legacy_encoded = legacy_receipt.encoded_2718(); + assert_eq!( + legacy_encoded.len(), + legacy_receipt.encode_2718_len(), + "Encoded length for legacy receipt should match the actual encoded data length" + ); + } } diff --git a/crates/primitives/src/traits.rs b/crates/primitives/src/traits.rs new file mode 100644 index 00000000000..3f009bba84b --- /dev/null +++ b/crates/primitives/src/traits.rs @@ -0,0 +1,114 @@ +use crate::{ + transaction::{recover_signers, recover_signers_unchecked}, + BlockWithSenders, SealedBlock, +}; +use alloc::vec::Vec; +use reth_primitives_traits::{Block, BlockBody, SealedHeader, SignedTransaction}; +use revm_primitives::{Address, B256}; + +/// Extension trait for [`reth_primitives_traits::Block`] implementations +/// allowing for conversions into common block parts containers such as [`SealedBlock`], +/// [`BlockWithSenders`], etc. +pub trait BlockExt: Block { + /// Calculate the header hash and seal the block so that it can't be changed. + fn seal_slow(self) -> SealedBlock { + let (header, body) = self.split(); + SealedBlock { header: SealedHeader::seal(header), body } + } + + /// Seal the block with a known hash. + /// + /// WARNING: This method does not perform validation whether the hash is correct. + fn seal(self, hash: B256) -> SealedBlock { + let (header, body) = self.split(); + SealedBlock { header: SealedHeader::new(header, hash), body } + } + + /// Expensive operation that recovers transaction signer. + fn senders(&self) -> Option> + where + ::Transaction: SignedTransaction, + { + self.body().recover_signers() + } + + /// Transform into a [`BlockWithSenders`]. + /// + /// # Panics + /// + /// If the number of senders does not match the number of transactions in the block + /// and the signer recovery for one of the transactions fails. + /// + /// Note: this is expected to be called with blocks read from disk. + #[track_caller] + fn with_senders_unchecked(self, senders: Vec
<Address>) -> BlockWithSenders<Self>
+    where
+        <Self::Body as BlockBody>::Transaction: SignedTransaction,
+    {
+        self.try_with_senders_unchecked(senders).expect("stored block is valid")
+    }
+
+    /// Transform into a [`BlockWithSenders`] using the given senders.
+    ///
+    /// If the number of senders does not match the number of transactions in the block, this falls
+    /// back to manual recovery, but _without ensuring that the signature has a low `s` value_.
+    /// See also [`recover_signers_unchecked`].
+    ///
+    /// Returns an error if a signature is invalid.
+    #[track_caller]
+    fn try_with_senders_unchecked(
+        self,
+        senders: Vec<Address>
, + ) -> Result, Self> + where + ::Transaction: SignedTransaction, + { + let senders = if self.body().transactions().len() == senders.len() { + senders + } else { + let Some(senders) = self.body().recover_signers_unchecked() else { return Err(self) }; + senders + }; + + Ok(BlockWithSenders::new_unchecked(self, senders)) + } + + /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained + /// transactions. + /// + /// Returns `None` if a transaction is invalid. + fn with_recovered_senders(self) -> Option> + where + ::Transaction: SignedTransaction, + { + let senders = self.senders()?; + Some(BlockWithSenders::new_unchecked(self, senders)) + } +} + +impl BlockExt for T {} + +/// Extension trait for [`BlockBody`] adding helper methods operating with transactions. +pub trait BlockBodyTxExt: BlockBody { + /// Recover signer addresses for all transactions in the block body. + fn recover_signers(&self) -> Option> + where + Self::Transaction: SignedTransaction, + { + recover_signers(self.transactions(), self.transactions().len()) + } + + /// Recover signer addresses for all transactions in the block body _without ensuring that the + /// signature has a low `s` value_. + /// + /// Returns `None`, if some transaction's signature is invalid, see also + /// [`recover_signers_unchecked`]. + fn recover_signers_unchecked(&self) -> Option> + where + Self::Transaction: SignedTransaction, + { + recover_signers_unchecked(self.transactions(), self.transactions().len()) + } +} + +impl BlockBodyTxExt for T {} diff --git a/crates/primitives/src/transaction/compat.rs b/crates/primitives/src/transaction/compat.rs index 81281186f64..883c89c45f5 100644 --- a/crates/primitives/src/transaction/compat.rs +++ b/crates/primitives/src/transaction/compat.rs @@ -1,5 +1,7 @@ use crate::{Transaction, TransactionSigned}; use alloy_primitives::{Address, TxKind, U256}; +#[cfg(feature = "optimism")] +use op_alloy_consensus::DepositTransaction; use revm_primitives::{AuthorizationList, TxEnv}; /// Implements behaviour to fill a [`TxEnv`] from another transaction. diff --git a/crates/primitives/src/transaction/error.rs b/crates/primitives/src/transaction/error.rs index 790292cd82b..78f6cf5e5fd 100644 --- a/crates/primitives/src/transaction/error.rs +++ b/crates/primitives/src/transaction/error.rs @@ -76,7 +76,7 @@ pub enum TransactionConversionError { } /// Represents error variants than can happen when trying to convert a -/// [`TransactionSignedEcRecovered`](crate::TransactionSignedEcRecovered) transaction. +/// [`RecoveredTx`](crate::RecoveredTx) transaction. #[derive(Debug, Clone, Eq, PartialEq, derive_more::Display)] pub enum TryFromRecoveredTransactionError { /// Thrown if the transaction type is unsupported. diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index aeee4232e05..b64cf094042 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,50 +1,51 @@ //! Transaction types. 
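// Hedged sketch for the `BlockExt` helpers introduced in traits.rs above;
// the generic bounds are reconstructed from the trait, and the block type is
// whatever the caller implements `Block` for. Prefer senders cached on disk
// and fall back to unchecked recovery only on a count mismatch.
use reth_primitives_traits::{BlockBody, SignedTransaction};
use revm_primitives::Address;

fn attach_senders_sketch<B>(block: B, cached: Vec<Address>) -> Result<BlockWithSenders<B>, B>
where
    B: BlockExt,
    <B::Body as BlockBody>::Transaction: SignedTransaction,
{
    // Uses `cached` when its length matches the transaction count; otherwise
    // recovers senders without the EIP-2 low-s check.
    block.try_with_senders_unchecked(cached)
}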
-use crate::BlockHashOrNumber; -use alloy_eips::eip7702::SignedAuthorization; -use alloy_primitives::{keccak256, Address, ChainId, TxKind, B256, U256}; - +use alloc::vec::Vec; use alloy_consensus::{ - SignableTransaction, Transaction as AlloyTransaction, TxEip1559, TxEip2930, TxEip4844, - TxEip7702, TxLegacy, + transaction::RlpEcdsaTx, SignableTransaction, Signed, Transaction as _, TxEip1559, TxEip2930, + TxEip4844, TxEip4844Variant, TxEip7702, TxLegacy, TypedTransaction, }; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, eip2930::AccessList, + eip7702::SignedAuthorization, +}; +use alloy_primitives::{ + keccak256, Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256, }; -use alloy_primitives::{Bytes, TxHash}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; -use core::mem; +use core::hash::{Hash, Hasher}; use derive_more::{AsRef, Deref}; use once_cell as _; #[cfg(not(feature = "std"))] -use once_cell::sync::Lazy as LazyLock; +use once_cell::sync::{Lazy as LazyLock, OnceCell as OnceLock}; +#[cfg(feature = "optimism")] +use op_alloy_consensus::DepositTransaction; +#[cfg(feature = "optimism")] +use op_alloy_consensus::TxDeposit; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; +use reth_primitives_traits::{InMemorySize, SignedTransaction}; +use revm_primitives::{AuthorizationList, TxEnv}; use serde::{Deserialize, Serialize}; -use signature::{decode_with_eip155_chain_id, with_eip155_parity}; +use signature::decode_with_eip155_chain_id; #[cfg(feature = "std")] -use std::sync::LazyLock; +use std::sync::{LazyLock, OnceLock}; +pub use compat::FillTxEnv; pub use error::{ InvalidTransactionError, TransactionConversionError, TryFromRecoveredTransactionError, }; pub use meta::TransactionMeta; pub use pooled::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; -#[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] -pub use sidecar::generate_blob_sidecar; -#[cfg(feature = "c-kzg")] -pub use sidecar::BlobTransactionValidationError; -pub use sidecar::{BlobTransaction, BlobTransactionSidecar}; +pub use reth_primitives_traits::WithEncoded; +pub use sidecar::BlobTransaction; +pub use signature::{recover_signer, recover_signer_unchecked}; +pub use tx_type::TxType; -pub use compat::FillTxEnv; -pub use signature::{ - extract_chain_id, legacy_parity, recover_signer, recover_signer_unchecked, Signature, -}; -pub use tx_type::{ - TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, -}; -pub use variant::TransactionSignedVariant; +/// Handling transaction signature operations, including signature recovery, +/// applying chain IDs, and EIP-2 validation. +pub mod signature; +pub mod util; pub(crate) mod access_list; mod compat; @@ -52,34 +53,17 @@ mod error; mod meta; mod pooled; mod sidecar; -mod signature; mod tx_type; -pub(crate) mod util; -mod variant; -#[cfg(feature = "optimism")] -use op_alloy_consensus::TxDeposit; -#[cfg(feature = "optimism")] -use reth_optimism_chainspec::optimism_deposit_tx_signature; -#[cfg(feature = "optimism")] -pub use tx_type::DEPOSIT_TX_TYPE_ID; #[cfg(any(test, feature = "reth-codec"))] -use tx_type::{ +pub use tx_type::{ COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, COMPACT_IDENTIFIER_LEGACY, }; -#[cfg(test)] -use reth_codecs::Compact; - -use alloc::vec::Vec; - -/// Either a transaction hash or number. 
-pub type TxHashOrNumber = BlockHashOrNumber; - -// Expected number of transactions where we can expect a speed-up by recovering the senders in -// parallel. -pub(crate) static PARALLEL_SENDER_RECOVERY_THRESHOLD: LazyLock = +/// Expected number of transactions where we can expect a speed-up by recovering the senders in +/// parallel. +pub static PARALLEL_SENDER_RECOVERY_THRESHOLD: LazyLock = LazyLock::new(|| match rayon::current_num_threads() { 0..=1 => usize::MAX, 2..=8 => 10, @@ -142,6 +126,31 @@ pub enum Transaction { Deposit(TxDeposit), } +#[cfg(feature = "optimism")] +impl DepositTransaction for Transaction { + fn source_hash(&self) -> Option { + match self { + Self::Deposit(tx) => tx.source_hash(), + _ => None, + } + } + fn mint(&self) -> Option { + match self { + Self::Deposit(tx) => tx.mint(), + _ => None, + } + } + fn is_system_transaction(&self) -> bool { + match self { + Self::Deposit(tx) => tx.is_system_transaction(), + _ => false, + } + } + fn is_deposit(&self) -> bool { + matches!(self, Self::Deposit(_)) + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for Transaction { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { @@ -213,29 +222,6 @@ impl Transaction { } } - /// Gets the transaction's [`TxKind`], which is the address of the recipient or - /// [`TxKind::Create`] if the transaction is a contract creation. - pub const fn kind(&self) -> TxKind { - match self { - Self::Legacy(TxLegacy { to, .. }) | - Self::Eip2930(TxEip2930 { to, .. }) | - Self::Eip1559(TxEip1559 { to, .. }) => *to, - Self::Eip4844(TxEip4844 { to, .. }) | Self::Eip7702(TxEip7702 { to, .. }) => { - TxKind::Call(*to) - } - #[cfg(feature = "optimism")] - Self::Deposit(TxDeposit { to, .. }) => *to, - } - } - - /// Get the transaction's address of the contract that will be called, or the address that will - /// receive the transfer. - /// - /// Returns `None` if this is a `CREATE` transaction. - pub fn to(&self) -> Option
<Address>
{ - self.kind().to().copied() - } - /// Get the transaction's type pub const fn tx_type(&self) -> TxType { match self { @@ -249,80 +235,15 @@ impl Transaction { } } - /// Returns the [`AccessList`] of the transaction. - /// - /// Returns `None` for legacy transactions. - pub const fn access_list(&self) -> Option<&AccessList> { - match self { - Self::Legacy(_) => None, - Self::Eip2930(tx) => Some(&tx.access_list), - Self::Eip1559(tx) => Some(&tx.access_list), - Self::Eip4844(tx) => Some(&tx.access_list), - Self::Eip7702(tx) => Some(&tx.access_list), - #[cfg(feature = "optimism")] - Self::Deposit(_) => None, - } - } - - /// Returns the [`SignedAuthorization`] list of the transaction. - /// - /// Returns `None` if this transaction is not EIP-7702. - pub fn authorization_list(&self) -> Option<&[SignedAuthorization]> { - match self { - Self::Eip7702(tx) => Some(&tx.authorization_list), - _ => None, - } - } - - /// Returns true if the tx supports dynamic fees - pub const fn is_dynamic_fee(&self) -> bool { - match self { - Self::Legacy(_) | Self::Eip2930(_) => false, - Self::Eip1559(_) | Self::Eip4844(_) | Self::Eip7702(_) => true, - #[cfg(feature = "optimism")] - Self::Deposit(_) => false, - } - } - - /// Blob versioned hashes for eip4844 transaction, for legacy, eip1559, eip2930 and eip7702 - /// transactions this is `None` - /// - /// This is also commonly referred to as the "blob versioned hashes" (`BlobVersionedHashes`). - pub fn blob_versioned_hashes(&self) -> Option> { - match self { - Self::Legacy(_) | Self::Eip2930(_) | Self::Eip1559(_) | Self::Eip7702(_) => None, - Self::Eip4844(TxEip4844 { blob_versioned_hashes, .. }) => { - Some(blob_versioned_hashes.clone()) - } - #[cfg(feature = "optimism")] - Self::Deposit(_) => None, - } - } - /// Returns the blob gas used for all blobs of the EIP-4844 transaction if it is an EIP-4844 /// transaction. /// /// This is the number of blobs times the - /// [`DATA_GAS_PER_BLOB`](crate::constants::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. + /// [`DATA_GAS_PER_BLOB`](alloy_eips::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. pub fn blob_gas_used(&self) -> Option { self.as_eip4844().map(TxEip4844::blob_gas) } - /// Returns the effective gas price for the given base fee. - /// - /// If the transaction is a legacy or EIP2930 transaction, the gas price is returned. - pub const fn effective_gas_price(&self, base_fee: Option) -> u128 { - match self { - Self::Legacy(tx) => tx.gas_price, - Self::Eip2930(tx) => tx.gas_price, - Self::Eip1559(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), - Self::Eip4844(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), - Self::Eip7702(dynamic_tx) => dynamic_tx.effective_gas_price(base_fee), - #[cfg(feature = "optimism")] - Self::Deposit(_) => 0, - } - } - /// Returns the effective miner gas tip cap (`gasTipCap`) for the given base fee: /// `min(maxFeePerGas - baseFee, maxPriorityFeePerGas)` /// @@ -354,89 +275,39 @@ impl Transaction { } } - /// Get the transaction's input field. - pub const fn input(&self) -> &Bytes { + /// This encodes the transaction _without_ the signature, and is only suitable for creating a + /// hash intended for signing. + pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { match self { - Self::Legacy(TxLegacy { input, .. }) | - Self::Eip2930(TxEip2930 { input, .. }) | - Self::Eip1559(TxEip1559 { input, .. }) | - Self::Eip4844(TxEip4844 { input, .. }) | - Self::Eip7702(TxEip7702 { input, .. 
}) => input, + Self::Legacy(tx) => tx.encode_for_signing(out), + Self::Eip2930(tx) => tx.encode_for_signing(out), + Self::Eip1559(tx) => tx.encode_for_signing(out), + Self::Eip4844(tx) => tx.encode_for_signing(out), + Self::Eip7702(tx) => tx.encode_for_signing(out), #[cfg(feature = "optimism")] - Self::Deposit(TxDeposit { input, .. }) => input, - } - } - - /// Returns the source hash of the transaction, which uniquely identifies its source. - /// If not a deposit transaction, this will always return `None`. - #[cfg(feature = "optimism")] - pub const fn source_hash(&self) -> Option { - match self { - Self::Deposit(TxDeposit { source_hash, .. }) => Some(*source_hash), - _ => None, - } - } - - /// Returns the amount of ETH locked up on L1 that will be minted on L2. If the transaction - /// is not a deposit transaction, this will always return `None`. - #[cfg(feature = "optimism")] - pub const fn mint(&self) -> Option { - match self { - Self::Deposit(TxDeposit { mint, .. }) => *mint, - _ => None, - } - } - - /// Returns whether or not the transaction is a system transaction. If the transaction - /// is not a deposit transaction, this will always return `false`. - #[cfg(feature = "optimism")] - pub const fn is_system_transaction(&self) -> bool { - match self { - Self::Deposit(TxDeposit { is_system_transaction, .. }) => *is_system_transaction, - _ => false, + Self::Deposit(_) => {} } } - /// Returns whether or not the transaction is an Optimism Deposited transaction. - #[cfg(feature = "optimism")] - pub const fn is_deposit(&self) -> bool { - matches!(self, Self::Deposit(_)) - } - - /// This encodes the transaction _without_ the signature, and is only suitable for creating a - /// hash intended for signing. - pub fn encode_without_signature(&self, out: &mut dyn bytes::BufMut) { - Encodable::encode(self, out); - } - - /// Inner encoding function that is used for both rlp [`Encodable`] trait and for calculating - /// hash that for eip2718 does not require rlp header - pub fn encode_with_signature( - &self, - signature: &Signature, - out: &mut dyn bytes::BufMut, - with_header: bool, - ) { + /// Produces EIP-2718 encoding of the transaction + pub fn eip2718_encode(&self, signature: &Signature, out: &mut dyn bytes::BufMut) { match self { Self::Legacy(legacy_tx) => { // do nothing w/ with_header - legacy_tx.encode_with_signature_fields( - &with_eip155_parity(signature, legacy_tx.chain_id), - out, - ) + legacy_tx.eip2718_encode(signature, out); } Self::Eip2930(access_list_tx) => { - access_list_tx.encode_with_signature(signature, out, with_header) + access_list_tx.eip2718_encode(signature, out); } Self::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.encode_with_signature(signature, out, with_header) + dynamic_fee_tx.eip2718_encode(signature, out); } - Self::Eip4844(blob_tx) => blob_tx.encode_with_signature(signature, out, with_header), + Self::Eip4844(blob_tx) => blob_tx.eip2718_encode(signature, out), Self::Eip7702(set_code_tx) => { - set_code_tx.encode_with_signature(signature, out, with_header) + set_code_tx.eip2718_encode(signature, out); } #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => deposit_tx.encode_inner(out, with_header), + Self::Deposit(deposit_tx) => deposit_tx.encode_2718(out), } } @@ -492,20 +363,6 @@ impl Transaction { } } - /// Calculates a heuristic for the in-memory size of the [Transaction]. 
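// Sketch of the envelope produced by `eip2718_encode` above (the
// `RlpEcdsaTx` helpers from alloy-consensus are assumed, as imported in this
// module): typed payloads become `tx_type || rlp(fields ++ signature)`,
// while legacy transactions stay a bare signed RLP list.
use alloy_primitives::PrimitiveSignature as Signature;

fn envelope_sketch(tx: &Transaction, signature: &Signature) -> Vec<u8> {
    let mut out = Vec::new();
    tx.eip2718_encode(signature, &mut out);
    out
}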
- #[inline] - pub fn size(&self) -> usize { - match self { - Self::Legacy(tx) => tx.size(), - Self::Eip2930(tx) => tx.size(), - Self::Eip1559(tx) => tx.size(), - Self::Eip4844(tx) => tx.size(), - Self::Eip7702(tx) => tx.size(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.size(), - } - } - /// Returns true if the transaction is a legacy transaction. #[inline] pub const fn is_legacy(&self) -> bool { @@ -577,6 +434,22 @@ impl Transaction { } } +impl InMemorySize for Transaction { + /// Calculates a heuristic for the in-memory size of the [Transaction]. + #[inline] + fn size(&self) -> usize { + match self { + Self::Legacy(tx) => tx.size(), + Self::Eip2930(tx) => tx.size(), + Self::Eip1559(tx) => tx.size(), + Self::Eip4844(tx) => tx.size(), + Self::Eip7702(tx) => tx.size(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.size(), + } + } +} + #[cfg(any(test, feature = "reth-codec"))] impl reth_codecs::Compact for Transaction { // Serializes the TxType to the buffer if necessary, returning 2 bits of the type as an @@ -622,19 +495,19 @@ impl reth_codecs::Compact for Transaction { use bytes::Buf; match identifier { - COMPACT_IDENTIFIER_LEGACY => { + reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => { let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); (Self::Legacy(tx), buf) } - COMPACT_IDENTIFIER_EIP2930 => { + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => { let (tx, buf) = TxEip2930::from_compact(buf, buf.len()); (Self::Eip2930(tx), buf) } - COMPACT_IDENTIFIER_EIP1559 => { + reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => { let (tx, buf) = TxEip1559::from_compact(buf, buf.len()); (Self::Eip1559(tx), buf) } - COMPACT_EXTENDED_IDENTIFIER_FLAG => { + reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => { // An identifier of 3 indicates that the transaction type did not fit into // the backwards compatible 2 bit identifier, their transaction types are // larger than 2 bits (eg. 4844 and Deposit Transactions). In this case, @@ -642,23 +515,25 @@ impl reth_codecs::Compact for Transaction { // reading the full 8 bits (single byte) and match on this transaction type. let identifier = buf.get_u8(); match identifier { - EIP4844_TX_TYPE_ID => { + alloy_consensus::constants::EIP4844_TX_TYPE_ID => { let (tx, buf) = TxEip4844::from_compact(buf, buf.len()); (Self::Eip4844(tx), buf) } - EIP7702_TX_TYPE_ID => { + alloy_consensus::constants::EIP7702_TX_TYPE_ID => { let (tx, buf) = TxEip7702::from_compact(buf, buf.len()); (Self::Eip7702(tx), buf) } #[cfg(feature = "optimism")] - DEPOSIT_TX_TYPE_ID => { + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => { let (tx, buf) = TxDeposit::from_compact(buf, buf.len()); (Self::Deposit(tx), buf) } - _ => unreachable!("Junk data in database: unknown Transaction variant"), + _ => unreachable!( + "Junk data in database: unknown Transaction variant: {identifier}" + ), } } - _ => unreachable!("Junk data in database: unknown Transaction variant"), + _ => unreachable!("Junk data in database: unknown Transaction variant: {identifier}"), } } } @@ -669,47 +544,7 @@ impl Default for Transaction { } } -impl Encodable for Transaction { - /// This encodes the transaction _without_ the signature, and is only suitable for creating a - /// hash intended for signing. 
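// Sketch of the compact tag scheme matched above: identifiers 0..=2 name
// legacy/EIP-2930/EIP-1559 inline, and 3 (the extended flag) signals that a
// full EIP-2718 type byte follows (0x03 for EIP-4844, 0x04 for EIP-7702,
// 0x7E for OP deposits). The constant value mirrors the comment in the hunk
// above.
const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3;

fn compact_tag_needs_type_byte(identifier: usize) -> bool {
    identifier == COMPACT_EXTENDED_IDENTIFIER_FLAG
}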
- fn encode(&self, out: &mut dyn bytes::BufMut) { - match self { - Self::Legacy(legacy_tx) => { - legacy_tx.encode_for_signing(out); - } - Self::Eip2930(access_list_tx) => { - access_list_tx.encode_for_signing(out); - } - Self::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.encode_for_signing(out); - } - Self::Eip4844(blob_tx) => { - blob_tx.encode_for_signing(out); - } - Self::Eip7702(set_code_tx) => { - set_code_tx.encode_for_signing(out); - } - #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => { - deposit_tx.encode_inner(out, true); - } - } - } - - fn length(&self) -> usize { - match self { - Self::Legacy(legacy_tx) => legacy_tx.payload_len_for_signature(), - Self::Eip2930(access_list_tx) => access_list_tx.payload_len_for_signature(), - Self::Eip1559(dynamic_fee_tx) => dynamic_fee_tx.payload_len_for_signature(), - Self::Eip4844(blob_tx) => blob_tx.payload_len_for_signature(), - Self::Eip7702(set_code_tx) => set_code_tx.payload_len_for_signature(), - #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => deposit_tx.encoded_len(true), - } - } -} - -impl AlloyTransaction for Transaction { +impl alloy_consensus::Transaction for Transaction { fn chain_id(&self) -> Option { match self { Self::Legacy(tx) => tx.chain_id(), @@ -782,6 +617,18 @@ impl AlloyTransaction for Transaction { } } + fn max_fee_per_blob_gas(&self) -> Option { + match self { + Self::Legacy(tx) => tx.max_fee_per_blob_gas(), + Self::Eip2930(tx) => tx.max_fee_per_blob_gas(), + Self::Eip1559(tx) => tx.max_fee_per_blob_gas(), + Self::Eip4844(tx) => tx.max_fee_per_blob_gas(), + Self::Eip7702(tx) => tx.max_fee_per_blob_gas(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.max_fee_per_blob_gas(), + } + } + fn priority_fee_or_price(&self) -> u128 { match self { Self::Legacy(tx) => tx.priority_fee_or_price(), @@ -794,27 +641,48 @@ impl AlloyTransaction for Transaction { } } - fn max_fee_per_blob_gas(&self) -> Option { + fn effective_gas_price(&self, base_fee: Option) -> u128 { match self { - Self::Legacy(tx) => tx.max_fee_per_blob_gas(), - Self::Eip2930(tx) => tx.max_fee_per_blob_gas(), - Self::Eip1559(tx) => tx.max_fee_per_blob_gas(), - Self::Eip4844(tx) => tx.max_fee_per_blob_gas(), - Self::Eip7702(tx) => tx.max_fee_per_blob_gas(), + Self::Legacy(tx) => tx.effective_gas_price(base_fee), + Self::Eip2930(tx) => tx.effective_gas_price(base_fee), + Self::Eip1559(tx) => tx.effective_gas_price(base_fee), + Self::Eip4844(tx) => tx.effective_gas_price(base_fee), + Self::Eip7702(tx) => tx.effective_gas_price(base_fee), #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.max_fee_per_blob_gas(), + Self::Deposit(tx) => tx.effective_gas_price(base_fee), + } + } + + fn is_dynamic_fee(&self) -> bool { + match self { + Self::Legacy(_) | Self::Eip2930(_) => false, + Self::Eip1559(_) | Self::Eip4844(_) | Self::Eip7702(_) => true, + #[cfg(feature = "optimism")] + Self::Deposit(_) => false, + } + } + + fn kind(&self) -> TxKind { + match self { + Self::Legacy(tx) => tx.kind(), + Self::Eip2930(tx) => tx.kind(), + Self::Eip1559(tx) => tx.kind(), + Self::Eip4844(tx) => tx.kind(), + Self::Eip7702(tx) => tx.kind(), + #[cfg(feature = "optimism")] + Self::Deposit(tx) => tx.kind(), } } - fn to(&self) -> TxKind { + fn is_create(&self) -> bool { match self { - Self::Legacy(tx) => tx.to(), - Self::Eip2930(tx) => tx.to(), - Self::Eip1559(tx) => tx.to(), - Self::Eip4844(tx) => tx.to(), - Self::Eip7702(tx) => tx.to(), + Self::Legacy(tx) => tx.is_create(), + Self::Eip2930(tx) => tx.is_create(), + Self::Eip1559(tx) => tx.is_create(), + 
Self::Eip4844(tx) => tx.is_create(), + Self::Eip7702(tx) => tx.is_create(), #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.to(), + Self::Deposit(tx) => tx.is_create(), } } @@ -830,7 +698,7 @@ impl AlloyTransaction for Transaction { } } - fn input(&self) -> &[u8] { + fn input(&self) -> &Bytes { match self { Self::Legacy(tx) => tx.input(), Self::Eip2930(tx) => tx.input(), @@ -891,12 +759,34 @@ impl AlloyTransaction for Transaction { } } -/// Signed transaction without its Hash. Used type for inserting into the DB. -/// -/// This can by converted to [`TransactionSigned`] by calling [`TransactionSignedNoHash::hash`]. -#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] -pub struct TransactionSignedNoHash { +impl From for Transaction { + fn from(value: TxEip4844Variant) -> Self { + match value { + TxEip4844Variant::TxEip4844(tx) => tx.into(), + TxEip4844Variant::TxEip4844WithSidecar(tx) => tx.tx.into(), + } + } +} + +impl From for Transaction { + fn from(value: TypedTransaction) -> Self { + match value { + TypedTransaction::Legacy(tx) => tx.into(), + TypedTransaction::Eip2930(tx) => tx.into(), + TypedTransaction::Eip1559(tx) => tx.into(), + TypedTransaction::Eip4844(tx) => tx.into(), + TypedTransaction::Eip7702(tx) => tx.into(), + } + } +} + +/// Signed transaction. +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] +#[derive(Debug, Clone, Eq, AsRef, Deref, Serialize, Deserialize)] +pub struct TransactionSigned { + /// Transaction hash + #[serde(skip)] + pub hash: OnceLock, /// The transaction signature values pub signature: Signature, /// Raw transaction info @@ -905,286 +795,88 @@ pub struct TransactionSignedNoHash { pub transaction: Transaction, } -impl TransactionSignedNoHash { - /// Calculates the transaction hash. If used more than once, it's better to convert it to - /// [`TransactionSigned`] first. - pub fn hash(&self) -> B256 { - // pre-allocate buffer for the transaction - let mut buf = Vec::with_capacity(128 + self.transaction.input().len()); - self.transaction.encode_with_signature(&self.signature, &mut buf, false); - keccak256(&buf) - } - - /// Recover signer from signature and hash. - /// - /// Returns `None` if the transaction's signature is invalid, see also [`Self::recover_signer`]. - pub fn recover_signer(&self) -> Option
<Address>
{ - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. - #[cfg(feature = "optimism")] - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) +impl Default for TransactionSigned { + fn default() -> Self { + Self { + hash: Default::default(), + signature: Signature::test_signature(), + transaction: Default::default(), } + } +} - let signature_hash = self.signature_hash(); - recover_signer(&self.signature, signature_hash) +impl AsRef for TransactionSigned { + fn as_ref(&self) -> &Self { + self } +} - /// Recover signer from signature and hash _without ensuring that the signature has a low `s` - /// value_. - /// - /// Reuses a given buffer to avoid numerous reallocations when recovering batches. **Clears the - /// buffer before use.** - /// - /// Returns `None` if the transaction's signature is invalid, see also - /// [`recover_signer_unchecked`]. - /// - /// # Optimism - /// - /// For optimism this will return [`Address::ZERO`] if the Signature is empty, this is because pre bedrock (on OP mainnet), relay messages to the L2 Cross Domain Messenger were sent as legacy transactions from the zero address with an empty signature, e.g.: - /// This makes it possible to import pre bedrock transactions via the sender recovery stage. - pub fn encode_and_recover_unchecked(&self, buffer: &mut Vec) -> Option
<Address>
{ - buffer.clear(); - self.transaction.encode_without_signature(buffer); +impl Hash for TransactionSigned { + fn hash(&self, state: &mut H) { + self.signature.hash(state); + self.transaction.hash(state); + } +} - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. - #[cfg(feature = "optimism")] - { - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) - } +impl PartialEq for TransactionSigned { + fn eq(&self, other: &Self) -> bool { + self.signature == other.signature && + self.transaction == other.transaction && + self.tx_hash() == other.tx_hash() + } +} - // pre bedrock system transactions were sent from the zero address as legacy - // transactions with an empty signature - // - // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock - if self.is_legacy() && self.signature == optimism_deposit_tx_signature() { - return Some(Address::ZERO) - } - } +// === impl TransactionSigned === - recover_signer_unchecked(&self.signature, keccak256(buffer)) +impl TransactionSigned { + /// Creates a new signed transaction from the given parts. + pub fn new(transaction: Transaction, signature: Signature, hash: B256) -> Self { + Self { hash: hash.into(), signature, transaction } } - /// Converts into a transaction type with its hash: [`TransactionSigned`]. + /// Creates a new signed transaction from the given transaction and signature without the hash. /// - /// Note: This will recalculate the hash of the transaction. - #[inline] - pub fn with_hash(self) -> TransactionSigned { - let Self { signature, transaction } = self; - TransactionSigned::from_transaction_and_signature(transaction, signature) + /// Note: this only calculates the hash on the first [`TransactionSigned::hash`] call. + pub fn new_unhashed(transaction: Transaction, signature: Signature) -> Self { + Self { hash: Default::default(), signature, transaction } + } + + /// Transaction + pub const fn transaction(&self) -> &Transaction { + &self.transaction } - /// Recovers a list of signers from a transaction list iterator + /// Tries to convert a [`TransactionSigned`] into a [`PooledTransactionsElement`]. /// - /// Returns `None`, if some transaction's signature is invalid, see also - /// [`Self::recover_signer`]. - pub fn recover_signers<'a, T>(txes: T, num_txes: usize) -> Option> - where - T: IntoParallelIterator + IntoIterator + Send, - { - if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { - txes.into_iter().map(|tx| tx.recover_signer()).collect() - } else { - txes.into_par_iter().map(|tx| tx.recover_signer()).collect() - } - } -} - -impl Default for TransactionSignedNoHash { - fn default() -> Self { - Self { signature: Signature::test_signature(), transaction: Default::default() } - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for TransactionSignedNoHash { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let tx_signed = TransactionSigned::arbitrary(u)?; - - Ok(Self { signature: tx_signed.signature, transaction: tx_signed.transaction }) - } -} - -#[cfg(any(test, feature = "reth-codec"))] -impl reth_codecs::Compact for TransactionSignedNoHash { - fn to_compact(&self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - let start = buf.as_mut().len(); - - // Placeholder for bitflags. 
- // The first byte uses 4 bits as flags: IsCompressed[1bit], TxType[2bits], Signature[1bit] - buf.put_u8(0); - - let sig_bit = self.signature.to_compact(buf) as u8; - let zstd_bit = self.transaction.input().len() >= 32; - - let tx_bits = if zstd_bit { - let mut tmp = Vec::with_capacity(256); - if cfg!(feature = "std") { - crate::compression::TRANSACTION_COMPRESSOR.with(|compressor| { - let mut compressor = compressor.borrow_mut(); - let tx_bits = self.transaction.to_compact(&mut tmp); - buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); - tx_bits as u8 - }) - } else { - let mut compressor = crate::compression::create_tx_compressor(); - let tx_bits = self.transaction.to_compact(&mut tmp); - buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); - tx_bits as u8 + /// This function used as a helper to convert from a decoded p2p broadcast message to + /// [`PooledTransactionsElement`]. Since [`BlobTransaction`] is disallowed to be broadcasted on + /// p2p, return an err if `tx` is [`Transaction::Eip4844`]. + pub fn try_into_pooled(self) -> Result { + let hash = self.hash(); + match self { + Self { transaction: Transaction::Legacy(tx), signature, .. } => { + Ok(PooledTransactionsElement::Legacy(Signed::new_unchecked(tx, signature, hash))) } - } else { - self.transaction.to_compact(buf) as u8 - }; - - // Replace bitflags with the actual values - buf.as_mut()[start] = sig_bit | (tx_bits << 1) | ((zstd_bit as u8) << 3); - - buf.as_mut().len() - start - } - - fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { - use bytes::Buf; - - // The first byte uses 4 bits as flags: IsCompressed[1], TxType[2], Signature[1] - let bitflags = buf.get_u8() as usize; - - let sig_bit = bitflags & 1; - let (mut signature, buf) = Signature::from_compact(buf, sig_bit); - - let zstd_bit = bitflags >> 3; - let (transaction, buf) = if zstd_bit != 0 { - if cfg!(feature = "std") { - crate::compression::TRANSACTION_DECOMPRESSOR.with(|decompressor| { - let mut decompressor = decompressor.borrow_mut(); - - // TODO: enforce that zstd is only present at a "top" level type - - let transaction_type = (bitflags & 0b110) >> 1; - let (transaction, _) = - Transaction::from_compact(decompressor.decompress(buf), transaction_type); - - (transaction, buf) - }) - } else { - let mut decompressor = crate::compression::create_tx_decompressor(); - let transaction_type = (bitflags & 0b110) >> 1; - let (transaction, _) = - Transaction::from_compact(decompressor.decompress(buf), transaction_type); - - (transaction, buf) + Self { transaction: Transaction::Eip2930(tx), signature, .. } => { + Ok(PooledTransactionsElement::Eip2930(Signed::new_unchecked(tx, signature, hash))) } - } else { - let transaction_type = bitflags >> 1; - Transaction::from_compact(buf, transaction_type) - }; - - if matches!(transaction, Transaction::Legacy(_)) { - signature = signature.with_parity(legacy_parity(&signature, transaction.chain_id())) - } - - (Self { signature, transaction }, buf) - } -} - -impl From for TransactionSigned { - fn from(tx: TransactionSignedNoHash) -> Self { - tx.with_hash() - } -} - -impl From for TransactionSignedNoHash { - fn from(tx: TransactionSigned) -> Self { - Self { signature: tx.signature, transaction: tx.transaction } - } -} - -/// Signed transaction. 
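// Sketch of the flag byte laid out by the removed `to_compact` above: bit 0
// carries the signature parity, bits 1-2 the 2-bit transaction type, and
// bit 3 whether the payload that follows is zstd-compressed.
fn unpack_compact_flags(bitflags: u8) -> (u8, u8, bool) {
    let sig_bit = bitflags & 0b0001;
    let tx_type = (bitflags & 0b0110) >> 1;
    let zstd = (bitflags & 0b1000) != 0;
    (sig_bit, tx_type, zstd)
}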
-#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] -#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] -pub struct TransactionSigned { - /// Transaction hash - pub hash: TxHash, - /// The transaction signature values - pub signature: Signature, - /// Raw transaction info - #[deref] - #[as_ref] - pub transaction: Transaction, -} - -impl Default for TransactionSigned { - fn default() -> Self { - Self { - hash: Default::default(), - signature: Signature::test_signature(), - transaction: Default::default(), + Self { transaction: Transaction::Eip1559(tx), signature, .. } => { + Ok(PooledTransactionsElement::Eip1559(Signed::new_unchecked(tx, signature, hash))) + } + Self { transaction: Transaction::Eip7702(tx), signature, .. } => { + Ok(PooledTransactionsElement::Eip7702(Signed::new_unchecked(tx, signature, hash))) + } + // Not supported because missing blob sidecar + tx @ Self { transaction: Transaction::Eip4844(_), .. } => Err(tx), + #[cfg(feature = "optimism")] + // Not supported because deposit transactions are never pooled + tx @ Self { transaction: Transaction::Deposit(_), .. } => Err(tx), } } -} - -impl AsRef for TransactionSigned { - fn as_ref(&self) -> &Self { - self - } -} - -// === impl TransactionSigned === - -impl TransactionSigned { - /// Transaction signature. - pub const fn signature(&self) -> &Signature { - &self.signature - } /// Transaction hash. Used to identify transaction. - pub const fn hash(&self) -> TxHash { - self.hash - } - - /// Reference to transaction hash. Used to identify transaction. - pub const fn hash_ref(&self) -> &TxHash { - &self.hash - } - - /// Recover signer from signature and hash. - /// - /// Returns `None` if the transaction's signature is invalid following [EIP-2](https://eips.ethereum.org/EIPS/eip-2), see also [`recover_signer`]. - /// - /// Note: - /// - /// This can fail for some early ethereum mainnet transactions pre EIP-2, use - /// [`Self::recover_signer_unchecked`] if you want to recover the signer without ensuring that - /// the signature has a low `s` value. - pub fn recover_signer(&self) -> Option
{
-        // Optimism's Deposit transaction does not have a signature. Directly return the
-        // `from` address.
-        #[cfg(feature = "optimism")]
-        if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction {
-            return Some(from)
-        }
-        let signature_hash = self.signature_hash();
-        recover_signer(&self.signature, signature_hash)
-    }
-
-    /// Recover signer from signature and hash _without ensuring that the signature has a low `s`
-    /// value_.
-    ///
-    /// Returns `None` if the transaction's signature is invalid, see also
-    /// [`recover_signer_unchecked`].
-    pub fn recover_signer_unchecked(&self) -> Option<Address>
{ - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. - #[cfg(feature = "optimism")] - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) - } - let signature_hash = self.signature_hash(); - recover_signer_unchecked(&self.signature, signature_hash) + pub fn hash(&self) -> TxHash { + *self.tx_hash() } /// Recovers a list of signers from a transaction list iterator. @@ -1218,56 +910,39 @@ impl TransactionSigned { } } - /// Returns the [`TransactionSignedEcRecovered`] transaction with the given sender. + /// Returns the [`RecoveredTx`] transaction with the given sender. #[inline] - pub const fn with_signer(self, signer: Address) -> TransactionSignedEcRecovered { - TransactionSignedEcRecovered::from_signed_transaction(self, signer) + pub const fn with_signer(self, signer: Address) -> RecoveredTx { + RecoveredTx::from_signed_transaction(self, signer) } - /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] + /// Consumes the type, recover signer and return [`RecoveredTx`] /// /// Returns `None` if the transaction's signature is invalid, see also [`Self::recover_signer`]. - pub fn into_ecrecovered(self) -> Option { + pub fn into_ecrecovered(self) -> Option { let signer = self.recover_signer()?; - Some(TransactionSignedEcRecovered { signed_transaction: self, signer }) + Some(RecoveredTx { signed_transaction: self, signer }) } - /// Consumes the type, recover signer and return [`TransactionSignedEcRecovered`] _without + /// Consumes the type, recover signer and return [`RecoveredTx`] _without /// ensuring that the signature has a low `s` value_ (EIP-2). /// /// Returns `None` if the transaction's signature is invalid, see also /// [`Self::recover_signer_unchecked`]. - pub fn into_ecrecovered_unchecked(self) -> Option { + pub fn into_ecrecovered_unchecked(self) -> Option { let signer = self.recover_signer_unchecked()?; - Some(TransactionSignedEcRecovered { signed_transaction: self, signer }) + Some(RecoveredTx { signed_transaction: self, signer }) } - /// Tries to recover signer and return [`TransactionSignedEcRecovered`] by cloning the type. - pub fn try_ecrecovered(&self) -> Option { - let signer = self.recover_signer()?; - Some(TransactionSignedEcRecovered { signed_transaction: self.clone(), signer }) - } - - /// Tries to recover signer and return [`TransactionSignedEcRecovered`]. - /// - /// Returns `Err(Self)` if the transaction's signature is invalid, see also - /// [`Self::recover_signer`]. - pub fn try_into_ecrecovered(self) -> Result { - match self.recover_signer() { - None => Err(self), - Some(signer) => Ok(TransactionSignedEcRecovered { signed_transaction: self, signer }), - } - } - - /// Tries to recover signer and return [`TransactionSignedEcRecovered`]. _without ensuring that + /// Tries to recover signer and return [`RecoveredTx`]. _without ensuring that /// the signature has a low `s` value_ (EIP-2). /// /// Returns `Err(Self)` if the transaction's signature is invalid, see also /// [`Self::recover_signer_unchecked`]. 
- pub fn try_into_ecrecovered_unchecked(self) -> Result { + pub fn try_into_ecrecovered_unchecked(self) -> Result { match self.recover_signer_unchecked() { None => Err(self), - Some(signer) => Ok(TransactionSignedEcRecovered { signed_transaction: self, signer }), + Some(signer) => Ok(RecoveredTx { signed_transaction: self, signer }), } } @@ -1277,19 +952,10 @@ impl TransactionSigned { keccak256(self.encoded_2718()) } - /// Create a new signed transaction from a transaction and its signature. - /// - /// This will also calculate the transaction hash using its encoding. - pub fn from_transaction_and_signature(transaction: Transaction, signature: Signature) -> Self { - let mut initial_tx = Self { transaction, hash: Default::default(), signature }; - initial_tx.hash = initial_tx.recalculate_hash(); - initial_tx - } - - /// Calculate a heuristic for the in-memory size of the [`TransactionSigned`]. - #[inline] - pub fn size(&self) -> usize { - mem::size_of::() + self.transaction.size() + mem::size_of::() + /// Splits the transaction into parts. + pub fn into_parts(self) -> (Transaction, Signature, B256) { + let hash = self.hash(); + (self.transaction, self.signature, hash) } /// Decodes legacy transaction from the data buffer into a tuple. @@ -1298,7 +964,7 @@ impl TransactionSigned { /// /// Refer to the docs for [`Self::decode_rlp_legacy_transaction`] for details on the exact /// format expected. - pub(crate) fn decode_rlp_legacy_transaction_tuple( + pub fn decode_rlp_legacy_transaction_tuple( data: &mut &[u8], ) -> alloy_rlp::Result<(TxLegacy, TxHash, Signature)> { // keep this around, so we can use it to calculate the hash @@ -1349,13 +1015,212 @@ impl TransactionSigned { // so decoding methods do not need to manually advance the buffer pub fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> alloy_rlp::Result { let (transaction, hash, signature) = Self::decode_rlp_legacy_transaction_tuple(data)?; - let signed = Self { transaction: Transaction::Legacy(transaction), hash, signature }; + let signed = + Self { transaction: Transaction::Legacy(transaction), hash: hash.into(), signature }; Ok(signed) } } -impl From for TransactionSigned { - fn from(recovered: TransactionSignedEcRecovered) -> Self { +impl SignedTransaction for TransactionSigned { + type Type = TxType; + + fn tx_hash(&self) -> &TxHash { + self.hash.get_or_init(|| self.recalculate_hash()) + } + + fn signature(&self) -> &Signature { + &self.signature + } + + fn recover_signer(&self) -> Option
{
+        // Optimism's Deposit transaction does not have a signature. Directly return the
+        // `from` address.
+        #[cfg(feature = "optimism")]
+        if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction {
+            return Some(from)
+        }
+        let signature_hash = self.signature_hash();
+        recover_signer(&self.signature, signature_hash)
+    }
+
+    fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec<u8>) -> Option<Address>
{ + // Optimism's Deposit transaction does not have a signature. Directly return the + // `from` address. + #[cfg(feature = "optimism")] + if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { + return Some(from) + } + self.encode_for_signing(buf); + let signature_hash = keccak256(buf); + recover_signer_unchecked(&self.signature, signature_hash) + } +} + +impl reth_primitives_traits::FillTxEnv for TransactionSigned { + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { + tx_env.caller = sender; + match self.as_ref() { + Transaction::Legacy(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = tx.chain_id; + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clear(); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip2930(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip1559(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip4844(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = TxKind::Call(tx.to); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clone_from(&tx.blob_versioned_hashes); + tx_env.max_fee_per_blob_gas = Some(U256::from(tx.max_fee_per_blob_gas)); + tx_env.authorization_list = None; + } + Transaction::Eip7702(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to.into(); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = + Some(AuthorizationList::Signed(tx.authorization_list.clone())); + } + #[cfg(feature = "optimism")] + Transaction::Deposit(_) => {} + } + } +} + +impl InMemorySize for TransactionSigned { + /// Calculate a heuristic for the in-memory size of the [`TransactionSigned`]. 
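The `FillTxEnv` impl above copies the fee, access-list, blob, and authorization fields of each transaction variant into revm's `TxEnv`. A sketch of the intended call site, assuming `TxEnv: Default` and the `SignedTransaction` trait in scope for `recover_signer` (the helper name is hypothetical):

    use reth_primitives_traits::{FillTxEnv, SignedTransaction};
    use revm_primitives::{Address, TxEnv};

    /// Recover the sender once, then populate a fresh `TxEnv` for execution.
    /// Returns `None` when the signature does not recover to a valid address.
    fn prepare_env(tx: &TransactionSigned) -> Option<TxEnv> {
        let sender: Address = tx.recover_signer()?;
        let mut tx_env = TxEnv::default();
        tx.fill_tx_env(&mut tx_env, sender);
        Some(tx_env)
    }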
+ #[inline] + fn size(&self) -> usize { + self.hash().size() + self.transaction.size() + self.signature().size() + } +} + +impl alloy_consensus::Transaction for TransactionSigned { + fn chain_id(&self) -> Option { + self.deref().chain_id() + } + + fn nonce(&self) -> u64 { + self.deref().nonce() + } + + fn gas_limit(&self) -> u64 { + self.deref().gas_limit() + } + + fn gas_price(&self) -> Option { + self.deref().gas_price() + } + + fn max_fee_per_gas(&self) -> u128 { + self.deref().max_fee_per_gas() + } + + fn max_priority_fee_per_gas(&self) -> Option { + self.deref().max_priority_fee_per_gas() + } + + fn max_fee_per_blob_gas(&self) -> Option { + self.deref().max_fee_per_blob_gas() + } + + fn priority_fee_or_price(&self) -> u128 { + self.deref().priority_fee_or_price() + } + + fn effective_gas_price(&self, base_fee: Option) -> u128 { + self.deref().effective_gas_price(base_fee) + } + + fn is_dynamic_fee(&self) -> bool { + self.deref().is_dynamic_fee() + } + + fn kind(&self) -> TxKind { + self.deref().kind() + } + + fn is_create(&self) -> bool { + self.deref().is_create() + } + + fn value(&self) -> U256 { + self.deref().value() + } + + fn input(&self) -> &Bytes { + self.deref().input() + } + + fn ty(&self) -> u8 { + self.deref().ty() + } + + fn access_list(&self) -> Option<&AccessList> { + self.deref().access_list() + } + + fn blob_versioned_hashes(&self) -> Option<&[B256]> { + alloy_consensus::Transaction::blob_versioned_hashes(self.deref()) + } + + fn authorization_list(&self) -> Option<&[SignedAuthorization]> { + self.deref().authorization_list() + } +} + +impl From for TransactionSigned { + fn from(recovered: RecoveredTx) -> Self { recovered.signed_transaction } } @@ -1424,28 +1289,28 @@ impl Encodable2718 for TransactionSigned { fn encode_2718_len(&self) -> usize { match &self.transaction { - Transaction::Legacy(legacy_tx) => legacy_tx.encoded_len_with_signature( - &with_eip155_parity(&self.signature, legacy_tx.chain_id), - ), + Transaction::Legacy(legacy_tx) => legacy_tx.eip2718_encoded_length(&self.signature), Transaction::Eip2930(access_list_tx) => { - access_list_tx.encoded_len_with_signature(&self.signature, false) + access_list_tx.eip2718_encoded_length(&self.signature) } Transaction::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.encoded_len_with_signature(&self.signature, false) - } - Transaction::Eip4844(blob_tx) => { - blob_tx.encoded_len_with_signature(&self.signature, false) + dynamic_fee_tx.eip2718_encoded_length(&self.signature) } + Transaction::Eip4844(blob_tx) => blob_tx.eip2718_encoded_length(&self.signature), Transaction::Eip7702(set_code_tx) => { - set_code_tx.encoded_len_with_signature(&self.signature, false) + set_code_tx.eip2718_encoded_length(&self.signature) } #[cfg(feature = "optimism")] - Transaction::Deposit(deposit_tx) => deposit_tx.encoded_len(false), + Transaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(), } } fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { - self.transaction.encode_with_signature(&self.signature, out, false) + self.transaction.eip2718_encode(&self.signature, out) + } + + fn trie_hash(&self) -> B256 { + self.hash() } } @@ -1454,25 +1319,25 @@ impl Decodable2718 for TransactionSigned { match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? 
{ TxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), TxType::Eip2930 => { - let (tx, signature, hash) = TxEip2930::decode_signed_fields(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash }) + let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash: hash.into() }) } TxType::Eip1559 => { - let (tx, signature, hash) = TxEip1559::decode_signed_fields(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash }) + let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash: hash.into() }) } TxType::Eip7702 => { - let (tx, signature, hash) = TxEip7702::decode_signed_fields(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash }) + let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash: hash.into() }) } TxType::Eip4844 => { - let (tx, signature, hash) = TxEip4844::decode_signed_fields(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash }) + let (tx, signature, hash) = TxEip4844::rlp_decode_signed(buf)?.into_parts(); + Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash: hash.into() }) } #[cfg(feature = "optimism")] - TxType::Deposit => Ok(Self::from_transaction_and_signature( - Transaction::Deposit(TxDeposit::decode(buf)?), - optimism_deposit_tx_signature(), + TxType::Deposit => Ok(Self::new_unhashed( + Transaction::Deposit(TxDeposit::rlp_decode(buf)?), + TxDeposit::signature(), )), } } @@ -1482,6 +1347,115 @@ impl Decodable2718 for TransactionSigned { } } +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for TransactionSigned { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let start = buf.as_mut().len(); + + // Placeholder for bitflags. 
+ // The first byte uses 4 bits as flags: IsCompressed[1bit], TxType[2bits], Signature[1bit] + buf.put_u8(0); + + let sig_bit = self.signature.to_compact(buf) as u8; + let zstd_bit = self.transaction.input().len() >= 32; + + let tx_bits = if zstd_bit { + let mut tmp = Vec::with_capacity(256); + if cfg!(feature = "std") { + reth_zstd_compressors::TRANSACTION_COMPRESSOR.with(|compressor| { + let mut compressor = compressor.borrow_mut(); + let tx_bits = self.transaction.to_compact(&mut tmp); + buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); + tx_bits as u8 + }) + } else { + let mut compressor = reth_zstd_compressors::create_tx_compressor(); + let tx_bits = self.transaction.to_compact(&mut tmp); + buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); + tx_bits as u8 + } + } else { + self.transaction.to_compact(buf) as u8 + }; + + // Replace bitflags with the actual values + buf.as_mut()[start] = sig_bit | (tx_bits << 1) | ((zstd_bit as u8) << 3); + + buf.as_mut().len() - start + } + + fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { + use bytes::Buf; + + // The first byte uses 4 bits as flags: IsCompressed[1], TxType[2], Signature[1] + let bitflags = buf.get_u8() as usize; + + let sig_bit = bitflags & 1; + let (signature, buf) = Signature::from_compact(buf, sig_bit); + + let zstd_bit = bitflags >> 3; + let (transaction, buf) = if zstd_bit != 0 { + if cfg!(feature = "std") { + reth_zstd_compressors::TRANSACTION_DECOMPRESSOR.with(|decompressor| { + let mut decompressor = decompressor.borrow_mut(); + + // TODO: enforce that zstd is only present at a "top" level type + + let transaction_type = (bitflags & 0b110) >> 1; + let (transaction, _) = + Transaction::from_compact(decompressor.decompress(buf), transaction_type); + + (transaction, buf) + }) + } else { + let mut decompressor = reth_zstd_compressors::create_tx_decompressor(); + let transaction_type = (bitflags & 0b110) >> 1; + let (transaction, _) = + Transaction::from_compact(decompressor.decompress(buf), transaction_type); + + (transaction, buf) + } + } else { + let transaction_type = bitflags >> 1; + Transaction::from_compact(buf, transaction_type) + }; + + (Self { signature, transaction, hash: Default::default() }, buf) + } +} + +macro_rules! 
impl_from_signed { + ($($tx:ident),*) => { + $( + impl From> for TransactionSigned { + fn from(value: Signed<$tx>) -> Self { + let(tx,sig,hash) = value.into_parts(); + Self::new(tx.into(), sig, hash) + } + } + )* + }; +} + +impl_from_signed!(TxLegacy, TxEip2930, TxEip1559, TxEip7702, TxEip4844, TypedTransaction); + +impl From> for TransactionSigned { + fn from(value: Signed) -> Self { + let (tx, sig, hash) = value.into_parts(); + Self::new(tx, sig, hash) + } +} + +impl From for Signed { + fn from(value: TransactionSigned) -> Self { + let (tx, sig, hash) = value.into_parts(); + Self::new_unchecked(tx, sig, hash) + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { @@ -1490,22 +1464,12 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { let secp = secp256k1::Secp256k1::new(); let key_pair = secp256k1::Keypair::new(&secp, &mut rand::thread_rng()); - let mut signature = crate::sign_message( + let signature = crate::sign_message( B256::from_slice(&key_pair.secret_bytes()[..]), transaction.signature_hash(), ) .unwrap(); - signature = if matches!(transaction, Transaction::Legacy(_)) { - if let Some(chain_id) = transaction.chain_id() { - signature.with_chain_id(chain_id) - } else { - signature.with_parity(alloy_primitives::Parity::NonEip155(bool::arbitrary(u)?)) - } - } else { - signature.with_parity_bool() - }; - #[cfg(feature = "optimism")] // Both `Some(0)` and `None` values are encoded as empty string byte. This introduces // ambiguity in roundtrip tests. Patch the mint value of deposit transaction here, so that @@ -1517,59 +1481,67 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { } #[cfg(feature = "optimism")] - let signature = - if transaction.is_deposit() { optimism_deposit_tx_signature() } else { signature }; - - Ok(Self::from_transaction_and_signature(transaction, signature)) + let signature = if transaction.is_deposit() { TxDeposit::signature() } else { signature }; + Ok(Self::new_unhashed(transaction, signature)) } } +/// Type alias kept for backward compatibility. +pub type TransactionSignedEcRecovered = RecoveredTx; + /// Signed transaction with recovered signer. 
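A sketch of what the `impl_from_signed!` expansion buys callers: an alloy `Signed<TxEip1559>` lifts directly into `TransactionSigned` and back into `Signed<Transaction>`, reusing the carried hash in both directions (the function is illustrative, not part of the patch):

    use alloy_consensus::{Signed, TxEip1559};

    /// Round-trip through the conversions generated above; `Signed::into_parts`
    /// hands over the hash, so nothing is re-hashed along the way.
    fn roundtrip(signed: Signed<TxEip1559>) -> Signed<Transaction> {
        let tx: TransactionSigned = signed.into(); // via `impl_from_signed!`
        tx.into() // via `From<TransactionSigned> for Signed<Transaction>`
    }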
#[derive(Debug, Clone, PartialEq, Hash, Eq, AsRef, Deref)] -pub struct TransactionSignedEcRecovered { +pub struct RecoveredTx { /// Signer of the transaction signer: Address, /// Signed transaction #[deref] #[as_ref] - signed_transaction: TransactionSigned, + signed_transaction: T, } -// === impl TransactionSignedEcRecovered === +// === impl RecoveredTx === -impl TransactionSignedEcRecovered { +impl RecoveredTx { /// Signer of transaction recovered from signature pub const fn signer(&self) -> Address { self.signer } + /// Reference to the signer of transaction recovered from signature + pub const fn signer_ref(&self) -> &Address { + &self.signer + } + /// Returns a reference to [`TransactionSigned`] - pub const fn as_signed(&self) -> &TransactionSigned { + pub const fn as_signed(&self) -> &T { &self.signed_transaction } /// Transform back to [`TransactionSigned`] - pub fn into_signed(self) -> TransactionSigned { + pub fn into_signed(self) -> T { self.signed_transaction } /// Dissolve Self to its component - pub fn to_components(self) -> (TransactionSigned, Address) { + pub fn to_components(self) -> (T, Address) { (self.signed_transaction, self.signer) } - /// Create [`TransactionSignedEcRecovered`] from [`TransactionSigned`] and [`Address`] of the + /// Create [`RecoveredTx`] from [`TransactionSigned`] and [`Address`] of the /// signer. #[inline] - pub const fn from_signed_transaction( - signed_transaction: TransactionSigned, - signer: Address, - ) -> Self { + pub const fn from_signed_transaction(signed_transaction: T, signer: Address) -> Self { Self { signed_transaction, signer } } + + /// Applies the given closure to the inner transactions. + pub fn map_transaction(self, f: impl FnOnce(T) -> Tx) -> RecoveredTx { + RecoveredTx::from_signed_transaction(f(self.signed_transaction), self.signer) + } } -impl Encodable for TransactionSignedEcRecovered { +impl Encodable for RecoveredTx { /// This encodes the transaction _with_ the signature, and an rlp header. /// /// Refer to docs for [`TransactionSigned::encode`] for details on the exact format. @@ -1582,9 +1554,9 @@ impl Encodable for TransactionSignedEcRecovered { } } -impl Decodable for TransactionSignedEcRecovered { +impl Decodable for RecoveredTx { fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - let signed_transaction = TransactionSigned::decode(buf)?; + let signed_transaction = T::decode(buf)?; let signer = signed_transaction .recover_signer() .ok_or(RlpError::Custom("Unable to recover decoded transaction signer."))?; @@ -1592,59 +1564,59 @@ impl Decodable for TransactionSignedEcRecovered { } } -/// Generic wrapper with encoded Bytes, such as transaction data. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct WithEncoded(Bytes, pub T); - -impl From<(Bytes, T)> for WithEncoded { - fn from(value: (Bytes, T)) -> Self { - Self(value.0, value.1) +impl Encodable2718 for RecoveredTx { + fn type_flag(&self) -> Option { + self.signed_transaction.type_flag() } -} -impl WithEncoded { - /// Wraps the value with the bytes. 
- pub const fn new(bytes: Bytes, value: T) -> Self { - Self(bytes, value) + fn encode_2718_len(&self) -> usize { + self.signed_transaction.encode_2718_len() } - /// Get the encoded bytes - pub fn encoded_bytes(&self) -> Bytes { - self.0.clone() + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + self.signed_transaction.encode_2718(out) } - /// Get the underlying value - pub const fn value(&self) -> &T { - &self.1 + fn trie_hash(&self) -> B256 { + self.signed_transaction.trie_hash() } +} - /// Returns ownership of the underlying value. - pub fn into_value(self) -> T { - self.1 +/// Extension trait for [`SignedTransaction`] to convert it into [`RecoveredTx`]. +pub trait SignedTransactionIntoRecoveredExt: SignedTransaction { + /// Tries to recover signer and return [`RecoveredTx`] by cloning the type. + fn try_ecrecovered(&self) -> Option> { + let signer = self.recover_signer()?; + Some(RecoveredTx { signed_transaction: self.clone(), signer }) } - /// Transform the value - pub fn transform>(self) -> WithEncoded { - WithEncoded(self.0, self.1.into()) + /// Tries to recover signer and return [`RecoveredTx`]. + /// + /// Returns `Err(Self)` if the transaction's signature is invalid, see also + /// [`SignedTransaction::recover_signer`]. + fn try_into_ecrecovered(self) -> Result, Self> { + match self.recover_signer() { + None => Err(self), + Some(signer) => Ok(RecoveredTx { signed_transaction: self, signer }), + } } - /// Split the wrapper into [`Bytes`] and value tuple - pub fn split(self) -> (Bytes, T) { - (self.0, self.1) + /// Consumes the type, recover signer and return [`RecoveredTx`] _without + /// ensuring that the signature has a low `s` value_ (EIP-2). + /// + /// Returns `None` if the transaction's signature is invalid. + fn into_ecrecovered_unchecked(self) -> Option> { + let signer = self.recover_signer_unchecked()?; + Some(RecoveredTx::from_signed_transaction(self, signer)) } - /// Maps the inner value to a new value using the given function. - pub fn map U>(self, op: F) -> WithEncoded { - WithEncoded(self.0, op(self.1)) + /// Returns the [`RecoveredTx`] transaction with the given sender. + fn with_signer(self, signer: Address) -> RecoveredTx { + RecoveredTx::from_signed_transaction(self, signer) } } -impl WithEncoded> { - /// returns `None` if the inner value is `None`, otherwise returns `Some(WithEncoded)`. - pub fn transpose(self) -> Option> { - self.1.map(|v| WithEncoded(self.0, v)) - } -} +impl SignedTransactionIntoRecoveredExt for T where T: SignedTransaction {} /// Bincode-compatible transaction type serde implementations. 
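Illustrative generic code enabled by the blanket impl above (the helper is hypothetical): any batch of `SignedTransaction` values can be upgraded to `RecoveredTx` without naming a concrete transaction type.

    /// Recover every sender in a batch, surfacing the first transaction whose
    /// signature fails to recover. Works for any `T` via the blanket impl.
    fn recover_batch<T>(txs: Vec<T>) -> Result<Vec<RecoveredTx<T>>, T>
    where
        T: SignedTransactionIntoRecoveredExt,
    {
        txs.into_iter().map(T::try_into_ecrecovered).collect()
    }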
#[cfg(feature = "serde-bincode-compat")] @@ -1654,9 +1626,7 @@ pub mod serde_bincode_compat { transaction::serde_bincode_compat::{TxEip1559, TxEip2930, TxEip7702, TxLegacy}, TxEip4844, }; - use alloy_primitives::{Signature, TxHash}; - #[cfg(feature = "optimism")] - use op_alloy_consensus::serde_bincode_compat::TxDeposit; + use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -1684,8 +1654,7 @@ pub mod serde_bincode_compat { Eip4844(Cow<'a, TxEip4844>), Eip7702(TxEip7702<'a>), #[cfg(feature = "optimism")] - #[cfg(feature = "optimism")] - Deposit(TxDeposit<'a>), + Deposit(op_alloy_consensus::serde_bincode_compat::TxDeposit<'a>), } impl<'a> From<&'a super::Transaction> for Transaction<'a> { @@ -1697,7 +1666,9 @@ pub mod serde_bincode_compat { super::Transaction::Eip4844(tx) => Self::Eip4844(Cow::Borrowed(tx)), super::Transaction::Eip7702(tx) => Self::Eip7702(TxEip7702::from(tx)), #[cfg(feature = "optimism")] - super::Transaction::Deposit(tx) => Self::Deposit(TxDeposit::from(tx)), + super::Transaction::Deposit(tx) => { + Self::Deposit(op_alloy_consensus::serde_bincode_compat::TxDeposit::from(tx)) + } } } } @@ -1759,7 +1730,7 @@ pub mod serde_bincode_compat { impl<'a> From<&'a super::TransactionSigned> for TransactionSigned<'a> { fn from(value: &'a super::TransactionSigned) -> Self { Self { - hash: value.hash, + hash: value.hash(), signature: value.signature, transaction: Transaction::from(&value.transaction), } @@ -1769,7 +1740,7 @@ pub mod serde_bincode_compat { impl<'a> From> for super::TransactionSigned { fn from(value: TransactionSigned<'a>) -> Self { Self { - hash: value.hash, + hash: value.hash.into(), signature: value.signature, transaction: value.transaction.into(), } @@ -1800,7 +1771,6 @@ pub mod serde_bincode_compat { #[cfg(test)] mod tests { use super::super::{serde_bincode_compat, Transaction, TransactionSigned}; - use arbitrary::Arbitrary; use rand::Rng; use reth_testing_utils::generators; @@ -1853,18 +1823,52 @@ pub mod serde_bincode_compat { } } +/// Recovers a list of signers from a transaction list iterator. +/// +/// Returns `None`, if some transaction's signature is invalid +pub fn recover_signers<'a, I, T>(txes: I, num_txes: usize) -> Option> +where + T: SignedTransaction, + I: IntoParallelIterator + IntoIterator + Send, +{ + if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { + txes.into_iter().map(|tx| tx.recover_signer()).collect() + } else { + txes.into_par_iter().map(|tx| tx.recover_signer()).collect() + } +} + +/// Recovers a list of signers from a transaction list iterator _without ensuring that the +/// signature has a low `s` value_. +/// +/// Returns `None`, if some transaction's signature is invalid. 
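Callers pass the batch twice, once as an iterator and once as a length hint; the hint alone decides whether rayon is engaged, since the per-transaction keccak and ecrecover work only amortizes the thread fan-out beyond the threshold. A hypothetical caller (the unchecked variant follows next):

    /// Recover all senders of a block body. Small bodies stay on the calling
    /// thread; past `PARALLEL_SENDER_RECOVERY_THRESHOLD` the work fans out to
    /// rayon's pool.
    fn block_senders(txs: &[TransactionSigned]) -> Option<Vec<Address>> {
        recover_signers(txs, txs.len())
    }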
+pub fn recover_signers_unchecked<'a, I, T>(txes: I, num_txes: usize) -> Option<Vec<Address>>
+where
+    T: SignedTransaction,
+    I: IntoParallelIterator<Item = &'a T> + IntoIterator<Item = &'a T> + Send,
+{
+    if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD {
+        txes.into_iter().map(|tx| tx.recover_signer_unchecked()).collect()
+    } else {
+        txes.into_par_iter().map(|tx| tx.recover_signer_unchecked()).collect()
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use crate::{
-        transaction::{signature::Signature, TxEip1559, TxKind, TxLegacy},
-        Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash,
+        transaction::{TxEip1559, TxKind, TxLegacy},
+        RecoveredTx, Transaction, TransactionSigned,
     };
     use alloy_consensus::Transaction as _;
     use alloy_eips::eip2718::{Decodable2718, Encodable2718};
-    use alloy_primitives::{address, b256, bytes, hex, Address, Bytes, Parity, B256, U256};
+    use alloy_primitives::{
+        address, b256, bytes, hex, Address, Bytes, PrimitiveSignature as Signature, B256, U256,
+    };
     use alloy_rlp::{Decodable, Encodable, Error as RlpError};
     use reth_chainspec::MIN_TRANSACTION_GAS;
     use reth_codecs::Compact;
+    use reth_primitives_traits::SignedTransaction;
     use std::str::FromStr;
 
     #[test]
@@ -1930,13 +1934,15 @@ mod tests {
         assert_eq!(
             tx.blob_versioned_hashes(),
-            Some(vec![
-                b256!("012ec3d6f66766bedb002a190126b3549fce0047de0d4c25cffce0dc1c57921a"),
-                b256!("0152d8e24762ff22b1cfd9f8c0683786a7ca63ba49973818b3d1e9512cd2cec4"),
-                b256!("013b98c6c83e066d5b14af2b85199e3d4fc7d1e778dd53130d180f5077e2d1c7"),
-                b256!("01148b495d6e859114e670ca54fb6e2657f0cbae5b08063605093a4b3dc9f8f1"),
-                b256!("011ac212f13c5dff2b2c6b600a79635103d6f580a4221079951181b25c7e6549"),
-            ])
+            Some(
+                &[
+                    b256!("012ec3d6f66766bedb002a190126b3549fce0047de0d4c25cffce0dc1c57921a"),
+                    b256!("0152d8e24762ff22b1cfd9f8c0683786a7ca63ba49973818b3d1e9512cd2cec4"),
+                    b256!("013b98c6c83e066d5b14af2b85199e3d4fc7d1e778dd53130d180f5077e2d1c7"),
+                    b256!("01148b495d6e859114e670ca54fb6e2657f0cbae5b08063605093a4b3dc9f8f1"),
+                    b256!("011ac212f13c5dff2b2c6b600a79635103d6f580a4221079951181b25c7e6549"),
+                ][..]
+ ) ); } @@ -1969,7 +1975,7 @@ mod tests { .unwrap(), U256::from_str("0x3a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18") .unwrap(), - Parity::Eip155(43), + false, ); let hash = b256!("a517b206d2223278f860ea017d3626cacad4f52ff51030dc9a96b432f17f8d34"); test_decode_and_encode(&bytes, transaction, signature, Some(hash)); @@ -1989,7 +1995,7 @@ mod tests { .unwrap(), U256::from_str("0x5406ad177223213df262cb66ccbb2f46bfdccfdfbbb5ffdda9e2c02d977631da") .unwrap(), - Parity::Eip155(43), + false, ); test_decode_and_encode(&bytes, transaction, signature, None); @@ -2008,7 +2014,7 @@ mod tests { .unwrap(), U256::from_str("0x3ca3ae86580e94550d7c071e3a02eadb5a77830947c9225165cf9100901bee88") .unwrap(), - Parity::Eip155(43), + false, ); test_decode_and_encode(&bytes, transaction, signature, None); @@ -2029,7 +2035,7 @@ mod tests { .unwrap(), U256::from_str("0x016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469") .unwrap(), - Parity::Parity(true), + true, ); test_decode_and_encode(&bytes, transaction, signature, None); @@ -2048,7 +2054,7 @@ mod tests { .unwrap(), U256::from_str("0x612638fb29427ca33b9a3be2a0a561beecfe0269655be160d35e72d366a6a860") .unwrap(), - Parity::Eip155(44), + true, ); test_decode_and_encode(&bytes, transaction, signature, None); } @@ -2059,9 +2065,9 @@ mod tests { signature: Signature, hash: Option, ) { - let expected = TransactionSigned::from_transaction_and_signature(transaction, signature); + let expected = TransactionSigned::new_unhashed(transaction, signature); if let Some(hash) = hash { - assert_eq!(hash, expected.hash); + assert_eq!(hash, expected.hash()); } assert_eq!(bytes.len(), expected.length()); @@ -2113,8 +2119,7 @@ mod tests { let tx = TransactionSigned::decode(&mut &input[..]).unwrap(); let recovered = tx.into_ecrecovered().unwrap(); - let decoded = - TransactionSignedEcRecovered::decode(&mut &alloy_rlp::encode(&recovered)[..]).unwrap(); + let decoded = RecoveredTx::decode(&mut &alloy_rlp::encode(&recovered)[..]).unwrap(); assert_eq!(recovered, decoded) } @@ -2156,7 +2161,7 @@ mod tests { let signature = crate::sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); - TransactionSigned::from_transaction_and_signature(tx, signature) + TransactionSigned::new_unhashed(tx, signature) }).collect(); let parallel_senders = TransactionSigned::recover_signers(&txes, txes.len()).unwrap(); @@ -2212,7 +2217,7 @@ mod tests { .unwrap(), U256::from_str("0x3a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18") .unwrap(), - Parity::Eip155(43), + false, ); let inputs: Vec> = vec![ @@ -2238,17 +2243,17 @@ mod tests { input: Bytes::from(input), }); - let tx_signed_no_hash = TransactionSignedNoHash { signature, transaction }; - test_transaction_signed_to_from_compact(tx_signed_no_hash); + let tx = TransactionSigned::new_unhashed(transaction, signature); + test_transaction_signed_to_from_compact(tx); } } - fn test_transaction_signed_to_from_compact(tx_signed_no_hash: TransactionSignedNoHash) { + fn test_transaction_signed_to_from_compact(tx: TransactionSigned) { // zstd aware `to_compact` let mut buff: Vec = Vec::new(); - let written_bytes = tx_signed_no_hash.to_compact(&mut buff); - let (decoded, _) = TransactionSignedNoHash::from_compact(&buff, written_bytes); - assert_eq!(tx_signed_no_hash, decoded); + let written_bytes = tx.to_compact(&mut buff); + let (decoded, _) = TransactionSigned::from_compact(&buff, written_bytes); + assert_eq!(tx, decoded); } #[test] diff --git 
a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index ec49f44a680..eea10d44c9f 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -2,23 +2,30 @@ //! response to `GetPooledTransactions`. use super::{ - error::TransactionConversionError, - signature::{recover_signer, with_eip155_parity}, + error::TransactionConversionError, recover_signer_unchecked, signature::recover_signer, TxEip7702, }; -use crate::{ - BlobTransaction, BlobTransactionSidecar, Signature, Transaction, TransactionSigned, - TransactionSignedEcRecovered, EIP4844_TX_TYPE_ID, -}; +use crate::{BlobTransaction, RecoveredTx, Transaction, TransactionSigned, TxType}; +use alloc::vec::Vec; use alloy_consensus::{ + constants::EIP4844_TX_TYPE_ID, transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, - SignableTransaction, TxEip4844WithSidecar, + SignableTransaction, Signed, TxEip4844WithSidecar, +}; +use alloy_eips::{ + eip2718::{Decodable2718, Eip2718Result, Encodable2718}, + eip2930::AccessList, + eip4844::BlobTransactionSidecar, + eip7702::SignedAuthorization, +}; +use alloy_primitives::{ + Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256, }; -use alloy_eips::eip2718::{Decodable2718, Eip2718Result, Encodable2718}; -use alloy_primitives::{Address, TxHash, B256}; use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; use bytes::Buf; -use derive_more::{AsRef, Deref}; +use core::hash::{Hash, Hasher}; +use reth_primitives_traits::{InMemorySize, SignedTransaction}; +use revm_primitives::keccak256; use serde::{Deserialize, Serialize}; /// A response to `GetPooledTransactions`. This can include either a blob transaction, or a @@ -26,75 +33,20 @@ use serde::{Deserialize, Serialize}; #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests)] #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub enum PooledTransactionsElement { - /// A legacy transaction - Legacy { - /// The inner transaction - transaction: TxLegacy, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, - /// An EIP-2930 typed transaction - Eip2930 { - /// The inner transaction - transaction: TxEip2930, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, - /// An EIP-1559 typed transaction - Eip1559 { - /// The inner transaction - transaction: TxEip1559, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, - /// An EIP-7702 typed transaction - Eip7702 { - /// The inner transaction - transaction: TxEip7702, - /// The signature - signature: Signature, - /// The hash of the transaction - hash: TxHash, - }, + /// An untagged [`TxLegacy`]. + Legacy(Signed), + /// A [`TxEip2930`] tagged with type 1. + Eip2930(Signed), + /// A [`TxEip1559`] tagged with type 2. + Eip1559(Signed), + /// A [`TxEip7702`] tagged with type 4. + Eip7702(Signed), /// A blob transaction, which includes the transaction, blob data, commitments, and proofs. BlobTransaction(BlobTransaction), } impl PooledTransactionsElement { - /// Tries to convert a [`TransactionSigned`] into a [`PooledTransactionsElement`]. - /// - /// This function used as a helper to convert from a decoded p2p broadcast message to - /// [`PooledTransactionsElement`]. Since [`BlobTransaction`] is disallowed to be broadcasted on - /// p2p, return an err if `tx` is [`Transaction::Eip4844`]. 
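The rule these lines describe survives in `try_into_pooled` on `TransactionSigned` (added earlier in this diff): a sketch of the conversion at a p2p ingress point, with an illustrative function name:

    /// Convert a transaction decoded from a broadcast message into its pooled
    /// form. EIP-4844 transactions come back as `Err` because they cannot be
    /// gossiped without their blob sidecar, and deposit txs are never pooled.
    fn on_broadcast(tx: TransactionSigned) -> Option<PooledTransactionsElement> {
        tx.try_into_pooled().ok()
    }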
- pub fn try_from_broadcast(tx: TransactionSigned) -> Result { - match tx { - TransactionSigned { transaction: Transaction::Legacy(tx), signature, hash } => { - Ok(Self::Legacy { transaction: tx, signature, hash }) - } - TransactionSigned { transaction: Transaction::Eip2930(tx), signature, hash } => { - Ok(Self::Eip2930 { transaction: tx, signature, hash }) - } - TransactionSigned { transaction: Transaction::Eip1559(tx), signature, hash } => { - Ok(Self::Eip1559 { transaction: tx, signature, hash }) - } - TransactionSigned { transaction: Transaction::Eip7702(tx), signature, hash } => { - Ok(Self::Eip7702 { transaction: tx, signature, hash }) - } - // Not supported because missing blob sidecar - tx @ TransactionSigned { transaction: Transaction::Eip4844(_), .. } => Err(tx), - #[cfg(feature = "optimism")] - // Not supported because deposit transactions are never pooled - tx @ TransactionSigned { transaction: Transaction::Deposit(_), .. } => Err(tx), - } - } - - /// Converts from an EIP-4844 [`TransactionSignedEcRecovered`] to a + /// Converts from an EIP-4844 [`RecoveredTx`] to a /// [`PooledTransactionsElementEcRecovered`] with the given sidecar. /// /// Returns an `Err` containing the original `TransactionSigned` if the transaction is not @@ -103,15 +55,16 @@ impl PooledTransactionsElement { tx: TransactionSigned, sidecar: BlobTransactionSidecar, ) -> Result { + let hash = tx.hash(); Ok(match tx { // If the transaction is an EIP-4844 transaction... - TransactionSigned { transaction: Transaction::Eip4844(tx), signature, hash } => { + TransactionSigned { transaction: Transaction::Eip4844(tx), signature, .. } => { // Construct a `PooledTransactionsElement::BlobTransaction` with provided sidecar. - Self::BlobTransaction(BlobTransaction { + Self::BlobTransaction(BlobTransaction(Signed::new_unchecked( + TxEip4844WithSidecar { tx, sidecar }, signature, hash, - transaction: TxEip4844WithSidecar { tx, sidecar }, - }) + ))) } // If the transaction is not EIP-4844, return an error with the original // transaction. @@ -123,44 +76,33 @@ impl PooledTransactionsElement { /// It is only for signature signing or signer recovery. pub fn signature_hash(&self) -> B256 { match self { - Self::Legacy { transaction, .. } => transaction.signature_hash(), - Self::Eip2930 { transaction, .. } => transaction.signature_hash(), - Self::Eip1559 { transaction, .. } => transaction.signature_hash(), - Self::Eip7702 { transaction, .. } => transaction.signature_hash(), - Self::BlobTransaction(blob_tx) => blob_tx.transaction.signature_hash(), + Self::Legacy(tx) => tx.signature_hash(), + Self::Eip2930(tx) => tx.signature_hash(), + Self::Eip1559(tx) => tx.signature_hash(), + Self::Eip7702(tx) => tx.signature_hash(), + Self::BlobTransaction(tx) => tx.signature_hash(), } } /// Reference to transaction hash. Used to identify transaction. pub const fn hash(&self) -> &TxHash { match self { - Self::Legacy { hash, .. } | - Self::Eip2930 { hash, .. } | - Self::Eip1559 { hash, .. } | - Self::Eip7702 { hash, .. } => hash, - Self::BlobTransaction(tx) => &tx.hash, + Self::Legacy(tx) => tx.hash(), + Self::Eip2930(tx) => tx.hash(), + Self::Eip1559(tx) => tx.hash(), + Self::Eip7702(tx) => tx.hash(), + Self::BlobTransaction(tx) => tx.0.hash(), } } /// Returns the signature of the transaction. pub const fn signature(&self) -> &Signature { match self { - Self::Legacy { signature, .. } | - Self::Eip2930 { signature, .. } | - Self::Eip1559 { signature, .. } | - Self::Eip7702 { signature, .. 
} => signature, - Self::BlobTransaction(blob_tx) => &blob_tx.signature, - } - } - - /// Returns the transaction nonce. - pub const fn nonce(&self) -> u64 { - match self { - Self::Legacy { transaction, .. } => transaction.nonce, - Self::Eip2930 { transaction, .. } => transaction.nonce, - Self::Eip1559 { transaction, .. } => transaction.nonce, - Self::Eip7702 { transaction, .. } => transaction.nonce, - Self::BlobTransaction(blob_tx) => blob_tx.transaction.tx.nonce, + Self::Legacy(tx) => tx.signature(), + Self::Eip2930(tx) => tx.signature(), + Self::Eip1559(tx) => tx.signature(), + Self::Eip7702(tx) => tx.signature(), + Self::BlobTransaction(tx) => tx.0.signature(), } } @@ -178,37 +120,35 @@ impl PooledTransactionsElement { pub fn try_into_ecrecovered(self) -> Result { match self.recover_signer() { None => Err(self), - Some(signer) => Ok(PooledTransactionsElementEcRecovered { transaction: self, signer }), + Some(signer) => Ok(RecoveredTx { signed_transaction: self, signer }), } } - /// Create [`TransactionSignedEcRecovered`] by converting this transaction into + /// This encodes the transaction _without_ the signature, and is only suitable for creating a + /// hash intended for signing. + pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { + match self { + Self::Legacy(tx) => tx.tx().encode_for_signing(out), + Self::Eip2930(tx) => tx.tx().encode_for_signing(out), + Self::Eip1559(tx) => tx.tx().encode_for_signing(out), + Self::BlobTransaction(tx) => tx.tx().encode_for_signing(out), + Self::Eip7702(tx) => tx.tx().encode_for_signing(out), + } + } + + /// Create [`RecoveredTx`] by converting this transaction into /// [`TransactionSigned`] and [`Address`] of the signer. - pub fn into_ecrecovered_transaction(self, signer: Address) -> TransactionSignedEcRecovered { - TransactionSignedEcRecovered::from_signed_transaction(self.into_transaction(), signer) + pub fn into_ecrecovered_transaction(self, signer: Address) -> RecoveredTx { + RecoveredTx::from_signed_transaction(self.into_transaction(), signer) } /// Returns the inner [`TransactionSigned`]. pub fn into_transaction(self) -> TransactionSigned { match self { - Self::Legacy { transaction, signature, hash } => { - TransactionSigned { transaction: Transaction::Legacy(transaction), signature, hash } - } - Self::Eip2930 { transaction, signature, hash } => TransactionSigned { - transaction: Transaction::Eip2930(transaction), - signature, - hash, - }, - Self::Eip1559 { transaction, signature, hash } => TransactionSigned { - transaction: Transaction::Eip1559(transaction), - signature, - hash, - }, - Self::Eip7702 { transaction, signature, hash } => TransactionSigned { - transaction: Transaction::Eip7702(transaction), - signature, - hash, - }, + Self::Legacy(tx) => tx.into(), + Self::Eip2930(tx) => tx.into(), + Self::Eip1559(tx) => tx.into(), + Self::Eip7702(tx) => tx.into(), Self::BlobTransaction(blob_tx) => blob_tx.into_parts().0, } } @@ -222,7 +162,7 @@ impl PooledTransactionsElement { /// Returns the [`TxLegacy`] variant if the transaction is a legacy transaction. pub const fn as_legacy(&self) -> Option<&TxLegacy> { match self { - Self::Legacy { transaction, .. } => Some(transaction), + Self::Legacy(tx) => Some(tx.tx()), _ => None, } } @@ -230,7 +170,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip2930`] variant if the transaction is an EIP-2930 transaction. pub const fn as_eip2930(&self) -> Option<&TxEip2930> { match self { - Self::Eip2930 { transaction, .. 
} => Some(transaction), + Self::Eip2930(tx) => Some(tx.tx()), _ => None, } } @@ -238,7 +178,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip1559`] variant if the transaction is an EIP-1559 transaction. pub const fn as_eip1559(&self) -> Option<&TxEip1559> { match self { - Self::Eip1559 { transaction, .. } => Some(transaction), + Self::Eip1559(tx) => Some(tx.tx()), _ => None, } } @@ -246,7 +186,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip4844`] variant if the transaction is an EIP-4844 transaction. pub const fn as_eip4844(&self) -> Option<&TxEip4844> { match self { - Self::BlobTransaction(tx) => Some(&tx.transaction.tx), + Self::BlobTransaction(tx) => Some(tx.0.tx().tx()), _ => None, } } @@ -254,7 +194,7 @@ impl PooledTransactionsElement { /// Returns the [`TxEip7702`] variant if the transaction is an EIP-7702 transaction. pub const fn as_eip7702(&self) -> Option<&TxEip7702> { match self { - Self::Eip7702 { transaction, .. } => Some(transaction), + Self::Eip7702(tx) => Some(tx.tx()), _ => None, } } @@ -263,47 +203,15 @@ impl PooledTransactionsElement { /// transaction. /// /// This is the number of blobs times the - /// [`DATA_GAS_PER_BLOB`](crate::constants::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. + /// [`DATA_GAS_PER_BLOB`](alloy_eips::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. pub fn blob_gas_used(&self) -> Option { self.as_eip4844().map(TxEip4844::blob_gas) } +} - /// Max fee per blob gas for eip4844 transaction [`TxEip4844`]. - /// - /// Returns `None` for non-eip4844 transactions. - /// - /// This is also commonly referred to as the "Blob Gas Fee Cap" (`BlobGasFeeCap`). - pub const fn max_fee_per_blob_gas(&self) -> Option { - match self { - Self::BlobTransaction(tx) => Some(tx.transaction.tx.max_fee_per_blob_gas), - _ => None, - } - } - - /// Max priority fee per gas for eip1559 transaction, for legacy and eip2930 transactions this - /// is `None` - /// - /// This is also commonly referred to as the "Gas Tip Cap" (`GasTipCap`). - pub const fn max_priority_fee_per_gas(&self) -> Option { - match self { - Self::Legacy { .. } | Self::Eip2930 { .. } => None, - Self::Eip1559 { transaction, .. } => Some(transaction.max_priority_fee_per_gas), - Self::Eip7702 { transaction, .. } => Some(transaction.max_priority_fee_per_gas), - Self::BlobTransaction(tx) => Some(tx.transaction.tx.max_priority_fee_per_gas), - } - } - - /// Max fee per gas for eip1559 transaction, for legacy transactions this is `gas_price`. - /// - /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). - pub const fn max_fee_per_gas(&self) -> u128 { - match self { - Self::Legacy { transaction, .. } => transaction.gas_price, - Self::Eip2930 { transaction, .. } => transaction.gas_price, - Self::Eip1559 { transaction, .. } => transaction.max_fee_per_gas, - Self::Eip7702 { transaction, .. } => transaction.max_fee_per_gas, - Self::BlobTransaction(tx) => tx.transaction.tx.max_fee_per_gas, - } +impl Hash for PooledTransactionsElement { + fn hash(&self, state: &mut H) { + self.trie_hash().hash(state); } } @@ -391,73 +299,37 @@ impl Decodable for PooledTransactionsElement { impl Encodable2718 for PooledTransactionsElement { fn type_flag(&self) -> Option { match self { - Self::Legacy { .. } => None, - Self::Eip2930 { .. } => Some(0x01), - Self::Eip1559 { .. } => Some(0x02), - Self::BlobTransaction { .. } => Some(0x03), - Self::Eip7702 { .. 
} => Some(0x04), + Self::Legacy(_) => None, + Self::Eip2930(_) => Some(0x01), + Self::Eip1559(_) => Some(0x02), + Self::BlobTransaction(_) => Some(0x03), + Self::Eip7702(_) => Some(0x04), } } fn encode_2718_len(&self) -> usize { match self { - Self::Legacy { transaction, signature, .. } => { - // method computes the payload len with a RLP header - transaction.encoded_len_with_signature(&with_eip155_parity( - signature, - transaction.chain_id, - )) - } - Self::Eip2930 { transaction, signature, .. } => { - // method computes the payload len without a RLP header - transaction.encoded_len_with_signature(signature, false) - } - Self::Eip1559 { transaction, signature, .. } => { - // method computes the payload len without a RLP header - transaction.encoded_len_with_signature(signature, false) - } - Self::Eip7702 { transaction, signature, .. } => { - // method computes the payload len without a RLP header - transaction.encoded_len_with_signature(signature, false) - } - Self::BlobTransaction(blob_tx) => { - // the encoding does not use a header, so we set `with_header` to false - blob_tx.payload_len_with_type(false) - } + Self::Legacy(tx) => tx.eip2718_encoded_length(), + Self::Eip2930(tx) => tx.eip2718_encoded_length(), + Self::Eip1559(tx) => tx.eip2718_encoded_length(), + Self::Eip7702(tx) => tx.eip2718_encoded_length(), + Self::BlobTransaction(tx) => tx.eip2718_encoded_length(), } } fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { - // The encoding of `tx-data` depends on the transaction type. Refer to these docs for more - // information on the exact format: - // - Legacy: TxLegacy::encode_with_signature - // - EIP-2930: TxEip2930::encode_with_signature - // - EIP-1559: TxEip1559::encode_with_signature - // - EIP-4844: BlobTransaction::encode_with_type_inner - // - EIP-7702: TxEip7702::encode_with_signature - match self { - Self::Legacy { transaction, signature, .. } => transaction - .encode_with_signature_fields( - &with_eip155_parity(signature, transaction.chain_id), - out, - ), - Self::Eip2930 { transaction, signature, .. } => { - transaction.encode_with_signature(signature, out, false) - } - Self::Eip1559 { transaction, signature, .. } => { - transaction.encode_with_signature(signature, out, false) - } - Self::Eip7702 { transaction, signature, .. 
} => { - transaction.encode_with_signature(signature, out, false) - } - Self::BlobTransaction(blob_tx) => { - // The inner encoding is used with `with_header` set to true, making the final - // encoding: - // `tx_type || rlp([transaction_payload_body, blobs, commitments, proofs]))` - blob_tx.encode_with_type_inner(out, false); - } + match self { + Self::Legacy(tx) => tx.eip2718_encode(out), + Self::Eip2930(tx) => tx.eip2718_encode(out), + Self::Eip1559(tx) => tx.eip2718_encode(out), + Self::Eip7702(tx) => tx.eip2718_encode(out), + Self::BlobTransaction(tx) => tx.eip2718_encode(out), } } + + fn trie_hash(&self) -> B256 { + *self.hash() + } } impl Decodable2718 for PooledTransactionsElement { @@ -480,7 +352,7 @@ impl Decodable2718 for PooledTransactionsElement { } tx_type => { let typed_tx = TransactionSigned::typed_decode(tx_type, buf)?; - + let hash = typed_tx.hash(); match typed_tx.transaction { Transaction::Legacy(_) => Err(RlpError::Custom( "legacy transactions should not be a result of typed decoding", @@ -490,21 +362,11 @@ impl Decodable2718 for PooledTransactionsElement { Transaction::Eip4844(_) => Err(RlpError::Custom( "EIP-4844 transactions can only be decoded with transaction type 0x03", ).into()), - Transaction::Eip2930(tx) => Ok(Self::Eip2930 { - transaction: tx, - signature: typed_tx.signature, - hash: typed_tx.hash, - }), - Transaction::Eip1559(tx) => Ok(Self::Eip1559 { - transaction: tx, - signature: typed_tx.signature, - hash: typed_tx.hash, - }), - Transaction::Eip7702(tx) => Ok(Self::Eip7702 { - transaction: tx, - signature: typed_tx.signature, - hash: typed_tx.hash, - }), + Transaction::Eip2930(tx) => Ok(Self::Eip2930 ( + Signed::new_unchecked(tx, typed_tx.signature, hash) + )), + Transaction::Eip1559(tx) => Ok(Self::Eip1559( Signed::new_unchecked(tx, typed_tx.signature, hash))), + Transaction::Eip7702(tx) => Ok(Self::Eip7702( Signed::new_unchecked(tx, typed_tx.signature, hash))), #[cfg(feature = "optimism")] Transaction::Deposit(_) => Err(RlpError::Custom("Optimism deposit transaction cannot be decoded to PooledTransactionsElement").into()) } @@ -517,7 +379,242 @@ impl Decodable2718 for PooledTransactionsElement { let (transaction, hash, signature) = TransactionSigned::decode_rlp_legacy_transaction_tuple(buf)?; - Ok(Self::Legacy { transaction, signature, hash }) + Ok(Self::Legacy(Signed::new_unchecked(transaction, signature, hash))) + } +} + +impl alloy_consensus::Transaction for PooledTransactionsElement { + fn chain_id(&self) -> Option { + match self { + Self::Legacy(tx) => tx.tx().chain_id(), + Self::Eip2930(tx) => tx.tx().chain_id(), + Self::Eip1559(tx) => tx.tx().chain_id(), + Self::Eip7702(tx) => tx.tx().chain_id(), + Self::BlobTransaction(tx) => tx.tx().chain_id(), + } + } + + fn nonce(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.tx().nonce(), + Self::Eip2930(tx) => tx.tx().nonce(), + Self::Eip1559(tx) => tx.tx().nonce(), + Self::Eip7702(tx) => tx.tx().nonce(), + Self::BlobTransaction(tx) => tx.tx().nonce(), + } + } + + fn gas_limit(&self) -> u64 { + match self { + Self::Legacy(tx) => tx.tx().gas_limit(), + Self::Eip2930(tx) => tx.tx().gas_limit(), + Self::Eip1559(tx) => tx.tx().gas_limit(), + Self::Eip7702(tx) => tx.tx().gas_limit(), + Self::BlobTransaction(tx) => tx.tx().gas_limit(), + } + } + + fn gas_price(&self) -> Option { + match self { + Self::Legacy(tx) => tx.tx().gas_price(), + Self::Eip2930(tx) => tx.tx().gas_price(), + Self::Eip1559(tx) => tx.tx().gas_price(), + Self::Eip7702(tx) => tx.tx().gas_price(), + Self::BlobTransaction(tx) => 
tx.tx().gas_price(), + } + } + + fn max_fee_per_gas(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().max_fee_per_gas(), + Self::Eip2930(tx) => tx.tx().max_fee_per_gas(), + Self::Eip1559(tx) => tx.tx().max_fee_per_gas(), + Self::Eip7702(tx) => tx.tx().max_fee_per_gas(), + Self::BlobTransaction(tx) => tx.tx().max_fee_per_gas(), + } + } + + fn max_priority_fee_per_gas(&self) -> Option { + match self { + Self::Legacy(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip2930(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip1559(tx) => tx.tx().max_priority_fee_per_gas(), + Self::Eip7702(tx) => tx.tx().max_priority_fee_per_gas(), + Self::BlobTransaction(tx) => tx.tx().max_priority_fee_per_gas(), + } + } + + fn max_fee_per_blob_gas(&self) -> Option { + match self { + Self::Legacy(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip2930(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip1559(tx) => tx.tx().max_fee_per_blob_gas(), + Self::Eip7702(tx) => tx.tx().max_fee_per_blob_gas(), + Self::BlobTransaction(tx) => tx.tx().max_fee_per_blob_gas(), + } + } + + fn priority_fee_or_price(&self) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().priority_fee_or_price(), + Self::Eip2930(tx) => tx.tx().priority_fee_or_price(), + Self::Eip1559(tx) => tx.tx().priority_fee_or_price(), + Self::Eip7702(tx) => tx.tx().priority_fee_or_price(), + Self::BlobTransaction(tx) => tx.tx().priority_fee_or_price(), + } + } + + fn effective_gas_price(&self, base_fee: Option) -> u128 { + match self { + Self::Legacy(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip2930(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip1559(tx) => tx.tx().effective_gas_price(base_fee), + Self::Eip7702(tx) => tx.tx().effective_gas_price(base_fee), + Self::BlobTransaction(tx) => tx.tx().effective_gas_price(base_fee), + } + } + + fn is_dynamic_fee(&self) -> bool { + match self { + Self::Legacy(tx) => tx.tx().is_dynamic_fee(), + Self::Eip2930(tx) => tx.tx().is_dynamic_fee(), + Self::Eip1559(tx) => tx.tx().is_dynamic_fee(), + Self::Eip7702(tx) => tx.tx().is_dynamic_fee(), + Self::BlobTransaction(tx) => tx.tx().is_dynamic_fee(), + } + } + + fn kind(&self) -> TxKind { + match self { + Self::Legacy(tx) => tx.tx().kind(), + Self::Eip2930(tx) => tx.tx().kind(), + Self::Eip1559(tx) => tx.tx().kind(), + Self::Eip7702(tx) => tx.tx().kind(), + Self::BlobTransaction(tx) => tx.tx().kind(), + } + } + + fn is_create(&self) -> bool { + match self { + Self::Legacy(tx) => tx.tx().is_create(), + Self::Eip2930(tx) => tx.tx().is_create(), + Self::Eip1559(tx) => tx.tx().is_create(), + Self::Eip7702(tx) => tx.tx().is_create(), + Self::BlobTransaction(tx) => tx.tx().is_create(), + } + } + + fn value(&self) -> U256 { + match self { + Self::Legacy(tx) => tx.tx().value(), + Self::Eip2930(tx) => tx.tx().value(), + Self::Eip1559(tx) => tx.tx().value(), + Self::Eip7702(tx) => tx.tx().value(), + Self::BlobTransaction(tx) => tx.tx().value(), + } + } + + fn input(&self) -> &Bytes { + match self { + Self::Legacy(tx) => tx.tx().input(), + Self::Eip2930(tx) => tx.tx().input(), + Self::Eip1559(tx) => tx.tx().input(), + Self::Eip7702(tx) => tx.tx().input(), + Self::BlobTransaction(tx) => tx.tx().input(), + } + } + + fn ty(&self) -> u8 { + match self { + Self::Legacy(tx) => tx.tx().ty(), + Self::Eip2930(tx) => tx.tx().ty(), + Self::Eip1559(tx) => tx.tx().ty(), + Self::Eip7702(tx) => tx.tx().ty(), + Self::BlobTransaction(tx) => tx.tx().ty(), + } + } + + fn access_list(&self) -> Option<&AccessList> { + match self { + Self::Legacy(tx) => 
+            Self::Eip2930(tx) => tx.tx().access_list(),
+            Self::Eip1559(tx) => tx.tx().access_list(),
+            Self::Eip7702(tx) => tx.tx().access_list(),
+            Self::BlobTransaction(tx) => tx.tx().access_list(),
+        }
+    }
+
+    fn blob_versioned_hashes(&self) -> Option<&[B256]> {
+        match self {
+            Self::Legacy(tx) => tx.tx().blob_versioned_hashes(),
+            Self::Eip2930(tx) => tx.tx().blob_versioned_hashes(),
+            Self::Eip1559(tx) => tx.tx().blob_versioned_hashes(),
+            Self::Eip7702(tx) => tx.tx().blob_versioned_hashes(),
+            Self::BlobTransaction(tx) => tx.tx().blob_versioned_hashes(),
+        }
+    }
+
+    fn authorization_list(&self) -> Option<&[SignedAuthorization]> {
+        match self {
+            Self::Legacy(tx) => tx.tx().authorization_list(),
+            Self::Eip2930(tx) => tx.tx().authorization_list(),
+            Self::Eip1559(tx) => tx.tx().authorization_list(),
+            Self::Eip7702(tx) => tx.tx().authorization_list(),
+            Self::BlobTransaction(tx) => tx.tx().authorization_list(),
+        }
+    }
+}
+
+impl SignedTransaction for PooledTransactionsElement {
+    type Type = TxType;
+
+    fn tx_hash(&self) -> &TxHash {
+        match self {
+            Self::Legacy(tx) => tx.hash(),
+            Self::Eip2930(tx) => tx.hash(),
+            Self::Eip1559(tx) => tx.hash(),
+            Self::Eip7702(tx) => tx.hash(),
+            Self::BlobTransaction(tx) => tx.hash(),
+        }
+    }
+
+    fn signature(&self) -> &Signature {
+        match self {
+            Self::Legacy(tx) => tx.signature(),
+            Self::Eip2930(tx) => tx.signature(),
+            Self::Eip1559(tx) => tx.signature(),
+            Self::Eip7702(tx) => tx.signature(),
+            Self::BlobTransaction(tx) => tx.signature(),
+        }
+    }
+
+    fn recover_signer(&self) -> Option<Address> {
+        let signature_hash = self.signature_hash();
+        recover_signer(self.signature(), signature_hash)
+    }
+
+    fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec<u8>) -> Option<Address> {
+        self.encode_for_signing(buf);
+        let signature_hash = keccak256(buf);
+        recover_signer_unchecked(self.signature(), signature_hash)
+    }
+}
+
+impl InMemorySize for PooledTransactionsElement {
+    fn size(&self) -> usize {
+        match self {
+            Self::Legacy(tx) => tx.size(),
+            Self::Eip2930(tx) => tx.size(),
+            Self::Eip1559(tx) => tx.size(),
+            Self::Eip7702(tx) => tx.size(),
+            Self::BlobTransaction(tx) => tx.size(),
+        }
+    }
+}
+
+impl From<PooledTransactionsElementEcRecovered> for PooledTransactionsElement {
+    fn from(recovered: PooledTransactionsElementEcRecovered) -> Self {
+        recovered.into_signed()
+    }
+}
@@ -525,7 +622,19 @@ impl TryFrom<TransactionSigned> for PooledTransactionsElement {
     type Error = TransactionConversionError;
 
     fn try_from(tx: TransactionSigned) -> Result<Self, Self::Error> {
-        Self::try_from_broadcast(tx).map_err(|_| TransactionConversionError::UnsupportedForP2P)
+        tx.try_into_pooled().map_err(|_| TransactionConversionError::UnsupportedForP2P)
+    }
+}
+
+impl From<PooledTransactionsElement> for TransactionSigned {
+    fn from(element: PooledTransactionsElement) -> Self {
+        match element {
+            PooledTransactionsElement::Legacy(tx) => tx.into(),
+            PooledTransactionsElement::Eip2930(tx) => tx.into(),
+            PooledTransactionsElement::Eip1559(tx) => tx.into(),
+            PooledTransactionsElement::Eip7702(tx) => tx.into(),
+            PooledTransactionsElement::BlobTransaction(blob_tx) => blob_tx.into_parts().0,
+        }
     }
 }
 
@@ -542,87 +651,60 @@ impl<'a> arbitrary::Arbitrary<'a> for PooledTransactionsElement {
         // Attempt to create a `TransactionSigned` with arbitrary data.
         let tx_signed = TransactionSigned::arbitrary(u)?;
         // Attempt to create a `PooledTransactionsElement` with arbitrary data, handling the Result.
-        match Self::try_from(tx_signed) {
-            Ok(Self::BlobTransaction(mut tx)) => {
-                // Successfully converted to a BlobTransaction, now generate a sidecar.
-                tx.transaction.sidecar = crate::BlobTransactionSidecar::arbitrary(u)?;
-                Ok(Self::BlobTransaction(tx))
+        match tx_signed.try_into_pooled() {
+            Ok(tx) => Ok(tx),
+            Err(tx) => {
+                let (tx, sig, hash) = tx.into_parts();
+                match tx {
+                    Transaction::Eip4844(tx) => {
+                        let sidecar = BlobTransactionSidecar::arbitrary(u)?;
+                        Ok(Self::BlobTransaction(BlobTransaction(Signed::new_unchecked(
+                            TxEip4844WithSidecar { tx, sidecar },
+                            sig,
+                            hash,
+                        ))))
+                    }
+                    _ => Err(arbitrary::Error::IncorrectFormat),
+                }
             }
-            Ok(tx) => Ok(tx), // Successfully converted, but not a BlobTransaction.
-            Err(_) => Err(arbitrary::Error::IncorrectFormat), /* Conversion failed, return an
-                                                               * arbitrary error. */
         }
     }
 }
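// A minimal round-trip sketch of the Encodable2718/Decodable2718 pair
// implemented above. Illustrative only, not part of this diff; it assumes the
// trait impls on `PooledTransactionsElement` from this file and the
// `alloy_eips::eip2718` traits.
fn roundtrip_2718(
    tx: &PooledTransactionsElement,
) -> alloy_eips::eip2718::Eip2718Result<PooledTransactionsElement> {
    use alloy_eips::eip2718::{Decodable2718, Encodable2718};
    // `encoded_2718` yields `tx_type || tx_payload` for typed transactions and
    // the bare RLP list for legacy ones, matching the per-variant
    // `eip2718_encode` calls above.
    let bytes = tx.encoded_2718();
    // `decode_2718` dispatches on the first byte: values >= 0xc0 are RLP list
    // headers (the legacy path), anything else is parsed as a transaction type.
    PooledTransactionsElement::decode_2718(&mut bytes.as_slice())
}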
 
 /// A signed pooled transaction with recovered signer.
-#[derive(Debug, Clone, PartialEq, Eq, AsRef, Deref)]
-pub struct PooledTransactionsElementEcRecovered {
-    /// Signer of the transaction
-    signer: Address,
-    /// Signed transaction
-    #[deref]
-    #[as_ref]
-    transaction: PooledTransactionsElement,
-}
-
-// === impl PooledTransactionsElementEcRecovered ===
+pub type PooledTransactionsElementEcRecovered = RecoveredTx<PooledTransactionsElement>;
 
 impl PooledTransactionsElementEcRecovered {
-    /// Signer of transaction recovered from signature
-    pub const fn signer(&self) -> Address {
-        self.signer
-    }
-
-    /// Transform back to [`PooledTransactionsElement`]
-    pub fn into_transaction(self) -> PooledTransactionsElement {
-        self.transaction
-    }
-
-    /// Transform back to [`TransactionSignedEcRecovered`]
-    pub fn into_ecrecovered_transaction(self) -> TransactionSignedEcRecovered {
-        let (tx, signer) = self.into_components();
+    /// Transform back to [`RecoveredTx`]
+    pub fn into_ecrecovered_transaction(self) -> RecoveredTx {
+        let (tx, signer) = self.to_components();
         tx.into_ecrecovered_transaction(signer)
     }
 
-    /// Dissolve Self to its component
-    pub fn into_components(self) -> (PooledTransactionsElement, Address) {
-        (self.transaction, self.signer)
-    }
-
-    /// Create [`TransactionSignedEcRecovered`] from [`PooledTransactionsElement`] and [`Address`]
-    /// of the signer.
-    pub const fn from_signed_transaction(
-        transaction: PooledTransactionsElement,
-        signer: Address,
-    ) -> Self {
-        Self { transaction, signer }
-    }
-
-    /// Converts from an EIP-4844 [`TransactionSignedEcRecovered`] to a
+    /// Converts from an EIP-4844 [`RecoveredTx`] to a
     /// [`PooledTransactionsElementEcRecovered`] with the given sidecar.
     ///
-    /// Returns an error if the transaction is not an EIP-4844 transaction.
+    /// Returns an error if the transaction is not an EIP-4844 transaction.
     pub fn try_from_blob_transaction(
-        tx: TransactionSignedEcRecovered,
+        tx: RecoveredTx,
         sidecar: BlobTransactionSidecar,
-    ) -> Result<Self, TransactionSignedEcRecovered> {
-        let TransactionSignedEcRecovered { signer, signed_transaction } = tx;
+    ) -> Result<Self, RecoveredTx> {
+        let RecoveredTx { signer, signed_transaction } = tx;
         let transaction =
             PooledTransactionsElement::try_from_blob_transaction(signed_transaction, sidecar)
-                .map_err(|tx| TransactionSignedEcRecovered { signer, signed_transaction: tx })?;
-        Ok(Self { transaction, signer })
+                .map_err(|tx| RecoveredTx { signer, signed_transaction: tx })?;
+        Ok(Self::from_signed_transaction(transaction, signer))
    }
 }
 
-/// Converts a `TransactionSignedEcRecovered` into a `PooledTransactionsElementEcRecovered`.
-impl TryFrom<TransactionSignedEcRecovered> for PooledTransactionsElementEcRecovered {
+/// Converts a `RecoveredTx` into a `PooledTransactionsElementEcRecovered`.
+impl TryFrom<RecoveredTx> for PooledTransactionsElementEcRecovered {
     type Error = TransactionConversionError;
 
-    fn try_from(tx: TransactionSignedEcRecovered) -> Result<Self, Self::Error> {
+    fn try_from(tx: RecoveredTx) -> Result<Self, Self::Error> {
         match PooledTransactionsElement::try_from(tx.signed_transaction) {
             Ok(pooled_transaction) => {
-                Ok(Self { transaction: pooled_transaction, signer: tx.signer })
+                Ok(Self::from_signed_transaction(pooled_transaction, tx.signer))
             }
             Err(_) => Err(TransactionConversionError::UnsupportedForP2P),
         }
@@ -632,6 +714,7 @@ impl TryFrom for PooledTransactionsElementEcRecovered {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use alloy_consensus::Transaction as _;
     use alloy_primitives::{address, hex};
     use assert_matches::assert_matches;
     use bytes::Bytes;
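The conversion surface above can be exercised as in the following sketch. Illustrative only: the helper name is hypothetical, and it relies solely on items visible in this diff (`try_from_blob_transaction` hands a non-EIP-4844 transaction back unchanged, which the sketch discards for brevity).

    fn pool_blob_tx(
        recovered: RecoveredTx,
        sidecar: BlobTransactionSidecar,
    ) -> Option<PooledTransactionsElementEcRecovered> {
        // Ok: an EIP-4844 transaction, now bundled with the sidecar that the
        // `PooledTransactions` wire format requires. Err: any other transaction
        // type, returned unchanged.
        PooledTransactionsElementEcRecovered::try_from_blob_transaction(recovered, sidecar).ok()
    }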
diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs
index 87b8c1fbf3e..e244a53df77 100644
--- a/crates/primitives/src/transaction/sidecar.rs
+++ b/crates/primitives/src/transaction/sidecar.rs
@@ -1,180 +1,51 @@
 #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))]
 
-use crate::{Signature, Transaction, TransactionSigned, EIP4844_TX_TYPE_ID};
-use alloy_consensus::{transaction::TxEip4844, TxEip4844WithSidecar};
-use alloy_primitives::{keccak256, TxHash};
-use alloy_rlp::{Decodable, Error as RlpError, Header};
+use crate::{Transaction, TransactionSigned};
+use alloy_consensus::{transaction::RlpEcdsaTx, Signed, TxEip4844WithSidecar};
+use alloy_eips::eip4844::BlobTransactionSidecar;
+use derive_more::Deref;
+use reth_primitives_traits::InMemorySize;
 use serde::{Deserialize, Serialize};
 
-#[doc(inline)]
-pub use alloy_eips::eip4844::BlobTransactionSidecar;
-
-#[cfg(feature = "c-kzg")]
-pub use alloy_eips::eip4844::BlobTransactionValidationError;
-
-use alloc::vec::Vec;
-
 /// A response to `GetPooledTransactions` that includes blob data, their commitments, and their
 /// corresponding proofs.
 ///
 /// This is defined in [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#networking) as an element
 /// of a `PooledTransactions` response.
-#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
-pub struct BlobTransaction {
-    /// The transaction hash.
-    pub hash: TxHash,
-    /// The transaction signature.
-    pub signature: Signature,
-    /// The transaction payload with the sidecar.
-    #[serde(flatten)]
-    pub transaction: TxEip4844WithSidecar,
-}
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Deref)]
+pub struct BlobTransaction(pub Signed<TxEip4844WithSidecar>);
 
 impl BlobTransaction {
     /// Constructs a new [`BlobTransaction`] from a [`TransactionSigned`] and a
     /// [`BlobTransactionSidecar`].
     ///
-    /// Returns an error if the signed transaction is not [`TxEip4844`]
+    /// Returns an error if the signed transaction is not [`Transaction::Eip4844`]
     pub fn try_from_signed(
         tx: TransactionSigned,
         sidecar: BlobTransactionSidecar,
     ) -> Result<Self, (TransactionSigned, BlobTransactionSidecar)> {
-        let TransactionSigned { transaction, signature, hash } = tx;
+        let hash = tx.hash();
+        let TransactionSigned { transaction, signature, .. } = tx;
         match transaction {
-            Transaction::Eip4844(transaction) => Ok(Self {
-                hash,
-                transaction: TxEip4844WithSidecar { tx: transaction, sidecar },
+            Transaction::Eip4844(transaction) => Ok(Self(Signed::new_unchecked(
+                TxEip4844WithSidecar { tx: transaction, sidecar },
                 signature,
-            }),
+                hash,
+            ))),
             transaction => {
-                let tx = TransactionSigned { transaction, signature, hash };
+                let tx = TransactionSigned::new(transaction, signature, hash);
                 Err((tx, sidecar))
             }
         }
     }
 
-    /// Verifies that the transaction's blob data, commitments, and proofs are all valid.
-    ///
-    /// See also [`TxEip4844::validate_blob`]
-    #[cfg(feature = "c-kzg")]
-    pub fn validate(
-        &self,
-        proof_settings: &c_kzg::KzgSettings,
-    ) -> Result<(), BlobTransactionValidationError> {
-        self.transaction.validate_blob(proof_settings)
-    }
-
     /// Splits the [`BlobTransaction`] into its [`TransactionSigned`] and [`BlobTransactionSidecar`]
     /// components.
     pub fn into_parts(self) -> (TransactionSigned, BlobTransactionSidecar) {
-        let transaction = TransactionSigned {
-            transaction: Transaction::Eip4844(self.transaction.tx),
-            hash: self.hash,
-            signature: self.signature,
-        };
-
-        (transaction, self.transaction.sidecar)
-    }
-
-    /// Encodes the [`BlobTransaction`] fields as RLP, with a tx type. If `with_header` is `false`,
-    /// the following will be encoded:
-    /// `tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs])`
-    ///
-    /// If `with_header` is `true`, the following will be encoded:
-    /// `rlp(tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs]))`
-    ///
-    /// NOTE: The header will be a byte string header, not a list header.
-    pub(crate) fn encode_with_type_inner(&self, out: &mut dyn bytes::BufMut, with_header: bool) {
-        // Calculate the length of:
-        // `tx_type || rlp([transaction_payload_body, blobs, commitments, proofs])`
-        //
-        // to construct and encode the string header
-        if with_header {
-            Header {
-                list: false,
-                // add one for the tx type
-                payload_length: 1 + self.payload_len(),
-            }
-            .encode(out);
-        }
-
-        out.put_u8(EIP4844_TX_TYPE_ID);
-
-        // Now we encode the inner blob transaction:
-        self.encode_inner(out);
-    }
-
-    /// Encodes the [`BlobTransaction`] fields as RLP, with the following format:
-    /// `rlp([transaction_payload_body, blobs, commitments, proofs])`
-    ///
-    /// where `transaction_payload_body` is a list:
-    /// `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]`
-    ///
-    /// Note: this should be used only when implementing other RLP encoding methods, and does not
-    /// represent the full RLP encoding of the blob transaction.
-    pub(crate) fn encode_inner(&self, out: &mut dyn bytes::BufMut) {
-        self.transaction.encode_with_signature_fields(&self.signature, out);
-    }
-
-    /// Outputs the length of the RLP encoding of the blob transaction, including the tx type byte,
-    /// optionally including the length of a wrapping string header. If `with_header` is `false`,
-    /// the length of the following will be calculated:
-    /// `tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs])`
-    ///
-    /// If `with_header` is `true`, the length of the following will be calculated:
-    /// `rlp(tx_type (0x03) || rlp([transaction_payload_body, blobs, commitments, proofs]))`
-    pub(crate) fn payload_len_with_type(&self, with_header: bool) -> usize {
-        if with_header {
-            // Construct a header and use that to calculate the total length
-            let wrapped_header = Header {
-                list: false,
-                // add one for the tx type byte
-                payload_length: 1 + self.payload_len(),
-            };
-
-            // The total length is now the length of the header plus the length of the payload
-            // (which includes the tx type byte)
-            wrapped_header.length() + wrapped_header.payload_length
-        } else {
-            // Just add the length of the tx type to the payload length
-            1 + self.payload_len()
-        }
-    }
-
-    /// Outputs the length of the RLP encoding of the blob transaction with the following format:
-    /// `rlp([transaction_payload_body, blobs, commitments, proofs])`
-    ///
-    /// where `transaction_payload_body` is a list:
-    /// `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]`
-    ///
-    /// Note: this should be used only when implementing other RLP encoding length methods, and
-    /// does not represent the full RLP encoding of the blob transaction.
-    pub(crate) fn payload_len(&self) -> usize {
-        // The `transaction_payload_body` length is the length of the fields, plus the length of
-        // its list header.
-        let tx_header = Header {
-            list: true,
-            payload_length: self.transaction.tx.fields_len() + self.signature.rlp_vrs_len(),
-        };
-
-        let tx_length = tx_header.length() + tx_header.payload_length;
-
-        // The payload length is the length of the `tranascation_payload_body` list, plus the
-        // length of the blobs, commitments, and proofs.
-        let payload_length = tx_length + self.transaction.sidecar.fields_len();
-
-        // We use the calculated payload len to construct the first list header, which encompasses
-        // everything in the tx - the length of the second, inner list header is part of
-        // payload_length
-        let blob_tx_header = Header { list: true, payload_length };
-
-        // The final length is the length of:
-        // * the outer blob tx header +
-        // * the inner tx header +
-        // * the inner tx fields +
-        // * the signature fields +
-        // * the sidecar fields
-        blob_tx_header.length() + blob_tx_header.payload_length
+        let (transaction, signature, hash) = self.0.into_parts();
+        let (transaction, sidecar) = transaction.into_parts();
+        let transaction = TransactionSigned::new(transaction.into(), signature, hash);
+        (transaction, sidecar)
     }
 
     /// Decodes a [`BlobTransaction`] from RLP. This expects the encoding to be:
@@ -186,109 +57,32 @@ impl BlobTransaction {
     /// Note: this should be used only when implementing other RLP decoding methods, and does not
     /// represent the full RLP decoding of the `PooledTransactionsElement` type.
     pub(crate) fn decode_inner(data: &mut &[u8]) -> alloy_rlp::Result<Self> {
-        // decode the _first_ list header for the rest of the transaction
-        let outer_header = Header::decode(data)?;
-        if !outer_header.list {
-            return Err(RlpError::Custom("PooledTransactions blob tx must be encoded as a list"))
-        }
-
-        let outer_remaining_len = data.len();
-
-        // Now we need to decode the inner 4844 transaction and its signature:
-        //
-        // `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]`
-        let inner_header = Header::decode(data)?;
-        if !inner_header.list {
-            return Err(RlpError::Custom(
-                "PooledTransactions inner blob tx must be encoded as a list",
-            ))
-        }
-
-        let inner_remaining_len = data.len();
-
-        // inner transaction
-        let transaction = TxEip4844::decode_fields(data)?;
-
-        // signature
-        let signature = Signature::decode_rlp_vrs(data)?;
-
-        // the inner header only decodes the transaction and signature, so we check the length here
-        let inner_consumed = inner_remaining_len - data.len();
-        if inner_consumed != inner_header.payload_length {
-            return Err(RlpError::UnexpectedLength)
-        }
-
-        // All that's left are the blobs, commitments, and proofs
-        let sidecar = BlobTransactionSidecar::decode(data)?;
-
-        // # Calculating the hash
-        //
-        // The full encoding of the `PooledTransaction` response is:
-        // `tx_type (0x03) || rlp([tx_payload_body, blobs, commitments, proofs])`
-        //
-        // The transaction hash however, is:
-        // `keccak256(tx_type (0x03) || rlp(tx_payload_body))`
-        //
-        // Note that this is `tx_payload_body`, not `[tx_payload_body]`, which would be
-        // `[[chain_id, nonce, max_priority_fee_per_gas, ...]]`, i.e. a list within a list.
-        //
-        // Because the pooled transaction encoding is different than the hash encoding for
-        // EIP-4844 transactions, we do not use the original buffer to calculate the hash.
-        //
-        // Instead, we use `encode_with_signature`, which RLP encodes the transaction with a
-        // signature for hashing without a header. We then hash the result.
-        let mut buf = Vec::new();
-        transaction.encode_with_signature(&signature, &mut buf, false);
-        let hash = keccak256(&buf);
-
-        // the outer header is for the entire transaction, so we check the length here
-        let outer_consumed = outer_remaining_len - data.len();
-        if outer_consumed != outer_header.payload_length {
-            return Err(RlpError::UnexpectedLength)
-        }
-
-        Ok(Self { transaction: TxEip4844WithSidecar { tx: transaction, sidecar }, hash, signature })
+        let (transaction, signature, hash) =
+            TxEip4844WithSidecar::rlp_decode_signed(data)?.into_parts();
+        Ok(Self(Signed::new_unchecked(transaction, signature, hash)))
     }
 }
 
-/// Generates a [`BlobTransactionSidecar`] structure containing blobs, commitments, and proofs.
-#[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))]
-pub fn generate_blob_sidecar(blobs: Vec<Blob>) -> BlobTransactionSidecar {
-    use alloy_eips::eip4844::env_settings::EnvKzgSettings;
-    use c_kzg::{KzgCommitment, KzgProof};
-
-    let kzg_settings = EnvKzgSettings::Default;
-
-    let commitments: Vec<Bytes48> = blobs
-        .iter()
-        .map(|blob| {
-            KzgCommitment::blob_to_kzg_commitment(&blob.clone(), kzg_settings.get()).unwrap()
-        })
-        .map(|commitment| commitment.to_bytes())
-        .collect();
-
-    let proofs: Vec<Bytes48> = blobs
-        .iter()
-        .zip(commitments.iter())
-        .map(|(blob, commitment)| {
-            KzgProof::compute_blob_kzg_proof(blob, commitment, kzg_settings.get()).unwrap()
-        })
-        .map(|proof| proof.to_bytes())
-        .collect();
-
-    BlobTransactionSidecar::from_kzg(blobs, commitments, proofs)
+impl InMemorySize for BlobTransaction {
+    fn size(&self) -> usize {
+        // TODO(mattsse): replace with next alloy bump
+        self.0.hash().size() +
+            self.0.signature().size() +
+            self.0.tx().tx().size() +
+            self.0.tx().sidecar.size()
+    }
 }
 
 #[cfg(all(test, feature = "c-kzg"))]
 mod tests {
     use super::*;
     use crate::{kzg::Blob, PooledTransactionsElement};
+    use alloc::vec::Vec;
     use alloy_eips::{
         eip2718::{Decodable2718, Encodable2718},
         eip4844::Bytes48,
     };
     use alloy_primitives::hex;
-    use alloy_rlp::Encodable;
     use std::{fs, path::PathBuf, str::FromStr};
 
     #[test]
@@ -310,7 +104,7 @@ mod tests {
             .unwrap()];
 
         // Generate a BlobTransactionSidecar from the blobs
-        let sidecar = generate_blob_sidecar(blobs);
+        let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap();
 
         // Assert commitment equality
         assert_eq!(
@@ -359,7 +153,7 @@ mod tests {
         }
 
         // Generate a BlobTransactionSidecar from the blobs
-        let sidecar = generate_blob_sidecar(blobs.clone());
+        let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap();
 
         // Assert sidecar size
         assert_eq!(sidecar.size(), 524672);
@@ -384,13 +178,13 @@ mod tests {
             .unwrap()];
 
         // Generate a BlobTransactionSidecar from the blobs
-        let sidecar = generate_blob_sidecar(blobs);
+        let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap();
 
         // Create a vector to store the encoded RLP
         let mut encoded_rlp = Vec::new();
 
         // Encode the inner data of the BlobTransactionSidecar into RLP
-        sidecar.encode(&mut encoded_rlp);
+        sidecar.rlp_encode_fields(&mut encoded_rlp);
 
         // Assert the equality between the expected RLP from the JSON and the encoded RLP
         assert_eq!(json_value.get("rlp").unwrap().as_str().unwrap(), hex::encode(&encoded_rlp));
@@ -415,16 +209,17 @@ mod tests {
             .unwrap()];
 
         // Generate a BlobTransactionSidecar from the blobs
-        let sidecar = generate_blob_sidecar(blobs);
+        let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap();
 
         // Create a vector to store the encoded RLP
         let mut encoded_rlp = Vec::new();
 
         // Encode the inner data of the BlobTransactionSidecar into RLP
-        sidecar.encode(&mut encoded_rlp);
+        sidecar.rlp_encode_fields(&mut encoded_rlp);
 
         // Decode the RLP-encoded data back into a BlobTransactionSidecar
-        let decoded_sidecar = BlobTransactionSidecar::decode(&mut encoded_rlp.as_slice()).unwrap();
+        let decoded_sidecar =
+            BlobTransactionSidecar::rlp_decode_fields(&mut encoded_rlp.as_slice()).unwrap();
 
         // Assert the equality between the original BlobTransactionSidecar and the decoded one
         assert_eq!(sidecar, decoded_sidecar);
diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs
index 39c0f92fda8..6056266ae0f 100644
--- a/crates/primitives/src/transaction/signature.rs
+++ b/crates/primitives/src/transaction/signature.rs
@@ -1,11 +1,7 @@
 use crate::transaction::util::secp256k1;
-use alloy_primitives::{Address, Parity, B256, U256};
-use alloy_rlp::{Decodable, Error as RlpError};
-
-pub use alloy_primitives::Signature;
-
-#[cfg(feature = "optimism")]
-use reth_optimism_chainspec::optimism_deposit_tx_signature;
+use alloy_consensus::transaction::from_eip155_value;
+use alloy_primitives::{Address, PrimitiveSignature as Signature, B256, U256};
+use alloy_rlp::Decodable;
 
 /// The order of the secp256k1 curve, divided by two. Signatures that should be checked according
 /// to EIP-2 should have an S value less than or equal to this.
@@ -19,25 +15,23 @@ const SECP256K1N_HALF: U256 = U256::from_be_bytes([
 pub(crate) fn decode_with_eip155_chain_id(
     buf: &mut &[u8],
 ) -> alloy_rlp::Result<(Signature, Option<u64>)> {
-    let v: Parity = Decodable::decode(buf)?;
+    let v = Decodable::decode(buf)?;
     let r: U256 = Decodable::decode(buf)?;
     let s: U256 = Decodable::decode(buf)?;
 
-    #[cfg(not(feature = "optimism"))]
-    if matches!(v, Parity::Parity(_)) {
-        return Err(alloy_rlp::Error::Custom("invalid parity for legacy transaction"));
-    }
-
-    #[cfg(feature = "optimism")]
-    // pre bedrock system transactions were sent from the zero address as legacy
-    // transactions with an empty signature
-    //
-    // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock
-    if matches!(v, Parity::Parity(false)) && r.is_zero() && s.is_zero() {
-        return Ok((Signature::new(r, s, Parity::Parity(false)), None))
-    }
+    let Some((parity, chain_id)) = from_eip155_value(v) else {
+        // pre bedrock system transactions were sent from the zero address as legacy
+        // transactions with an empty signature
+        //
+        // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock
+        #[cfg(feature = "optimism")]
+        if v == 0 && r.is_zero() && s.is_zero() {
+            return Ok((Signature::new(r, s, false), None))
+        }
+        return Err(alloy_rlp::Error::Custom("invalid parity for legacy transaction"))
+    };
 
-    Ok((Signature::new(r, s, v), v.chain_id()))
+    Ok((Signature::new(r, s, parity), chain_id))
 }
 
 /// Recover signer from message hash, _without ensuring that the signature has a low `s`
@@ -51,7 +45,7 @@ pub fn recover_signer_unchecked(signature: &Signature, hash: B256) -> Option<Address> {
     sig[0..32].copy_from_slice(&signature.r().to_be_bytes::<32>());
     sig[32..64].copy_from_slice(&signature.s().to_be_bytes::<32>());
-    sig[64] = signature.v().y_parity_byte();
+    sig[64] = signature.v() as u8;
 
     // NOTE: we are removing error from underlying crypto library as it will restrain primitive
     // errors and we care only if recovery is passing or not.
@@ -71,71 +65,16 @@ pub fn recover_signer(signature: &Signature, hash: B256) -> Option<Address> {
         recover_signer_unchecked(signature, hash)
 }
 
-/// Returns [Parity] value based on `chain_id` for legacy transaction signature.
-#[allow(clippy::missing_const_for_fn)]
-pub fn legacy_parity(signature: &Signature, chain_id: Option<u64>) -> Parity {
-    if let Some(chain_id) = chain_id {
-        Parity::Parity(signature.v().y_parity()).with_chain_id(chain_id)
-    } else {
-        #[cfg(feature = "optimism")]
-        // pre bedrock system transactions were sent from the zero address as legacy
-        // transactions with an empty signature
-        //
-        // NOTE: this is very hacky and only relevant for op-mainnet pre bedrock
-        if *signature == optimism_deposit_tx_signature() {
-            return Parity::Parity(false)
-        }
-        Parity::NonEip155(signature.v().y_parity())
-    }
-}
-
-/// Returns a signature with the given chain ID applied to the `v` value.
-pub(crate) fn with_eip155_parity(signature: &Signature, chain_id: Option<u64>) -> Signature {
-    Signature::new(signature.r(), signature.s(), legacy_parity(signature, chain_id))
-}
-
-/// Outputs (`odd_y_parity`, `chain_id`) from the `v` value.
-/// This doesn't check validity of the `v` value for optimism.
-#[inline]
-pub const fn extract_chain_id(v: u64) -> alloy_rlp::Result<(bool, Option<u64>)> {
-    if v < 35 {
-        // non-EIP-155 legacy scheme, v = 27 for even y-parity, v = 28 for odd y-parity
-        if v != 27 && v != 28 {
-            return Err(RlpError::Custom("invalid Ethereum signature (V is not 27 or 28)"))
-        }
-        Ok((v == 28, None))
-    } else {
-        // EIP-155: v = {0, 1} + CHAIN_ID * 2 + 35
-        let odd_y_parity = ((v - 35) % 2) != 0;
-        let chain_id = (v - 35) >> 1;
-        Ok((odd_y_parity, Some(chain_id)))
-    }
-}
-
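// The EIP-155 arithmetic that `from_eip155_value` now performs in place of the
// removed `extract_chain_id`, spelled out as an illustrative helper (not part
// of the diff): v = 27/28 encodes a bare y-parity, while
// v = 35 + chain_id * 2 + {0, 1} folds a chain id into the parity.
const fn eip155_parts(v: u64) -> Option<(bool, Option<u64>)> {
    match v {
        27 => Some((false, None)),
        28 => Some((true, None)),
        // e.g. mainnet (chain_id = 1): v = 37 -> even parity, v = 38 -> odd parity
        v if v >= 35 => Some((((v - 35) % 2) != 0, Some((v - 35) / 2))),
        _ => None,
    }
}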
 #[cfg(test)]
 mod tests {
-    use crate::{
-        transaction::signature::{
-            legacy_parity, recover_signer, recover_signer_unchecked, SECP256K1N_HALF,
-        },
-        Signature,
+    use crate::transaction::signature::{
+        recover_signer, recover_signer_unchecked, SECP256K1N_HALF,
     };
     use alloy_eips::eip2718::Decodable2718;
-    use alloy_primitives::{hex, Address, Parity, B256, U256};
+    use alloy_primitives::{hex, Address, PrimitiveSignature as Signature, B256, U256};
+    use reth_primitives_traits::SignedTransaction;
     use std::str::FromStr;
 
-    #[test]
-    fn test_legacy_parity() {
-        // Select 1 as an arbitrary nonzero value for R and S, as v() always returns 0 for (0, 0).
-        let signature = Signature::new(U256::from(1), U256::from(1), Parity::Parity(false));
-        assert_eq!(Parity::NonEip155(false), legacy_parity(&signature, None));
-        assert_eq!(Parity::Eip155(37), legacy_parity(&signature, Some(1)));
-
-        let signature = Signature::new(U256::from(1), U256::from(1), Parity::Parity(true));
-        assert_eq!(Parity::NonEip155(true), legacy_parity(&signature, None));
-        assert_eq!(Parity::Eip155(38), legacy_parity(&signature, Some(1)));
-    }
-
     #[test]
     fn test_recover_signer() {
         let signature = Signature::new(
@@ -147,7 +86,7 @@ mod tests {
             "46948507304638947509940763649030358759909902576025900602547168820602576006531",
             )
             .unwrap(),
-            Parity::Parity(false),
+            false,
         );
         let hash =
             B256::from_str("daf5a779ae972f972197303d7b574746c7ef83eadac0f2791ad23db92e4c8e53")
diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs
index c55e0d3c619..d0a4786dcf1 100644
--- a/crates/primitives/src/transaction/tx_type.rs
+++ b/crates/primitives/src/transaction/tx_type.rs
@@ -1,74 +1,78 @@
+use alloy_consensus::{
+    constants::{
+        EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID,
+        LEGACY_TX_TYPE_ID,
+    },
+    Typed2718,
+};
 use alloy_primitives::{U64, U8};
 use alloy_rlp::{Decodable, Encodable};
+use derive_more::Display;
+use reth_primitives_traits::InMemorySize;
 use serde::{Deserialize, Serialize};
 
-#[cfg(test)]
-use reth_codecs::Compact;
-
 /// Identifier parameter for legacy transaction
 #[cfg(any(test, feature = "reth-codec"))]
-pub(crate) const COMPACT_IDENTIFIER_LEGACY: usize = 0;
+pub const COMPACT_IDENTIFIER_LEGACY: usize = 0;
 
 /// Identifier parameter for EIP-2930 transaction
 #[cfg(any(test, feature = "reth-codec"))]
-pub(crate) const COMPACT_IDENTIFIER_EIP2930: usize = 1;
+pub const COMPACT_IDENTIFIER_EIP2930: usize = 1;
 
 /// Identifier parameter for EIP-1559 transaction
 #[cfg(any(test, feature = "reth-codec"))]
-pub(crate) const COMPACT_IDENTIFIER_EIP1559: usize = 2;
+pub const COMPACT_IDENTIFIER_EIP1559: usize = 2;
 
 /// For backwards compatibility purposes only 2 bits of the type are encoded in the identifier
 /// parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type is
 /// read from the buffer as a single byte.
 #[cfg(any(test, feature = "reth-codec"))]
-pub(crate) const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3;
-
-/// Identifier for legacy transaction, however [`TxLegacy`](alloy_consensus::TxLegacy) this is
-/// technically not typed.
-pub const LEGACY_TX_TYPE_ID: u8 = 0;
-
-/// Identifier for [`TxEip2930`](alloy_consensus::TxEip2930) transaction.
-pub const EIP2930_TX_TYPE_ID: u8 = 1;
-
-/// Identifier for [`TxEip1559`](alloy_consensus::TxEip1559) transaction.
-pub const EIP1559_TX_TYPE_ID: u8 = 2;
-
-/// Identifier for [`TxEip4844`](alloy_consensus::TxEip4844) transaction.
-pub const EIP4844_TX_TYPE_ID: u8 = 3;
-
-/// Identifier for [`TxEip7702`](alloy_consensus::TxEip7702) transaction.
-pub const EIP7702_TX_TYPE_ID: u8 = 4;
-
-/// Identifier for [`TxDeposit`](op_alloy_consensus::TxDeposit) transaction.
-#[cfg(feature = "optimism")]
-pub const DEPOSIT_TX_TYPE_ID: u8 = 126;
+pub const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3;
 
 /// Transaction Type
 ///
 /// Currently being used as 2-bit type when encoding it to `reth_codecs::Compact` on
-/// [`crate::TransactionSignedNoHash`]. Adding more transaction types will break the codec and
+/// [`crate::TransactionSigned`]. Adding more transaction types will break the codec and
 /// database format.
 ///
 /// Other required changes when adding a new type can be seen on [PR#3953](https://github.com/paradigmxyz/reth/pull/3953/files).
 #[derive(
-    Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Serialize, Deserialize, Hash,
+    Clone,
+    Copy,
+    Debug,
+    PartialEq,
+    Eq,
+    PartialOrd,
+    Ord,
+    Default,
+    Serialize,
+    Deserialize,
+    Hash,
+    Display,
 )]
 #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))]
 #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
+#[display("tx type: {_variant}")]
 pub enum TxType {
     /// Legacy transaction pre EIP-2929
     #[default]
+    #[display("legacy (0)")]
     Legacy = 0_isize,
     /// AccessList transaction
+    #[display("eip2930 (1)")]
     Eip2930 = 1_isize,
     /// Transaction with Priority fee
+    #[display("eip1559 (2)")]
     Eip1559 = 2_isize,
     /// Shard Blob Transactions - EIP-4844
+    #[display("eip4844 (3)")]
     Eip4844 = 3_isize,
     /// EOA Contract Code Transactions - EIP-7702
+    #[display("eip7702 (4)")]
     Eip7702 = 4_isize,
     /// Optimism Deposit transaction.
     #[cfg(feature = "optimism")]
+    #[display("deposit (126)")]
     Deposit = 126_isize,
 }
 
@@ -87,6 +91,22 @@ impl TxType {
     }
 }
 
+impl Typed2718 for TxType {
+    fn ty(&self) -> u8 {
+        (*self).into()
+    }
+}
+
+impl reth_primitives_traits::TxType for TxType {}
+
+impl InMemorySize for TxType {
+    /// Calculates a heuristic for the in-memory size of the [`TxType`].
+    #[inline]
+    fn size(&self) -> usize {
+        core::mem::size_of::<Self>()
+    }
+}
+
 impl From<TxType> for u8 {
     fn from(value: TxType) -> Self {
         match value {
@@ -96,7 +116,7 @@ impl From<TxType> for u8 {
             TxType::Eip4844 => EIP4844_TX_TYPE_ID,
             TxType::Eip7702 => EIP7702_TX_TYPE_ID,
             #[cfg(feature = "optimism")]
-            TxType::Deposit => DEPOSIT_TX_TYPE_ID,
+            TxType::Deposit => op_alloy_consensus::DEPOSIT_TX_TYPE_ID,
         }
     }
 }
@@ -155,6 +175,8 @@ impl reth_codecs::Compact for TxType {
     where
         B: bytes::BufMut + AsMut<[u8]>,
     {
+        use reth_codecs::txtype::*;
+
         match self {
             Self::Legacy => COMPACT_IDENTIFIER_LEGACY,
             Self::Eip2930 => COMPACT_IDENTIFIER_EIP2930,
@@ -169,7 +191,7 @@ impl reth_codecs::Compact for TxType {
             }
             #[cfg(feature = "optimism")]
             Self::Deposit => {
-                buf.put_u8(DEPOSIT_TX_TYPE_ID);
+                buf.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID);
                 COMPACT_EXTENDED_IDENTIFIER_FLAG
             }
         }
@@ -182,16 +204,16 @@ impl reth_codecs::Compact for TxType {
         use bytes::Buf;
         (
             match identifier {
-                COMPACT_IDENTIFIER_LEGACY => Self::Legacy,
-                COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930,
-                COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559,
-                COMPACT_EXTENDED_IDENTIFIER_FLAG => {
+                reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => Self::Legacy,
+                reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930,
+                reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559,
+                reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => {
                     let extended_identifier = buf.get_u8();
                     match extended_identifier {
                         EIP4844_TX_TYPE_ID => Self::Eip4844,
                         EIP7702_TX_TYPE_ID => Self::Eip7702,
                         #[cfg(feature = "optimism")]
-                        DEPOSIT_TX_TYPE_ID => Self::Deposit,
+                        op_alloy_consensus::DEPOSIT_TX_TYPE_ID => Self::Deposit,
                         _ => panic!("Unsupported TxType identifier: {extended_identifier}"),
                     }
                 }
@@ -232,128 +254,84 @@ impl Decodable for TxType {
     }
 }
 
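// The two-bit compact scheme above, illustrated: identifiers 0..=2 map directly
// to Legacy/Eip2930/Eip1559, while COMPACT_EXTENDED_IDENTIFIER_FLAG (3) signals
// that the real type id follows as one extra byte in the buffer. Hypothetical
// helper, assuming the `Compact` impl from this file:
fn compact_shape(tx_type: TxType) -> (usize, Vec<u8>) {
    use reth_codecs::Compact;
    let mut buf = Vec::new();
    let identifier = tx_type.to_compact(&mut buf);
    // e.g. TxType::Eip1559 -> (2, []), TxType::Eip4844 -> (3, [0x03])
    (identifier, buf)
}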
-impl From<alloy_consensus::TxType> for TxType {
-    fn from(value: alloy_consensus::TxType) -> Self {
-        match value {
-            alloy_consensus::TxType::Legacy => Self::Legacy,
-            alloy_consensus::TxType::Eip2930 => Self::Eip2930,
-            alloy_consensus::TxType::Eip1559 => Self::Eip1559,
-            alloy_consensus::TxType::Eip4844 => Self::Eip4844,
-            alloy_consensus::TxType::Eip7702 => Self::Eip7702,
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
+    use super::*;
     use alloy_primitives::hex;
-    use rand::Rng;
     use reth_codecs::Compact;
-
-    use super::*;
+    use reth_primitives_traits::TxType as _;
+    use rstest::rstest;
 
     #[test]
-    fn test_u64_to_tx_type() {
-        // Test for Legacy transaction
-        assert_eq!(TxType::try_from(U64::from(LEGACY_TX_TYPE_ID)).unwrap(), TxType::Legacy);
-
-        // Test for EIP2930 transaction
-        assert_eq!(TxType::try_from(U64::from(EIP2930_TX_TYPE_ID)).unwrap(), TxType::Eip2930);
-
-        // Test for EIP1559 transaction
-        assert_eq!(TxType::try_from(U64::from(EIP1559_TX_TYPE_ID)).unwrap(), TxType::Eip1559);
-
-        // Test for EIP4844 transaction
-        assert_eq!(TxType::try_from(U64::from(EIP4844_TX_TYPE_ID)).unwrap(), TxType::Eip4844);
-
-        // Test for EIP7702 transaction
-        assert_eq!(TxType::try_from(U64::from(EIP7702_TX_TYPE_ID)).unwrap(), TxType::Eip7702);
-
-        // Test for Deposit transaction
-        #[cfg(feature = "optimism")]
-        assert_eq!(TxType::try_from(U64::from(DEPOSIT_TX_TYPE_ID)).unwrap(), TxType::Deposit);
-
-        // For transactions with unsupported values
-        assert!(TxType::try_from(U64::from(EIP7702_TX_TYPE_ID + 1)).is_err());
+    fn is_broadcastable() {
+        assert!(TxType::Legacy.is_broadcastable_in_full());
+        assert!(TxType::Eip1559.is_broadcastable_in_full());
+        assert!(!TxType::Eip4844.is_broadcastable_in_full());
     }
 
-    #[test]
-    fn test_txtype_to_compat() {
-        let cases = vec![
-            (TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![]),
-            (TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![]),
-            (TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![]),
-            (TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID]),
-            (TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID]),
-            #[cfg(feature = "optimism")]
-            (TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]),
-        ];
-
-        for (tx_type, expected_identifier, expected_buf) in cases {
-            let mut buf = vec![];
-            let identifier = tx_type.to_compact(&mut buf);
-            assert_eq!(
-                identifier, expected_identifier,
-                "Unexpected identifier for TxType {tx_type:?}",
-            );
-            assert_eq!(buf, expected_buf, "Unexpected buffer for TxType {tx_type:?}");
-        }
+    #[rstest]
+    #[case(U64::from(LEGACY_TX_TYPE_ID), Ok(TxType::Legacy))]
+    #[case(U64::from(EIP2930_TX_TYPE_ID), Ok(TxType::Eip2930))]
+    #[case(U64::from(EIP1559_TX_TYPE_ID), Ok(TxType::Eip1559))]
+    #[case(U64::from(EIP4844_TX_TYPE_ID), Ok(TxType::Eip4844))]
+    #[case(U64::from(EIP7702_TX_TYPE_ID), Ok(TxType::Eip7702))]
+    #[cfg_attr(
+        feature = "optimism",
+        case(U64::from(op_alloy_consensus::DEPOSIT_TX_TYPE_ID), Ok(TxType::Deposit))
+    )]
+    #[case(U64::MAX, Err("invalid tx type"))]
+    fn test_u64_to_tx_type(#[case] input: U64, #[case] expected: Result<TxType, &'static str>) {
+        let tx_type_result = TxType::try_from(input);
+        assert_eq!(tx_type_result, expected);
     }
 
-    #[test]
-    fn test_txtype_from_compact() {
-        let cases = vec![
-            (TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![]),
-            (TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![]),
-            (TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![]),
-            (TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID]),
-            (TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID]),
-            #[cfg(feature = "optimism")]
-            (TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![DEPOSIT_TX_TYPE_ID]),
-        ];
-
-        for (expected_type, identifier, buf) in cases {
-            let (actual_type, remaining_buf) = TxType::from_compact(&buf, identifier);
-            assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}",);
-            assert!(
-                remaining_buf.is_empty(),
-                "Buffer not fully consumed for identifier {identifier}",
-            );
-        }
+    #[rstest]
+    #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])]
+    #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])]
+    #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])]
+    #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])]
+    #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])]
+    #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![op_alloy_consensus::DEPOSIT_TX_TYPE_ID]))]
+    fn test_txtype_to_compact(
+        #[case] tx_type: TxType,
+        #[case] expected_identifier: usize,
+        #[case] expected_buf: Vec<u8>,
+    ) {
+        let mut buf = vec![];
+        let identifier = tx_type.to_compact(&mut buf);
+
+        assert_eq!(identifier, expected_identifier, "Unexpected identifier for TxType {tx_type:?}",);
+        assert_eq!(buf, expected_buf, "Unexpected buffer for TxType {tx_type:?}",);
     }
 
-    #[test]
-    fn decode_tx_type() {
-        // Test for Legacy transaction
-        let tx_type = TxType::decode(&mut &hex!("80")[..]).unwrap();
-        assert_eq!(tx_type, TxType::Legacy);
-
-        // Test for EIP2930 transaction
-        let tx_type = TxType::decode(&mut &[EIP2930_TX_TYPE_ID][..]).unwrap();
-        assert_eq!(tx_type, TxType::Eip2930);
-
-        // Test for EIP1559 transaction
-        let tx_type = TxType::decode(&mut &[EIP1559_TX_TYPE_ID][..]).unwrap();
-        assert_eq!(tx_type, TxType::Eip1559);
-
-        // Test for EIP4844 transaction
-        let tx_type = TxType::decode(&mut &[EIP4844_TX_TYPE_ID][..]).unwrap();
-        assert_eq!(tx_type, TxType::Eip4844);
-
-        // Test for EIP7702 transaction
-        let tx_type = TxType::decode(&mut &[EIP7702_TX_TYPE_ID][..]).unwrap();
-        assert_eq!(tx_type, TxType::Eip7702);
-
-        // Test random byte not in range
-        let buf = [rand::thread_rng().gen_range(EIP7702_TX_TYPE_ID + 1..=u8::MAX)];
-        assert!(TxType::decode(&mut &buf[..]).is_err());
+    #[rstest]
+    #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])]
+    #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])]
+    #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])]
+    #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])]
+    #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])]
+    #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![op_alloy_consensus::DEPOSIT_TX_TYPE_ID]))]
+    fn test_txtype_from_compact(
+        #[case] expected_type: TxType,
+        #[case] identifier: usize,
+        #[case] buf: Vec<u8>,
+    ) {
+        let (actual_type, remaining_buf) = TxType::from_compact(&buf, identifier);
+
+        assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}");
+        assert!(remaining_buf.is_empty(), "Buffer not fully consumed for identifier {identifier}");
     }
 
-    // Test for Deposit transaction
-    #[cfg(feature = "optimism")]
-    {
-        let buf = [DEPOSIT_TX_TYPE_ID];
-        let tx_type = TxType::decode(&mut &buf[..]).unwrap();
-        assert_eq!(tx_type, TxType::Deposit);
-    }
+    #[rstest]
+    #[case(&hex!("80"), Ok(TxType::Legacy))]
+    #[case(&[EIP2930_TX_TYPE_ID], Ok(TxType::Eip2930))]
+    #[case(&[EIP1559_TX_TYPE_ID], Ok(TxType::Eip1559))]
+    #[case(&[EIP4844_TX_TYPE_ID], Ok(TxType::Eip4844))]
+    #[case(&[EIP7702_TX_TYPE_ID], Ok(TxType::Eip7702))]
+    #[case(&[u8::MAX], Err(alloy_rlp::Error::InputTooShort))]
+    #[cfg_attr(feature = "optimism", case(&[op_alloy_consensus::DEPOSIT_TX_TYPE_ID], Ok(TxType::Deposit)))]
+    fn decode_tx_type(#[case] input: &[u8], #[case] expected: Result<TxType, alloy_rlp::Error>) {
+        let tx_type_result = TxType::decode(&mut &input[..]);
+        assert_eq!(tx_type_result, expected)
+    }
 }
diff --git a/crates/primitives/src/transaction/util.rs b/crates/primitives/src/transaction/util.rs
index 7569400e94b..8eb1a639d96 100644
--- a/crates/primitives/src/transaction/util.rs
+++ b/crates/primitives/src/transaction/util.rs
@@ -1,8 +1,10 @@
-use crate::Signature;
-use alloy_primitives::Address;
+//! Utility functions for signature.
+use alloy_primitives::{Address, PrimitiveSignature as Signature};
+
+/// Secp256k1 utility functions.
 #[cfg(feature = "secp256k1")]
-pub(crate) mod secp256k1 {
+pub mod secp256k1 {
     pub use super::impl_secp256k1::*;
 }
 
@@ -19,7 +21,7 @@ mod impl_secp256k1 {
         ecdsa::{RecoverableSignature, RecoveryId},
         Message, PublicKey, SecretKey, SECP256K1,
     };
-    use alloy_primitives::{keccak256, Parity, B256, U256};
+    use alloy_primitives::{keccak256, B256, U256};
 
     /// Recovers the address of the sender using secp256k1 pubkey recovery.
     ///
@@ -45,7 +47,7 @@ mod impl_secp256k1 {
         let signature = Signature::new(
             U256::try_from_be_slice(&data[..32]).expect("The slice has at most 32 bytes"),
             U256::try_from_be_slice(&data[32..64]).expect("The slice has at most 32 bytes"),
-            Parity::Parity(rec_id.to_i32() != 0),
+            rec_id.to_i32() != 0,
         );
         Ok(signature)
     }
@@ -63,7 +65,7 @@ mod impl_secp256k1 {
 #[cfg_attr(feature = "secp256k1", allow(unused, unreachable_pub))]
 mod impl_k256 {
     use super::*;
-    use alloy_primitives::{keccak256, Parity, B256, U256};
+    use alloy_primitives::{keccak256, B256};
     pub(crate) use k256::ecdsa::Error;
     use k256::ecdsa::{RecoveryId, SigningKey, VerifyingKey};
 
@@ -93,15 +95,7 @@ mod impl_k256 {
     /// Returns the corresponding signature.
     pub fn sign_message(secret: B256, message: B256) -> Result<Signature, Error> {
         let sec = SigningKey::from_slice(secret.as_ref())?;
-        let (sig, rec_id) = sec.sign_prehash_recoverable(&message.0)?;
-        let (r, s) = sig.split_bytes();
-
-        let signature = Signature::new(
-            U256::try_from_be_slice(&r).expect("The slice has at most 32 bytes"),
-            U256::try_from_be_slice(&s).expect("The slice has at most 32 bytes"),
-            Parity::Parity(rec_id.is_y_odd()),
-        );
-        Ok(signature)
+        sec.sign_prehash_recoverable(&message.0).map(Into::into)
     }
 
     /// Converts a public key into an ethereum address by hashing the encoded public key with
@@ -132,7 +126,7 @@ mod tests {
         let mut sig: [u8; 65] = [0; 65];
         sig[0..32].copy_from_slice(&signature.r().to_be_bytes::<32>());
         sig[32..64].copy_from_slice(&signature.s().to_be_bytes::<32>());
-        sig[64] = signature.v().y_parity_byte();
+        sig[64] = signature.v() as u8;
 
         assert_eq!(recover_signer_unchecked(&sig, &hash), Ok(signer));
     }
@@ -190,14 +184,14 @@ mod tests {
 
         sig[0..32].copy_from_slice(&secp256k1_signature.r().to_be_bytes::<32>());
         sig[32..64].copy_from_slice(&secp256k1_signature.s().to_be_bytes::<32>());
-        sig[64] = secp256k1_signature.v().y_parity_byte();
+        sig[64] = secp256k1_signature.v() as u8;
         let secp256k1_recovered =
             impl_secp256k1::recover_signer_unchecked(&sig, &hash).expect("secp256k1 recover");
         assert_eq!(secp256k1_recovered, secp256k1_signer);
 
         sig[0..32].copy_from_slice(&k256_signature.r().to_be_bytes::<32>());
         sig[32..64].copy_from_slice(&k256_signature.s().to_be_bytes::<32>());
-        sig[64] = k256_signature.v().y_parity_byte();
+        sig[64] = k256_signature.v() as u8;
         let k256_recovered =
             impl_k256::recover_signer_unchecked(&sig, &hash).expect("k256 recover");
         assert_eq!(k256_recovered, k256_signer);
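Both recovery backends above consume the same 65-byte recoverable-signature layout, now built from a plain boolean parity rather than `Parity`. A minimal sketch of the packing, mirroring the test code in this diff; illustrative only:

    use alloy_primitives::PrimitiveSignature as Signature;

    // r (32 bytes, big endian) || s (32 bytes, big endian) || y-parity (1 byte);
    // this is the layout `recover_signer_unchecked` expects.
    fn pack_signature(signature: &Signature) -> [u8; 65] {
        let mut sig = [0u8; 65];
        sig[0..32].copy_from_slice(&signature.r().to_be_bytes::<32>());
        sig[32..64].copy_from_slice(&signature.s().to_be_bytes::<32>());
        // `v()` is the boolean y-parity in the new API; the cast yields 0 or 1.
        sig[64] = signature.v() as u8;
        sig
    }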
diff --git a/crates/primitives/src/transaction/variant.rs b/crates/primitives/src/transaction/variant.rs
deleted file mode 100644
index 888c83946ca..00000000000
--- a/crates/primitives/src/transaction/variant.rs
+++ /dev/null
@@ -1,145 +0,0 @@
-//! Helper enum functions for `Transaction`, `TransactionSigned` and
-//! `TransactionSignedEcRecovered`
-
-use crate::{
-    Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash,
-};
-use alloy_primitives::{Address, B256};
-use core::ops::Deref;
-
-/// Represents various different transaction formats used in reth.
-///
-/// All variants are based on a the raw [Transaction] data and can contain additional information
-/// extracted (expensive) from that transaction, like the hash and the signer.
-#[derive(Debug, Clone, PartialEq, Eq, Hash, derive_more::From)]
-pub enum TransactionSignedVariant {
-    /// A signed transaction without a hash.
-    SignedNoHash(TransactionSignedNoHash),
-    /// Contains the plain transaction data its signature and hash.
-    Signed(TransactionSigned),
-    /// Contains the plain transaction data its signature and hash and the successfully recovered
-    /// signer.
-    SignedEcRecovered(TransactionSignedEcRecovered),
-}
-
-impl TransactionSignedVariant {
-    /// Returns the raw transaction object
-    pub const fn as_raw(&self) -> &Transaction {
-        match self {
-            Self::SignedNoHash(tx) => &tx.transaction,
-            Self::Signed(tx) => &tx.transaction,
-            Self::SignedEcRecovered(tx) => &tx.signed_transaction.transaction,
-        }
-    }
-
-    /// Returns the hash of the transaction
-    pub fn hash(&self) -> B256 {
-        match self {
-            Self::SignedNoHash(tx) => tx.hash(),
-            Self::Signed(tx) => tx.hash,
-            Self::SignedEcRecovered(tx) => tx.hash,
-        }
-    }
-
-    /// Returns the signer of the transaction.
-    ///
-    /// If the transaction is of not of [`TransactionSignedEcRecovered`] it will be recovered.
-    pub fn signer(&self) -> Option<Address> {
-        match self {
-            Self::SignedNoHash(tx) => tx.recover_signer(),
-            Self::Signed(tx) => tx.recover_signer(),
-            Self::SignedEcRecovered(tx) => Some(tx.signer),
-        }
-    }
-
-    /// Returns [`TransactionSigned`] type
-    /// else None
-    pub const fn as_signed(&self) -> Option<&TransactionSigned> {
-        match self {
-            Self::Signed(tx) => Some(tx),
-            _ => None,
-        }
-    }
-
-    /// Returns `TransactionSignedEcRecovered` type
-    /// else None
-    pub const fn as_signed_ec_recovered(&self) -> Option<&TransactionSignedEcRecovered> {
-        match self {
-            Self::SignedEcRecovered(tx) => Some(tx),
-            _ => None,
-        }
-    }
-
-    /// Returns true if the transaction is of [`TransactionSigned`] variant
-    pub const fn is_signed(&self) -> bool {
-        matches!(self, Self::Signed(_))
-    }
-
-    /// Returns true if the transaction is of [`TransactionSignedNoHash`] variant
-    pub const fn is_signed_no_hash(&self) -> bool {
-        matches!(self, Self::SignedNoHash(_))
-    }
-
-    /// Returns true if the transaction is of [`TransactionSignedEcRecovered`] variant
-    pub const fn is_signed_ec_recovered(&self) -> bool {
-        matches!(self, Self::SignedEcRecovered(_))
-    }
-
-    /// Consumes the [`TransactionSignedVariant`] and returns the consumed [Transaction]
-    pub fn into_raw(self) -> Transaction {
-        match self {
-            Self::SignedNoHash(tx) => tx.transaction,
-            Self::Signed(tx) => tx.transaction,
-            Self::SignedEcRecovered(tx) => tx.signed_transaction.transaction,
-        }
-    }
-
-    /// Consumes the [`TransactionSignedVariant`] and returns the consumed [`TransactionSigned`]
-    pub fn into_signed(self) -> TransactionSigned {
-        match self {
-            Self::SignedNoHash(tx) => tx.with_hash(),
-            Self::Signed(tx) => tx,
-            Self::SignedEcRecovered(tx) => tx.signed_transaction,
-        }
-    }
-
-    /// Consumes the [`TransactionSignedVariant`] and converts it into a
-    /// [`TransactionSignedEcRecovered`]
-    ///
-    /// If the variants is not a [`TransactionSignedEcRecovered`] it will recover the sender.
-    ///
-    /// Returns `None` if the transaction's signature is invalid
-    pub fn into_signed_ec_recovered(self) -> Option<TransactionSignedEcRecovered> {
-        self.try_into_signed_ec_recovered().ok()
-    }
-
-    /// Consumes the [`TransactionSignedVariant`] and converts it into a
-    /// [`TransactionSignedEcRecovered`]
-    ///
-    /// If the variants is not a [`TransactionSignedEcRecovered`] it will recover the sender.
-    ///
-    /// Returns an error if the transaction's signature is invalid.
-    pub fn try_into_signed_ec_recovered(
-        self,
-    ) -> Result<TransactionSignedEcRecovered, Self> {
-        match self {
-            Self::SignedEcRecovered(tx) => Ok(tx),
-            Self::Signed(tx) => tx.try_into_ecrecovered(),
-            Self::SignedNoHash(tx) => tx.with_hash().try_into_ecrecovered(),
-        }
-    }
-}
-
-impl AsRef<Transaction> for TransactionSignedVariant {
-    fn as_ref(&self) -> &Transaction {
-        self.as_raw()
-    }
-}
-
-impl Deref for TransactionSignedVariant {
-    type Target = Transaction;
-
-    fn deref(&self) -> &Self::Target {
-        self.as_raw()
-    }
-}
diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml
index 2f2a37d5ba6..f772ff54669 100644
--- a/crates/prune/prune/Cargo.toml
+++ b/crates/prune/prune/Cargo.toml
@@ -22,8 +22,13 @@ reth-provider.workspace = true
 reth-tokio-util.workspace = true
 reth-config.workspace = true
 reth-prune-types.workspace = true
+reth-primitives-traits.workspace = true
 reth-static-file-types.workspace = true
 
+# ethereum
+alloy-consensus.workspace = true
+alloy-eips.workspace = true
+
 # metrics
 reth-metrics.workspace = true
 metrics.workspace = true
@@ -41,6 +46,7 @@ rustc-hash.workspace = true
 # reth
 reth-db = { workspace = true, features = ["test-utils"] }
 reth-stages = { workspace = true, features = ["test-utils"] }
+reth-primitives-traits = { workspace = true, features = ["arbitrary"] }
 reth-testing-utils.workspace = true
 reth-tracing.workspace = true
diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs
index 71d73c41610..4fd56617121 100644
--- a/crates/prune/prune/src/builder.rs
+++ b/crates/prune/prune/src/builder.rs
@@ -1,11 +1,13 @@
 use crate::{segments::SegmentSet, Pruner};
+use alloy_eips::eip2718::Encodable2718;
 use reth_chainspec::MAINNET;
 use reth_config::PruneConfig;
-use reth_db::transaction::DbTxMut;
+use reth_db::{table::Value, transaction::DbTxMut};
 use reth_exex_types::FinishedExExHeight;
+use reth_primitives_traits::NodePrimitives;
 use reth_provider::{
     providers::StaticFileProvider, BlockReader, DBProvider, DatabaseProviderFactory,
-    PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider,
+    NodePrimitivesProvider, PruneCheckpointWriter, StaticFileProviderFactory,
 };
 use reth_prune_types::PruneModes;
 use std::time::Duration;
@@ -76,8 +78,15 @@ impl PrunerBuilder {
     /// Builds a [Pruner] from the current configuration with the given provider factory.
     pub fn build_with_provider_factory<PF>(self, provider_factory: PF) -> Pruner<PF::ProviderRW, PF>
     where
-        PF: DatabaseProviderFactory<ProviderRW: PruneCheckpointWriter + TransactionsProvider>
-            + StaticFileProviderFactory,
+        PF: DatabaseProviderFactory<
+                ProviderRW: PruneCheckpointWriter
+                                + BlockReader
+                                + StaticFileProviderFactory<
+                    Primitives: NodePrimitives<SignedTx: Encodable2718 + Value>,
+                >,
+            > + StaticFileProviderFactory<
+                Primitives = <PF::ProviderRW as StaticFileProviderFactory>::Primitives,
+            >,
     {
         let segments =
             SegmentSet::from_components(provider_factory.static_file_provider(), self.segments);
@@ -93,10 +102,15 @@ impl PrunerBuilder {
     }
 
     /// Builds a [Pruner] from the current configuration with the given static file provider.
-    pub fn build<Provider>(self, static_file_provider: StaticFileProvider) -> Pruner<Provider, ()>
+    pub fn build<Provider>(
+        self,
+        static_file_provider: StaticFileProvider<Provider::Primitives>,
+    ) -> Pruner<Provider, ()>
     where
-        Provider:
-            DBProvider + BlockReader + PruneCheckpointWriter + TransactionsProvider,
+        Provider: StaticFileProviderFactory<Primitives: NodePrimitives<SignedTx: Encodable2718 + Value>>
+            + DBProvider
+            + BlockReader
+            + PruneCheckpointWriter,
     {
         let segments = SegmentSet::<Provider>::from_components(static_file_provider, self.segments);
diff --git a/crates/prune/prune/src/db_ext.rs b/crates/prune/prune/src/db_ext.rs
index a14127af20e..143cb5e2775 100644
--- a/crates/prune/prune/src/db_ext.rs
+++ b/crates/prune/prune/src/db_ext.rs
@@ -1,12 +1,12 @@
 use std::{fmt::Debug, ops::RangeBounds};
 
+use crate::PruneLimiter;
 use reth_db::{
     cursor::{DbCursorRO, DbCursorRW, RangeWalker},
     table::{Table, TableRow},
     transaction::DbTxMut,
     DatabaseError,
 };
-use reth_prune_types::PruneLimiter;
 use tracing::debug;
 
 pub(crate) trait DbTxPruneExt: DbTxMut {
diff --git a/crates/prune/prune/src/event.rs b/crates/prune/prune/src/event.rs
deleted file mode 100644
index 95a90d7628c..00000000000
--- a/crates/prune/prune/src/event.rs
+++ /dev/null
@@ -1,16 +0,0 @@
-use alloy_primitives::BlockNumber;
-use reth_prune_types::{PruneProgress, PruneSegment};
-use std::time::Duration;
-
-/// An event emitted by a [Pruner][crate::Pruner].
-#[derive(Debug, PartialEq, Eq, Clone)]
-pub enum PrunerEvent {
-    /// Emitted when pruner started running.
-    Started { tip_block_number: BlockNumber },
-    /// Emitted when pruner finished running.
-    Finished {
-        tip_block_number: BlockNumber,
-        elapsed: Duration,
-        stats: Vec<(PruneSegment, usize, PruneProgress)>,
-    },
-}
diff --git a/crates/prune/prune/src/lib.rs b/crates/prune/prune/src/lib.rs
index 5a43afeb502..ef3ee0de2db 100644
--- a/crates/prune/prune/src/lib.rs
+++ b/crates/prune/prune/src/lib.rs
@@ -12,7 +12,7 @@
 mod builder;
 mod db_ext;
 mod error;
-mod event;
+mod limiter;
 mod metrics;
 mod pruner;
 pub mod segments;
@@ -20,7 +20,7 @@ pub mod segments;
 use crate::metrics::Metrics;
 pub use builder::PrunerBuilder;
 pub use error::PrunerError;
-pub use event::PrunerEvent;
+pub use limiter::PruneLimiter;
 pub use pruner::{Pruner, PrunerResult, PrunerWithFactory, PrunerWithResult};
 
 // Re-export prune types
diff --git a/crates/prune/types/src/limiter.rs b/crates/prune/prune/src/limiter.rs
similarity index 93%
rename from crates/prune/types/src/limiter.rs
rename to crates/prune/prune/src/limiter.rs
index 3a105994930..654eed04f28 100644
--- a/crates/prune/types/src/limiter.rs
+++ b/crates/prune/prune/src/limiter.rs
@@ -1,3 +1,4 @@
+use reth_prune_types::{PruneInterruptReason, PruneProgress};
 use std::{
     num::NonZeroUsize,
     time::{Duration, Instant},
@@ -78,7 +79,7 @@ impl PruneLimiter {
     /// Returns `true` if the limit on the number of deleted entries (rows in the database) is
     /// reached.
     pub fn is_deleted_entries_limit_reached(&self) -> bool {
-        self.deleted_entries_limit.as_ref().map_or(false, |limit| limit.is_limit_reached())
+        self.deleted_entries_limit.as_ref().is_some_and(|limit| limit.is_limit_reached())
     }
 
     /// Increments the number of deleted entries by the given number.
@@ -112,13 +113,37 @@ impl PruneLimiter {
 
     /// Returns `true` if time limit is reached.
     pub fn is_time_limit_reached(&self) -> bool {
-        self.time_limit.as_ref().map_or(false, |limit| limit.is_limit_reached())
+        self.time_limit.as_ref().is_some_and(|limit| limit.is_limit_reached())
     }
 
     /// Returns `true` if any limit is reached.
     pub fn is_limit_reached(&self) -> bool {
         self.is_deleted_entries_limit_reached() || self.is_time_limit_reached()
     }
+
+    /// Creates a new [`PruneInterruptReason`] based on the limiter's state.
+    pub fn interrupt_reason(&self) -> PruneInterruptReason {
+        if self.is_time_limit_reached() {
+            PruneInterruptReason::Timeout
+        } else if self.is_deleted_entries_limit_reached() {
+            PruneInterruptReason::DeletedEntriesLimitReached
+        } else {
+            PruneInterruptReason::Unknown
+        }
+    }
+
+    /// Creates a new [`PruneProgress`].
+    ///
+    /// If `done == true`, returns [`PruneProgress::Finished`], otherwise
+    /// [`PruneProgress::HasMoreData`] is returned with a [`PruneInterruptReason`] according to the
+    /// limiter's state.
+    pub fn progress(&self, done: bool) -> PruneProgress {
+        if done {
+            PruneProgress::Finished
+        } else {
+            PruneProgress::HasMoreData(self.interrupt_reason())
+        }
+    }
 }
 
 #[cfg(test)]
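The relocated `PruneLimiter` composes with `PruneProgress` as in the following sketch. Illustrative only, using items visible in this diff; a `PruneLimiter::default()` (as in the tests further below) has neither a time nor a deleted-entries limit configured, so its interrupt reason is `Unknown`.

    use reth_prune::PruneLimiter;
    use reth_prune_types::PruneProgress;

    fn report(limiter: &PruneLimiter, done: bool) -> PruneProgress {
        // Folds the limiter state into the segment result: `Finished` when the
        // segment ran to completion, otherwise `HasMoreData` tagged with
        // whichever limit (time or deleted entries) tripped.
        limiter.progress(done)
    }

    // report(&PruneLimiter::default(), true)  == PruneProgress::Finished
    // report(&PruneLimiter::default(), false) ==
    //     PruneProgress::HasMoreData(PruneInterruptReason::Unknown)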
fn prune_segments( &mut self, provider: &Provider, tip_block_number: BlockNumber, limiter: &mut PruneLimiter, - ) -> Result<(PrunerStats, usize, PrunerOutput), PrunerError> { - let mut stats = PrunerStats::new(); + ) -> Result<(Vec<PrunedSegmentInfo>, usize, PrunerOutput), PrunerError> { + let mut stats = Vec::with_capacity(self.segments.len()); let mut pruned = 0; let mut output = PrunerOutput { progress: PruneProgress::Finished, @@ -249,7 +248,12 @@ if segment_output.pruned > 0 { limiter.increment_deleted_entries_count_by(segment_output.pruned); pruned += segment_output.pruned; - stats.push((segment.segment(), segment_output.pruned, segment_output.progress)); + let info = PrunedSegmentInfo { + segment: segment.segment(), + pruned: segment_output.pruned, + progress: segment_output.progress, + }; + stats.push(info); } } else { debug!(target: "pruner", segment = ?segment.segment(), purpose = ?segment.purpose(), "Nothing to prune for the segment"); diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index d1b7819ac76..ae18bcb3c6e 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -3,12 +3,10 @@ mod set; mod static_file; mod user; -use crate::PrunerError; +use crate::{PruneLimiter, PrunerError}; use alloy_primitives::{BlockNumber, TxNumber}; use reth_provider::{errors::provider::ProviderResult, BlockReader, PruneCheckpointWriter}; -use reth_prune_types::{ - PruneCheckpoint, PruneLimiter, PruneMode, PrunePurpose, PruneSegment, SegmentOutput, -}; +use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; pub use set::SegmentSet; pub use static_file::{ Headers as StaticFileHeaders, Receipts as StaticFileReceipts, @@ -23,9 +21,9 @@ pub use user::{ /// A segment represents a pruning of some portion of the data. /// -/// Segments are called from [Pruner](crate::Pruner) with the following lifecycle: +/// Segments are called from [`Pruner`](crate::Pruner) with the following lifecycle: /// 1. Call [`Segment::prune`] with `delete_limit` of [`PruneInput`]. -/// 2. If [`Segment::prune`] returned a [Some] in `checkpoint` of [`SegmentOutput`], call +/// 2. If [`Segment::prune`] returned a [`Some`] in `checkpoint` of [`SegmentOutput`], call /// [`Segment::save_checkpoint`]. /// 3. Subtract `pruned` of [`SegmentOutput`] from `delete_limit` of next [`PruneInput`]. pub trait Segment<Provider>: Debug + Send + Sync { @@ -88,7 +86,7 @@ impl PruneInput { }, }) // No checkpoint exists, prune from genesis - .unwrap_or(0); + .unwrap_or_default(); let to_tx_number = match provider.block_body_indices(self.to_block)?
{ Some(body) => { @@ -143,3 +141,207 @@ impl PruneInput { .unwrap_or(0) } } + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::B256; + use reth_primitives_traits::BlockBody; + use reth_provider::{ + providers::BlockchainProvider2, + test_utils::{create_test_provider_factory, MockEthProvider}, + }; + use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; + + #[test] + fn test_prune_input_get_next_tx_num_range_no_to_block() { + let input = PruneInput { + previous_checkpoint: None, + to_block: 10, + limiter: PruneLimiter::default(), + }; + + // Default provider with no block corresponding to block 10 + let provider = MockEthProvider::default(); + + // No block body for block 10, expected None + let range = input.get_next_tx_num_range(&provider).expect("Expected range"); + assert!(range.is_none()); + } + + #[test] + fn test_prune_input_get_next_tx_num_range_no_tx() { + let input = PruneInput { + previous_checkpoint: None, + to_block: 10, + limiter: PruneLimiter::default(), + }; + + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks with no transactions + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Since there are no transactions, expected None + let range = input.get_next_tx_num_range(&provider).expect("Expected range"); + assert!(range.is_none()); + } + + #[test] + fn test_prune_input_get_next_tx_num_range_valid() { + // Create a new prune input + let input = PruneInput { + previous_checkpoint: None, + to_block: 10, + limiter: PruneLimiter::default(), + }; + + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks with some transactions + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Get the next tx number range + let range = input.get_next_tx_num_range(&provider).expect("Expected range").unwrap(); + + // Calculate the total number of transactions + let num_txs = + blocks.iter().map(|block| block.body.transactions().len() as u64).sum::(); + + assert_eq!(range, 0..=num_txs - 1); + } + + #[test] + fn test_prune_input_get_next_tx_checkpoint_without_tx_number() { + // Create a prune input with a previous checkpoint without a tx number (unexpected) + let input = PruneInput { + previous_checkpoint: Some(PruneCheckpoint { + block_number: Some(5), + tx_number: None, + prune_mode: PruneMode::Full, + }), + to_block: 10, + limiter: 
PruneLimiter::default(), + }; + + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Fetch the range and check if it is correct + let range = input.get_next_tx_num_range(&provider).expect("Expected range").unwrap(); + + // Calculate the total number of transactions + let num_txs = + blocks.iter().map(|block| block.body.transactions().len() as u64).sum::(); + + assert_eq!(range, 0..=num_txs - 1,); + } + + #[test] + fn test_prune_input_get_next_tx_empty_range() { + // Create a new provider via factory + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..5, ..Default::default() }, + ); + + // Insert the blocks into the database + let provider_rw = factory.provider_rw().expect("failed to get provider_rw"); + for block in &blocks { + provider_rw + .insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + ) + .expect("failed to insert block"); + } + provider_rw.commit().expect("failed to commit"); + + // Create a new provider + let provider = BlockchainProvider2::new(factory).unwrap(); + + // Get the last tx number + // Calculate the total number of transactions + let num_txs = + blocks.iter().map(|block| block.body.transactions().len() as u64).sum::(); + let max_range = num_txs - 1; + + // Create a prune input with a previous checkpoint that is the last tx number + let input = PruneInput { + previous_checkpoint: Some(PruneCheckpoint { + block_number: Some(5), + tx_number: Some(max_range), + prune_mode: PruneMode::Full, + }), + to_block: 10, + limiter: PruneLimiter::default(), + }; + + // We expect an empty range since the previous checkpoint is the last tx number + let range = input.get_next_tx_num_range(&provider).expect("Expected range"); + assert!(range.is_none()); + } +} diff --git a/crates/prune/prune/src/segments/receipts.rs b/crates/prune/prune/src/segments/receipts.rs index 05482d65953..dbea32c47fe 100644 --- a/crates/prune/prune/src/segments/receipts.rs +++ b/crates/prune/prune/src/segments/receipts.rs @@ -6,14 +6,13 @@ //! 
node after static file producer has finished use crate::{db_ext::DbTxPruneExt, segments::PruneInput, PrunerError}; -use reth_db::{tables, transaction::DbTxMut}; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ - errors::provider::ProviderResult, BlockReader, DBProvider, PruneCheckpointWriter, - TransactionsProvider, -}; -use reth_prune_types::{ - PruneCheckpoint, PruneProgress, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, + errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider, + PruneCheckpointWriter, TransactionsProvider, }; +use reth_prune_types::{PruneCheckpoint, PruneSegment, SegmentOutput, SegmentOutputCheckpoint}; use tracing::trace; pub(crate) fn prune( @@ -21,7 +20,10 @@ pub(crate) fn prune( input: PruneInput, ) -> Result where - Provider: DBProvider + TransactionsProvider + BlockReader, + Provider: DBProvider + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider>, { let tx_range = match input.get_next_tx_num_range(provider)? { Some(range) => range, @@ -35,7 +37,9 @@ where let mut limiter = input.limiter; let mut last_pruned_transaction = tx_range_end; - let (pruned, done) = provider.tx_ref().prune_table_with_range::( + let (pruned, done) = provider.tx_ref().prune_table_with_range::::Receipt, + >>( tx_range, &mut limiter, |_| false, @@ -50,7 +54,7 @@ where // so we could finish pruning its receipts on the next run. .checked_sub(if done { 0 } else { 1 }); - let progress = PruneProgress::new(done, &limiter); + let progress = limiter.progress(done); Ok(SegmentOutput { progress, @@ -77,7 +81,7 @@ pub(crate) fn save_checkpoint( #[cfg(test)] mod tests { - use crate::segments::{PruneInput, SegmentOutput}; + use crate::segments::{PruneInput, PruneLimiter, SegmentOutput}; use alloy_primitives::{BlockNumber, TxNumber, B256}; use assert_matches::assert_matches; use itertools::{ @@ -87,7 +91,7 @@ mod tests { use reth_db::tables; use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; use reth_prune_types::{ - PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, + PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment, }; use reth_stages::test_utils::{StorageKind, TestStageDB}; use reth_testing_utils::generators::{ @@ -109,12 +113,14 @@ mod tests { let mut receipts = Vec::new(); for block in &blocks { + receipts.reserve_exact(block.body.transactions.len()); for transaction in &block.body.transactions { receipts .push((receipts.len() as u64, random_receipt(&mut rng, transaction, Some(0)))); } } - db.insert_receipts(receipts.clone()).expect("insert receipts"); + let receipts_len = receipts.len(); + db.insert_receipts(receipts).expect("insert receipts"); assert_eq!( db.table::().unwrap().len(), @@ -194,7 +200,7 @@ mod tests { assert_eq!( db.table::().unwrap().len(), - receipts.len() - (last_pruned_tx_number + 1) + receipts_len - (last_pruned_tx_number + 1) ); assert_eq!( db.factory diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 710b2b721cd..d7bbee1042b 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -2,16 +2,18 @@ use crate::segments::{ AccountHistory, ReceiptsByLogs, Segment, SenderRecovery, StorageHistory, TransactionLookup, UserReceipts, }; -use reth_db::transaction::DbTxMut; +use alloy_eips::eip2718::Encodable2718; +use reth_db::{table::Value, transaction::DbTxMut}; +use 
reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileProvider, BlockReader, DBProvider, PruneCheckpointWriter, - TransactionsProvider, + StaticFileProviderFactory, }; use reth_prune_types::PruneModes; use super::{StaticFileHeaders, StaticFileReceipts, StaticFileTransactions}; -/// Collection of [Segment]. Thread-safe, allocated on the heap. +/// Collection of [`Segment`]. Thread-safe, allocated on the heap. #[derive(Debug)] pub struct SegmentSet { inner: Vec>>, @@ -23,7 +25,7 @@ impl SegmentSet { Self::default() } - /// Adds new [Segment] to collection. + /// Adds new [`Segment`] to collection. pub fn segment + 'static>(mut self, segment: S) -> Self { self.inner.push(Box::new(segment)); self @@ -45,12 +47,15 @@ impl SegmentSet { impl SegmentSet where - Provider: DBProvider + TransactionsProvider + PruneCheckpointWriter + BlockReader, + Provider: StaticFileProviderFactory> + + DBProvider + + PruneCheckpointWriter + + BlockReader, { /// Creates a [`SegmentSet`] from an existing components, such as [`StaticFileProvider`] and /// [`PruneModes`]. pub fn from_components( - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { let PruneModes { diff --git a/crates/prune/prune/src/segments/static_file/headers.rs b/crates/prune/prune/src/segments/static_file/headers.rs index 8700a653b11..7d100f4e283 100644 --- a/crates/prune/prune/src/segments/static_file/headers.rs +++ b/crates/prune/prune/src/segments/static_file/headers.rs @@ -3,7 +3,7 @@ use std::num::NonZeroUsize; use crate::{ db_ext::DbTxPruneExt, segments::{PruneInput, Segment}, - PrunerError, + PruneLimiter, PrunerError, }; use alloy_primitives::BlockNumber; use itertools::Itertools; @@ -12,10 +12,9 @@ use reth_db::{ tables, transaction::DbTxMut, }; -use reth_provider::{providers::StaticFileProvider, DBProvider}; +use reth_provider::{providers::StaticFileProvider, DBProvider, StaticFileProviderFactory}; use reth_prune_types::{ - PruneLimiter, PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, - SegmentOutputCheckpoint, + PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, }; use reth_static_file_types::StaticFileSegment; use tracing::trace; @@ -24,17 +23,19 @@ use tracing::trace; const HEADER_TABLES_TO_PRUNE: usize = 3; #[derive(Debug)] -pub struct Headers { - static_file_provider: StaticFileProvider, +pub struct Headers { + static_file_provider: StaticFileProvider, } -impl Headers { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { +impl Headers { + pub const fn new(static_file_provider: StaticFileProvider) -> Self { Self { static_file_provider } } } -impl> Segment for Headers { +impl> Segment + for Headers +{ fn segment(&self) -> PruneSegment { PruneSegment::Headers } @@ -89,8 +90,8 @@ impl> Segment for Headers { pruned += entries_pruned; } - let done = last_pruned_block.map_or(false, |block| block == block_range_end); - let progress = PruneProgress::new(done, &limiter); + let done = last_pruned_block == Some(block_range_end); + let progress = limiter.progress(done); Ok(SegmentOutput { progress, @@ -193,7 +194,8 @@ where #[cfg(test)] mod tests { use crate::segments::{ - static_file::headers::HEADER_TABLES_TO_PRUNE, PruneInput, Segment, SegmentOutput, + static_file::headers::HEADER_TABLES_TO_PRUNE, PruneInput, PruneLimiter, Segment, + SegmentOutput, }; use alloy_primitives::{BlockNumber, B256, U256}; use assert_matches::assert_matches; @@ -204,8 +206,8 @@ mod tests { 
StaticFileProviderFactory, }; use reth_prune_types::{ - PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, - PruneSegment, SegmentOutputCheckpoint, + PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment, + SegmentOutputCheckpoint, }; use reth_stages::test_utils::TestStageDB; use reth_testing_utils::{generators, generators::random_header_range}; diff --git a/crates/prune/prune/src/segments/static_file/receipts.rs b/crates/prune/prune/src/segments/static_file/receipts.rs index f766f7ea1d3..6cdc5375990 100644 --- a/crates/prune/prune/src/segments/static_file/receipts.rs +++ b/crates/prune/prune/src/segments/static_file/receipts.rs @@ -2,28 +2,33 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ errors::provider::ProviderResult, providers::StaticFileProvider, BlockReader, DBProvider, - PruneCheckpointWriter, TransactionsProvider, + PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; use reth_static_file_types::StaticFileSegment; #[derive(Debug)] -pub struct Receipts { - static_file_provider: StaticFileProvider, +pub struct Receipts { + static_file_provider: StaticFileProvider, } -impl Receipts { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { +impl Receipts { + pub const fn new(static_file_provider: StaticFileProvider) -> Self { Self { static_file_provider } } } -impl Segment for Receipts +impl Segment for Receipts where - Provider: DBProvider + PruneCheckpointWriter + TransactionsProvider + BlockReader, + Provider: StaticFileProviderFactory> + + DBProvider + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader, { fn segment(&self) -> PruneSegment { PruneSegment::Receipts diff --git a/crates/prune/prune/src/segments/static_file/transactions.rs b/crates/prune/prune/src/segments/static_file/transactions.rs index 12772af5f88..12ffbf72798 100644 --- a/crates/prune/prune/src/segments/static_file/transactions.rs +++ b/crates/prune/prune/src/segments/static_file/transactions.rs @@ -3,28 +3,35 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::{tables, transaction::DbTxMut}; -use reth_provider::{providers::StaticFileProvider, BlockReader, DBProvider, TransactionsProvider}; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{ + providers::StaticFileProvider, BlockReader, DBProvider, StaticFileProviderFactory, + TransactionsProvider, +}; use reth_prune_types::{ - PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, + PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, }; use reth_static_file_types::StaticFileSegment; use tracing::trace; #[derive(Debug)] -pub struct Transactions { - static_file_provider: StaticFileProvider, +pub struct Transactions { + static_file_provider: StaticFileProvider, } -impl Transactions { - pub const fn new(static_file_provider: StaticFileProvider) -> Self { +impl Transactions { + pub const fn new(static_file_provider: StaticFileProvider) -> Self { Self { static_file_provider } } } -impl Segment for Transactions +impl Segment for Transactions where - Provider: DBProvider + TransactionsProvider + BlockReader, + Provider: DBProvider + + 
TransactionsProvider + + BlockReader + + StaticFileProviderFactory>, { fn segment(&self) -> PruneSegment { PruneSegment::Transactions @@ -52,7 +59,9 @@ where let mut limiter = input.limiter; let mut last_pruned_transaction = *tx_range.end(); - let (pruned, done) = provider.tx_ref().prune_table_with_range::( + let (pruned, done) = provider.tx_ref().prune_table_with_range::::SignedTx, + >>( tx_range, &mut limiter, |_| false, @@ -67,7 +76,7 @@ where // so we could finish pruning its transactions on the next run. .checked_sub(if done { 0 } else { 1 }); - let progress = PruneProgress::new(done, &limiter); + let progress = limiter.progress(done); Ok(SegmentOutput { progress, @@ -82,7 +91,7 @@ where #[cfg(test)] mod tests { - use crate::segments::{PruneInput, Segment}; + use crate::segments::{PruneInput, PruneLimiter, Segment}; use alloy_primitives::{BlockNumber, TxNumber, B256}; use assert_matches::assert_matches; use itertools::{ @@ -95,8 +104,8 @@ mod tests { StaticFileProviderFactory, }; use reth_prune_types::{ - PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, - PruneSegment, SegmentOutput, + PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment, + SegmentOutput, }; use reth_stages::test_utils::{StorageKind, TestStageDB}; use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; diff --git a/crates/prune/prune/src/segments/user/account_history.rs b/crates/prune/prune/src/segments/user/account_history.rs index 01f8c0850a1..92d62f3c995 100644 --- a/crates/prune/prune/src/segments/user/account_history.rs +++ b/crates/prune/prune/src/segments/user/account_history.rs @@ -8,8 +8,7 @@ use reth_db::{tables, transaction::DbTxMut}; use reth_db_api::models::ShardedKey; use reth_provider::DBProvider; use reth_prune_types::{ - PruneInterruptReason, PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, - SegmentOutputCheckpoint, + PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, }; use rustc_hash::FxHashMap; use tracing::{instrument, trace}; @@ -65,7 +64,7 @@ where }; if limiter.is_limit_reached() { return Ok(SegmentOutput::not_done( - PruneInterruptReason::new(&limiter), + limiter.interrupt_reason(), input.previous_checkpoint.map(SegmentOutputCheckpoint::from_prune_checkpoint), )) } @@ -113,7 +112,7 @@ where )?; trace!(target: "pruner", ?outcomes, %done, "Pruned account history (indices)"); - let progress = PruneProgress::new(done, &limiter); + let progress = limiter.progress(done); Ok(SegmentOutput { progress, @@ -130,14 +129,14 @@ where mod tests { use crate::segments::{ user::account_history::ACCOUNT_HISTORY_TABLES_TO_PRUNE, AccountHistory, PruneInput, - Segment, SegmentOutput, + PruneLimiter, Segment, SegmentOutput, }; use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use reth_db::{tables, BlockNumberList}; use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; use reth_prune_types::{ - PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, + PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment, }; use reth_stages::test_utils::{StorageKind, TestStageDB}; use reth_testing_utils::generators::{ diff --git a/crates/prune/prune/src/segments/user/receipts.rs b/crates/prune/prune/src/segments/user/receipts.rs index 5bc9feaf023..97708ad6de1 100644 --- a/crates/prune/prune/src/segments/user/receipts.rs +++ b/crates/prune/prune/src/segments/user/receipts.rs @@ -2,10 +2,11 @@ use 
crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ - errors::provider::ProviderResult, BlockReader, DBProvider, PruneCheckpointWriter, - TransactionsProvider, + errors::provider::ProviderResult, BlockReader, DBProvider, NodePrimitivesProvider, + PruneCheckpointWriter, TransactionsProvider, }; use reth_prune_types::{PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, SegmentOutput}; use tracing::instrument; @@ -23,7 +24,11 @@ impl Receipts { impl Segment for Receipts where - Provider: DBProvider + PruneCheckpointWriter + TransactionsProvider + BlockReader, + Provider: DBProvider + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider>, { fn segment(&self) -> PruneSegment { PruneSegment::Receipts diff --git a/crates/prune/prune/src/segments/user/receipts_by_logs.rs b/crates/prune/prune/src/segments/user/receipts_by_logs.rs index 05bc40b6c7b..91bad6f67ed 100644 --- a/crates/prune/prune/src/segments/user/receipts_by_logs.rs +++ b/crates/prune/prune/src/segments/user/receipts_by_logs.rs @@ -3,14 +3,17 @@ use crate::{ segments::{PruneInput, Segment}, PrunerError, }; -use reth_db::{tables, transaction::DbTxMut}; -use reth_provider::{BlockReader, DBProvider, PruneCheckpointWriter, TransactionsProvider}; +use alloy_consensus::TxReceipt; +use reth_db::{table::Value, tables, transaction::DbTxMut}; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{ + BlockReader, DBProvider, NodePrimitivesProvider, PruneCheckpointWriter, TransactionsProvider, +}; use reth_prune_types::{ - PruneCheckpoint, PruneMode, PruneProgress, PrunePurpose, PruneSegment, ReceiptsLogPruneConfig, - SegmentOutput, MINIMUM_PRUNING_DISTANCE, + PruneCheckpoint, PruneMode, PrunePurpose, PruneSegment, ReceiptsLogPruneConfig, SegmentOutput, + MINIMUM_PRUNING_DISTANCE, }; use tracing::{instrument, trace}; - #[derive(Debug)] pub struct ReceiptsByLogs { config: ReceiptsLogPruneConfig, @@ -24,7 +27,11 @@ impl ReceiptsByLogs { impl Segment for ReceiptsByLogs where - Provider: DBProvider + PruneCheckpointWriter + TransactionsProvider + BlockReader, + Provider: DBProvider + + PruneCheckpointWriter + + TransactionsProvider + + BlockReader + + NodePrimitivesProvider>, { fn segment(&self) -> PruneSegment { PruneSegment::ContractLogs @@ -142,12 +149,14 @@ where // Delete receipts, except the ones in the inclusion list let mut last_skipped_transaction = 0; let deleted; - (deleted, done) = provider.tx_ref().prune_table_with_range::( + (deleted, done) = provider.tx_ref().prune_table_with_range::::Receipt, + >>( tx_range, &mut limiter, |(tx_num, receipt)| { let skip = num_addresses > 0 && - receipt.logs.iter().any(|log| { + receipt.logs().iter().any(|log| { filtered_addresses[..num_addresses].contains(&&log.address) }); @@ -210,7 +219,7 @@ where }, )?; - let progress = PruneProgress::new(done, &limiter); + let progress = limiter.progress(done); Ok(SegmentOutput { progress, pruned, checkpoint: None }) } @@ -218,13 +227,14 @@ where #[cfg(test)] mod tests { - use crate::segments::{PruneInput, ReceiptsByLogs, Segment}; + use crate::segments::{PruneInput, PruneLimiter, ReceiptsByLogs, Segment}; use alloy_primitives::B256; use assert_matches::assert_matches; use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; + use reth_primitives_traits::InMemorySize; use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader, 
TransactionsProvider}; - use reth_prune_types::{PruneLimiter, PruneMode, PruneSegment, ReceiptsLogPruneConfig}; + use reth_prune_types::{PruneMode, PruneSegment, ReceiptsLogPruneConfig}; use reth_stages::test_utils::{StorageKind, TestStageDB}; use reth_testing_utils::generators::{ self, random_block_range, random_eoa_account, random_log, random_receipt, BlockRangeParams, @@ -263,6 +273,7 @@ mod tests { let (deposit_contract_addr, _) = random_eoa_account(&mut rng); for block in &blocks { + receipts.reserve_exact(block.body.size()); for (txi, transaction) in block.body.transactions.iter().enumerate() { let mut receipt = random_receipt(&mut rng, transaction, Some(1)); receipt.logs.push(random_log( diff --git a/crates/prune/prune/src/segments/user/sender_recovery.rs b/crates/prune/prune/src/segments/user/sender_recovery.rs index bd86f3e6521..bc4ba5ab067 100644 --- a/crates/prune/prune/src/segments/user/sender_recovery.rs +++ b/crates/prune/prune/src/segments/user/sender_recovery.rs @@ -6,7 +6,7 @@ use crate::{ use reth_db::{tables, transaction::DbTxMut}; use reth_provider::{BlockReader, DBProvider, TransactionsProvider}; use reth_prune_types::{ - PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, + PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, }; use tracing::{instrument, trace}; @@ -67,7 +67,7 @@ where // previous, so we could finish pruning its transaction senders on the next run. .checked_sub(if done { 0 } else { 1 }); - let progress = PruneProgress::new(done, &limiter); + let progress = limiter.progress(done); Ok(SegmentOutput { progress, @@ -82,7 +82,7 @@ where #[cfg(test)] mod tests { - use crate::segments::{PruneInput, Segment, SegmentOutput, SenderRecovery}; + use crate::segments::{PruneInput, PruneLimiter, Segment, SegmentOutput, SenderRecovery}; use alloy_primitives::{BlockNumber, TxNumber, B256}; use assert_matches::assert_matches; use itertools::{ @@ -90,8 +90,9 @@ mod tests { Itertools, }; use reth_db::tables; + use reth_primitives_traits::SignedTransaction; use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; - use reth_prune_types::{PruneCheckpoint, PruneLimiter, PruneMode, PruneProgress, PruneSegment}; + use reth_prune_types::{PruneCheckpoint, PruneMode, PruneProgress, PruneSegment}; use reth_stages::test_utils::{StorageKind, TestStageDB}; use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; use std::ops::Sub; @@ -110,6 +111,7 @@ mod tests { let mut transaction_senders = Vec::new(); for block in &blocks { + transaction_senders.reserve_exact(block.body.transactions.len()); for transaction in &block.body.transactions { transaction_senders.push(( transaction_senders.len() as u64, @@ -117,8 +119,8 @@ mod tests { )); } } - db.insert_transaction_senders(transaction_senders.clone()) - .expect("insert transaction senders"); + let transaction_senders_len = transaction_senders.len(); + db.insert_transaction_senders(transaction_senders).expect("insert transaction senders"); assert_eq!( db.table::().unwrap().len(), @@ -202,7 +204,7 @@ mod tests { assert_eq!( db.table::().unwrap().len(), - transaction_senders.len() - (last_pruned_tx_number + 1) + transaction_senders_len - (last_pruned_tx_number + 1) ); assert_eq!( db.factory diff --git a/crates/prune/prune/src/segments/user/storage_history.rs b/crates/prune/prune/src/segments/user/storage_history.rs index 315ad750a8b..dd2c79d9687 100644 --- a/crates/prune/prune/src/segments/user/storage_history.rs +++ 
b/crates/prune/prune/src/segments/user/storage_history.rs @@ -7,10 +7,7 @@ use itertools::Itertools; use reth_db::{tables, transaction::DbTxMut}; use reth_db_api::models::{storage_sharded_key::StorageShardedKey, BlockNumberAddress}; use reth_provider::DBProvider; -use reth_prune_types::{ - PruneInterruptReason, PruneMode, PruneProgress, PrunePurpose, PruneSegment, - SegmentOutputCheckpoint, -}; +use reth_prune_types::{PruneMode, PrunePurpose, PruneSegment, SegmentOutputCheckpoint}; use rustc_hash::FxHashMap; use tracing::{instrument, trace}; @@ -65,7 +62,7 @@ where }; if limiter.is_limit_reached() { return Ok(SegmentOutput::not_done( - PruneInterruptReason::new(&limiter), + limiter.interrupt_reason(), input.previous_checkpoint.map(SegmentOutputCheckpoint::from_prune_checkpoint), )) } @@ -118,7 +115,7 @@ where )?; trace!(target: "pruner", ?outcomes, %done, "Pruned storage history (indices)"); - let progress = PruneProgress::new(done, &limiter); + let progress = limiter.progress(done); Ok(SegmentOutput { progress, @@ -134,14 +131,14 @@ where #[cfg(test)] mod tests { use crate::segments::{ - user::storage_history::STORAGE_HISTORY_TABLES_TO_PRUNE, PruneInput, Segment, SegmentOutput, - StorageHistory, + user::storage_history::STORAGE_HISTORY_TABLES_TO_PRUNE, PruneInput, PruneLimiter, Segment, + SegmentOutput, StorageHistory, }; use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use reth_db::{tables, BlockNumberList}; use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; - use reth_prune_types::{PruneCheckpoint, PruneLimiter, PruneMode, PruneProgress, PruneSegment}; + use reth_prune_types::{PruneCheckpoint, PruneMode, PruneProgress, PruneSegment}; use reth_stages::test_utils::{StorageKind, TestStageDB}; use reth_testing_utils::generators::{ self, random_block_range, random_changeset_range, random_eoa_accounts, BlockRangeParams, diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs index bb8196cdb03..f2331fee1b0 100644 --- a/crates/prune/prune/src/segments/user/transaction_lookup.rs +++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs @@ -3,12 +3,11 @@ use crate::{ segments::{PruneInput, Segment, SegmentOutput}, PrunerError, }; +use alloy_eips::eip2718::Encodable2718; use rayon::prelude::*; use reth_db::{tables, transaction::DbTxMut}; -use reth_provider::{BlockReader, DBProvider, TransactionsProvider}; -use reth_prune_types::{ - PruneMode, PruneProgress, PrunePurpose, PruneSegment, SegmentOutputCheckpoint, -}; +use reth_provider::{BlockReader, DBProvider}; +use reth_prune_types::{PruneMode, PrunePurpose, PruneSegment, SegmentOutputCheckpoint}; use tracing::{instrument, trace}; #[derive(Debug)] @@ -24,7 +23,7 @@ impl TransactionLookup { impl Segment for TransactionLookup where - Provider: DBProvider + TransactionsProvider + BlockReader, + Provider: DBProvider + BlockReader, { fn segment(&self) -> PruneSegment { PruneSegment::TransactionLookup @@ -58,7 +57,7 @@ where let hashes = provider .transactions_by_tx_range(tx_range.clone())? .into_par_iter() - .map(|transaction| transaction.hash()) + .map(|transaction| transaction.trie_hash()) .collect::>(); // Number of transactions retrieved from the database should match the tx range count @@ -95,7 +94,7 @@ where // run. 
.checked_sub(if done { 0 } else { 1 }); - let progress = PruneProgress::new(done, &limiter); + let progress = limiter.progress(done); Ok(SegmentOutput { progress, @@ -110,7 +109,7 @@ where #[cfg(test)] mod tests { - use crate::segments::{PruneInput, Segment, SegmentOutput, TransactionLookup}; + use crate::segments::{PruneInput, PruneLimiter, Segment, SegmentOutput, TransactionLookup}; use alloy_primitives::{BlockNumber, TxNumber, B256}; use assert_matches::assert_matches; use itertools::{ @@ -120,7 +119,7 @@ mod tests { use reth_db::tables; use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader}; use reth_prune_types::{ - PruneCheckpoint, PruneInterruptReason, PruneLimiter, PruneMode, PruneProgress, PruneSegment, + PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment, }; use reth_stages::test_utils::{StorageKind, TestStageDB}; use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; @@ -140,11 +139,13 @@ mod tests { let mut tx_hash_numbers = Vec::new(); for block in &blocks { + tx_hash_numbers.reserve_exact(block.body.transactions.len()); for transaction in &block.body.transactions { - tx_hash_numbers.push((transaction.hash, tx_hash_numbers.len() as u64)); + tx_hash_numbers.push((transaction.hash(), tx_hash_numbers.len() as u64)); } } - db.insert_tx_hash_numbers(tx_hash_numbers.clone()).expect("insert tx hash numbers"); + let tx_hash_numbers_len = tx_hash_numbers.len(); + db.insert_tx_hash_numbers(tx_hash_numbers).expect("insert tx hash numbers"); assert_eq!( db.table::().unwrap().len(), @@ -228,7 +229,7 @@ mod tests { assert_eq!( db.table::().unwrap().len(), - tx_hash_numbers.len() - (last_pruned_tx_number + 1) + tx_hash_numbers_len - (last_pruned_tx_number + 1) ); assert_eq!( db.factory diff --git a/crates/prune/types/Cargo.toml b/crates/prune/types/Cargo.toml index 13def8eaa8b..5446d6f76ff 100644 --- a/crates/prune/types/Cargo.toml +++ b/crates/prune/types/Cargo.toml @@ -20,6 +20,7 @@ derive_more.workspace = true modular-bitfield.workspace = true serde.workspace = true thiserror.workspace = true +arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] arbitrary = { workspace = true, features = ["derive"] } @@ -29,3 +30,13 @@ proptest-arbitrary-interop.workspace = true serde_json.workspace = true test-fuzz.workspace = true toml.workspace = true + +[features] +test-utils = [ + "dep:arbitrary", + "reth-codecs/test-utils" +] +arbitrary = [ + "alloy-primitives/arbitrary", + "reth-codecs/arbitrary" +] diff --git a/crates/prune/types/src/checkpoint.rs b/crates/prune/types/src/checkpoint.rs index f654fba7df1..e0397c5afc8 100644 --- a/crates/prune/types/src/checkpoint.rs +++ b/crates/prune/types/src/checkpoint.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; /// Saves the pruning progress of a stage. #[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(Default, arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(Default, arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct PruneCheckpoint { /// Highest pruned block number. If it's [None], the pruning for block `0` is not finished yet. 
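The Cargo.toml and `cfg_attr` changes just above widen the test-only derives: `arbitrary` becomes an optional dependency and the gate grows from `cfg(test)` to `any(test, feature = "test-utils")`, so downstream crates can opt in. A sketch of the pattern with a hypothetical `Checkpoint` type (the feature name comes from the diff; the struct is illustrative):

```rust
// In the crate's Cargo.toml (per the diff):
//   arbitrary = { workspace = true, features = ["derive"], optional = true }
//   [features]
//   test-utils = ["dep:arbitrary", ...]
//
// The derives are then compiled for this crate's own tests *and* for any
// downstream crate that enables `test-utils`, but stay out of normal builds.

#[cfg_attr(any(test, feature = "test-utils"), derive(Default, arbitrary::Arbitrary))]
#[derive(Debug, Clone, Copy)]
pub struct Checkpoint {
    /// Highest pruned block number, if any.
    pub block_number: Option<u64>,
    /// Highest pruned transaction number, if any.
    pub tx_number: Option<u64>,
}

fn main() {
    // Outside `cfg(test)` and without `test-utils` the extra derives are
    // simply absent, so construct the value explicitly.
    let checkpoint = Checkpoint { block_number: Some(0), tx_number: None };
    println!("{checkpoint:?}");
}
```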
diff --git a/crates/prune/types/src/event.rs b/crates/prune/types/src/event.rs new file mode 100644 index 00000000000..bac5f0d512c --- /dev/null +++ b/crates/prune/types/src/event.rs @@ -0,0 +1,22 @@ +use crate::PrunedSegmentInfo; +use alloy_primitives::BlockNumber; +use std::time::Duration; + +/// An event emitted by a pruner. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum PrunerEvent { + /// Emitted when pruner started running. + Started { + /// The tip block number before pruning. + tip_block_number: BlockNumber, + }, + /// Emitted when pruner finished running. + Finished { + /// The tip block number before pruning. + tip_block_number: BlockNumber, + /// The elapsed time for the pruning process. + elapsed: Duration, + /// Collected pruning stats. + stats: Vec, + }, +} diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index 6e06d6fc5dc..bbf2cfe5ffc 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -9,17 +9,18 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod checkpoint; -mod limiter; +mod event; mod mode; mod pruner; mod segment; mod target; pub use checkpoint::PruneCheckpoint; -pub use limiter::PruneLimiter; +pub use event::PrunerEvent; pub use mode::PruneMode; pub use pruner::{ - PruneInterruptReason, PruneProgress, PrunerOutput, SegmentOutput, SegmentOutputCheckpoint, + PruneInterruptReason, PruneProgress, PrunedSegmentInfo, PrunerOutput, SegmentOutput, + SegmentOutputCheckpoint, }; pub use segment::{PrunePurpose, PruneSegment, PruneSegmentError}; use serde::{Deserialize, Serialize}; @@ -27,6 +28,7 @@ use std::collections::BTreeMap; pub use target::{PruneModes, MINIMUM_PRUNING_DISTANCE}; use alloy_primitives::{Address, BlockNumber}; +use std::ops::Deref; /// Configuration for pruning receipts not associated with logs emitted by the specified contracts. #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] @@ -59,7 +61,7 @@ impl ReceiptsLogPruneConfig { pruned_block: Option, ) -> Result>, PruneSegmentError> { let mut map = BTreeMap::new(); - let pruned_block = pruned_block.unwrap_or_default(); + let base_block = pruned_block.unwrap_or_default() + 1; for (address, mode) in &self.0 { // Getting `None`, means that there is nothing to prune yet, so we need it to include in @@ -69,7 +71,7 @@ impl ReceiptsLogPruneConfig { // // Reminder, that we increment because the [`BlockNumber`] key of the new map should be // viewed as `PruneMode::Before(block)` - let block = (pruned_block + 1).max( + let block = base_block.max( mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)? .map(|(block, _)| block) .unwrap_or_default() + @@ -90,8 +92,8 @@ impl ReceiptsLogPruneConfig { let pruned_block = pruned_block.unwrap_or_default(); let mut lowest = None; - for mode in self.0.values() { - if let PruneMode::Distance(_) = mode { + for mode in self.values() { + if mode.is_distance() { if let Some((block, _)) = mode.prune_target_block(tip, PruneSegment::ContractLogs, PrunePurpose::User)? 
{ @@ -103,3 +105,224 @@ impl ReceiptsLogPruneConfig { Ok(lowest.map(|lowest| lowest.max(pruned_block))) } } + +impl Deref for ReceiptsLogPruneConfig { + type Target = BTreeMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_group_by_block_empty_config() { + let config = ReceiptsLogPruneConfig(BTreeMap::new()); + let tip = 1000; + let pruned_block = None; + + let result = config.group_by_block(tip, pruned_block).unwrap(); + assert!(result.is_empty(), "The result should be empty when the config is empty"); + } + + #[test] + fn test_group_by_block_single_entry() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Before(500); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + // Big tip to have something to prune for the target block + let tip = 3000000; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect one entry with block 500 and the corresponding address + assert_eq!(result.len(), 1); + assert_eq!(result[&500], vec![&address], "Address should be grouped under block 500"); + + // Tip smaller than the target block, so that we have nothing to prune for the block + let tip = 300; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect one entry with block 400 and the corresponding address + assert_eq!(result.len(), 1); + assert_eq!(result[&401], vec![&address], "Address should be grouped under block 400"); + } + + #[test] + fn test_group_by_block_multiple_entries() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Before(600); + let prune_mode2 = PruneMode::Before(800); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 900000; + let pruned_block = Some(400); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect two entries: one for block 600 and another for block 800 + assert_eq!(result.len(), 2); + assert_eq!(result[&600], vec![&address1], "Address1 should be grouped under block 600"); + assert_eq!(result[&800], vec![&address2], "Address2 should be grouped under block 800"); + } + + #[test] + fn test_group_by_block_with_distance_prune_mode() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Distance(100000); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 100100; + // Pruned block is smaller than the target block + let pruned_block = Some(50); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect the entry to be grouped under block 100 (tip - distance) + assert_eq!(result.len(), 1); + assert_eq!(result[&101], vec![&address], "Address should be grouped under block 100"); + + let tip = 100100; + // Pruned block is larger than the target block + let pruned_block = Some(800); + + let result = config.group_by_block(tip, pruned_block).unwrap(); + + // Expect the entry to be grouped under block 800 which is larger than tip - distance + assert_eq!(result.len(), 1); + assert_eq!(result[&801], vec![&address], "Address should be grouped under block 800"); + } + + #[test] + fn test_lowest_block_with_distance_empty_config() { + let 
config = ReceiptsLogPruneConfig(BTreeMap::new()); + let tip = 1000; + let pruned_block = None; + + let result = config.lowest_block_with_distance(tip, pruned_block).unwrap(); + assert_eq!(result, None, "The result should be None when the config is empty"); + } + + #[test] + fn test_lowest_block_with_distance_no_distance_mode() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Before(500); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 1000; + let pruned_block = None; + + let result = config.lowest_block_with_distance(tip, pruned_block).unwrap(); + assert_eq!(result, None, "The result should be None when there are no Distance modes"); + } + + #[test] + fn test_lowest_block_with_distance_single_entry() { + let mut config_map = BTreeMap::new(); + let address = Address::new([1; 20]); + let prune_mode = PruneMode::Distance(100000); + config_map.insert(address, prune_mode); + + let config = ReceiptsLogPruneConfig(config_map); + + let tip = 100100; + let pruned_block = Some(400); + + // Expect the lowest block to be 400 as 400 > 100100 - 100000 (tip - distance) + assert_eq!( + config.lowest_block_with_distance(tip, pruned_block).unwrap(), + Some(400), + "The lowest block should be 400" + ); + + let tip = 100100; + let pruned_block = Some(50); + + // Expect the lowest block to be 100 as 100 > 50 (pruned block) + assert_eq!( + config.lowest_block_with_distance(tip, pruned_block).unwrap(), + Some(100), + "The lowest block should be 100" + ); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_last() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100100); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100); + + // The lowest block should be 200300 - 100300 = 100000: + // - First iteration will return 100200 => 200300 - 100100 = 100200 + // - Second iteration will return 100000 => 200300 - 100300 = 100000 < 100200 + // - Final result is 100000 + assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(100000)); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_first() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100400); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100); + + // The lowest block should be 200300 - 100400 = 99900: + // - First iteration, lowest block is 200300 - 100400 = 99900 + // - Second iteration, lowest block is still 99900 < 200300 - 100300 = 100000 + // - Final result is 99900 + assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(99900)); + } + + #[test] + fn test_lowest_block_with_distance_multiple_entries_pruned_block() { + let mut config_map = BTreeMap::new(); + let address1 = Address::new([1; 20]); + let address2 = Address::new([2; 20]); + let prune_mode1 = PruneMode::Distance(100400); + let prune_mode2 = PruneMode::Distance(100300); + config_map.insert(address1, 
prune_mode1); + config_map.insert(address2, prune_mode2); + + let config = ReceiptsLogPruneConfig(config_map); + let tip = 200300; + let pruned_block = Some(100000); + + // The lowest block should be 100000 because: + // - Lowest is 200300 - 100400 = 99900 < 200300 - 100300 = 100000 + // - Lowest is compared to the pruned block 100000: 100000 > 99900 + // - Finally the lowest block is 100000 + assert_eq!(config.lowest_block_with_distance(tip, pruned_block).unwrap(), Some(100000)); + } +} diff --git a/crates/prune/types/src/mode.rs b/crates/prune/types/src/mode.rs index 3465882993b..de9b9e6dc08 100644 --- a/crates/prune/types/src/mode.rs +++ b/crates/prune/types/src/mode.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; /// Prune mode. #[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] #[serde(rename_all = "lowercase")] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub enum PruneMode { /// Prune all blocks. @@ -17,6 +17,13 @@ pub enum PruneMode { Before(BlockNumber), } +#[cfg(any(test, feature = "test-utils"))] +impl Default for PruneMode { + fn default() -> Self { + Self::Full + } +} + impl PruneMode { /// Prune blocks up to the specified block number. The specified block number is also pruned. /// @@ -67,12 +74,10 @@ impl PruneMode { pub const fn is_full(&self) -> bool { matches!(self, Self::Full) } -} -#[cfg(test)] -impl Default for PruneMode { - fn default() -> Self { - Self::Full + /// Returns true if the prune mode is [`PruneMode::Distance`]. + pub const fn is_distance(&self) -> bool { + matches!(self, Self::Distance(_)) } } diff --git a/crates/prune/types/src/pruner.rs b/crates/prune/types/src/pruner.rs index dbfafff639e..daf490501f3 100644 --- a/crates/prune/types/src/pruner.rs +++ b/crates/prune/types/src/pruner.rs @@ -1,6 +1,6 @@ +use crate::{PruneCheckpoint, PruneMode, PruneSegment}; use alloy_primitives::{BlockNumber, TxNumber}; - -use crate::{PruneCheckpoint, PruneLimiter, PruneMode, PruneSegment}; +use derive_more::Display; /// Pruner run output. #[derive(Debug)] @@ -17,6 +17,18 @@ impl From for PrunerOutput { } } +/// Represents information of a pruner run for a segment. +#[derive(Debug, Clone, PartialEq, Eq, Display)] +#[display("(table={segment}, pruned={pruned}, status={progress})")] +pub struct PrunedSegmentInfo { + /// The pruned segment + pub segment: PruneSegment, + /// Number of pruned entries + pub pruned: usize, + /// Prune progress + pub progress: PruneProgress, +} + /// Segment pruning output. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub struct SegmentOutput { @@ -67,16 +79,18 @@ impl SegmentOutputCheckpoint { } /// Progress of pruning. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, Display)] pub enum PruneProgress { /// There is more data to prune. + #[display("HasMoreData({_0})")] HasMoreData(PruneInterruptReason), /// Pruning has been finished. + #[display("Finished")] Finished, } /// Reason for interrupting a prune run. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, Display)] pub enum PruneInterruptReason { /// Prune run timed out. Timeout, @@ -87,17 +101,6 @@ pub enum PruneInterruptReason { } impl PruneInterruptReason { - /// Creates new [`PruneInterruptReason`] based on the [`PruneLimiter`]. 
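The new `PrunedSegmentInfo` shown above gets its `Display` impl from `derive_more` via the `#[display("(table={segment}, pruned={pruned}, status={progress})")]` attribute. A standalone approximation using only `std::fmt`, so the expansion is visible (field types are simplified to strings; the real struct holds `PruneSegment` and `PruneProgress`):

```rust
use std::fmt;

/// Stand-in for `PrunedSegmentInfo`; the real type derives this impl through
/// `derive_more::Display` instead of writing it out.
struct PrunedSegmentInfo {
    segment: &'static str,
    pruned: usize,
    progress: &'static str,
}

impl fmt::Display for PrunedSegmentInfo {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Roughly what the `#[display(...)]` attribute expands to.
        write!(f, "(table={}, pruned={}, status={})", self.segment, self.pruned, self.progress)
    }
}

fn main() {
    let info = PrunedSegmentInfo { segment: "Receipts", pruned: 512, progress: "Finished" };
    assert_eq!(info.to_string(), "(table=Receipts, pruned=512, status=Finished)");
}
```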
- pub fn new(limiter: &PruneLimiter) -> Self { - if limiter.is_time_limit_reached() { - Self::Timeout - } else if limiter.is_deleted_entries_limit_reached() { - Self::DeletedEntriesLimitReached - } else { - Self::Unknown - } - } - /// Returns `true` if the reason is timeout. pub const fn is_timeout(&self) -> bool { matches!(self, Self::Timeout) @@ -110,19 +113,6 @@ impl PruneInterruptReason { } impl PruneProgress { - /// Creates new [`PruneProgress`]. - /// - /// If `done == true`, returns [`PruneProgress::Finished`], otherwise - /// [`PruneProgress::HasMoreData`] is returned with [`PruneInterruptReason`] according to the - /// passed limiter. - pub fn new(done: bool, limiter: &PruneLimiter) -> Self { - if done { - Self::Finished - } else { - Self::HasMoreData(PruneInterruptReason::new(limiter)) - } - } - /// Returns `true` if prune run is finished. pub const fn is_finished(&self) -> bool { matches!(self, Self::Finished) diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 7ffb06ce960..cc1c8edcb8d 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -13,14 +13,16 @@ workspace = true [dependencies] # reth -reth-chainspec.workspace = true reth-primitives.workspace = true reth-storage-errors.workspace = true reth-execution-errors.workspace = true -reth-consensus-common.workspace = true reth-prune-types.workspace = true reth-storage-api.workspace = true reth-trie = { workspace = true, optional = true } +reth-primitives-traits.workspace = true + +# alloy +alloy-eips.workspace = true alloy-primitives.workspace = true # revm @@ -30,10 +32,34 @@ revm.workspace = true reth-trie.workspace = true reth-ethereum-forks.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true [features] -default = ["std", "c-kzg"] -std = [] -c-kzg = ["revm/c-kzg"] -test-utils = ["dep:reth-trie"] -serde = ["revm/serde"] +default = ["std"] +std = [ + "reth-primitives/std", + "alloy-primitives/std", + "revm/std", + "alloy-eips/std", + "alloy-consensus/std", + "reth-primitives-traits/std", + "reth-ethereum-forks/std" +] +witness = ["dep:reth-trie"] +test-utils = [ + "dep:reth-trie", + "reth-primitives/test-utils", + "reth-trie?/test-utils", + "revm/test-utils", + "reth-prune-types/test-utils", + "reth-primitives-traits/test-utils", +] +serde = [ + "revm/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "reth-primitives-traits/serde", + "reth-trie?/serde", + "reth-ethereum-forks/serde" +] diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index a63681aa132..c980bdc987c 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -1,9 +1,12 @@ //! Helper for handling execution of multiple blocks. use alloc::vec::Vec; -use alloy_primitives::{map::HashSet, Address, BlockNumber}; + +use alloy_eips::eip7685::Requests; +use alloy_primitives::{map::HashSet, Address, BlockNumber, Log}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; -use reth_primitives::{Receipt, Receipts, Request, Requests}; +use reth_primitives::Receipts; +use reth_primitives_traits::Receipt; use reth_prune_types::{PruneMode, PruneModes, PruneSegmentError, MINIMUM_PRUNING_DISTANCE}; use revm::db::states::bundle_state::BundleRetention; @@ -11,8 +14,8 @@ use revm::db::states::bundle_state::BundleRetention; /// - recording receipts during execution of multiple blocks. /// - pruning receipts according to the pruning configuration. 
/// - batch range if known -#[derive(Debug, Default)] -pub struct BlockBatchRecord { +#[derive(Debug)] +pub struct BlockBatchRecord { /// Pruning configuration. prune_modes: PruneModes, /// The collection of receipts. @@ -20,7 +23,7 @@ pub struct BlockBatchRecord { /// The inner vector stores receipts ordered by transaction number. /// /// If receipt is None it means it is pruned. - receipts: Receipts, + receipts: Receipts, /// The collection of EIP-7685 requests. /// Outer vector stores requests for each block sequentially. /// The inner vector stores requests ordered by transaction number. @@ -40,9 +43,25 @@ pub struct BlockBatchRecord { tip: Option, } -impl BlockBatchRecord { +impl Default for BlockBatchRecord { + fn default() -> Self { + Self { + prune_modes: Default::default(), + receipts: Default::default(), + requests: Default::default(), + pruning_address_filter: Default::default(), + first_block: Default::default(), + tip: Default::default(), + } + } +} + +impl BlockBatchRecord { /// Create a new receipts recorder with the given pruning configuration. - pub fn new(prune_modes: PruneModes) -> Self { + pub fn new(prune_modes: PruneModes) -> Self + where + T: Default, + { Self { prune_modes, ..Default::default() } } @@ -72,12 +91,12 @@ impl BlockBatchRecord { } /// Returns the recorded receipts. - pub const fn receipts(&self) -> &Receipts { + pub const fn receipts(&self) -> &Receipts { &self.receipts } /// Returns all recorded receipts. - pub fn take_receipts(&mut self) -> Receipts { + pub fn take_receipts(&mut self) -> Receipts { core::mem::take(&mut self.receipts) } @@ -93,15 +112,15 @@ impl BlockBatchRecord { /// Returns the [`BundleRetention`] for the given block based on the configured prune modes. pub fn bundle_retention(&self, block_number: BlockNumber) -> BundleRetention { - if self.tip.map_or(true, |tip| { + if self.tip.is_none_or(|tip| { !self .prune_modes .account_history - .map_or(false, |mode| mode.should_prune(block_number, tip)) && + .is_some_and(|mode| mode.should_prune(block_number, tip)) && !self .prune_modes .storage_history - .map_or(false, |mode| mode.should_prune(block_number, tip)) + .is_some_and(|mode| mode.should_prune(block_number, tip)) }) { BundleRetention::Reverts } else { @@ -110,7 +129,10 @@ impl BlockBatchRecord { } /// Save receipts to the executor. - pub fn save_receipts(&mut self, receipts: Vec) -> Result<(), BlockExecutionError> { + pub fn save_receipts(&mut self, receipts: Vec) -> Result<(), BlockExecutionError> + where + T: Receipt, + { let mut receipts = receipts.into_iter().map(Some).collect(); // Prune receipts if necessary. self.prune_receipts(&mut receipts).map_err(InternalBlockExecutionError::from)?; @@ -120,10 +142,10 @@ impl BlockBatchRecord { } /// Prune receipts according to the pruning configuration. 
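The hunk above swaps `#[derive(Debug, Default)]` on `BlockBatchRecord` for a hand-written `Default` impl. The likely motivation (an assumption on my part, but the standard one for this change): the derive adds a `T: Default` bound on every type parameter even when no field needs it, which would force the receipt type itself to be `Default`. A minimal reproduction under simplified names:

```rust
#[derive(Debug)]
struct Recorder<T> {
    /// `Vec` is `Default` for any `T`, bound or no bound.
    receipts: Vec<Vec<Option<T>>>,
    first_block: Option<u64>,
}

// Manual impl: no `T: Default` requirement, unlike `#[derive(Default)]`,
// which would emit `impl<T: Default> Default for Recorder<T>`.
impl<T> Default for Recorder<T> {
    fn default() -> Self {
        Self { receipts: Default::default(), first_block: Default::default() }
    }
}

/// A receipt type that deliberately has no `Default` impl.
#[derive(Debug)]
struct Receipt(#[allow(dead_code)] u64);

fn main() {
    // Compiles even though `Receipt: Default` does not hold; with the derive,
    // `Recorder::<Receipt>::default()` would fail to resolve.
    let recorder: Recorder<Receipt> = Recorder::default();
    println!("{recorder:?}");
}
```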
- fn prune_receipts( - &mut self, - receipts: &mut Vec>, - ) -> Result<(), PruneSegmentError> { + fn prune_receipts(&mut self, receipts: &mut Vec>) -> Result<(), PruneSegmentError> + where + T: Receipt, + { let (Some(first_block), Some(tip)) = (self.first_block, self.tip) else { return Ok(()) }; let block_number = first_block + self.receipts.len() as u64; @@ -131,7 +153,7 @@ impl BlockBatchRecord { // Block receipts should not be retained if self.prune_modes.receipts == Some(PruneMode::Full) || // [`PruneSegment::Receipts`] takes priority over [`PruneSegment::ContractLogs`] - self.prune_modes.receipts.map_or(false, |mode| mode.should_prune(block_number, tip)) + self.prune_modes.receipts.is_some_and(|mode| mode.should_prune(block_number, tip)) { receipts.clear(); return Ok(()) @@ -160,7 +182,7 @@ impl BlockBatchRecord { // If there is an address_filter, it does not contain any of the // contract addresses, then remove this receipt. let inner_receipt = receipt.as_ref().expect("receipts have not been pruned"); - if !inner_receipt.logs.iter().any(|log| filter.contains(&log.address)) { + if !inner_receipt.logs().iter().any(|log| filter.contains(&log.address)) { receipt.take(); } } @@ -170,8 +192,8 @@ impl BlockBatchRecord { } /// Save EIP-7685 requests to the executor. - pub fn save_requests(&mut self, requests: Vec) { - self.requests.push(requests.into()); + pub fn save_requests(&mut self, requests: Requests) { + self.requests.push(requests); } } @@ -185,7 +207,7 @@ mod tests { #[test] fn test_save_receipts_empty() { - let mut recorder = BlockBatchRecord::default(); + let mut recorder: BlockBatchRecord = BlockBatchRecord::default(); // Create an empty vector of receipts let receipts = vec![]; diff --git a/crates/payload/builder/src/database.rs b/crates/revm/src/cached.rs similarity index 60% rename from crates/payload/builder/src/database.rs rename to crates/revm/src/cached.rs index d63f7322dee..5d5262adc5b 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/revm/src/cached.rs @@ -1,13 +1,13 @@ //! Database adapters for payload building. -use alloy_primitives::{Address, B256, U256}; -use reth_primitives::revm_primitives::{ +use alloy_primitives::{ + map::{Entry, HashMap}, + Address, B256, U256, +}; +use core::cell::RefCell; +use revm::primitives::{ db::{Database, DatabaseRef}, AccountInfo, Bytecode, }; -use std::{ - cell::RefCell, - collections::{hash_map::Entry, HashMap}, -}; /// A container type that caches reads from an underlying [`DatabaseRef`]. /// @@ -17,15 +17,15 @@ use std::{ /// # Example /// /// ``` -/// use reth_payload_builder::database::CachedReads; +/// use reth_revm::cached::CachedReads; /// use revm::db::{DatabaseRef, State}; /// /// fn build_payload(db: DB) { /// let mut cached_reads = CachedReads::default(); -/// let db_ref = cached_reads.as_db(db); -/// // this is `Database` and can be used to build a payload, it never writes to `CachedReads` or the underlying database, but all reads from the underlying database are cached in `CachedReads`. +/// let db = cached_reads.as_db_mut(db); +/// // this is `Database` and can be used to build a payload, it never commits to `CachedReads` or the underlying database, but all reads from the underlying database are cached in `CachedReads`. /// // Subsequent payload build attempts can use cached reads and avoid hitting the underlying database. 
-/// let db = State::builder().with_database_ref(db_ref).build(); +/// let state = State::builder().with_database(db).build(); /// } /// ``` #[derive(Debug, Clone, Default)] @@ -40,10 +40,11 @@ pub struct CachedReads { impl CachedReads { /// Gets a [`DatabaseRef`] that will cache reads from the given database. pub fn as_db(&mut self, db: DB) -> CachedReadsDBRef<'_, DB> { - CachedReadsDBRef { inner: RefCell::new(self.as_db_mut(db)) } + self.as_db_mut(db).into_db() } - fn as_db_mut(&mut self, db: DB) -> CachedReadsDbMut<'_, DB> { + /// Gets a mutable [`Database`] that will cache reads from the underlying database. + pub fn as_db_mut(&mut self, db: DB) -> CachedReadsDbMut<'_, DB> { CachedReadsDbMut { cached: self, db } } @@ -56,6 +57,15 @@ impl CachedReads { ) { self.accounts.insert(address, CachedAccount { info: Some(info), storage }); } + + /// Extends current cache with entries from another [`CachedReads`] instance. + /// + /// Note: It is expected that both instances are based on the exact same state. + pub fn extend(&mut self, other: Self) { + self.accounts.extend(other.accounts); + self.contracts.extend(other.contracts); + self.block_hashes.extend(other.block_hashes); + } } /// A [Database] that caches reads inside [`CachedReads`]. @@ -67,6 +77,28 @@ pub struct CachedReadsDbMut<'a, DB> { pub db: DB, } +impl<'a, DB> CachedReadsDbMut<'a, DB> { + /// Converts this [`Database`] implementation into a [`DatabaseRef`] that will still cache + /// reads. + pub const fn into_db(self) -> CachedReadsDBRef<'a, DB> { + CachedReadsDBRef { inner: RefCell::new(self) } + } + + /// Returns access to wrapped [`DatabaseRef`]. + pub const fn inner(&self) -> &DB { + &self.db + } +} + +impl AsRef for CachedReadsDbMut<'_, DB> +where + DB: AsRef, +{ + fn as_ref(&self) -> &T { + self.inner().as_ref() + } +} + impl Database for CachedReadsDbMut<'_, DB> { type Error = ::Error; @@ -161,3 +193,57 @@ impl CachedAccount { Self { info, storage: HashMap::default() } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_extend_with_two_cached_reads() { + // Setup test data + let hash1 = B256::from_slice(&[1u8; 32]); + let hash2 = B256::from_slice(&[2u8; 32]); + let address1 = Address::from_slice(&[1u8; 20]); + let address2 = Address::from_slice(&[2u8; 20]); + + // Create primary cache + let mut primary = { + let mut cache = CachedReads::default(); + cache.accounts.insert(address1, CachedAccount::new(Some(AccountInfo::default()))); + cache.contracts.insert(hash1, Bytecode::default()); + cache.block_hashes.insert(1, hash1); + cache + }; + + // Create additional cache + let additional = { + let mut cache = CachedReads::default(); + cache.accounts.insert(address2, CachedAccount::new(Some(AccountInfo::default()))); + cache.contracts.insert(hash2, Bytecode::default()); + cache.block_hashes.insert(2, hash2); + cache + }; + + // Extending primary with additional cache + primary.extend(additional); + + // Verify the combined state + assert!( + primary.accounts.len() == 2 && + primary.contracts.len() == 2 && + primary.block_hashes.len() == 2, + "All maps should contain 2 entries" + ); + + // Verify specific entries + assert!( + primary.accounts.contains_key(&address1) && + primary.accounts.contains_key(&address2) && + primary.contracts.contains_key(&hash1) && + primary.contracts.contains_key(&hash2) && + primary.block_hashes.get(&1) == Some(&hash1) && + primary.block_hashes.get(&2) == Some(&hash2), + "All expected entries should be present" + ); + } +} diff --git a/crates/revm/src/database.rs 
b/crates/revm/src/database.rs index 8f40d2be8d9..682aca6cf37 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -79,6 +79,12 @@ impl StateProviderDatabase { } } +impl AsRef for StateProviderDatabase { + fn as_ref(&self) -> &DB { + self + } +} + impl Deref for StateProviderDatabase { type Target = DB; @@ -101,21 +107,21 @@ impl Database for StateProviderDatabase { /// Returns `Ok` with `Some(AccountInfo)` if the account exists, /// `None` if it doesn't, or an error if encountered. fn basic(&mut self, address: Address) -> Result, Self::Error> { - DatabaseRef::basic_ref(self, address) + self.basic_ref(address) } /// Retrieves the bytecode associated with a given code hash. /// /// Returns `Ok` with the bytecode if found, or the default bytecode otherwise. fn code_by_hash(&mut self, code_hash: B256) -> Result { - DatabaseRef::code_by_hash_ref(self, code_hash) + self.code_by_hash_ref(code_hash) } /// Retrieves the storage value at a specific index for a given address. /// /// Returns `Ok` with the storage value, or the default value if not found. fn storage(&mut self, address: Address, index: U256) -> Result { - DatabaseRef::storage_ref(self, address, index) + self.storage_ref(address, index) } /// Retrieves the block hash for a given block number. @@ -123,7 +129,7 @@ impl Database for StateProviderDatabase { /// Returns `Ok` with the block hash if found, or the default hash otherwise. /// Note: It safely casts the `number` to `u64`. fn block_hash(&mut self, number: u64) -> Result { - DatabaseRef::block_hash_ref(self, number) + self.block_hash_ref(number) } } diff --git a/crates/revm/src/either.rs b/crates/revm/src/either.rs new file mode 100644 index 00000000000..e93ba3a8d01 --- /dev/null +++ b/crates/revm/src/either.rs @@ -0,0 +1,52 @@ +use alloy_primitives::{Address, B256, U256}; +use revm::{ + primitives::{AccountInfo, Bytecode}, + Database, +}; + +/// An enum type that can hold either of two different [`Database`] implementations. +/// +/// This allows flexible usage of different [`Database`] types in the same context. +#[derive(Debug, Clone)] +pub enum Either { + /// A value of type `L`. + Left(L), + /// A value of type `R`. + Right(R), +} + +impl Database for Either +where + L: Database, + R: Database, +{ + type Error = L::Error; + + fn basic(&mut self, address: Address) -> Result, Self::Error> { + match self { + Self::Left(db) => db.basic(address), + Self::Right(db) => db.basic(address), + } + } + + fn code_by_hash(&mut self, code_hash: B256) -> Result { + match self { + Self::Left(db) => db.code_by_hash(code_hash), + Self::Right(db) => db.code_by_hash(code_hash), + } + } + + fn storage(&mut self, address: Address, index: U256) -> Result { + match self { + Self::Left(db) => db.storage(address, index), + Self::Right(db) => db.storage(address, index), + } + } + + fn block_hash(&mut self, number: u64) -> Result { + match self { + Self::Left(db) => db.block_hash(number), + Self::Right(db) => db.block_hash(number), + } + } +} diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index 5515357d0d2..5f18a0fe616 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -11,13 +11,14 @@ extern crate alloc; -/// Contains glue code for integrating reth database into revm's [Database]. -pub mod database; - pub mod batch; -/// State changes that are not related to transactions. -pub mod state_change; +/// Cache database that reads from an underlying [`DatabaseRef`]. +/// Database adapters for payload building. 
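[Editor's sketch, not part of the diff, for the `either::Either` adapter introduced above: it lets one code path switch between two database back-ends at runtime. revm's `CacheDB` and `EmptyDB` both report `Infallible` errors, which satisfies the matching-error requirement implied by `type Error = L::Error`.]

    use revm::db::{CacheDB, EmptyDB};

    fn select_db(use_cache: bool) -> Either<CacheDB<EmptyDB>, EmptyDB> {
        if use_cache {
            // wrap a read cache around the (empty) backing database
            Either::Left(CacheDB::new(EmptyDB::default()))
        } else {
            Either::Right(EmptyDB::default())
        }
    }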
+pub mod cached;
+
+/// Contains glue code for integrating reth database into revm's [Database].
+pub mod database;

 /// Common test helpers
 #[cfg(any(test, feature = "test-utils"))]
@@ -25,3 +26,10 @@ pub mod test_utils;

 // Convenience re-exports.
 pub use revm::{self, *};
+
+/// Either type for flexible usage of different database types in the same context.
+pub mod either;
+
+/// Helper types for execution witness generation.
+#[cfg(feature = "witness")]
+pub mod witness;
diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs
index 813997c72d1..9460d3e1c78 100644
--- a/crates/revm/src/test_utils.rs
+++ b/crates/revm/src/test_utils.rs
@@ -6,13 +6,13 @@ use alloy_primitives::{
 };
 use reth_primitives::{Account, Bytecode};
 use reth_storage_api::{
-    AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider,
-    StorageRootProvider,
+    AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider,
+    StateRootProvider, StorageRootProvider,
 };
 use reth_storage_errors::provider::ProviderResult;
 use reth_trie::{
-    updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof,
-    TrieInput,
+    updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, KeccakKeyHasher,
+    MultiProof, StorageMultiProof, StorageProof, TrieInput,
 };

 /// Mock state for testing
@@ -112,6 +112,15 @@ impl StorageRootProvider for StateProviderTest {
     ) -> ProviderResult<StorageProof> {
         unimplemented!("proof generation is not supported")
     }
+
+    fn storage_multiproof(
+        &self,
+        _address: Address,
+        _slots: &[B256],
+        _hashed_storage: HashedStorage,
+    ) -> ProviderResult<StorageMultiProof> {
+        unimplemented!("proof generation is not supported")
+    }
 }

 impl StateProofProvider for StateProviderTest {
@@ -141,6 +150,12 @@ impl StateProofProvider for StateProviderTest {
     }
 }

+impl HashedPostStateProvider for StateProviderTest {
+    fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> HashedPostState {
+        HashedPostState::from_bundle_state::<KeccakKeyHasher>(bundle_state.state())
+    }
+}
+
 impl StateProvider for StateProviderTest {
     fn storage(
         &self,
diff --git a/crates/revm/src/witness.rs b/crates/revm/src/witness.rs
new file mode 100644
index 00000000000..6140de9d48a
--- /dev/null
+++ b/crates/revm/src/witness.rs
@@ -0,0 +1,76 @@
+use alloy_primitives::{keccak256, map::B256HashMap, Bytes, B256};
+use reth_trie::{HashedPostState, HashedStorage};
+use revm::State;
+
+/// Tracks state changes during execution.
+#[derive(Debug, Clone, Default)]
+pub struct ExecutionWitnessRecord {
+    /// Records all state changes
+    pub hashed_state: HashedPostState,
+    /// Map of all contract codes (created / accessed) to their preimages that were required during
+    /// the execution of the block, including during state root recomputation.
+    ///
+    /// `keccak(bytecodes) => bytecodes`
+    pub codes: B256HashMap<Bytes>,
+    /// Map of all hashed account and storage keys (addresses and slots) to their preimages
+    /// (unhashed account addresses and storage slots, respectively) that were required during
+    /// the execution of the block.
+    ///
+    /// `keccak(address|slot) => address|slot`
+    pub keys: B256HashMap<Bytes>,
+}
+
+impl ExecutionWitnessRecord {
+    /// Records the state after execution.
+    pub fn record_executed_state<DB>(&mut self, statedb: &State<DB>) {
+        self.codes = statedb
+            .cache
+            .contracts
+            .iter()
+            .map(|(hash, code)| (*hash, code.original_bytes()))
+            .chain(
+                // The cache state does not hold all contracts, especially those created
+                // within the block: such contracts exist only in the bundle state, so we
+                // need to include them as well.
+                statedb
+                    .bundle_state
+                    .contracts
+                    .iter()
+                    .map(|(hash, code)| (*hash, code.original_bytes())),
+            )
+            .collect();
+
+        for (address, account) in &statedb.cache.accounts {
+            let hashed_address = keccak256(address);
+            self.hashed_state
+                .accounts
+                .insert(hashed_address, account.account.as_ref().map(|a| (&a.info).into()));
+
+            let storage = self
+                .hashed_state
+                .storages
+                .entry(hashed_address)
+                .or_insert_with(|| HashedStorage::new(account.status.was_destroyed()));
+
+            if let Some(account) = &account.account {
+                self.keys.insert(hashed_address, address.to_vec().into());
+
+                for (slot, value) in &account.storage {
+                    let slot = B256::from(*slot);
+                    let hashed_slot = keccak256(slot);
+                    storage.storage.insert(hashed_slot, *value);
+
+                    self.keys.insert(hashed_slot, slot.into());
+                }
+            }
+        }
+    }
+
+    /// Creates the record from the state after execution.
+    pub fn from_executed_state<DB>(state: &State<DB>) -> Self {
+        let mut record = Self::default();
+        record.record_executed_state(state);
+        record
+    }
+}
diff --git a/crates/rpc/ipc/src/client/mod.rs b/crates/rpc/ipc/src/client/mod.rs
index 8f2fe0255c7..48f58e77a4a 100644
--- a/crates/rpc/ipc/src/client/mod.rs
+++ b/crates/rpc/ipc/src/client/mod.rs
@@ -136,10 +136,9 @@ pub enum IpcError {

 #[cfg(test)]
 mod tests {
-    use interprocess::local_socket::ListenerOptions;
-
     use super::*;
     use crate::server::dummy_name;
+    use interprocess::local_socket::ListenerOptions;

     #[tokio::test]
     async fn test_connect() {
diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml
index 6e9e469ec44..abcdf98b544 100644
--- a/crates/rpc/rpc-api/Cargo.toml
+++ b/crates/rpc/rpc-api/Cargo.toml
@@ -13,7 +13,6 @@ workspace = true

 [dependencies]
 # reth
-reth-primitives.workspace = true
 reth-rpc-eth-api.workspace = true
 reth-engine-primitives.workspace = true
 reth-network-peers.workspace = true
@@ -37,12 +36,9 @@ alloy-rpc-types-engine.workspace = true
 # misc
 jsonrpsee = { workspace = true, features = ["server", "macros"] }

-[dev-dependencies]
-serde_json.workspace = true
-
 [features]
 client = [
     "jsonrpsee/client",
     "jsonrpsee/async-client",
-    "reth-rpc-eth-api/client"
+    "reth-rpc-eth-api/client",
 ]
diff --git a/crates/rpc/rpc-api/src/anvil.rs b/crates/rpc/rpc-api/src/anvil.rs
index baa09166b83..0930264a63b 100644
--- a/crates/rpc/rpc-api/src/anvil.rs
+++ b/crates/rpc/rpc-api/src/anvil.rs
@@ -1,8 +1,8 @@
 use jsonrpsee::{core::RpcResult, proc_macros::rpc};

 use alloy_primitives::{Address, Bytes, B256, U256};
-use alloy_rpc_types::Block;
 use alloy_rpc_types_anvil::{Forking, Metadata, MineOptions, NodeInfo};
+use alloy_rpc_types_eth::Block;

 /// Anvil rpc interface.
/// https://book.getfoundry.sh/reference/anvil/#custom-methods diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 3e03210f1ff..c2d1c605ff1 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -1,12 +1,11 @@ +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256}; -use alloy_rpc_types::{Block, Bundle, StateContext}; use alloy_rpc_types_debug::ExecutionWitness; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{transaction::TransactionRequest, Block, Bundle, StateContext}; use alloy_rpc_types_trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{BlockId, BlockNumberOrTag}; /// Debug rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "debug"))] @@ -137,8 +136,7 @@ pub trait DebugApi { /// to their preimages that were required during the execution of the block, including during /// state root recomputation. /// - /// The first argument is the block number or block hash. The second argument is a boolean - /// indicating whether to include the preimages of keys in the response. + /// The first argument is the block number or block hash. #[method(name = "executionWitness")] async fn debug_execution_witness(&self, block: BlockNumberOrTag) -> RpcResult; @@ -387,3 +385,26 @@ pub trait DebugApi { #[method(name = "writeMutexProfile")] async fn debug_write_mutex_profile(&self, file: String) -> RpcResult<()>; } + +/// An extension to the `debug_` namespace that provides additional methods for retrieving +/// witnesses. +/// +/// This is separate from the regular `debug_` api, because this depends on the network specific +/// params. For optimism this will expect the optimism specific payload attributes +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "debug"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "debug"))] +pub trait DebugExecutionWitnessApi { + /// The `debug_executePayload` method allows for re-execution of a group of transactions with + /// the purpose of generating an execution witness. The witness comprises of a map of all + /// hashed trie nodes to their preimages that were required during the execution of the block, + /// including during state root recomputation. + /// + /// The first argument is the parent block hash. The second argument is the payload + /// attributes for the new block. + #[method(name = "executePayload")] + async fn execute_payload( + &self, + parent_block_hash: B256, + attributes: Attributes, + ) -> RpcResult; +} diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 50181d23a75..f78b8349be8 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -3,18 +3,18 @@ //! This contains the `engine_` namespace and the subset of the `eth_` namespace that is exposed to //! the consensus client. 
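[Editor's sketch, not part of the diff, for the `DebugExecutionWitnessApi` trait added in debug.rs above, driven through the jsonrpsee-generated client (available with the `client` feature). The `ExecutionWitness` return type and the simplified client bounds are assumptions where the stripped generics left `RpcResult` bare; Ethereum `PayloadAttributes` stands in for the network-specific attributes type, and all field values are placeholders.]

    use alloy_rpc_types_engine::PayloadAttributes;

    async fn fetch_witness<C>(
        client: &C,
        parent_block_hash: B256,
    ) -> Result<ExecutionWitness, jsonrpsee::core::client::Error>
    where
        C: DebugExecutionWitnessApiClient<PayloadAttributes> + Sync,
    {
        let attributes = PayloadAttributes {
            timestamp: 0, // placeholder values throughout
            prev_randao: B256::ZERO,
            suggested_fee_recipient: Address::ZERO,
            withdrawals: None,
            parent_beacon_block_root: None,
        };
        client.execute_payload(parent_block_hash, attributes).await
    }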
-use alloy_eips::{eip4844::BlobAndProofV1, BlockId, BlockNumberOrTag}; +use alloy_eips::{eip4844::BlobAndProofV1, eip7685::Requests, BlockId, BlockNumberOrTag}; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, BlockHash, Bytes, B256, U256, U64}; -use alloy_rpc_types::{ - state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, -}; use alloy_rpc_types_engine::{ - ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadBodiesV2, ExecutionPayloadInputV2, - ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, - PayloadId, PayloadStatus, TransitionConfiguration, + ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, + ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, + TransitionConfiguration, +}; +use alloy_rpc_types_eth::{ + state::StateOverride, transaction::TransactionRequest, BlockOverrides, + EIP1186AccountProofResponse, Filter, Log, SyncStatus, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_engine_primitives::EngineTypes; @@ -54,9 +54,10 @@ pub trait EngineApi { #[method(name = "newPayloadV4")] async fn new_payload_v4( &self, - payload: ExecutionPayloadV4, + payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: B256, + execution_requests: Requests, ) -> RpcResult; /// See also @@ -109,7 +110,10 @@ pub trait EngineApi { /// Note: /// > Provider software MAY stop the corresponding build process after serving this call. #[method(name = "getPayloadV1")] - async fn get_payload_v1(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v1( + &self, + payload_id: PayloadId, + ) -> RpcResult; /// See also /// @@ -117,7 +121,10 @@ pub trait EngineApi { /// payload build process at the time of receiving this call. Note: /// > Provider software MAY stop the corresponding build process after serving this call. #[method(name = "getPayloadV2")] - async fn get_payload_v2(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v2( + &self, + payload_id: PayloadId, + ) -> RpcResult; /// Post Cancun payload handler which also returns a blobs bundle. /// @@ -127,7 +134,10 @@ pub trait EngineApi { /// payload build process at the time of receiving this call. Note: /// > Provider software MAY stop the corresponding build process after serving this call. #[method(name = "getPayloadV3")] - async fn get_payload_v3(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v3( + &self, + payload_id: PayloadId, + ) -> RpcResult; /// Post Prague payload handler. /// @@ -137,7 +147,10 @@ pub trait EngineApi { /// payload build process at the time of receiving this call. Note: /// > Provider software MAY stop the corresponding build process after serving this call. 
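[Editor's sketch, not part of the diff; the `getPayloadV4` handler this doc note introduces continues directly below. It shows what the reworked `newPayloadV4` looks like from the client side: after this change the Prague payload is the V3 payload plus a separate EIP-7685 `Requests` sidecar rather than a dedicated `ExecutionPayloadV4` type. `Requests::default()` (an empty sidecar) and the simplified client bounds are assumptions.]

    async fn submit_prague_payload<EngineT, C>(
        client: &C,
        payload: ExecutionPayloadV3,
        versioned_hashes: Vec<B256>,
        parent_beacon_block_root: B256,
    ) -> Result<PayloadStatus, jsonrpsee::core::client::Error>
    where
        EngineT: EngineTypes,
        C: EngineApiClient<EngineT> + Sync,
    {
        // real callers pass the requests collected during execution; empty sidecar here
        let requests = Requests::default();
        client.new_payload_v4(payload, versioned_hashes, parent_beacon_block_root, requests).await
    }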
#[method(name = "getPayloadV4")] - async fn get_payload_v4(&self, payload_id: PayloadId) -> RpcResult; + async fn get_payload_v4( + &self, + payload_id: PayloadId, + ) -> RpcResult; /// See also #[method(name = "getPayloadBodiesByHashV1")] @@ -146,13 +159,6 @@ pub trait EngineApi { block_hashes: Vec, ) -> RpcResult; - /// See also - #[method(name = "getPayloadBodiesByHashV2")] - async fn get_payload_bodies_by_hash_v2( - &self, - block_hashes: Vec, - ) -> RpcResult; - /// See also /// /// Returns the execution payload bodies by the range starting at `start`, containing `count` @@ -172,16 +178,6 @@ pub trait EngineApi { count: U64, ) -> RpcResult; - /// See also - /// - /// Similar to `getPayloadBodiesByRangeV1`, but returns [`ExecutionPayloadBodiesV2`] - #[method(name = "getPayloadBodiesByRangeV2")] - async fn get_payload_bodies_by_range_v2( - &self, - start: U64, - count: U64, - ) -> RpcResult; - /// See also /// /// Note: This method will be deprecated after the cancun hardfork: diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 73775112dcf..098214f103f 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -21,6 +21,7 @@ mod engine; mod ganache; mod hardhat; mod mev; +mod miner; mod net; mod otterscan; mod reth; @@ -37,9 +38,10 @@ pub use servers::*; pub mod servers { pub use crate::{ admin::AdminApiServer, - debug::DebugApiServer, + debug::{DebugApiServer, DebugExecutionWitnessApiServer}, engine::{EngineApiServer, EngineEthApiServer}, mev::{MevFullApiServer, MevSimApiServer}, + miner::MinerApiServer, net::NetApiServer, otterscan::OtterscanServer, reth::RethApiServer, @@ -65,11 +67,12 @@ pub mod clients { pub use crate::{ admin::AdminApiClient, anvil::AnvilApiClient, - debug::DebugApiClient, + debug::{DebugApiClient, DebugExecutionWitnessApiClient}, engine::{EngineApiClient, EngineEthApiClient}, ganache::GanacheApiClient, hardhat::HardhatApiClient, mev::{MevFullApiClient, MevSimApiClient}, + miner::MinerApiClient, net::NetApiClient, otterscan::OtterscanClient, reth::RethApiClient, diff --git a/crates/rpc/rpc-api/src/miner.rs b/crates/rpc/rpc-api/src/miner.rs new file mode 100644 index 00000000000..3673b51c6eb --- /dev/null +++ b/crates/rpc/rpc-api/src/miner.rs @@ -0,0 +1,21 @@ +use alloy_primitives::{Bytes, U128}; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; + +/// Miner namespace rpc interface that can control miner/builder settings +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "miner"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "miner"))] +pub trait MinerApi { + /// Sets the extra data string that is included when this miner mines a block. + /// + /// Returns an error if the extra data is too long. + #[method(name = "setExtra")] + fn set_extra(&self, record: Bytes) -> RpcResult; + + /// Sets the minimum accepted gas price for the miner. + #[method(name = "setGasPrice")] + fn set_gas_price(&self, gas_price: U128) -> RpcResult; + + /// Sets the gaslimit to target towards during mining. 
+ #[method(name = "setGasLimit")] + fn set_gas_limit(&self, gas_price: U128) -> RpcResult; +} diff --git a/crates/rpc/rpc-api/src/otterscan.rs b/crates/rpc/rpc-api/src/otterscan.rs index ee805b482c3..eb2cb21a2ba 100644 --- a/crates/rpc/rpc-api/src/otterscan.rs +++ b/crates/rpc/rpc-api/src/otterscan.rs @@ -1,17 +1,16 @@ +use alloy_eips::BlockId; use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, TxHash, B256}; -use alloy_rpc_types::Header; use alloy_rpc_types_trace::otterscan::{ BlockDetails, ContractCreator, InternalOperation, OtsBlockTransactions, TraceEntry, TransactionsWithReceipts, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::BlockId; /// Otterscan rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "ots"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "ots"))] -pub trait Otterscan { +pub trait Otterscan { /// Get the block header by block number, required by otterscan. /// Otterscan currently requires this endpoint, used as: /// @@ -20,7 +19,7 @@ pub trait Otterscan { /// /// Ref: #[method(name = "getHeaderByNumber", aliases = ["erigon_getHeaderByNumber"])] - async fn get_header_by_number(&self, block_number: u64) -> RpcResult>; + async fn get_header_by_number(&self, block_number: u64) -> RpcResult>; /// Check if a certain address contains a deployed code. #[method(name = "hasCode")] @@ -48,11 +47,11 @@ pub trait Otterscan { /// Tailor-made and expanded version of eth_getBlockByNumber for block details page in /// Otterscan. #[method(name = "getBlockDetails")] - async fn get_block_details(&self, block_number: u64) -> RpcResult; + async fn get_block_details(&self, block_number: u64) -> RpcResult>; /// Tailor-made and expanded version of eth_getBlockByHash for block details page in Otterscan. #[method(name = "getBlockDetailsByHash")] - async fn get_block_details_by_hash(&self, block_hash: B256) -> RpcResult; + async fn get_block_details_by_hash(&self, block_hash: B256) -> RpcResult>; /// Get paginated transactions for a certain block. Also remove some verbose fields like logs. #[method(name = "getBlockTransactions")] @@ -61,7 +60,7 @@ pub trait Otterscan { block_number: u64, page_number: usize, page_size: usize, - ) -> RpcResult>; + ) -> RpcResult>; /// Gets paginated inbound/outbound transaction calls for a certain address. 
#[method(name = "searchTransactionsBefore")] diff --git a/crates/rpc/rpc-api/src/reth.rs b/crates/rpc/rpc-api/src/reth.rs index 98c31b78f9a..0589ffc00ce 100644 --- a/crates/rpc/rpc-api/src/reth.rs +++ b/crates/rpc/rpc-api/src/reth.rs @@ -1,6 +1,6 @@ +use alloy_eips::BlockId; use alloy_primitives::{Address, U256}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::BlockId; use std::collections::HashMap; /// Reth API namespace for reth-specific methods diff --git a/crates/rpc/rpc-api/src/trace.rs b/crates/rpc/rpc-api/src/trace.rs index 58dda422ab8..41e2b4c1c3e 100644 --- a/crates/rpc/rpc-api/src/trace.rs +++ b/crates/rpc/rpc-api/src/trace.rs @@ -1,13 +1,14 @@ +use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, B256}; -use alloy_rpc_types::{state::StateOverride, BlockOverrides, Index}; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{ + state::StateOverride, transaction::TransactionRequest, BlockOverrides, Index, +}; use alloy_rpc_types_trace::{ filter::TraceFilter, opcode::{BlockOpcodeGas, TransactionOpcodeGas}, parity::*, }; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::BlockId; /// Ethereum trace API #[cfg_attr(not(feature = "client"), rpc(server, namespace = "trace"))] diff --git a/crates/rpc/rpc-api/src/validation.rs b/crates/rpc/rpc-api/src/validation.rs index bbfa673d259..5e4f2e26143 100644 --- a/crates/rpc/rpc-api/src/validation.rs +++ b/crates/rpc/rpc-api/src/validation.rs @@ -2,6 +2,7 @@ use alloy_rpc_types_beacon::relay::{ BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, + BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, }; use jsonrpsee::proc_macros::rpc; @@ -22,4 +23,18 @@ pub trait BlockSubmissionValidationApi { &self, request: BuilderBlockValidationRequestV2, ) -> jsonrpsee::core::RpcResult<()>; + + /// A Request to validate a block submission. + #[method(name = "validateBuilderSubmissionV3")] + async fn validate_builder_submission_v3( + &self, + request: BuilderBlockValidationRequestV3, + ) -> jsonrpsee::core::RpcResult<()>; + + /// A Request to validate a block submission. 
+ #[method(name = "validateBuilderSubmissionV4")] + async fn validate_builder_submission_v4( + &self, + request: BuilderBlockValidationRequestV4, + ) -> jsonrpsee::core::RpcResult<()>; } diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 817d2a3d76b..7dbbe7608a7 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -15,9 +15,11 @@ workspace = true # reth reth-ipc.workspace = true reth-chainspec.workspace = true +reth-consensus.workspace = true reth-network-api.workspace = true reth-node-core.workspace = true reth-provider.workspace = true +reth-primitives.workspace = true reth-rpc.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true @@ -28,12 +30,6 @@ reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true reth-evm.workspace = true reth-engine-primitives.workspace = true -reth-primitives.workspace = true - -# ethereum -alloy-network.workspace = true -alloy-rpc-types.workspace = true -alloy-serde.workspace = true # rpc/net jsonrpsee = { workspace = true, features = ["server"] } @@ -50,6 +46,8 @@ metrics.workspace = true serde = { workspace = true, features = ["derive"] } thiserror.workspace = true tracing.workspace = true +tokio-util = { workspace = true } +tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } [dev-dependencies] reth-chainspec.workspace = true @@ -64,14 +62,14 @@ reth-rpc-api = { workspace = true, features = ["client"] } reth-rpc-engine-api.workspace = true reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } -reth-tokio-util.workspace = true -reth-node-api.workspace = true reth-rpc-types-compat.workspace = true +reth-primitives.workspace = true alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types-trace.workspace = true alloy-rpc-types-engine.workspace = true +alloy-eips.workspace = true tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } serde_json.workspace = true diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 25626e4f12d..f22fd554ca6 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -221,6 +221,30 @@ impl AuthRpcModule { self.module_mut().merge(other.into()).map(|_| true) } + /// Removes the method with the given name from the configured authenticated methods. + /// + /// Returns `true` if the method was found and removed, `false` otherwise. + pub fn remove_auth_method(&mut self, method_name: &'static str) -> bool { + self.module_mut().remove_method(method_name).is_some() + } + + /// Removes the given methods from the configured authenticated methods. + pub fn remove_auth_methods(&mut self, methods: impl IntoIterator) { + for name in methods { + self.remove_auth_method(name); + } + } + + /// Replace the given [Methods] in the configured authenticated methods. 
+ pub fn replace_auth_methods( + &mut self, + other: impl Into, + ) -> Result { + let other = other.into(); + self.remove_auth_methods(other.method_names()); + self.merge_auth_methods(other) + } + /// Convenience function for starting a server pub async fn start_server( self, diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index 4ff98ae8d50..967f5840c01 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -2,6 +2,7 @@ use std::{net::SocketAddr, path::PathBuf}; use jsonrpsee::server::ServerBuilder; use reth_node_core::{args::RpcServerArgs, utils::get_or_create_jwt_secret_from_path}; +use reth_rpc::ValidationApiConfig; use reth_rpc_eth_types::{EthConfig, EthStateCacheConfig, GasPriceOracleConfig}; use reth_rpc_layer::{JwtError, JwtSecret}; use reth_rpc_server_types::RpcModuleSelection; @@ -27,6 +28,9 @@ pub trait RethRpcServerConfig { /// The configured ethereum RPC settings. fn eth_config(&self) -> EthConfig; + /// The configured ethereum RPC settings. + fn flashbots_config(&self) -> ValidationApiConfig; + /// Returns state cache configuration. fn state_cache_config(&self) -> EthStateCacheConfig; @@ -101,11 +105,15 @@ impl RethRpcServerConfig for RpcServerArgs { .proof_permits(self.rpc_proof_permits) } + fn flashbots_config(&self) -> ValidationApiConfig { + ValidationApiConfig { disallow: self.builder_disallow.clone().unwrap_or_default() } + } + fn state_cache_config(&self) -> EthStateCacheConfig { EthStateCacheConfig { max_blocks: self.rpc_state_cache.max_blocks, max_receipts: self.rpc_state_cache.max_receipts, - max_envs: self.rpc_state_cache.max_envs, + max_headers: self.rpc_state_cache.max_headers, max_concurrent_db_requests: self.rpc_state_cache.max_concurrent_db_requests, } } @@ -124,7 +132,7 @@ impl RethRpcServerConfig for RpcServerArgs { fn transport_rpc_module_config(&self) -> TransportRpcModuleConfig { let mut config = TransportRpcModuleConfig::default() - .with_config(RpcModuleConfig::new(self.eth_config())); + .with_config(RpcModuleConfig::new(self.eth_config(), self.flashbots_config())); if self.http { config = config.with_http( diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 613652678a2..7339c7089e5 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,5 +1,5 @@ use reth_evm::ConfigureEvm; -use reth_primitives::Header; +use reth_primitives::NodePrimitives; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider, StateProviderFactory}; use reth_rpc::{EthFilter, EthPubSub}; use reth_rpc_eth_api::EthApiTypes; @@ -14,22 +14,27 @@ pub type DynEthApiBuilder { +pub struct EthHandlers { /// Main `eth_` request handler pub api: EthApi, /// The async caching layer used by the eth handlers - pub cache: EthStateCache, + pub cache: EthStateCache, /// Polling based filter handler available on all transports - pub filter: EthFilter, + pub filter: EthFilter, /// Handler for subscriptions only available for transports that support it (ws, ipc) - pub pubsub: EthPubSub, + pub pubsub: EthPubSub, } -impl EthHandlers +impl EthHandlers where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, - Pool: Send + Sync + Clone + 'static, - Network: Clone + 'static, + Provider: StateProviderFactory + + BlockReader< + Block = ::Block, + Receipt = ::Receipt, + > + EvmEnvProvider + + Clone + + Unpin + + 'static, Events: CanonStateSubscriptions + Clone + 'static, EthApi: EthApiTypes + 'static, { 
@@ -37,7 +42,7 @@ where /// /// This will spawn all necessary tasks for the handlers. #[allow(clippy::too_many_arguments)] - pub fn bootstrap( + pub fn bootstrap( provider: Provider, pool: Pool, network: Network, @@ -56,15 +61,10 @@ where >, ) -> Self where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, Tasks: TaskSpawner + Clone + 'static, { - let cache = EthStateCache::spawn_with( - provider.clone(), - config.cache, - executor.clone(), - evm_config.clone(), - ); + let cache = EthStateCache::spawn_with(provider.clone(), config.cache, executor.clone()); let new_canonical_blocks = events.canonical_state_stream(); let c = cache.clone(); @@ -88,19 +88,12 @@ where let api = eth_api_builder(&ctx); - let filter = EthFilter::new( - ctx.provider.clone(), - ctx.pool.clone(), - ctx.cache.clone(), - ctx.config.filter_config(), - Box::new(ctx.executor.clone()), - ); + let filter = + EthFilter::new(api.clone(), ctx.config.filter_config(), Box::new(ctx.executor.clone())); let pubsub = EthPubSub::with_spawner( - ctx.provider.clone(), - ctx.pool.clone(), + api.clone(), ctx.events.clone(), - ctx.network.clone(), Box::new(ctx.executor.clone()), ); diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index cd93aeb620e..877e8089786 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -16,31 +16,59 @@ //! Configure only an http server with a selection of [`RethRpcModule`]s //! //! ``` +//! use reth_engine_primitives::PayloadValidator; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::Header; +//! use reth_primitives::{Header, PooledTransactionsElement, TransactionSigned}; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_builder::{ //! RethRpcModule, RpcModuleBuilder, RpcServerConfig, ServerBuilder, TransportRpcModuleConfig, //! }; //! use reth_tasks::TokioTaskExecutor; -//! use reth_transaction_pool::TransactionPool; +//! use reth_transaction_pool::{PoolTransaction, TransactionPool}; +//! use std::sync::Arc; //! -//! pub async fn launch( +//! pub async fn launch< +//! Provider, +//! Pool, +//! Network, +//! Events, +//! EvmConfig, +//! BlockExecutor, +//! Consensus, +//! Validator, +//! >( //! provider: Provider, //! pool: Pool, //! network: Network, //! events: Events, //! evm_config: EvmConfig, //! block_executor: BlockExecutor, +//! consensus: Consensus, +//! validator: Validator, //! ) where -//! Provider: FullRpcProvider + AccountReader + ChangeSetReader, -//! Pool: TransactionPool + 'static, +//! Provider: FullRpcProvider< +//! Transaction = TransactionSigned, +//! Block = reth_primitives::Block, +//! Receipt = reth_primitives::Receipt, +//! Header = reth_primitives::Header, +//! > + AccountReader +//! + ChangeSetReader, +//! Pool: TransactionPool< +//! Transaction: PoolTransaction< +//! Consensus = TransactionSigned, +//! Pooled = PooledTransactionsElement, +//! >, +//! > + Unpin +//! + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, -//! Events: CanonStateSubscriptions + Clone + 'static, -//! EvmConfig: ConfigureEvm
, -//! BlockExecutor: BlockExecutorProvider, +//! Events: +//! CanonStateSubscriptions + Clone + 'static, +//! EvmConfig: ConfigureEvm
, +//! BlockExecutor: BlockExecutorProvider, +//! Consensus: reth_consensus::FullConsensus + Clone + 'static, +//! Validator: PayloadValidator, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -57,8 +85,9 @@ //! events, //! evm_config, //! block_executor, +//! consensus, //! ) -//! .build(transports, Box::new(EthApi::with_spawner)); +//! .build(transports, Box::new(EthApi::with_spawner), Arc::new(validator)); //! let handle = RpcServerConfig::default() //! .with_http(ServerBuilder::default()) //! .start(&transport_modules) @@ -70,10 +99,10 @@ //! //! //! ``` -//! use reth_engine_primitives::EngineTypes; +//! use reth_engine_primitives::{EngineTypes, PayloadValidator}; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_primitives::Header; +//! use reth_primitives::{Header, PooledTransactionsElement, TransactionSigned}; //! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc::EthApi; //! use reth_rpc_api::EngineApiServer; @@ -83,8 +112,10 @@ //! }; //! use reth_rpc_layer::JwtSecret; //! use reth_tasks::TokioTaskExecutor; -//! use reth_transaction_pool::TransactionPool; +//! use reth_transaction_pool::{PoolTransaction, TransactionPool}; +//! use std::sync::Arc; //! use tokio::try_join; +//! //! pub async fn launch< //! Provider, //! Pool, @@ -94,6 +125,8 @@ //! EngineT, //! EvmConfig, //! BlockExecutor, +//! Consensus, +//! Validator, //! >( //! provider: Provider, //! pool: Pool, @@ -102,15 +135,32 @@ //! engine_api: EngineApi, //! evm_config: EvmConfig, //! block_executor: BlockExecutor, +//! consensus: Consensus, +//! validator: Validator, //! ) where -//! Provider: FullRpcProvider + AccountReader + ChangeSetReader, -//! Pool: TransactionPool + 'static, +//! Provider: FullRpcProvider< +//! Transaction = TransactionSigned, +//! Block = reth_primitives::Block, +//! Receipt = reth_primitives::Receipt, +//! Header = reth_primitives::Header, +//! > + AccountReader +//! + ChangeSetReader, +//! Pool: TransactionPool< +//! Transaction: PoolTransaction< +//! Consensus = TransactionSigned, +//! Pooled = PooledTransactionsElement, +//! >, +//! > + Unpin +//! + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, -//! Events: CanonStateSubscriptions + Clone + 'static, +//! Events: +//! CanonStateSubscriptions + Clone + 'static, //! EngineApi: EngineApiServer, //! EngineT: EngineTypes, -//! EvmConfig: ConfigureEvm
, -//! BlockExecutor: BlockExecutorProvider, +//! EvmConfig: ConfigureEvm
, +//! BlockExecutor: BlockExecutorProvider, +//! Consensus: reth_consensus::FullConsensus + Clone + 'static, +//! Validator: PayloadValidator, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -127,11 +177,16 @@ //! events, //! evm_config, //! block_executor, +//! consensus, //! ); //! //! // configure the server modules -//! let (modules, auth_module, _registry) = -//! builder.build_with_auth_server(transports, engine_api, Box::new(EthApi::with_spawner)); +//! let (modules, auth_module, _registry) = builder.build_with_auth_server( +//! transports, +//! engine_api, +//! Box::new(EthApi::with_spawner), +//! Arc::new(validator), +//! ); //! //! // start the servers //! let auth_config = AuthServerConfig::builder(JwtSecret::random()).build(); @@ -154,9 +209,11 @@ use std::{ collections::HashMap, fmt::Debug, net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + sync::Arc, time::{Duration, SystemTime, UNIX_EPOCH}, }; +use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; use error::{ConflictingModules, RpcError, ServerKind}; use eth::DynEthApiBuilder; use http::{header::AUTHORIZATION, HeaderMap}; @@ -169,33 +226,33 @@ use jsonrpsee::{ Methods, RpcModule, }; use reth_chainspec::EthereumHardforks; -use reth_engine_primitives::EngineTypes; +use reth_consensus::FullConsensus; +use reth_engine_primitives::{EngineTypes, PayloadValidator}; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; -use reth_primitives::Header; +use reth_primitives::{NodePrimitives, PooledTransactionsElement}; use reth_provider::{ AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - EvmEnvProvider, FullRpcProvider, StateProviderFactory, + EvmEnvProvider, FullRpcProvider, ProviderBlock, ProviderHeader, ProviderReceipt, + StateProviderFactory, }; use reth_rpc::{ - AdminApi, DebugApi, EngineEthApi, EthBundle, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, - TxPoolApi, Web3Api, + AdminApi, DebugApi, EngineEthApi, EthBundle, MinerApi, NetApi, OtterscanApi, RPCApi, RethApi, + TraceApi, TxPoolApi, ValidationApi, ValidationApiConfig, Web3Api, }; use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ helpers::{Call, EthApiSpec, EthTransactions, LoadPendingBlock, TraceExt}, - EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcReceipt, RpcTransaction, + EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction, }; use reth_rpc_eth_types::{EthConfig, EthStateCache, EthSubscriptionIdProvider}; -use reth_rpc_layer::{AuthLayer, Claims, JwtAuthValidator, JwtSecret}; +use reth_rpc_layer::{AuthLayer, Claims, CompressionLayer, JwtAuthValidator, JwtSecret}; use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner, TokioTaskExecutor}; -use reth_transaction_pool::{noop::NoopTransactionPool, TransactionPool}; +use reth_transaction_pool::{noop::NoopTransactionPool, PoolTransaction, TransactionPool}; use serde::{Deserialize, Serialize}; use tower::Layer; use tower_http::cors::CorsLayer; -use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; - pub use cors::CorsDomainError; // re-export for convenience @@ -226,6 +283,9 @@ pub use eth::EthHandlers; mod metrics; pub use metrics::{MeteredRequestFuture, RpcRequestMetricsService}; +// Rpc rate limiter +pub mod rate_limiter; + /// Convenience function for starting a server in one step. 
#[allow(clippy::too_many_arguments)] pub async fn launch( @@ -239,15 +299,32 @@ pub async fn launch, block_executor: BlockExecutor, + consensus: Arc>, + payload_validator: Arc>, ) -> Result where - Provider: FullRpcProvider + AccountReader + ChangeSetReader, - Pool: TransactionPool + 'static, + Provider: FullRpcProvider< + Block = ProviderBlock, + Receipt = ProviderReceipt, + Header = ProviderHeader, + > + AccountReader + + ChangeSetReader, + Pool: TransactionPool::Transaction> + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm
, - EthApi: FullEthApiServer, + Events: CanonStateSubscriptions + Clone + 'static, + EvmConfig: ConfigureEvm< + Header = ::BlockHeader, + Transaction = ::SignedTx, + >, + EthApi: FullEthApiServer< + Provider: BlockReader< + Block = ::Block, + Receipt = ::Receipt, + Header = ::BlockHeader, + >, + Pool: TransactionPool>, + >, BlockExecutor: BlockExecutorProvider, { let module_config = module_config.into(); @@ -262,8 +339,9 @@ where events, evm_config, block_executor, + consensus, ) - .build(module_config, eth), + .build(module_config, eth, payload_validator), ) .await } @@ -272,7 +350,16 @@ where /// /// This is the main entrypoint and the easiest way to configure an RPC server. #[derive(Debug, Clone)] -pub struct RpcModuleBuilder { +pub struct RpcModuleBuilder< + Provider, + Pool, + Network, + Tasks, + Events, + EvmConfig, + BlockExecutor, + Consensus, +> { /// The Provider type to when creating all rpc handlers provider: Provider, /// The Pool type to when creating all rpc handlers @@ -287,14 +374,17 @@ pub struct RpcModuleBuilder - RpcModuleBuilder +impl + RpcModuleBuilder { /// Create a new instance of the builder + #[allow(clippy::too_many_arguments)] pub const fn new( provider: Provider, pool: Pool, @@ -303,32 +393,54 @@ impl events: Events, evm_config: EvmConfig, block_executor: BlockExecutor, + consensus: Consensus, ) -> Self { - Self { provider, pool, network, executor, events, evm_config, block_executor } + Self { provider, pool, network, executor, events, evm_config, block_executor, consensus } } /// Configure the provider instance. pub fn with_provider

( self, provider: P, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where P: BlockReader + StateProviderFactory + EvmEnvProvider + 'static, { - let Self { pool, network, executor, events, evm_config, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { pool, network, executor, events, evm_config, block_executor, consensus, .. } = + self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure the transaction pool instance. pub fn with_pool

( self, pool: P, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where P: TransactionPool + 'static, { - let Self { provider, network, executor, events, evm_config, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { + provider, network, executor, events, evm_config, block_executor, consensus, .. + } = self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure a [`NoopTransactionPool`] instance. @@ -346,8 +458,11 @@ impl Events, EvmConfig, BlockExecutor, + Consensus, > { - let Self { provider, executor, events, network, evm_config, block_executor, .. } = self; + let Self { + provider, executor, events, network, evm_config, block_executor, consensus, .. + } = self; RpcModuleBuilder { provider, executor, @@ -356,6 +471,7 @@ impl evm_config, block_executor, pool: NoopTransactionPool::default(), + consensus, } } @@ -363,12 +479,23 @@ impl pub fn with_network( self, network: N, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where N: NetworkInfo + Peers + 'static, { - let Self { provider, pool, executor, events, evm_config, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { + provider, pool, executor, events, evm_config, block_executor, consensus, .. + } = self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure a [`NoopNetwork`] instance. @@ -378,9 +505,19 @@ impl /// [`EthApi`](reth_rpc::eth::EthApi) which requires a [`NetworkInfo`] implementation. pub fn with_noop_network( self, - ) -> RpcModuleBuilder - { - let Self { provider, pool, executor, events, evm_config, block_executor, .. } = self; + ) -> RpcModuleBuilder< + Provider, + Pool, + NoopNetwork, + Tasks, + Events, + EvmConfig, + BlockExecutor, + Consensus, + > { + let Self { + provider, pool, executor, events, evm_config, block_executor, consensus, .. + } = self; RpcModuleBuilder { provider, pool, @@ -389,6 +526,7 @@ impl network: NoopNetwork::default(), evm_config, block_executor, + consensus, } } @@ -396,12 +534,22 @@ impl pub fn with_executor( self, executor: T, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where T: TaskSpawner + 'static, { - let Self { pool, network, provider, events, evm_config, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { pool, network, provider, events, evm_config, block_executor, consensus, .. } = + self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure [`TokioTaskExecutor`] as the task executor to use for additional tasks. @@ -418,8 +566,10 @@ impl Events, EvmConfig, BlockExecutor, + Consensus, > { - let Self { pool, network, provider, events, evm_config, block_executor, .. } = self; + let Self { pool, network, provider, events, evm_config, block_executor, consensus, .. } = + self; RpcModuleBuilder { provider, network, @@ -428,6 +578,7 @@ impl executor: TokioTaskExecutor::default(), evm_config, block_executor, + consensus, } } @@ -435,49 +586,107 @@ impl pub fn with_events( self, events: E, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where E: CanonStateSubscriptions + 'static, { - let Self { provider, pool, executor, network, evm_config, block_executor, .. 
} = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { + provider, pool, executor, network, evm_config, block_executor, consensus, .. + } = self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure the evm configuration type pub fn with_evm_config( self, evm_config: E, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where E: ConfigureEvm + 'static, { - let Self { provider, pool, executor, network, events, block_executor, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { provider, pool, executor, network, events, block_executor, consensus, .. } = + self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } /// Configure the block executor provider pub fn with_block_executor( self, block_executor: BE, - ) -> RpcModuleBuilder + ) -> RpcModuleBuilder where BE: BlockExecutorProvider, { - let Self { provider, network, pool, executor, events, evm_config, .. } = self; - RpcModuleBuilder { provider, network, pool, executor, events, evm_config, block_executor } + let Self { provider, network, pool, executor, events, evm_config, consensus, .. } = self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } + } + + /// Configure the consensus implementation. + pub fn with_consensus( + self, + consensus: C, + ) -> RpcModuleBuilder { + let Self { provider, network, pool, executor, events, evm_config, block_executor, .. } = + self; + RpcModuleBuilder { + provider, + network, + pool, + executor, + events, + evm_config, + block_executor, + consensus, + } } } -impl - RpcModuleBuilder +impl + RpcModuleBuilder where - Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Provider: FullRpcProvider< + Block = ::Block, + Receipt = ::Receipt, + Header = ::BlockHeader, + > + AccountReader + + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm

, + Events: CanonStateSubscriptions + Clone + 'static, + EvmConfig: ConfigureEvm< + Header = ::BlockHeader, + Transaction = ::SignedTx, + >, BlockExecutor: BlockExecutorProvider, + Consensus: reth_consensus::FullConsensus + Clone + 'static, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). @@ -491,17 +700,34 @@ where module_config: TransportRpcModuleConfig, engine: EngineApi, eth: DynEthApiBuilder, + payload_validator: Arc>, ) -> ( TransportRpcModules, AuthRpcModule, - RpcRegistryInner, + RpcRegistryInner, ) where EngineT: EngineTypes, EngineApi: EngineApiServer, - EthApi: FullEthApiServer, + EthApi: FullEthApiServer< + Provider: BlockReader< + Block = ::Block, + Receipt = ::Receipt, + Header = ::BlockHeader, + >, + Pool: TransactionPool>, + >, { - let Self { provider, pool, network, executor, events, evm_config, block_executor } = self; + let Self { + provider, + pool, + network, + executor, + events, + evm_config, + block_executor, + consensus, + } = self; let config = module_config.config.clone().unwrap_or_default(); @@ -511,10 +737,12 @@ where network, executor, events, + consensus, config, evm_config, eth, block_executor, + payload_validator, ); let modules = registry.create_transport_rpc_modules(module_config); @@ -532,17 +760,24 @@ where /// # Example /// /// ```no_run + /// use reth_consensus::noop::NoopConsensus; + /// use reth_engine_primitives::PayloadValidator; /// use reth_evm::ConfigureEvm; /// use reth_evm_ethereum::execute::EthExecutorProvider; /// use reth_network_api::noop::NoopNetwork; - /// use reth_primitives::Header; + /// use reth_primitives::{Header, TransactionSigned}; /// use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; /// use reth_rpc::EthApi; /// use reth_rpc_builder::RpcModuleBuilder; /// use reth_tasks::TokioTaskExecutor; /// use reth_transaction_pool::noop::NoopTransactionPool; + /// use std::sync::Arc; /// - /// fn init + 'static>(evm: Evm) { + /// fn init(evm: Evm, validator: Validator) + /// where + /// Evm: ConfigureEvm
+ 'static, + /// Validator: PayloadValidator + 'static, + /// { /// let mut registry = RpcModuleBuilder::default() /// .with_provider(NoopProvider::default()) /// .with_pool(NoopTransactionPool::default()) @@ -551,7 +786,8 @@ where /// .with_events(TestCanonStateSubscriptions::default()) /// .with_evm_config(evm) /// .with_block_executor(EthExecutorProvider::mainnet()) - /// .into_registry(Default::default(), Box::new(EthApi::with_spawner)); + /// .with_consensus(NoopConsensus::default()) + /// .into_registry(Default::default(), Box::new(EthApi::with_spawner), Arc::new(validator)); /// /// let eth_api = registry.eth_api(); /// } @@ -560,21 +796,33 @@ where self, config: RpcModuleConfig, eth: DynEthApiBuilder, - ) -> RpcRegistryInner + payload_validator: Arc>, + ) -> RpcRegistryInner where EthApi: EthApiTypes + 'static, { - let Self { provider, pool, network, executor, events, evm_config, block_executor } = self; + let Self { + provider, + pool, + network, + executor, + events, + evm_config, + block_executor, + consensus, + } = self; RpcRegistryInner::new( provider, pool, network, executor, events, + consensus, config, evm_config, eth, block_executor, + payload_validator, ) } @@ -584,13 +832,31 @@ where self, module_config: TransportRpcModuleConfig, eth: DynEthApiBuilder, + payload_validator: Arc>, ) -> TransportRpcModules<()> where - EthApi: FullEthApiServer, + EthApi: FullEthApiServer< + Provider: BlockReader< + Receipt = ::Receipt, + Block = ::Block, + Header = ::BlockHeader, + >, + Pool: TransactionPool>, + >, + Pool: TransactionPool::Transaction>, { let mut modules = TransportRpcModules::default(); - let Self { provider, pool, network, executor, events, evm_config, block_executor } = self; + let Self { + provider, + pool, + network, + executor, + events, + evm_config, + block_executor, + consensus, + } = self; if !module_config.is_empty() { let TransportRpcModuleConfig { http, ws, ipc, config } = module_config.clone(); @@ -601,10 +867,12 @@ where network, executor, events, + consensus, config.unwrap_or_default(), evm_config, eth, block_executor, + payload_validator, ); modules.config = module_config; @@ -617,9 +885,9 @@ where } } -impl Default for RpcModuleBuilder<(), (), (), (), (), (), ()> { +impl Default for RpcModuleBuilder<(), (), (), (), (), (), (), ()> { fn default() -> Self { - Self::new((), (), (), (), (), (), ()) + Self::new((), (), (), (), (), (), (), ()) } } @@ -628,6 +896,8 @@ impl Default for RpcModuleBuilder<(), (), (), (), (), (), ()> { pub struct RpcModuleConfig { /// `eth` namespace settings eth: EthConfig, + /// `flashbots` namespace settings + flashbots: ValidationApiConfig, } // === impl RpcModuleConfig === @@ -639,8 +909,8 @@ impl RpcModuleConfig { } /// Returns a new RPC module config given the eth namespace config - pub const fn new(eth: EthConfig) -> Self { - Self { eth } + pub const fn new(eth: EthConfig, flashbots: ValidationApiConfig) -> Self { + Self { eth, flashbots } } /// Get a reference to the eth namespace config @@ -658,6 +928,7 @@ impl RpcModuleConfig { #[derive(Clone, Debug, Default)] pub struct RpcModuleConfigBuilder { eth: Option, + flashbots: Option, } // === impl RpcModuleConfigBuilder === @@ -669,10 +940,16 @@ impl RpcModuleConfigBuilder { self } + /// Configures a custom flashbots namespace config + pub fn flashbots(mut self, flashbots: ValidationApiConfig) -> Self { + self.flashbots = Some(flashbots); + self + } + /// Consumes the type and creates the [`RpcModuleConfig`] pub fn build(self) -> RpcModuleConfig { - let Self { eth } = self; - 
RpcModuleConfig { eth: eth.unwrap_or_default() } + let Self { eth, flashbots } = self; + RpcModuleConfig { eth: eth.unwrap_or_default(), flashbots: flashbots.unwrap_or_default() } } /// Get a reference to the eth namespace config, if any @@ -694,13 +971,14 @@ impl RpcModuleConfigBuilder { /// A Helper type the holds instances of the configured modules. #[derive(Debug, Clone)] pub struct RpcRegistryInner< - Provider, + Provider: BlockReader, Pool, Network, Tasks, Events, EthApi: EthApiTypes, BlockExecutor, + Consensus, > { provider: Provider, pool: Pool, @@ -708,8 +986,12 @@ pub struct RpcRegistryInner< executor: Tasks, events: Events, block_executor: BlockExecutor, + consensus: Consensus, + payload_validator: Arc>, + /// Holds the configuration for the RPC modules + config: RpcModuleConfig, /// Holds a all `eth_` namespace handlers - eth: EthHandlers, + eth: EthHandlers, /// to put trace calls behind semaphore blocking_pool_guard: BlockingTaskGuard, /// Contains the [Methods] of a module @@ -718,10 +1000,17 @@ pub struct RpcRegistryInner< // === impl RpcRegistryInner === -impl - RpcRegistryInner +impl + RpcRegistryInner where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + + BlockReader< + Block = ::Block, + Receipt = ::Receipt, + > + EvmEnvProvider + + Clone + + Unpin + + 'static, Pool: Send + Sync + Clone + 'static, Network: Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, @@ -737,6 +1026,7 @@ where network: Network, executor: Tasks, events: Events, + consensus: Consensus, config: RpcModuleConfig, evm_config: EvmConfig, eth_api_builder: DynEthApiBuilder< @@ -749,9 +1039,10 @@ where EthApi, >, block_executor: BlockExecutor, + payload_validator: Arc>, ) -> Self where - EvmConfig: ConfigureEvm
, + EvmConfig: ConfigureEvm
, { let blocking_pool_guard = BlockingTaskGuard::new(config.eth.max_tracing_requests); @@ -772,17 +1063,21 @@ where network, eth, executor, + consensus, + config, modules: Default::default(), blocking_pool_guard, events, block_executor, + payload_validator, } } } -impl - RpcRegistryInner +impl + RpcRegistryInner where + Provider: BlockReader, EthApi: EthApiTypes, { /// Returns a reference to the installed [`EthApi`](reth_rpc::eth::EthApi). @@ -791,7 +1086,7 @@ where } /// Returns a reference to the installed [`EthHandlers`]. - pub const fn eth_handlers(&self) -> &EthHandlers { + pub const fn eth_handlers(&self) -> &EthHandlers { &self.eth } @@ -799,7 +1094,7 @@ where /// /// This will spawn exactly one [`EthStateCache`] service if this is the first time the cache is /// requested. - pub const fn eth_cache(&self) -> &EthStateCache { + pub const fn eth_cache(&self) -> &EthStateCache { &self.eth.cache } @@ -838,12 +1133,12 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Network: NetworkInfo + Clone + 'static, EthApi: EthApiTypes, - Provider: ChainSpecProvider, + Provider: BlockReader + ChainSpecProvider, BlockExecutor: BlockExecutorProvider, { /// Instantiates `AdminApi` @@ -877,8 +1172,8 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Provider: FullRpcProvider + AccountReader + ChangeSetReader, Network: NetworkInfo + Peers + Clone + 'static, @@ -887,6 +1182,7 @@ where RpcTransaction, RpcBlock, RpcReceipt, + RpcHeader, > + EthApiTypes, BlockExecutor: BlockExecutorProvider, { @@ -908,14 +1204,7 @@ where /// If called outside of the tokio runtime. See also [`Self::eth_api`] pub fn register_ots(&mut self) -> &mut Self where - EthApi: TraceExt - + EthTransactions< - NetworkTypes: alloy_network::Network< - TransactionResponse = alloy_serde::WithOtherFields< - alloy_rpc_types::Transaction, - >, - >, - >, + EthApi: TraceExt + EthTransactions, { let otterscan_api = self.otterscan_api(); self.modules.insert(RethRpcModule::Ots, otterscan_api.into_rpc().into()); @@ -930,6 +1219,7 @@ where pub fn register_debug(&mut self) -> &mut Self where EthApi: EthApiSpec + EthTransactions + TraceExt, + BlockExecutor::Primitives: NodePrimitives>, { let debug_api = self.debug_api(); self.modules.insert(RethRpcModule::Debug, debug_api.into_rpc().into()); @@ -990,8 +1280,8 @@ where } } -impl - RpcRegistryInner +impl + RpcRegistryInner where Provider: FullRpcProvider + AccountReader + ChangeSetReader, Network: NetworkInfo + Peers + Clone + 'static, @@ -1004,15 +1294,11 @@ where /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn trace_api(&self) -> TraceApi + pub fn trace_api(&self) -> TraceApi where EthApi: TraceExt, { - TraceApi::new( - self.provider.clone(), - self.eth_api().clone(), - self.blocking_pool_guard.clone(), - ) + TraceApi::new(self.eth_api().clone(), self.blocking_pool_guard.clone()) } /// Instantiates [`EthBundle`] Api @@ -1033,13 +1319,12 @@ where /// # Panics /// /// If called outside of the tokio runtime. 
See also [`Self::eth_api`] - pub fn debug_api(&self) -> DebugApi + pub fn debug_api(&self) -> DebugApi where EthApi: EthApiSpec + EthTransactions + TraceExt, - BlockExecutor: BlockExecutorProvider, + BlockExecutor::Primitives: NodePrimitives>, { DebugApi::new( - self.provider.clone(), self.eth_api().clone(), self.blocking_pool_guard.clone(), self.block_executor.clone(), @@ -1063,18 +1348,44 @@ where pub fn reth_api(&self) -> RethApi { RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) } + + /// Instantiates `ValidationApi` + pub fn validation_api(&self) -> ValidationApi + where + Consensus: reth_consensus::FullConsensus + Clone + 'static, + Provider: BlockReader::Block>, + { + ValidationApi::new( + self.provider.clone(), + Arc::new(self.consensus.clone()), + self.block_executor.clone(), + self.config.flashbots.clone(), + Box::new(self.executor.clone()), + self.payload_validator.clone(), + ) + } } -impl - RpcRegistryInner +impl + RpcRegistryInner where - Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Provider: FullRpcProvider::Block> + + AccountReader + + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, - EthApi: FullEthApiServer, + Events: CanonStateSubscriptions + Clone + 'static, + EthApi: FullEthApiServer< + Provider: BlockReader< + Block = ::Block, + Receipt = ::Receipt, + Header = ::BlockHeader, + >, + Pool: TransactionPool>, + >, BlockExecutor: BlockExecutorProvider, + Consensus: reth_consensus::FullConsensus + Clone + 'static, { /// Configures the auth module that includes the /// * `engine_` namespace @@ -1164,7 +1475,6 @@ where .into() } RethRpcModule::Debug => DebugApi::new( - self.provider.clone(), eth_api.clone(), self.blocking_pool_guard.clone(), self.block_executor.clone(), @@ -1191,17 +1501,18 @@ where RethRpcModule::Net => { NetApi::new(self.network.clone(), eth_api.clone()).into_rpc().into() } - RethRpcModule::Trace => TraceApi::new( - self.provider.clone(), - eth_api.clone(), - self.blocking_pool_guard.clone(), + RethRpcModule::Trace => { + TraceApi::new(eth_api.clone(), self.blocking_pool_guard.clone()) + .into_rpc() + .into() + } + RethRpcModule::Web3 => Web3Api::new(self.network.clone()).into_rpc().into(), + RethRpcModule::Txpool => TxPoolApi::new( + self.eth.api.pool().clone(), + self.eth.api.tx_resp_builder().clone(), ) .into_rpc() .into(), - RethRpcModule::Web3 => Web3Api::new(self.network.clone()).into_rpc().into(), - RethRpcModule::Txpool => { - TxPoolApi::<_, EthApi>::new(self.pool.clone()).into_rpc().into() - } RethRpcModule::Rpc => RPCApi::new( namespaces .iter() @@ -1216,6 +1527,17 @@ where .into_rpc() .into() } + RethRpcModule::Flashbots => ValidationApi::new( + eth_api.provider().clone(), + Arc::new(self.consensus.clone()), + self.block_executor.clone(), + self.config.flashbots.clone(), + Box::new(self.executor.clone()), + self.payload_validator.clone(), + ) + .into_rpc() + .into(), + RethRpcModule::Miner => MinerApi::default().into_rpc().into(), }) .clone() }) @@ -1443,6 +1765,12 @@ impl RpcServerConfig { jwt_secret.map(|secret| AuthLayer::new(JwtAuthValidator::new(secret))) } + /// Returns a [`CompressionLayer`] that adds compression support (gzip, deflate, brotli, zstd) + /// based on the client's `Accept-Encoding` header + fn maybe_compression_layer() -> Option { + Some(CompressionLayer::new()) + } + /// Builds and starts the configured server(s): http, ws, ipc. 
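The `CompressionLayer` registered above is stock `tower-http` middleware, negotiated per request from the client's `Accept-Encoding` header. A minimal sketch of how it composes into an HTTP middleware stack, assuming the `tower` crate with the `util` feature and `tower-http` with its compression features enabled (everything other than `CompressionLayer` itself is illustrative, not the builder's code):

```rust
use tower::ServiceBuilder;
use tower_http::compression::CompressionLayer;

fn main() {
    // Mirrors the stack the server builder assembles (CORS and JWT layers
    // omitted): `option_layer` accepts an `Option`, so a layer can be toggled
    // off without changing the stack's type. The builder's
    // `maybe_compression_layer()` currently always returns `Some`.
    let _http_middleware = ServiceBuilder::new().option_layer(Some(CompressionLayer::new()));
}
```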
/// /// If both http and ws are on the same port, they are combined into one server. @@ -1507,7 +1835,8 @@ impl RpcServerConfig { .set_http_middleware( tower::ServiceBuilder::new() .option_layer(Self::maybe_cors_layer(cors)?) - .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), + .option_layer(Self::maybe_jwt_layer(self.jwt_secret)) + .option_layer(Self::maybe_compression_layer()), ) .set_rpc_middleware( self.rpc_middleware.clone().layer( @@ -1579,8 +1908,9 @@ impl RpcServerConfig { .http_only() .set_http_middleware( tower::ServiceBuilder::new() - .option_layer(Self::maybe_cors_layer(self.http_cors_domains.clone())?) - .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), + .option_layer(Self::maybe_cors_layer(self.ws_cors_domains.clone())?) + .option_layer(Self::maybe_jwt_layer(self.jwt_secret)) + .option_layer(Self::maybe_compression_layer()), ) .set_rpc_middleware( self.rpc_middleware.clone().layer( @@ -1673,7 +2003,7 @@ impl TransportRpcModuleConfig { } /// Sets a custom [`RpcModuleConfig`] for the configured modules. - pub const fn with_config(mut self, config: RpcModuleConfig) -> Self { + pub fn with_config(mut self, config: RpcModuleConfig) -> Self { self.config = Some(config); self } @@ -1723,6 +2053,26 @@ impl TransportRpcModuleConfig { self.config.as_ref() } + /// Returns true if the given module is configured for any transport. + pub fn contains_any(&self, module: &RethRpcModule) -> bool { + self.contains_http(module) || self.contains_ws(module) || self.contains_ipc(module) + } + + /// Returns true if the given module is configured for the http transport. + pub fn contains_http(&self, module: &RethRpcModule) -> bool { + self.http.as_ref().is_some_and(|http| http.contains(module)) + } + + /// Returns true if the given module is configured for the ws transport. + pub fn contains_ws(&self, module: &RethRpcModule) -> bool { + self.ws.as_ref().is_some_and(|ws| ws.contains(module)) + } + + /// Returns true if the given module is configured for the ipc transport. + pub fn contains_ipc(&self, module: &RethRpcModule) -> bool { + self.ipc.as_ref().is_some_and(|ipc| ipc.contains(module)) + } + /// Ensures that both http and ws are configured and that they are configured to use the same /// port. fn ensure_ws_http_identical(&self) -> Result<(), WsHttpSamePortError> { @@ -1768,6 +2118,29 @@ impl TransportRpcModules { &self.config } + /// Merge the given [`Methods`] in all configured transport modules if the given + /// [`RethRpcModule`] is configured for the transport. + /// + /// Fails if any of the methods in other is present already. + pub fn merge_if_module_configured( + &mut self, + module: RethRpcModule, + other: impl Into, + ) -> Result<(), RegisterMethodError> { + let other = other.into(); + if self.module_config().contains_http(&module) { + self.merge_http(other.clone())?; + } + if self.module_config().contains_ws(&module) { + self.merge_ws(other.clone())?; + } + if self.module_config().contains_ipc(&module) { + self.merge_ipc(other)?; + } + + Ok(()) + } + /// Merge the given [Methods] in the configured http methods. /// /// Fails if any of the methods in other is present already. @@ -1804,7 +2177,7 @@ impl TransportRpcModules { Ok(false) } - /// Merge the given [Methods] in all configured methods. + /// Merge the given [`Methods`] in all configured methods. /// /// Fails if any of the methods in other is present already. 
pub fn merge_configured(
@@ -1895,7 +2268,22 @@ impl TransportRpcModules {
        http_removed || ws_removed || ipc_removed
    }

-    /// Replace the given [Methods] in the configured http methods.
+    /// Renames a method in all configured transports by:
+    /// 1. Removing the old method name.
+    /// 2. Adding the new method.
+    pub fn rename(
+        &mut self,
+        old_name: &'static str,
+        new_method: impl Into<Methods>,
+    ) -> Result<(), RegisterMethodError> {
+        // Remove the old method from all configured transports
+        self.remove_method_from_configured(old_name);
+
+        // Merge the new method into the configured transports
+        self.merge_configured(new_method)
+    }
+
+    /// Replace the given [`Methods`] in the configured http methods.
    ///
    /// Fails if any of the methods in other is present already or if the method being removed is
    /// not present
@@ -2267,6 +2655,43 @@ mod tests {
        assert!(modules.ipc.as_ref().unwrap().method("anything").is_none());
    }

+    #[test]
+    fn test_transport_rpc_module_rename() {
+        let mut modules = TransportRpcModules {
+            http: Some(create_test_module()),
+            ws: Some(create_test_module()),
+            ipc: Some(create_test_module()),
+            ..Default::default()
+        };
+
+        // Verify that the old method we want to rename exists at the start
+        assert!(modules.http.as_ref().unwrap().method("anything").is_some());
+        assert!(modules.ws.as_ref().unwrap().method("anything").is_some());
+        assert!(modules.ipc.as_ref().unwrap().method("anything").is_some());
+
+        // Verify that the new method does not exist at the start
+        assert!(modules.http.as_ref().unwrap().method("something").is_none());
+        assert!(modules.ws.as_ref().unwrap().method("something").is_none());
+        assert!(modules.ipc.as_ref().unwrap().method("something").is_none());
+
+        // Create another module
+        let mut other_module = RpcModule::new(());
+        other_module.register_method("something", |_, _, _| "fails").unwrap();
+
+        // Rename the method
+        modules.rename("anything", other_module).expect("rename failed");
+
+        // Verify that the old method was removed from all transports
+        assert!(modules.http.as_ref().unwrap().method("anything").is_none());
+        assert!(modules.ws.as_ref().unwrap().method("anything").is_none());
+        assert!(modules.ipc.as_ref().unwrap().method("anything").is_none());
+
+        // Verify that the new method was added to all transports
+        assert!(modules.http.as_ref().unwrap().method("something").is_some());
+        assert!(modules.ws.as_ref().unwrap().method("something").is_some());
+        assert!(modules.ipc.as_ref().unwrap().method("something").is_some());
+    }
+
    #[test]
    fn test_replace_http_method() {
        let mut modules =
diff --git a/crates/rpc/rpc-builder/src/rate_limiter.rs b/crates/rpc/rpc-builder/src/rate_limiter.rs
new file mode 100644
index 00000000000..85df0eee61c
--- /dev/null
+++ b/crates/rpc/rpc-builder/src/rate_limiter.rs
@@ -0,0 +1,116 @@
+//! [`jsonrpsee`] helper layer for rate limiting certain methods.
+
+use jsonrpsee::{server::middleware::rpc::RpcServiceT, types::Request, MethodResponse};
+use std::{
+    future::Future,
+    pin::Pin,
+    sync::Arc,
+    task::{ready, Context, Poll},
+};
+use tokio::sync::{OwnedSemaphorePermit, Semaphore};
+use tokio_util::sync::PollSemaphore;
+use tower::Layer;
+
+/// Rate limiter for the RPC server.
+///
+/// Rate limits expensive calls such as debug_ and trace_.
+#[derive(Debug, Clone)]
+pub struct RpcRequestRateLimiter {
+    inner: Arc<RpcRequestRateLimiterInner>,
+}
+
+impl RpcRequestRateLimiter {
+    /// Create a new rate limit layer with the given number of permits.
+ pub fn new(rate_limit: usize) -> Self { + Self { + inner: Arc::new(RpcRequestRateLimiterInner { + call_guard: PollSemaphore::new(Arc::new(Semaphore::new(rate_limit))), + }), + } + } +} + +impl Layer for RpcRequestRateLimiter { + type Service = RpcRequestRateLimitingService; + + fn layer(&self, inner: S) -> Self::Service { + RpcRequestRateLimitingService::new(inner, self.clone()) + } +} + +/// Rate Limiter for the RPC server +#[derive(Debug, Clone)] +struct RpcRequestRateLimiterInner { + /// Semaphore to rate limit calls + call_guard: PollSemaphore, +} + +/// A [`RpcServiceT`] middleware that rate limits RPC calls to the server. +#[derive(Debug, Clone)] +pub struct RpcRequestRateLimitingService { + /// The rate limiter for RPC requests + rate_limiter: RpcRequestRateLimiter, + /// The inner service being wrapped + inner: S, +} + +impl RpcRequestRateLimitingService { + /// Create a new rate limited service. + pub const fn new(service: S, rate_limiter: RpcRequestRateLimiter) -> Self { + Self { inner: service, rate_limiter } + } +} + +impl<'a, S> RpcServiceT<'a> for RpcRequestRateLimitingService +where + S: RpcServiceT<'a> + Send + Sync + Clone + 'static, +{ + type Future = RateLimitingRequestFuture; + + fn call(&self, req: Request<'a>) -> Self::Future { + let method_name = req.method_name(); + if method_name.starts_with("trace_") || method_name.starts_with("debug_") { + RateLimitingRequestFuture { + fut: self.inner.call(req), + guard: Some(self.rate_limiter.inner.call_guard.clone()), + permit: None, + } + } else { + // if we don't need to rate limit, then there + // is no need to get a semaphore permit + RateLimitingRequestFuture { fut: self.inner.call(req), guard: None, permit: None } + } + } +} + +/// Response future. +#[pin_project::pin_project] +pub struct RateLimitingRequestFuture { + #[pin] + fut: F, + guard: Option, + permit: Option, +} + +impl std::fmt::Debug for RateLimitingRequestFuture { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("RateLimitingRequestFuture") + } +} + +impl> Future for RateLimitingRequestFuture { + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + if let Some(guard) = this.guard.as_mut() { + *this.permit = ready!(guard.poll_acquire(cx)); + *this.guard = None; + } + let res = this.fut.poll(cx); + if res.is_ready() { + *this.permit = None; + } + res + } +} diff --git a/crates/rpc/rpc-builder/tests/it/auth.rs b/crates/rpc/rpc-builder/tests/it/auth.rs index 71e8bf39f9e..390ea7d6ba4 100644 --- a/crates/rpc/rpc-builder/tests/it/auth.rs +++ b/crates/rpc/rpc-builder/tests/it/auth.rs @@ -5,7 +5,7 @@ use alloy_primitives::U64; use alloy_rpc_types_engine::{ForkchoiceState, PayloadId, TransitionConfiguration}; use jsonrpsee::core::client::{ClientT, SubscriptionClientT}; use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_primitives::Block; +use reth_primitives::{Block, BlockExt}; use reth_rpc_api::clients::EngineApiClient; use reth_rpc_layer::JwtSecret; use reth_rpc_types_compat::engine::payload::{ diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index 7a8093c5062..357e3135e04 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -2,12 +2,12 @@ //! 
Standalone http tests use crate::utils::{launch_http, launch_http_ws, launch_ws}; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{hex_literal::hex, Address, Bytes, TxHash, B256, B64, U256, U64}; -use alloy_rpc_types::{ - Block, FeeHistory, Filter, Index, Log, PendingTransactionFilterKind, SyncStatus, Transaction, - TransactionReceipt, +use alloy_rpc_types_eth::{ + transaction::TransactionRequest, Block, FeeHistory, Filter, Header, Index, Log, + PendingTransactionFilterKind, SyncStatus, Transaction, TransactionReceipt, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_rpc_types_trace::filter::TraceFilter; use jsonrpsee::{ core::{ @@ -19,7 +19,7 @@ use jsonrpsee::{ types::error::ErrorCode, }; use reth_network_peers::NodeRecord; -use reth_primitives::{BlockId, BlockNumberOrTag, Receipt}; +use reth_primitives::Receipt; use reth_rpc_api::{ clients::{AdminApiClient, EthApiClient}, DebugApiClient, EthFilterApiClient, NetApiClient, OtterscanClient, TraceApiClient, @@ -174,16 +174,24 @@ where .unwrap(); // Implemented - EthApiClient::::protocol_version(client).await.unwrap(); - EthApiClient::::chain_id(client).await.unwrap(); - EthApiClient::::accounts(client).await.unwrap(); - EthApiClient::::get_account(client, address, block_number.into()) + EthApiClient::::protocol_version(client).await.unwrap(); + EthApiClient::::chain_id(client).await.unwrap(); + EthApiClient::::accounts(client).await.unwrap(); + EthApiClient::::get_account( + client, + address, + block_number.into(), + ) + .await + .unwrap(); + EthApiClient::::block_number(client).await.unwrap(); + EthApiClient::::get_code(client, address, None) + .await + .unwrap(); + EthApiClient::::send_raw_transaction(client, tx) .await .unwrap(); - EthApiClient::::block_number(client).await.unwrap(); - EthApiClient::::get_code(client, address, None).await.unwrap(); - EthApiClient::::send_raw_transaction(client, tx).await.unwrap(); - EthApiClient::::fee_history( + EthApiClient::::fee_history( client, U64::from(0), block_number, @@ -191,11 +199,13 @@ where ) .await .unwrap(); - EthApiClient::::balance(client, address, None).await.unwrap(); - EthApiClient::::transaction_count(client, address, None) + EthApiClient::::balance(client, address, None) .await .unwrap(); - EthApiClient::::storage_at( + EthApiClient::::transaction_count(client, address, None) + .await + .unwrap(); + EthApiClient::::storage_at( client, address, U256::default().into(), @@ -203,72 +213,87 @@ where ) .await .unwrap(); - EthApiClient::::block_by_hash(client, hash, false).await.unwrap(); - EthApiClient::::block_by_number(client, block_number, false) + EthApiClient::::block_by_hash(client, hash, false) .await .unwrap(); - EthApiClient::::block_transaction_count_by_number( + EthApiClient::::block_by_number( client, block_number, + false, ) .await .unwrap(); - EthApiClient::::block_transaction_count_by_hash(client, hash) - .await - .unwrap(); - EthApiClient::::block_uncles_count_by_hash(client, hash) - .await - .unwrap(); - EthApiClient::::block_uncles_count_by_number(client, block_number) - .await - .unwrap(); - EthApiClient::::uncle_by_block_hash_and_index(client, hash, index) + EthApiClient::::block_transaction_count_by_number( + client, + block_number, + ) + .await + .unwrap(); + EthApiClient::::block_transaction_count_by_hash( + client, hash, + ) + .await + .unwrap(); + EthApiClient::::block_uncles_count_by_hash(client, hash) .await .unwrap(); - EthApiClient::::uncle_by_block_number_and_index( + EthApiClient::::block_uncles_count_by_number( + 
client, + block_number, + ) + .await + .unwrap(); + EthApiClient::::uncle_by_block_hash_and_index( + client, hash, index, + ) + .await + .unwrap(); + EthApiClient::::uncle_by_block_number_and_index( client, block_number, index, ) .await .unwrap(); - EthApiClient::::sign(client, address, bytes.clone()) + EthApiClient::::sign(client, address, bytes.clone()) .await .unwrap_err(); - EthApiClient::::sign_typed_data(client, address, typed_data) - .await - .unwrap_err(); - EthApiClient::::transaction_by_hash(client, tx_hash) + EthApiClient::::sign_typed_data( + client, address, typed_data, + ) + .await + .unwrap_err(); + EthApiClient::::transaction_by_hash(client, tx_hash) .await .unwrap(); - EthApiClient::::transaction_by_block_hash_and_index( + EthApiClient::::transaction_by_block_hash_and_index( client, hash, index, ) .await .unwrap(); - EthApiClient::::transaction_by_block_number_and_index( + EthApiClient::::transaction_by_block_number_and_index( client, block_number, index, ) .await .unwrap(); - EthApiClient::::create_access_list( + EthApiClient::::create_access_list( client, call_request.clone(), Some(block_number.into()), ) .await - .unwrap(); - EthApiClient::::estimate_gas( + .unwrap_err(); + EthApiClient::::estimate_gas( client, call_request.clone(), Some(block_number.into()), None, ) .await - .unwrap(); - EthApiClient::::call( + .unwrap_err(); + EthApiClient::::call( client, call_request.clone(), Some(block_number.into()), @@ -276,39 +301,48 @@ where None, ) .await - .unwrap(); - EthApiClient::::syncing(client).await.unwrap(); - EthApiClient::::send_transaction(client, transaction_request) - .await - .unwrap_err(); - EthApiClient::::hashrate(client).await.unwrap(); - EthApiClient::::submit_hashrate( + .unwrap_err(); + EthApiClient::::syncing(client).await.unwrap(); + EthApiClient::::send_transaction( + client, + transaction_request.clone(), + ) + .await + .unwrap_err(); + EthApiClient::::sign_transaction( + client, + transaction_request, + ) + .await + .unwrap_err(); + EthApiClient::::hashrate(client).await.unwrap(); + EthApiClient::::submit_hashrate( client, U256::default(), B256::default(), ) .await .unwrap(); - EthApiClient::::gas_price(client).await.unwrap_err(); - EthApiClient::::max_priority_fee_per_gas(client) + EthApiClient::::gas_price(client).await.unwrap_err(); + EthApiClient::::max_priority_fee_per_gas(client) .await .unwrap_err(); - EthApiClient::::get_proof(client, address, vec![], None) + EthApiClient::::get_proof(client, address, vec![], None) .await .unwrap(); // Unimplemented assert!(is_unimplemented( - EthApiClient::::author(client).await.err().unwrap() + EthApiClient::::author(client).await.err().unwrap() )); assert!(is_unimplemented( - EthApiClient::::is_mining(client).await.err().unwrap() + EthApiClient::::is_mining(client).await.err().unwrap() )); assert!(is_unimplemented( - EthApiClient::::get_work(client).await.err().unwrap() + EthApiClient::::get_work(client).await.err().unwrap() )); assert!(is_unimplemented( - EthApiClient::::submit_work( + EthApiClient::::submit_work( client, B64::default(), B256::default(), @@ -318,12 +352,6 @@ where .err() .unwrap() )); - assert!(is_unimplemented( - EthApiClient::::sign_transaction(client, call_request.clone()) - .await - .err() - .unwrap() - )); } async fn test_basic_debug_calls(client: &C) @@ -368,13 +396,15 @@ where .unwrap_err(); TraceApiClient::trace_call_many(client, vec![], Some(BlockNumberOrTag::Latest.into())) .await - .unwrap(); + .unwrap_err(); TraceApiClient::replay_transaction(client, B256::default(), 
HashSet::default()) .await .err() .unwrap(); - TraceApiClient::trace_block(client, block_id).await.unwrap(); - TraceApiClient::replay_block_transactions(client, block_id, HashSet::default()).await.unwrap(); + TraceApiClient::trace_block(client, block_id).await.unwrap_err(); + TraceApiClient::replay_block_transactions(client, block_id, HashSet::default()) + .await + .unwrap_err(); TraceApiClient::trace_filter(client, trace_filter).await.unwrap(); } @@ -400,28 +430,32 @@ where let nonce = 1; let block_hash = B256::default(); - OtterscanClient::::get_header_by_number(client, block_number).await.unwrap(); + OtterscanClient::::get_header_by_number(client, block_number) + .await + .unwrap(); - OtterscanClient::::has_code(client, address, None).await.unwrap(); - OtterscanClient::::has_code(client, address, Some(block_number.into())) + OtterscanClient::::has_code(client, address, None).await.unwrap(); + OtterscanClient::::has_code(client, address, Some(block_number.into())) .await .unwrap(); - OtterscanClient::::get_api_level(client).await.unwrap(); + OtterscanClient::::get_api_level(client).await.unwrap(); - OtterscanClient::::get_internal_operations(client, tx_hash).await.unwrap(); + OtterscanClient::::get_internal_operations(client, tx_hash).await.unwrap(); - OtterscanClient::::get_transaction_error(client, tx_hash).await.unwrap(); + OtterscanClient::::get_transaction_error(client, tx_hash).await.unwrap(); - OtterscanClient::::trace_transaction(client, tx_hash).await.unwrap(); + OtterscanClient::::trace_transaction(client, tx_hash).await.unwrap(); - OtterscanClient::::get_block_details(client, block_number).await.unwrap_err(); + OtterscanClient::::get_block_details(client, block_number) + .await + .unwrap_err(); - OtterscanClient::::get_block_details_by_hash(client, block_hash) + OtterscanClient::::get_block_details_by_hash(client, block_hash) .await .unwrap_err(); - OtterscanClient::::get_block_transactions( + OtterscanClient::::get_block_transactions( client, block_number, page_number, @@ -432,7 +466,7 @@ where .unwrap(); assert!(is_unimplemented( - OtterscanClient::::search_transactions_before( + OtterscanClient::::search_transactions_before( client, address, block_number, @@ -443,7 +477,7 @@ where .unwrap() )); assert!(is_unimplemented( - OtterscanClient::::search_transactions_after( + OtterscanClient::::search_transactions_after( client, address, block_number, @@ -453,13 +487,13 @@ where .err() .unwrap() )); - assert!(OtterscanClient::::get_transaction_by_sender_and_nonce( + assert!(OtterscanClient::::get_transaction_by_sender_and_nonce( client, sender, nonce ) .await .err() .is_none()); - assert!(OtterscanClient::::get_contract_creator(client, address) + assert!(OtterscanClient::::get_contract_creator(client, address) .await .unwrap() .is_none()); diff --git a/crates/rpc/rpc-builder/tests/it/middleware.rs b/crates/rpc/rpc-builder/tests/it/middleware.rs index bcc26dcad89..0e0bb80c08b 100644 --- a/crates/rpc/rpc-builder/tests/it/middleware.rs +++ b/crates/rpc/rpc-builder/tests/it/middleware.rs @@ -1,10 +1,12 @@ use crate::utils::{test_address, test_rpc_builder}; -use alloy_rpc_types::{Block, Receipt, Transaction}; +use alloy_rpc_types_eth::{Block, Header, Receipt, Transaction}; use jsonrpsee::{ server::{middleware::rpc::RpcServiceT, RpcServiceBuilder}, types::Request, MethodResponse, }; +use reth_chainspec::MAINNET; +use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_rpc::EthApi; use reth_rpc_builder::{RpcServerConfig, TransportRpcModuleConfig}; use 
reth_rpc_eth_api::EthApiClient; @@ -63,6 +65,7 @@ async fn test_rpc_middleware() { let modules = builder.build( TransportRpcModuleConfig::set_http(RpcModuleSelection::All), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let mylayer = MyMiddlewareLayer::default(); @@ -75,7 +78,7 @@ async fn test_rpc_middleware() { .unwrap(); let client = handle.http_client().unwrap(); - EthApiClient::::protocol_version(&client).await.unwrap(); + EthApiClient::::protocol_version(&client).await.unwrap(); let count = mylayer.count.load(Ordering::Relaxed); assert_eq!(count, 1); } diff --git a/crates/rpc/rpc-builder/tests/it/startup.rs b/crates/rpc/rpc-builder/tests/it/startup.rs index 9f6961fbba0..ac53b014956 100644 --- a/crates/rpc/rpc-builder/tests/it/startup.rs +++ b/crates/rpc/rpc-builder/tests/it/startup.rs @@ -1,7 +1,9 @@ //! Startup tests -use std::io; +use std::{io, sync::Arc}; +use reth_chainspec::MAINNET; +use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_rpc::EthApi; use reth_rpc_builder::{ error::{RpcError, ServerKind, WsHttpSamePortError}, @@ -30,6 +32,7 @@ async fn test_http_addr_in_use() { let server = builder.build( TransportRpcModuleConfig::set_http(vec![RethRpcModule::Admin]), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let result = RpcServerConfig::http(Default::default()).with_http_address(addr).start(&server).await; @@ -45,6 +48,7 @@ async fn test_ws_addr_in_use() { let server = builder.build( TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let result = RpcServerConfig::ws(Default::default()).with_ws_address(addr).start(&server).await; let err = result.unwrap_err(); @@ -66,6 +70,7 @@ async fn test_launch_same_port_different_modules() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]) .with_http(vec![RethRpcModule::Eth]), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) @@ -88,6 +93,7 @@ async fn test_launch_same_port_same_cors() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth]) .with_http(vec![RethRpcModule::Eth]), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) @@ -108,6 +114,7 @@ async fn test_launch_same_port_different_cors() { TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth]) .with_http(vec![RethRpcModule::Eth]), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); let res = RpcServerConfig::ws(Default::default()) diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 847de99564e..be708dac5f8 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -1,10 +1,15 @@ -use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; +use std::{ + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + sync::Arc, +}; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_chainspec::MAINNET; +use reth_consensus::noop::NoopConsensus; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; -use reth_evm_ethereum::{execute::EthExecutorProvider, EthEvmConfig}; +use 
reth_evm::execute::BasicBlockExecutorProvider; +use reth_evm_ethereum::{execute::EthExecutionStrategyFactory, EthEvmConfig}; use reth_network_api::noop::NoopNetwork; use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; @@ -59,8 +64,11 @@ pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle { /// Launches a new server with http only with the given modules pub async fn launch_http(modules: impl Into) -> RpcServerHandle { let builder = test_rpc_builder(); - let server = - builder.build(TransportRpcModuleConfig::set_http(modules), Box::new(EthApi::with_spawner)); + let server = builder.build( + TransportRpcModuleConfig::set_http(modules), + Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), + ); RpcServerConfig::http(Default::default()) .with_http_address(test_address()) .start(&server) @@ -71,8 +79,11 @@ pub async fn launch_http(modules: impl Into) -> RpcServerHan /// Launches a new server with ws only with the given modules pub async fn launch_ws(modules: impl Into) -> RpcServerHandle { let builder = test_rpc_builder(); - let server = - builder.build(TransportRpcModuleConfig::set_ws(modules), Box::new(EthApi::with_spawner)); + let server = builder.build( + TransportRpcModuleConfig::set_ws(modules), + Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), + ); RpcServerConfig::ws(Default::default()) .with_ws_address(test_address()) .start(&server) @@ -87,6 +98,7 @@ pub async fn launch_http_ws(modules: impl Into) -> RpcServer let server = builder.build( TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); RpcServerConfig::ws(Default::default()) .with_ws_address(test_address()) @@ -105,6 +117,7 @@ pub async fn launch_http_ws_same_port(modules: impl Into) -> let server = builder.build( TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(MAINNET.clone())), ); let addr = test_address(); RpcServerConfig::ws(Default::default()) @@ -124,7 +137,8 @@ pub fn test_rpc_builder() -> RpcModuleBuilder< TokioTaskExecutor, TestCanonStateSubscriptions, EthEvmConfig, - EthExecutorProvider, + BasicBlockExecutorProvider, + NoopConsensus, > { RpcModuleBuilder::default() .with_provider(NoopProvider::default()) @@ -133,5 +147,8 @@ pub fn test_rpc_builder() -> RpcModuleBuilder< .with_executor(TokioTaskExecutor::default()) .with_events(TestCanonStateSubscriptions::default()) .with_evm_config(EthEvmConfig::new(MAINNET.clone())) - .with_block_executor(EthExecutorProvider::ethereum(MAINNET.clone())) + .with_block_executor( + BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::mainnet()), + ) + .with_consensus(NoopConsensus::default()) } diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 00503f2c1dd..f9f05da33d3 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -19,6 +19,7 @@ reth-rpc-api.workspace = true reth-storage-api.workspace = true reth-beacon-consensus.workspace = true reth-payload-builder.workspace = true +reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-rpc-types-compat.workspace = true @@ -45,6 +46,7 @@ jsonrpsee-types.workspace = true serde.workspace = true 
thiserror.workspace = true tracing.workspace = true +parking_lot.workspace = true [dev-dependencies] reth-ethereum-engine-primitives.workspace = true @@ -52,7 +54,6 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-testing-utils.workspace = true - alloy-rlp.workspace = true assert_matches.workspace = true \ No newline at end of file diff --git a/crates/rpc/rpc-engine-api/src/capabilities.rs b/crates/rpc/rpc-engine-api/src/capabilities.rs index de4d9623153..af0609b0d1f 100644 --- a/crates/rpc/rpc-engine-api/src/capabilities.rs +++ b/crates/rpc/rpc-engine-api/src/capabilities.rs @@ -17,8 +17,6 @@ pub const CAPABILITIES: &[&str] = &[ "engine_newPayloadV4", "engine_getPayloadBodiesByHashV1", "engine_getPayloadBodiesByRangeV1", - "engine_getPayloadBodiesByHashV2", - "engine_getPayloadBodiesByRangeV2", "engine_getBlobsV1", ]; diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 252808c14a7..2e80c105e7e 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -1,16 +1,21 @@ use crate::{ capabilities::EngineCapabilities, metrics::EngineApiMetrics, EngineApiError, EngineApiResult, }; -use alloy_eips::eip4844::BlobAndProofV1; +use alloy_eips::{ + eip1898::BlockHashOrNumber, + eip4844::BlobAndProofV1, + eip7685::{Requests, RequestsOrHash}, +}; use alloy_primitives::{BlockHash, BlockNumber, B256, U64}; use alloy_rpc_types_engine::{ CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, - ExecutionPayloadBodiesV2, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, - ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, + ExecutionPayloadInputV2, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV3, + ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, PraguePayloadFields, TransitionConfiguration, }; use async_trait::async_trait; use jsonrpsee_core::RpcResult; +use parking_lot::Mutex; use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_chainspec::{EthereumHardforks, Hardforks}; use reth_engine_primitives::{EngineTypes, EngineValidator}; @@ -20,10 +25,10 @@ use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_primitives::{Block, BlockHashOrNumber, EthereumHardfork}; +use reth_primitives::EthereumHardfork; use reth_rpc_api::EngineApiServer; use reth_rpc_types_compat::engine::payload::{ - convert_payload_input_v2_to_payload, convert_to_payload_body_v1, convert_to_payload_body_v2, + convert_payload_input_v2_to_payload, convert_to_payload_body_v1, }; use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory}; use reth_tasks::TaskSpawner; @@ -68,6 +73,8 @@ struct EngineApiInner>, } impl @@ -103,6 +110,7 @@ where capabilities, tx_pool, validator, + latest_new_payload_response: Mutex::new(None), }); Self { inner } } @@ -141,7 +149,27 @@ where self.inner .validator .validate_version_specific_fields(EngineApiMessageVersion::V1, payload_or_attrs)?; - Ok(self.inner.beacon_consensus.new_payload(payload, None).await?) + + Ok(self + .inner + .beacon_consensus + .new_payload(payload, ExecutionPayloadSidecar::none()) + .await + .inspect(|_| self.inner.on_new_payload_response())?) + } + + /// Metered version of `new_payload_v1`. 
+ async fn new_payload_v1_metered( + &self, + payload: ExecutionPayloadV1, + ) -> EngineApiResult { + let start = Instant::now(); + let gas_used = payload.gas_used; + let res = Self::new_payload_v1(self, payload).await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v1.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + res } /// See also @@ -157,7 +185,26 @@ where self.inner .validator .validate_version_specific_fields(EngineApiMessageVersion::V2, payload_or_attrs)?; - Ok(self.inner.beacon_consensus.new_payload(payload, None).await?) + Ok(self + .inner + .beacon_consensus + .new_payload(payload, ExecutionPayloadSidecar::none()) + .await + .inspect(|_| self.inner.on_new_payload_response())?) + } + + /// Metered version of `new_payload_v2`. + pub async fn new_payload_v2_metered( + &self, + payload: ExecutionPayloadInputV2, + ) -> EngineApiResult { + let start = Instant::now(); + let gas_used = payload.execution_payload.gas_used; + let res = Self::new_payload_v2(self, payload).await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v2.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + res } /// See also @@ -177,17 +224,44 @@ where .validator .validate_version_specific_fields(EngineApiMessageVersion::V3, payload_or_attrs)?; - let cancun_fields = CancunPayloadFields { versioned_hashes, parent_beacon_block_root }; + Ok(self + .inner + .beacon_consensus + .new_payload( + payload, + ExecutionPayloadSidecar::v3(CancunPayloadFields { + versioned_hashes, + parent_beacon_block_root, + }), + ) + .await + .inspect(|_| self.inner.on_new_payload_response())?) + } - Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields)).await?) + // Metrics version of `new_payload_v3` + async fn new_payload_v3_metered( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + ) -> RpcResult { + let start = Instant::now(); + let gas_used = payload.payload_inner.payload_inner.gas_used; + let res = + Self::new_payload_v3(self, payload, versioned_hashes, parent_beacon_block_root).await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v3.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + Ok(res?) } /// See also pub async fn new_payload_v4( &self, - payload: ExecutionPayloadV4, + payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: B256, + execution_requests: Requests, ) -> EngineApiResult { let payload = ExecutionPayload::from(payload); let payload_or_attrs = @@ -199,9 +273,46 @@ where .validator .validate_version_specific_fields(EngineApiMessageVersion::V4, payload_or_attrs)?; - let cancun_fields = CancunPayloadFields { versioned_hashes, parent_beacon_block_root }; + Ok(self + .inner + .beacon_consensus + .new_payload( + payload, + ExecutionPayloadSidecar::v4( + CancunPayloadFields { versioned_hashes, parent_beacon_block_root }, + PraguePayloadFields { + requests: RequestsOrHash::Requests(execution_requests), + // TODO: add as an argument and handle in `try_into_block` + target_blobs_per_block: 0, + }, + ), + ) + .await + .inspect(|_| self.inner.on_new_payload_response())?) + } - Ok(self.inner.beacon_consensus.new_payload(payload, Some(cancun_fields)).await?) 
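Each `new_payload_v*_metered` wrapper above follows the same shape. A compilable sketch of that pattern with stand-in metric sinks (the real code records into the `EngineApiMetrics` latency histograms and gas-based response metrics shown in this diff; `record_latency`, `record_response`, and the tokio runtime are assumptions of the sketch):

```rust
use std::time::{Duration, Instant};

// Hypothetical stand-ins for `metrics.latency.*` and
// `metrics.new_payload_response.update_response_metrics(..)`.
fn record_latency(elapsed: Duration) {
    println!("new_payload latency: {elapsed:?}");
}
fn record_response(gas_used: u64, elapsed: Duration) {
    println!("gas_used={gas_used}, elapsed={elapsed:?}");
}

/// Capture `gas_used` before the payload is consumed, time the inner
/// handler, then record latency and response metrics whether the call
/// succeeded or failed.
async fn metered<T, E, Fut>(gas_used: u64, inner: Fut) -> Result<T, E>
where
    Fut: std::future::Future<Output = Result<T, E>>,
{
    let start = Instant::now();
    let res = inner.await;
    let elapsed = start.elapsed();
    record_latency(elapsed);
    record_response(gas_used, elapsed);
    res
}

#[tokio::main]
async fn main() {
    let res: Result<&str, ()> = metered(21_000, async { Ok("VALID") }).await;
    assert!(res.is_ok());
}
```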
+ /// Metrics version of `new_payload_v4` + async fn new_payload_v4_metered( + &self, + payload: ExecutionPayloadV3, + versioned_hashes: Vec, + parent_beacon_block_root: B256, + execution_requests: Requests, + ) -> RpcResult { + let start = Instant::now(); + let gas_used = payload.payload_inner.payload_inner.gas_used; + let res = Self::new_payload_v4( + self, + payload, + versioned_hashes, + parent_beacon_block_root, + execution_requests, + ) + .await; + let elapsed = start.elapsed(); + self.inner.metrics.latency.new_payload_v4.record(elapsed); + self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); + Ok(res?) } /// Sends a message to the beacon consensus engine to update the fork choice _without_ @@ -257,7 +368,7 @@ where pub async fn get_payload_v1( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { self.inner .payload_store .resolve(payload_id) @@ -281,7 +392,7 @@ where pub async fn get_payload_v2( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { // First we fetch the payload attributes to check the timestamp let attributes = self.get_payload_attributes(payload_id).await?; @@ -316,7 +427,7 @@ where pub async fn get_payload_v3( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { // First we fetch the payload attributes to check the timestamp let attributes = self.get_payload_attributes(payload_id).await?; @@ -351,7 +462,7 @@ where pub async fn get_payload_v4( &self, payload_id: PayloadId, - ) -> EngineApiResult { + ) -> EngineApiResult { // First we fetch the payload attributes to check the timestamp let attributes = self.get_payload_attributes(payload_id).await?; @@ -371,7 +482,7 @@ where .map_err(|_| EngineApiError::UnknownPayload)? .try_into() .map_err(|_| { - warn!("could not transform built payload into ExecutionPayloadV4"); + warn!("could not transform built payload into ExecutionPayloadV3"); EngineApiError::UnknownPayload }) } @@ -385,7 +496,7 @@ where f: F, ) -> EngineApiResult>> where - F: Fn(Block) -> R + Send + 'static, + F: Fn(Provider::Block) -> R + Send + 'static, R: Send + 'static, { let (tx, rx) = oneshot::channel(); @@ -451,18 +562,6 @@ where self.get_payload_bodies_by_range_with(start, count, convert_to_payload_body_v1).await } - /// Returns the execution payload bodies by the range starting at `start`, containing `count` - /// blocks. - /// - /// Same as [`Self::get_payload_bodies_by_range_v1`] but as [`ExecutionPayloadBodiesV2`]. - pub async fn get_payload_bodies_by_range_v2( - &self, - start: BlockNumber, - count: u64, - ) -> EngineApiResult { - self.get_payload_bodies_by_range_with(start, count, convert_to_payload_body_v2).await - } - /// Called to retrieve execution payload bodies by hashes. async fn get_payload_bodies_by_hash_with( &self, @@ -470,7 +569,7 @@ where f: F, ) -> EngineApiResult>> where - F: Fn(Block) -> R + Send + 'static, + F: Fn(Provider::Block) -> R + Send + 'static, R: Send + 'static, { let len = hashes.len() as u64; @@ -509,16 +608,6 @@ where self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v1).await } - /// Called to retrieve execution payload bodies by hashes. - /// - /// Same as [`Self::get_payload_bodies_by_hash_v1`] but as [`ExecutionPayloadBodiesV2`]. 
- pub async fn get_payload_bodies_by_hash_v2( - &self, - hashes: Vec, - ) -> EngineApiResult { - self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v2).await - } - /// Called to verify network configuration parameters and ensure that Consensus and Execution /// layers are using the latest configuration. pub fn exchange_transition_configuration( @@ -596,6 +685,8 @@ where state: ForkchoiceState, payload_attrs: Option, ) -> EngineApiResult { + self.inner.record_elapsed_time_on_fcu(); + if let Some(ref attrs) = payload_attrs { let attr_validation_res = self.inner.validator.ensure_well_formed_attributes(version, attrs); @@ -614,7 +705,8 @@ where // To do this, we set the payload attrs to `None` if attribute validation failed, but // we still apply the forkchoice update. if let Err(err) = attr_validation_res { - let fcu_res = self.inner.beacon_consensus.fork_choice_updated(state, None).await?; + let fcu_res = + self.inner.beacon_consensus.fork_choice_updated(state, None, version).await?; // TODO: decide if we want this branch - the FCU INVALID response might be more // useful than the payload attributes INVALID response if fcu_res.is_invalid() { @@ -624,7 +716,27 @@ where } } - Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs).await?) + Ok(self.inner.beacon_consensus.fork_choice_updated(state, payload_attrs, version).await?) + } +} + +impl + EngineApiInner +where + EngineT: EngineTypes, +{ + /// Tracks the elapsed time between the new payload response and the received forkchoice update + /// request. + fn record_elapsed_time_on_fcu(&self) { + if let Some(start_time) = self.latest_new_payload_response.lock().take() { + let elapsed_time = start_time.elapsed(); + self.metrics.latency.new_payload_forkchoice_updated_time_diff.record(elapsed_time); + } + } + + /// Updates the timestamp for the latest new payload response. + fn on_new_payload_response(&self) { + self.latest_new_payload_response.lock().replace(Instant::now()); } } @@ -643,26 +755,14 @@ where /// Caution: This should not accept the `withdrawals` field async fn new_payload_v1(&self, payload: ExecutionPayloadV1) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV1"); - let start = Instant::now(); - let gas_used = payload.gas_used; - let res = Self::new_payload_v1(self, payload).await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v1.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self.new_payload_v1_metered(payload).await?) } /// Handler for `engine_newPayloadV2` /// See also async fn new_payload_v2(&self, payload: ExecutionPayloadInputV2) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV2"); - let start = Instant::now(); - let gas_used = payload.execution_payload.gas_used; - let res = Self::new_payload_v2(self, payload).await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v2.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self.new_payload_v2_metered(payload).await?) 
} /// Handler for `engine_newPayloadV3` @@ -674,33 +774,27 @@ where parent_beacon_block_root: B256, ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV3"); - let start = Instant::now(); - let gas_used = payload.payload_inner.payload_inner.gas_used; - let res = - Self::new_payload_v3(self, payload, versioned_hashes, parent_beacon_block_root).await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v3.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self.new_payload_v3_metered(payload, versioned_hashes, parent_beacon_block_root).await?) } /// Handler for `engine_newPayloadV4` /// See also async fn new_payload_v4( &self, - payload: ExecutionPayloadV4, + payload: ExecutionPayloadV3, versioned_hashes: Vec, parent_beacon_block_root: B256, + execution_requests: Requests, ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_newPayloadV4"); - let start = Instant::now(); - let gas_used = payload.payload_inner.payload_inner.payload_inner.gas_used; - let res = - Self::new_payload_v4(self, payload, versioned_hashes, parent_beacon_block_root).await; - let elapsed = start.elapsed(); - self.inner.metrics.latency.new_payload_v4.record(elapsed); - self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); - Ok(res?) + Ok(self + .new_payload_v4_metered( + payload, + versioned_hashes, + parent_beacon_block_root, + execution_requests, + ) + .await?) } /// Handler for `engine_forkchoiceUpdatedV1` @@ -765,7 +859,7 @@ where async fn get_payload_v1( &self, payload_id: PayloadId, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV1"); let start = Instant::now(); let res = Self::get_payload_v1(self, payload_id).await; @@ -785,7 +879,7 @@ where async fn get_payload_v2( &self, payload_id: PayloadId, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV2"); let start = Instant::now(); let res = Self::get_payload_v2(self, payload_id).await; @@ -805,7 +899,7 @@ where async fn get_payload_v3( &self, payload_id: PayloadId, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV3"); let start = Instant::now(); let res = Self::get_payload_v3(self, payload_id).await; @@ -825,7 +919,7 @@ where async fn get_payload_v4( &self, payload_id: PayloadId, - ) -> RpcResult { + ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadV4"); let start = Instant::now(); let res = Self::get_payload_v4(self, payload_id).await; @@ -846,17 +940,6 @@ where Ok(res.await?) } - async fn get_payload_bodies_by_hash_v2( - &self, - block_hashes: Vec, - ) -> RpcResult { - trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByHashV2"); - let start = Instant::now(); - let res = Self::get_payload_bodies_by_hash_v2(self, block_hashes); - self.inner.metrics.latency.get_payload_bodies_by_hash_v2.record(start.elapsed()); - Ok(res.await?) - } - /// Handler for `engine_getPayloadBodiesByRangeV1` /// /// See also @@ -885,18 +968,6 @@ where Ok(res?) } - async fn get_payload_bodies_by_range_v2( - &self, - start: U64, - count: U64, - ) -> RpcResult { - trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByRangeV2"); - let start_time = Instant::now(); - let res = Self::get_payload_bodies_by_range_v2(self, start.to(), count.to()).await; - self.inner.metrics.latency.get_payload_bodies_by_range_v2.record(start_time.elapsed()); - Ok(res?) 
- } - /// Handler for `engine_exchangeTransitionConfigurationV1` /// See also async fn exchange_transition_configuration( @@ -961,11 +1032,12 @@ mod tests { use super::*; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use assert_matches::assert_matches; - use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; + use reth_beacon_consensus::BeaconConsensusEngineEvent; use reth_chainspec::{ChainSpec, MAINNET}; + use reth_engine_primitives::BeaconEngineMessage; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_payload_builder::test_utils::spawn_test_payload_service; - use reth_primitives::SealedBlock; + use reth_primitives::{Block, SealedBlock}; use reth_provider::test_utils::MockEthProvider; use reth_rpc_types_compat::engine::payload::execution_payload_from_sealed_block; use reth_tasks::TokioTaskExecutor; @@ -1091,7 +1163,7 @@ mod tests { let expected = blocks .iter() .cloned() - .map(|b| Some(convert_to_payload_body_v1(b.unseal()))) + .map(|b| Some(convert_to_payload_body_v1(b.unseal::()))) .collect::>(); let res = api.get_payload_bodies_by_range_v1(start, count).await.unwrap(); @@ -1133,7 +1205,7 @@ mod tests { if first_missing_range.contains(&b.number) { None } else { - Some(convert_to_payload_body_v1(b.unseal())) + Some(convert_to_payload_body_v1(b.unseal::())) } }) .collect::>(); @@ -1152,7 +1224,7 @@ mod tests { { None } else { - Some(convert_to_payload_body_v1(b.unseal())) + Some(convert_to_payload_body_v1(b.unseal::())) } }) .collect::>(); diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 677bd2fb246..4210d415bfe 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -2,8 +2,10 @@ use alloy_primitives::{B256, U256}; use jsonrpsee_types::error::{ INTERNAL_ERROR_CODE, INVALID_PARAMS_CODE, INVALID_PARAMS_MSG, SERVER_ERROR_MSG, }; -use reth_beacon_consensus::{BeaconForkChoiceUpdateError, BeaconOnNewPayloadError}; -use reth_payload_primitives::{EngineObjectValidationError, PayloadBuilderError}; +use reth_beacon_consensus::BeaconForkChoiceUpdateError; +use reth_engine_primitives::BeaconOnNewPayloadError; +use reth_payload_builder_primitives::PayloadBuilderError; +use reth_payload_primitives::EngineObjectValidationError; use thiserror::Error; /// The Engine API result type diff --git a/crates/rpc/rpc-engine-api/src/lib.rs b/crates/rpc/rpc-engine-api/src/lib.rs index a2da00eee70..a9305a00820 100644 --- a/crates/rpc/rpc-engine-api/src/lib.rs +++ b/crates/rpc/rpc-engine-api/src/lib.rs @@ -15,9 +15,6 @@ mod engine_api; /// Engine API capabilities. pub mod capabilities; -/// The Engine API message type. -mod message; - /// Engine API error. mod error; @@ -26,7 +23,6 @@ mod metrics; pub use engine_api::{EngineApi, EngineApiSender}; pub use error::*; -pub use message::EngineApiMessageVersion; // re-export server trait for convenience pub use reth_rpc_api::EngineApiServer; diff --git a/crates/rpc/rpc-engine-api/src/message.rs b/crates/rpc/rpc-engine-api/src/message.rs deleted file mode 100644 index c0d6b85d511..00000000000 --- a/crates/rpc/rpc-engine-api/src/message.rs +++ /dev/null @@ -1,14 +0,0 @@ -/// The version of Engine API message. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum EngineApiMessageVersion { - /// Version 1 - V1, - /// Version 2 - /// - /// Added for shanghai hardfork. - V2, - /// Version 3 - /// - /// Added for cancun hardfork. 
-    V3,
-}
diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs
index 2c4216664ae..9325ce26778 100644
--- a/crates/rpc/rpc-engine-api/src/metrics.rs
+++ b/crates/rpc/rpc-engine-api/src/metrics.rs
@@ -34,6 +34,8 @@ pub(crate) struct EngineApiLatencyMetrics {
     pub(crate) fork_choice_updated_v2: Histogram,
     /// Latency for `engine_forkchoiceUpdatedV3`
     pub(crate) fork_choice_updated_v3: Histogram,
+    /// Time diff between `engine_newPayloadV*` and the next FCU
+    pub(crate) new_payload_forkchoice_updated_time_diff: Histogram,
     /// Latency for `engine_getPayloadV1`
     pub(crate) get_payload_v1: Histogram,
     /// Latency for `engine_getPayloadV2`
@@ -44,12 +46,8 @@ pub(crate) struct EngineApiLatencyMetrics {
     pub(crate) get_payload_v4: Histogram,
     /// Latency for `engine_getPayloadBodiesByRangeV1`
     pub(crate) get_payload_bodies_by_range_v1: Histogram,
-    /// Latency for `engine_getPayloadBodiesByRangeV2`
-    pub(crate) get_payload_bodies_by_range_v2: Histogram,
     /// Latency for `engine_getPayloadBodiesByHashV1`
     pub(crate) get_payload_bodies_by_hash_v1: Histogram,
-    /// Latency for `engine_getPayloadBodiesByHashV2`
-    pub(crate) get_payload_bodies_by_hash_v2: Histogram,
     /// Latency for `engine_exchangeTransitionConfigurationV1`
     pub(crate) exchange_transition_configuration: Histogram,
 }
diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs
index c08c30c1de0..363c816d240 100644
--- a/crates/rpc/rpc-engine-api/tests/it/payload.rs
+++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs
@@ -1,12 +1,14 @@
 //! Some payload tests

-use alloy_primitives::{Bytes, Sealable, U256};
+use alloy_eips::eip4895::Withdrawals;
+use alloy_primitives::{Bytes, U256};
 use alloy_rlp::{Decodable, Error as RlpError};
 use alloy_rpc_types_engine::{
-    ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadV1, PayloadError,
+    ExecutionPayload, ExecutionPayloadBodyV1, ExecutionPayloadSidecar, ExecutionPayloadV1,
+    PayloadError,
 };
 use assert_matches::assert_matches;
-use reth_primitives::{proofs, Block, SealedBlock, SealedHeader, TransactionSigned, Withdrawals};
+use reth_primitives::{proofs, Block, SealedBlock, SealedHeader, TransactionSigned};
 use reth_rpc_types_compat::engine::payload::{
     block_to_payload, block_to_payload_v1, convert_to_payload_body_v1, try_into_sealed_block,
     try_payload_v1_to_block,
@@ -22,10 +24,8 @@ fn transform_block<F: FnOnce(Block) -> Block>(src: SealedBlock, f: F) -> Executi
     transformed.header.transactions_root =
         proofs::calculate_transaction_root(&transformed.body.transactions);
     transformed.header.ommers_hash = proofs::calculate_ommers_root(&transformed.body.ommers);
-    let sealed = transformed.header.seal_slow();
-    let (header, seal) = sealed.into_parts();
     block_to_payload(SealedBlock {
-        header: SealedHeader::new(header, seal),
+        header: SealedHeader::seal(transformed.header),
         body: transformed.body,
     })
 }
@@ -38,7 +38,7 @@ fn payload_body_roundtrip() {
         0..=99,
         BlockRangeParams { tx_count: 0..2, ..Default::default() },
     ) {
-        let unsealed = block.clone().unseal();
+        let unsealed = block.clone().unseal::<Block>();
         let payload_body: ExecutionPayloadBodyV1 = convert_to_payload_body_v1(unsealed);

         assert_eq!(
@@ -75,7 +75,10 @@ fn payload_validation() {
         b
     });

-    assert_matches!(try_into_sealed_block(block_with_valid_extra_data, None), Ok(_));
+    assert_matches!(
+        try_into_sealed_block(block_with_valid_extra_data, &ExecutionPayloadSidecar::none()),
+        Ok(_)
+    );

     // Invalid extra data
     let block_with_invalid_extra_data = Bytes::from_static(&[0; 33]);
@@ -84,7 +87,7 @@ fn payload_validation() {
         b
     });
     assert_matches!(
-        try_into_sealed_block(invalid_extra_data_block,None),
+        try_into_sealed_block(invalid_extra_data_block, &ExecutionPayloadSidecar::none()),
         Err(PayloadError::ExtraData(data)) if data == block_with_invalid_extra_data
     );

@@ -94,8 +97,7 @@ fn payload_validation() {
         b
     });
     assert_matches!(
-
-        try_into_sealed_block(block_with_zero_base_fee,None),
+        try_into_sealed_block(block_with_zero_base_fee, &ExecutionPayloadSidecar::none()),
         Err(PayloadError::BaseFee(val)) if val.is_zero()
     );

@@ -114,8 +116,7 @@ fn payload_validation() {
         b
     });
     assert_matches!(
-        try_into_sealed_block(block_with_ommers.clone(),None),
-
+        try_into_sealed_block(block_with_ommers.clone(), &ExecutionPayloadSidecar::none()),
         Err(PayloadError::BlockHash { consensus, .. })
             if consensus == block_with_ommers.block_hash()
     );

@@ -126,9 +127,8 @@ fn payload_validation() {
         b
     });
     assert_matches!(
-        try_into_sealed_block(block_with_difficulty.clone(),None),
+        try_into_sealed_block(block_with_difficulty.clone(), &ExecutionPayloadSidecar::none()),
         Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_difficulty.block_hash()
-
     );

     // None zero nonce
@@ -137,9 +137,8 @@ fn payload_validation() {
         b
     });
     assert_matches!(
-        try_into_sealed_block(block_with_nonce.clone(),None),
+        try_into_sealed_block(block_with_nonce.clone(), &ExecutionPayloadSidecar::none()),
         Err(PayloadError::BlockHash { consensus, .. }) if consensus == block_with_nonce.block_hash()
-
     );

     // Valid block
diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml
index 9d0f6cfd83d..6f65b91d8f8 100644
--- a/crates/rpc/rpc-eth-api/Cargo.toml
+++ b/crates/rpc/rpc-eth-api/Cargo.toml
@@ -16,6 +16,7 @@ workspace = true
 revm.workspace = true
 revm-inspectors.workspace = true
 revm-primitives = { workspace = true, features = ["dev"] }
+reth-primitives-traits.workspace = true
 reth-errors.workspace = true
 reth-evm.workspace = true
 reth-primitives.workspace = true
@@ -25,20 +26,21 @@ reth-rpc-types-compat.workspace = true
 reth-tasks = { workspace = true, features = ["rayon"] }
 reth-transaction-pool.workspace = true
 reth-chainspec.workspace = true
-reth-execution-types.workspace = true
 reth-rpc-eth-types.workspace = true
 reth-rpc-server-types.workspace = true
 reth-network-api.workspace = true
-reth-trie.workspace = true
+reth-node-api.workspace = true
+reth-trie-common = { workspace = true, features = ["eip1186"] }

 # ethereum
+alloy-rlp.workspace = true
+alloy-serde.workspace = true
 alloy-eips.workspace = true
 alloy-dyn-abi = { workspace = true, features = ["eip712"] }
 alloy-json-rpc.workspace = true
 alloy-network.workspace = true
 alloy-primitives.workspace = true
 alloy-rpc-types-eth.workspace = true
-alloy-rpc-types.workspace = true
 alloy-rpc-types-mev.workspace = true
 alloy-consensus.workspace = true
diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs
index 20edf96d810..c103835a801 100644
--- a/crates/rpc/rpc-eth-api/src/core.rs
+++ b/crates/rpc/rpc-eth-api/src/core.rs
@@ -1,25 +1,24 @@
 //! Implementation of the [`jsonrpsee`] generated [`EthApiServer`] trait. Handles RPC requests for
 //! the `eth_` namespace.
 use alloy_dyn_abi::TypedData;
-use alloy_eips::eip2930::AccessListResult;
+use alloy_eips::{eip2930::AccessListResult, BlockId, BlockNumberOrTag};
 use alloy_json_rpc::RpcObject;
 use alloy_primitives::{Address, Bytes, B256, B64, U256, U64};
-use alloy_rpc_types::{
-    serde_helpers::JsonStorageKey,
+use alloy_rpc_types_eth::{
     simulate::{SimulatePayload, SimulatedBlock},
     state::{EvmOverrides, StateOverride},
-    BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Header,
-    Index, StateContext, SyncStatus, Work,
+    transaction::TransactionRequest,
+    BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Index,
+    StateContext, SyncStatus, Work,
 };
-use alloy_rpc_types_eth::transaction::TransactionRequest;
+use alloy_serde::JsonStorageKey;
 use jsonrpsee::{core::RpcResult, proc_macros::rpc};
-use reth_primitives::{BlockId, BlockNumberOrTag};
 use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult};
 use tracing::trace;

 use crate::{
     helpers::{EthApiSpec, EthBlocks, EthCall, EthFees, EthState, EthTransactions, FullEthApi},
-    RpcBlock, RpcReceipt, RpcTransaction,
+    RpcBlock, RpcHeader, RpcReceipt, RpcTransaction,
 };

 /// Helper trait, unifies functionality that must be supported to implement all RPC methods for
@@ -29,6 +28,7 @@ pub trait FullEthApiServer:
         RpcTransaction<Self::NetworkTypes>,
         RpcBlock<Self::NetworkTypes>,
         RpcReceipt<Self::NetworkTypes>,
+        RpcHeader<Self::NetworkTypes>,
     > + FullEthApi
     + Clone
 {
@@ -39,6 +39,7 @@ impl<T> FullEthApiServer for T where
         RpcTransaction<T::NetworkTypes>,
         RpcBlock<T::NetworkTypes>,
         RpcReceipt<T::NetworkTypes>,
+        RpcHeader<T::NetworkTypes>,
     > + FullEthApi
     + Clone
 {
@@ -47,7 +48,7 @@ impl<T> FullEthApiServer for T where
 /// Eth rpc interface:
 #[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))]
 #[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))]
-pub trait EthApi<T: RpcObject, B: RpcObject, R: RpcObject> {
+pub trait EthApi<T: RpcObject, B: RpcObject, R: RpcObject, H: RpcObject> {
     /// Returns the protocol version encoded as a string.
     #[method(name = "protocolVersion")]
     async fn protocol_version(&self) -> RpcResult<U64>;
@@ -201,11 +202,11 @@ pub trait EthApi<T: RpcObject, B: RpcObject, R: RpcObject> {

     /// Returns the block's header at given number.
     #[method(name = "getHeaderByNumber")]
-    async fn header_by_number(&self, hash: BlockNumberOrTag) -> RpcResult<Option<Header>>;
+    async fn header_by_number(&self, hash: BlockNumberOrTag) -> RpcResult<Option<H>>;

     /// Returns the block's header at given hash.
     #[method(name = "getHeaderByHash")]
-    async fn header_by_hash(&self, hash: B256) -> RpcResult<Option<Header>>;
+    async fn header_by_hash(&self, hash: B256) -> RpcResult<Option<H>>;

     /// `eth_simulateV1` executes an arbitrary number of transactions on top of the requested state.
     /// The transactions are packed into individual blocks. Overrides can be provided.
@@ -277,7 +278,7 @@ pub trait EthApi<T: RpcObject, B: RpcObject, R: RpcObject> {
         &self,
         address: Address,
         block: BlockId,
-    ) -> RpcResult<Option<alloy_rpc_types::Account>>;
+    ) -> RpcResult<Option<alloy_rpc_types_eth::Account>>;

     /// Introduced in EIP-1559, returns suggestion for the priority for dynamic fee transactions.
     #[method(name = "maxPriorityFeePerGas")]
@@ -367,6 +368,7 @@ impl<T> EthApiServer<
         RpcTransaction<T::NetworkTypes>,
         RpcBlock<T::NetworkTypes>,
         RpcReceipt<T::NetworkTypes>,
+        RpcHeader<T::NetworkTypes>,
     > for T
 where
     T: FullEthApi,
@@ -502,7 +504,8 @@ where
         trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionByHash");
         Ok(EthTransactions::transaction_by_hash(self, hash)
             .await?
-            .map(|tx| tx.into_transaction::<T::TransactionCompat>()))
+            .map(|tx| tx.into_transaction(self.tx_resp_builder()))
+            .transpose()?)
     }

     /// Handler for: `eth_getRawTransactionByBlockHashAndIndex`
@@ -607,13 +610,16 @@ where
     }

     /// Handler for: `eth_getHeaderByNumber`
-    async fn header_by_number(&self, block_number: BlockNumberOrTag) -> RpcResult<Option<Header>> {
+    async fn header_by_number(
+        &self,
+        block_number: BlockNumberOrTag,
+    ) -> RpcResult<Option<RpcHeader<T::NetworkTypes>>> {
         trace!(target: "rpc::eth", ?block_number, "Serving eth_getHeaderByNumber");
         Ok(EthBlocks::rpc_block_header(self, block_number.into()).await?)
     }

     /// Handler for: `eth_getHeaderByHash`
-    async fn header_by_hash(&self, hash: B256) -> RpcResult<Option<Header>> {
+    async fn header_by_hash(&self, hash: B256) -> RpcResult<Option<RpcHeader<T::NetworkTypes>>> {
         trace!(target: "rpc::eth", ?hash, "Serving eth_getHeaderByHash");
         Ok(EthBlocks::rpc_block_header(self, hash.into()).await?)
     }
@@ -625,6 +631,7 @@ where
         block_number: Option<BlockNumberOrTag>,
     ) -> RpcResult<Vec<SimulatedBlock<RpcBlock<T::NetworkTypes>>>> {
         trace!(target: "rpc::eth", ?block_number, "Serving eth_simulateV1");
+        let _permit = self.tracing_task_guard().clone().acquire_owned().await;
         Ok(EthCall::simulate_v1(self, payload, block_number).await?)
     }

@@ -695,7 +702,7 @@ where
         &self,
         address: Address,
         block: BlockId,
-    ) -> RpcResult<Option<alloy_rpc_types::Account>> {
+    ) -> RpcResult<Option<alloy_rpc_types_eth::Account>> {
         trace!(target: "rpc::eth", "Serving eth_getAccount");
         Ok(EthState::get_account(self, address, block).await?)
     }
@@ -780,8 +787,9 @@ where
     }

     /// Handler for: `eth_signTransaction`
-    async fn sign_transaction(&self, _transaction: TransactionRequest) -> RpcResult<Bytes> {
-        Err(internal_rpc_err("unimplemented"))
+    async fn sign_transaction(&self, request: TransactionRequest) -> RpcResult<Bytes> {
+        trace!(target: "rpc::eth", ?request, "Serving eth_signTransaction");
+        Ok(EthTransactions::sign_transaction(self, request).await?)
     }

     /// Handler for: `eth_signTypedData`
diff --git a/crates/rpc/rpc-eth-api/src/filter.rs b/crates/rpc/rpc-eth-api/src/filter.rs
index c73d9672843..1acba351af7 100644
--- a/crates/rpc/rpc-eth-api/src/filter.rs
+++ b/crates/rpc/rpc-eth-api/src/filter.rs
@@ -1,7 +1,7 @@
 //! `eth_` RPC API for filtering.

 use alloy_json_rpc::RpcObject;
-use alloy_rpc_types::{Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind};
+use alloy_rpc_types_eth::{Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind};
 use jsonrpsee::{core::RpcResult, proc_macros::rpc};

 /// Rpc Interface for poll-based ethereum filter API.
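Aside (sketch, not from the patch): the `_permit` line added to the `eth_simulateV1` handler above bounds how many of these expensive calls run at once; the handler holds an owned semaphore permit for the duration of the request and releases it on return. A minimal sketch of the same pattern, assuming a plain `tokio::sync::Semaphore` (the `tracing_task_guard()` seen in the hunk is assumed to wrap one):

use std::sync::Arc;
use tokio::sync::Semaphore;

async fn rate_limited_simulate(guard: Arc<Semaphore>) -> u64 {
    // Waits until one of the permits is free; the permit is released when
    // `_permit` is dropped at the end of this function.
    let _permit = guard.clone().acquire_owned().await.expect("semaphore not closed");
    // ... run the expensive simulation here ...
    42
}

// Usage: allow at most 8 concurrent simulate calls.
// let guard = Arc::new(Semaphore::new(8));
// let res = rate_limited_simulate(guard.clone()).await;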
diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs
index 9993b477a66..5f0d9f744ef 100644
--- a/crates/rpc/rpc-eth-api/src/helpers/block.rs
+++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs
@@ -2,35 +2,47 @@

 use std::sync::Arc;

-use alloy_rpc_types::{Header, Index};
+use alloy_consensus::BlockHeader;
+use alloy_eips::BlockId;
+use alloy_primitives::Sealable;
+use alloy_rlp::Encodable;
+use alloy_rpc_types_eth::{Block, BlockTransactions, Header, Index};
 use futures::Future;
-use reth_primitives::{BlockId, Receipt, SealedBlock, SealedBlockWithSenders};
-use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider};
-use reth_rpc_eth_types::EthStateCache;
-use reth_rpc_types_compat::block::{from_block, uncle_block_from_header};
+use reth_node_api::BlockBody;
+use reth_primitives::{SealedBlockFor, SealedBlockWithSenders};
+use reth_provider::{
+    BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider, ProviderHeader, ProviderReceipt,
+};
+use reth_rpc_types_compat::block::from_block;
+use revm_primitives::U256;

-use crate::{FromEthApiError, FullEthApiTypes, RpcBlock, RpcReceipt};
+use crate::{
+    node::RpcNodeCoreExt, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcBlock, RpcNodeCore,
+    RpcReceipt,
+};

 use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking};

 /// Result type of the fetched block receipts.
 pub type BlockReceiptsResult<N, E> = Result<Option<Vec<RpcReceipt<N>>>, E>;
 /// Result type of the fetched block and its receipts.
-pub type BlockAndReceiptsResult<E> = Result<Option<(SealedBlock, Arc<Vec<Receipt>>)>, E>;
+pub type BlockAndReceiptsResult<Eth> = Result<
+    Option<(
+        SealedBlockFor<<<Eth as RpcNodeCore>::Provider as BlockReader>::Block>,
+        Arc<Vec<ProviderReceipt<<Eth as RpcNodeCore>::Provider>>>,
+    )>,
+    <Eth as EthApiTypes>::Error,
+>;

 /// Block related functions for the [`EthApiServer`](crate::EthApiServer) trait in the
 /// `eth_` namespace.
 pub trait EthBlocks: LoadBlock {
-    /// Returns a handle for reading data from disk.
-    ///
-    /// Data access in default (L1) trait method implementations.
-    fn provider(&self) -> impl HeaderProvider;
-
     /// Returns the block header for the given block id.
+    #[expect(clippy::type_complexity)]
     fn rpc_block_header(
         &self,
         block_id: BlockId,
-    ) -> impl Future<Output = Result<Option<Header>, Self::Error>> + Send
+    ) -> impl Future<Output = Result<Option<Header<ProviderHeader<Self::Provider>>>, Self::Error>> + Send
     where
         Self: FullEthApiTypes,
     {
@@ -52,24 +64,24 @@ pub trait EthBlocks: LoadBlock {
         async move {
             let Some(block) = self.block_with_senders(block_id).await? else { return Ok(None) };
             let block_hash = block.hash();
-            let mut total_difficulty = EthBlocks::provider(self)
-                .header_td_by_number(block.number)
+            let mut total_difficulty = self
+                .provider()
+                .header_td_by_number(block.number())
                 .map_err(Self::Error::from_eth_err)?;
             if total_difficulty.is_none() {
                 // if we failed to find td after we successfully loaded the block, try again using
                 // the hash
                 // this only matters if the chain is currently transitioning the merge block and there's a reorg:
-                total_difficulty = EthBlocks::provider(self)
-                    .header_td(&block.hash())
-                    .map_err(Self::Error::from_eth_err)?;
+                total_difficulty =
+                    self.provider().header_td(&block.hash()).map_err(Self::Error::from_eth_err)?;
             }

-            let block = from_block::<Self::NetworkTypes>(
+            let block = from_block(
                 (*block).clone().unseal(),
                 total_difficulty.unwrap_or_default(),
                 full.into(),
                 Some(block_hash),
-            )
-            .map_err(Self::Error::from_eth_err)?;
+                self.tx_resp_builder(),
+            )?;
             Ok(Some(block))
         }
     }
@@ -84,13 +96,15 @@ pub trait EthBlocks: LoadBlock {
         async move {
             if block_id.is_pending() {
                 // Pending block can be fetched directly without need for caching
-                return Ok(LoadBlock::provider(self)
+                return Ok(self
+                    .provider()
                     .pending_block()
                     .map_err(Self::Error::from_eth_err)?
-                    .map(|block| block.body.transactions.len()))
+                    .map(|block| block.body.transactions().len()))
             }

-            let block_hash = match LoadBlock::provider(self)
+            let block_hash = match self
+                .provider()
                 .block_hash_for_id(block_id)
                 .map_err(Self::Error::from_eth_err)?
             {
@@ -103,7 +117,7 @@ pub trait EthBlocks: LoadBlock {
                 .get_sealed_block_with_senders(block_hash)
                 .await
                 .map_err(Self::Error::from_eth_err)?
-                .map(|b| b.body.transactions.len()))
+                .map(|b| b.body.transactions().len()))
         }
     }

@@ -123,7 +137,7 @@ pub trait EthBlocks: LoadBlock {
     fn load_block_and_receipts(
         &self,
         block_id: BlockId,
-    ) -> impl Future<Output = BlockAndReceiptsResult<Self::Error>> + Send
+    ) -> impl Future<Output = BlockAndReceiptsResult<Self>> + Send
     where
         Self: LoadReceipt,
     {
@@ -131,7 +145,8 @@ pub trait EthBlocks: LoadBlock {
             if block_id.is_pending() {
                 // First, try to get the pending block from the provider, in case we already
                 // received the actual pending block from the CL.
-                if let Some((block, receipts)) = LoadBlock::provider(self)
+                if let Some((block, receipts)) = self
+                    .provider()
                     .pending_block_and_receipts()
                     .map_err(Self::Error::from_eth_err)?
                 {
@@ -144,11 +159,11 @@ pub trait EthBlocks: LoadBlock {
                 }
             }

-            if let Some(block_hash) = LoadBlock::provider(self)
-                .block_hash_for_id(block_id)
-                .map_err(Self::Error::from_eth_err)?
+            if let Some(block_hash) =
+                self.provider().block_hash_for_id(block_id).map_err(Self::Error::from_eth_err)?
             {
-                return LoadReceipt::cache(self)
+                return self
+                    .cache()
                     .get_block_and_receipts(block_hash)
                     .await
                     .map_err(Self::Error::from_eth_err)
@@ -162,11 +177,12 @@ pub trait EthBlocks: LoadBlock {
     /// Returns uncle headers of given block.
     ///
     /// Returns an empty vec if there are none.
+    #[expect(clippy::type_complexity)]
     fn ommers(
         &self,
         block_id: BlockId,
-    ) -> Result<Option<Vec<Header>>, Self::Error> {
-        LoadBlock::provider(self).ommers_by_id(block_id).map_err(Self::Error::from_eth_err)
+    ) -> Result<Option<Vec<ProviderHeader<Self::Provider>>>, Self::Error> {
+        self.provider().ommers_by_id(block_id).map_err(Self::Error::from_eth_err)
     }

     /// Returns uncle block at given index in given block.
@@ -181,18 +197,25 @@
         async move {
             let uncles = if block_id.is_pending() {
                 // Pending block can be fetched directly without need for caching
-                LoadBlock::provider(self)
+                self.provider()
                     .pending_block()
                     .map_err(Self::Error::from_eth_err)?
-                    .map(|block| block.body.ommers)
+                    .and_then(|block| block.body.ommers().map(|o| o.to_vec()))
             } else {
-                LoadBlock::provider(self)
-                    .ommers_by_id(block_id)
-                    .map_err(Self::Error::from_eth_err)?
+                self.provider().ommers_by_id(block_id).map_err(Self::Error::from_eth_err)?
             }
             .unwrap_or_default();

-            Ok(uncles.into_iter().nth(index.into()).map(uncle_block_from_header))
+            Ok(uncles.into_iter().nth(index.into()).map(|header| {
+                let block = alloy_consensus::Block::<alloy_consensus::TxEnvelope>::uncle(header);
+                let size = U256::from(block.length());
+                Block {
+                    uncles: vec![],
+                    header: Header::from_consensus(block.header.seal_slow(), None, Some(size)),
+                    transactions: BlockTransactions::Uncle,
+                    withdrawals: None,
+                }
+            }))
         }
     }
 }
@@ -200,40 +223,38 @@ pub trait EthBlocks: LoadBlock {

 /// Loads a block from database.
 ///
 /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods.
-pub trait LoadBlock: LoadPendingBlock + SpawnBlocking {
-    // Returns a handle for reading data from disk.
-    ///
-    /// Data access in default (L1) trait method implementations.
-    fn provider(&self) -> impl BlockReaderIdExt;
-
-    /// Returns a handle for reading data from memory.
-    ///
-    /// Data access in default (L1) trait method implementations.
-    fn cache(&self) -> &EthStateCache;
-
+pub trait LoadBlock: LoadPendingBlock + SpawnBlocking + RpcNodeCoreExt {
     /// Returns the block object for the given block id.
+    #[expect(clippy::type_complexity)]
     fn block_with_senders(
         &self,
         block_id: BlockId,
-    ) -> impl Future<Output = Result<Option<Arc<SealedBlockWithSenders>>, Self::Error>> + Send {
+    ) -> impl Future<
+        Output = Result<
+            Option<Arc<SealedBlockWithSenders<<Self::Provider as BlockReader>::Block>>>,
+            Self::Error,
+        >,
+    > + Send {
         async move {
             if block_id.is_pending() {
                 // Pending block can be fetched directly without need for caching
-                let maybe_pending = LoadPendingBlock::provider(self)
+                if let Some(pending_block) = self
+                    .provider()
                     .pending_block_with_senders()
-                    .map_err(Self::Error::from_eth_err)?;
-                return if maybe_pending.is_some() {
-                    Ok(maybe_pending.map(Arc::new))
-                } else {
-                    // If no pending block from provider, try to get local pending block
-                    return match self.local_pending_block().await? {
-                        Some((block, _)) => Ok(Some(Arc::new(block))),
-                        None => Ok(None),
-                    };
+                    .map_err(Self::Error::from_eth_err)?
+                {
+                    return Ok(Some(Arc::new(pending_block)));
+                }
+
+                // If no pending block from provider, try to get local pending block
+                return match self.local_pending_block().await? {
+                    Some((block, _)) => Ok(Some(Arc::new(block))),
+                    None => Ok(None),
                 };
             }

-            let block_hash = match LoadPendingBlock::provider(self)
+            let block_hash = match self
+                .provider()
                 .block_hash_for_id(block_id)
                 .map_err(Self::Error::from_eth_err)?
             {
diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs
index b43b34305bd..e22fccc6726 100644
--- a/crates/rpc/rpc-eth-api/src/helpers/call.rs
+++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs
@@ -1,29 +1,34 @@
 //! Loads a pending block from database. Helper trait for `eth_` transaction, call and trace RPC
 //! methods.
+use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace};
 use crate::{
-    AsEthApiError, FromEthApiError, FromEvmError, FullEthApiTypes, IntoEthApiError, RpcBlock,
+    helpers::estimate::EstimateCall, FromEthApiError, FromEvmError, FullEthApiTypes,
+    IntoEthApiError, RpcBlock, RpcNodeCore,
 };
+use alloy_consensus::BlockHeader;
 use alloy_eips::{eip1559::calc_next_block_base_fee, eip2930::AccessListResult};
 use alloy_primitives::{Address, Bytes, TxKind, B256, U256};
-use alloy_rpc_types::{
+use alloy_rpc_types_eth::{
     simulate::{SimBlock, SimulatePayload, SimulatedBlock},
     state::{EvmOverrides, StateOverride},
+    transaction::TransactionRequest,
     BlockId, Bundle, EthCallResponse, StateContext, TransactionInfo,
 };
-use alloy_rpc_types_eth::transaction::TransactionRequest;
 use futures::Future;
-use reth_chainspec::{EthChainSpec, MIN_TRANSACTION_GAS};
+use reth_chainspec::EthChainSpec;
 use reth_evm::{ConfigureEvm, ConfigureEvmEnv};
-use reth_primitives::{
-    revm_primitives::{
-        BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, HaltReason,
-        ResultAndState, TransactTo, TxEnv,
+use reth_node_api::BlockBody;
+use reth_primitives_traits::SignedTransaction;
+use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider, ProviderHeader};
+use reth_revm::{
+    database::StateProviderDatabase,
+    db::CacheDB,
+    primitives::{
+        BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, ResultAndState, TxEnv,
     },
-    Header, TransactionSigned,
+    DatabaseRef,
 };
-use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider, StateProvider};
-use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef};
 use reth_rpc_eth_types::{
     cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper},
     error::ensure_success,
@@ -34,19 +39,16 @@ use reth_rpc_eth_types::{
     simulate::{self, EthSimulateError},
     EthApiError, RevertError, RpcInvalidTransactionError, StateCacheDb,
 };
-use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO};
 use revm::{Database, DatabaseCommit, GetInspector};
 use revm_inspectors::{access_list::AccessListInspector, transfer::TransferInspector};
 use tracing::trace;

-use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace};
-
 /// Result type for `eth_simulateV1` RPC method.
 pub type SimulatedBlocksResult<N, E> = Result<Vec<SimulatedBlock<RpcBlock<N>>>, E>;

 /// Execution related functions for the [`EthApiServer`](crate::EthApiServer) trait in
 /// the `eth_` namespace.
-pub trait EthCall: Call + LoadPendingBlock {
+pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthApiTypes {
     /// Estimate gas needed for execution of the `request` at the [`BlockId`].
     fn estimate_gas_at(
         &self,
@@ -54,7 +56,7 @@ pub trait EthCall: Call + LoadPendingBlock {
         at: BlockId,
         state_override: Option<StateOverride>,
     ) -> impl Future<Output = Result<U256, Self::Error>> + Send {
-        Call::estimate_gas_at(self, request, at, state_override)
+        EstimateCall::estimate_gas_at(self, request, at, state_override)
     }

     /// `eth_simulateV1` executes an arbitrary number of transactions on top of the requested state.
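Aside (sketch, not from the patch): the simulate hunks below compute each successive block's base fee with `calc_next_block_base_fee`, i.e. the EIP-1559 update rule: the base fee moves by at most 1/8 per block, depending on how far the parent's gas usage is from the target (half the gas limit). A worked example, assuming mainnet parameters from `alloy_eips`:

use alloy_eips::eip1559::{calc_next_block_base_fee, BaseFeeParams};

fn next_base_fee_example() {
    // Parent block: 30M gas limit (15M target), completely full, 1 gwei base fee.
    let next = calc_next_block_base_fee(
        30_000_000,     // gas_used
        30_000_000,     // gas_limit
        1_000_000_000,  // parent base fee (1 gwei)
        BaseFeeParams::ethereum(),
    );
    // A full block raises the base fee by the maximum 1/8: 1 gwei -> 1.125 gwei.
    assert_eq!(next, 1_125_000_000);
}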
@@ -66,10 +68,7 @@ pub trait EthCall: Call + LoadPendingBlock {
         &self,
         payload: SimulatePayload,
         block: Option<BlockId>,
-    ) -> impl Future<Output = SimulatedBlocksResult<Self::NetworkTypes, Self::Error>> + Send
-    where
-        Self: LoadBlock + FullEthApiTypes,
-    {
+    ) -> impl Future<Output = SimulatedBlocksResult<Self::NetworkTypes, Self::Error>> + Send {
         async move {
             if payload.block_state_calls.len() > self.max_simulate_blocks() as usize {
                 return Err(EthApiError::InvalidParams("too many blocks.".to_string()).into())
             }
@@ -96,7 +95,7 @@ pub trait EthCall: Call + LoadPendingBlock {
             let base_block =
                 self.block_with_senders(block).await?.ok_or(EthApiError::HeaderNotFound(block))?;
             let mut parent_hash = base_block.header.hash();
-            let total_difficulty = LoadPendingBlock::provider(self)
+            let total_difficulty = RpcNodeCore::provider(self)
                 .header_td_by_number(block_env.number.to())
                 .map_err(Self::Error::from_eth_err)?
                 .ok_or(EthApiError::HeaderNotFound(block))?;
@@ -118,15 +117,15 @@ pub trait EthCall: Call + LoadPendingBlock {
                 block_env.timestamp += U256::from(1);

                 if validation {
-                    let chain_spec = LoadPendingBlock::provider(&this).chain_spec();
+                    let chain_spec = RpcNodeCore::provider(&this).chain_spec();
                     let base_fee_params =
                         chain_spec.base_fee_params_at_timestamp(block_env.timestamp.to());
                     let base_fee = if let Some(latest) = blocks.last() {
                         let header = &latest.inner.header;
                         calc_next_block_base_fee(
-                            header.gas_used,
-                            header.gas_limit,
-                            header.base_fee_per_gas.unwrap_or_default(),
+                            header.gas_used(),
+                            header.gas_limit(),
+                            header.base_fee_per_gas().unwrap_or_default(),
                             base_fee_params,
                         )
                     } else {
@@ -162,9 +161,11 @@
                         block_env.gas_limit.to(),
                         cfg.chain_id,
                         &mut db,
+                        this.tx_resp_builder(),
                     )?;

                     let mut calls = calls.into_iter().peekable();
+                    let mut senders = Vec::with_capacity(transactions.len());
                     let mut results = Vec::with_capacity(calls.len());

                     while let Some(tx) = calls.next() {
@@ -188,21 +189,31 @@
                             db.commit(res.state);
                         }

-                        results.push((env.tx.caller, res.result));
+                        senders.push(env.tx.caller);
+                        results.push(res.result);
                     }

-                    let block = simulate::build_block::<Self::NetworkTypes>(
-                        results,
-                        transactions,
+                    let (block, _) = this.assemble_block_and_receipts(
                         &block_env,
                         parent_hash,
-                        total_difficulty,
-                        return_full_transactions,
-                        &db,
-                    )?;
+                        // state root calculation is skipped for performance reasons
+                        B256::ZERO,
+                        transactions,
+                        results.clone(),
+                    );
+
+                    let block: SimulatedBlock<RpcBlock<Self::NetworkTypes>> =
+                        simulate::build_simulated_block(
+                            senders,
+                            results,
+                            total_difficulty,
+                            return_full_transactions,
+                            this.tx_resp_builder(),
+                            block,
+                        )?;

                     parent_hash = block.inner.header.hash;
-                    gas_used += block.inner.header.gas_used;
+                    gas_used += block.inner.header.gas_used();

                     blocks.push(block);
                 }
@@ -235,10 +246,7 @@
         bundle: Bundle,
         state_context: Option<StateContext>,
         mut state_override: Option<StateOverride>,
-    ) -> impl Future<Output = Result<Vec<EthCallResponse>, Self::Error>> + Send
-    where
-        Self: LoadBlock,
-    {
+    ) -> impl Future<Output = Result<Vec<EthCallResponse>, Self::Error>> + Send {
         async move {
             let Bundle { transactions, block_override } = bundle;
             if transactions.is_empty() {
@@ -257,7 +265,8 @@
             // if it's not pending, we should always use block_hash over block_number to ensure that
             // different provider calls query data related to the same block.
             if !is_block_target_pending {
-                target_block = LoadBlock::provider(self)
+                target_block = self
+                    .provider()
                     .block_hash_for_id(target_block)
                     .map_err(|_| EthApiError::HeaderNotFound(target_block))?
                     .ok_or_else(|| EthApiError::HeaderNotFound(target_block))?
@@ -274,14 +283,15 @@
             // we're essentially replaying the transactions in the block here, hence we need the
             // state that points to the beginning of the block, which is the state at
             // the parent block
-            let mut at = block.parent_hash;
+            let mut at = block.parent_hash();
             let mut replay_block_txs = true;

-            let num_txs = transaction_index.index().unwrap_or(block.body.transactions.len());
+            let num_txs =
+                transaction_index.index().unwrap_or_else(|| block.body.transactions().len());
             // but if all transactions are to be replayed, we can use the state at the block itself,
             // however only if we're not targeting the pending block, because for pending we can't
             // rely on the block's state being available
-            if !is_block_target_pending && num_txs == block.body.transactions.len() {
+            if !is_block_target_pending && num_txs == block.body.transactions().len() {
                 at = block.hash();
                 replay_block_txs = false;
             }
@@ -299,7 +309,7 @@
                     let env = EnvWithHandlerCfg::new_with_cfg_env(
                         cfg.clone(),
                         block_env.clone(),
-                        Call::evm_config(&this).tx_env(tx, *signer),
+                        RpcNodeCore::evm_config(&this).tx_env(tx, *signer),
                     );
                     let (res, _) = this.transact(&mut db, env)?;
                     db.commit(res.state);
@@ -451,7 +461,9 @@
 }

 /// Executes code on state.
-pub trait Call: LoadState + SpawnBlocking {
+pub trait Call:
+    LoadState<Evm: ConfigureEvm<Header = ProviderHeader<Self::Provider>>> + SpawnBlocking
+{
     /// Returns default gas limit to use for `eth_call` and tracing RPC methods.
     ///
     /// Data access in default trait method implementations.
@@ -460,11 +472,6 @@
     /// Returns the maximum number of blocks accepted for `eth_simulateV1`.
     fn max_simulate_blocks(&self) -> u64;

-    /// Returns a handle for reading evm config.
-    ///
-    /// Data access in default (L1) trait method implementations.
-    fn evm_config(&self) -> &impl ConfigureEvm<Header = Header>;
-
     /// Executes the closure with the state that corresponds to the given [`BlockId`].
     fn with_state_at_block<F, R>(&self, at: BlockId, f: F) -> Result<R, Self::Error>
     where
@@ -544,6 +551,16 @@
     ///
     /// This returns the configured [`EnvWithHandlerCfg`] for the given [`TransactionRequest`] at
     /// the given [`BlockId`] and with configured call settings: `prepare_call_env`.
+    ///
+    /// This is primarily used by `eth_call`.
+    ///
+    /// # Blocking behaviour
+    ///
+    /// This assumes executing the call is relatively more expensive on IO than CPU because it
+    /// transacts a single transaction on an empty in memory database. Because `eth_call`s are
+    /// usually allowed to consume a lot of gas, this also allows a lot of memory operations so
+    /// we assume this is not primarily CPU bound and instead spawn the call on a regular tokio task
+    /// instead, where blocking IO is less problematic.
     fn spawn_with_call_at(
         &self,
         request: TransactionRequest,
@@ -561,7 +578,7 @@
         async move {
             let (cfg, block_env, at) = self.evm_env_at(at).await?;
             let this = self.clone();
-            self.spawn_tracing(move |_| {
+            self.spawn_blocking_io(move |_| {
                 let state = this.state_at_block_id(at)?;
                 let mut db =
                     CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state)));
@@ -589,7 +606,7 @@
         f: F,
     ) -> impl Future<Output = Result<R, Self::Error>> + Send
     where
-        Self: LoadBlock + LoadPendingBlock + LoadTransaction,
+        Self: LoadBlock + LoadTransaction,
         F: FnOnce(TransactionInfo, ResultAndState, StateCacheDb<'_>) -> Result<R, Self::Error>
             + Send
             + 'static,
@@ -606,7 +623,7 @@

             // we need to get the state of the parent block because we're essentially replaying the
             // block the transaction is included in
-            let parent_block = block.parent_hash;
+            let parent_block = block.parent_hash();
             let this = self.clone();
             self.spawn_with_state_at_block(parent_block.into(), move |state| {
@@ -619,13 +636,13 @@
                     cfg.clone(),
                     block_env.clone(),
                     block_txs,
-                    tx.hash,
+                    *tx.tx_hash(),
                 )?;

                 let env = EnvWithHandlerCfg::new_with_cfg_env(
                     cfg,
                     block_env,
-                    Call::evm_config(&this).tx_env(tx.as_signed(), tx.signer()),
+                    RpcNodeCore::evm_config(&this).tx_env(tx.as_signed(), tx.signer()),
                 );

                 let (res, _) = this.transact(&mut db, env)?;
@@ -645,23 +662,24 @@
     /// Returns the index of the target transaction in the given iterator.
     fn replay_transactions_until<'a, DB, I>(
         &self,
-        db: &mut CacheDB<DB>,
+        db: &mut DB,
         cfg: CfgEnvWithHandlerCfg,
         block_env: BlockEnv,
         transactions: I,
         target_tx_hash: B256,
     ) -> Result<usize, Self::Error>
     where
-        DB: DatabaseRef,
+        DB: Database + DatabaseCommit,
         EthApiError: From<DB::Error>,
-        I: IntoIterator<Item = (&'a Address, &'a TransactionSigned)>,
+        I: IntoIterator<Item = (&'a Address, &'a <Self::Evm as ConfigureEvmEnv>::Transaction)>,
+        <Self::Evm as ConfigureEvmEnv>::Transaction: SignedTransaction,
     {
         let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default());

         let mut evm = self.evm_config().evm_with_env(db, env);
         let mut index = 0;
         for (sender, tx) in transactions {
-            if tx.hash() == target_tx_hash {
+            if *tx.tx_hash() == target_tx_hash {
                 // reached the target transaction
                 break
             }
@@ -673,283 +691,6 @@
         Ok(index)
     }

-    /// Estimate gas needed for execution of the `request` at the [`BlockId`].
-    fn estimate_gas_at(
-        &self,
-        request: TransactionRequest,
-        at: BlockId,
-        state_override: Option<StateOverride>,
-    ) -> impl Future<Output = Result<U256, Self::Error>> + Send
-    where
-        Self: LoadPendingBlock,
-    {
-        async move {
-            let (cfg, block_env, at) = self.evm_env_at(at).await?;
-
-            self.spawn_blocking_io(move |this| {
-                let state = this.state_at_block_id(at)?;
-                this.estimate_gas_with(cfg, block_env, request, state, state_override)
-            })
-            .await
-        }
-    }
-
-    /// Estimates the gas usage of the `request` with the state.
-    ///
-    /// This will execute the [`TransactionRequest`] and find the best gas limit via binary search.
-    ///
-    /// ## EVM settings
-    ///
-    /// This modifies certain EVM settings to mirror geth's `SkipAccountChecks` when transacting requests, see also: :
-    ///
-    /// - `disable_eip3607` is set to `true`
-    /// - `disable_base_fee` is set to `true`
-    /// - `nonce` is set to `None`
-    fn estimate_gas_with<S>(
-        &self,
-        mut cfg: CfgEnvWithHandlerCfg,
-        block: BlockEnv,
-        mut request: TransactionRequest,
-        state: S,
-        state_override: Option<StateOverride>,
-    ) -> Result<U256, Self::Error>
-    where
-        S: StateProvider,
-    {
-        // Disabled because eth_estimateGas is sometimes used with eoa senders
-        // See
-        cfg.disable_eip3607 = true;
-
-        // The basefee should be ignored for eth_estimateGas and similar
-        // See:
-        //
-        cfg.disable_base_fee = true;
-
-        // set nonce to None so that the correct nonce is chosen by the EVM
-        request.nonce = None;
-
-        // Keep a copy of gas related request values
-        let tx_request_gas_limit = request.gas;
-        let tx_request_gas_price = request.gas_price;
-        // the gas limit of the corresponding block
-        let block_env_gas_limit = block.gas_limit;
-
-        // Determine the highest possible gas limit, considering both the request's specified limit
-        // and the block's limit.
-        let mut highest_gas_limit = tx_request_gas_limit
-            .map(|tx_gas_limit| U256::from(tx_gas_limit).max(block_env_gas_limit))
-            .unwrap_or(block_env_gas_limit);
-
-        // Configure the evm env
-        let mut env = self.build_call_evm_env(cfg, block, request)?;
-        let mut db = CacheDB::new(StateProviderDatabase::new(state));
-
-        // Apply any state overrides if specified.
-        if let Some(state_override) = state_override {
-            apply_state_overrides(state_override, &mut db).map_err(Self::Error::from_eth_err)?;
-        }
-
-        // Optimize for simple transfer transactions, potentially reducing the gas estimate.
-        if env.tx.data.is_empty() {
-            if let TransactTo::Call(to) = env.tx.transact_to {
-                if let Ok(code) = db.db.account_code(to) {
-                    let no_code_callee = code.map(|code| code.is_empty()).unwrap_or(true);
-                    if no_code_callee {
-                        // If the tx is a simple transfer (call to an account with no code) we can
-                        // shortcircuit. But simply returning
-                        // `MIN_TRANSACTION_GAS` is dangerous because there might be additional
-                        // field combos that bump the price up, so we try executing the function
-                        // with the minimum gas limit to make sure.
-                        let mut env = env.clone();
-                        env.tx.gas_limit = MIN_TRANSACTION_GAS;
-                        if let Ok((res, _)) = self.transact(&mut db, env) {
-                            if res.result.is_success() {
-                                return Ok(U256::from(MIN_TRANSACTION_GAS))
-                            }
-                        }
-                    }
-                }
-            }
-        }
-
-        // Check funds of the sender (only useful to check if transaction gas price is more than 0).
- // - // The caller allowance is check by doing `(account.balance - tx.value) / tx.gas_price` - if env.tx.gas_price > U256::ZERO { - // cap the highest gas limit by max gas caller can afford with given gas price - highest_gas_limit = highest_gas_limit - .min(caller_gas_allowance(&mut db, &env.tx).map_err(Self::Error::from_eth_err)?); - } - - // We can now normalize the highest gas limit to a u64 - let mut highest_gas_limit: u64 = highest_gas_limit - .try_into() - .unwrap_or_else(|_| self.provider().chain_spec().max_gas_limit()); - - // If the provided gas limit is less than computed cap, use that - env.tx.gas_limit = env.tx.gas_limit.min(highest_gas_limit); - - trace!(target: "rpc::eth::estimate", ?env, "Starting gas estimation"); - - // Execute the transaction with the highest possible gas limit. - let (mut res, mut env) = match self.transact(&mut db, env.clone()) { - // Handle the exceptional case where the transaction initialization uses too much gas. - // If the gas price or gas limit was specified in the request, retry the transaction - // with the block's gas limit to determine if the failure was due to - // insufficient gas. - Err(err) - if err.is_gas_too_high() && - (tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) => - { - return Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) - } - // Propagate other results (successful or other errors). - ethres => ethres?, - }; - - let gas_refund = match res.result { - ExecutionResult::Success { gas_refunded, .. } => gas_refunded, - ExecutionResult::Halt { reason, gas_used } => { - // here we don't check for invalid opcode because already executed with highest gas - // limit - return Err(RpcInvalidTransactionError::halt(reason, gas_used).into_eth_err()) - } - ExecutionResult::Revert { output, .. } => { - // if price or limit was included in the request then we can execute the request - // again with the block's gas limit to check if revert is gas related or not - return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() { - Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) - } else { - // the transaction did revert - Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()) - } - } - }; - - // At this point we know the call succeeded but want to find the _best_ (lowest) gas the - // transaction succeeds with. We find this by doing a binary search over the possible range. - - // we know the tx succeeded with the configured gas limit, so we can use that as the - // highest, in case we applied a gas cap due to caller allowance above - highest_gas_limit = env.tx.gas_limit; - - // NOTE: this is the gas the transaction used, which is less than the - // transaction requires to succeed. 
-        let mut gas_used = res.result.gas_used();
-        // the lowest value is capped by the gas used by the unconstrained transaction
-        let mut lowest_gas_limit = gas_used.saturating_sub(1);
-
-        // As stated in Geth, there is a good chance that the transaction will pass if we set the
-        // gas limit to the execution gas used plus the gas refund, so we check this first
-        //
-        // Calculate the optimistic gas limit by adding gas used and gas refund,
-        // then applying a 64/63 multiplier to account for gas forwarding rules.
-        let optimistic_gas_limit = (gas_used + gas_refund + CALL_STIPEND_GAS) * 64 / 63;
-        if optimistic_gas_limit < highest_gas_limit {
-            // Set the transaction's gas limit to the calculated optimistic gas limit.
-            env.tx.gas_limit = optimistic_gas_limit;
-            // Re-execute the transaction with the new gas limit and update the result and
-            // environment.
-            (res, env) = self.transact(&mut db, env)?;
-            // Update the gas used based on the new result.
-            gas_used = res.result.gas_used();
-            // Update the gas limit estimates (highest and lowest) based on the execution result.
-            update_estimated_gas_range(
-                res.result,
-                optimistic_gas_limit,
-                &mut highest_gas_limit,
-                &mut lowest_gas_limit,
-            )?;
-        };
-
-        // Pick a point that's close to the estimated gas
-        let mut mid_gas_limit = std::cmp::min(
-            gas_used * 3,
-            ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64,
-        );
-
-        // Binary search narrows the range to find the minimum gas limit needed for the
-        // transaction to succeed.
-        while (highest_gas_limit - lowest_gas_limit) > 1 {
-            // An estimation error is allowed once the current gas limit range used in the binary
-            // search is small enough (less than 1.5% of the highest gas limit)
-            //
-            if (highest_gas_limit - lowest_gas_limit) as f64 / (highest_gas_limit as f64) <
-                ESTIMATE_GAS_ERROR_RATIO
-            {
-                break
-            };
-
-            env.tx.gas_limit = mid_gas_limit;
-
-            // Execute transaction and handle potential gas errors, adjusting limits accordingly.
-            match self.transact(&mut db, env.clone()) {
-                Err(err) if err.is_gas_too_high() => {
-                    // Decrease the highest gas limit if gas is too high
-                    highest_gas_limit = mid_gas_limit;
-                }
-                Err(err) if err.is_gas_too_low() => {
-                    // Increase the lowest gas limit if gas is too low
-                    lowest_gas_limit = mid_gas_limit;
-                }
-                // Handle other cases, including successful transactions.
-                ethres => {
-                    // Unpack the result and environment if the transaction was successful.
-                    (res, env) = ethres?;
-                    // Update the estimated gas range based on the transaction result.
-                    update_estimated_gas_range(
-                        res.result,
-                        mid_gas_limit,
-                        &mut highest_gas_limit,
-                        &mut lowest_gas_limit,
-                    )?;
-                }
-            }

-            // New midpoint
-            mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64;
-        }
-
-        Ok(U256::from(highest_gas_limit))
-    }
-
-    /// Executes the requests again after an out of gas error to check if the error is gas related
-    /// or not
-    #[inline]
-    fn map_out_of_gas_err<S>(
-        &self,
-        env_gas_limit: U256,
-        mut env: EnvWithHandlerCfg,
-        db: &mut CacheDB<StateProviderDatabase<S>>,
-    ) -> Self::Error
-    where
-        S: StateProvider,
-    {
-        let req_gas_limit = env.tx.gas_limit;
-        env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX);
-        let (res, _) = match self.transact(db, env) {
-            Ok(res) => res,
-            Err(err) => return err,
-        };
-        match res.result {
-            ExecutionResult::Success { .. } => {
-                // transaction succeeded by manually increasing the gas limit to
-                // highest, which means the caller lacks funds to pay for the tx
-                RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into_eth_err()
-            }
-            ExecutionResult::Revert { output, .. } => {
-                // reverted again after bumping the limit
-                RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()
-            }
-            ExecutionResult::Halt { reason, .. } => {
-                RpcInvalidTransactionError::EvmHalt(reason).into_eth_err()
-            }
-        }
-    }
-
     /// Configures a new [`TxEnv`] for the [`TransactionRequest`]
     ///
     /// All [`TxEnv`] fields are derived from the given [`TransactionRequest`], if fields are
@@ -960,7 +701,7 @@ pub trait Call: LoadState + SpawnBlocking {
         request: TransactionRequest,
     ) -> Result<TxEnv, Self::Error> {
         // Ensure that if versioned hashes are set, they're not empty
-        if request.blob_versioned_hashes.as_ref().map_or(false, |hashes| hashes.is_empty()) {
+        if request.blob_versioned_hashes.as_ref().is_some_and(|hashes| hashes.is_empty()) {
            return Err(RpcInvalidTransactionError::BlobTransactionMissingBlobHashes.into_eth_err())
        }

@@ -1114,51 +855,3 @@ pub trait Call: LoadState + SpawnBlocking {
         Ok(env)
     }
 }
-
-/// Updates the highest and lowest gas limits for binary search based on the execution result.
-///
-/// This function refines the gas limit estimates used in a binary search to find the optimal
-/// gas limit for a transaction. It adjusts the highest or lowest gas limits depending on
-/// whether the execution succeeded, reverted, or halted due to specific reasons.
-#[inline] -fn update_estimated_gas_range( - result: ExecutionResult, - tx_gas_limit: u64, - highest_gas_limit: &mut u64, - lowest_gas_limit: &mut u64, -) -> Result<(), EthApiError> { - match result { - ExecutionResult::Success { .. } => { - // Cap the highest gas limit with the succeeding gas limit. - *highest_gas_limit = tx_gas_limit; - } - ExecutionResult::Revert { .. } => { - // Increase the lowest gas limit. - *lowest_gas_limit = tx_gas_limit; - } - ExecutionResult::Halt { reason, .. } => { - match reason { - HaltReason::OutOfGas(_) | HaltReason::InvalidFEOpcode => { - // Both `OutOfGas` and `InvalidEFOpcode` can occur dynamically if the gas - // left is too low. Treat this as an out of gas - // condition, knowing that the call succeeds with a - // higher gas limit. - // - // Common usage of invalid opcode in OpenZeppelin: - // - - // Increase the lowest gas limit. - *lowest_gas_limit = tx_gas_limit; - } - err => { - // These cases should be unreachable because we know the transaction - // succeeds, but if they occur, treat them as an - // error. - return Err(RpcInvalidTransactionError::EvmHalt(err).into_eth_err()) - } - } - } - }; - - Ok(()) -} diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs new file mode 100644 index 00000000000..f9d62855be1 --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -0,0 +1,363 @@ +//! Estimate gas needed implementation + +use super::{Call, LoadPendingBlock}; +use crate::{AsEthApiError, FromEthApiError, IntoEthApiError}; +use alloy_primitives::U256; +use alloy_rpc_types_eth::{state::StateOverride, transaction::TransactionRequest, BlockId}; +use futures::Future; +use reth_chainspec::{EthChainSpec, MIN_TRANSACTION_GAS}; +use reth_provider::{ChainSpecProvider, StateProvider}; +use reth_revm::{ + database::StateProviderDatabase, + db::CacheDB, + primitives::{BlockEnv, CfgEnvWithHandlerCfg, ExecutionResult, HaltReason, TransactTo}, +}; +use reth_rpc_eth_types::{ + revm_utils::{apply_state_overrides, caller_gas_allowance}, + EthApiError, RevertError, RpcInvalidTransactionError, +}; +use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO}; +use revm_primitives::{db::Database, EnvWithHandlerCfg}; +use tracing::trace; + +/// Gas execution estimates +pub trait EstimateCall: Call { + /// Estimates the gas usage of the `request` with the state. + /// + /// This will execute the [`TransactionRequest`] and find the best gas limit via binary search. 
+    ///
+    /// ## EVM settings
+    ///
+    /// This modifies certain EVM settings to mirror geth's `SkipAccountChecks` when transacting requests, see also: :
+    ///
+    /// - `disable_eip3607` is set to `true`
+    /// - `disable_base_fee` is set to `true`
+    /// - `nonce` is set to `None`
+    fn estimate_gas_with<S>(
+        &self,
+        mut cfg: CfgEnvWithHandlerCfg,
+        block: BlockEnv,
+        mut request: TransactionRequest,
+        state: S,
+        state_override: Option<StateOverride>,
+    ) -> Result<U256, Self::Error>
+    where
+        S: StateProvider,
+    {
+        // Disabled because eth_estimateGas is sometimes used with eoa senders
+        // See
+        cfg.disable_eip3607 = true;
+
+        // The basefee should be ignored for eth_estimateGas and similar
+        // See:
+        //
+        cfg.disable_base_fee = true;
+
+        // set nonce to None so that the correct nonce is chosen by the EVM
+        request.nonce = None;
+
+        // Keep a copy of gas related request values
+        let tx_request_gas_limit = request.gas.map(U256::from);
+        let tx_request_gas_price = request.gas_price;
+        // the gas limit of the corresponding block
+        let block_env_gas_limit = block.gas_limit;
+
+        // Determine the highest possible gas limit, considering both the request's specified limit
+        // and the block's limit.
+        let mut highest_gas_limit = tx_request_gas_limit
+            .map(|mut tx_gas_limit| {
+                if block_env_gas_limit < tx_gas_limit {
+                    // requested gas limit is higher than the allowed gas limit, capping
+                    tx_gas_limit = block_env_gas_limit;
+                }
+                tx_gas_limit
+            })
+            .unwrap_or(block_env_gas_limit);
+
+        // Configure the evm env
+        let mut env = self.build_call_evm_env(cfg, block, request)?;
+        let mut db = CacheDB::new(StateProviderDatabase::new(state));
+
+        // Apply any state overrides if specified.
+        if let Some(state_override) = state_override {
+            apply_state_overrides(state_override, &mut db).map_err(Self::Error::from_eth_err)?;
+        }
+
+        // Optimize for simple transfer transactions, potentially reducing the gas estimate.
+        if env.tx.data.is_empty() {
+            if let TransactTo::Call(to) = env.tx.transact_to {
+                if let Ok(code) = db.db.account_code(to) {
+                    let no_code_callee = code.map(|code| code.is_empty()).unwrap_or(true);
+                    if no_code_callee {
+                        // If the tx is a simple transfer (call to an account with no code) we can
+                        // shortcircuit. But simply returning
+                        // `MIN_TRANSACTION_GAS` is dangerous because there might be additional
+                        // field combos that bump the price up, so we try executing the function
+                        // with the minimum gas limit to make sure.
+                        let mut env = env.clone();
+                        env.tx.gas_limit = MIN_TRANSACTION_GAS;
+                        if let Ok((res, _)) = self.transact(&mut db, env) {
+                            if res.result.is_success() {
+                                return Ok(U256::from(MIN_TRANSACTION_GAS))
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        // Check funds of the sender (only useful to check if transaction gas price is more than 0).
+ // + // The caller allowance is check by doing `(account.balance - tx.value) / tx.gas_price` + if env.tx.gas_price > U256::ZERO { + // cap the highest gas limit by max gas caller can afford with given gas price + highest_gas_limit = highest_gas_limit + .min(caller_gas_allowance(&mut db, &env.tx).map_err(Self::Error::from_eth_err)?); + } + + // We can now normalize the highest gas limit to a u64 + let mut highest_gas_limit: u64 = highest_gas_limit + .try_into() + .unwrap_or_else(|_| self.provider().chain_spec().max_gas_limit()); + + // If the provided gas limit is less than computed cap, use that + env.tx.gas_limit = env.tx.gas_limit.min(highest_gas_limit); + + trace!(target: "rpc::eth::estimate", ?env, "Starting gas estimation"); + + // Execute the transaction with the highest possible gas limit. + let (mut res, mut env) = match self.transact(&mut db, env.clone()) { + // Handle the exceptional case where the transaction initialization uses too much gas. + // If the gas price or gas limit was specified in the request, retry the transaction + // with the block's gas limit to determine if the failure was due to + // insufficient gas. + Err(err) + if err.is_gas_too_high() && + (tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) => + { + return Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) + } + // Propagate other results (successful or other errors). + ethres => ethres?, + }; + + let gas_refund = match res.result { + ExecutionResult::Success { gas_refunded, .. } => gas_refunded, + ExecutionResult::Halt { reason, gas_used } => { + // here we don't check for invalid opcode because already executed with highest gas + // limit + return Err(RpcInvalidTransactionError::halt(reason, gas_used).into_eth_err()) + } + ExecutionResult::Revert { output, .. } => { + // if price or limit was included in the request then we can execute the request + // again with the block's gas limit to check if revert is gas related or not + return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() { + Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) + } else { + // the transaction did revert + Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()) + } + } + }; + + // At this point we know the call succeeded but want to find the _best_ (lowest) gas the + // transaction succeeds with. We find this by doing a binary search over the possible range. + + // we know the tx succeeded with the configured gas limit, so we can use that as the + // highest, in case we applied a gas cap due to caller allowance above + highest_gas_limit = env.tx.gas_limit; + + // NOTE: this is the gas the transaction used, which is less than the + // transaction requires to succeed. 
+        let mut gas_used = res.result.gas_used();
+        // the lowest value is capped by the gas used by the unconstrained transaction
+        let mut lowest_gas_limit = gas_used.saturating_sub(1);
+
+        // As stated in Geth, there is a good chance that the transaction will pass if we set the
+        // gas limit to the execution gas used plus the gas refund, so we check this first
+        //
+        // Calculate the optimistic gas limit by adding gas used and gas refund,
+        // then applying a 64/63 multiplier to account for gas forwarding rules.
+        let optimistic_gas_limit = (gas_used + gas_refund + CALL_STIPEND_GAS) * 64 / 63;
+        if optimistic_gas_limit < highest_gas_limit {
+            // Set the transaction's gas limit to the calculated optimistic gas limit.
+            env.tx.gas_limit = optimistic_gas_limit;
+            // Re-execute the transaction with the new gas limit and update the result and
+            // environment.
+            (res, env) = self.transact(&mut db, env)?;
+            // Update the gas used based on the new result.
+            gas_used = res.result.gas_used();
+            // Update the gas limit estimates (highest and lowest) based on the execution result.
+            update_estimated_gas_range(
+                res.result,
+                optimistic_gas_limit,
+                &mut highest_gas_limit,
+                &mut lowest_gas_limit,
+            )?;
+        };
+
+        // Pick a point that's close to the estimated gas
+        let mut mid_gas_limit = std::cmp::min(
+            gas_used * 3,
+            ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64,
+        );
+
+        // Binary search narrows the range to find the minimum gas limit needed for the
+        // transaction to succeed.
+        while (highest_gas_limit - lowest_gas_limit) > 1 {
+            // An estimation error is allowed once the current gas limit range used in the binary
+            // search is small enough (less than 1.5% of the highest gas limit)
+            //
+            if (highest_gas_limit - lowest_gas_limit) as f64 / (highest_gas_limit as f64) <
+                ESTIMATE_GAS_ERROR_RATIO
+            {
+                break
+            };
+
+            env.tx.gas_limit = mid_gas_limit;
+
+            // Execute transaction and handle potential gas errors, adjusting limits accordingly.
+            match self.transact(&mut db, env.clone()) {
+                Err(err) if err.is_gas_too_high() => {
+                    // Decrease the highest gas limit if gas is too high
+                    highest_gas_limit = mid_gas_limit;
+                }
+                Err(err) if err.is_gas_too_low() => {
+                    // Increase the lowest gas limit if gas is too low
+                    lowest_gas_limit = mid_gas_limit;
+                }
+                // Handle other cases, including successful transactions.
+                ethres => {
+                    // Unpack the result and environment if the transaction was successful.
+                    (res, env) = ethres?;
+                    // Update the estimated gas range based on the transaction result.
+                    update_estimated_gas_range(
+                        res.result,
+                        mid_gas_limit,
+                        &mut highest_gas_limit,
+                        &mut lowest_gas_limit,
+                    )?;
+                }
+            }
+
+            // New midpoint
+            mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64;
+        }
+
+        Ok(U256::from(highest_gas_limit))
+    }
+
+    /// Estimate gas needed for execution of the `request` at the [`BlockId`].
+    fn estimate_gas_at(
+        &self,
+        request: TransactionRequest,
+        at: BlockId,
+        state_override: Option<StateOverride>,
+    ) -> impl Future<Output = Result<U256, Self::Error>> + Send
+    where
+        Self: LoadPendingBlock,
+    {
+        async move {
+            let (cfg, block_env, at) = self.evm_env_at(at).await?;
+
+            self.spawn_blocking_io(move |this| {
+                let state = this.state_at_block_id(at)?;
+                EstimateCall::estimate_gas_with(
+                    &this,
+                    cfg,
+                    block_env,
+                    request,
+                    state,
+                    state_override,
+                )
+            })
+            .await
+        }
+    }
+
+    /// Executes the requests again after an out of gas error to check if the error is gas related
+    /// or not
+    #[inline]
+    fn map_out_of_gas_err<DB>(
+        &self,
+        env_gas_limit: U256,
+        mut env: EnvWithHandlerCfg,
+        db: &mut DB,
+    ) -> Self::Error
+    where
+        DB: Database,
+        EthApiError: From<DB::Error>,
+    {
+        let req_gas_limit = env.tx.gas_limit;
+        env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX);
+        let (res, _) = match self.transact(db, env) {
+            Ok(res) => res,
+            Err(err) => return err,
+        };
+        match res.result {
+            ExecutionResult::Success { .. } => {
+                // transaction succeeded by manually increasing the gas limit to
+                // highest, which means the caller lacks funds to pay for the tx
+                RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into_eth_err()
+            }
+            ExecutionResult::Revert { output, .. } => {
+                // reverted again after bumping the limit
+                RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()
+            }
+            ExecutionResult::Halt { reason, .. } => {
+                RpcInvalidTransactionError::EvmHalt(reason).into_eth_err()
+            }
+        }
+    }
+}
+
+/// Updates the highest and lowest gas limits for binary search based on the execution result.
+///
+/// This function refines the gas limit estimates used in a binary search to find the optimal
+/// gas limit for a transaction. It adjusts the highest or lowest gas limits depending on
+/// whether the execution succeeded, reverted, or halted due to specific reasons.
+#[inline]
+pub fn update_estimated_gas_range(
+    result: ExecutionResult,
+    tx_gas_limit: u64,
+    highest_gas_limit: &mut u64,
+    lowest_gas_limit: &mut u64,
+) -> Result<(), EthApiError> {
+    match result {
+        ExecutionResult::Success { .. } => {
+            // Cap the highest gas limit with the succeeding gas limit.
+ *highest_gas_limit = tx_gas_limit; + } + ExecutionResult::Revert { .. } => { + // Increase the lowest gas limit. + *lowest_gas_limit = tx_gas_limit; + } + ExecutionResult::Halt { reason, .. } => { + match reason { + HaltReason::OutOfGas(_) | HaltReason::InvalidFEOpcode => { + // Both `OutOfGas` and `InvalidEFOpcode` can occur dynamically if the gas + // left is too low. Treat this as an out of gas + // condition, knowing that the call succeeds with a + // higher gas limit. + // + // Common usage of invalid opcode in OpenZeppelin: + // + + // Increase the lowest gas limit. + *lowest_gas_limit = tx_gas_limit; + } + err => { + // These cases should be unreachable because we know the transaction + // succeeds, but if they occur, treat them as an + // error. + return Err(RpcInvalidTransactionError::EvmHalt(err).into_eth_err()) + } + } + } + }; + + Ok(()) +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index 34ba6dc7e4e..e0618cb6910 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -1,13 +1,15 @@ //! Loads fee history from database. Helper trait for `eth_` fee and transaction RPC methods. +use alloy_consensus::BlockHeader; use alloy_primitives::U256; -use alloy_rpc_types::{BlockNumberOrTag, FeeHistory}; +use alloy_rpc_types_eth::{BlockNumberOrTag, FeeHistory}; use futures::Future; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider}; +use reth_chainspec::EthChainSpec; +use reth_primitives_traits::BlockBody; +use reth_provider::{BlockIdReader, ChainSpecProvider, HeaderProvider}; use reth_rpc_eth_types::{ - fee_history::calculate_reward_percentiles_for_block, EthApiError, EthStateCache, - FeeHistoryCache, FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, + fee_history::calculate_reward_percentiles_for_block, EthApiError, FeeHistoryCache, + FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError, }; use tracing::debug; @@ -82,7 +84,8 @@ pub trait EthFees: LoadFee { block_count = block_count.saturating_sub(1); } - let end_block = LoadFee::provider(self) + let end_block = self + .provider() .block_number_for_id(newest_block.into()) .map_err(Self::Error::from_eth_err)? 
.ok_or(EthApiError::HeaderNotFound(newest_block.into()))?; @@ -147,13 +150,12 @@ pub trait EthFees: LoadFee { // Also need to include the `base_fee_per_gas` and `base_fee_per_blob_gas` for the // next block base_fee_per_gas - .push(last_entry.next_block_base_fee(LoadFee::provider(self).chain_spec()) - as u128); + .push(last_entry.next_block_base_fee(self.provider().chain_spec()) as u128); base_fee_per_blob_gas.push(last_entry.next_block_blob_fee().unwrap_or_default()); } else { // read the requested header range - let headers = LoadFee::provider(self) + let headers = self.provider() .sealed_headers_range(start_block..=end_block) .map_err(Self::Error::from_eth_err)?; if headers.len() != block_count as usize { @@ -162,17 +164,17 @@ pub trait EthFees: LoadFee { for header in &headers { - base_fee_per_gas.push(header.base_fee_per_gas.unwrap_or_default() as u128); - gas_used_ratio.push(header.gas_used as f64 / header.gas_limit as f64); + base_fee_per_gas.push(header.base_fee_per_gas().unwrap_or_default() as u128); + gas_used_ratio.push(header.gas_used() as f64 / header.gas_limit() as f64); base_fee_per_blob_gas.push(header.blob_fee().unwrap_or_default()); blob_gas_used_ratio.push( - header.blob_gas_used.unwrap_or_default() as f64 - / reth_primitives::constants::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, + header.blob_gas_used().unwrap_or_default() as f64 + / alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, ); // Percentiles were specified, so we need to collect reward percentile ino if let Some(percentiles) = &reward_percentiles { - let (block, receipts) = LoadFee::cache(self) + let (block, receipts) = self.cache() .get_block_and_receipts(header.hash()) .await .map_err(Self::Error::from_eth_err)? @@ -180,9 +182,9 @@ pub trait EthFees: LoadFee { rewards.push( calculate_reward_percentiles_for_block( percentiles, - header.gas_used, - header.base_fee_per_gas.unwrap_or_default(), - &block.body.transactions, + header.gas_used(), + header.base_fee_per_gas().unwrap_or_default(), + block.body.transactions(), &receipts, ) .unwrap_or_default(), @@ -197,14 +199,10 @@ pub trait EthFees: LoadFee { // The unwrap is safe since we checked earlier that we got at least 1 header. let last_header = headers.last().expect("is present"); base_fee_per_gas.push( - LoadFee::provider(self) + last_header.next_block_base_fee( + self.provider() .chain_spec() - .base_fee_params_at_timestamp(last_header.timestamp) - .next_block_base_fee( - last_header.gas_used , - last_header.gas_limit, - last_header.base_fee_per_gas.unwrap_or_default() , - ) as u128, + .base_fee_params_at_timestamp(last_header.timestamp())).unwrap_or_default() as u128 ); // Same goes for the `base_fee_per_blob_gas`: @@ -242,22 +240,10 @@ pub trait EthFees: LoadFee { /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` fees RPC methods. pub trait LoadFee: LoadBlock { - // Returns a handle for reading data from disk. - /// - /// Data access in default (L1) trait method implementations. - fn provider( - &self, - ) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider; - - /// Returns a handle for reading data from memory. - /// - /// Data access in default (L1) trait method implementations. - fn cache(&self) -> &EthStateCache; - /// Returns a handle for reading gas price. /// /// Data access in default (L1) trait method implementations. - fn gas_oracle(&self) -> &GasPriceOracle; + fn gas_oracle(&self) -> &GasPriceOracle; /// Returns a handle for reading fee history data from memory. 
/// @@ -299,7 +285,7 @@ pub trait LoadFee: LoadBlock { .block_with_senders(BlockNumberOrTag::Pending.into()) .await? .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Pending.into()))? - .base_fee_per_gas + .base_fee_per_gas() .ok_or(EthApiError::InvalidTransaction( RpcInvalidTransactionError::TxTypeNotSupported, ))?; @@ -336,7 +322,7 @@ pub trait LoadFee: LoadBlock { let suggested_tip = self.suggested_priority_fee(); async move { let (header, suggested_tip) = futures::try_join!(header, suggested_tip)?; - let base_fee = header.and_then(|h| h.base_fee_per_gas).unwrap_or_default(); + let base_fee = header.and_then(|h| h.base_fee_per_gas()).unwrap_or_default(); Ok(suggested_tip + U256::from(base_fee)) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/mod.rs b/crates/rpc/rpc-eth-api/src/helpers/mod.rs index 8adb0e281e7..27d23da74b2 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/mod.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/mod.rs @@ -17,7 +17,7 @@ pub mod block; pub mod blocking_task; pub mod call; -pub mod error; +pub mod estimate; pub mod fee; pub mod pending_block; pub mod receipt; @@ -42,12 +42,9 @@ pub use transaction::{EthTransactions, LoadTransaction}; use crate::FullEthApiTypes; /// Extension trait that bundles traits needed for tracing transactions. -pub trait TraceExt: - LoadTransaction + LoadBlock + LoadPendingBlock + SpawnBlocking + Trace + Call -{ -} +pub trait TraceExt: LoadTransaction + LoadBlock + SpawnBlocking + Trace + Call {} -impl TraceExt for T where T: LoadTransaction + LoadBlock + LoadPendingBlock + Trace + Call {} +impl TraceExt for T where T: LoadTransaction + LoadBlock + Trace + Call {} /// Helper trait to unify all `eth` rpc server building block traits, for simplicity. /// diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 81c6a567846..c6e0e0c5939 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -1,148 +1,157 @@ //! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace //! RPC methods. 
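Aside (sketch, not from the patch): the rewrite of `LoadPendingBlock` below follows the same refactor as the earlier files in this patch: per-trait `fn provider()` / `fn pool()` / `fn evm_config()` accessors are deleted, and the helper trait instead inherits them from the shared `RpcNodeCore` supertrait, stating only the extra bounds it needs on the associated types. A toy sketch of that shape, with simplified stand-in names (not the actual reth trait definitions):

// One core trait owns the data-access handles.
trait NodeCore {
    type Provider;
    type Pool;
    type Evm;
    fn provider(&self) -> &Self::Provider;
    fn pool(&self) -> &Self::Pool;
    fn evm_config(&self) -> &Self::Evm;
}

// Helper traits no longer declare their own accessors; default methods can
// use the shared handles directly via the supertrait.
trait LoadPendingBlockLike: NodeCore {
    fn describe(&self) -> &'static str {
        let _provider = self.provider();
        let _pool = self.pool();
        "uses the shared handles"
    }
}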
-use std::time::{Duration, Instant};
-
-use crate::{EthApiTypes, FromEthApiError, FromEvmError};
-
-use alloy_consensus::EMPTY_ROOT_HASH;
-use alloy_primitives::{BlockNumber, B256, U256};
-use alloy_rpc_types::BlockNumberOrTag;
+use super::SpawnBlocking;
+use crate::{EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore};
+use alloy_consensus::{BlockHeader, Transaction};
+use alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK;
+use alloy_network::Network;
+use alloy_primitives::B256;
+use alloy_rpc_types_eth::BlockNumberOrTag;
 use futures::Future;
 use reth_chainspec::{EthChainSpec, EthereumHardforks};
-use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv};
-use reth_execution_types::ExecutionOutcome;
-use reth_primitives::{
- constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE},
- proofs::calculate_transaction_root,
- revm_primitives::{
- BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction,
- ResultAndState, SpecId,
- },
- Block, BlockBody, Header, Receipt, Requests, SealedBlockWithSenders, SealedHeader,
- TransactionSignedEcRecovered, EMPTY_OMMER_ROOT_HASH,
+use reth_errors::RethError;
+use reth_evm::{
+ state_change::post_block_withdrawals_balance_increments, system_calls::SystemCaller,
+ ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes,
 };
+use reth_primitives::{BlockExt, InvalidTransactionError, SealedBlockWithSenders};
+use reth_primitives_traits::receipt::ReceiptExt;
 use reth_provider::{
- BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderError,
- ReceiptProvider, StateProviderFactory,
+ BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderBlock, ProviderError,
+ ProviderHeader, ProviderReceipt, ProviderTx, ReceiptProvider, StateProviderFactory,
 };
 use reth_revm::{
- database::StateProviderDatabase, state_change::post_block_withdrawals_balance_increments,
+ database::StateProviderDatabase,
+ primitives::{
+ BlockEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction,
+ ResultAndState,
+ },
 };
 use reth_rpc_eth_types::{EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin};
-use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool};
-use reth_trie::HashedPostState;
+use reth_transaction_pool::{
+ error::InvalidPoolTransactionError, BestTransactionsAttributes, PoolTransaction,
+ TransactionPool,
+};
 use revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State};
+use std::time::{Duration, Instant};
 use tokio::sync::Mutex;
 use tracing::debug;
-use super::SpawnBlocking;
-
 /// Loads a pending block from database.
 ///
 /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods.
-pub trait LoadPendingBlock: EthApiTypes {
- /// Returns a handle for reading data from disk.
- ///
- /// Data access in default (L1) trait method implementations.
- fn provider(
- &self,
- ) -> impl BlockReaderIdExt
- + EvmEnvProvider
- + ChainSpecProvider
- + StateProviderFactory;
-
- /// Returns a handle for reading data from transaction pool.
- ///
- /// Data access in default (L1) trait method implementations.
- fn pool(&self) -> impl TransactionPool;
-
+pub trait LoadPendingBlock:
+ EthApiTypes<
+ NetworkTypes: Network<
+ HeaderResponse = alloy_rpc_types_eth::Header>,
+ >,
+ > + RpcNodeCore<
+ Provider: BlockReaderIdExt
+ + EvmEnvProvider>
+ + ChainSpecProvider
+ + StateProviderFactory,
+ Pool: TransactionPool>>,
+ Evm: ConfigureEvm<
+ Header = ProviderHeader,
+ Transaction = ProviderTx,
+ >,
+ >
+{
 /// Returns a handle to the pending block.
 ///
 /// Data access in default (L1) trait method implementations.
- fn pending_block(&self) -> &Mutex>;
-
- /// Returns a handle for reading evm config.
- ///
- /// Data access in default (L1) trait method implementations.
- fn evm_config(&self) -> &impl ConfigureEvm;
+ #[expect(clippy::type_complexity)]
+ fn pending_block(
+ &self,
+ ) -> &Mutex, ProviderReceipt>>>;
 /// Configures the [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the pending block
 ///
 /// If no pending block is available, this will derive it from the `latest` block
- fn pending_block_env_and_cfg(&self) -> Result {
- let origin: PendingBlockEnvOrigin = if let Some(pending) =
+ #[expect(clippy::type_complexity)]
+ fn pending_block_env_and_cfg(
+ &self,
+ ) -> Result<
+ PendingBlockEnv, ProviderReceipt>,
+ Self::Error,
+ > {
+ if let Some(block) =
 self.provider().pending_block_with_senders().map_err(Self::Error::from_eth_err)?
 {
- PendingBlockEnvOrigin::ActualPending(pending)
- } else {
- // no pending block from the CL yet, so we use the latest block and modify the env
- // values that we can
- let latest = self
+ if let Some(receipts) = self
 .provider()
- .latest_header()
+ .receipts_by_block(block.hash().into())
 .map_err(Self::Error::from_eth_err)?
- .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?;
-
- let (mut latest_header, block_hash) = latest.split();
- // child block
- latest_header.number += 1;
- // assumed child block is in the next slot: 12s
- latest_header.timestamp += 12;
- // base fee of the child block
- let chain_spec = self.provider().chain_spec();
-
- latest_header.base_fee_per_gas = latest_header.next_block_base_fee(
- chain_spec.base_fee_params_at_timestamp(latest_header.timestamp),
- );
-
- // update excess blob gas consumed above target
- latest_header.excess_blob_gas = latest_header.next_block_excess_blob_gas();
-
- // we're reusing the same block hash because we need this to lookup the block's state
- let latest = SealedHeader::new(latest_header, block_hash);
-
- PendingBlockEnvOrigin::DerivedFromLatest(latest)
- };
-
- let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST);
+ {
+ // Note: for the PENDING block we assume it is past the known merge block and
+ // thus this will not fail when looking up the total
+ // difficulty value for the blockenv.
+ let (cfg, block_env) = self
+ .provider()
+ .env_with_header(block.header(), self.evm_config().clone())
+ .map_err(Self::Error::from_eth_err)?;
+
+ return Ok(PendingBlockEnv::new(
+ cfg,
+ block_env,
+ PendingBlockEnvOrigin::ActualPending(block, receipts),
+ ));
+ }
+ }
- let mut block_env = BlockEnv::default();
- // Note: for the PENDING block we assume it is past the known merge block and thus this will
- // not fail when looking up the total difficulty value for the blockenv.
- self.provider()
- .fill_env_with_header(
- &mut cfg,
- &mut block_env,
- origin.header(),
- self.evm_config().clone(),
+ // no pending block from the CL yet, so we use the latest block and modify the env
+ // values that we can
+ let latest = self
+ .provider()
+ .latest_header()
+ .map_err(Self::Error::from_eth_err)?
+ .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?;
+
+ let (cfg, block_env) = self
+ .evm_config()
+ .next_cfg_and_block_env(
+ &latest,
+ NextBlockEnvAttributes {
+ timestamp: latest.timestamp() + 12,
+ suggested_fee_recipient: latest.beneficiary(),
+ prev_randao: B256::random(),
+ },
 )
+ .map_err(RethError::other)
 .map_err(Self::Error::from_eth_err)?;
- Ok(PendingBlockEnv::new(cfg, block_env, origin))
+ Ok(PendingBlockEnv::new(
+ cfg,
+ block_env,
+ PendingBlockEnvOrigin::DerivedFromLatest(latest.hash()),
+ ))
 }
 /// Returns the locally built pending block
+ #[expect(clippy::type_complexity)]
 fn local_pending_block(
 &self,
- ) -> impl Future)>, Self::Error>> + Send
+ ) -> impl Future<
+ Output = Result<
+ Option<(
+ SealedBlockWithSenders<::Block>,
+ Vec>,
+ )>,
+ Self::Error,
+ >,
+ > + Send
 where
 Self: SpawnBlocking,
 {
 async move {
 let pending = self.pending_block_env_and_cfg()?;
- if pending.origin.is_actual_pending() {
- if let Some(block) = pending.origin.clone().into_actual_pending() {
- // we have the real pending block, so we should also have its receipts
- if let Some(receipts) = self
- .provider()
- .receipts_by_block(block.hash().into())
- .map_err(Self::Error::from_eth_err)?
- {
- return Ok(Some((block, receipts)))
- }
+ let parent_hash = match pending.origin {
+ PendingBlockEnvOrigin::ActualPending(block, receipts) => {
+ return Ok(Some((block, receipts)));
 }
- }
+ PendingBlockEnvOrigin::DerivedFromLatest(parent_hash) => parent_hash,
+ };
 // we couldn't find the real pending block, so we need to build it ourselves
 let mut lock = self.pending_block().lock().await;
@@ -152,11 +161,11 @@ pub trait LoadPendingBlock: EthApiTypes {
 // check if the block is still good
 if let Some(pending_block) = lock.as_ref() {
 // this is guaranteed to be the `latest` header
- if pending.block_env.number.to::() == pending_block.block.number &&
- pending.origin.header().hash() == pending_block.block.parent_hash &&
+ if pending.block_env.number.to::() == pending_block.block.number() &&
+ parent_hash == pending_block.block.parent_hash() &&
 now <= pending_block.expires_at
 {
- return Ok(Some((pending_block.block.clone(), pending_block.receipts.clone())))
+ return Ok(Some((pending_block.block.clone(), pending_block.receipts.clone())));
 }
 }
@@ -164,7 +173,7 @@ pub trait LoadPendingBlock: EthApiTypes {
 let (sealed_block, receipts) = match self
 .spawn_blocking_io(move |this| {
 // we rebuild the block
- this.build_block(pending)
+ this.build_block(pending.cfg, pending.block_env, parent_hash)
 })
 .await
 {
@@ -186,33 +195,45 @@ pub trait LoadPendingBlock: EthApiTypes {
 }
 }
- /// Assembles a [`Receipt`] for a transaction, based on its [`ExecutionResult`].
+ /// Assembles a receipt for a transaction, based on its [`ExecutionResult`].
 fn assemble_receipt(
 &self,
- tx: &TransactionSignedEcRecovered,
+ tx: &ProviderTx,
 result: ExecutionResult,
 cumulative_gas_used: u64,
- ) -> Receipt {
- #[allow(clippy::needless_update)]
- Receipt {
- tx_type: tx.tx_type(),
- success: result.is_success(),
- cumulative_gas_used,
- logs: result.into_logs().into_iter().map(Into::into).collect(),
- ..Default::default()
- }
- }
+ ) -> ProviderReceipt;
- /// Calculates receipts root in block building.
- ///
- /// Panics if block is not in the [`ExecutionOutcome`]'s block range.
- fn receipts_root(
+ /// Assembles a pending block.
+ fn assemble_block(
 &self,
- _block_env: &BlockEnv,
- execution_outcome: &ExecutionOutcome,
- block_number: BlockNumber,
- ) -> B256 {
- execution_outcome.receipts_root_slow(block_number).expect("Block is present")
+ block_env: &BlockEnv,
+ parent_hash: revm_primitives::B256,
+ state_root: revm_primitives::B256,
+ transactions: Vec>,
+ receipts: &[ProviderReceipt],
+ ) -> ProviderBlock;
+
+ /// Helper to invoke both [`Self::assemble_block`] and [`Self::assemble_receipt`].
+ fn assemble_block_and_receipts(
+ &self,
+ block_env: &BlockEnv,
+ parent_hash: revm_primitives::B256,
+ state_root: revm_primitives::B256,
+ transactions: Vec>,
+ results: Vec,
+ ) -> (ProviderBlock, Vec>) {
+ let mut cumulative_gas_used = 0;
+ let mut receipts = Vec::with_capacity(results.len());
+
+ for (tx, outcome) in transactions.iter().zip(results) {
+ cumulative_gas_used += outcome.gas_used();
+ receipts.push(self.assemble_receipt(tx, outcome, cumulative_gas_used));
+ }
+
+ let block =
+ self.assemble_block(block_env, parent_hash, state_root, transactions, &receipts);
+
+ (block, receipts)
 }
 /// Builds a pending block using the configured provider and pool.
@@ -221,16 +242,22 @@ pub trait LoadPendingBlock: EthApiTypes {
 ///
 /// After Cancun, if the origin is the actual pending block, the block includes the EIP-4788 pre
 /// block contract call using the parent beacon block root received from the CL.
+ #[expect(clippy::type_complexity)]
 fn build_block(
 &self,
- env: PendingBlockEnv,
- ) -> Result<(SealedBlockWithSenders, Vec), Self::Error>
+ cfg: CfgEnvWithHandlerCfg,
+ block_env: BlockEnv,
+ parent_hash: B256,
+ ) -> Result<
+ (
+ SealedBlockWithSenders>,
+ Vec>,
+ ),
+ Self::Error,
+ >
 where
 EthApiError: From,
 {
- let PendingBlockEnv { cfg, block_env, origin } = env;
-
- let parent_hash = origin.build_target_hash();
 let state_provider = self
 .provider()
 .history_by_block_hash(parent_hash)
@@ -242,7 +269,6 @@ pub trait LoadPendingBlock: EthApiTypes {
 let mut sum_blob_gas_used = 0;
 let block_gas_limit: u64 = block_env.gas_limit.to::();
 let base_fee = block_env.basefee.to::();
- let block_number = block_env.number.to::();
 let mut executed_txs = Vec::new();
 let mut senders = Vec::new();
@@ -252,37 +278,15 @@ pub trait LoadPendingBlock: EthApiTypes {
 block_env.get_blob_gasprice().map(|gasprice| gasprice as u64),
 ));
- let (withdrawals, withdrawals_root) = match origin {
- PendingBlockEnvOrigin::ActualPending(ref block) => {
- (block.body.withdrawals.clone(), block.withdrawals_root)
- }
- PendingBlockEnvOrigin::DerivedFromLatest(_) => (None, None),
- };
-
 let chain_spec = self.provider().chain_spec();
 let mut system_caller = SystemCaller::new(self.evm_config().clone(), chain_spec.clone());
- let parent_beacon_block_root = if origin.is_actual_pending() {
- // apply eip-4788 pre block contract call if we got the block from the CL with the real
- // parent beacon block root
- system_caller
- .pre_block_beacon_root_contract_call(
- &mut db,
- &cfg,
- &block_env,
- origin.header().parent_beacon_block_root,
- )
- .map_err(|err| EthApiError::Internal(err.into()))?;
- origin.header().parent_beacon_block_root
- } else {
- None
- };
 system_caller
- .pre_block_blockhashes_contract_call(&mut db, &cfg, &block_env, origin.header().hash())
+ .pre_block_blockhashes_contract_call(&mut db, &cfg, &block_env, parent_hash)
 .map_err(|err| EthApiError::Internal(err.into()))?;
- let mut receipts = Vec::new();
+ let mut results = Vec::new();
 while let Some(pool_tx) = best_txs.next() {
 // ensure we still have capacity for this transaction
@@ -290,7 +294,13 @@ pub trait LoadPendingBlock: EthApiTypes {
 // we can't fit this transaction into the block, so we need to mark it as invalid
 // which also removes all dependent transaction from the iterator before we can
 // continue
- best_txs.mark_invalid(&pool_tx);
+ best_txs.mark_invalid(
+ &pool_tx,
+ InvalidPoolTransactionError::ExceedsGasLimit(
+ pool_tx.gas_limit(),
+ block_gas_limit,
+ ),
+ );
 continue
 }
@@ -298,23 +308,33 @@ pub trait LoadPendingBlock: EthApiTypes {
 // we don't want to leak any state changes made by private transactions, so we mark
 // them as invalid here which removes all dependent transactions from the iterator
 // before we can continue
- best_txs.mark_invalid(&pool_tx);
+ best_txs.mark_invalid(
+ &pool_tx,
+ InvalidPoolTransactionError::Consensus(
+ InvalidTransactionError::TxTypeNotSupported,
+ ),
+ );
 continue
 }
 // convert tx to a signed transaction
- let tx = pool_tx.to_recovered_transaction();
+ let tx = pool_tx.to_consensus();
 // There's only limited amount of blob space available per block, so we need to check if
 // the EIP-4844 can still fit in the block
- if let Some(blob_tx) = tx.transaction.as_eip4844() {
- let tx_blob_gas = blob_tx.blob_gas();
+ if let Some(tx_blob_gas) = tx.blob_gas_used() {
 if sum_blob_gas_used + tx_blob_gas > MAX_DATA_GAS_PER_BLOCK {
 // we can't fit this _blob_ transaction into the block, so we mark it as
 // invalid, which removes its dependent transactions from
 // the iterator. This is similar to the gas limit condition
 // for regular transactions above.
- best_txs.mark_invalid(&pool_tx);
+ best_txs.mark_invalid(
+ &pool_tx,
+ InvalidPoolTransactionError::ExceedsGasLimit(
+ tx_blob_gas,
+ MAX_DATA_GAS_PER_BLOCK,
+ ),
+ );
 continue
 }
 }
@@ -338,7 +358,12 @@ pub trait LoadPendingBlock: EthApiTypes {
 } else {
 // if the transaction is invalid, we can skip it and all of its
 // descendants
- best_txs.mark_invalid(&pool_tx);
+ best_txs.mark_invalid(
+ &pool_tx,
+ InvalidPoolTransactionError::Consensus(
+ InvalidTransactionError::TxTypeNotSupported,
+ ),
+ );
 }
 continue
 }
@@ -355,8 +380,7 @@ pub trait LoadPendingBlock: EthApiTypes {
 db.commit(state);
 // add to the total blob gas used if the transaction successfully executed
- if let Some(blob_tx) = tx.transaction.as_eip4844() {
- let tx_blob_gas = blob_tx.blob_gas();
+ if let Some(tx_blob_gas) = tx.blob_gas_used() {
 sum_blob_gas_used += tx_blob_gas;
 // if we've reached the max data gas per block, we can skip blob txs entirely
@@ -370,20 +394,18 @@ pub trait LoadPendingBlock: EthApiTypes {
 // add gas used by the transaction to cumulative gas used, before creating the receipt
 cumulative_gas_used += gas_used;
- // Push transaction changeset and calculate header bloom filter for receipt.
- receipts.push(Some(self.assemble_receipt(&tx, result, cumulative_gas_used)));
-
 // append transaction to the list of executed transactions
 let (tx, sender) = tx.to_components();
 executed_txs.push(tx);
 senders.push(sender);
+ results.push(result);
 }
 // executes the withdrawals and commits them to the Database and BundleState.
 let balance_increments = post_block_withdrawals_balance_increments(
 chain_spec.as_ref(),
 block_env.timestamp.try_into().unwrap_or(u64::MAX),
- &withdrawals.clone().unwrap_or_default(),
+ &[],
 );
 // increment account balances for withdrawals
@@ -392,72 +414,20 @@ pub trait LoadPendingBlock: EthApiTypes {
 // merge all transitions into bundle state.
 db.merge_transitions(BundleRetention::PlainState);
- let execution_outcome = ExecutionOutcome::new(
- db.take_bundle(),
- vec![receipts.clone()].into(),
- block_number,
- Vec::new(),
- );
- let hashed_state = HashedPostState::from_bundle_state(&execution_outcome.state().state);
-
- let receipts_root = self.receipts_root(&block_env, &execution_outcome, block_number);
-
- let logs_bloom =
- execution_outcome.block_logs_bloom(block_number).expect("Block is present");
+ let bundle_state = db.take_bundle();
+ let hashed_state = db.database.hashed_post_state(&bundle_state);
 // calculate the state root
- let state_provider = &db.database;
- let state_root =
- state_provider.state_root(hashed_state).map_err(Self::Error::from_eth_err)?;
-
- // create the block header
- let transactions_root = calculate_transaction_root(&executed_txs);
-
- // check if cancun is activated to set eip4844 header fields correctly
- let blob_gas_used =
- (cfg.handler_cfg.spec_id >= SpecId::CANCUN).then_some(sum_blob_gas_used);
-
- // note(onbjerg): the rpc spec has not been changed to include requests, so for now we just
- // set these to empty
- let (requests, requests_root) =
- if chain_spec.is_prague_active_at_timestamp(block_env.timestamp.to::()) {
- (Some(Requests::default()), Some(EMPTY_ROOT_HASH))
- } else {
- (None, None)
- };
+ let state_root = db.database.state_root(hashed_state).map_err(Self::Error::from_eth_err)?;
- let header = Header {
+ let (block, receipts) = self.assemble_block_and_receipts(
+ &block_env,
 parent_hash,
- ommers_hash: EMPTY_OMMER_ROOT_HASH,
- beneficiary: block_env.coinbase,
 state_root,
- transactions_root,
- receipts_root,
- withdrawals_root,
- logs_bloom,
- timestamp: block_env.timestamp.to::(),
- mix_hash: block_env.prevrandao.unwrap_or_default(),
- nonce: BEACON_NONCE.into(),
- base_fee_per_gas: Some(base_fee),
- number: block_number,
- gas_limit: block_gas_limit,
- difficulty: U256::ZERO,
- gas_used: cumulative_gas_used,
- blob_gas_used: blob_gas_used.map(Into::into),
- excess_blob_gas: block_env.get_blob_excess_gas().map(Into::into),
- extra_data: Default::default(),
- parent_beacon_block_root,
- requests_root,
- };
-
- // Convert Vec> to Vec
- let receipts: Vec = receipts.into_iter().flatten().collect();
-
- // seal the block
- let block = Block {
- header,
- body: BlockBody { transactions: executed_txs, ommers: vec![], withdrawals, requests },
- };
+ executed_txs,
+ results,
+ );
+
 Ok((SealedBlockWithSenders { block: block.seal_slow(), senders }, receipts))
 }
 }
diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs
index eae99bbe45d..f663c5863b5 100644
--- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs
+++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs
@@ -2,25 +2,22 @@
 //! loads receipt data w.r.t. network.
 use futures::Future;
-use reth_primitives::{Receipt, TransactionMeta, TransactionSigned};
-use reth_rpc_eth_types::EthStateCache;
+use reth_primitives::TransactionMeta;
+use reth_provider::{ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider};
-use crate::{EthApiTypes, RpcReceipt};
+use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt};
 /// Assembles transaction receipt data w.r.t. network.
 ///
 /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods.
-pub trait LoadReceipt: EthApiTypes + Send + Sync {
- /// Returns a handle for reading data from memory.
- ///
- /// Data access in default (L1) trait method implementations.
- fn cache(&self) -> &EthStateCache;
-
+pub trait LoadReceipt:
+ EthApiTypes + RpcNodeCoreExt + Send + Sync
+{
 /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`.
 fn build_transaction_receipt(
 &self,
- tx: TransactionSigned,
+ tx: ProviderTx,
 meta: TransactionMeta,
- receipt: Receipt,
+ receipt: ProviderReceipt,
 ) -> impl Future, Self::Error>> + Send;
 }
diff --git a/crates/rpc/rpc-eth-api/src/helpers/signer.rs b/crates/rpc/rpc-eth-api/src/helpers/signer.rs
index ab11e62d543..85c95414765 100644
--- a/crates/rpc/rpc-eth-api/src/helpers/signer.rs
+++ b/crates/rpc/rpc-eth-api/src/helpers/signer.rs
@@ -1,10 +1,9 @@
 //! An abstraction over ethereum signers.
 use alloy_dyn_abi::TypedData;
-use alloy_primitives::Address;
+use alloy_primitives::{Address, PrimitiveSignature as Signature};
 use alloy_rpc_types_eth::TransactionRequest;
 use dyn_clone::DynClone;
-use reth_primitives::{Signature, TransactionSigned};
 use reth_rpc_eth_types::SignError;
 use std::result;
@@ -13,7 +12,7 @@
 pub type Result = result::Result;
 /// An Ethereum Signer used via RPC.
 #[async_trait::async_trait]
-pub trait EthSigner: Send + Sync + DynClone {
+pub trait EthSigner: Send + Sync + DynClone {
 /// Returns the available accounts for this signer.
 fn accounts(&self) -> Vec<Address>;
@@ -26,17 +25,13 @@ pub trait EthSigner: Send + Sync + DynClone {
 async fn sign(&self, address: Address, message: &[u8]) -> Result;
 /// signs a transaction request using the given account in request
- async fn sign_transaction(
- &self,
- request: TransactionRequest,
- address: &Address,
- ) -> Result;
+ async fn sign_transaction(&self, request: TransactionRequest, address: &Address) -> Result;
 /// Encodes and signs the typed data according to EIP-712. Payload must implement Eip712 trait.
 fn sign_typed_data(&self, address: Address, payload: &TypedData) -> Result;
 }
-dyn_clone::clone_trait_object!(EthSigner);
+dyn_clone::clone_trait_object!( EthSigner);
 /// Adds 20 random dev signers for access via the API. Used in dev mode.
 #[auto_impl::auto_impl(&)]
diff --git a/crates/rpc/rpc-eth-api/src/helpers/spec.rs b/crates/rpc/rpc-eth-api/src/helpers/spec.rs
index 5976cf29c07..13ad9b778b2 100644
--- a/crates/rpc/rpc-eth-api/src/helpers/spec.rs
+++ b/crates/rpc/rpc-eth-api/src/helpers/spec.rs
@@ -1,33 +1,35 @@
 //! Loads chain metadata.
 use alloy_primitives::{Address, U256, U64};
-use alloy_rpc_types::{Stage, SyncInfo, SyncStatus};
+use alloy_rpc_types_eth::{Stage, SyncInfo, SyncStatus};
 use futures::Future;
 use reth_chainspec::{ChainInfo, EthereumHardforks};
 use reth_errors::{RethError, RethResult};
 use reth_network_api::NetworkInfo;
 use reth_provider::{BlockNumReader, ChainSpecProvider, StageCheckpointReader};
-use super::EthSigner;
+use crate::{helpers::EthSigner, RpcNodeCore};
 /// `Eth` API trait.
 ///
 /// Defines core functionality of the `eth` API implementation.
 #[auto_impl::auto_impl(&, Arc)]
-pub trait EthApiSpec: Send + Sync {
- /// Returns a handle for reading data from disk.
- fn provider(
- &self,
- ) -> impl ChainSpecProvider + BlockNumReader + StageCheckpointReader;
-
- /// Returns a handle for reading network data summary.
- fn network(&self) -> impl NetworkInfo;
+pub trait EthApiSpec:
+ RpcNodeCore<
+ Provider: ChainSpecProvider
+ + BlockNumReader
+ + StageCheckpointReader,
+ Network: NetworkInfo,
+>
+{
+ /// The transaction type signers are using.
+ type Transaction;
 /// Returns the block node is started on.
 fn starting_block(&self) -> U256;
 /// Returns a handle to the signers owned by provider.
- fn signers(&self) -> &parking_lot::RwLock>>;
+ fn signers(&self) -> &parking_lot::RwLock>>>;
 /// Returns the current ethereum protocol version.
 fn protocol_version(&self) -> impl Future> + Send {
diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs
index 7b11ce6afe6..4c9ccecd363 100644
--- a/crates/rpc/rpc-eth-api/src/helpers/state.rs
+++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs
@@ -1,26 +1,24 @@
 //! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace
 //! RPC methods.
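
[Editor's aside] The `EthSigner` and `EthApiSpec` hunks above thread a transaction type parameter through the signer abstraction, so signers are now generic over what they sign. A minimal, self-contained sketch of that pattern — toy `Address`, `Signer`, and `DevSigner` names, std `RwLock` instead of parking_lot, nothing here is reth's actual API:

    use std::sync::Arc;

    type Address = u64; // toy stand-in for a 20-byte address

    // A signer generic over the transaction type it produces, mirroring the
    // `EthSigner<T>` shape introduced in the diff above.
    trait Signer<T>: Send + Sync {
        fn accounts(&self) -> Vec<Address>;
        fn is_signer_for(&self, addr: &Address) -> bool {
            self.accounts().contains(addr)
        }
        fn sign(&self, addr: &Address, payload: &str) -> Result<T, String>;
    }

    struct DevSigner(Address);

    impl Signer<String> for DevSigner {
        fn accounts(&self) -> Vec<Address> {
            vec![self.0]
        }
        fn sign(&self, addr: &Address, payload: &str) -> Result<String, String> {
            if !self.is_signer_for(addr) {
                return Err("account not managed by this signer".into());
            }
            Ok(format!("signed({payload}) by {addr}"))
        }
    }

    fn main() {
        // The shared signer set, as returned by `signers()` above.
        let signers: std::sync::RwLock<Vec<Arc<dyn Signer<String>>>> =
            std::sync::RwLock::new(vec![Arc::new(DevSigner(7))]);
        // `find_signer`-style lookup: the first signer managing the account wins.
        let found = signers.read().unwrap().iter().find(|s| s.is_signer_for(&7)).cloned();
        assert!(found.is_some());
    }
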
-
+use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking};
+use crate::{EthApiTypes, FromEthApiError, RpcNodeCore, RpcNodeCoreExt};
+use alloy_consensus::{constants::KECCAK_EMPTY, BlockHeader};
+use alloy_eips::BlockId;
 use alloy_primitives::{Address, Bytes, B256, U256};
-use alloy_rpc_types::{serde_helpers::JsonStorageKey, Account, EIP1186AccountProofResponse};
+use alloy_rpc_types_eth::{Account, EIP1186AccountProofResponse};
+use alloy_serde::JsonStorageKey;
 use futures::Future;
 use reth_chainspec::{EthChainSpec, EthereumHardforks};
 use reth_errors::RethError;
 use reth_evm::ConfigureEvmEnv;
-use reth_primitives::{BlockId, Header, KECCAK_EMPTY};
 use reth_provider::{
- BlockIdReader, BlockNumReader, ChainSpecProvider, StateProvider, StateProviderBox,
- StateProviderFactory,
+ BlockIdReader, BlockNumReader, ChainSpecProvider, EvmEnvProvider as _, ProviderHeader,
+ StateProvider, StateProviderBox, StateProviderFactory,
 };
-use reth_rpc_eth_types::{EthApiError, EthStateCache, PendingBlockEnv, RpcInvalidTransactionError};
-use reth_rpc_types_compat::proof::from_primitive_account_proof;
+use reth_rpc_eth_types::{EthApiError, PendingBlockEnv, RpcInvalidTransactionError};
 use reth_transaction_pool::TransactionPool;
 use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId};
-use crate::{EthApiTypes, FromEthApiError};
-
-use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking};
-
 /// Helper methods for `eth_` methods relating to state (accounts).
 pub trait EthState: LoadState + SpawnBlocking {
 /// Returns the maximum number of blocks into the past for generating state proofs.
@@ -28,7 +26,7 @@ pub trait EthState: LoadState + SpawnBlocking {
 /// Returns the number of transactions sent from an address at the given block identifier.
 ///
- /// If this is [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this will
+ /// If this is [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this will
 /// look up the highest transaction in pool and return the next nonce (highest + 1).
 fn transaction_count(
 &self,
@@ -72,7 +70,7 @@ pub trait EthState: LoadState + SpawnBlocking {
 self.spawn_blocking_io(move |this| {
 Ok(B256::new(
 this.state_at_block_id_or_latest(block_id)?
- .storage(address, index.0)
+ .storage(address, index.as_b256())
 .map_err(Self::Error::from_eth_err)?
 .unwrap_or_default()
 .to_be_bytes(),
@@ -104,7 +102,8 @@ pub trait EthState: LoadState + SpawnBlocking {
 let block_id = block_id.unwrap_or_default();
 // Check whether the distance to the block exceeds the maximum configured window.
- let block_number = LoadState::provider(self)
+ let block_number = self
+ .provider()
 .block_number_for_id(block_id)
 .map_err(Self::Error::from_eth_err)?
 .ok_or(EthApiError::HeaderNotFound(block_id))?;
@@ -115,11 +114,11 @@ pub trait EthState: LoadState + SpawnBlocking {
 self.spawn_blocking_io(move |this| {
 let state = this.state_at_block_id(block_id)?;
- let storage_keys = keys.iter().map(|key| key.0).collect::>();
+ let storage_keys = keys.iter().map(|key| key.as_b256()).collect::>();
 let proof = state
 .proof(Default::default(), address, &storage_keys)
 .map_err(Self::Error::from_eth_err)?;
- Ok(from_primitive_account_proof(proof))
+ Ok(proof.into_eip1186_response(keys))
 })
 .await
 })
@@ -137,9 +136,9 @@ pub trait EthState: LoadState + SpawnBlocking {
 let Some(account) = account else { return Ok(None) };
 // Check whether the distance to the block exceeds the maximum configured proof window.
- let chain_info =
- LoadState::provider(&this).chain_info().map_err(Self::Error::from_eth_err)?;
- let block_number = LoadState::provider(&this)
+ let chain_info = this.provider().chain_info().map_err(Self::Error::from_eth_err)?;
+ let block_number = this
+ .provider()
 .block_number_for_id(block_id)
 .map_err(Self::Error::from_eth_err)?
 .ok_or(EthApiError::HeaderNotFound(block_id))?;
@@ -166,24 +165,14 @@ pub trait EthState: LoadState + SpawnBlocking {
 /// Loads state from database.
 ///
 /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` state RPC methods.
-pub trait LoadState: EthApiTypes {
- /// Returns a handle for reading state from database.
- ///
- /// Data access in default trait method implementations.
- fn provider(
- &self,
- ) -> impl StateProviderFactory + ChainSpecProvider;
-
- /// Returns a handle for reading data from memory.
- ///
- /// Data access in default (L1) trait method implementations.
- fn cache(&self) -> &EthStateCache;
-
- /// Returns a handle for reading data from transaction pool.
- ///
- /// Data access in default trait method implementations.
- fn pool(&self) -> impl TransactionPool;
-
+pub trait LoadState:
+ EthApiTypes
+ + RpcNodeCoreExt<
+ Provider: StateProviderFactory
+ + ChainSpecProvider,
+ Pool: TransactionPool,
+ >
+{
 /// Returns the state at the given block number
 fn state_at_hash(&self, block_hash: B256) -> Result {
 self.provider().history_by_block_hash(block_hash).map_err(Self::Error::from_eth_err)
@@ -191,7 +180,7 @@ pub trait LoadState: EthApiTypes {
 /// Returns the state at the given [`BlockId`] enum.
 ///
- /// Note: if not [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this
+ /// Note: if not [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this
 /// will only return canonical state. See also
 fn state_at_block_id(&self, at: BlockId) -> Result {
 self.provider().state_by_block_id(at).map_err(Self::Error::from_eth_err)
@@ -236,16 +225,19 @@ pub trait LoadState: EthApiTypes {
 Ok((cfg, block_env, origin.state_block_id()))
 } else {
 // Use cached values if there is no pending block
- let block_hash = LoadPendingBlock::provider(self)
+ let block_hash = RpcNodeCore::provider(self)
 .block_hash_for_id(at)
 .map_err(Self::Error::from_eth_err)?
 .ok_or(EthApiError::HeaderNotFound(at))?;
-
- let (cfg, env) = self
- .cache()
- .get_evm_env(block_hash)
- .await
+
+ let header =
+ self.cache().get_header(block_hash).await.map_err(Self::Error::from_eth_err)?;
+ let evm_config = self.evm_config().clone();
+ let (cfg, block_env) = self
+ .provider()
+ .env_with_header(&header, evm_config)
 .map_err(Self::Error::from_eth_err)?;
- Ok((cfg, env, block_hash.into()))
+ Ok((cfg, block_env, block_hash.into()))
 }
 }
@@ -255,14 +247,14 @@ pub trait LoadState: EthApiTypes {
 /// This is used for tracing raw blocks
 fn evm_env_for_raw_block(
 &self,
- header: &Header,
+ header: &ProviderHeader,
 ) -> impl Future> + Send
 where
 Self: LoadPendingBlock + SpawnBlocking,
 {
 async move {
 // get the parent config first
- let (cfg, mut block_env, _) = self.evm_env_at(header.parent_hash.into()).await?;
+ let (cfg, mut block_env, _) = self.evm_env_at(header.parent_hash().into()).await?;
 let after_merge = cfg.handler_cfg.spec_id >= SpecId::MERGE;
 self.evm_config().fill_block_env(&mut block_env, header, after_merge);
@@ -271,9 +263,45 @@ pub trait LoadState: EthApiTypes {
 }
 }
+ /// Returns the next available nonce without gaps for the given address
+ /// Next available nonce is either the on chain nonce of the account or the highest consecutive
+ /// nonce in the pool + 1
+ fn next_available_nonce(
+ &self,
+ address: Address,
+ ) -> impl Future> + Send
+ where
+ Self: SpawnBlocking,
+ {
+ self.spawn_blocking_io(move |this| {
+ // first fetch the on chain nonce of the account
+ let on_chain_account_nonce = this
+ .latest_state()?
+ .account_nonce(address)
+ .map_err(Self::Error::from_eth_err)?
+ .unwrap_or_default();
+
+ let mut next_nonce = on_chain_account_nonce;
+ // Retrieve the highest consecutive transaction for the sender from the transaction pool
+ if let Some(highest_tx) = this
+ .pool()
+ .get_highest_consecutive_transaction_by_sender(address, on_chain_account_nonce)
+ {
+ // Return the nonce of the highest consecutive transaction + 1
+ next_nonce = highest_tx.nonce().checked_add(1).ok_or_else(|| {
+ Self::Error::from(EthApiError::InvalidTransaction(
+ RpcInvalidTransactionError::NonceMaxValue,
+ ))
+ })?;
+ }
+
+ Ok(next_nonce)
+ })
+ }
+
 /// Returns the number of transactions sent from an address at the given block identifier.
 ///
- /// If this is [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this will
+ /// If this is [`BlockNumberOrTag::Pending`](alloy_eips::BlockNumberOrTag) then this will
 /// look up the highest transaction in pool and return the next nonce (highest + 1).
 fn transaction_count(
 &self,
diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs
index 981de8fa6c4..e000218e70e 100644
--- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs
+++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs
@@ -1,14 +1,17 @@
 //! Loads a pending block from database. Helper trait for `eth_` call and trace RPC methods.
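
[Editor's aside] The `next_available_nonce` helper added above encodes a simple rule: start from the on-chain account nonce and, if the pool holds a consecutive run of pending transactions starting there, continue after the highest of them. A self-contained sketch of just that rule — the `Vec<u64>` pool is an illustrative stand-in for the real transaction pool:

    // Mirrors the documented semantics: highest *consecutive* pool nonce + 1,
    // or the on-chain nonce if no pool transaction chains up to it.
    fn next_available_nonce(on_chain_nonce: u64, mut pool_nonces: Vec<u64>) -> u64 {
        pool_nonces.sort_unstable();
        let mut next = on_chain_nonce;
        for nonce in pool_nonces {
            if nonce == next {
                next += 1; // this nonce is already pending; continue after it
            } else if nonce > next {
                break; // gap in the sequence: stop at the first missing nonce
            }
        }
        next
    }

    fn main() {
        assert_eq!(next_available_nonce(5, vec![]), 5);
        assert_eq!(next_available_nonce(5, vec![5, 6, 7]), 8);
        assert_eq!(next_available_nonce(5, vec![5, 7]), 6); // 6 is missing
    }
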
-use std::sync::Arc;
+use std::{fmt::Display, sync::Arc};
-use crate::FromEvmError;
+use crate::{FromEvmError, RpcNodeCore};
+use alloy_consensus::BlockHeader;
 use alloy_primitives::B256;
-use alloy_rpc_types::{BlockId, TransactionInfo};
+use alloy_rpc_types_eth::{BlockId, TransactionInfo};
 use futures::Future;
 use reth_chainspec::ChainSpecProvider;
 use reth_evm::{system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv};
-use reth_primitives::{Header, SealedBlockWithSenders};
+use reth_primitives::SealedBlockWithSenders;
+use reth_primitives_traits::{BlockBody, SignedTransaction};
+use reth_provider::{BlockReader, ProviderBlock, ProviderHeader, ProviderTx};
 use reth_revm::database::StateProviderDatabase;
 use reth_rpc_eth_types::{
 cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper},
@@ -16,17 +19,22 @@
 };
 use revm::{db::CacheDB, Database, DatabaseCommit, GetInspector, Inspector};
 use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig};
-use revm_primitives::{EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndState};
+use revm_primitives::{
+ BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndState,
+};
 use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction};
 /// Executes CPU heavy tasks.
-pub trait Trace: LoadState {
- /// Returns a handle for reading evm config.
- ///
- /// Data access in default (L1) trait method implementations.
- fn evm_config(&self) -> &impl ConfigureEvm;
+pub trait Trace:
+ LoadState<
+ Provider: BlockReader,
+ Evm: ConfigureEvm<
+ Header = ProviderHeader,
+ Transaction = ProviderTx,
+ >,
+>
+{
 /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state
 /// changes.
 fn inspect(
@@ -117,7 +125,7 @@ pub trait Trace: LoadState {
 self.spawn_with_state_at_block(at, move |state| {
 let mut db = CacheDB::new(StateProviderDatabase::new(state));
 let mut inspector = TracingInspector::new(config);
- let (res, _) = this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?;
+ let (res, _) = this.inspect(&mut db, env, &mut inspector)?;
 f(inspector, res, db)
 })
 }
@@ -191,31 +199,14 @@ pub trait Trace: LoadState {
 // we need to get the state of the parent block because we're essentially replaying the
 // block the transaction is included in
- let parent_block = block.parent_hash;
- let parent_beacon_block_root = block.parent_beacon_block_root;
+ let parent_block = block.parent_hash();
 let this = self.clone();
 self.spawn_with_state_at_block(parent_block.into(), move |state| {
 let mut db = CacheDB::new(StateProviderDatabase::new(state));
 let block_txs = block.transactions_with_sender();
- // apply relevant system calls
- let mut system_caller = SystemCaller::new(
- Trace::evm_config(&this).clone(),
- LoadState::provider(&this).chain_spec(),
- );
- system_caller
- .pre_block_beacon_root_contract_call(
- &mut db,
- &cfg,
- &block_env,
- parent_beacon_block_root,
- )
- .map_err(|_| {
- EthApiError::EvmCustom(
- "failed to apply 4788 beacon root system call".to_string(),
- )
- })?;
+ this.apply_pre_execution_changes(&block, &mut db, &cfg, &block_env)?;
 // replay all transactions prior to the targeted transaction
 this.replay_transactions_until(
@@ -223,13 +214,13 @@ pub trait Trace: LoadState {
 cfg.clone(),
 block_env.clone(),
 block_txs,
- tx.hash,
+ *tx.tx_hash(),
 )?;
 let env = EnvWithHandlerCfg::new_with_cfg_env(
 cfg,
 block_env,
- Call::evm_config(&this).tx_env(tx.as_signed(), tx.signer()),
+ RpcNodeCore::evm_config(&this).tx_env(tx.as_signed(), tx.signer()),
 );
 let (res, _) =
 this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?;
@@ -249,7 +240,7 @@ pub trait Trace: LoadState {
 fn trace_block_until(
 &self,
 block_id: BlockId,
- block: Option>,
+ block: Option>>>,
 highest_index: Option,
 config: TracingInspectorConfig,
 f: F,
@@ -289,7 +280,7 @@ pub trait Trace: LoadState {
 fn trace_block_until_with_inspector(
 &self,
 block_id: BlockId,
- block: Option>,
+ block: Option>>>,
 highest_index: Option,
 mut inspector_setup: Setup,
 f: F,
@@ -322,7 +313,7 @@ pub trait Trace: LoadState {
 let Some(block) = block else { return Ok(None) };
- if block.body.transactions.is_empty() {
+ if block.body.transactions().is_empty() {
 // nothing to trace
 return Ok(Some(Vec::new()))
 }
@@ -331,7 +322,7 @@ pub trait Trace: LoadState {
 self.spawn_tracing(move |this| {
 // we need to get the state of the parent block because we're replaying this block
 // on top of its parent block's state
- let state_at = block.parent_hash;
+ let state_at = block.parent_hash();
 let block_hash = block.hash();
 let block_number = block_env.number.saturating_to::();
@@ -342,26 +333,12 @@ pub trait Trace: LoadState {
 let mut db =
 CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state)));
- // apply relevant system calls
- let mut system_caller = SystemCaller::new(
- Trace::evm_config(&this).clone(),
- LoadState::provider(&this).chain_spec(),
- );
- system_caller
- .pre_block_beacon_root_contract_call(
- &mut db,
- &cfg,
- &block_env,
- block.header().parent_beacon_block_root,
- )
- .map_err(|_| {
- EthApiError::EvmCustom("failed to apply 4788 system call".to_string())
- })?;
+ this.apply_pre_execution_changes(&block, &mut db, &cfg, &block_env)?;
 // prepare transactions, we do everything upfront to reduce time spent with open
 // state
 let max_transactions =
- highest_index.map_or(block.body.transactions.len(), |highest| {
+ highest_index.map_or(block.body.transactions().len(), |highest| {
 // we need + 1 because the index is 0-based
 highest as usize + 1
 });
@@ -373,13 +350,13 @@ pub trait Trace: LoadState {
 .enumerate()
 .map(|(idx, (signer, tx))| {
 let tx_info = TransactionInfo {
- hash: Some(tx.hash()),
+ hash: Some(*tx.tx_hash()),
 index: Some(idx as u64),
 block_hash: Some(block_hash),
 block_number: Some(block_number),
 base_fee: Some(base_fee),
 };
- let tx_env = Trace::evm_config(&this).tx_env(tx, *signer);
+ let tx_env = this.evm_config().tx_env(tx, *signer);
 (tx_info, tx_env)
 })
 .peekable();
@@ -421,7 +398,7 @@ pub trait Trace: LoadState {
 fn trace_block_with(
 &self,
 block_id: BlockId,
- block: Option>,
+ block: Option>>>,
 config: TracingInspectorConfig,
 f: F,
 ) -> impl Future>, Self::Error>> + Send
@@ -460,7 +437,7 @@ pub trait Trace: LoadState {
 fn trace_block_inspector(
 &self,
 block_id: BlockId,
- block: Option>,
+ block: Option>>>,
 insp_setup: Setup,
 f: F,
 ) -> impl Future>, Self::Error>> + Send
@@ -483,4 +460,37 @@ pub trait Trace: LoadState {
 {
 self.trace_block_until_with_inspector(block_id, block, None, insp_setup, f)
 }
+
+ /// Applies chain-specific state transitions required before executing a block.
+ ///
+ /// Note: This should only be called when tracing an entire block vs individual transactions.
+ /// When tracing a transaction on top of an already committed block state, those transitions are
+ /// already applied.
+ fn apply_pre_execution_changes + DatabaseCommit>(
+ &self,
+ block: &SealedBlockWithSenders>,
+ db: &mut DB,
+ cfg: &CfgEnvWithHandlerCfg,
+ block_env: &BlockEnv,
+ ) -> Result<(), Self::Error> {
+ let mut system_caller =
+ SystemCaller::new(self.evm_config().clone(), self.provider().chain_spec());
+ // apply relevant system calls
+ system_caller
+ .pre_block_beacon_root_contract_call(
+ db,
+ cfg,
+ block_env,
+ block.header.parent_beacon_block_root(),
+ )
+ .map_err(|_| EthApiError::EvmCustom("failed to apply 4788 system call".to_string()))?;
+
+ system_caller
+ .pre_block_blockhashes_contract_call(db, cfg, block_env, block.header.parent_hash())
+ .map_err(|_| {
+ EthApiError::EvmCustom("failed to apply blockhashes system call".to_string())
+ })?;
+
+ Ok(())
+ }
 }
diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs
index 54d60cb7abd..253aac91d8b 100644
--- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs
+++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs
@@ -1,30 +1,34 @@
 //! Database access for `eth_` transaction RPC methods. Loads transaction and receipt data w.r.t.
 //! network.
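
[Editor's aside] The new `apply_pre_execution_changes` above folds both pre-block system calls (EIP-4788 beacon root, EIP-2935 block hashes) into one helper that tracing applies before replaying any transaction. A self-contained model of that ordering — a `HashMap` stands in for the EVM database, and the string contract keys are illustrative, not real addresses:

    use std::collections::HashMap;

    type Hash = [u8; 32];
    type State = HashMap<&'static str, Hash>;

    // Applies the chain-specific pre-block state transitions in the same order
    // as the helper above: beacon root first, then the parent block hash.
    fn apply_pre_execution_changes(
        state: &mut State,
        parent_beacon_block_root: Option<Hash>,
        parent_hash: Hash,
    ) {
        // EIP-4788: expose the parent beacon block root to the beacon-roots contract.
        if let Some(root) = parent_beacon_block_root {
            state.insert("beacon_roots_contract", root);
        }
        // EIP-2935: expose the parent block hash to the history contract.
        state.insert("history_contract", parent_hash);
    }

    fn main() {
        let mut state = State::new();
        apply_pre_execution_changes(&mut state, Some([1; 32]), [2; 32]);
        // Only after these writes would the block's transactions be replayed.
        assert_eq!(state["history_contract"], [2; 32]);
    }
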
-use alloy_consensus::Transaction;
+use alloy_consensus::{BlockHeader, Transaction};
 use alloy_dyn_abi::TypedData;
-use alloy_eips::eip2718::Encodable2718;
+use alloy_eips::{eip2718::Encodable2718, BlockId};
 use alloy_network::TransactionBuilder;
 use alloy_primitives::{Address, Bytes, TxHash, B256};
-use alloy_rpc_types::{BlockNumberOrTag, TransactionInfo};
-use alloy_rpc_types_eth::transaction::TransactionRequest;
+use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo};
 use futures::Future;
+use reth_node_api::BlockBody;
 use reth_primitives::{
- BlockId, Receipt, SealedBlockWithSenders, TransactionMeta, TransactionSigned,
+ transaction::SignedTransactionIntoRecoveredExt, SealedBlockWithSenders, TransactionMeta,
+};
+use reth_primitives_traits::SignedTransaction;
+use reth_provider::{
+ BlockNumReader, BlockReaderIdExt, ProviderBlock, ProviderReceipt, ProviderTx, ReceiptProvider,
+ TransactionsProvider,
 };
-use reth_provider::{BlockNumReader, BlockReaderIdExt, ReceiptProvider, TransactionsProvider};
 use reth_rpc_eth_types::{
 utils::{binary_search, recover_raw_transaction},
- EthApiError, EthStateCache, SignError, TransactionSource,
+ EthApiError, SignError, TransactionSource,
 };
 use reth_rpc_types_compat::transaction::{from_recovered, from_recovered_with_block_context};
 use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool};
 use std::sync::Arc;
-use crate::{FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcReceipt, RpcTransaction};
-
-use super::{
- Call, EthApiSpec, EthSigner, LoadBlock, LoadPendingBlock, LoadReceipt, LoadState, SpawnBlocking,
+use super::{EthApiSpec, EthSigner, LoadBlock, LoadReceipt, LoadState, SpawnBlocking};
+use crate::{
+ helpers::estimate::EstimateCall, FromEthApiError, FullEthApiTypes, IntoEthApiError,
+ RpcNodeCore, RpcNodeCoreExt, RpcReceipt, RpcTransaction,
 };
 /// Transaction related functions for the [`EthApiServer`](crate::EthApiServer) trait in
@@ -50,41 +54,42 @@
 /// See also
 ///
 /// This implementation follows the behaviour of Geth and disables the basefee check for tracing.
-pub trait EthTransactions: LoadTransaction {
- /// Returns a handle for reading data from disk.
- ///
- /// Data access in default (L1) trait method implementations.
- fn provider(&self) -> impl BlockReaderIdExt;
-
+pub trait EthTransactions: LoadTransaction {
 /// Returns a handle for signing data.
 ///
 /// Signer access in default (L1) trait method implementations.
- fn signers(&self) -> &parking_lot::RwLock>>;
+ #[expect(clippy::type_complexity)]
+ fn signers(&self) -> &parking_lot::RwLock>>>>;
 /// Returns the transaction by hash.
 ///
 /// Checks the pool and state.
 ///
 /// Returns `Ok(None)` if no matching transaction was found.
+ #[expect(clippy::complexity)]
 fn transaction_by_hash(
 &self,
 hash: B256,
- ) -> impl Future, Self::Error>> + Send {
+ ) -> impl Future<
+ Output = Result>>, Self::Error>,
+ > + Send {
 LoadTransaction::transaction_by_hash(self, hash)
 }
 /// Get all transactions in the block with the given hash.
 ///
 /// Returns `None` if block does not exist.
+ #[expect(clippy::type_complexity)]
 fn transactions_by_block(
 &self,
 block: B256,
- ) -> impl Future>, Self::Error>> + Send {
+ ) -> impl Future>>, Self::Error>> + Send
+ {
 async move {
 self.cache()
 .get_sealed_block_with_senders(block)
 .await
- .map(|b| b.map(|b| b.body.transactions.clone()))
+ .map(|b| b.map(|b| b.body.transactions().to_vec()))
 .map_err(Self::Error::from_eth_err)
 }
 }
@@ -109,7 +114,8 @@ pub trait EthTransactions: LoadTransaction {
 }
 self.spawn_blocking_io(move |ref this| {
- Ok(LoadTransaction::provider(this)
+ Ok(this
+ .provider()
 .transaction_by_hash(hash)
 .map_err(Self::Error::from_eth_err)?
 .map(|tx| tx.encoded_2718().into()))
@@ -119,10 +125,13 @@ pub trait EthTransactions: LoadTransaction {
 }
 /// Returns the _historical_ transaction and the block it was mined in
+ #[expect(clippy::type_complexity)]
 fn historical_transaction_by_hash_at(
 &self,
 hash: B256,
- ) -> impl Future, Self::Error>> + Send {
+ ) -> impl Future<
+ Output = Result>, B256)>, Self::Error>,
+ > + Send {
 async move {
 match self.transaction_by_hash_at(hash).await? {
 None => Ok(None),
@@ -153,18 +162,22 @@ pub trait EthTransactions: LoadTransaction {
 }
 /// Helper method that loads a transaction and its receipt.
+ #[expect(clippy::complexity)]
 fn load_transaction_and_receipt(
 &self,
 hash: TxHash,
 ) -> impl Future<
- Output = Result, Self::Error>,
+ Output = Result<
+ Option<(ProviderTx, TransactionMeta, ProviderReceipt)>,
+ Self::Error,
+ >,
 > + Send
 where
 Self: 'static,
 {
- let this = self.clone();
+ let provider = self.provider().clone();
 self.spawn_blocking_io(move |_| {
- let (tx, meta) = match LoadTransaction::provider(&this)
+ let (tx, meta) = match provider
 .transaction_by_hash_with_meta(hash)
 .map_err(Self::Error::from_eth_err)?
 {
@@ -172,10 +185,7 @@ pub trait EthTransactions: LoadTransaction {
 None => return Ok(None),
 };
- let receipt = match EthTransactions::provider(&this)
- .receipt_by_hash(hash)
- .map_err(Self::Error::from_eth_err)?
- {
+ let receipt = match provider.receipt_by_hash(hash).map_err(Self::Error::from_eth_err)? {
 Some(recpt) => recpt,
 None => return Ok(None),
 };
@@ -198,21 +208,22 @@ pub trait EthTransactions: LoadTransaction {
 async move {
 if let Some(block) = self.block_with_senders(block_id).await? {
 let block_hash = block.hash();
- let block_number = block.number;
- let base_fee_per_gas = block.base_fee_per_gas;
+ let block_number = block.number();
+ let base_fee_per_gas = block.base_fee_per_gas();
 if let Some((signer, tx)) = block.transactions_with_sender().nth(index) {
 let tx_info = TransactionInfo {
- hash: Some(tx.hash()),
+ hash: Some(*tx.tx_hash()),
 block_hash: Some(block_hash),
 block_number: Some(block_number),
 base_fee: base_fee_per_gas.map(u128::from),
 index: Some(index as u64),
 };
- return Ok(Some(from_recovered_with_block_context::(
+ return Ok(Some(from_recovered_with_block_context(
 tx.clone().with_signer(*signer),
 tx_info,
- )))
+ self.tx_resp_builder(),
+ )?))
 }
 }
@@ -228,16 +239,16 @@ pub trait EthTransactions: LoadTransaction {
 include_pending: bool,
 ) -> impl Future>, Self::Error>> + Send
 where
- Self: LoadBlock + LoadState + FullEthApiTypes,
+ Self: LoadBlock + LoadState,
 {
 async move {
 // Check the pool first
 if include_pending {
 if let Some(tx) =
- LoadState::pool(self).get_transaction_by_sender_and_nonce(sender, nonce)
+ RpcNodeCore::pool(self).get_transaction_by_sender_and_nonce(sender, nonce)
 {
- let transaction = tx.transaction.clone().into_consensus();
- return Ok(Some(from_recovered::(transaction.into())));
+ let transaction = tx.transaction.clone_into_consensus();
+ return Ok(Some(from_recovered(transaction, self.tx_resp_builder())?));
 }
 }
@@ -254,7 +265,7 @@ pub trait EthTransactions: LoadTransaction {
 return Ok(None);
 }
- let Ok(high) = LoadBlock::provider(self).best_block_number() else {
+ let Ok(high) = self.provider().best_block_number() else {
 return Err(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()).into());
 };
@@ -273,28 +284,29 @@ pub trait EthTransactions: LoadTransaction {
 .await?
 .and_then(|block| {
 let block_hash = block.hash();
- let block_number = block.number;
- let base_fee_per_gas = block.base_fee_per_gas;
+ let block_number = block.number();
+ let base_fee_per_gas = block.base_fee_per_gas();
 block
 .transactions_with_sender()
 .enumerate()
- .find(|(_, (signer, tx))| **signer == sender && tx.nonce() == nonce)
+ .find(|(_, (signer, tx))| **signer == sender && (*tx).nonce() == nonce)
 .map(|(index, (signer, tx))| {
 let tx_info = TransactionInfo {
- hash: Some(tx.hash()),
+ hash: Some(*tx.tx_hash()),
 block_hash: Some(block_hash),
 block_number: Some(block_number),
 base_fee: base_fee_per_gas.map(u128::from),
 index: Some(index as u64),
 };
- from_recovered_with_block_context::(
+ from_recovered_with_block_context(
 tx.clone().with_signer(*signer),
 tx_info,
+ self.tx_resp_builder(),
 )
 })
 })
- .ok_or(EthApiError::HeaderNotFound(block_id).into())
+ .ok_or(EthApiError::HeaderNotFound(block_id))?
 .map(Some)
 }
 }
@@ -312,7 +324,7 @@ pub trait EthTransactions: LoadTransaction {
 {
 async move {
 if let Some(block) = self.block_with_senders(block_id).await? {
- if let Some(tx) = block.transactions().nth(index) {
+ if let Some(tx) = block.transactions().get(index) {
 return Ok(Some(tx.encoded_2718().into()))
 }
 }
@@ -329,9 +341,9 @@ pub trait EthTransactions: LoadTransaction {
 tx: Bytes,
 ) -> impl Future> + Send {
 async move {
- let recovered = recover_raw_transaction(tx.clone())?;
+ let recovered = recover_raw_transaction(&tx)?;
 let pool_transaction =
- ::Transaction::from_pooled(recovered.into());
+ ::Transaction::from_pooled(recovered);
 // submit the transaction to the pool with a `Local` origin
 let hash = self
@@ -351,7 +363,7 @@ pub trait EthTransactions: LoadTransaction {
 mut request: TransactionRequest,
 ) -> impl Future> + Send
 where
- Self: EthApiSpec + LoadBlock + LoadPendingBlock + Call,
+ Self: EthApiSpec + LoadBlock + EstimateCall,
 {
 async move {
 let from = match request.from {
@@ -365,9 +377,8 @@ pub trait EthTransactions: LoadTransaction {
 // set nonce if not already set before
 if request.nonce.is_none() {
- let nonce = self.transaction_count(from, Some(BlockId::pending())).await?;
- // note: `.to()` can't panic because the nonce is constructed from a `u64`
- request.nonce = Some(nonce.to());
+ let nonce = self.next_available_nonce(from).await?;
+ request.nonce = Some(nonce);
 }
 let chain_id = self.chain_id();
@@ -380,10 +391,15 @@ pub trait EthTransactions: LoadTransaction {
 let transaction = self.sign_request(&from, request).await?.with_signer(from);
- let pool_transaction = <::Pool as TransactionPool>::Transaction::try_from_consensus(transaction.into()).map_err(|_| EthApiError::TransactionConversionError)?;
+ let pool_transaction =
+ <::Pool as TransactionPool>::Transaction::try_from_consensus(
+ transaction,
+ )
+ .map_err(|_| EthApiError::TransactionConversionError)?;
 // submit the transaction to the pool with a `Local` origin
- let hash = LoadTransaction::pool(self)
+ let hash = self
+ .pool()
 .add_transaction(TransactionOrigin::Local, pool_transaction)
 .await
 .map_err(Self::Error::from_eth_err)?;
@@ -397,18 +413,12 @@ pub trait EthTransactions: LoadTransaction {
 &self,
 from: &Address,
 txn: TransactionRequest,
- ) -> impl Future> + Send {
+ ) -> impl Future, Self::Error>> + Send {
 async move {
- let signers: Vec<_> = self.signers().read().iter().cloned().collect();
- for signer in signers {
- if signer.is_signer_for(from) {
- return match signer.sign_transaction(txn, from).await {
- Ok(tx) => Ok(tx),
- Err(e) => Err(e.into_eth_err()),
- }
- }
- }
- Err(EthApiError::InvalidTransactionSignature.into())
+ self.find_signer(from)?
+ .sign_transaction(txn, from)
+ .await
+ .map_err(Self::Error::from_eth_err)
 }
 }
@@ -429,6 +439,22 @@ pub trait EthTransactions: LoadTransaction {
 }
 }
+ /// Signs a transaction request using the given account in request
+ /// Returns the EIP-2718 encoded signed transaction.
+ fn sign_transaction(
+ &self,
+ request: TransactionRequest,
+ ) -> impl Future> + Send {
+ async move {
+ let from = match request.from {
+ Some(from) => from,
+ None => return Err(SignError::NoAccount.into_eth_err()),
+ };
+
+ Ok(self.sign_request(&from, request).await?.encoded_2718().into())
+ }
+ }
+
 /// Encodes and signs the typed data according to EIP-712. Payload must implement Eip712 trait.
 fn sign_typed_data(&self, data: &TypedData, account: Address) -> Result {
 Ok(self
@@ -440,10 +466,11 @@ pub trait EthTransactions: LoadTransaction {
 }
 /// Returns the signer for the given account, if found in configured signers.
+ #[expect(clippy::type_complexity)]
 fn find_signer(
 &self,
 account: &Address,
- ) -> Result, Self::Error> {
+ ) -> Result> + 'static)>, Self::Error> {
 self.signers()
 .read()
 .iter()
@@ -457,35 +484,19 @@
 ///
 /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` transactions RPC
 /// methods.
-pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes {
- /// Transaction pool with pending transactions. [`TransactionPool::Transaction`] is the
- /// supported transaction type.
- type Pool: TransactionPool;
-
- /// Returns a handle for reading data from disk.
- ///
- /// Data access in default (L1) trait method implementations.
- fn provider(&self) -> impl TransactionsProvider;
-
- /// Returns a handle for reading data from memory.
- ///
- /// Data access in default (L1) trait method implementations.
- fn cache(&self) -> &EthStateCache;
-
- /// Returns a handle for reading data from pool.
- ///
- /// Data access in default (L1) trait method implementations.
- fn pool(&self) -> &Self::Pool;
-
+pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt {
 /// Returns the transaction by hash.
 ///
 /// Checks the pool and state.
 ///
 /// Returns `Ok(None)` if no matching transaction was found.
+ #[expect(clippy::complexity)]
 fn transaction_by_hash(
 &self,
 hash: B256,
- ) -> impl Future, Self::Error>> + Send {
+ ) -> impl Future<
+ Output = Result>>, Self::Error>,
+ > + Send {
 async move {
 // Try to find the transaction on disk
 let mut resp = self
@@ -533,11 +544,16 @@
 /// Returns the transaction by including its corresponding [`BlockId`].
 ///
 /// Note: this supports pending transactions
+ #[expect(clippy::type_complexity)]
 fn transaction_by_hash_at(
 &self,
 transaction_hash: B256,
- ) -> impl Future, Self::Error>> + Send
- {
+ ) -> impl Future<
+ Output = Result<
+ Option<(TransactionSource>, BlockId)>,
+ Self::Error,
+ >,
+ > + Send {
 async move {
 Ok(self.transaction_by_hash(transaction_hash).await?.map(|tx| match tx {
 tx @ TransactionSource::Pool(_) => (tx, BlockId::pending()),
@@ -549,11 +565,18 @@
 }
 /// Fetches the transaction and the transaction's block
+ #[expect(clippy::type_complexity)]
 fn transaction_and_block(
 &self,
 hash: B256,
 ) -> impl Future<
- Output = Result)>, Self::Error>,
+ Output = Result<
+ Option<(
+ TransactionSource>,
+ Arc>>,
+ )>,
+ Self::Error,
+ >,
 > + Send {
 async move {
 let (transaction, at) = match self.transaction_by_hash_at(hash).await? {
diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs
index 849c8e2e4c8..d9c7f39a440 100644
--- a/crates/rpc/rpc-eth-api/src/lib.rs
+++ b/crates/rpc/rpc-eth-api/src/lib.rs
@@ -16,17 +16,20 @@
 pub mod bundle;
 pub mod core;
 pub mod filter;
 pub mod helpers;
+pub mod node;
 pub mod pubsub;
 pub mod types;
-pub use reth_rpc_types_compat::TransactionCompat;
-
 pub use bundle::{EthBundleApiServer, EthCallBundleApiServer};
 pub use core::{EthApiServer, FullEthApiServer};
 pub use filter::EthFilterApiServer;
-pub use helpers::error::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError};
+pub use node::{RpcNodeCore, RpcNodeCoreExt};
 pub use pubsub::EthPubSubApiServer;
-pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcReceipt, RpcTransaction};
+pub use reth_rpc_eth_types::error::{
+ AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError,
+};
+pub use reth_rpc_types_compat::TransactionCompat;
+pub use types::{EthApiTypes, FullEthApiTypes, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction};
 #[cfg(feature = "client")]
 pub use bundle::{EthBundleApiClient, EthCallBundleApiClient};
@@ -34,3 +37,5 @@
 pub use core::EthApiClient;
 #[cfg(feature = "client")]
 pub use filter::EthFilterApiClient;
+
+use reth_trie_common as _;
diff --git a/crates/rpc/rpc-eth-api/src/node.rs b/crates/rpc/rpc-eth-api/src/node.rs
new file mode 100644
index 00000000000..538cb2ead8a
--- /dev/null
+++ b/crates/rpc/rpc-eth-api/src/node.rs
@@ -0,0 +1,85 @@
+//! Helper trait for interfacing with [`FullNodeComponents`].
+
+use reth_node_api::FullNodeComponents;
+use reth_provider::{BlockReader, ProviderBlock, ProviderReceipt};
+use reth_rpc_eth_types::EthStateCache;
+
+/// Helper trait to relax trait bounds on [`FullNodeComponents`].
+///
+/// Helpful when defining types that would otherwise have a generic `N: FullNodeComponents`. Using
+/// `N: RpcNodeCore` instead, allows access to all the associated types on [`FullNodeComponents`]
+/// that are used in RPC, but with more flexibility since they have no trait bounds (aside from
+/// auto traits).
+pub trait RpcNodeCore: Clone + Send + Sync {
+ /// The provider type used to interact with the node.
+ type Provider: Send + Sync + Clone + Unpin;
+ /// The transaction pool of the node.
+ type Pool: Send + Sync + Clone + Unpin;
+ /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine.
+ type Evm: Send + Sync + Clone + Unpin;
+ /// Network API.
+ type Network: Send + Sync + Clone;
+
+ /// Builds new blocks.
+ type PayloadBuilder: Send + Sync + Clone;
+
+ /// Returns the transaction pool of the node.
+ fn pool(&self) -> &Self::Pool;
+
+ /// Returns the node's evm config.
+ fn evm_config(&self) -> &Self::Evm;
+
+ /// Returns the handle to the network
+ fn network(&self) -> &Self::Network;
+
+ /// Returns the handle to the payload builder service.
+ fn payload_builder(&self) -> &Self::PayloadBuilder;
+
+ /// Returns the provider of the node.
+ fn provider(&self) -> &Self::Provider; +} + +impl RpcNodeCore for T +where + T: FullNodeComponents, +{ + type Provider = T::Provider; + type Pool = T::Pool; + type Evm = ::Evm; + type Network = ::Network; + type PayloadBuilder = ::PayloadBuilder; + + #[inline] + fn pool(&self) -> &Self::Pool { + FullNodeComponents::pool(self) + } + + #[inline] + fn evm_config(&self) -> &Self::Evm { + FullNodeComponents::evm_config(self) + } + + #[inline] + fn network(&self) -> &Self::Network { + FullNodeComponents::network(self) + } + + #[inline] + fn payload_builder(&self) -> &Self::PayloadBuilder { + FullNodeComponents::payload_builder(self) + } + + #[inline] + fn provider(&self) -> &Self::Provider { + FullNodeComponents::provider(self) + } +} + +/// Additional components, asides the core node components, needed to run `eth_` namespace API +/// server. +pub trait RpcNodeCoreExt: RpcNodeCore { + /// Returns handle to RPC cache service. + fn cache( + &self, + ) -> &EthStateCache, ProviderReceipt>; +} diff --git a/crates/rpc/rpc-eth-api/src/pubsub.rs b/crates/rpc/rpc-eth-api/src/pubsub.rs index b70dacb26fa..ecbb1fe9a83 100644 --- a/crates/rpc/rpc-eth-api/src/pubsub.rs +++ b/crates/rpc/rpc-eth-api/src/pubsub.rs @@ -1,7 +1,7 @@ //! `eth_` RPC API for pubsub subscription. use alloy_json_rpc::RpcObject; -use alloy_rpc_types::pubsub::{Params, SubscriptionKind}; +use alloy_rpc_types_eth::pubsub::{Params, SubscriptionKind}; use jsonrpsee::proc_macros::rpc; /// Ethereum pub-sub rpc interface. diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 9ddc23ea32e..2da1bdac281 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -1,17 +1,21 @@ //! Trait for specifying `eth` network dependent API types. -use std::{error::Error, fmt}; +use std::{ + error::Error, + fmt::{self}, +}; -use alloy_network::{AnyNetwork, Network}; -use alloy_rpc_types::Block; -use reth_rpc_eth_types::EthApiError; +use alloy_network::Network; +use alloy_rpc_types_eth::Block; +use reth_provider::{ProviderTx, ReceiptProvider, TransactionsProvider}; use reth_rpc_types_compat::TransactionCompat; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use crate::{AsEthApiError, FromEthApiError, FromEvmError}; +use crate::{AsEthApiError, FromEthApiError, FromEvmError, RpcNodeCore}; /// Network specific `eth` API types. pub trait EthApiTypes: Send + Sync + Clone { - /// Extension of [`EthApiError`], with network specific errors. + /// Extension of [`FromEthApiError`], with network specific errors. type Error: Into> + FromEthApiError + AsEthApiError @@ -20,15 +24,12 @@ pub trait EthApiTypes: Send + Sync + Clone { + Send + Sync; /// Blockchain primitive types, specific to network, e.g. block and transaction. - type NetworkTypes: Network; + type NetworkTypes: Network; /// Conversion methods for transaction RPC type. type TransactionCompat: Send + Sync + Clone + fmt::Debug; -} -impl EthApiTypes for () { - type Error = EthApiError; - type NetworkTypes = AnyNetwork; - type TransactionCompat = (); + /// Returns reference to transaction response builder. + fn tx_resp_builder(&self) -> &Self::TransactionCompat; } /// Adapter for network specific transaction type. @@ -40,15 +41,42 @@ pub type RpcBlock = Block, ::HeaderResponse>; /// Adapter for network specific receipt type. pub type RpcReceipt = ::ReceiptResponse; +/// Adapter for network specific header type. +pub type RpcHeader = ::HeaderResponse; + +/// Adapter for network specific error type. 
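Stepping back to the `node.rs` module just added above: the payoff of `RpcNodeCore` is that RPC-side types can carry a single `N` type parameter instead of one generic per component. A minimal sketch of the pattern, using only the trait as defined above (the `EthHandlers` name is hypothetical, not part of this change):

use reth_rpc_eth_api::RpcNodeCore;

// Hypothetical handler container; previously this would have needed separate
// Provider, Pool, Network and Evm type parameters.
struct EthHandlers<N: RpcNodeCore> {
    node: N,
}

impl<N: RpcNodeCore> EthHandlers<N> {
    // Each component stays reachable through the single `N` bound.
    fn pool(&self) -> &N::Pool {
        self.node.pool()
    }

    fn provider(&self) -> &N::Provider {
        self.node.provider()
    }
}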
+pub type RpcError = ::Error; + /// Helper trait holds necessary trait bounds on [`EthApiTypes`] to implement `eth` API. -pub trait FullEthApiTypes: - EthApiTypes>> +pub trait FullEthApiTypes +where + Self: RpcNodeCore< + Provider: TransactionsProvider + ReceiptProvider, + Pool: TransactionPool< + Transaction: PoolTransaction>, + >, + > + EthApiTypes< + TransactionCompat: TransactionCompat< + ::Transaction, + Transaction = RpcTransaction, + Error = RpcError, + >, + >, { } impl FullEthApiTypes for T where - T: EthApiTypes< - TransactionCompat: TransactionCompat>, - > + T: RpcNodeCore< + Provider: TransactionsProvider + ReceiptProvider, + Pool: TransactionPool< + Transaction: PoolTransaction>, + >, + > + EthApiTypes< + TransactionCompat: TransactionCompat< + ::Transaction, + Transaction = RpcTransaction, + Error = RpcError, + >, + > { } diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 46a0d7b5c32..72b153ab084 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -15,10 +15,10 @@ workspace = true reth-chainspec.workspace = true reth-chain-state.workspace = true reth-errors.workspace = true -reth-evm.workspace = true reth-execution-types.workspace = true reth-metrics.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives-traits.workspace = true reth-storage-api.workspace = true reth-revm.workspace = true reth-rpc-server-types.workspace = true @@ -28,6 +28,7 @@ reth-transaction-pool.workspace = true reth-trie.workspace = true # ethereum +alloy-eips.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-sol-types.workspace = true @@ -35,9 +36,6 @@ alloy-rpc-types-eth.workspace = true revm.workspace = true revm-inspectors.workspace = true revm-primitives = { workspace = true, features = ["dev"] } -alloy-rpc-types.workspace = true -alloy-serde.workspace = true -alloy-eips.workspace = true # rpc jsonrpsee-core.workspace = true diff --git a/crates/rpc/rpc-eth-types/src/builder/config.rs b/crates/rpc/rpc-eth-types/src/builder/config.rs index a016d021586..532c1077203 100644 --- a/crates/rpc/rpc-eth-types/src/builder/config.rs +++ b/crates/rpc/rpc-eth-types/src/builder/config.rs @@ -15,7 +15,7 @@ use serde::{Deserialize, Serialize}; pub const DEFAULT_STALE_FILTER_TTL: Duration = Duration::from_secs(5 * 60); /// Additional config values for the eth namespace. -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] pub struct EthConfig { /// Settings for the caching layer pub cache: EthStateCacheConfig, diff --git a/crates/rpc/rpc-eth-types/src/builder/ctx.rs b/crates/rpc/rpc-eth-types/src/builder/ctx.rs index 2132dd0e22c..f9710882f2b 100644 --- a/crates/rpc/rpc-eth-types/src/builder/ctx.rs +++ b/crates/rpc/rpc-eth-types/src/builder/ctx.rs @@ -2,7 +2,8 @@ use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::ChainSpecProvider; -use reth_storage_api::BlockReaderIdExt; +use reth_primitives::NodePrimitives; +use reth_storage_api::{BlockReader, BlockReaderIdExt}; use reth_tasks::TaskSpawner; use crate::{ @@ -12,7 +13,10 @@ use crate::{ /// Context for building the `eth` namespace API. #[derive(Debug, Clone)] -pub struct EthApiBuilderCtx { +pub struct EthApiBuilderCtx +where + Provider: BlockReader, +{ /// Database handle. pub provider: Provider, /// Mempool handle. @@ -28,7 +32,7 @@ pub struct EthApiBuilderCtx { /// Events handle. 
pub events: Events, /// RPC cache handle. - pub cache: EthStateCache, + pub cache: EthStateCache, } impl @@ -37,22 +41,24 @@ where Provider: BlockReaderIdExt + Clone, { /// Returns a new [`FeeHistoryCache`] for the context. - pub fn new_fee_history_cache(&self) -> FeeHistoryCache + pub fn new_fee_history_cache(&self) -> FeeHistoryCache where - Provider: ChainSpecProvider + 'static, + N: NodePrimitives, Tasks: TaskSpawner, - Events: CanonStateSubscriptions, + Events: CanonStateSubscriptions, + Provider: + BlockReaderIdExt + ChainSpecProvider + 'static, { - let fee_history_cache = - FeeHistoryCache::new(self.cache.clone(), self.config.fee_history_cache); + let fee_history_cache = FeeHistoryCache::new(self.config.fee_history_cache); let new_canonical_blocks = self.events.canonical_state_stream(); let fhc = fee_history_cache.clone(); let provider = self.provider.clone(); + let cache = self.cache.clone(); self.executor.spawn_critical( "cache canonical blocks for fee history task", Box::pin(async move { - fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, provider).await; + fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, provider, cache).await; }), ); diff --git a/crates/rpc/rpc-eth-types/src/cache/config.rs b/crates/rpc/rpc-eth-types/src/cache/config.rs index 64999bd6bf3..001a5b4d4d5 100644 --- a/crates/rpc/rpc-eth-types/src/cache/config.rs +++ b/crates/rpc/rpc-eth-types/src/cache/config.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use reth_rpc_server_types::constants::cache::{ - DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_ENV_CACHE_MAX_LEN, + DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_HEADER_CACHE_MAX_LEN, DEFAULT_RECEIPT_CACHE_MAX_LEN, }; @@ -19,10 +19,10 @@ pub struct EthStateCacheConfig { /// /// Default is 2000. pub max_receipts: u32, - /// Max number of bytes for cached env data. + /// Max number of headers in cache. /// /// Default is 1000. - pub max_envs: u32, + pub max_headers: u32, /// Max number of concurrent database requests. /// /// Default is 512. 
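The rename above is mechanical but worth spelling out at a call site: callers that previously tuned `max_envs` now tune `max_headers`; the other limits are untouched. A minimal sketch, assuming `EthStateCacheConfig` remains re-exported at the crate root:

use reth_rpc_eth_types::EthStateCacheConfig;

fn main() {
    // Keep the default block/receipt limits and shrink only the header cache.
    let config = EthStateCacheConfig { max_headers: 500, ..Default::default() };
    assert_eq!(config.max_headers, 500);
}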
@@ -34,7 +34,7 @@ impl Default for EthStateCacheConfig { Self { max_blocks: DEFAULT_BLOCK_CACHE_MAX_LEN, max_receipts: DEFAULT_RECEIPT_CACHE_MAX_LEN, - max_envs: DEFAULT_ENV_CACHE_MAX_LEN, + max_headers: DEFAULT_HEADER_CACHE_MAX_LEN, max_concurrent_db_requests: DEFAULT_CONCURRENT_DB_REQUESTS, } } diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 7422dcfb8a7..ed107f3b0a9 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -8,7 +8,7 @@ use alloy_primitives::{ }; use reth_errors::ProviderResult; use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; -use reth_storage_api::StateProvider; +use reth_storage_api::{HashedPostStateProvider, StateProvider}; use reth_trie::HashedStorage; use revm::Database; @@ -67,6 +67,15 @@ impl reth_storage_api::StorageRootProvider for StateProviderTraitObjWrapper<'_> ) -> ProviderResult { self.0.storage_proof(address, slot, hashed_storage) } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + self.0.storage_multiproof(address, slots, hashed_storage) + } } impl reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper<'_> { @@ -114,6 +123,13 @@ impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { self.0.block_hash(block_number) } + fn convert_block_hash( + &self, + hash_or_number: alloy_rpc_types_eth::BlockHashOrNumber, + ) -> reth_errors::ProviderResult> { + self.0.convert_block_hash(hash_or_number) + } + fn canonical_hashes_range( &self, start: alloy_primitives::BlockNumber, @@ -121,50 +137,52 @@ impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { ) -> reth_errors::ProviderResult> { self.0.canonical_hashes_range(start, end) } +} - fn convert_block_hash( +impl HashedPostStateProvider for StateProviderTraitObjWrapper<'_> { + fn hashed_post_state( &self, - hash_or_number: alloy_rpc_types::BlockHashOrNumber, - ) -> reth_errors::ProviderResult> { - self.0.convert_block_hash(hash_or_number) + bundle_state: &revm::db::BundleState, + ) -> reth_trie::HashedPostState { + self.0.hashed_post_state(bundle_state) } } impl StateProvider for StateProviderTraitObjWrapper<'_> { - fn account_balance( + fn storage( &self, - addr: revm_primitives::Address, - ) -> reth_errors::ProviderResult> { - self.0.account_balance(addr) + account: revm_primitives::Address, + storage_key: alloy_primitives::StorageKey, + ) -> reth_errors::ProviderResult> { + self.0.storage(account, storage_key) } - fn account_code( + fn bytecode_by_hash( &self, - addr: revm_primitives::Address, + code_hash: B256, ) -> reth_errors::ProviderResult> { - self.0.account_code(addr) + self.0.bytecode_by_hash(code_hash) } - fn account_nonce( + fn account_code( &self, addr: revm_primitives::Address, - ) -> reth_errors::ProviderResult> { - self.0.account_nonce(addr) + ) -> reth_errors::ProviderResult> { + self.0.account_code(addr) } - fn bytecode_by_hash( + fn account_balance( &self, - code_hash: B256, - ) -> reth_errors::ProviderResult> { - self.0.bytecode_by_hash(code_hash) + addr: revm_primitives::Address, + ) -> reth_errors::ProviderResult> { + self.0.account_balance(addr) } - fn storage( + fn account_nonce( &self, - account: revm_primitives::Address, - storage_key: alloy_primitives::StorageKey, - ) -> reth_errors::ProviderResult> { - self.0.storage(account, storage_key) + addr: revm_primitives::Address, + ) -> reth_errors::ProviderResult> { + 
self.0.account_nonce(addr) } } diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index cbf05f2764e..16863887240 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -1,17 +1,16 @@ //! Async caching support for eth RPC +use super::{EthStateCacheConfig, MultiConsumerLruCache}; +use alloy_eips::BlockHashOrNumber; use alloy_primitives::B256; use futures::{future::Either, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; use reth_errors::{ProviderError, ProviderResult}; -use reth_evm::{provider::EvmEnvProvider, ConfigureEvm}; use reth_execution_types::Chain; -use reth_primitives::{ - BlockHashOrNumber, Header, Receipt, SealedBlockWithSenders, TransactionSigned, -}; +use reth_primitives::{NodePrimitives, SealedBlockWithSenders}; +use reth_primitives_traits::{Block, BlockBody}; use reth_storage_api::{BlockReader, StateProviderFactory, TransactionVariant}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; -use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; use schnellru::{ByLength, Limiter}; use std::{ future::Future, @@ -25,71 +24,77 @@ use tokio::sync::{ }; use tokio_stream::wrappers::UnboundedReceiverStream; -use super::{EthStateCacheConfig, MultiConsumerLruCache}; - pub mod config; pub mod db; pub mod metrics; pub mod multi_consumer; /// The type that can send the response to a requested [`SealedBlockWithSenders`] -type BlockTransactionsResponseSender = - oneshot::Sender>>>; +type BlockTransactionsResponseSender = oneshot::Sender>>>; /// The type that can send the response to a requested [`SealedBlockWithSenders`] -type BlockWithSendersResponseSender = - oneshot::Sender>>>; +type BlockWithSendersResponseSender = + oneshot::Sender>>>>; /// The type that can send the response to the requested receipts of a block. -type ReceiptsResponseSender = oneshot::Sender>>>>; +type ReceiptsResponseSender = oneshot::Sender>>>>; -/// The type that can send the response to a requested env -type EnvResponseSender = oneshot::Sender>; +/// The type that can send the response to a requested header +type HeaderResponseSender = oneshot::Sender>; -type BlockLruCache = MultiConsumerLruCache< +type BlockLruCache = MultiConsumerLruCache< B256, - Arc, + Arc>, L, - Either, + Either< + BlockWithSendersResponseSender, + BlockTransactionsResponseSender<<::Body as BlockBody>::Transaction>, + >, >; -type ReceiptsLruCache = - MultiConsumerLruCache>, L, ReceiptsResponseSender>; +type ReceiptsLruCache = + MultiConsumerLruCache>, L, ReceiptsResponseSender>; -type EnvLruCache = - MultiConsumerLruCache; +type HeaderLruCache = MultiConsumerLruCache>; /// Provides async access to cached eth data /// /// This is the frontend for the async caching service which manages cached data on a different /// task. -#[derive(Debug, Clone)] -pub struct EthStateCache { - to_service: UnboundedSender, +#[derive(Debug)] +pub struct EthStateCache { + to_service: UnboundedSender>, +} + +impl Clone for EthStateCache { + fn clone(&self) -> Self { + Self { to_service: self.to_service.clone() } + } } -impl EthStateCache { +impl EthStateCache { /// Creates and returns both [`EthStateCache`] frontend and the memory bound service. 
- fn create( + fn create( provider: Provider, action_task_spawner: Tasks, - evm_config: EvmConfig, max_blocks: u32, max_receipts: u32, - max_envs: u32, + max_headers: u32, max_concurrent_db_operations: usize, - ) -> (Self, EthStateCacheService) { + ) -> (Self, EthStateCacheService) + where + Provider: BlockReader, + { let (to_service, rx) = unbounded_channel(); let service = EthStateCacheService { provider, full_block_cache: BlockLruCache::new(max_blocks, "blocks"), receipts_cache: ReceiptsLruCache::new(max_receipts, "receipts"), - evm_env_cache: EnvLruCache::new(max_envs, "evm_env"), + headers_cache: HeaderLruCache::new(max_headers, "headers"), action_tx: to_service.clone(), action_rx: UnboundedReceiverStream::new(rx), action_task_spawner, rate_limiter: Arc::new(Semaphore::new(max_concurrent_db_operations)), - evm_config, }; let cache = Self { to_service }; (cache, service) @@ -99,42 +104,40 @@ impl EthStateCache { /// [`tokio::spawn`]. /// /// See also [`Self::spawn_with`] - pub fn spawn( - provider: Provider, - config: EthStateCacheConfig, - evm_config: EvmConfig, - ) -> Self + pub fn spawn(provider: Provider, config: EthStateCacheConfig) -> Self where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, - EvmConfig: ConfigureEvm
, + Provider: + StateProviderFactory + BlockReader + Clone + Unpin + 'static, { - Self::spawn_with(provider, config, TokioTaskExecutor::default(), evm_config) + Self::spawn_with(provider, config, TokioTaskExecutor::default()) } /// Creates a new async LRU backed cache service task and spawns it to a new task via the given /// spawner. /// /// The cache is memory limited by the given max bytes values. - pub fn spawn_with( + pub fn spawn_with( provider: Provider, config: EthStateCacheConfig, executor: Tasks, - evm_config: EvmConfig, ) -> Self where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: + StateProviderFactory + BlockReader + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - EvmConfig: ConfigureEvm
, { - let EthStateCacheConfig { max_blocks, max_receipts, max_envs, max_concurrent_db_requests } = - config; + let EthStateCacheConfig { + max_blocks, + max_receipts, + max_headers, + max_concurrent_db_requests, + } = config; let (this, service) = Self::create( provider, executor.clone(), - evm_config, max_blocks, max_receipts, - max_envs, + max_headers, max_concurrent_db_requests, ); executor.spawn_critical("eth state cache", Box::pin(service)); @@ -147,19 +150,16 @@ impl EthStateCache { pub async fn get_sealed_block_with_senders( &self, block_hash: B256, - ) -> ProviderResult>> { + ) -> ProviderResult>>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetBlockWithSenders { block_hash, response_tx }); rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? } - /// Requests the [Receipt] for the block hash + /// Requests the receipts for the block hash /// /// Returns `None` if the block was not found. - pub async fn get_receipts( - &self, - block_hash: B256, - ) -> ProviderResult>>> { + pub async fn get_receipts(&self, block_hash: B256) -> ProviderResult>>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetReceipts { block_hash, response_tx }); rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? @@ -169,7 +169,7 @@ impl EthStateCache { pub async fn get_block_and_receipts( &self, block_hash: B256, - ) -> ProviderResult, Arc>)>> { + ) -> ProviderResult>, Arc>)>> { let block = self.get_sealed_block_with_senders(block_hash); let receipts = self.get_receipts(block_hash); @@ -178,16 +178,12 @@ impl EthStateCache { Ok(block.zip(receipts)) } - /// Requests the evm env config for the block hash. + /// Requests the header for the given hash. /// - /// Returns an error if the corresponding header (required for populating the envs) was not - /// found. - pub async fn get_evm_env( - &self, - block_hash: B256, - ) -> ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)> { + /// Returns an error if the header is not found. + pub async fn get_header(&self, block_hash: B256) -> ProviderResult { let (response_tx, rx) = oneshot::channel(); - let _ = self.to_service.send(CacheAction::GetEnv { block_hash, response_tx }); + let _ = self.to_service.send(CacheAction::GetHeader { block_hash, response_tx }); rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? } } @@ -212,45 +208,45 @@ impl EthStateCache { pub(crate) struct EthStateCacheService< Provider, Tasks, - EvmConfig, LimitBlocks = ByLength, LimitReceipts = ByLength, - LimitEnvs = ByLength, + LimitHeaders = ByLength, > where - LimitBlocks: Limiter>, - LimitReceipts: Limiter>>, - LimitEnvs: Limiter, + Provider: BlockReader, + LimitBlocks: Limiter>>, + LimitReceipts: Limiter>>, + LimitHeaders: Limiter, { /// The type used to lookup data from disk provider: Provider, /// The LRU cache for full blocks grouped by their hash. - full_block_cache: BlockLruCache, + full_block_cache: BlockLruCache, /// The LRU cache for full blocks grouped by their hash. - receipts_cache: ReceiptsLruCache, - /// The LRU cache for revm environments - evm_env_cache: EnvLruCache, + receipts_cache: ReceiptsLruCache, + /// The LRU cache for headers. + /// + /// Headers are cached because they are required to populate the environment for execution + /// (evm). + headers_cache: HeaderLruCache, /// Sender half of the action channel. - action_tx: UnboundedSender, + action_tx: UnboundedSender>, /// Receiver half of the action channel. 
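Pausing on the spawn plumbing above: with the EVM config gone, starting the cache service needs only a provider and a config. A hedged sketch of the new entry point (bounds abbreviated; the block/receipt associated types elided in this extract are assumed to line up with the provider's):

use reth_rpc_eth_types::{EthStateCache, EthStateCacheConfig};
use reth_storage_api::{BlockReader, StateProviderFactory};

// Spawns the cache service via tokio; note there is no `evm_config` argument
// anymore, since headers rather than prepared EVM envs are what get cached.
fn spawn_cache<P>(provider: P)
where
    P: StateProviderFactory + BlockReader + Clone + Unpin + 'static,
{
    let _cache = EthStateCache::spawn(provider, EthStateCacheConfig::default());
}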
- action_rx: UnboundedReceiverStream, + action_rx: UnboundedReceiverStream>, /// The type that's used to spawn tasks that do the actual work action_task_spawner: Tasks, /// Rate limiter rate_limiter: Arc, - /// The type that determines how to configure the EVM. - evm_config: EvmConfig, } -impl EthStateCacheService +impl EthStateCacheService where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + BlockReader + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - EvmConfig: ConfigureEvm
, { fn on_new_block( &mut self, block_hash: B256, - res: ProviderResult>>, + res: ProviderResult>>>, ) { if let Some(queued) = self.full_block_cache.remove(&block_hash) { // send the response to queued senders @@ -261,7 +257,7 @@ where } Either::Right(transaction_tx) => { let _ = transaction_tx.send(res.clone().map(|maybe_block| { - maybe_block.map(|block| block.block.body.transactions.clone()) + maybe_block.map(|block| block.block.body.transactions().to_vec()) })); } } @@ -277,7 +273,7 @@ where fn on_new_receipts( &mut self, block_hash: B256, - res: ProviderResult>>>, + res: ProviderResult>>>, ) { if let Some(queued) = self.receipts_cache.remove(&block_hash) { // send the response to queued senders @@ -295,7 +291,7 @@ where fn on_reorg_block( &mut self, block_hash: B256, - res: ProviderResult>, + res: ProviderResult>>, ) { let res = res.map(|b| b.map(Arc::new)); if let Some(queued) = self.full_block_cache.remove(&block_hash) { @@ -307,7 +303,7 @@ where } Either::Right(transaction_tx) => { let _ = transaction_tx.send(res.clone().map(|maybe_block| { - maybe_block.map(|block| block.block.body.transactions.clone()) + maybe_block.map(|block| block.block.body.transactions().to_vec()) })); } } @@ -318,7 +314,7 @@ where fn on_reorg_receipts( &mut self, block_hash: B256, - res: ProviderResult>>>, + res: ProviderResult>>>, ) { if let Some(queued) = self.receipts_cache.remove(&block_hash) { // send the response to queued senders @@ -331,15 +327,14 @@ where fn update_cached_metrics(&self) { self.full_block_cache.update_cached_metrics(); self.receipts_cache.update_cached_metrics(); - self.evm_env_cache.update_cached_metrics(); + self.headers_cache.update_cached_metrics(); } } -impl Future for EthStateCacheService +impl Future for EthStateCacheService where - Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Provider: StateProviderFactory + BlockReader + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - EvmConfig: ConfigureEvm
, { type Output = (); @@ -406,39 +401,30 @@ where })); } } - CacheAction::GetEnv { block_hash, response_tx } => { - // check if env data is cached - if let Some(env) = this.evm_env_cache.get(&block_hash).cloned() { - let _ = response_tx.send(Ok(env)); + CacheAction::GetHeader { block_hash, response_tx } => { + // check if the header is cached + if let Some(header) = this.headers_cache.get(&block_hash).cloned() { + let _ = response_tx.send(Ok(header)); continue } - // env data is not in the cache, request it if this is the first + // header is not in the cache, request it if this is the first // consumer - if this.evm_env_cache.queue(block_hash, response_tx) { + if this.headers_cache.queue(block_hash, response_tx) { let provider = this.provider.clone(); let action_tx = this.action_tx.clone(); let rate_limiter = this.rate_limiter.clone(); - let evm_config = this.evm_config.clone(); this.action_task_spawner.spawn_blocking(Box::pin(async move { // Acquire permit let _permit = rate_limiter.acquire().await; - let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id( - CfgEnv::default(), - SpecId::LATEST, - ); - let mut block_env = BlockEnv::default(); - let res = provider - .fill_env_at( - &mut cfg, - &mut block_env, - block_hash.into(), - evm_config, - ) - .map(|_| (cfg, block_env)); - let _ = action_tx.send(CacheAction::EnvResult { + let header = provider.header(&block_hash).and_then(|header| { + header.ok_or_else(|| { + ProviderError::HeaderNotFound(block_hash.into()) + }) + }); + let _ = action_tx.send(CacheAction::HeaderResult { block_hash, - res: Box::new(res), + res: Box::new(header), }); })); } @@ -457,18 +443,18 @@ where this.on_new_block(block_hash, Err(e)); } }, - CacheAction::EnvResult { block_hash, res } => { + CacheAction::HeaderResult { block_hash, res } => { let res = *res; - if let Some(queued) = this.evm_env_cache.remove(&block_hash) { + if let Some(queued) = this.headers_cache.remove(&block_hash) { // send the response to queued senders for tx in queued { let _ = tx.send(res.clone()); } } - // cache good env data + // cache good header if let Ok(data) = res { - this.evm_env_cache.insert(block_hash, data); + this.headers_cache.insert(block_hash, data); } } CacheAction::CacheNewCanonicalChain { chain_change } => { @@ -508,52 +494,55 @@ where } /// All message variants sent through the channel -enum CacheAction { +enum CacheAction { GetBlockWithSenders { block_hash: B256, - response_tx: BlockWithSendersResponseSender, + response_tx: BlockWithSendersResponseSender, }, - GetEnv { + GetHeader { block_hash: B256, - response_tx: EnvResponseSender, + response_tx: HeaderResponseSender, }, GetReceipts { block_hash: B256, - response_tx: ReceiptsResponseSender, + response_tx: ReceiptsResponseSender, }, BlockWithSendersResult { block_hash: B256, - res: ProviderResult>>, + res: ProviderResult>>>, }, ReceiptsResult { block_hash: B256, - res: ProviderResult>>>, + res: ProviderResult>>>, }, - EnvResult { + HeaderResult { block_hash: B256, - res: Box>, + res: Box>, }, CacheNewCanonicalChain { - chain_change: ChainChange, + chain_change: ChainChange, }, RemoveReorgedChain { - chain_change: ChainChange, + chain_change: ChainChange, }, } -struct BlockReceipts { +struct BlockReceipts { block_hash: B256, - receipts: Vec>, + receipts: Vec>, } /// A change of the canonical chain -struct ChainChange { - blocks: Vec, - receipts: Vec, +struct ChainChange { + blocks: Vec>, + receipts: Vec>, } -impl ChainChange { - fn new(chain: Arc) -> Self { +impl ChainChange { + fn new(chain: Arc>) -> Self + where + N: 
NodePrimitives, + { let (blocks, receipts): (Vec<_>, Vec<_>) = chain .blocks_and_receipts() .map(|(block, receipts)| { @@ -570,9 +559,11 @@ impl ChainChange { /// immediately before they need to be fetched from disk. /// /// Reorged blocks are removed from the cache. -pub async fn cache_new_blocks_task(eth_state_cache: EthStateCache, mut events: St) -where - St: Stream + Unpin + 'static, +pub async fn cache_new_blocks_task( + eth_state_cache: EthStateCache, + mut events: St, +) where + St: Stream> + Unpin + 'static, { while let Some(event) = events.next().await { if let Some(reverted) = event.reverted() { diff --git a/crates/rpc/rpc-eth-api/src/helpers/error.rs b/crates/rpc/rpc-eth-types/src/error/api.rs similarity index 87% rename from crates/rpc/rpc-eth-api/src/helpers/error.rs rename to crates/rpc/rpc-eth-types/src/error/api.rs index 1d991b8e65b..419f530c4e2 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/error.rs +++ b/crates/rpc/rpc-eth-types/src/error/api.rs @@ -1,9 +1,10 @@ //! Helper traits to wrap generic l1 errors, in network specific error type configured in -//! [`EthApiTypes`](crate::EthApiTypes). +//! `reth_rpc_eth_api::EthApiTypes`. -use reth_rpc_eth_types::EthApiError; use revm_primitives::EVMError; +use crate::EthApiError; + /// Helper trait to wrap core [`EthApiError`]. pub trait FromEthApiError: From { /// Converts from error via [`EthApiError`]. @@ -51,7 +52,7 @@ pub trait AsEthApiError { fn as_err(&self) -> Option<&EthApiError>; /// Returns `true` if error is - /// [`RpcInvalidTransactionError::GasTooHigh`](reth_rpc_eth_types::RpcInvalidTransactionError::GasTooHigh). + /// [`RpcInvalidTransactionError::GasTooHigh`](crate::RpcInvalidTransactionError::GasTooHigh). fn is_gas_too_high(&self) -> bool { if let Some(err) = self.as_err() { return err.is_gas_too_high() @@ -61,7 +62,7 @@ pub trait AsEthApiError { } /// Returns `true` if error is - /// [`RpcInvalidTransactionError::GasTooLow`](reth_rpc_eth_types::RpcInvalidTransactionError::GasTooLow). + /// [`RpcInvalidTransactionError::GasTooLow`](crate::RpcInvalidTransactionError::GasTooLow). fn is_gas_too_low(&self) -> bool { if let Some(err) = self.as_err() { return err.is_gas_too_low() diff --git a/crates/rpc/rpc-eth-types/src/error.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs similarity index 98% rename from crates/rpc/rpc-eth-types/src/error.rs rename to crates/rpc/rpc-eth-types/src/error/mod.rs index b38b3122708..aeea8ea5b89 100644 --- a/crates/rpc/rpc-eth-types/src/error.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -1,12 +1,15 @@ //! Implementation specific Errors for the `eth_` namespace. 
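Relocating these helper traits into `reth-rpc-eth-types` (note the doc links flipping from `reth_rpc_eth_types::...` to `crate::...`) removes the error helpers' dependency on the API crate; their usage is unchanged. A small sketch, assuming `from_eth_err` keeps its current shape on `FromEthApiError`:

use reth_rpc_eth_types::{error::FromEthApiError, EthApiError};

// Lift a core error into the network-specific error type, the way generic
// `eth_` method implementations do.
fn lift<E: FromEthApiError>(err: EthApiError) -> E {
    E::from_eth_err(err)
}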
-use std::time::Duration; +pub mod api; +pub use api::{AsEthApiError, FromEthApiError, FromEvmError, IntoEthApiError}; +use core::time::Duration; + +use alloy_eips::BlockId; use alloy_primitives::{Address, Bytes, U256}; -use alloy_rpc_types::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; +use alloy_rpc_types_eth::{error::EthRpcErrorCode, request::TransactionInputError, BlockError}; use alloy_sol_types::decode_revert_reason; use reth_errors::RethError; -use reth_primitives::{revm_primitives::InvalidHeader, BlockId}; use reth_rpc_server_types::result::{ block_id_to_str, internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, }; @@ -16,6 +19,7 @@ use reth_transaction_pool::error::{ }; use revm::primitives::{EVMError, ExecutionResult, HaltReason, InvalidTransaction, OutOfGasError}; use revm_inspectors::tracing::MuxError; +use revm_primitives::InvalidHeader; use tracing::error; /// A trait to convert an error to an RPC error. @@ -358,7 +362,7 @@ pub enum RpcInvalidTransactionError { SenderNoEOA, /// Gas limit was exceeded during execution. /// Contains the gas limit. - #[error("out of gas: gas required exceeds allowance: {0}")] + #[error("out of gas: gas required exceeds: {0}")] BasicOutOfGas(u64), /// Gas limit was exceeded during memory expansion. /// Contains the gas limit. @@ -672,6 +676,9 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { fn from(error: RpcPoolError) -> Self { match error { RpcPoolError::Invalid(err) => err.into(), + RpcPoolError::TxPoolOverflow => { + rpc_error_with_code(EthRpcErrorCode::TransactionRejected.code(), error.to_string()) + } error => internal_rpc_err(error.to_string()), } } diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 57dd276e5cf..2c365ae90bf 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -6,9 +6,10 @@ use std::{ sync::{atomic::Ordering::SeqCst, Arc}, }; +use alloy_consensus::{BlockHeader, Transaction, TxReceipt}; use alloy_eips::eip1559::calc_next_block_base_fee; use alloy_primitives::B256; -use alloy_rpc_types::TxGasAndReward; +use alloy_rpc_types_eth::TxGasAndReward; use futures::{ future::{Fuse, FusedFuture}, FutureExt, Stream, StreamExt, @@ -16,14 +17,14 @@ use futures::{ use metrics::atomics::AtomicU64; use reth_chain_state::CanonStateNotification; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; -use reth_primitives::{Receipt, SealedBlock, TransactionSigned}; +use reth_primitives::{NodePrimitives, SealedBlock}; +use reth_primitives_traits::BlockBody; +use reth_rpc_server_types::constants::gas_oracle::MAX_HEADER_HISTORY; use reth_storage_api::BlockReaderIdExt; use revm_primitives::{calc_blob_gasprice, calc_excess_blob_gas}; use serde::{Deserialize, Serialize}; use tracing::trace; -use reth_rpc_server_types::constants::gas_oracle::MAX_HEADER_HISTORY; - use super::{EthApiError, EthStateCache}; /// Contains cached fee history entries for blocks. 
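The substantive change in this file: `FeeHistoryCacheInner` no longer owns an `EthStateCache` handle, so construction takes only the config and the handle travels with the refill task instead. A wiring sketch matching the builder-context hunk earlier (`executor`, `events`, `provider`, and `cache` are assumed in scope with the bounds the hunks below require):

// Construction from config alone...
let fee_history_cache = FeeHistoryCache::new(FeeHistoryCacheConfig::default());

// ...and the EthStateCache handle is passed to the task explicitly.
let fhc = fee_history_cache.clone();
executor.spawn_critical(
    "cache canonical blocks for fee history task",
    Box::pin(async move {
        fee_history_cache_new_blocks_task(fhc, events, provider, cache).await;
    }),
);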
@@ -36,13 +37,12 @@ pub struct FeeHistoryCache { impl FeeHistoryCache { /// Creates new `FeeHistoryCache` instance, initialize it with the more recent data, set bounds - pub fn new(eth_cache: EthStateCache, config: FeeHistoryCacheConfig) -> Self { + pub fn new(config: FeeHistoryCacheConfig) -> Self { let inner = FeeHistoryCacheInner { lower_bound: Default::default(), upper_bound: Default::default(), config, entries: Default::default(), - eth_cache, }; Self { inner: Arc::new(inner) } } @@ -73,9 +73,12 @@ impl FeeHistoryCache { } /// Insert block data into the cache. - async fn insert_blocks<'a, I>(&self, blocks: I) + async fn insert_blocks<'a, I, H, B, R>(&self, blocks: I) where - I: IntoIterator>)>, + H: BlockHeader + 'a, + B: BlockBody, + R: TxReceipt, + I: IntoIterator, Arc>)>, { let mut entries = self.inner.entries.write().await; @@ -87,11 +90,11 @@ impl FeeHistoryCache { &percentiles, fee_history_entry.gas_used, fee_history_entry.base_fee_per_gas, - &block.body.transactions, + block.body.transactions(), &receipts, ) .unwrap_or_default(); - entries.insert(block.number, fee_history_entry); + entries.insert(block.number(), fee_history_entry); } // enforce bounds by popping the oldest entries @@ -200,18 +203,20 @@ struct FeeHistoryCacheInner { config: FeeHistoryCacheConfig, /// Stores the entries of the cache entries: tokio::sync::RwLock>, - eth_cache: EthStateCache, } /// Awaits for new chain events and directly inserts them into the cache so they're available /// immediately before they need to be fetched from disk. -pub async fn fee_history_cache_new_blocks_task( +pub async fn fee_history_cache_new_blocks_task( fee_history_cache: FeeHistoryCache, mut events: St, provider: Provider, + cache: EthStateCache, ) where - St: Stream + Unpin + 'static, - Provider: BlockReaderIdExt + ChainSpecProvider + 'static, + St: Stream> + Unpin + 'static, + Provider: + BlockReaderIdExt + ChainSpecProvider + 'static, + N: NodePrimitives, { // We're listening for new blocks emitted when the node is in live sync. // If the node transitions to stage sync, we need to fetch the missing blocks @@ -224,12 +229,7 @@ pub async fn fee_history_cache_new_blocks_task( trace!(target: "rpc::fee", ?block_number, "Fetching missing block for fee history cache"); if let Ok(Some(hash)) = provider.block_hash(block_number) { // fetch missing block - fetch_missing_block = fee_history_cache - .inner - .eth_cache - .get_block_and_receipts(hash) - .boxed() - .fuse(); + fetch_missing_block = cache.get_block_and_receipts(hash).boxed().fuse(); } } } @@ -248,7 +248,7 @@ pub async fn fee_history_cache_new_blocks_task( break; }; - let committed = event .committed(); + let committed = event.committed(); let (blocks, receipts): (Vec<_>, Vec<_>) = committed .blocks_and_receipts() .map(|(block, receipts)| { @@ -269,13 +269,17 @@ pub async fn fee_history_cache_new_blocks_task( /// the corresponding rewards for the transactions at each percentile. /// /// The results are returned as a vector of U256 values. 
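Before the generalized signature below, a call-site sketch (concrete `T`/`R` are whatever the provider yields; the locals are illustrative):

// Per-transaction gas is recovered by differencing cumulative_gas_used across
// consecutive receipts; rewards are then sampled at the requested percentiles.
let rewards = calculate_reward_percentiles_for_block(
    &[25.0, 50.0, 75.0], // percentiles, monotonically increasing
    block.gas_used(),
    block.base_fee_per_gas().unwrap_or_default(),
    block.body.transactions(),
    &receipts,
)?;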
-pub fn calculate_reward_percentiles_for_block( +pub fn calculate_reward_percentiles_for_block( percentiles: &[f64], gas_used: u64, base_fee_per_gas: u64, - transactions: &[TransactionSigned], - receipts: &[Receipt], -) -> Result, EthApiError> { + transactions: &[T], + receipts: &[R], +) -> Result, EthApiError> +where + T: Transaction, + R: TxReceipt, +{ let mut transactions = transactions .iter() .zip(receipts) @@ -286,12 +290,12 @@ pub fn calculate_reward_percentiles_for_block( // While we will sum up the gas again later, it is worth // noting that the order of the transactions will be different, // so the sum will also be different for each receipt. - let gas_used = receipt.cumulative_gas_used - *previous_gas; - *previous_gas = receipt.cumulative_gas_used; + let gas_used = receipt.cumulative_gas_used() - *previous_gas; + *previous_gas = receipt.cumulative_gas_used(); Some(TxGasAndReward { - gas_used, - reward: tx.effective_tip_per_gas(Some(base_fee_per_gas)).unwrap_or_default(), + gas_used: gas_used as u64, + reward: tx.effective_tip_per_gas(base_fee_per_gas).unwrap_or_default(), }) }) .collect::>(); @@ -305,7 +309,7 @@ pub fn calculate_reward_percentiles_for_block( // the percentiles are monotonically increasing. let mut tx_index = 0; let mut cumulative_gas_used = transactions.first().map(|tx| tx.gas_used).unwrap_or_default(); - let mut rewards_in_block = Vec::new(); + let mut rewards_in_block = Vec::with_capacity(percentiles.len()); for percentile in percentiles { // Empty blocks should return in a zero row if transactions.is_empty() { @@ -360,20 +364,20 @@ impl FeeHistoryEntry { /// Creates a new entry from a sealed block. /// /// Note: This does not calculate the rewards for the block. - pub fn new(block: &SealedBlock) -> Self { + pub fn new(block: &SealedBlock) -> Self { Self { - base_fee_per_gas: block.base_fee_per_gas.unwrap_or_default(), - gas_used_ratio: block.gas_used as f64 / block.gas_limit as f64, - base_fee_per_blob_gas: block.blob_fee(), - blob_gas_used_ratio: block.blob_gas_used() as f64 / - reth_primitives::constants::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, - excess_blob_gas: block.excess_blob_gas, - blob_gas_used: block.blob_gas_used, - gas_used: block.gas_used, + base_fee_per_gas: block.base_fee_per_gas().unwrap_or_default(), + gas_used_ratio: block.gas_used() as f64 / block.gas_limit() as f64, + base_fee_per_blob_gas: block.excess_blob_gas().map(calc_blob_gasprice), + blob_gas_used_ratio: block.body.blob_gas_used() as f64 / + alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, + excess_blob_gas: block.excess_blob_gas(), + blob_gas_used: block.blob_gas_used(), + gas_used: block.gas_used(), header_hash: block.hash(), - gas_limit: block.gas_limit, + gas_limit: block.gas_limit(), rewards: Vec::new(), - timestamp: block.timestamp, + timestamp: block.timestamp(), } } diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 84e7ab8306d..ed49d7c6701 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -1,24 +1,27 @@ //! An implementation of the eth gas price oracle, used for providing gas price estimates based on //! previous blocks. 
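The substantive change here is how the effective tip is computed for blocks without a base fee (pre-EIP-1559): the transaction's priority fee, or plain gas price for legacy transactions, stands in. Isolated as a helper, the logic the hunks below inline twice would be:

use alloy_consensus::Transaction;

// Effective miner tip used for sorting and sampling; mirrors the inline
// match on `base_fee_per_gas` in the oracle's block-price sampling below.
fn effective_tip<T: Transaction>(tx: &T, base_fee_per_gas: Option<u64>) -> Option<u128> {
    match base_fee_per_gas {
        Some(base_fee) => tx.effective_tip_per_gas(base_fee),
        None => Some(tx.priority_fee_or_price()),
    }
}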
+use alloy_consensus::{constants::GWEI_TO_WEI, BlockHeader, Transaction}; +use alloy_eips::BlockNumberOrTag; use alloy_primitives::{B256, U256}; -use alloy_rpc_types::BlockId; +use alloy_rpc_types_eth::BlockId; use derive_more::{Deref, DerefMut, From, Into}; use itertools::Itertools; -use reth_primitives::{constants::GWEI_TO_WEI, BlockNumberOrTag}; -use reth_rpc_server_types::constants; -use reth_storage_api::BlockReaderIdExt; +use reth_primitives_traits::{BlockBody, SignedTransaction}; +use reth_rpc_server_types::{ + constants, + constants::gas_oracle::{ + DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE, + DEFAULT_MAX_GAS_PRICE, MAX_HEADER_HISTORY, SAMPLE_NUMBER, + }, +}; +use reth_storage_api::{BlockReader, BlockReaderIdExt}; use schnellru::{ByLength, LruMap}; use serde::{Deserialize, Serialize}; use std::fmt::{self, Debug, Formatter}; use tokio::sync::Mutex; use tracing::warn; -use reth_rpc_server_types::constants::gas_oracle::{ - DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE, - DEFAULT_MAX_GAS_PRICE, MAX_HEADER_HISTORY, SAMPLE_NUMBER, -}; - use super::{EthApiError, EthResult, EthStateCache, RpcInvalidTransactionError}; /// The default gas limit for `eth_call` and adjacent calls. See @@ -67,11 +70,14 @@ impl Default for GasPriceOracleConfig { /// Calculates a gas price depending on recent blocks. #[derive(Debug)] -pub struct GasPriceOracle { +pub struct GasPriceOracle +where + Provider: BlockReader, +{ /// The type used to subscribe to block events and get block info provider: Provider, /// The cache for blocks - cache: EthStateCache, + cache: EthStateCache, /// The config for the oracle oracle_config: GasPriceOracleConfig, /// The price under which the sample will be ignored. @@ -89,7 +95,7 @@ where pub fn new( provider: Provider, mut oracle_config: GasPriceOracleConfig, - cache: EthStateCache, + cache: EthStateCache, ) -> Self { // sanitize the percentile to be less than 100 if oracle_config.percentile > 100 { @@ -139,8 +145,8 @@ where let mut populated_blocks = 0; // we only check a maximum of 2 * max_block_history, or the number of blocks in the chain - let max_blocks = if self.oracle_config.max_block_history * 2 > header.number { - header.number + let max_blocks = if self.oracle_config.max_block_history * 2 > header.number() { + header.number() } else { self.oracle_config.max_block_history * 2 }; @@ -217,43 +223,44 @@ where None => return Ok(None), }; - let base_fee_per_gas = block.base_fee_per_gas; - let parent_hash = block.parent_hash; + let base_fee_per_gas = block.base_fee_per_gas(); + let parent_hash = block.parent_hash(); // sort the functions by ascending effective tip first - let sorted_transactions = block - .body - .transactions - .iter() - .sorted_by_cached_key(|tx| tx.effective_tip_per_gas(base_fee_per_gas)); + let sorted_transactions = block.body.transactions().iter().sorted_by_cached_key(|tx| { + if let Some(base_fee) = base_fee_per_gas { + (*tx).effective_tip_per_gas(base_fee) + } else { + Some((*tx).priority_fee_or_price()) + } + }); let mut prices = Vec::with_capacity(limit); for tx in sorted_transactions { - let mut effective_gas_tip = None; + let effective_tip = if let Some(base_fee) = base_fee_per_gas { + tx.effective_tip_per_gas(base_fee) + } else { + Some(tx.priority_fee_or_price()) + }; + // ignore transactions with a tip under the configured threshold if let Some(ignore_under) = self.ignore_price { - let tip = tx.effective_tip_per_gas(base_fee_per_gas); - effective_gas_tip = Some(tip); - if 
tip < Some(ignore_under) { + if effective_tip < Some(ignore_under) { continue } } // check if the sender was the coinbase, if so, ignore if let Some(sender) = tx.recover_signer() { - if sender == block.beneficiary { + if sender == block.beneficiary() { continue } } // a `None` effective_gas_tip represents a transaction where the max_fee_per_gas is // less than the base fee which would be invalid - let effective_gas_tip = effective_gas_tip - .unwrap_or_else(|| tx.effective_tip_per_gas(base_fee_per_gas)) - .ok_or(RpcInvalidTransactionError::FeeCapTooLow)?; - - prices.push(U256::from(effective_gas_tip)); + prices.push(U256::from(effective_tip.ok_or(RpcInvalidTransactionError::FeeCapTooLow)?)); // we have enough entries if prices.len() >= limit { diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs index fa36dae4c88..03c23dc3456 100644 --- a/crates/rpc/rpc-eth-types/src/lib.rs +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -37,5 +37,5 @@ pub use gas_oracle::{ }; pub use id_provider::EthSubscriptionIdProvider; pub use pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; -pub use receipt::ReceiptBuilder; +pub use receipt::EthReceiptBuilder; pub use transaction::TransactionSource; diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index 205e2bba37b..8b2dbaa5441 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -2,30 +2,34 @@ //! //! Log parsing for building filter. +use alloy_consensus::TxReceipt; +use alloy_eips::{eip2718::Encodable2718, BlockNumHash}; use alloy_primitives::TxHash; -use alloy_rpc_types::{FilteredParams, Log}; +use alloy_rpc_types_eth::{FilteredParams, Log}; use reth_chainspec::ChainInfo; use reth_errors::ProviderError; -use reth_primitives::{BlockNumHash, Receipt, SealedBlockWithSenders}; -use reth_storage_api::BlockReader; +use reth_primitives::SealedBlockWithSenders; +use reth_primitives_traits::{BlockBody, SignedTransaction}; +use reth_storage_api::{BlockReader, ProviderBlock}; use std::sync::Arc; /// Returns all matching of a block's receipts when the transaction hashes are known. -pub fn matching_block_logs_with_tx_hashes<'a, I>( +pub fn matching_block_logs_with_tx_hashes<'a, I, R>( filter: &FilteredParams, block_num_hash: BlockNumHash, tx_hashes_and_receipts: I, removed: bool, ) -> Vec where - I: IntoIterator, + I: IntoIterator, + R: TxReceipt + 'a, { let mut all_logs = Vec::new(); // Tracks the index of a log in the entire block. let mut log_index: u64 = 0; // Iterate over transaction hashes and receipts and append matching logs. for (receipt_idx, (tx_hash, receipt)) in tx_hashes_and_receipts.into_iter().enumerate() { - for log in &receipt.logs { + for log in receipt.logs() { if log_matches_filter(block_num_hash, log, filter) { let log = Log { inner: log.clone(), @@ -52,20 +56,23 @@ pub enum ProviderOrBlock<'a, P: BlockReader> { /// Provider Provider(&'a P), /// [`SealedBlockWithSenders`] - Block(Arc), + Block(Arc>>), } /// Appends all matching logs of a block's receipts. /// If the log matches, look up the corresponding transaction hash. -pub fn append_matching_block_logs( +pub fn append_matching_block_logs
( all_logs: &mut Vec, provider_or_block: ProviderOrBlock<'_, P>, filter: &FilteredParams, block_num_hash: BlockNumHash, - receipts: &[Receipt], + receipts: &[P::Receipt], removed: bool, block_timestamp: u64, -) -> Result<(), ProviderError> { +) -> Result<(), ProviderError> +where + P: BlockReader, +{ // Tracks the index of a log in the entire block. let mut log_index: u64 = 0; @@ -79,13 +86,13 @@ pub fn append_matching_block_logs( // The transaction hash of the current receipt. let mut transaction_hash = None; - for log in &receipt.logs { + for log in receipt.logs() { if log_matches_filter(block_num_hash, log, filter) { // if this is the first match in the receipt's logs, look up the transaction hash if transaction_hash.is_none() { transaction_hash = match &provider_or_block { ProviderOrBlock::Block(block) => { - block.body.transactions.get(receipt_idx).map(|t| t.hash()) + block.body.transactions().get(receipt_idx).map(|t| t.trie_hash()) } ProviderOrBlock::Provider(provider) => { let first_tx_num = match loaded_first_tx_num { @@ -109,7 +116,7 @@ pub fn append_matching_block_logs( ProviderError::TransactionNotFound(transaction_id.into()) })?; - Some(transaction.hash()) + Some(transaction.trie_hash()) } }; } @@ -178,7 +185,7 @@ pub fn get_filter_block_range( #[cfg(test)] mod tests { - use alloy_rpc_types::Filter; + use alloy_rpc_types_eth::Filter; use super::*; @@ -241,8 +248,8 @@ mod tests { let start_block = info.best_number; let (from_block_number, to_block_number) = get_filter_block_range( - from_block.and_then(alloy_rpc_types::BlockNumberOrTag::as_number), - to_block.and_then(alloy_rpc_types::BlockNumberOrTag::as_number), + from_block.and_then(alloy_rpc_types_eth::BlockNumberOrTag::as_number), + to_block.and_then(alloy_rpc_types_eth::BlockNumberOrTag::as_number), start_block, info, ); diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 949e205dcf8..ef2a61dd720 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -4,46 +4,49 @@ use std::time::Instant; +use alloy_consensus::BlockHeader; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::B256; use derive_more::Constructor; -use reth_primitives::{BlockId, BlockNumberOrTag, Receipt, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{Receipt, SealedBlockWithSenders}; +use reth_primitives_traits::Block; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}; /// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block. #[derive(Debug, Clone, Constructor)] -pub struct PendingBlockEnv { +pub struct PendingBlockEnv { /// Configured [`CfgEnvWithHandlerCfg`] for the pending block. pub cfg: CfgEnvWithHandlerCfg, /// Configured [`BlockEnv`] for the pending block. pub block_env: BlockEnv, /// Origin block for the config - pub origin: PendingBlockEnvOrigin, + pub origin: PendingBlockEnvOrigin, } /// The origin for a configured [`PendingBlockEnv`] #[derive(Clone, Debug)] -pub enum PendingBlockEnvOrigin { +pub enum PendingBlockEnvOrigin { /// The pending block as received from the CL. - ActualPending(SealedBlockWithSenders), + ActualPending(SealedBlockWithSenders, Vec), /// The _modified_ header of the latest block. 
/// /// This derives the pending state based on the latest header by modifying: /// - the timestamp /// - the block number /// - fees - DerivedFromLatest(SealedHeader), + DerivedFromLatest(B256), } -impl PendingBlockEnvOrigin { +impl PendingBlockEnvOrigin { /// Returns true if the origin is the actual pending block as received from the CL. pub const fn is_actual_pending(&self) -> bool { - matches!(self, Self::ActualPending(_)) + matches!(self, Self::ActualPending(_, _)) } /// Consumes the type and returns the actual pending block. - pub fn into_actual_pending(self) -> Option { + pub fn into_actual_pending(self) -> Option> { match self { - Self::ActualPending(block) => Some(block), + Self::ActualPending(block, _) => Some(block), _ => None, } } @@ -54,8 +57,8 @@ impl PendingBlockEnvOrigin { /// identify the block by its hash (latest block). pub fn state_block_id(&self) -> BlockId { match self { - Self::ActualPending(_) => BlockNumberOrTag::Pending.into(), - Self::DerivedFromLatest(header) => BlockId::Hash(header.hash().into()), + Self::ActualPending(_, _) => BlockNumberOrTag::Pending.into(), + Self::DerivedFromLatest(hash) => BlockId::Hash((*hash).into()), } } @@ -66,27 +69,19 @@ impl PendingBlockEnvOrigin { /// header. pub fn build_target_hash(&self) -> B256 { match self { - Self::ActualPending(block) => block.parent_hash, - Self::DerivedFromLatest(header) => header.hash(), - } - } - - /// Returns the header this pending block is based on. - pub fn header(&self) -> &SealedHeader { - match self { - Self::ActualPending(block) => &block.header, - Self::DerivedFromLatest(header) => header, + Self::ActualPending(block, _) => block.header().parent_hash(), + Self::DerivedFromLatest(hash) => *hash, } } } /// Locally built pending block for `pending` tag. #[derive(Debug, Constructor)] -pub struct PendingBlock { +pub struct PendingBlock { /// Timestamp when the pending block is considered outdated. pub expires_at: Instant, /// The locally built pending block. - pub block: SealedBlockWithSenders, + pub block: SealedBlockWithSenders, /// The receipts for the pending block - pub receipts: Vec, + pub receipts: Vec, } diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 2668291e2c8..b7f82782b0b 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -1,26 +1,101 @@ //! RPC receipt response builder, extends a layer one receipt with layer two data. -use alloy_consensus::Transaction; +use super::{EthApiError, EthResult}; +use alloy_consensus::{ReceiptEnvelope, Transaction}; use alloy_primitives::{Address, TxKind}; -use alloy_rpc_types::{ - AnyReceiptEnvelope, AnyTransactionReceipt, Log, ReceiptWithBloom, TransactionReceipt, -}; -use alloy_serde::{OtherFields, WithOtherFields}; -use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; +use alloy_rpc_types_eth::{Log, ReceiptWithBloom, TransactionReceipt}; +use reth_primitives::{Receipt, TransactionMeta, TransactionSigned, TxType}; +use reth_primitives_traits::SignedTransaction; use revm_primitives::calc_blob_gasprice; -use super::{EthApiError, EthResult}; +/// Builds an [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure. 
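Before the function itself, a usage sketch: the closure parameter is the extension point that lets other networks wrap the same L1 receipt data in their own envelope type. For plain Ethereum the variant constructor can serve as the closure (`tx`, `meta`, `receipt`, and `all_receipts` are assumed in scope with the types the signature below expects):

use alloy_consensus::ReceiptEnvelope;

// Hypothetical call producing a legacy-typed envelope; EthReceiptBuilder
// further down selects the variant by the receipt's tx type instead.
let rpc_receipt = build_receipt(&tx, meta, &receipt, &all_receipts, ReceiptEnvelope::Legacy)?;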
+pub fn build_receipt( + transaction: &TransactionSigned, + meta: TransactionMeta, + receipt: &Receipt, + all_receipts: &[Receipt], + build_envelope: impl FnOnce(ReceiptWithBloom>) -> T, +) -> EthResult> { + // Note: we assume this transaction is valid, because it's mined (or part of pending block) + // and we don't need to check for pre EIP-2 + let from = + transaction.recover_signer_unchecked().ok_or(EthApiError::InvalidTransactionSignature)?; + + // get the previous transaction cumulative gas used + let gas_used = if meta.index == 0 { + receipt.cumulative_gas_used + } else { + let prev_tx_idx = (meta.index - 1) as usize; + all_receipts + .get(prev_tx_idx) + .map(|prev_receipt| receipt.cumulative_gas_used - prev_receipt.cumulative_gas_used) + .unwrap_or_default() + }; + + let blob_gas_used = transaction.transaction.blob_gas_used(); + // Blob gas price should only be present if the transaction is a blob transaction + let blob_gas_price = blob_gas_used.and_then(|_| meta.excess_blob_gas.map(calc_blob_gasprice)); + let logs_bloom = receipt.bloom_slow(); + + // get number of logs in the block + let mut num_logs = 0; + for prev_receipt in all_receipts.iter().take(meta.index as usize) { + num_logs += prev_receipt.logs.len(); + } + + let logs: Vec = receipt + .logs + .iter() + .enumerate() + .map(|(tx_log_idx, log)| Log { + inner: log.clone(), + block_hash: Some(meta.block_hash), + block_number: Some(meta.block_number), + block_timestamp: Some(meta.timestamp), + transaction_hash: Some(meta.tx_hash), + transaction_index: Some(meta.index), + log_index: Some((num_logs + tx_log_idx) as u64), + removed: false, + }) + .collect(); + + let rpc_receipt = alloy_rpc_types_eth::Receipt { + status: receipt.success.into(), + cumulative_gas_used: receipt.cumulative_gas_used as u128, + logs, + }; + + let (contract_address, to) = match transaction.transaction.kind() { + TxKind::Create => (Some(from.create(transaction.transaction.nonce())), None), + TxKind::Call(addr) => (None, Some(Address(*addr))), + }; + + Ok(TransactionReceipt { + inner: build_envelope(ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }), + transaction_hash: meta.tx_hash, + transaction_index: Some(meta.index), + block_hash: Some(meta.block_hash), + block_number: Some(meta.block_number), + from, + to, + gas_used: gas_used as u128, + contract_address, + effective_gas_price: transaction.effective_gas_price(meta.base_fee), + // EIP-4844 fields + blob_gas_price, + blob_gas_used: blob_gas_used.map(u128::from), + authorization_list: transaction.authorization_list().map(|l| l.to_vec()), + }) +} /// Receipt response builder. #[derive(Debug)] -pub struct ReceiptBuilder { +pub struct EthReceiptBuilder { /// The base response body, contains L1 fields. - pub base: TransactionReceipt>, - /// Additional L2 fields. - pub other: OtherFields, + pub base: TransactionReceipt, } -impl ReceiptBuilder { +impl EthReceiptBuilder { /// Returns a new builder with the base response body (L1 fields) set. 
/// /// Note: This requires _all_ block receipts because we need to calculate the gas used by the @@ -31,97 +106,23 @@ impl ReceiptBuilder { receipt: &Receipt, all_receipts: &[Receipt], ) -> EthResult { - // Note: we assume this transaction is valid, because it's mined (or part of pending block) - // and we don't need to check for pre EIP-2 - let from = transaction - .recover_signer_unchecked() - .ok_or(EthApiError::InvalidTransactionSignature)?; - - // get the previous transaction cumulative gas used - let gas_used = if meta.index == 0 { - receipt.cumulative_gas_used - } else { - let prev_tx_idx = (meta.index - 1) as usize; - all_receipts - .get(prev_tx_idx) - .map(|prev_receipt| receipt.cumulative_gas_used - prev_receipt.cumulative_gas_used) - .unwrap_or_default() - }; - - let blob_gas_used = transaction.transaction.blob_gas_used(); - // Blob gas price should only be present if the transaction is a blob transaction - let blob_gas_price = - blob_gas_used.and_then(|_| meta.excess_blob_gas.map(calc_blob_gasprice)); - let logs_bloom = receipt.bloom_slow(); - - // get number of logs in the block - let mut num_logs = 0; - for prev_receipt in all_receipts.iter().take(meta.index as usize) { - num_logs += prev_receipt.logs.len(); - } - - let logs: Vec = receipt - .logs - .iter() - .enumerate() - .map(|(tx_log_idx, log)| Log { - inner: log.clone(), - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - block_timestamp: Some(meta.timestamp), - transaction_hash: Some(meta.tx_hash), - transaction_index: Some(meta.index), - log_index: Some((num_logs + tx_log_idx) as u64), - removed: false, - }) - .collect(); - - let rpc_receipt = alloy_rpc_types::Receipt { - status: receipt.success.into(), - cumulative_gas_used: receipt.cumulative_gas_used as u128, - logs, - }; - - let (contract_address, to) = match transaction.transaction.kind() { - TxKind::Create => (Some(from.create(transaction.transaction.nonce())), None), - TxKind::Call(addr) => (None, Some(Address(*addr))), - }; - - #[allow(clippy::needless_update)] - let base = TransactionReceipt { - inner: AnyReceiptEnvelope { - inner: ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, - r#type: transaction.transaction.tx_type().into(), - }, - transaction_hash: meta.tx_hash, - transaction_index: Some(meta.index), - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - from, - to, - gas_used: gas_used as u128, - contract_address, - effective_gas_price: transaction.effective_gas_price(meta.base_fee), - // TODO pre-byzantium receipts have a post-transaction state root - state_root: None, - // EIP-4844 fields - blob_gas_price, - blob_gas_used: blob_gas_used.map(u128::from), - authorization_list: transaction.authorization_list().map(|l| l.to_vec()), - }; - - Ok(Self { base, other: Default::default() }) - } + let base = build_receipt(transaction, meta, receipt, all_receipts, |receipt_with_bloom| { + match receipt.tx_type { + TxType::Legacy => ReceiptEnvelope::Legacy(receipt_with_bloom), + TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt_with_bloom), + TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt_with_bloom), + TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt_with_bloom), + TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt_with_bloom), + #[allow(unreachable_patterns)] + _ => unreachable!(), + } + })?; - /// Adds fields to response body. 
- pub fn add_other_fields(mut self, mut fields: OtherFields) -> Self { - self.other.append(&mut fields); - self + Ok(Self { base }) } /// Builds a receipt response from the base response body, and any set additional fields. - pub fn build(self) -> AnyTransactionReceipt { - let Self { base, other } = self; - WithOtherFields { inner: base, other } + pub fn build(self) -> TransactionReceipt { + self.base } } diff --git a/crates/rpc/rpc-eth-types/src/revm_utils.rs b/crates/rpc/rpc-eth-types/src/revm_utils.rs index 25c54fd4677..782ef569796 100644 --- a/crates/rpc/rpc-eth-types/src/revm_utils.rs +++ b/crates/rpc/rpc-eth-types/src/revm_utils.rs @@ -1,7 +1,7 @@ //! utilities for working with revm use alloy_primitives::{Address, B256, U256}; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ state::{AccountOverride, StateOverride}, BlockOverrides, }; @@ -265,7 +265,7 @@ where { // we need to fetch the account via the `DatabaseRef` to not update the state of the account, // which is modified via `Database::basic_ref` - let mut account_info = DatabaseRef::basic_ref(db, account)?.unwrap_or_default(); + let mut account_info = db.basic_ref(account)?.unwrap_or_default(); if let Some(nonce) = account_override.nonce { account_info.nonce = nonce; @@ -315,7 +315,7 @@ where #[cfg(test)] mod tests { use super::*; - use reth_primitives::constants::GWEI_TO_WEI; + use alloy_consensus::constants::GWEI_TO_WEI; #[test] fn test_ensure_0_fallback() { diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index a673da96720..a6ea5c4b788 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -1,30 +1,22 @@ //! Utilities for serving `eth_simulateV1` -use alloy_consensus::{Transaction as _, TxEip4844Variant, TxType, TypedTransaction}; -use alloy_primitives::Parity; -use alloy_rpc_types::{ +use alloy_consensus::{BlockHeader, Transaction as _, TxType}; +use alloy_rpc_types_eth::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, - Block, BlockTransactionsKind, + transaction::TransactionRequest, + Block, BlockTransactionsKind, Header, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use jsonrpsee_types::ErrorObject; -use reth_primitives::{ - logs_bloom, - proofs::{calculate_receipt_root, calculate_transaction_root}, - BlockBody, BlockWithSenders, Receipt, Signature, Transaction, TransactionSigned, - TransactionSignedNoHash, -}; -use reth_revm::database::StateProviderDatabase; +use reth_primitives::BlockWithSenders; +use reth_primitives_traits::{block::BlockTx, BlockBody as _, SignedTransaction}; use reth_rpc_server_types::result::rpc_err; use reth_rpc_types_compat::{block::from_block, TransactionCompat}; -use reth_storage_api::StateRootProvider; -use reth_trie::{HashedPostState, HashedStorage}; -use revm::{db::CacheDB, Database}; -use revm_primitives::{keccak256, Address, BlockEnv, Bytes, ExecutionResult, TxKind, B256, U256}; +use revm::Database; +use revm_primitives::{Address, Bytes, ExecutionResult, TxKind, U256}; use crate::{ - cache::db::StateProviderTraitObjWrapper, error::ToRpcError, EthApiError, RevertError, - RpcInvalidTransactionError, + error::{api::FromEthApiError, ToRpcError}, + EthApiError, RevertError, RpcInvalidTransactionError, }; /// Errors which may occur during `eth_simulateV1` execution. @@ -54,17 +46,18 @@ impl ToRpcError for EthSimulateError { } /// Goes over the list of [`TransactionRequest`]s and populates missing fields trying to resolve -/// them into [`TransactionSigned`]. 
+/// them into primitive transactions.
 ///
 /// If validation is enabled, the function will return error if any of the transactions can't be
 /// built right away.
-pub fn resolve_transactions<DB: Database>(
+pub fn resolve_transactions<DB: Database, Tx, T: TransactionCompat<Tx>>(
     txs: &mut [TransactionRequest],
     validation: bool,
     block_gas_limit: u64,
     chain_id: u64,
     db: &mut DB,
-) -> Result<Vec<TransactionSigned>, EthApiError>
+    tx_resp_builder: &T,
+) -> Result<Vec<Tx>, EthApiError>
 where
     EthApiError: From<DB::Error>,
 {
@@ -130,77 +123,44 @@ where
             }
         }
 
-        let Ok(tx) = tx.clone().build_typed_tx() else {
-            return Err(EthApiError::TransactionConversionError)
-        };
-
-        // Create an empty signature for the transaction.
-        let signature =
-            Signature::new(Default::default(), Default::default(), Parity::Parity(false));
-
-        let tx = match tx {
-            TypedTransaction::Legacy(tx) => {
-                TransactionSignedNoHash { transaction: Transaction::Legacy(tx), signature }
-                    .with_hash()
-            }
-            TypedTransaction::Eip2930(tx) => {
-                TransactionSignedNoHash { transaction: Transaction::Eip2930(tx), signature }
-                    .with_hash()
-            }
-            TypedTransaction::Eip1559(tx) => {
-                TransactionSignedNoHash { transaction: Transaction::Eip1559(tx), signature }
-                    .with_hash()
-            }
-            TypedTransaction::Eip4844(tx) => {
-                let tx = match tx {
-                    TxEip4844Variant::TxEip4844(tx) => tx,
-                    TxEip4844Variant::TxEip4844WithSidecar(tx) => tx.tx,
-                };
-                TransactionSignedNoHash { transaction: Transaction::Eip4844(tx), signature }
-                    .with_hash()
-            }
-            TypedTransaction::Eip7702(tx) => {
-                TransactionSignedNoHash { transaction: Transaction::Eip7702(tx), signature }
-                    .with_hash()
-            }
-        };
-
-        transactions.push(tx);
+        transactions.push(
+            tx_resp_builder
+                .build_simulate_v1_transaction(tx.clone())
+                .map_err(|e| EthApiError::other(e.into()))?,
+        );
     }
 
     Ok(transactions)
 }
 
 /// Handles outputs of the calls execution and builds a [`SimulatedBlock`].
-pub fn build_block<T: TransactionCompat>(
-    results: Vec<(Address, ExecutionResult)>,
-    transactions: Vec<TransactionSigned>,
-    block_env: &BlockEnv,
-    parent_hash: B256,
+#[expect(clippy::type_complexity)]
+pub fn build_simulated_block<T, B>(
+    senders: Vec<Address>,
+    results: Vec<ExecutionResult>,
     total_difficulty: U256,
     full_transactions: bool,
-    db: &CacheDB<StateProviderDatabase<StateProviderTraitObjWrapper<'_>>>,
-) -> Result<SimulatedBlock<Block<T::Transaction>>, EthApiError> {
+    tx_resp_builder: &T,
+    block: B,
+) -> Result<SimulatedBlock<Block<T::Transaction, Header<B::Header>>>, T::Error>
+where
+    T: TransactionCompat<BlockTx<B>, Error: FromEthApiError>,
+    B: reth_primitives_traits::Block,
+{
     let mut calls: Vec<SimCallResult> = Vec::with_capacity(results.len());
-    let mut senders = Vec::with_capacity(results.len());
-    let mut receipts = Vec::new();
     let mut log_index = 0;
-    for (transaction_index, ((sender, result), tx)) in
-        results.into_iter().zip(transactions.iter()).enumerate()
-    {
-        senders.push(sender);
-
+    for (index, (result, tx)) in results.iter().zip(block.body().transactions()).enumerate() {
         let call = match result {
             ExecutionResult::Halt { reason, gas_used } => {
-                let error = RpcInvalidTransactionError::halt(reason, tx.gas_limit());
+                let error = RpcInvalidTransactionError::halt(*reason, tx.gas_limit());
                 SimCallResult {
                     return_data: Bytes::new(),
                     error: Some(SimulateError {
                         code: error.error_code(),
                         message: error.to_string(),
                     }),
-                    gas_used,
+                    gas_used: *gas_used,
                     logs: Vec::new(),
                     status: false,
                 }
@@ -208,31 +168,31 @@ pub fn build_block(
             ExecutionResult::Revert { output, gas_used } => {
                 let error = RevertError::new(output.clone());
                 SimCallResult {
-                    return_data: output,
+                    return_data: output.clone(),
                     error: Some(SimulateError {
                         code: error.error_code(),
                         message: error.to_string(),
                     }),
-                    gas_used,
+                    gas_used: *gas_used,
                    status: false,
                    logs: Vec::new(),
                }
            }
             ExecutionResult::Success { output, gas_used, logs, .. } => SimCallResult {
-                return_data: output.into_data(),
+                return_data: output.clone().into_data(),
                 error: None,
-                gas_used,
+                gas_used: *gas_used,
                 logs: logs
-                    .into_iter()
+                    .iter()
                     .map(|log| {
                         log_index += 1;
-                        alloy_rpc_types::Log {
-                            inner: log,
+                        alloy_rpc_types_eth::Log {
+                            inner: log.clone(),
                             log_index: Some(log_index - 1),
-                            transaction_index: Some(transaction_index as u64),
-                            transaction_hash: Some(tx.hash()),
-                            block_number: Some(block_env.number.to()),
-                            block_timestamp: Some(block_env.timestamp.to()),
+                            transaction_index: Some(index as u64),
+                            transaction_hash: Some(*tx.tx_hash()),
+                            block_number: Some(block.header().number()),
+                            block_timestamp: Some(block.header().timestamp()),
                             ..Default::default()
                         }
                    })
@@ -241,69 +201,14 @@ pub fn build_block(
            },
        };
 
-        receipts.push(
-            #[allow(clippy::needless_update)]
-            Receipt {
-                tx_type: tx.tx_type(),
-                success: call.status,
-                cumulative_gas_used: call.gas_used + calls.iter().map(|c| c.gas_used).sum::<u64>(),
-                logs: call.logs.iter().map(|log| &log.inner).cloned().collect(),
-                ..Default::default()
-            }
-            .into(),
-        );
-
         calls.push(call);
     }
 
-    let mut hashed_state = HashedPostState::default();
-    for (address, account) in &db.accounts {
-        let hashed_address = keccak256(address);
-        hashed_state.accounts.insert(hashed_address, Some(account.info.clone().into()));
-
-        let storage = hashed_state
-            .storages
-            .entry(hashed_address)
-            .or_insert_with(|| HashedStorage::new(account.account_state.is_storage_cleared()));
-
-        for (slot, value) in &account.storage {
-            let slot = B256::from(*slot);
-            let hashed_slot = keccak256(slot);
-            storage.storage.insert(hashed_slot, *value);
-        }
-    }
-
-    let state_root = db.db.0.state_root(hashed_state)?;
-
-    let header = reth_primitives::Header {
-        beneficiary: block_env.coinbase,
-        difficulty: block_env.difficulty,
-        number: block_env.number.to(),
-        timestamp: block_env.timestamp.to(),
-        base_fee_per_gas: Some(block_env.basefee.to()),
-        gas_limit: block_env.gas_limit.to(),
-        gas_used: calls.iter().map(|c| c.gas_used).sum::<u64>(),
-
blob_gas_used: Some(0), - parent_hash, - receipts_root: calculate_receipt_root(&receipts), - transactions_root: calculate_transaction_root(&transactions), - state_root, - logs_bloom: logs_bloom(receipts.iter().flat_map(|r| r.receipt.logs.iter())), - mix_hash: block_env.prevrandao.unwrap_or_default(), - ..Default::default() - }; - - let block = BlockWithSenders { - block: reth_primitives::Block { - header, - body: BlockBody { transactions, ..Default::default() }, - }, - senders, - }; + let block = BlockWithSenders { block, senders }; let txs_kind = if full_transactions { BlockTransactionsKind::Full } else { BlockTransactionsKind::Hashes }; - let block = from_block::(block, total_difficulty, txs_kind, None)?; + let block = from_block(block, total_difficulty, txs_kind, None, tx_resp_builder)?; Ok(SimulatedBlock { inner: block, calls }) } diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs index c3ca1b503ae..f994638d3af 100644 --- a/crates/rpc/rpc-eth-types/src/transaction.rs +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -3,8 +3,9 @@ //! Transaction wrapper that labels transaction with its origin. use alloy_primitives::B256; -use alloy_rpc_types::TransactionInfo; -use reth_primitives::TransactionSignedEcRecovered; +use alloy_rpc_types_eth::TransactionInfo; +use reth_primitives::{RecoveredTx, TransactionSigned}; +use reth_primitives_traits::SignedTransaction; use reth_rpc_types_compat::{ transaction::{from_recovered, from_recovered_with_block_context}, TransactionCompat, @@ -12,15 +13,15 @@ use reth_rpc_types_compat::{ /// Represents from where a transaction was fetched. #[derive(Debug, Clone, Eq, PartialEq)] -pub enum TransactionSource { +pub enum TransactionSource { /// Transaction exists in the pool (Pending) - Pool(TransactionSignedEcRecovered), + Pool(RecoveredTx), /// Transaction already included in a block /// /// This can be a historical block or a pending block (received from the CL) Block { /// Transaction fetched via provider - transaction: TransactionSignedEcRecovered, + transaction: RecoveredTx, /// Index of the transaction in the block index: u64, /// Hash of the block. @@ -34,39 +35,42 @@ pub enum TransactionSource { // === impl TransactionSource === -impl TransactionSource { +impl TransactionSource { /// Consumes the type and returns the wrapped transaction. - pub fn into_recovered(self) -> TransactionSignedEcRecovered { + pub fn into_recovered(self) -> RecoveredTx { self.into() } /// Conversion into network specific transaction type. 
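The enum is generic over the transaction type (the `<T>` parameters here are assumptions restored from context; the tag-stripped diff above drops them). A sketch of how a caller consumes it, with `builder` being any `TransactionCompat` implementation for that type:

    fn source_to_rpc<T, B>(src: TransactionSource<T>, builder: &B) -> Result<B::Transaction, B::Error>
    where
        T: SignedTransaction,
        B: TransactionCompat<T>,
    {
        // pool transactions get a plain conversion, while mined ones additionally
        // carry block context (hash, number, index, base fee) into the response
        src.into_transaction(builder)
    }
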
- pub fn into_transaction(self) -> T::Transaction { + pub fn into_transaction>( + self, + resp_builder: &Builder, + ) -> Result { match self { - Self::Pool(tx) => from_recovered::(tx), + Self::Pool(tx) => from_recovered(tx, resp_builder), Self::Block { transaction, index, block_hash, block_number, base_fee } => { let tx_info = TransactionInfo { - hash: Some(transaction.hash()), + hash: Some(transaction.trie_hash()), index: Some(index), block_hash: Some(block_hash), block_number: Some(block_number), base_fee: base_fee.map(u128::from), }; - from_recovered_with_block_context::(transaction, tx_info) + from_recovered_with_block_context(transaction, tx_info, resp_builder) } } } /// Returns the transaction and block related info, if not pending - pub fn split(self) -> (TransactionSignedEcRecovered, TransactionInfo) { + pub fn split(self) -> (RecoveredTx, TransactionInfo) { match self { Self::Pool(tx) => { - let hash = tx.hash(); + let hash = tx.trie_hash(); (tx, TransactionInfo { hash: Some(hash), ..Default::default() }) } Self::Block { transaction, index, block_hash, block_number, base_fee } => { - let hash = transaction.hash(); + let hash = transaction.trie_hash(); ( transaction, TransactionInfo { @@ -82,8 +86,8 @@ impl TransactionSource { } } -impl From for TransactionSignedEcRecovered { - fn from(value: TransactionSource) -> Self { +impl From> for RecoveredTx { + fn from(value: TransactionSource) -> Self { match value { TransactionSource::Pool(tx) => tx, TransactionSource::Block { transaction, .. } => transaction, diff --git a/crates/rpc/rpc-eth-types/src/utils.rs b/crates/rpc/rpc-eth-types/src/utils.rs index 596acc74ce1..f12c819aea3 100644 --- a/crates/rpc/rpc-eth-types/src/utils.rs +++ b/crates/rpc/rpc-eth-types/src/utils.rs @@ -1,22 +1,23 @@ //! Commonly used code snippets -use alloy_eips::eip2718::Decodable2718; -use alloy_primitives::Bytes; -use reth_primitives::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; -use std::future::Future; - use super::{EthApiError, EthResult}; +use reth_primitives::{transaction::SignedTransactionIntoRecoveredExt, RecoveredTx}; +use reth_primitives_traits::SignedTransaction; +use std::future::Future; -/// Recovers a [`PooledTransactionsElementEcRecovered`] from an enveloped encoded byte stream. +/// Recovers a [`SignedTransaction`] from an enveloped encoded byte stream. +/// +/// This is a helper function that returns the appropriate RPC-specific error if the input data is +/// malformed. 
 ///
-/// See [`Decodable2718::decode_2718`]
-pub fn recover_raw_transaction(data: Bytes) -> EthResult<PooledTransactionsElementEcRecovered> {
+/// See [`alloy_eips::eip2718::Decodable2718::decode_2718`]
+pub fn recover_raw_transaction<T: SignedTransaction>(mut data: &[u8]) -> EthResult<RecoveredTx<T>> {
     if data.is_empty() {
         return Err(EthApiError::EmptyRawTransactionData)
     }
 
-    let transaction = PooledTransactionsElement::decode_2718(&mut data.as_ref())
-        .map_err(|_| EthApiError::FailedToDecodeSignedTransaction)?;
+    let transaction =
+        T::decode_2718(&mut data).map_err(|_| EthApiError::FailedToDecodeSignedTransaction)?;
 
     transaction.try_into_ecrecovered().or(Err(EthApiError::InvalidTransactionSignature))
 }
diff --git a/crates/rpc/rpc-layer/Cargo.toml b/crates/rpc/rpc-layer/Cargo.toml
index ec8dcb8229e..d44e5e89f01 100644
--- a/crates/rpc/rpc-layer/Cargo.toml
+++ b/crates/rpc/rpc-layer/Cargo.toml
@@ -17,10 +17,11 @@ http.workspace = true
 jsonrpsee-http-client.workspace = true
 pin-project.workspace = true
 tower.workspace = true
-
+tower-http = { workspace = true, features = ["full"] }
 tracing.workspace = true
 
 [dev-dependencies]
 reqwest.workspace = true
 tokio = { workspace = true, features = ["macros"] }
 jsonrpsee = { workspace = true, features = ["server"] }
+http-body-util.workspace = true
diff --git a/crates/rpc/rpc-layer/src/compression_layer.rs b/crates/rpc/rpc-layer/src/compression_layer.rs
new file mode 100644
index 00000000000..cf15f04aa78
--- /dev/null
+++ b/crates/rpc/rpc-layer/src/compression_layer.rs
@@ -0,0 +1,169 @@
+use jsonrpsee_http_client::{HttpBody, HttpRequest, HttpResponse};
+use std::{
+    future::Future,
+    pin::Pin,
+    task::{Context, Poll},
+};
+use tower::{Layer, Service};
+use tower_http::compression::{Compression, CompressionLayer as TowerCompressionLayer};
+
+/// This layer is a wrapper around [`tower_http::compression::CompressionLayer`] that integrates
+/// with jsonrpsee's HTTP types. It automatically compresses responses based on the client's
+/// `Accept-Encoding` header.
+#[allow(missing_debug_implementations)]
+#[derive(Clone)]
+pub struct CompressionLayer {
+    inner_layer: TowerCompressionLayer,
+}
+
+impl CompressionLayer {
+    /// Creates a new compression layer with zstd, gzip, brotli and deflate enabled.
+    pub fn new() -> Self {
+        Self {
+            inner_layer: TowerCompressionLayer::new().gzip(true).br(true).deflate(true).zstd(true),
+        }
+    }
+}
+
+impl Default for CompressionLayer {
+    /// Creates a new compression layer with default settings.
+    /// See [`CompressionLayer::new`] for details.
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl<S> Layer<S> for CompressionLayer {
+    type Service = CompressionService<S>;
+
+    fn layer(&self, inner: S) -> Self::Service {
+        CompressionService { compression: self.inner_layer.layer(inner) }
+    }
+}
+
+/// Service that performs response compression.
+///
+/// Created by [`CompressionLayer`].
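A sketch of installing the layer on a jsonrpsee server through tower middleware (assuming jsonrpsee's `set_http_middleware` builder method; untested):

    use reth_rpc_layer::CompressionLayer;

    async fn serve() -> Result<(), Box<dyn std::error::Error>> {
        // responses are compressed only when the client sends Accept-Encoding
        let http_middleware = tower::ServiceBuilder::new().layer(CompressionLayer::new());
        let server = jsonrpsee::server::ServerBuilder::default()
            .set_http_middleware(http_middleware)
            .build("127.0.0.1:8545")
            .await?;
        // ... register modules and call `server.start(...)` as usual
        Ok(())
    }
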
+#[allow(missing_debug_implementations)] +#[derive(Clone)] +pub struct CompressionService { + compression: Compression, +} + +impl Service for CompressionService +where + S: Service, + S::Future: Send + 'static, +{ + type Response = HttpResponse; + type Error = S::Error; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.compression.poll_ready(cx) + } + + fn call(&mut self, req: HttpRequest) -> Self::Future { + let fut = self.compression.call(req); + + Box::pin(async move { + let resp = fut.await?; + let (parts, compressed_body) = resp.into_parts(); + let http_body = HttpBody::new(compressed_body); + + Ok(Self::Response::from_parts(parts, http_body)) + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use http::header::{ACCEPT_ENCODING, CONTENT_ENCODING}; + use http_body_util::BodyExt; + use jsonrpsee_http_client::{HttpRequest, HttpResponse}; + use std::{convert::Infallible, future::ready}; + + const TEST_DATA: &str = "compress test data "; + const REPEAT_COUNT: usize = 1000; + + #[derive(Clone)] + struct MockRequestService; + + impl Service for MockRequestService { + type Response = HttpResponse; + type Error = Infallible; + type Future = std::future::Ready>; + + fn poll_ready( + &mut self, + _: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + std::task::Poll::Ready(Ok(())) + } + + fn call(&mut self, _: HttpRequest) -> Self::Future { + let body = HttpBody::from(TEST_DATA.repeat(REPEAT_COUNT)); + let response = HttpResponse::builder().body(body).unwrap(); + ready(Ok(response)) + } + } + + fn setup_compression_service( + ) -> impl Service { + CompressionLayer::new().layer(MockRequestService) + } + + async fn get_response_size(response: HttpResponse) -> usize { + // Get the total size of the response body + response.into_body().collect().await.unwrap().to_bytes().len() + } + + #[tokio::test] + async fn test_gzip_compression() { + let mut service = setup_compression_service(); + let request = + HttpRequest::builder().header(ACCEPT_ENCODING, "gzip").body(HttpBody::empty()).unwrap(); + + let uncompressed_len = TEST_DATA.repeat(REPEAT_COUNT).len(); + + // Make the request + let response = service.call(request).await.unwrap(); + + // Verify the response has gzip content-encoding + assert_eq!( + response.headers().get(CONTENT_ENCODING).unwrap(), + "gzip", + "Response should be gzip encoded" + ); + + // Verify the response body is actually compressed (should be smaller than original) + let compressed_size = get_response_size(response).await; + assert!( + compressed_size < uncompressed_len, + "Compressed size ({compressed_size}) should be smaller than original size ({uncompressed_len})" + ); + } + + #[tokio::test] + async fn test_no_compression_when_not_requested() { + // Create a service with compression + let mut service = setup_compression_service(); + let request = HttpRequest::builder().body(HttpBody::empty()).unwrap(); + + let response = service.call(request).await.unwrap(); + assert!( + response.headers().get(CONTENT_ENCODING).is_none(), + "Response should not be compressed when not requested" + ); + + let uncompressed_len = TEST_DATA.repeat(REPEAT_COUNT).len(); + + // Verify the response body matches the original size + let response_size = get_response_size(response).await; + assert!( + response_size == uncompressed_len, + "Response size ({response_size}) should equal original size ({uncompressed_len})" + ); + } +} diff --git a/crates/rpc/rpc-layer/src/lib.rs b/crates/rpc/rpc-layer/src/lib.rs index 8387bb160e8..540daf5592b 
100644 --- a/crates/rpc/rpc-layer/src/lib.rs +++ b/crates/rpc/rpc-layer/src/lib.rs @@ -13,9 +13,11 @@ use jsonrpsee_http_client::HttpResponse; mod auth_client_layer; mod auth_layer; +mod compression_layer; mod jwt_validator; pub use auth_layer::{AuthService, ResponseFuture}; +pub use compression_layer::CompressionLayer; // Export alloy JWT types pub use alloy_rpc_types_engine::{Claims, JwtError, JwtSecret}; diff --git a/crates/rpc/rpc-server-types/Cargo.toml b/crates/rpc/rpc-server-types/Cargo.toml index 08ecd394774..275d8ea561b 100644 --- a/crates/rpc/rpc-server-types/Cargo.toml +++ b/crates/rpc/rpc-server-types/Cargo.toml @@ -14,11 +14,11 @@ workspace = true [dependencies] reth-errors.workspace = true reth-network-api.workspace = true -reth-primitives.workspace = true # ethereum alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-eips.workspace = true # rpc jsonrpsee-core.workspace = true @@ -27,4 +27,3 @@ jsonrpsee-types.workspace = true # misc strum = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] } - diff --git a/crates/rpc/rpc-server-types/src/constants.rs b/crates/rpc/rpc-server-types/src/constants.rs index 0bc44181932..89b496da0fc 100644 --- a/crates/rpc/rpc-server-types/src/constants.rs +++ b/crates/rpc/rpc-server-types/src/constants.rs @@ -51,9 +51,9 @@ pub const DEFAULT_MAX_SIMULATE_BLOCKS: u64 = 256; /// The default eth historical proof window. pub const DEFAULT_ETH_PROOF_WINDOW: u64 = 0; -/// Maximum eth historical proof window. Equivalent to roughly one and a half months of data on a 12 -/// second block time, and a week on a 2 second block time. -pub const MAX_ETH_PROOF_WINDOW: u64 = 7 * 24 * 60 * 60 / 2; +/// Maximum eth historical proof window. Equivalent to roughly 6 months of data on a 12 +/// second block time, and a month on a 2 second block time. +pub const MAX_ETH_PROOF_WINDOW: u64 = 28 * 24 * 60 * 60 / 2; /// GPO specific constants pub mod gas_oracle { @@ -80,9 +80,8 @@ pub mod gas_oracle { /// The default gas limit for `eth_call` and adjacent calls. /// - /// This is different from the default to regular 30M block gas limit - /// [`ETHEREUM_BLOCK_GAS_LIMIT`](reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT) to allow - /// for more complex calls. + /// This is different from the default to regular 30M block gas limit `ETHEREUM_BLOCK_GAS_LIMIT` + /// to allow for more complex calls. pub const RPC_DEFAULT_GAS_CAP: u64 = 50_000_000; /// Allowed error ratio for gas estimation @@ -114,8 +113,8 @@ pub mod cache { /// Default cache size for the receipts cache: 2000 receipts. pub const DEFAULT_RECEIPT_CACHE_MAX_LEN: u32 = 2000; - /// Default cache size for the env cache: 1000 envs. - pub const DEFAULT_ENV_CACHE_MAX_LEN: u32 = 1000; + /// Default cache size for the header cache: 1000 headers. + pub const DEFAULT_HEADER_CACHE_MAX_LEN: u32 = 1000; /// Default number of concurrent database requests. pub const DEFAULT_CONCURRENT_DB_REQUESTS: usize = 512; diff --git a/crates/rpc/rpc-server-types/src/module.rs b/crates/rpc/rpc-server-types/src/module.rs index 72a5e7c8583..7dc92993611 100644 --- a/crates/rpc/rpc-server-types/src/module.rs +++ b/crates/rpc/rpc-server-types/src/module.rs @@ -140,6 +140,15 @@ impl RpcModuleSelection { (None, None) => true, } } + + /// Returns true if the selection contains the given module. 
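A usage sketch for the `contains` helper defined just below, grounded in the standard module set (`eth`, `net`, `web3`):

    let selection = RpcModuleSelection::Standard;
    assert!(selection.contains(&RethRpcModule::Eth));
    assert!(!selection.contains(&RethRpcModule::Admin));
    // `All` trivially contains every variant, including the new ones
    assert!(RpcModuleSelection::All.contains(&RethRpcModule::Flashbots));
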
+ pub fn contains(&self, module: &RethRpcModule) -> bool { + match self { + Self::All => true, + Self::Standard => Self::STANDARD_MODULES.contains(module), + Self::Selection(s) => s.contains(module), + } + } } impl From<&HashSet> for RpcModuleSelection { @@ -199,9 +208,12 @@ impl FromStr for RpcModuleSelection { } let mut modules = s.split(',').map(str::trim).peekable(); let first = modules.peek().copied().ok_or(ParseError::VariantNotFound)?; - match first { - "all" | "All" => Ok(Self::All), - "none" | "None" => Ok(Self::Selection(Default::default())), + // We convert to lowercase to make the comparison case-insensitive + // + // This is a way to allow typing "all" and "ALL" and "All" and "aLl" etc. + match first.to_lowercase().as_str() { + "all" => Ok(Self::All), + "none" => Ok(Self::Selection(Default::default())), _ => Self::try_from_selection(modules), } } @@ -255,6 +267,10 @@ pub enum RethRpcModule { Reth, /// `ots_` module Ots, + /// `flashbots_` module + Flashbots, + /// `miner_` module + Miner, } // === impl RethRpcModule === @@ -303,6 +319,8 @@ impl FromStr for RethRpcModule { "rpc" => Self::Rpc, "reth" => Self::Reth, "ots" => Self::Ots, + "flashbots" => Self::Flashbots, + "miner" => Self::Miner, _ => return Err(ParseError::VariantNotFound), }) } @@ -329,3 +347,229 @@ impl Serialize for RethRpcModule { s.serialize_str(self.as_ref()) } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_all_modules() { + let all_modules = RpcModuleSelection::all_modules(); + assert_eq!(all_modules.len(), RethRpcModule::variant_count()); + } + + #[test] + fn test_standard_modules() { + let standard_modules = RpcModuleSelection::standard_modules(); + let expected_modules: HashSet = + HashSet::from([RethRpcModule::Eth, RethRpcModule::Net, RethRpcModule::Web3]); + assert_eq!(standard_modules, expected_modules); + } + + #[test] + fn test_default_ipc_modules() { + let default_ipc_modules = RpcModuleSelection::default_ipc_modules(); + assert_eq!(default_ipc_modules, RpcModuleSelection::all_modules()); + } + + #[test] + fn test_try_from_selection_success() { + let selection = vec!["eth", "admin"]; + let config = RpcModuleSelection::try_from_selection(selection).unwrap(); + assert_eq!(config, RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin])); + } + + #[test] + fn test_rpc_module_selection_len() { + let all_modules = RpcModuleSelection::All; + let standard = RpcModuleSelection::Standard; + let selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + + assert_eq!(all_modules.len(), RethRpcModule::variant_count()); + assert_eq!(standard.len(), 3); + assert_eq!(selection.len(), 2); + } + + #[test] + fn test_rpc_module_selection_is_empty() { + let empty_selection = RpcModuleSelection::from(HashSet::new()); + assert!(empty_selection.is_empty()); + + let non_empty_selection = RpcModuleSelection::from([RethRpcModule::Eth]); + assert!(!non_empty_selection.is_empty()); + } + + #[test] + fn test_rpc_module_selection_iter_selection() { + let all_modules = RpcModuleSelection::All; + let standard = RpcModuleSelection::Standard; + let selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + + assert_eq!(all_modules.iter_selection().count(), RethRpcModule::variant_count()); + assert_eq!(standard.iter_selection().count(), 3); + assert_eq!(selection.iter_selection().count(), 2); + } + + #[test] + fn test_rpc_module_selection_to_selection() { + let all_modules = RpcModuleSelection::All; + let standard = RpcModuleSelection::Standard; + let 
selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + + assert_eq!(all_modules.to_selection(), RpcModuleSelection::all_modules()); + assert_eq!(standard.to_selection(), RpcModuleSelection::standard_modules()); + assert_eq!( + selection.to_selection(), + HashSet::from([RethRpcModule::Eth, RethRpcModule::Admin]) + ); + } + + #[test] + fn test_rpc_module_selection_are_identical() { + // Test scenario: both selections are `All` + // + // Since both selections include all possible RPC modules, they should be considered + // identical. + let all_modules = RpcModuleSelection::All; + assert!(RpcModuleSelection::are_identical(Some(&all_modules), Some(&all_modules))); + + // Test scenario: both `http` and `ws` are `None` + // + // When both arguments are `None`, the function should return `true` because no modules are + // selected. + assert!(RpcModuleSelection::are_identical(None, None)); + + // Test scenario: both selections contain identical sets of specific modules + // + // In this case, both selections contain the same modules (`Eth` and `Admin`), + // so they should be considered identical. + let selection1 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + let selection2 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + assert!(RpcModuleSelection::are_identical(Some(&selection1), Some(&selection2))); + + // Test scenario: one selection is `All`, the other is `Standard` + // + // `All` includes all possible modules, while `Standard` includes a specific set of modules. + // Since `Standard` does not cover all modules, these two selections should not be + // considered identical. + let standard = RpcModuleSelection::Standard; + assert!(!RpcModuleSelection::are_identical(Some(&all_modules), Some(&standard))); + + // Test scenario: one is `None`, the other is an empty selection + // + // When one selection is `None` and the other is an empty selection (no modules), + // they should be considered identical because neither selects any modules. + let empty_selection = RpcModuleSelection::Selection(HashSet::new()); + assert!(RpcModuleSelection::are_identical(None, Some(&empty_selection))); + assert!(RpcModuleSelection::are_identical(Some(&empty_selection), None)); + + // Test scenario: one is `None`, the other is a non-empty selection + // + // If one selection is `None` and the other contains modules, they should not be considered + // identical because `None` represents no selection, while the other explicitly + // selects modules. + let non_empty_selection = RpcModuleSelection::from([RethRpcModule::Eth]); + assert!(!RpcModuleSelection::are_identical(None, Some(&non_empty_selection))); + assert!(!RpcModuleSelection::are_identical(Some(&non_empty_selection), None)); + + // Test scenario: `All` vs. non-full selection + // + // If one selection is `All` (which includes all modules) and the other contains only a + // subset of modules, they should not be considered identical. + let partial_selection = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net]); + assert!(!RpcModuleSelection::are_identical(Some(&all_modules), Some(&partial_selection))); + + // Test scenario: full selection vs `All` + // + // If the other selection explicitly selects all available modules, it should be identical + // to `All`. 
+ let full_selection = + RpcModuleSelection::from(RethRpcModule::modules().into_iter().collect::>()); + assert!(RpcModuleSelection::are_identical(Some(&all_modules), Some(&full_selection))); + + // Test scenario: different non-empty selections + // + // If the two selections contain different sets of modules, they should not be considered + // identical. + let selection3 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net]); + let selection4 = RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Web3]); + assert!(!RpcModuleSelection::are_identical(Some(&selection3), Some(&selection4))); + + // Test scenario: `Standard` vs an equivalent selection + // The `Standard` selection includes a predefined set of modules. If we explicitly create + // a selection with the same set of modules, they should be considered identical. + let matching_standard = + RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net, RethRpcModule::Web3]); + assert!(RpcModuleSelection::are_identical(Some(&standard), Some(&matching_standard))); + + // Test scenario: `Standard` vs non-matching selection + // + // If the selection does not match the modules included in `Standard`, they should not be + // considered identical. + let non_matching_standard = + RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Net]); + assert!(!RpcModuleSelection::are_identical(Some(&standard), Some(&non_matching_standard))); + } + + #[test] + fn test_rpc_module_selection_from_str() { + // Test empty string returns default selection + let result = RpcModuleSelection::from_str(""); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default())); + + // Test "all" (case insensitive) returns All variant + let result = RpcModuleSelection::from_str("all"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::All); + + let result = RpcModuleSelection::from_str("All"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::All); + + let result = RpcModuleSelection::from_str("ALL"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::All); + + // Test "none" (case insensitive) returns empty selection + let result = RpcModuleSelection::from_str("none"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default())); + + let result = RpcModuleSelection::from_str("None"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default())); + + let result = RpcModuleSelection::from_str("NONE"); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), RpcModuleSelection::Selection(Default::default())); + + // Test valid selections: "eth,admin" + let result = RpcModuleSelection::from_str("eth,admin"); + assert!(result.is_ok()); + let expected_selection = + RpcModuleSelection::from([RethRpcModule::Eth, RethRpcModule::Admin]); + assert_eq!(result.unwrap(), expected_selection); + + // Test valid selection with extra spaces: " eth , admin " + let result = RpcModuleSelection::from_str(" eth , admin "); + assert!(result.is_ok()); + assert_eq!(result.unwrap(), expected_selection); + + // Test invalid selection should return error + let result = RpcModuleSelection::from_str("invalid,unknown"); + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), ParseError::VariantNotFound); + + // Test single valid selection: "eth" + let result = RpcModuleSelection::from_str("eth"); + assert!(result.is_ok()); + let expected_selection = 
RpcModuleSelection::from([RethRpcModule::Eth]); + assert_eq!(result.unwrap(), expected_selection); + + // Test single invalid selection: "unknown" + let result = RpcModuleSelection::from_str("unknown"); + assert!(result.is_err()); + assert_eq!(result.unwrap_err(), ParseError::VariantNotFound); + } +} diff --git a/crates/rpc/rpc-server-types/src/result.rs b/crates/rpc/rpc-server-types/src/result.rs index 78e6436643a..5d1b702e9fc 100644 --- a/crates/rpc/rpc-server-types/src/result.rs +++ b/crates/rpc/rpc-server-types/src/result.rs @@ -2,9 +2,10 @@ use std::fmt; +use alloy_eips::BlockId; use alloy_rpc_types_engine::PayloadError; use jsonrpsee_core::RpcResult; -use reth_primitives::BlockId; +use reth_errors::ConsensusError; /// Helper trait to easily convert various `Result` types into [`RpcResult`] pub trait ToRpcResult: Sized { @@ -102,6 +103,7 @@ macro_rules! impl_to_rpc_result { } impl_to_rpc_result!(PayloadError); +impl_to_rpc_result!(ConsensusError); impl_to_rpc_result!(reth_errors::RethError); impl_to_rpc_result!(reth_errors::ProviderError); impl_to_rpc_result!(reth_network_api::NetworkError); diff --git a/crates/rpc/rpc-testing-util/Cargo.toml b/crates/rpc/rpc-testing-util/Cargo.toml index 4977c3a2c40..149073b1c68 100644 --- a/crates/rpc/rpc-testing-util/Cargo.toml +++ b/crates/rpc/rpc-testing-util/Cargo.toml @@ -19,8 +19,8 @@ reth-rpc-api = { workspace = true, features = ["client"] } # ethereum alloy-primitives.workspace = true alloy-rpc-types-eth.workspace = true -alloy-rpc-types.workspace = true alloy-rpc-types-trace.workspace = true +alloy-eips.workspace = true # async futures.workspace = true @@ -36,4 +36,4 @@ similar-asserts.workspace = true tokio = { workspace = true, features = ["rt-multi-thread", "macros", "rt"] } reth-rpc-eth-api.workspace = true jsonrpsee-http-client.workspace = true -alloy-rpc-types-trace.workspace = true \ No newline at end of file +alloy-rpc-types-trace.workspace = true diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index f50064e80ce..36a01fa5903 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -6,16 +6,16 @@ use std::{ task::{Context, Poll}, }; +use alloy_eips::BlockId; use alloy_primitives::{TxHash, B256}; -use alloy_rpc_types::{Block, Transaction}; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{transaction::TransactionRequest, Block, Header, Transaction}; use alloy_rpc_types_trace::{ common::TraceResult, geth::{GethDebugTracerType, GethDebugTracingOptions, GethTrace}, }; use futures::{Stream, StreamExt}; use jsonrpsee::core::client::Error as RpcError; -use reth_primitives::{BlockId, Receipt}; +use reth_primitives::Receipt; use reth_rpc_api::{clients::DebugApiClient, EthApiClient}; const NOOP_TRACER: &str = include_str!("../assets/noop-tracer.js"); @@ -77,7 +77,7 @@ pub trait DebugApiExt { impl DebugApiExt for T where - T: EthApiClient + DebugApiClient + Sync, + T: EthApiClient + DebugApiClient + Sync, { type Provider = T; @@ -292,7 +292,7 @@ pub struct DebugTraceTransactionsStream<'a> { stream: Pin + 'a>>, } -impl<'a> DebugTraceTransactionsStream<'a> { +impl DebugTraceTransactionsStream<'_> { /// Returns the next error result of the stream. pub async fn next_err(&mut self) -> Option<(RpcError, TxHash)> { loop { @@ -324,7 +324,7 @@ pub struct DebugTraceBlockStream<'a> { stream: Pin + 'a>>, } -impl<'a> DebugTraceBlockStream<'a> { +impl DebugTraceBlockStream<'_> { /// Returns the next error result of the stream. 
pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> { loop { diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index c6dc16cf106..ee3fce68d3b 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -1,16 +1,16 @@ //! Helpers for testing trace calls. +use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, TxHash, B256}; -use alloy_rpc_types::Index; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{transaction::TransactionRequest, Index}; use alloy_rpc_types_trace::{ filter::TraceFilter, + opcode::BlockOpcodeGas, parity::{LocalizedTransactionTrace, TraceResults, TraceType}, tracerequest::TraceCallRequest, }; use futures::{Stream, StreamExt}; use jsonrpsee::core::client::Error as RpcError; -use reth_primitives::BlockId; use reth_rpc_api::clients::TraceApiClient; use std::{ pin::Pin, @@ -24,6 +24,9 @@ type RawTransactionTraceResult<'a> = /// A result type for the `trace_block` method that also captures the requested block. pub type TraceBlockResult = Result<(Vec, BlockId), (RpcError, BlockId)>; +/// A result type for the `trace_blockOpcodeGas` method that also captures the requested block. +pub type TraceBlockOpCodeGasResult = Result<(BlockOpcodeGas, BlockId), (RpcError, BlockId)>; + /// Type alias representing the result of replaying a transaction. pub type ReplayTransactionResult = Result<(TraceResults, TxHash), (RpcError, TxHash)>; @@ -66,6 +69,18 @@ pub trait TraceApiExt { I: IntoIterator, B: Into; + /// Returns a new stream that yields the traces the opcodes for the given blocks. + /// + /// See also [`StreamExt::buffered`]. + fn trace_block_opcode_gas_unordered( + &self, + params: I, + n: usize, + ) -> TraceBlockOpcodeGasStream<'_> + where + I: IntoIterator, + B: Into; + /// Returns a new stream that replays the transactions for the given transaction hashes. /// /// This returns all results in order. @@ -270,6 +285,26 @@ impl TraceApiExt for T { TraceBlockStream { stream: Box::pin(stream) } } + fn trace_block_opcode_gas_unordered( + &self, + params: I, + n: usize, + ) -> TraceBlockOpcodeGasStream<'_> + where + I: IntoIterator, + B: Into, + { + let blocks = params.into_iter().map(|b| b.into()).collect::>(); + let stream = futures::stream::iter(blocks.into_iter().map(move |block| async move { + match self.trace_block_opcode_gas(block).await { + Ok(result) => Ok((result.unwrap(), block)), + Err(err) => Err((err, block)), + } + })) + .buffered(n); + TraceBlockOpcodeGasStream { stream: Box::pin(stream) } + } + fn replay_transactions( &self, tx_hashes: I, @@ -381,7 +416,7 @@ pub struct TraceBlockStream<'a> { stream: Pin + 'a>>, } -impl<'a> TraceBlockStream<'a> { +impl TraceBlockStream<'_> { /// Returns the next error result of the stream. pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> { loop { @@ -407,6 +442,38 @@ impl std::fmt::Debug for TraceBlockStream<'_> { } } +/// A stream that yields the opcodes for the requested blocks. +#[must_use = "streams do nothing unless polled"] +pub struct TraceBlockOpcodeGasStream<'a> { + stream: Pin + 'a>>, +} + +impl TraceBlockOpcodeGasStream<'_> { + /// Returns the next error result of the stream. + pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> { + loop { + match self.next().await? 
 {
+                Ok(_) => continue,
+                Err(err) => return Some(err),
+            }
+        }
+    }
+}
+
+impl Stream for TraceBlockOpcodeGasStream<'_> {
+    type Item = TraceBlockOpCodeGasResult;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        self.stream.as_mut().poll_next(cx)
+    }
+}
+
+impl std::fmt::Debug for TraceBlockOpcodeGasStream<'_> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("TraceBlockOpcodeGasStream").finish_non_exhaustive()
+    }
+}
+
 /// A utility to compare RPC responses from two different clients.
 ///
 /// The `RpcComparer` is designed to perform comparisons between two RPC clients.
@@ -514,9 +581,9 @@ where
 #[cfg(test)]
 mod tests {
     use super::*;
+    use alloy_eips::BlockNumberOrTag;
     use alloy_rpc_types_trace::filter::TraceFilterMode;
     use jsonrpsee::http_client::HttpClientBuilder;
-    use reth_primitives::BlockNumberOrTag;
 
     const fn assert_is_stream<St: Stream>(_: &St) {}
@@ -671,4 +738,14 @@ mod tests {
         println!("Total successes: {successes}");
         println!("Total failures: {failures}");
     }
+
+    #[tokio::test]
+    #[ignore]
+    async fn block_opcode_gas_stream() {
+        let client = HttpClientBuilder::default().build("http://localhost:8545").unwrap();
+        let block = vec![BlockNumberOrTag::Latest];
+        let mut stream = client.trace_block_opcode_gas_unordered(block, 2);
+        assert_is_stream(&stream);
+        let _opcodes = stream.next().await.unwrap();
+    }
 }
diff --git a/crates/rpc/rpc-testing-util/tests/it/trace.rs b/crates/rpc/rpc-testing-util/tests/it/trace.rs
index b0fccefbb46..47932bd7302 100644
--- a/crates/rpc/rpc-testing-util/tests/it/trace.rs
+++ b/crates/rpc/rpc-testing-util/tests/it/trace.rs
@@ -1,7 +1,7 @@
 //! Integration tests for the trace API.
 
 use alloy_primitives::map::HashSet;
-use alloy_rpc_types::{Block, Transaction};
+use alloy_rpc_types_eth::{Block, Header, Transaction};
 use alloy_rpc_types_trace::{
     filter::TraceFilter, parity::TraceType, tracerequest::TraceCallRequest,
 };
@@ -113,7 +113,7 @@ async fn debug_trace_block_entire_chain() {
     let client = HttpClientBuilder::default().build(url).unwrap();
     let current_block: u64 =
-        >::block_number(&client)
+        >::block_number(&client)
             .await
             .unwrap()
             .try_into()
             .unwrap();
@@ -126,3 +126,47 @@
     }
     println!("Traced all blocks in {:?}", now.elapsed());
 }
+
+/// This is intended to be run locally against a running node. This traces all blocks for a given
+/// chain.
+///
+/// This is a noop if env var `RETH_RPC_TEST_NODE_URL` is not set.
+#[tokio::test(flavor = "multi_thread")] +async fn debug_trace_block_opcodes_entire_chain() { + let opcodes7702 = ["EXTCODESIZE", "EXTCODECOPY", "EXTCODEHASH"]; + let url = parse_env_url("RETH_RPC_TEST_NODE_URL"); + if url.is_err() { + return + } + let url = url.unwrap(); + + let client = HttpClientBuilder::default().build(url).unwrap(); + let current_block: u64 = + >::block_number(&client) + .await + .unwrap() + .try_into() + .unwrap(); + let range = 0..=current_block; + println!("Tracing blocks {range:?} for opcodes"); + let mut stream = client.trace_block_opcode_gas_unordered(range, 2).enumerate(); + let now = Instant::now(); + while let Some((num, next)) = stream.next().await { + match next { + Ok((block_opcodes, block)) => { + for opcode in opcodes7702 { + if block_opcodes.contains(opcode) { + eprintln!("Found opcode {opcode}: in {block}"); + } + } + } + Err((err, block)) => { + eprintln!("Error tracing block {block:?}: {err}"); + } + }; + if num % 10000 == 0 { + println!("Traced {num} blocks"); + } + } + println!("Traced all blocks in {:?}", now.elapsed()); +} diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index 8e436f0d393..d4e1aac88bf 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -14,17 +14,19 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-trie-common.workspace = true +reth-primitives-traits.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true -alloy-rpc-types.workspace = true alloy-rpc-types-eth = { workspace = true, default-features = false, features = ["serde"] } -alloy-serde.workspace = true alloy-rpc-types-engine.workspace = true alloy-consensus.workspace = true +# io +serde.workspace = true +jsonrpsee-types.workspace = true + [dev-dependencies] -serde_json.workspace = true \ No newline at end of file +serde_json.workspace = true diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index fc8ea9e1c48..d3238757fc5 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -1,13 +1,13 @@ //! Compatibility functions for rpc `Block` type. +use alloy_consensus::{BlockHeader, Sealable, Sealed}; +use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{B256, U256}; -use alloy_rlp::Encodable; -use alloy_rpc_types::{ - Block, BlockError, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, -}; -use reth_primitives::{ - Block as PrimitiveBlock, BlockWithSenders, Header as PrimitiveHeader, SealedHeader, Withdrawals, +use alloy_rpc_types_eth::{ + Block, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, }; +use reth_primitives::{transaction::SignedTransactionIntoRecoveredExt, BlockWithSenders}; +use reth_primitives_traits::{Block as BlockTrait, BlockBody, SignedTransaction}; use crate::{transaction::from_recovered_with_block_context, TransactionCompat}; @@ -15,17 +15,25 @@ use crate::{transaction::from_recovered_with_block_context, TransactionCompat}; /// [`BlockTransactionsKind`] /// /// If a `block_hash` is provided, then this is used, otherwise the block hash is computed. 
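A sketch of a call site for the reworked `from_block` below (the variables are hypothetical; the argument order matches the call used in `build_simulated_block` above):

    let rpc_block = from_block(
        block_with_senders,
        total_difficulty,
        BlockTransactionsKind::Full, // or ::Hashes to return only transaction hashes
        None,                        // block hash is recomputed when not supplied
        &tx_resp_builder,
    )?;
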
-pub fn from_block( - block: BlockWithSenders, +#[expect(clippy::type_complexity)] +pub fn from_block( + block: BlockWithSenders, total_difficulty: U256, kind: BlockTransactionsKind, block_hash: Option, -) -> Result, BlockError> { + tx_resp_builder: &T, +) -> Result>, T::Error> +where + T: TransactionCompat<<::Body as BlockBody>::Transaction>, + B: BlockTrait, +{ match kind { BlockTransactionsKind::Hashes => { - Ok(from_block_with_tx_hashes::(block, total_difficulty, block_hash)) + Ok(from_block_with_tx_hashes::(block, total_difficulty, block_hash)) + } + BlockTransactionsKind::Full => { + from_block_full::(block, total_difficulty, block_hash, tx_resp_builder) } - BlockTransactionsKind::Full => from_block_full::(block, total_difficulty, block_hash), } } @@ -34,13 +42,16 @@ pub fn from_block( /// /// This will populate the `transactions` field with only the hashes of the transactions in the /// block: [`BlockTransactions::Hashes`] -pub fn from_block_with_tx_hashes( - block: BlockWithSenders, +pub fn from_block_with_tx_hashes( + block: BlockWithSenders, total_difficulty: U256, block_hash: Option, -) -> Block { - let block_hash = block_hash.unwrap_or_else(|| block.header.hash_slow()); - let transactions = block.body.transactions().map(|tx| tx.hash()).collect(); +) -> Block> +where + B: BlockTrait, +{ + let block_hash = block_hash.unwrap_or_else(|| block.header().hash_slow()); + let transactions = block.body().transactions().iter().map(|tx| *tx.tx_hash()).collect(); from_block_with_transactions( block.length(), @@ -56,24 +67,30 @@ pub fn from_block_with_tx_hashes( /// /// This will populate the `transactions` field with the _full_ /// [`TransactionCompat::Transaction`] objects: [`BlockTransactions::Full`] -pub fn from_block_full( - mut block: BlockWithSenders, +#[expect(clippy::type_complexity)] +pub fn from_block_full( + block: BlockWithSenders, total_difficulty: U256, block_hash: Option, -) -> Result, BlockError> { - let block_hash = block_hash.unwrap_or_else(|| block.block.header.hash_slow()); - let block_number = block.block.number; - let base_fee_per_gas = block.block.base_fee_per_gas; + tx_resp_builder: &T, +) -> Result>, T::Error> +where + T: TransactionCompat<<::Body as BlockBody>::Transaction>, + B: BlockTrait, +{ + let block_hash = block_hash.unwrap_or_else(|| block.block.header().hash_slow()); + let block_number = block.block.header().number(); + let base_fee_per_gas = block.block.header().base_fee_per_gas(); // NOTE: we can safely remove the body here because not needed to finalize the `Block` in // `from_block_with_transactions`, however we need to compute the length before let block_length = block.block.length(); - let transactions = std::mem::take(&mut block.block.body.transactions); + let transactions = block.block.body().transactions().to_vec(); let transactions_with_senders = transactions.into_iter().zip(block.senders); let transactions = transactions_with_senders .enumerate() .map(|(idx, (tx, sender))| { - let tx_hash = tx.hash(); + let tx_hash = *tx.tx_hash(); let signed_tx_ec_recovered = tx.with_signer(sender); let tx_info = TransactionInfo { hash: Some(tx_hash), @@ -83,9 +100,13 @@ pub fn from_block_full( index: Some(idx as u64), }; - from_recovered_with_block_context::(signed_tx_ec_recovered, tx_info) + from_recovered_with_block_context::<_, T>( + signed_tx_ec_recovered, + tx_info, + tx_resp_builder, + ) }) - .collect::>(); + .collect::, T::Error>>()?; Ok(from_block_with_transactions( block_length, @@ -96,97 +117,32 @@ pub fn from_block_full( )) } -/// Converts from a 
[`reth_primitives::SealedHeader`] to a [`alloy-rpc-types::Header`] -/// -/// # Note -/// -/// This does not set the `totalDifficulty` field. -pub fn from_primitive_with_hash(primitive_header: reth_primitives::SealedHeader) -> Header { - let (header, hash) = primitive_header.split(); - let PrimitiveHeader { - parent_hash, - ommers_hash, - beneficiary, - state_root, - transactions_root, - receipts_root, - logs_bloom, - difficulty, - number, - gas_limit, - gas_used, - timestamp, - mix_hash, - nonce, - base_fee_per_gas, - extra_data, - withdrawals_root, - blob_gas_used, - excess_blob_gas, - parent_beacon_block_root, - requests_root, - } = header; - - Header { - hash, - parent_hash, - uncles_hash: ommers_hash, - miner: beneficiary, - state_root, - transactions_root, - receipts_root, - withdrawals_root, - number, - gas_used, - gas_limit, - extra_data, - logs_bloom, - timestamp, - difficulty, - mix_hash: Some(mix_hash), - nonce: Some(nonce), - base_fee_per_gas, - blob_gas_used, - excess_blob_gas, - parent_beacon_block_root, - total_difficulty: None, - requests_root, - } -} - #[inline] -fn from_block_with_transactions( +fn from_block_with_transactions( block_length: usize, block_hash: B256, - block: PrimitiveBlock, + block: B, total_difficulty: U256, transactions: BlockTransactions, -) -> Block { - let uncles = block.body.ommers.into_iter().map(|h| h.hash_slow()).collect(); - let mut header = from_primitive_with_hash(SealedHeader::new(block.header, block_hash)); - header.total_difficulty = Some(total_difficulty); - - let withdrawals = header - .withdrawals_root +) -> Block> { + let withdrawals = block + .header() + .withdrawals_root() .is_some() - .then(|| block.body.withdrawals.map(Withdrawals::into_inner)) + .then(|| block.body().withdrawals().cloned().map(Withdrawals::into_inner).map(Into::into)) .flatten(); - Block { header, uncles, transactions, size: Some(U256::from(block_length)), withdrawals } -} + let uncles = block + .body() + .ommers() + .map(|o| o.iter().map(|h| h.hash_slow()).collect()) + .unwrap_or_default(); + let (header, _) = block.split(); + let header = Header::from_consensus( + Sealed::new_unchecked(header, block_hash), + Some(total_difficulty), + Some(U256::from(block_length)), + ); -/// Build an RPC block response representing -/// an Uncle from its header. -pub fn uncle_block_from_header(header: PrimitiveHeader) -> Block { - let hash = header.hash_slow(); - let rpc_header = from_primitive_with_hash(SealedHeader::new(header.clone(), hash)); - let uncle_block = PrimitiveBlock { header, ..Default::default() }; - let size = Some(U256::from(uncle_block.length())); - Block { - uncles: vec![], - header: rpc_header, - transactions: BlockTransactions::Uncle, - withdrawals: Some(vec![]), - size, - } + Block { header, uncles, transactions, withdrawals } } diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 84943b60e20..3be7835a35a 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -1,18 +1,22 @@ //! Standalone Conversion Functions for Handling Different Versions of Execution Payloads in //! 
Ethereum's Engine -use alloy_eips::eip2718::{Decodable2718, Encodable2718}; +use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, Header, EMPTY_OMMER_ROOT_HASH}; +use alloy_eips::{ + eip2718::{Decodable2718, Encodable2718}, + eip4895::Withdrawals, +}; use alloy_primitives::{B256, U256}; use alloy_rpc_types_engine::{ payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, - ExecutionPayload, ExecutionPayloadBodyV2, ExecutionPayloadV1, ExecutionPayloadV2, - ExecutionPayloadV3, ExecutionPayloadV4, PayloadError, + ExecutionPayload, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV2, + ExecutionPayloadV3, PayloadError, }; use reth_primitives::{ - constants::{EMPTY_OMMER_ROOT_HASH, MAXIMUM_EXTRA_DATA_SIZE}, proofs::{self}, - Block, BlockBody, Header, Request, SealedBlock, TransactionSigned, Withdrawals, + Block, BlockBody, BlockExt, SealedBlock, TransactionSigned, }; +use reth_primitives_traits::BlockBody as _; /// Converts [`ExecutionPayloadV1`] to [`Block`] pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result { @@ -67,12 +71,13 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result Result Result { - let ExecutionPayloadV4 { - payload_inner, - deposit_requests, - withdrawal_requests, - consolidation_requests, - } = payload; - let mut block = try_payload_v3_to_block(payload_inner)?; - - // attach requests with asc type identifiers - let requests = deposit_requests - .into_iter() - .map(Request::DepositRequest) - .chain(withdrawal_requests.into_iter().map(Request::WithdrawalRequest)) - .chain(consolidation_requests.into_iter().map(Request::ConsolidationRequest)) - .collect::>(); - - let requests_root = proofs::calculate_requests_root(&requests); - block.header.requests_root = Some(requests_root); - block.body.requests = Some(requests.into()); - - Ok(block) -} - /// Converts [`SealedBlock`] to [`ExecutionPayload`] pub fn block_to_payload(value: SealedBlock) -> ExecutionPayload { - if value.header.requests_root.is_some() { + if value.header.requests_hash.is_some() { // block with requests root: V3 - ExecutionPayload::V4(block_to_payload_v4(value)) + ExecutionPayload::V3(block_to_payload_v3(value)) } else if value.header.parent_beacon_block_root.is_some() { // block with parent beacon block root: V3 ExecutionPayload::V3(block_to_payload_v3(value)) @@ -145,7 +125,7 @@ pub fn block_to_payload(value: SealedBlock) -> ExecutionPayload { /// Converts [`SealedBlock`] to [`ExecutionPayloadV1`] pub fn block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { - let transactions = value.raw_transactions(); + let transactions = value.encoded_2718_transactions(); ExecutionPayloadV1 { parent_hash: value.parent_hash, fee_recipient: value.beneficiary, @@ -166,7 +146,7 @@ pub fn block_to_payload_v1(value: SealedBlock) -> ExecutionPayloadV1 { /// Converts [`SealedBlock`] to [`ExecutionPayloadV2`] pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { - let transactions = value.raw_transactions(); + let transactions = value.encoded_2718_transactions(); ExecutionPayloadV2 { payload_inner: ExecutionPayloadV1 { @@ -191,7 +171,7 @@ pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { /// Converts [`SealedBlock`] to [`ExecutionPayloadV3`], and returns the parent beacon block root. 
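With V4 payloads removed, callers can rely on the version dispatch shown in `block_to_payload` above; a small sketch of matching the result (hypothetical `sealed_block` value):

    match block_to_payload(sealed_block) {
        ExecutionPayload::V3(_) => { /* Cancun (or Prague) block; requests travel in the sidecar */ }
        ExecutionPayload::V2(_) => { /* Shanghai block with withdrawals */ }
        ExecutionPayload::V1(_) => { /* pre-Shanghai block */ }
    }
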
pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { - let transactions = value.raw_transactions(); + let transactions = value.encoded_2718_transactions(); ExecutionPayloadV3 { blob_gas_used: value.blob_gas_used.unwrap_or_default(), excess_blob_gas: value.excess_blob_gas.unwrap_or_default(), @@ -217,37 +197,6 @@ pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { } } -/// Converts [`SealedBlock`] to [`ExecutionPayloadV4`] -pub fn block_to_payload_v4(mut value: SealedBlock) -> ExecutionPayloadV4 { - let (deposit_requests, withdrawal_requests, consolidation_requests) = - value.body.requests.take().unwrap_or_default().into_iter().fold( - (Vec::new(), Vec::new(), Vec::new()), - |(mut deposits, mut withdrawals, mut consolidation_requests), request| { - match request { - Request::DepositRequest(r) => { - deposits.push(r); - } - Request::WithdrawalRequest(r) => { - withdrawals.push(r); - } - Request::ConsolidationRequest(r) => { - consolidation_requests.push(r); - } - _ => {} - }; - - (deposits, withdrawals, consolidation_requests) - }, - ); - - ExecutionPayloadV4 { - deposit_requests, - withdrawal_requests, - consolidation_requests, - payload_inner: block_to_payload_v3(value), - } -} - /// Converts [`SealedBlock`] to [`ExecutionPayloadFieldV2`] pub fn convert_block_to_payload_field_v2(value: SealedBlock) -> ExecutionPayloadFieldV2 { // if there are withdrawals, return V2 @@ -302,45 +251,49 @@ pub fn convert_block_to_payload_input_v2(value: SealedBlock) -> ExecutionPayload } } -/// Tries to create a new block (without a block hash) from the given payload and optional parent -/// beacon block root. +/// Tries to create a new unsealed block from the given payload and payload sidecar. +/// /// Performs additional validation of `extra_data` and `base_fee_per_gas` fields. /// -/// NOTE: The log bloom is assumed to be validated during serialization. +/// # Note +/// +/// The log bloom is assumed to be validated during serialization. /// /// See pub fn try_into_block( value: ExecutionPayload, - parent_beacon_block_root: Option, + sidecar: &ExecutionPayloadSidecar, ) -> Result { let mut base_payload = match value { ExecutionPayload::V1(payload) => try_payload_v1_to_block(payload)?, ExecutionPayload::V2(payload) => try_payload_v2_to_block(payload)?, ExecutionPayload::V3(payload) => try_payload_v3_to_block(payload)?, - ExecutionPayload::V4(payload) => try_payload_v4_to_block(payload)?, }; - base_payload.header.parent_beacon_block_root = parent_beacon_block_root; + base_payload.header.parent_beacon_block_root = sidecar.parent_beacon_block_root(); + base_payload.header.requests_hash = sidecar.requests_hash(); Ok(base_payload) } -/// Tries to create a new block from the given payload and optional parent beacon block root. -/// -/// NOTE: Empty ommers, nonce and difficulty values are validated upon computing block hash and -/// comparing the value with `payload.block_hash`. +/// Tries to create a new sealed block from the given payload and payload sidecar. /// /// Uses [`try_into_block`] to convert from the [`ExecutionPayload`] to [`Block`] and seals the /// block with its hash. /// /// Uses [`validate_block_hash`] to validate the payload block hash and ultimately return the /// [`SealedBlock`]. +/// +/// # Note +/// +/// Empty ommers, nonce, difficulty, and execution request values are validated upon computing block +/// hash and comparing the value with `payload.block_hash`.
pub fn try_into_sealed_block( payload: ExecutionPayload, - parent_beacon_block_root: Option, + sidecar: &ExecutionPayloadSidecar, ) -> Result { let block_hash = payload.block_hash(); - let base_payload = try_into_block(payload, parent_beacon_block_root)?; + let base_payload = try_into_block(payload, sidecar)?; // validate block hash and return validate_block_hash(block_hash, base_payload) @@ -368,67 +321,19 @@ pub fn validate_block_hash( } /// Converts [`Block`] to [`ExecutionPayloadBodyV1`] -pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { - let transactions = value.body.transactions.into_iter().map(|tx| { - let mut out = Vec::new(); - tx.encode_2718(&mut out); - out.into() - }); +pub fn convert_to_payload_body_v1( + value: impl reth_primitives_traits::Block, +) -> ExecutionPayloadBodyV1 { + let transactions = value.body().transactions().iter().map(|tx| tx.encoded_2718().into()); ExecutionPayloadBodyV1 { transactions: transactions.collect(), - withdrawals: value.body.withdrawals.map(Withdrawals::into_inner), + withdrawals: value.body().withdrawals().cloned().map(Withdrawals::into_inner), } } -/// Converts [`Block`] to [`ExecutionPayloadBodyV2`] -pub fn convert_to_payload_body_v2(value: Block) -> ExecutionPayloadBodyV2 { - let transactions = value.body.transactions.into_iter().map(|tx| { - let mut out = Vec::new(); - tx.encode_2718(&mut out); - out.into() - }); - - let mut payload = ExecutionPayloadBodyV2 { - transactions: transactions.collect(), - withdrawals: value.body.withdrawals.map(Withdrawals::into_inner), - deposit_requests: None, - withdrawal_requests: None, - consolidation_requests: None, - }; - - if let Some(requests) = value.body.requests { - let (deposit_requests, withdrawal_requests, consolidation_requests) = - requests.into_iter().fold( - (Vec::new(), Vec::new(), Vec::new()), - |(mut deposits, mut withdrawals, mut consolidation_requests), request| { - match request { - Request::DepositRequest(r) => { - deposits.push(r); - } - Request::WithdrawalRequest(r) => { - withdrawals.push(r); - } - Request::ConsolidationRequest(r) => { - consolidation_requests.push(r); - } - _ => {} - }; - - (deposits, withdrawals, consolidation_requests) - }, - ); - - payload.deposit_requests = Some(deposit_requests); - payload.withdrawal_requests = Some(withdrawal_requests); - payload.consolidation_requests = Some(consolidation_requests); - } - - payload -} - /// Transforms a [`SealedBlock`] into a [`ExecutionPayloadV1`] pub fn execution_payload_from_sealed_block(value: SealedBlock) -> ExecutionPayloadV1 { - let transactions = value.raw_transactions(); + let transactions = value.encoded_2718_transactions(); ExecutionPayloadV1 { parent_hash: value.parent_hash, fee_recipient: value.beneficiary, @@ -450,14 +355,14 @@ pub fn execution_payload_from_sealed_block(value: SealedBlock) -> ExecutionPaylo #[cfg(test)] mod tests { use super::{ - block_to_payload_v3, try_into_block, try_payload_v3_to_block, try_payload_v4_to_block, - validate_block_hash, + block_to_payload_v3, try_into_block, try_payload_v3_to_block, validate_block_hash, }; use alloy_primitives::{b256, hex, Bytes, U256}; use alloy_rpc_types_engine::{ - CancunPayloadFields, ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, - ExecutionPayloadV3, ExecutionPayloadV4, + CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ExecutionPayloadV1, + ExecutionPayloadV2, ExecutionPayloadV3, }; + use reth_primitives::BlockExt; #[test] fn roundtrip_payload_to_block() { @@ -674,60 +579,9 @@ mod tests { let 
cancun_fields = CancunPayloadFields { parent_beacon_block_root, versioned_hashes }; // convert into block - let block = try_into_block(payload, Some(cancun_fields.parent_beacon_block_root)).unwrap(); + let block = try_into_block(payload, &ExecutionPayloadSidecar::v3(cancun_fields)).unwrap(); // Ensure the actual hash is calculated if we set the fields to what they should be validate_block_hash(block_hash_with_blob_fee_fields, block).unwrap(); } - - #[test] - fn parse_payload_v4() { - let s = r#"{ - "baseFeePerGas": "0x2ada43", - "blobGasUsed": "0x0", - "blockHash": "0x86eeb2a4b656499f313b601e1dcaedfeacccab27131b6d4ea99bc69a57607f7d", - "blockNumber": "0x2c", - "depositRequests": [ - { - "amount": "0xe8d4a51000", - "index": "0x0", - "pubkey": "0xaab5f2b3aad5c2075faf0c1d8937c7de51a53b765a21b4173eb2975878cea05d9ed3428b77f16a981716aa32af74c464", - "signature": "0xa889cd238be2dae44f2a3c24c04d686c548f6f82eb44d4604e1bc455b6960efb72b117e878068a8f2cfb91ad84b7ebce05b9254207aa51a1e8a3383d75b5a5bd2439f707636ea5b17b2b594b989c93b000b33e5dff6e4bed9d53a6d2d6889b0c", - "withdrawalCredentials": "0x00ab9364f8bf7561862ea0fc3b69c424c94ace406c4dc36ddfbf8a9d72051c80" - }, - { - "amount": "0xe8d4a51000", - "index": "0x1", - "pubkey": "0xb0b1b3b51cf688ead965a954c5cc206ba4e76f3f8efac60656ae708a9aad63487a2ca1fb30ccaf2ebe1028a2b2886b1b", - "signature": "0xb9759766e9bb191b1c457ae1da6cdf71a23fb9d8bc9f845eaa49ee4af280b3b9720ac4d81e64b1b50a65db7b8b4e76f1176a12e19d293d75574600e99fbdfecc1ab48edaeeffb3226cd47691d24473821dad0c6ff3973f03e4aa89f418933a56", - "withdrawalCredentials": "0x002d2b75f4a27f78e585a4735a40ab2437eceb12ec39938a94dc785a54d62513" - } - ], - "excessBlobGas": "0x0", - "extraData": "0x726574682f76302e322e302d626574612e372f6c696e7578", - "feeRecipient": "0x8943545177806ed17b9f23f0a21ee5948ecaa776", - "gasLimit": "0x1855e85", - "gasUsed": "0x25f98", - "logsBloom": "0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000", - "parentHash": "0xd753194ef19b5c566b7eca6e9ebcca03895b548e1e93a20a23d922ba0bc210d4", - "prevRandao": "0x8c52256fd491776dc32f531ad4c0dc1444684741bca15f54c9cd40c60142df90", - "receiptsRoot": "0x510e7fb94279897e5dcd6c1795f6137d8fe02e49e871bfea7999fd21a89f66aa", - "stateRoot": "0x59ae0706a2b47162666fc7af3e30ff7aa34154954b68cc6aed58c3af3d58c9c2", - "timestamp": "0x6643c5a9", - "transactions": [ - 
"0x02f9021e8330182480843b9aca0085174876e80083030d40944242424242424242424242424242424242424242893635c9adc5dea00000b901a422895118000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000012049f42823819771c6bbbd9cb6649850083fd3b6e5d0beb1069342c32d65a3b0990000000000000000000000000000000000000000000000000000000000000030aab5f2b3aad5c2075faf0c1d8937c7de51a53b765a21b4173eb2975878cea05d9ed3428b77f16a981716aa32af74c46400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000ab9364f8bf7561862ea0fc3b69c424c94ace406c4dc36ddfbf8a9d72051c800000000000000000000000000000000000000000000000000000000000000060a889cd238be2dae44f2a3c24c04d686c548f6f82eb44d4604e1bc455b6960efb72b117e878068a8f2cfb91ad84b7ebce05b9254207aa51a1e8a3383d75b5a5bd2439f707636ea5b17b2b594b989c93b000b33e5dff6e4bed9d53a6d2d6889b0cc080a0db786f0d89923949e533680524f003cebd66f32fbd30429a6b6bfbd3258dcf60a05241c54e05574765f7ddc1a742ae06b044edfe02bffb202bf172be97397eeca9", - "0x02f9021e8330182401843b9aca0085174876e80083030d40944242424242424242424242424242424242424242893635c9adc5dea00000b901a422895118000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000120d694d6a0b0103651aafd87db6c88297175d7317c6e6da53ccf706c3c991c91fd0000000000000000000000000000000000000000000000000000000000000030b0b1b3b51cf688ead965a954c5cc206ba4e76f3f8efac60656ae708a9aad63487a2ca1fb30ccaf2ebe1028a2b2886b1b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020002d2b75f4a27f78e585a4735a40ab2437eceb12ec39938a94dc785a54d625130000000000000000000000000000000000000000000000000000000000000060b9759766e9bb191b1c457ae1da6cdf71a23fb9d8bc9f845eaa49ee4af280b3b9720ac4d81e64b1b50a65db7b8b4e76f1176a12e19d293d75574600e99fbdfecc1ab48edaeeffb3226cd47691d24473821dad0c6ff3973f03e4aa89f418933a56c080a099dc5b94a51e9b91a6425b1fed9792863006496ab71a4178524819d7db0c5e88a0119748e62700234079d91ae80f4676f9e0f71b260e9b46ef9b4aff331d3c2318" - ], - "withdrawalRequests": [], - "withdrawals": [], - "consolidationRequests": [] - }"#; - - let payload = serde_json::from_str::(s).unwrap(); - let mut block = try_payload_v4_to_block(payload).unwrap(); - block.header.parent_beacon_block_root = - Some(b256!("d9851db05fa63593f75e2b12c4bba9f47740613ca57da3b523a381b8c27f3297")); - let hash = block.seal_slow().hash(); - assert_eq!(hash, b256!("86eeb2a4b656499f313b601e1dcaedfeacccab27131b6d4ea99bc69a57607f7d")) - } } diff --git a/crates/rpc/rpc-types-compat/src/lib.rs b/crates/rpc/rpc-types-compat/src/lib.rs index c324eebc872..206d502f87d 100644 --- a/crates/rpc/rpc-types-compat/src/lib.rs +++ b/crates/rpc/rpc-types-compat/src/lib.rs @@ -12,7 +12,5 @@ pub mod block; pub mod engine; -pub mod proof; pub mod transaction; - pub use transaction::TransactionCompat; diff --git a/crates/rpc/rpc-types-compat/src/proof.rs b/crates/rpc/rpc-types-compat/src/proof.rs deleted file mode 100644 index 19bc76f3d7b..00000000000 --- a/crates/rpc/rpc-types-compat/src/proof.rs +++ /dev/null @@ -1,24 +0,0 @@ -//! Compatibility functions for rpc proof related types. 
- -use alloy_rpc_types::serde_helpers::JsonStorageKey; -use alloy_rpc_types_eth::{EIP1186AccountProofResponse, EIP1186StorageProof}; -use reth_trie_common::{AccountProof, StorageProof}; - -/// Creates a new rpc storage proof from a primitive storage proof type. -pub fn from_primitive_storage_proof(proof: StorageProof) -> EIP1186StorageProof { - EIP1186StorageProof { key: JsonStorageKey(proof.key), value: proof.value, proof: proof.proof } -} - -/// Creates a new rpc account proof from a primitive account proof type. -pub fn from_primitive_account_proof(proof: AccountProof) -> EIP1186AccountProofResponse { - let info = proof.info.unwrap_or_default(); - EIP1186AccountProofResponse { - address: proof.address, - balance: info.balance, - code_hash: info.get_bytecode_hash(), - nonce: info.nonce, - storage_hash: proof.storage_root, - account_proof: proof.proof, - storage_proof: proof.storage_proofs.into_iter().map(from_primitive_storage_proof).collect(), - } -} diff --git a/crates/rpc/rpc-types-compat/src/transaction.rs b/crates/rpc/rpc-types-compat/src/transaction.rs new file mode 100644 index 00000000000..d3d1a71decc --- /dev/null +++ b/crates/rpc/rpc-types-compat/src/transaction.rs @@ -0,0 +1,111 @@ +//! Compatibility functions for rpc `Transaction` type. + +use core::error; +use std::fmt; + +use alloy_consensus::Transaction as _; +use alloy_rpc_types_eth::{ + request::{TransactionInput, TransactionRequest}, + TransactionInfo, +}; +use reth_primitives::{RecoveredTx, TransactionSigned}; +use serde::{Deserialize, Serialize}; + +/// Create a new rpc transaction result for a mined transaction, using the given block hash, +/// number, and tx index fields to populate the corresponding fields in the rpc result. +/// +/// The block hash, number, and tx index fields should be from the original block where the +/// transaction was mined. +pub fn from_recovered_with_block_context>( + tx: RecoveredTx, + tx_info: TransactionInfo, + resp_builder: &T, +) -> Result { + resp_builder.fill(tx, tx_info) +} + +/// Create a new rpc transaction result for a _pending_ signed transaction, setting block +/// environment related fields to `None`. +pub fn from_recovered>( + tx: RecoveredTx, + resp_builder: &T, +) -> Result { + resp_builder.fill(tx, TransactionInfo::default()) +} + +/// Builds RPC transaction w.r.t. network. +pub trait TransactionCompat: + Send + Sync + Unpin + Clone + fmt::Debug +{ + /// RPC transaction response type. + type Transaction: Serialize + + for<'de> Deserialize<'de> + + Send + + Sync + + Unpin + + Clone + + fmt::Debug; + + /// RPC transaction error type. + type Error: error::Error + Into>; + + /// Create a new rpc transaction result for a _pending_ signed transaction, setting block + /// environment related fields to `None`. + fn fill( + &self, + tx: RecoveredTx, + tx_inf: TransactionInfo, + ) -> Result; + + /// Builds a fake transaction from a transaction request for inclusion into block built in + /// `eth_simulateV1`. + fn build_simulate_v1_transaction(&self, request: TransactionRequest) -> Result; + + /// Truncates the input of a transaction to only the first 4 bytes. + // todo: remove in favour of using constructor on `TransactionResponse` or similar + // . 
+ fn otterscan_api_truncate_input(tx: &mut Self::Transaction); +} + +/// Convert [`RecoveredTx`] to [`TransactionRequest`] +pub fn transaction_to_call_request(tx: RecoveredTx) -> TransactionRequest { + let from = tx.signer(); + let to = Some(tx.transaction.to().into()); + let gas = tx.transaction.gas_limit(); + let value = tx.transaction.value(); + let input = tx.transaction.input().clone(); + let nonce = tx.transaction.nonce(); + let chain_id = tx.transaction.chain_id(); + let access_list = tx.transaction.access_list().cloned(); + let max_fee_per_blob_gas = tx.transaction.max_fee_per_blob_gas(); + let authorization_list = tx.transaction.authorization_list().map(|l| l.to_vec()); + let blob_versioned_hashes = tx.transaction.blob_versioned_hashes().map(Vec::from); + let tx_type = tx.transaction.tx_type(); + + // fees depending on the transaction type + let (gas_price, max_fee_per_gas) = if tx.is_dynamic_fee() { + (None, Some(tx.max_fee_per_gas())) + } else { + (Some(tx.max_fee_per_gas()), None) + }; + let max_priority_fee_per_gas = tx.transaction.max_priority_fee_per_gas(); + + TransactionRequest { + from: Some(from), + to, + gas_price, + max_fee_per_gas, + max_priority_fee_per_gas, + gas: Some(gas), + value: Some(value), + input: TransactionInput::new(input), + nonce: Some(nonce), + chain_id, + access_list, + max_fee_per_blob_gas, + blob_versioned_hashes, + transaction_type: Some(tx_type.into()), + sidecar: None, + authorization_list, + } +} diff --git a/crates/rpc/rpc-types-compat/src/transaction/mod.rs b/crates/rpc/rpc-types-compat/src/transaction/mod.rs deleted file mode 100644 index a489a588617..00000000000 --- a/crates/rpc/rpc-types-compat/src/transaction/mod.rs +++ /dev/null @@ -1,146 +0,0 @@ -//! Compatibility functions for rpc `Transaction` type. -mod signature; - -pub use signature::*; -use std::fmt; - -use alloy_consensus::Transaction as _; -use alloy_rpc_types::{ - request::{TransactionInput, TransactionRequest}, - Transaction, TransactionInfo, -}; -use alloy_serde::WithOtherFields; -use reth_primitives::{TransactionSigned, TransactionSignedEcRecovered, TxType}; - -/// Create a new rpc transaction result for a mined transaction, using the given block hash, -/// number, and tx index fields to populate the corresponding fields in the rpc result. -/// -/// The block hash, number, and tx index fields should be from the original block where the -/// transaction was mined. -pub fn from_recovered_with_block_context( - tx: TransactionSignedEcRecovered, - tx_info: TransactionInfo, -) -> T::Transaction { - T::fill(tx, tx_info) -} - -/// Create a new rpc transaction result for a _pending_ signed transaction, setting block -/// environment related fields to `None`. -pub fn from_recovered(tx: TransactionSignedEcRecovered) -> T::Transaction { - T::fill(tx, TransactionInfo::default()) -} - -/// Builds RPC transaction w.r.t. network. -pub trait TransactionCompat: Send + Sync + Unpin + Clone + fmt::Debug { - /// RPC transaction response type. - type Transaction: Send + Clone + Default + fmt::Debug; - - /// Formats gas price and max fee per gas for RPC transaction response w.r.t. network specific - /// transaction type. 
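The rule implemented by the removed helper below is the standard effective-gas-price computation for mined transactions. A minimal standalone sketch, with plain integers and an illustrative function name:

// Minimal sketch (illustrative, not reth's API): the `gasPrice` reported for a
// mined EIP-1559 transaction is `min(max_priority_fee, max_fee - base_fee) + base_fee`;
// for legacy/EIP-2930 transactions it is simply the fee cap.
fn effective_gas_price(max_fee: u128, max_priority_fee: u128, base_fee: Option<u128>) -> u128 {
    match base_fee {
        // Dynamic-fee transaction in a mined block: the tip is capped by what the
        // fee cap leaves after the base fee.
        Some(base) => max_priority_fee.min(max_fee.saturating_sub(base)) + base,
        // No base fee context (legacy): the fee cap is the effective price.
        None => max_fee,
    }
}

fn main() {
    // Tip of 2 is capped to (11 - 10) = 1, plus base fee 10 => 11.
    assert_eq!(effective_gas_price(11, 2, Some(10)), 11);
}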
- fn gas_price(signed_tx: &TransactionSigned, base_fee: Option) -> GasPrice { - #[allow(unreachable_patterns)] - match signed_tx.tx_type() { - TxType::Legacy | TxType::Eip2930 => { - GasPrice { gas_price: Some(signed_tx.max_fee_per_gas()), max_fee_per_gas: None } - } - TxType::Eip1559 | TxType::Eip4844 | TxType::Eip7702 => { - // the gas price field for EIP1559 is set to `min(tip, gasFeeCap - baseFee) + - // baseFee` - let gas_price = base_fee - .and_then(|base_fee| { - signed_tx - .effective_tip_per_gas(Some(base_fee)) - .map(|tip| tip + base_fee as u128) - }) - .unwrap_or_else(|| signed_tx.max_fee_per_gas()); - - GasPrice { - gas_price: Some(gas_price), - max_fee_per_gas: Some(signed_tx.max_fee_per_gas()), - } - } - _ => GasPrice::default(), - } - } - - /// Create a new rpc transaction result for a _pending_ signed transaction, setting block - /// environment related fields to `None`. - fn fill(tx: TransactionSignedEcRecovered, tx_inf: TransactionInfo) -> Self::Transaction; - - /// Truncates the input of a transaction to only the first 4 bytes. - // todo: remove in favour of using constructor on `TransactionResponse` or similar - // . - fn otterscan_api_truncate_input(tx: &mut Self::Transaction); - - /// Returns the transaction type. - // todo: remove when alloy TransactionResponse trait it updated. - fn tx_type(tx: &Self::Transaction) -> u8; -} - -impl TransactionCompat for () { - // this noop impl depends on integration in `reth_rpc_eth_api::EthApiTypes` noop impl, and - // `alloy_network::AnyNetwork` - type Transaction = WithOtherFields; - - fn fill(_tx: TransactionSignedEcRecovered, _tx_info: TransactionInfo) -> Self::Transaction { - WithOtherFields::default() - } - - fn otterscan_api_truncate_input(_tx: &mut Self::Transaction) {} - - fn tx_type(_tx: &Self::Transaction) -> u8 { - 0 - } -} - -/// Gas price and max fee per gas for a transaction. Helper type to format transaction RPC response. -#[derive(Debug, Default)] -pub struct GasPrice { - /// Gas price for transaction. - pub gas_price: Option, - /// Max fee per gas for transaction. 
- pub max_fee_per_gas: Option, -} - -/// Convert [`TransactionSignedEcRecovered`] to [`TransactionRequest`] -pub fn transaction_to_call_request(tx: TransactionSignedEcRecovered) -> TransactionRequest { - let from = tx.signer(); - let to = Some(tx.transaction.to().into()); - let gas = tx.transaction.gas_limit(); - let value = tx.transaction.value(); - let input = tx.transaction.input().clone(); - let nonce = tx.transaction.nonce(); - let chain_id = tx.transaction.chain_id(); - let access_list = tx.transaction.access_list().cloned(); - let max_fee_per_blob_gas = tx.transaction.max_fee_per_blob_gas(); - let authorization_list = tx.transaction.authorization_list().map(|l| l.to_vec()); - let blob_versioned_hashes = tx.transaction.blob_versioned_hashes(); - let tx_type = tx.transaction.tx_type(); - - // fees depending on the transaction type - let (gas_price, max_fee_per_gas) = if tx.is_dynamic_fee() { - (None, Some(tx.max_fee_per_gas())) - } else { - (Some(tx.max_fee_per_gas()), None) - }; - let max_priority_fee_per_gas = tx.transaction.max_priority_fee_per_gas(); - - TransactionRequest { - from: Some(from), - to, - gas_price, - max_fee_per_gas, - max_priority_fee_per_gas, - gas: Some(gas), - value: Some(value), - input: TransactionInput::new(input), - nonce: Some(nonce), - chain_id, - access_list, - max_fee_per_blob_gas, - blob_versioned_hashes, - transaction_type: Some(tx_type.into()), - sidecar: None, - authorization_list, - } -} diff --git a/crates/rpc/rpc-types-compat/src/transaction/signature.rs b/crates/rpc/rpc-types-compat/src/transaction/signature.rs deleted file mode 100644 index 536f6ac5e5c..00000000000 --- a/crates/rpc/rpc-types-compat/src/transaction/signature.rs +++ /dev/null @@ -1,52 +0,0 @@ -use alloy_primitives::U256; -use alloy_rpc_types::{Parity, Signature}; -use reth_primitives::{transaction::legacy_parity, Signature as PrimitiveSignature, TxType}; - -/// Creates a new rpc signature from a legacy [primitive -/// signature](reth_primitives::Signature), using the give chain id to compute the signature's -/// recovery id. -/// -/// If the chain id is `Some`, the recovery id is computed according to [EIP-155](https://eips.ethereum.org/EIPS/eip-155). -pub fn from_legacy_primitive_signature( - signature: PrimitiveSignature, - chain_id: Option, -) -> Signature { - Signature { - r: signature.r(), - s: signature.s(), - v: U256::from(legacy_parity(&signature, chain_id).to_u64()), - y_parity: None, - } -} - -/// Creates a new rpc signature from a non-legacy [primitive -/// signature](reth_primitives::Signature). This sets the `v` value to `0` or `1` depending on -/// the signature's `odd_y_parity`. -pub fn from_typed_primitive_signature(signature: PrimitiveSignature) -> Signature { - Signature { - r: signature.r(), - s: signature.s(), - v: U256::from(signature.v().y_parity_byte()), - y_parity: Some(Parity(signature.v().y_parity())), - } -} - -/// Creates a new rpc signature from a legacy [primitive -/// signature](reth_primitives::Signature). -/// -/// The tx type is used to determine whether or not to use the `chain_id` to compute the -/// signature's recovery id. -/// -/// If the transaction is a legacy transaction, it will use the `chain_id` to compute the -/// signature's recovery id. If the transaction is a typed transaction, it will set the `v` -/// value to `0` or `1` depending on the signature's `odd_y_parity`. 
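The recovery-id arithmetic behind the removed dispatch below is compact enough to sketch in isolation; the helper name and plain-integer types here are illustrative, not reth's API:

// Sketch of the legacy `v` computation described above.
fn legacy_v(odd_y_parity: bool, chain_id: Option<u64>) -> u64 {
    match chain_id {
        // EIP-155 replay protection: v = y_parity + 35 + 2 * chain_id.
        Some(id) => u64::from(odd_y_parity) + 35 + 2 * id,
        // Pre-EIP-155 legacy: v is 27 or 28.
        None => 27 + u64::from(odd_y_parity),
    }
}

fn main() {
    // Mainnet (chain id 1), even parity: 0 + 35 + 2 = 37.
    assert_eq!(legacy_v(false, Some(1)), 37);
}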
-pub fn from_primitive_signature( - signature: PrimitiveSignature, - tx_type: TxType, - chain_id: Option, -) -> Signature { - match tx_type { - TxType::Legacy => from_legacy_primitive_signature(signature, chain_id), - _ => from_typed_primitive_signature(signature), - } -} diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index fe150e36eed..14519860e76 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -15,14 +15,17 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives-traits.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true +reth-engine-primitives.workspace = true reth-errors.workspace = true +reth-ethereum-consensus.workspace = true reth-provider.workspace = true reth-transaction-pool.workspace = true reth-network-api.workspace = true reth-rpc-engine-api.workspace = true -reth-revm.workspace = true +reth-revm = { workspace = true, features = ["witness"] } reth-tasks = { workspace = true, features = ["rayon"] } reth-consensus-common.workspace = true reth-rpc-types-compat.workspace = true @@ -31,27 +34,28 @@ reth-network-peers = { workspace = true, features = ["secp256k1"] } reth-evm.workspace = true reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true -reth-node-api.workspace = true reth-network-types.workspace = true -reth-trie.workspace = true +reth-consensus.workspace = true # ethereum alloy-consensus.workspace = true alloy-signer.workspace = true alloy-signer-local.workspace = true -alloy-eips.workspace = true +alloy-eips = { workspace = true, features = ["kzg"] } alloy-dyn-abi.workspace = true alloy-genesis.workspace = true alloy-network.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true +alloy-rpc-types-beacon.workspace = true alloy-rpc-types.workspace = true -alloy-rpc-types-eth = { workspace = true, features = ["jsonrpsee-types"] } +alloy-rpc-types-eth = { workspace = true, features = ["jsonrpsee-types", "serde"] } alloy-rpc-types-debug.workspace = true alloy-rpc-types-trace.workspace = true alloy-rpc-types-mev.workspace = true alloy-rpc-types-txpool.workspace = true alloy-rpc-types-admin.workspace = true +alloy-rpc-types-engine.workspace = true alloy-serde.workspace = true revm = { workspace = true, features = [ "optional_block_gas_limit", @@ -78,7 +82,7 @@ parking_lot.workspace = true # misc tracing.workspace = true -tracing-futures = "0.2" +tracing-futures.workspace = true futures.workspace = true rand.workspace = true serde.workspace = true diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index 311719a04ed..0358aa3a8d4 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -115,6 +115,7 @@ where .get_final_paris_total_difficulty() .is_some(), terminal_total_difficulty: self.chain_spec.fork(EthereumHardfork::Paris).ttd(), + deposit_contract_address: self.chain_spec.deposit_contract().map(|dc| dc.address), ..self.chain_spec.genesis().config.clone() }; diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index acf215b3b2c..5e799dd69ca 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -1,11 +1,12 @@ -use alloy_eips::eip2718::Encodable2718; +use alloy_consensus::BlockHeader; +use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rlp::{Decodable, Encodable}; -use alloy_rpc_types::{ - state::EvmOverrides, Block as 
RpcBlock, BlockError, Bundle, StateContext, TransactionInfo, -}; use alloy_rpc_types_debug::ExecutionWitness; -use alloy_rpc_types_eth::transaction::TransactionRequest; +use alloy_rpc_types_eth::{ + state::EvmOverrides, transaction::TransactionRequest, Block as RpcBlock, BlockError, Bundle, + StateContext, TransactionInfo, +}; use alloy_rpc_types_trace::geth::{ call::FlatCallFrame, BlockTraceResult, FourByteFrame, GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, @@ -18,21 +19,21 @@ use reth_evm::{ execute::{BlockExecutorProvider, Executor}, ConfigureEvmEnv, }; -use reth_primitives::{Block, BlockId, BlockNumberOrTag, TransactionSignedEcRecovered}; +use reth_primitives::{BlockExt, NodePrimitives, ReceiptWithBloom, SealedBlockWithSenders}; +use reth_primitives_traits::{Block as _, BlockBody, SignedTransaction}; use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, HeaderProvider, StateProofProvider, - StateProviderFactory, TransactionVariant, + BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, ProviderBlock, + ReceiptProviderIdExt, StateProofProvider, TransactionVariant, }; -use reth_revm::database::StateProviderDatabase; +use reth_revm::{database::StateProviderDatabase, witness::ExecutionWitnessRecord}; use reth_rpc_api::DebugApiServer; use reth_rpc_eth_api::{ - helpers::{Call, EthApiSpec, EthTransactions, TraceExt}, - EthApiTypes, FromEthApiError, + helpers::{EthTransactions, TraceExt}, + EthApiTypes, FromEthApiError, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, StateCacheDb}; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_tasks::pool::BlockingTaskGuard; -use reth_trie::{HashedPostState, HashedStorage}; use revm::{ db::{CacheDB, State}, primitives::{db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg}, @@ -40,29 +41,26 @@ use revm::{ use revm_inspectors::tracing::{ FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, TransactionContext, }; -use revm_primitives::{keccak256, HashMap}; use std::sync::Arc; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; /// `debug` API implementation. /// /// This type provides the functionality for handling `debug` related requests. -pub struct DebugApi { - inner: Arc>, +pub struct DebugApi { + inner: Arc>, } // === impl DebugApi === -impl DebugApi { +impl DebugApi { /// Create a new instance of the [`DebugApi`] pub fn new( - provider: Provider, eth: Eth, blocking_task_guard: BlockingTaskGuard, block_executor: BlockExecutor, ) -> Self { - let inner = - Arc::new(DebugApiInner { provider, eth_api: eth, blocking_task_guard, block_executor }); + let inner = Arc::new(DebugApiInner { eth_api: eth, blocking_task_guard, block_executor }); Self { inner } } @@ -72,18 +70,20 @@ impl DebugApi { } } +impl DebugApi { + /// Access the underlying provider. + pub fn provider(&self) -> &Eth::Provider { + self.inner.eth_api.provider() + } +} + // === impl DebugApi === -impl DebugApi +impl DebugApi where - Provider: BlockReaderIdExt - + HeaderProvider - + ChainSpecProvider - + StateProviderFactory - + EvmEnvProvider - + 'static, Eth: EthApiTypes + TraceExt + 'static, - BlockExecutor: BlockExecutorProvider, + BlockExecutor: + BlockExecutorProvider>>, { /// Acquires a permit to execute a tracing call. 
async fn acquire_trace_permit(&self) -> Result { @@ -93,47 +93,47 @@ where /// Trace the entire block asynchronously async fn trace_block( &self, - at: BlockId, - transactions: Vec, + block: Arc>>, cfg: CfgEnvWithHandlerCfg, block_env: BlockEnv, opts: GethDebugTracingOptions, ) -> Result, Eth::Error> { - if transactions.is_empty() { - // nothing to trace - return Ok(Vec::new()) - } - // replay all transactions of the block let this = self.clone(); self.eth_api() - .spawn_with_state_at_block(at, move |state| { - let block_hash = at.as_block_hash(); - let mut results = Vec::with_capacity(transactions.len()); + .spawn_with_state_at_block(block.parent_hash().into(), move |state| { + let mut results = Vec::with_capacity(block.body.transactions().len()); let mut db = CacheDB::new(StateProviderDatabase::new(state)); - let mut transactions = transactions.into_iter().enumerate().peekable(); - while let Some((index, tx)) = transactions.next() { - let tx_hash = tx.hash; + + this.eth_api().apply_pre_execution_changes(&block, &mut db, &cfg, &block_env)?; + + let mut transactions = block.transactions_with_sender().enumerate().peekable(); + let mut inspector = None; + while let Some((index, (signer, tx))) = transactions.next() { + let tx_hash = *tx.tx_hash(); let env = EnvWithHandlerCfg { env: Env::boxed( cfg.cfg_env.clone(), block_env.clone(), - Call::evm_config(this.eth_api()).tx_env(tx.as_signed(), tx.signer()), + this.eth_api().evm_config().tx_env(tx, *signer), ), handler_cfg: cfg.handler_cfg, }; let (result, state_changes) = this.trace_transaction( - opts.clone(), + &opts, env, &mut db, Some(TransactionContext { - block_hash, + block_hash: Some(block.hash()), tx_hash: Some(tx_hash), tx_index: Some(index), }), + &mut inspector, )?; + inspector = inspector.map(|insp| insp.fused()); + results.push(TraceResult::Success { result, tx_hash: Some(tx_hash) }); if transactions.peek().is_some() { // need to apply the state changes of this transaction before executing the @@ -157,41 +157,45 @@ where rlp_block: Bytes, opts: GethDebugTracingOptions, ) -> Result, Eth::Error> { - let block = Block::decode(&mut rlp_block.as_ref()) + let block: ProviderBlock = Decodable::decode(&mut rlp_block.as_ref()) .map_err(BlockError::RlpDecodeRawBlock) .map_err(Eth::Error::from_eth_err)?; - let (cfg, block_env) = self.eth_api().evm_env_for_raw_block(&block.header).await?; - // we trace on top the block's parent block - let parent = block.parent_hash; + let (cfg, block_env) = self.eth_api().evm_env_for_raw_block(block.header()).await?; // Depending on EIP-2 we need to recover the transactions differently - let transactions = - if self.inner.provider.chain_spec().is_homestead_active_at_block(block.number) { + let senders = + if self.provider().chain_spec().is_homestead_active_at_block(block.header().number()) { block - .body - .transactions - .into_iter() + .body() + .transactions() + .iter() .map(|tx| { - tx.into_ecrecovered() + tx.recover_signer() .ok_or(EthApiError::InvalidTransactionSignature) .map_err(Eth::Error::from_eth_err) }) .collect::, Eth::Error>>()? } else { block - .body - .transactions - .into_iter() + .body() + .transactions() + .iter() .map(|tx| { - tx.into_ecrecovered_unchecked() + tx.recover_signer_unchecked() .ok_or(EthApiError::InvalidTransactionSignature) .map_err(Eth::Error::from_eth_err) }) .collect::, Eth::Error>>()? 
}; - self.trace_block(parent.into(), transactions, cfg, block_env, opts).await + self.trace_block( + Arc::new(block.with_senders_unchecked(senders).seal_slow()), + cfg, + block_env, + opts, + ) + .await } /// Replays a block and returns the trace of each transaction. @@ -201,8 +205,7 @@ where opts: GethDebugTracingOptions, ) -> Result, Eth::Error> { let block_hash = self - .inner - .provider + .provider() .block_hash_for_id(block_id) .map_err(Eth::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(block_id))?; @@ -213,18 +216,8 @@ where )?; let block = block.ok_or(EthApiError::HeaderNotFound(block_id))?; - // we need to get the state of the parent block because we're replaying this block on top of - // its parent block's state - let state_at = block.parent_hash; - self.trace_block( - state_at.into(), - (*block).clone().into_transactions_ecrecovered().collect(), - cfg, - block_env, - opts, - ) - .await + self.trace_block(block, cfg, block_env, opts).await } /// Trace the transaction according to the provided options. @@ -243,7 +236,7 @@ where // we need to get the state of the parent block because we're essentially replaying the // block the transaction is included in - let state_at: BlockId = block.parent_hash.into(); + let state_at: BlockId = block.parent_hash().into(); let block_hash = block.hash(); let this = self.clone(); @@ -255,33 +248,37 @@ where let tx = transaction.into_recovered(); let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + this.eth_api().apply_pre_execution_changes(&block, &mut db, &cfg, &block_env)?; + // replay all transactions prior to the targeted transaction let index = this.eth_api().replay_transactions_until( &mut db, cfg.clone(), block_env.clone(), block_txs, - tx.hash, + *tx.tx_hash(), )?; let env = EnvWithHandlerCfg { env: Env::boxed( cfg.cfg_env.clone(), block_env, - Call::evm_config(this.eth_api()).tx_env(tx.as_signed(), tx.signer()), + this.eth_api().evm_config().tx_env(tx.as_signed(), tx.signer()), ), handler_cfg: cfg.handler_cfg, }; this.trace_transaction( - opts, + &opts, env, &mut db, Some(TransactionContext { block_hash: Some(block_hash), tx_index: Some(index), - tx_hash: Some(tx.hash), + tx_hash: Some(*tx.tx_hash()), }), + &mut None, ) .map(|(trace, _)| trace) }) @@ -386,10 +383,22 @@ where // let db = db.0; + let tx_info = TransactionInfo { + block_number: Some( + env.block.number.try_into().unwrap_or_default(), + ), + base_fee: Some( + env.block.basefee.try_into().unwrap_or_default(), + ), + hash: None, + block_hash: None, + index: None, + }; + let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector - .try_into_mux_frame(&res, db) + .try_into_mux_frame(&res, db, tx_info) .map_err(Eth::Error::from_eth_err)?; Ok(frame.into()) }) @@ -504,15 +513,15 @@ where // we're essentially replaying the transactions in the block here, hence we need the state // that points to the beginning of the block, which is the state at the parent block - let mut at = block.parent_hash; + let mut at = block.parent_hash(); let mut replay_block_txs = true; // if a transaction index is provided, we need to replay the transactions until the index - let num_txs = transaction_index.index().unwrap_or(block.body.transactions.len()); + let num_txs = transaction_index.index().unwrap_or_else(|| block.body.transactions().len()); // but if all transactions are to be replayed, we can use the state at the block itself // this works with the exception of the PENDING block, because its state might not exist if // built locally - if 
!target_block.is_pending() && num_txs == block.body.transactions.len() { + if !target_block.is_pending() && num_txs == block.body.transactions().len() { at = block.hash(); replay_block_txs = false; } @@ -536,7 +545,7 @@ where env: Env::boxed( cfg.cfg_env.clone(), block_env.clone(), - Call::evm_config(this.eth_api()).tx_env(tx, *signer), + this.eth_api().evm_config().tx_env(tx, *signer), ), handler_cfg: cfg.handler_cfg, }; @@ -552,6 +561,7 @@ where let Bundle { transactions, block_override } = bundle; let block_overrides = block_override.map(Box::new); + let mut inspector = None; let mut transactions = transactions.into_iter().peekable(); while let Some(tx) = transactions.next() { @@ -567,8 +577,15 @@ where overrides, )?; - let (trace, state) = - this.trace_transaction(tracing_options.clone(), env, &mut db, None)?; + let (trace, state) = this.trace_transaction( + &tracing_options, + env, + &mut db, + None, + &mut inspector, + )?; + + inspector = inspector.map(|insp| insp.fused()); // If there is more transactions, commit the database // If there is no transactions, but more bundles, commit to the database too @@ -604,73 +621,39 @@ where .ok_or(EthApiError::HeaderNotFound(block_id.into()))?; self.eth_api() - .spawn_with_state_at_block(block.parent_hash.into(), move |state_provider| { + .spawn_with_state_at_block(block.parent_hash().into(), move |state_provider| { let db = StateProviderDatabase::new(&state_provider); let block_executor = this.inner.block_executor.executor(db); - let mut hashed_state = HashedPostState::default(); - let mut keys = HashMap::default(); - let mut codes = HashMap::default(); + let mut witness_record = ExecutionWitnessRecord::default(); let _ = block_executor .execute_with_state_closure( - (&(*block).clone().unseal(), block.difficulty).into(), + (&(*block).clone().unseal(), block.difficulty()).into(), |statedb: &State<_>| { - codes = statedb - .cache - .contracts - .iter() - .map(|(hash, code)| (*hash, code.original_bytes())) - .chain( - // cache state does not have all the contracts, especially when - // a contract is created within the block - // the contract only exists in bundle state, therefore we need - // to include them as well - statedb - .bundle_state - .contracts - .iter() - .map(|(hash, code)| (*hash, code.original_bytes())), - ) - .collect(); - - for (address, account) in &statedb.cache.accounts { - let hashed_address = keccak256(address); - hashed_state.accounts.insert( - hashed_address, - account.account.as_ref().map(|a| a.info.clone().into()), - ); - - let storage = - hashed_state.storages.entry(hashed_address).or_insert_with( - || HashedStorage::new(account.status.was_destroyed()), - ); - - if let Some(account) = &account.account { - keys.insert(hashed_address, address.to_vec().into()); - - for (slot, value) in &account.storage { - let slot = B256::from(*slot); - let hashed_slot = keccak256(slot); - storage.storage.insert(hashed_slot, *value); - - keys.insert(hashed_slot, slot.into()); - } - } - } + witness_record.record_executed_state(statedb); }, ) .map_err(|err| EthApiError::Internal(err.into()))?; + let ExecutionWitnessRecord { hashed_state, codes, keys } = witness_record; + let state = state_provider.witness(Default::default(), hashed_state).map_err(Into::into)?; - Ok(ExecutionWitness { state: state.into_iter().collect(), codes, keys: Some(keys) }) + Ok(ExecutionWitness { state: state.into_iter().collect(), codes, keys }) }) .await } /// Executes the configured transaction with the environment on the given database. 
/// + /// It optionally takes a fused inspector ([`TracingInspector::fused`]) to avoid re-creating the + /// inspector for each transaction. This is only useful for block tracing, which uses the same + /// tracer for all transactions in the block. + /// + /// Caution: If the inspector is provided then `opts.tracer_config` is ignored. + /// /// Returns the trace frame and the state that got updated after executing the transaction. /// /// Note: this does not apply any state overrides if they're configured in the `opts`. @@ -678,13 +661,25 @@ where /// Caution: this is blocking and should be performed on a blocking task. fn trace_transaction( &self, - opts: GethDebugTracingOptions, + opts: &GethDebugTracingOptions, env: EnvWithHandlerCfg, db: &mut StateCacheDb<'_>, transaction_context: Option, + fused_inspector: &mut Option, ) -> Result<(GethTrace, revm_primitives::EvmState), Eth::Error> { let GethDebugTracingOptions { config, tracer, tracer_config, .. } = opts; + let tx_info = TransactionInfo { + hash: transaction_context.as_ref().map(|c| c.tx_hash).unwrap_or_default(), + index: transaction_context + .as_ref() + .map(|c| c.tx_index.map(|i| i as u64)) + .unwrap_or_default(), + block_hash: transaction_context.as_ref().map(|c| c.block_hash).unwrap_or_default(), + block_number: Some(env.block.number.try_into().unwrap_or_default()), + base_fee: Some(env.block.basefee.try_into().unwrap_or_default()), + }; + if let Some(tracer) = tracer { return match tracer { GethDebugTracerType::BuiltInTracer(tracer) => match tracer { @@ -695,35 +690,42 @@ where } GethDebugBuiltInTracerType::CallTracer => { let call_config = tracer_config + .clone() .into_call_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; - let mut inspector = TracingInspector::new( - TracingInspectorConfig::from_geth_call_config(&call_config), - ); + let mut inspector = fused_inspector.get_or_insert_with(|| { + TracingInspector::new(TracingInspectorConfig::from_geth_call_config( + &call_config, + )) + }); let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; + inspector.set_transaction_gas_limit(env.tx.gas_limit); + let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() + .geth_builder() .geth_call_traces(call_config, res.result.gas_used()); return Ok((frame.into(), res.state)) } GethDebugBuiltInTracerType::PreStateTracer => { let prestate_config = tracer_config + .clone() .into_pre_state_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; - let mut inspector = TracingInspector::new( - TracingInspectorConfig::from_geth_prestate_config(&prestate_config), - ); + let mut inspector = fused_inspector.get_or_insert_with(|| { + TracingInspector::new( + TracingInspectorConfig::from_geth_prestate_config(&prestate_config), + ) + }); let (res, env) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; + inspector.set_transaction_gas_limit(env.tx.gas_limit); let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() + .geth_builder() .geth_prestate_traces(&res, &prestate_config, db) .map_err(Eth::Error::from_eth_err)?; @@ -734,6 +736,7 @@ where } GethDebugBuiltInTracerType::MuxTracer => { let mux_config = tracer_config + .clone() .into_mux_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; @@ -742,12 +745,13 @@ where let (res, _) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector - .try_into_mux_frame(&res, db) + .try_into_mux_frame(&res, db, tx_info)
.map_err(Eth::Error::from_eth_err)?; return Ok((frame.into(), res.state)) } GethDebugBuiltInTracerType::FlatCallTracer => { let flat_call_config = tracer_config + .clone() .into_flat_call_config() .map_err(|_| EthApiError::InvalidTracerConfig)?; @@ -756,14 +760,6 @@ where ); let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; - - let tx_info = TransactionInfo { - hash: transaction_context.unwrap().tx_hash, - index: transaction_context.unwrap().tx_index.map(|index| index as u64), - block_hash: transaction_context.unwrap().block_hash, - block_number: Some(env.block.number.try_into().unwrap_or_default()), - base_fee: Some(env.block.basefee.try_into().unwrap_or_default()), - }; let frame: FlatCallFrame = inspector .with_transaction_gas_limit(env.tx.gas_limit) .into_parity_builder() @@ -778,10 +774,10 @@ where } #[cfg(feature = "js-tracer")] GethDebugTracerType::JsTracer(code) => { - let config = tracer_config.into_json(); + let config = tracer_config.clone().into_json(); let mut inspector = revm_inspectors::tracing::js::JsInspector::with_transaction_context( - code, + code.clone(), config, transaction_context.unwrap_or_default(), ) @@ -797,46 +793,40 @@ where } // default structlog tracer - let inspector_config = TracingInspectorConfig::from_geth_config(&config); - - let mut inspector = TracingInspector::new(inspector_config); - + let mut inspector = fused_inspector.get_or_insert_with(|| { + let inspector_config = TracingInspectorConfig::from_geth_config(config); + TracingInspector::new(inspector_config) + }); let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; let gas_used = res.result.gas_used(); let return_value = res.result.into_output().unwrap_or_default(); - let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) - .into_geth_builder() - .geth_traces(gas_used, return_value, config); + inspector.set_transaction_gas_limit(env.tx.gas_limit); + let frame = inspector.geth_builder().geth_traces(gas_used, return_value, *config); Ok((frame.into(), res.state)) } } #[async_trait] -impl DebugApiServer for DebugApi +impl DebugApiServer for DebugApi where - Provider: BlockReaderIdExt - + HeaderProvider - + ChainSpecProvider - + StateProviderFactory - + EvmEnvProvider - + 'static, - Eth: EthApiSpec + EthTransactions + TraceExt + 'static, - BlockExecutor: BlockExecutorProvider, + Eth: EthApiTypes + EthTransactions + TraceExt + 'static, + BlockExecutor: + BlockExecutorProvider>>, { /// Handler for `debug_getRawHeader` async fn raw_header(&self, block_id: BlockId) -> RpcResult { let header = match block_id { - BlockId::Hash(hash) => self.inner.provider.header(&hash.into()).to_rpc_result()?, + BlockId::Hash(hash) => self.provider().header(&hash.into()).to_rpc_result()?, BlockId::Number(number_or_tag) => { let number = self - .inner - .provider + .provider() .convert_block_number(number_or_tag) .to_rpc_result()? - .ok_or_else(|| internal_rpc_err("Pending block not supported".to_string()))?; - self.inner.provider.header_by_number(number).to_rpc_result()? + .ok_or_else(|| { + internal_rpc_err("Pending block not supported".to_string()) + })?; + self.provider().header_by_number(number).to_rpc_result()? } }; @@ -851,8 +841,7 @@ where /// Handler for `debug_getRawBlock` async fn raw_block(&self, block_id: BlockId) -> RpcResult { let block = self - .inner - .provider + .provider() .block_by_id(block_id) .to_rpc_result()? .ok_or(EthApiError::HeaderNotFound(block_id))?; @@ -874,8 +863,7 @@ where /// Returns the bytes of the transaction for the given hash. 
async fn raw_transactions(&self, block_id: BlockId) -> RpcResult> { let block = self - .inner - .provider + .provider() .block_with_senders_by_id(block_id, TransactionVariant::NoHash) .to_rpc_result()? .unwrap_or_default(); @@ -885,13 +873,12 @@ where /// Handler for `debug_getRawReceipts` async fn raw_receipts(&self, block_id: BlockId) -> RpcResult> { Ok(self - .inner - .provider + .provider() .receipts_by_block_id(block_id) .to_rpc_result()? .unwrap_or_default() .into_iter() - .map(|receipt| receipt.with_bloom().envelope_encoded()) + .map(|receipt| ReceiptWithBloom::from(receipt).encoded_2718().into()) .collect()) } @@ -957,15 +944,6 @@ where .map_err(Into::into) } - /// Handler for `debug_executionWitness` - async fn debug_execution_witness( - &self, - block: BlockNumberOrTag, - ) -> RpcResult { - let _permit = self.acquire_trace_permit().await; - Self::debug_execution_witness(self, block).await.map_err(Into::into) - } - /// Handler for `debug_traceCall` async fn debug_trace_call( &self, @@ -989,6 +967,15 @@ where Self::debug_trace_call_many(self, bundles, state_context, opts).await.map_err(Into::into) } + /// Handler for `debug_executionWitness` + async fn debug_execution_witness( + &self, + block: BlockNumberOrTag, + ) -> RpcResult { + let _permit = self.acquire_trace_permit().await; + Self::debug_execution_witness(self, block).await.map_err(Into::into) + } + async fn debug_backtrace_at(&self, _location: &str) -> RpcResult<()> { Ok(()) } @@ -1201,21 +1188,19 @@ where } } -impl std::fmt::Debug for DebugApi { +impl std::fmt::Debug for DebugApi { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("DebugApi").finish_non_exhaustive() } } -impl Clone for DebugApi { +impl Clone for DebugApi { fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner) } } } -struct DebugApiInner { - /// The provider that can interact with the chain. - provider: Provider, +struct DebugApiInner { /// The implementation of `eth` API eth_api: Eth, // restrict the number of concurrent calls to blocking calls diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index 928e2050a5c..a9c316571ac 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -1,15 +1,15 @@ +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, B256, U256, U64}; -use alloy_rpc_types::{ - state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, +use alloy_rpc_types_eth::{ + state::StateOverride, transaction::TransactionRequest, BlockOverrides, + EIP1186AccountProofResponse, Filter, Log, SyncStatus, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_serde::JsonStorageKey; use jsonrpsee::core::RpcResult as Result; -use reth_primitives::{BlockId, BlockNumberOrTag}; use reth_rpc_api::{EngineEthApiServer, EthApiServer, EthFilterApiServer}; /// Re-export for convenience pub use reth_rpc_engine_api::EngineApi; -use reth_rpc_eth_api::{FullEthApiTypes, RpcBlock, RpcReceipt, RpcTransaction}; +use reth_rpc_eth_api::{FullEthApiTypes, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction}; use tracing_futures::Instrument; macro_rules! engine_span { @@ -41,6 +41,7 @@ where RpcTransaction, RpcBlock, RpcReceipt, + RpcHeader, > + FullEthApiTypes, EthFilter: EthFilterApiServer>, { diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index e97497786ed..b12e021335e 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -1,31 +1,29 @@ //! 
`Eth` bundle implementation and helpers. -use std::sync::Arc; - +use alloy_consensus::{BlockHeader, Transaction as _}; use alloy_primitives::{Keccak256, U256}; use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use jsonrpsee::core::RpcResult; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{ - revm_primitives::db::{DatabaseCommit, DatabaseRef}, - PooledTransactionsElement, -}; +use reth_primitives::PooledTransactionsElement; +use reth_primitives_traits::SignedTransaction; +use reth_provider::{ChainSpecProvider, HeaderProvider}; use reth_revm::database::StateProviderDatabase; -use reth_rpc_eth_api::{FromEthApiError, FromEvmError}; +use reth_rpc_eth_api::{ + helpers::{Call, EthTransactions, LoadPendingBlock}, + EthCallBundleApiServer, FromEthApiError, FromEvmError, RpcNodeCore, +}; +use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError, RpcInvalidTransactionError}; use reth_tasks::pool::BlockingTaskGuard; +use reth_transaction_pool::{PoolConsensusTx, PoolPooledTx, PoolTransaction, TransactionPool}; use revm::{ - db::CacheDB, + db::{CacheDB, DatabaseCommit, DatabaseRef}, primitives::{ResultAndState, TxEnv}, }; use revm_primitives::{EnvKzgSettings, EnvWithHandlerCfg, SpecId, MAX_BLOB_GAS_PER_BLOCK}; +use std::sync::Arc; -use reth_provider::{ChainSpecProvider, HeaderProvider}; -use reth_rpc_eth_api::{ - helpers::{Call, EthTransactions, LoadPendingBlock}, - EthCallBundleApiServer, -}; -use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError, RpcInvalidTransactionError}; /// `Eth` bundle implementation. pub struct EthBundle { /// All nested fields bundled together. @@ -46,7 +44,16 @@ impl EthBundle { impl EthBundle where - Eth: EthTransactions + LoadPendingBlock + Call + 'static, + Eth: EthTransactions< + Pool: TransactionPool< + Transaction: PoolTransaction< + Consensus: From, + Pooled = PooledTransactionsElement, + >, + >, + > + LoadPendingBlock + + Call + + 'static, { /// Simulates a bundle of transactions at the top of a given block number with the state of /// another (or the same) block. This can be used to simulate future blocks with the current @@ -59,11 +66,14 @@ where let EthCallBundle { txs, block_number, + coinbase, state_block_number, + timeout: _, timestamp, gas_limit, difficulty, base_fee, + .. } = bundle; if txs.is_empty() { return Err(EthApiError::InvalidParams( @@ -80,10 +90,10 @@ where let transactions = txs .into_iter() - .map(recover_raw_transaction) + .map(|tx| recover_raw_transaction::>(&tx)) .collect::, _>>()? 
.into_iter() - .map(|tx| tx.into_components()) + .map(|tx| tx.to_components()) .collect::>(); // Validate that the bundle does not contain more than MAX_BLOB_NUMBER_PER_BLOCK blob @@ -92,7 +102,7 @@ where .iter() .filter_map(|(tx, _)| { if let PooledTransactionsElement::BlobTransaction(tx) = tx { - Some(tx.transaction.tx.blob_gas()) + Some(tx.tx().tx().blob_gas()) } else { None } @@ -106,10 +116,14 @@ where .into()) } - let block_id: alloy_rpc_types::BlockId = state_block_number.into(); + let block_id: alloy_rpc_types_eth::BlockId = state_block_number.into(); // Note: the block number is considered the `parent` block: let (cfg, mut block_env, at) = self.eth_api().evm_env_at(block_id).await?; + if let Some(coinbase) = coinbase { + block_env.coinbase = coinbase; + } + // need to adjust the timestamp for the next block if let Some(timestamp) = timestamp { block_env.timestamp = U256::from(timestamp); @@ -121,8 +135,16 @@ where block_env.difficulty = U256::from(difficulty); } + // default to call gas limit unless user requests a smaller limit + block_env.gas_limit = U256::from(self.inner.eth_api.call_gas_limit()); if let Some(gas_limit) = gas_limit { - block_env.gas_limit = U256::from(gas_limit); + let gas_limit = U256::from(gas_limit); + if gas_limit > block_env.gas_limit { + return Err( + EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh).into() + ) + } + block_env.gas_limit = gas_limit; } if let Some(base_fee) = base_fee { @@ -130,12 +152,12 @@ where } else if cfg.handler_cfg.spec_id.is_enabled_in(SpecId::LONDON) { let parent_block = block_env.number.saturating_to::(); // here we need to fetch the _next_ block's basefee based on the parent block - let parent = LoadPendingBlock::provider(self.eth_api()) + let parent = RpcNodeCore::provider(self.eth_api()) .header_by_number(parent_block) .map_err(Eth::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(parent_block.into()))?; if let Some(base_fee) = parent.next_block_base_fee( - LoadPendingBlock::provider(self.eth_api()) + RpcNodeCore::provider(self.eth_api()) .chain_spec() .base_fee_params_at_block(parent_block), ) { @@ -156,7 +178,8 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, TxEnv::default()); let db = CacheDB::new(StateProviderDatabase::new(state)); - let initial_coinbase = DatabaseRef::basic_ref(&db, coinbase) + let initial_coinbase = db + .basic_ref(coinbase) .map_err(Eth::Error::from_eth_err)? .map(|acc| acc.balance) .unwrap_or_default(); @@ -166,7 +189,7 @@ where let mut total_gas_fess = U256::ZERO; let mut hasher = Keccak256::new(); - let mut evm = Call::evm_config(ð_api).evm_with_env(db, env); + let mut evm = eth_api.evm_config().evm_with_env(db, env); let mut results = Vec::with_capacity(transactions.len()); let mut transactions = transactions.into_iter().peekable(); @@ -175,19 +198,16 @@ where // Verify that the given blob data, commitments, and proofs are all valid for // this transaction. 
if let PooledTransactionsElement::BlobTransaction(ref tx) = tx { - tx.validate(EnvKzgSettings::Default.get()).map_err(|e| { + tx.tx().validate_blob(EnvKzgSettings::Default.get()).map_err(|e| { Eth::Error::from_eth_err(EthApiError::InvalidParams(e.to_string())) })?; } - let tx = tx.into_transaction(); + let tx: PoolConsensusTx = tx.into(); - hasher.update(tx.hash()); - let gas_price = tx - .effective_tip_per_gas(basefee) - .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow) - .map_err(Eth::Error::from_eth_err)?; - Call::evm_config(ð_api).fill_tx_env(evm.tx_mut(), &tx, signer); + hasher.update(*tx.tx_hash()); + let gas_price = tx.effective_gas_price(basefee); + eth_api.evm_config().fill_tx_env(evm.tx_mut(), &tx, signer); let ResultAndState { result, state } = evm.transact().map_err(Eth::Error::from_evm_err)?; @@ -224,7 +244,7 @@ where gas_price: U256::from(gas_price), gas_used, to_address: tx.to(), - tx_hash: tx.hash(), + tx_hash: *tx.tx_hash(), value, revert, }; @@ -265,10 +285,14 @@ where #[async_trait::async_trait] impl EthCallBundleApiServer for EthBundle where - Eth: EthTransactions + LoadPendingBlock + Call + 'static, + Eth: EthTransactions< + Pool: TransactionPool>, + > + LoadPendingBlock + + Call + + 'static, { async fn call_bundle(&self, request: EthCallBundle) -> RpcResult { - Self::call_bundle(self, request).await.map_err(Into::into) + self.call_bundle(request).await.map_err(Into::into) } } diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 6da46804005..1fe08d1c57f 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -3,14 +3,20 @@ use std::sync::Arc; -use alloy_network::AnyNetwork; +use alloy_consensus::BlockHeader; +use alloy_eips::BlockNumberOrTag; +use alloy_network::Ethereum; use alloy_primitives::U256; use derive_more::Deref; -use reth_primitives::BlockNumberOrTag; -use reth_provider::{BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider}; +use reth_primitives::NodePrimitives; +use reth_provider::{ + BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ProviderBlock, + ProviderReceipt, +}; use reth_rpc_eth_api::{ helpers::{EthSigner, SpawnBlocking}, - EthApiTypes, + node::RpcNodeCoreExt, + EthApiTypes, RpcNodeCore, }; use reth_rpc_eth_types::{ EthApiBuilderCtx, EthApiError, EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle, @@ -34,14 +40,20 @@ use crate::eth::EthTxBuilder; /// This way [`EthApi`] is not limited to [`jsonrpsee`] and can be used standalone or in other /// network handlers (for example ipc). #[derive(Deref)] -pub struct EthApi { +pub struct EthApi { /// All nested fields bundled together. + #[deref] pub(super) inner: Arc>, + /// Transaction RPC response builder. 
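The `hasher` fed above yields the response's bundle hash per the flashbots convention: keccak256 over the concatenation of the transaction hashes, in bundle order. A standalone sketch of that computation (illustrative, not the handler's exact code):

```rust
use alloy_primitives::{keccak256, Keccak256, B256};

/// Flashbots-style bundle hash: keccak256 over concatenated tx hashes.
fn bundle_hash(tx_hashes: &[B256]) -> B256 {
    let mut hasher = Keccak256::new();
    for hash in tx_hashes {
        hasher.update(hash);
    }
    hasher.finalize()
}

fn main() {
    let txs = [keccak256(b"tx-1"), keccak256(b"tx-2")];
    // Incremental hashing matches the one-shot digest of the same bytes.
    assert_eq!(bundle_hash(&txs[..1]), keccak256(txs[0]));
}
```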
+ pub tx_resp_builder: EthTxBuilder, } -impl Clone for EthApi { +impl Clone for EthApi +where + Provider: BlockReader, +{ fn clone(&self) -> Self { - Self { inner: self.inner.clone() } + Self { inner: self.inner.clone(), tx_resp_builder: EthTxBuilder } } } @@ -55,7 +67,7 @@ where provider: Provider, pool: Pool, network: Network, - eth_cache: EthStateCache, + eth_cache: EthStateCache, gas_oracle: GasPriceOracle, gas_cap: impl Into, max_simulate_blocks: u64, @@ -81,7 +93,7 @@ where proof_permits, ); - Self { inner: Arc::new(inner) } + Self { inner: Arc::new(inner), tx_resp_builder: EthTxBuilder } } } @@ -98,7 +110,12 @@ where ) -> Self where Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions, + Events: CanonStateSubscriptions< + Primitives: NodePrimitives< + Block = ProviderBlock, + Receipt = ProviderReceipt, + >, + >, { let blocking_task_pool = BlockingTaskPool::build().expect("failed to build blocking task pool"); @@ -119,22 +136,76 @@ where ctx.config.proof_permits, ); - Self { inner: Arc::new(inner) } + Self { inner: Arc::new(inner), tx_resp_builder: EthTxBuilder } } } impl EthApiTypes for EthApi where Self: Send + Sync, + Provider: BlockReader, { type Error = EthApiError; - // todo: replace with alloy_network::Ethereum - type NetworkTypes = AnyNetwork; + type NetworkTypes = Ethereum; type TransactionCompat = EthTxBuilder; + + fn tx_resp_builder(&self) -> &Self::TransactionCompat { + &self.tx_resp_builder + } +} + +impl RpcNodeCore for EthApi +where + Provider: BlockReader + Send + Sync + Clone + Unpin, + Pool: Send + Sync + Clone + Unpin, + Network: Send + Sync + Clone, + EvmConfig: Send + Sync + Clone + Unpin, +{ + type Provider = Provider; + type Pool = Pool; + type Evm = EvmConfig; + type Network = Network; + type PayloadBuilder = (); + + fn pool(&self) -> &Self::Pool { + self.inner.pool() + } + + fn evm_config(&self) -> &Self::Evm { + self.inner.evm_config() + } + + fn network(&self) -> &Self::Network { + self.inner.network() + } + + fn payload_builder(&self) -> &Self::PayloadBuilder { + &() + } + + fn provider(&self) -> &Self::Provider { + self.inner.provider() + } +} + +impl RpcNodeCoreExt + for EthApi +where + Provider: BlockReader + Send + Sync + Clone + Unpin, + Pool: Send + Sync + Clone + Unpin, + Network: Send + Sync + Clone, + EvmConfig: Send + Sync + Clone + Unpin, +{ + #[inline] + fn cache(&self) -> &EthStateCache, ProviderReceipt> { + self.inner.cache() + } } impl std::fmt::Debug for EthApi +where + Provider: BlockReader, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EthApi").finish_non_exhaustive() @@ -145,6 +216,7 @@ impl SpawnBlocking for EthApi where Self: Clone + Send + Sync + 'static, + Provider: BlockReader, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -164,7 +236,7 @@ where /// Container type `EthApi` #[allow(missing_debug_implementations)] -pub struct EthApiInner { +pub struct EthApiInner { /// The transaction pool. pool: Pool, /// The provider that can interact with the chain. @@ -172,9 +244,9 @@ pub struct EthApiInner { /// An interface to interact with the network network: Network, /// All configured Signers - signers: parking_lot::RwLock>>, + signers: parking_lot::RwLock>>>, /// The async cache frontend for eth related data - eth_cache: EthStateCache, + eth_cache: EthStateCache, /// The async gas oracle frontend for gas price suggestions gas_oracle: GasPriceOracle, /// Maximum gas limit for `eth_call` and call tracing RPC methods. 
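The new `RpcNodeCore` impl above is pure delegation: the outer `EthApi` is a cheap `Arc` handle and every component accessor forwards to the shared inner state. A stripped-down illustration of the pattern with toy types (not reth's actual traits):

```rust
use std::sync::Arc;

trait NodeCore {
    type Pool;
    fn pool(&self) -> &Self::Pool;
}

struct Inner<P> {
    pool: P,
}

struct Api<P> {
    inner: Arc<Inner<P>>,
}

impl<P> Clone for Api<P> {
    // Cloning the handle only bumps the refcount; no component is cloned.
    fn clone(&self) -> Self {
        Self { inner: self.inner.clone() }
    }
}

impl<P> NodeCore for Api<P> {
    type Pool = P;
    fn pool(&self) -> &Self::Pool {
        &self.inner.pool
    }
}

fn main() {
    let api = Api { inner: Arc::new(Inner { pool: vec![1u64, 2] }) };
    let handle = api.clone();
    assert_eq!(handle.pool().len(), 2);
}
```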
@@ -188,7 +260,7 @@ pub struct EthApiInner { /// The type that can spawn tasks which would otherwise block. task_spawner: Box, /// Cached pending block if any - pending_block: Mutex>, + pending_block: Mutex>>, /// A pool dedicated to CPU heavy blocking tasks. blocking_task_pool: BlockingTaskPool, /// Cache for block fees history @@ -210,7 +282,7 @@ where provider: Provider, pool: Pool, network: Network, - eth_cache: EthStateCache, + eth_cache: EthStateCache, gas_oracle: GasPriceOracle, gas_cap: impl Into, max_simulate_blocks: u64, @@ -228,7 +300,7 @@ where .header_by_number_or_tag(BlockNumberOrTag::Latest) .ok() .flatten() - .map(|header| header.number) + .map(|header| header.number()) .unwrap_or_default(), ); @@ -253,7 +325,10 @@ where } } -impl EthApiInner { +impl EthApiInner +where + Provider: BlockReader, +{ /// Returns a handle to data on disk. #[inline] pub const fn provider(&self) -> &Provider { @@ -262,13 +337,15 @@ impl EthApiInner &EthStateCache { + pub const fn cache(&self) -> &EthStateCache { &self.eth_cache } /// Returns a handle to the pending block. #[inline] - pub const fn pending_block(&self) -> &Mutex> { + pub const fn pending_block( + &self, + ) -> &Mutex>> { &self.pending_block } @@ -322,7 +399,9 @@ impl EthApiInner &parking_lot::RwLock>> { + pub const fn signers( + &self, + ) -> &parking_lot::RwLock>>> { &self.signers } @@ -353,13 +432,15 @@ impl EthApiInner + BlockReader + ChainSpecProvider + EvmEnvProvider + StateProviderFactory @@ -390,9 +474,8 @@ mod tests { provider: P, ) -> EthApi { let evm_config = EthEvmConfig::new(provider.chain_spec()); - let cache = EthStateCache::spawn(provider.clone(), Default::default(), evm_config.clone()); - let fee_history_cache = - FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); + let cache = EthStateCache::spawn(provider.clone(), Default::default()); + let fee_history_cache = FeeHistoryCache::new(FeeHistoryCacheConfig::default()); let gas_cap = provider.chain_spec().max_gas_limit(); EthApi::new( @@ -421,8 +504,8 @@ mod tests { let mut rng = generators::rng(); // Build mock data - let mut gas_used_ratios = Vec::new(); - let mut base_fees_per_gas = Vec::new(); + let mut gas_used_ratios = Vec::with_capacity(block_count as usize); + let mut base_fees_per_gas = Vec::with_capacity(block_count as usize); let mut last_header = None; let mut parent_hash = B256::default(); @@ -444,28 +527,27 @@ mod tests { last_header = Some(header.clone()); parent_hash = hash; - let mut transactions = vec![]; - for _ in 0..100 { + const TOTAL_TRANSACTIONS: usize = 100; + let mut transactions = Vec::with_capacity(TOTAL_TRANSACTIONS); + for _ in 0..TOTAL_TRANSACTIONS { let random_fee: u128 = rng.gen(); if let Some(base_fee_per_gas) = header.base_fee_per_gas { - let transaction = TransactionSigned { - transaction: reth_primitives::Transaction::Eip1559( - alloy_consensus::TxEip1559 { - max_priority_fee_per_gas: random_fee, - max_fee_per_gas: random_fee + base_fee_per_gas as u128, - ..Default::default() - }, - ), - ..Default::default() - }; + let transaction = TransactionSigned::new_unhashed( + reth_primitives::Transaction::Eip1559(alloy_consensus::TxEip1559 { + max_priority_fee_per_gas: random_fee, + max_fee_per_gas: random_fee + base_fee_per_gas as u128, + ..Default::default() + }), + Signature::test_signature(), + ); transactions.push(transaction); } else { - let transaction = TransactionSigned { - transaction: reth_primitives::Transaction::Legacy(Default::default()), - ..Default::default() - }; + let transaction = 
TransactionSigned::new_unhashed( + reth_primitives::Transaction::Legacy(Default::default()), + Signature::test_signature(), + ); transactions.push(transaction); } @@ -501,7 +583,7 @@ mod tests { /// Invalid block range #[tokio::test] async fn test_fee_history_empty() { - let response = as EthApiServer<_, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _>>::fee_history( &build_test_eth_api(NoopProvider::default()), U64::from(1), BlockNumberOrTag::Latest, @@ -523,7 +605,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _>>::fee_history( ð_api, U64::from(newest_block + 1), newest_block.into(), @@ -546,7 +628,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _>>::fee_history( ð_api, U64::from(1), (newest_block + 1000).into(), @@ -569,7 +651,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _>>::fee_history( ð_api, U64::from(0), newest_block.into(), diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index b136861c796..6441db70459 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -1,27 +1,23 @@ //! `eth_` `Filter` RPC handler implementation -use std::{ - collections::HashMap, - fmt, - iter::StepBy, - marker::PhantomData, - ops::RangeInclusive, - sync::Arc, - time::{Duration, Instant}, -}; - +use alloy_consensus::BlockHeader; use alloy_primitives::TxHash; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ BlockNumHash, Filter, FilterBlockOption, FilterChanges, FilterId, FilteredParams, Log, PendingTransactionFilterKind, }; use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; -use reth_node_api::EthApiTypes; -use reth_primitives::{Receipt, SealedBlockWithSenders, TransactionSignedEcRecovered}; -use reth_provider::{BlockIdReader, BlockReader, EvmEnvProvider, ProviderError}; -use reth_rpc_eth_api::{EthFilterApiServer, FullEthApiTypes, RpcTransaction, TransactionCompat}; +use reth_primitives::SealedBlockWithSenders; +use reth_provider::{ + BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, HeaderProvider, ProviderBlock, + ProviderError, ProviderReceipt, +}; +use reth_rpc_eth_api::{ + EthApiTypes, EthFilterApiServer, FullEthApiTypes, RpcNodeCoreExt, RpcTransaction, + TransactionCompat, +}; use reth_rpc_eth_types::{ logs_utils::{self, append_matching_block_logs, ProviderOrBlock}, EthApiError, EthFilterConfig, EthStateCache, EthSubscriptionIdProvider, @@ -30,36 +26,40 @@ use reth_rpc_server_types::{result::rpc_error_with_code, ToRpcResult}; use reth_rpc_types_compat::transaction::from_recovered; use reth_tasks::TaskSpawner; use reth_transaction_pool::{NewSubpoolTransactionStream, PoolTransaction, TransactionPool}; +use std::{ + collections::HashMap, + fmt, + iter::StepBy, + ops::RangeInclusive, + sync::Arc, + time::{Duration, Instant}, +}; use tokio::{ sync::{mpsc::Receiver, Mutex}, time::MissedTickBehavior, }; -use tracing::trace; +use tracing::{error, trace}; /// The maximum number of headers we read at once when handling a range 
filter. const MAX_HEADERS_RANGE: u64 = 1_000; // with ~530bytes per header this is ~500kb /// `Eth` filter RPC implementation. -pub struct EthFilter { +pub struct EthFilter { /// All nested fields bundled together - inner: Arc>>, - /// Assembles response data w.r.t. network. - _tx_resp_builder: PhantomData, + inner: Arc>, } -impl Clone for EthFilter +impl Clone for EthFilter where Eth: EthApiTypes, { fn clone(&self) -> Self { - Self { inner: self.inner.clone(), _tx_resp_builder: PhantomData } + Self { inner: self.inner.clone() } } } -impl EthFilter +impl EthFilter where - Provider: Send + Sync + 'static, - Pool: Send + Sync + 'static, Eth: EthApiTypes + 'static, { /// Creates a new, shareable instance. @@ -70,21 +70,13 @@ where /// See also [`EthFilterConfig`]. /// /// This also spawns a task that periodically clears stale filters. - pub fn new( - provider: Provider, - pool: Pool, - eth_cache: EthStateCache, - config: EthFilterConfig, - task_spawner: Box, - ) -> Self { + pub fn new(eth_api: Eth, config: EthFilterConfig, task_spawner: Box) -> Self { let EthFilterConfig { max_blocks_per_filter, max_logs_per_response, stale_filter_ttl } = config; let inner = EthFilterInner { - provider, + eth_api, active_filters: ActiveFilters::new(), - pool, id_provider: Arc::new(EthSubscriptionIdProvider::default()), - eth_cache, max_headers_range: MAX_HEADERS_RANGE, task_spawner, stale_filter_ttl, @@ -93,7 +85,7 @@ where max_logs_per_response: max_logs_per_response.unwrap_or(usize::MAX), }; - let eth_filter = Self { inner: Arc::new(inner), _tx_resp_builder: PhantomData }; + let eth_filter = Self { inner: Arc::new(inner) }; let this = eth_filter.clone(); eth_filter.inner.task_spawner.spawn_critical( @@ -141,19 +133,26 @@ where } } -impl EthFilter +impl EthFilter where - Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, - Pool: TransactionPool + 'static, - ::Transaction: 'static, - Eth: FullEthApiTypes, + Eth: FullEthApiTypes + RpcNodeCoreExt, { + /// Access the underlying provider. + fn provider(&self) -> &Eth::Provider { + self.inner.eth_api.provider() + } + + /// Access the underlying pool. + fn pool(&self) -> &Eth::Pool { + self.inner.eth_api.pool() + } + /// Returns all the filter changes for the given id, if any pub async fn filter_changes( &self, id: FilterId, ) -> Result>, EthFilterError> { - let info = self.inner.provider.chain_info()?; + let info = self.provider().chain_info()?; let best_number = info.best_number; // start_block is the block from which we should start fetching changes, the next block from @@ -184,7 +183,7 @@ where // [start_block..best_block] let end_block = best_number + 1; let block_hashes = - self.inner.provider.canonical_hashes_range(start_block, end_block).map_err( + self.provider().canonical_hashes_range(start_block, end_block).map_err( |_| EthApiError::HeaderRangeNotFound(start_block.into(), end_block.into()), )?; Ok(FilterChanges::Hashes(block_hashes)) @@ -193,11 +192,11 @@ where let (from_block_number, to_block_number) = match filter.block_option { FilterBlockOption::Range { from_block, to_block } => { let from = from_block - .map(|num| self.inner.provider.convert_block_number(num)) + .map(|num| self.provider().convert_block_number(num)) .transpose()? .flatten(); let to = to_block - .map(|num| self.inner.provider.convert_block_number(num)) + .map(|num| self.provider().convert_block_number(num)) .transpose()? 
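With provider, pool, and cache now all reachable through the `Eth` handle, wiring the filter module reduces to a single call. A hedged downstream-usage sketch, assuming the usual `reth_rpc` re-export of `EthFilter`:

```rust
use reth_rpc::EthFilter;
use reth_rpc_eth_api::EthApiTypes;
use reth_rpc_eth_types::EthFilterConfig;
use reth_tasks::TokioTaskExecutor;

// `eth_api` is any handle satisfying the module's bounds; the constructor
// itself spawns the periodic stale-filter eviction task.
fn install_filter_module<Eth: EthApiTypes + 'static>(eth_api: Eth) -> EthFilter<Eth> {
    EthFilter::new(eth_api, EthFilterConfig::default(), Box::new(TokioTaskExecutor::default()))
}
```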
.flatten(); logs_utils::get_filter_block_range(from, to, start_block, info) @@ -241,12 +240,9 @@ where } #[async_trait] -impl EthFilterApiServer> - for EthFilter +impl EthFilterApiServer> for EthFilter where - Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, - Pool: TransactionPool + 'static, - Eth: FullEthApiTypes + 'static, + Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, { /// Handler for `eth_newFilter` async fn new_filter(&self, filter: Filter) -> RpcResult { @@ -271,14 +267,16 @@ where let transaction_kind = match kind.unwrap_or_default() { PendingTransactionFilterKind::Hashes => { - let receiver = self.inner.pool.pending_transactions_listener(); + let receiver = self.pool().pending_transactions_listener(); let pending_txs_receiver = PendingTransactionsReceiver::new(receiver); FilterKind::PendingTransaction(PendingTransactionKind::Hashes(pending_txs_receiver)) } PendingTransactionFilterKind::Full => { - let stream = self.inner.pool.new_pending_pool_transactions_listener(); - let full_txs_receiver = - FullTransactionsReceiver::<_, Eth::TransactionCompat>::new(stream); + let stream = self.pool().new_pending_pool_transactions_listener(); + let full_txs_receiver = FullTransactionsReceiver::new( + stream, + self.inner.eth_api.tx_resp_builder().clone(), + ); FilterKind::PendingTransaction(PendingTransactionKind::FullTransaction(Arc::new( full_txs_receiver, ))) @@ -331,7 +329,7 @@ where } } -impl std::fmt::Debug for EthFilter +impl std::fmt::Debug for EthFilter where Eth: EthApiTypes, { @@ -342,21 +340,17 @@ where /// Container type `EthFilter` #[derive(Debug)] -struct EthFilterInner { - /// The transaction pool. - pool: Pool, - /// The provider that can interact with the chain. - provider: Provider, +struct EthFilterInner { + /// Inner `eth` API implementation. + eth_api: Eth, /// All currently installed filters. - active_filters: ActiveFilters, + active_filters: ActiveFilters>, /// Provides ids to identify filters id_provider: Arc, /// Maximum number of blocks that could be scanned per filter max_blocks_per_filter: u64, /// Maximum number of logs that can be returned in a response max_logs_per_response: usize, - /// The async cache frontend for eth related data - eth_cache: EthStateCache, /// maximum number of headers to read at once for range filter max_headers_range: u64, /// The type that can spawn tasks. @@ -365,11 +359,22 @@ struct EthFilterInner { stale_filter_ttl: Duration, } -impl EthFilterInner +impl EthFilterInner where - Provider: BlockReader + BlockIdReader + EvmEnvProvider + 'static, - Pool: TransactionPool + 'static, + Eth: RpcNodeCoreExt + EthApiTypes, { + /// Access the underlying provider. + fn provider(&self) -> &Eth::Provider { + self.eth_api.provider() + } + + /// Access the underlying [`EthStateCache`]. + fn eth_cache( + &self, + ) -> &EthStateCache, ProviderReceipt> { + self.eth_api.cache() + } + /// Returns logs matching given filter object. async fn logs_for_filter(&self, filter: Filter) -> Result, EthFilterError> { match filter.block_option { @@ -377,18 +382,18 @@ where // for all matching logs in the block // get the block header with the hash let header = self - .provider + .provider() .header_by_hash_or_number(block_hash.into())? 
.ok_or_else(|| ProviderError::HeaderNotFound(block_hash.into()))?; - let block_num_hash = BlockNumHash::new(header.number, block_hash); + let block_num_hash = BlockNumHash::new(header.number(), block_hash); // we also need to ensure that the receipts are available and return an error if // not, in case the block has been reorged let (receipts, maybe_block) = self .receipts_and_maybe_block( &block_num_hash, - self.provider.chain_info()?.best_number, + self.provider().chain_info()?.best_number, ) .await? .ok_or(EthApiError::HeaderNotFound(block_hash.into()))?; @@ -397,29 +402,29 @@ where append_matching_block_logs( &mut all_logs, maybe_block - .map(|b| ProviderOrBlock::Block(b)) - .unwrap_or_else(|| ProviderOrBlock::Provider(&self.provider)), + .map(ProviderOrBlock::Block) + .unwrap_or_else(|| ProviderOrBlock::Provider(self.provider())), &FilteredParams::new(Some(filter)), block_num_hash, &receipts, false, - header.timestamp, + header.timestamp(), )?; Ok(all_logs) } FilterBlockOption::Range { from_block, to_block } => { // compute the range - let info = self.provider.chain_info()?; + let info = self.provider().chain_info()?; // we start at the most recent block if unset in filter let start_block = info.best_number; let from = from_block - .map(|num| self.provider.convert_block_number(num)) + .map(|num| self.provider().convert_block_number(num)) .transpose()? .flatten(); let to = to_block - .map(|num| self.provider.convert_block_number(num)) + .map(|num| self.provider().convert_block_number(num)) .transpose()? .flatten(); let (from_block_number, to_block_number) = @@ -431,8 +436,11 @@ where } /// Installs a new filter and returns the new identifier. - async fn install_filter(&self, kind: FilterKind) -> RpcResult<FilterId> { - let last_poll_block_number = self.provider.best_block_number().to_rpc_result()?; + async fn install_filter( + &self, + kind: FilterKind<RpcTransaction<Eth::NetworkTypes>>, + ) -> RpcResult<FilterId> { + let last_poll_block_number = self.provider().best_block_number().to_rpc_result()?; let id = FilterId::from(self.id_provider.next_id()); let mut filters = self.active_filters.inner.lock().await; filters.insert( @@ -480,46 +488,48 @@ where for (from, to) in BlockRangeInclusiveIter::new(from_block..=to_block, self.max_headers_range) { - let headers = self.provider.headers_range(from..=to)?; + let headers = self.provider().headers_range(from..=to)?; for (idx, header) in headers.iter().enumerate() { // only if filter matches - if FilteredParams::matches_address(header.logs_bloom, &address_filter) && - FilteredParams::matches_topics(header.logs_bloom, &topics_filter) + if FilteredParams::matches_address(header.logs_bloom(), &address_filter) && + FilteredParams::matches_topics(header.logs_bloom(), &topics_filter) { // these are consecutive headers, so we can use the parent hash of the next // block to get the current header's hash let block_hash = match headers.get(idx + 1) { - Some(parent) => parent.parent_hash, + Some(parent) => parent.parent_hash(), None => self - .provider - .block_hash(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?, + .provider() + .block_hash(header.number())? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?, }; - let num_hash = BlockNumHash::new(header.number, block_hash); + let num_hash = BlockNumHash::new(header.number(), block_hash); if let Some((receipts, maybe_block)) = self.receipts_and_maybe_block(&num_hash, chain_info.best_number).await?
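`BlockRangeInclusiveIter` walks the requested range in windows of at most `max_headers_range` (1_000) blocks so each header read stays bounded. A toy equivalent of that windowing:

```rust
/// Split an inclusive block range into windows of at most `step` blocks.
fn windows(from: u64, to: u64, step: u64) -> Vec<(u64, u64)> {
    let mut out = Vec::new();
    let mut start = from;
    while start <= to {
        let end = (start + step - 1).min(to);
        out.push((start, end));
        start = end + 1;
    }
    out
}

fn main() {
    assert_eq!(windows(1, 2_500, 1_000), vec![(1, 1_000), (1_001, 2_000), (2_001, 2_500)]);
}
```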
{ append_matching_block_logs( &mut all_logs, maybe_block - .map(|block| ProviderOrBlock::Block(block)) - .unwrap_or_else(|| ProviderOrBlock::Provider(&self.provider)), + .map(ProviderOrBlock::Block) + .unwrap_or_else(|| ProviderOrBlock::Provider(self.provider())), &filter_params, num_hash, &receipts, false, - header.timestamp, + header.timestamp(), )?; // size check but only if range is multiple blocks, so we always return all // logs of a single block let is_multi_block_range = from_block != to_block; if is_multi_block_range && all_logs.len() > self.max_logs_per_response { - return Err(EthFilterError::QueryExceedsMaxResults( - self.max_logs_per_response, - )) + return Err(EthFilterError::QueryExceedsMaxResults { + max_logs: self.max_logs_per_response, + from_block, + to_block: num_hash.number.saturating_sub(1), + }); } } } @@ -534,17 +544,22 @@ where &self, block_num_hash: &BlockNumHash, best_number: u64, - ) -> Result>, Option>)>, EthFilterError> - { + ) -> Result< + Option<( + Arc>>, + Option>>>, + )>, + EthFilterError, + > { // The last 4 blocks are most likely cached, so we can just fetch them let cached_range = best_number.saturating_sub(4)..=best_number; let receipts_block = if cached_range.contains(&block_num_hash.number) { - self.eth_cache + self.eth_cache() .get_block_and_receipts(block_num_hash.hash) .await? .map(|(b, r)| (r, Some(b))) } else { - self.eth_cache.get_receipts(block_num_hash.hash).await?.map(|r| (r, None)) + self.eth_cache().get_receipts(block_num_hash.hash).await?.map(|r| (r, None)) }; Ok(receipts_block) } @@ -603,29 +618,34 @@ impl PendingTransactionsReceiver { #[derive(Debug, Clone)] struct FullTransactionsReceiver { txs_stream: Arc>>, - _tx_resp_builder: PhantomData, + tx_resp_builder: TxCompat, } impl FullTransactionsReceiver where T: PoolTransaction + 'static, - TxCompat: TransactionCompat, + TxCompat: TransactionCompat, { /// Creates a new `FullTransactionsReceiver` encapsulating the provided transaction stream. - fn new(stream: NewSubpoolTransactionStream) -> Self { - Self { txs_stream: Arc::new(Mutex::new(stream)), _tx_resp_builder: PhantomData } + fn new(stream: NewSubpoolTransactionStream, tx_resp_builder: TxCompat) -> Self { + Self { txs_stream: Arc::new(Mutex::new(stream)), tx_resp_builder } } /// Returns all new pending transactions received since the last poll. - async fn drain(&self) -> FilterChanges - where - T: PoolTransaction>, - { + async fn drain(&self) -> FilterChanges { let mut pending_txs = Vec::new(); let mut prepared_stream = self.txs_stream.lock().await; while let Ok(tx) = prepared_stream.try_recv() { - pending_txs.push(from_recovered::(tx.transaction.to_recovered_transaction())) + match from_recovered(tx.transaction.to_consensus(), &self.tx_resp_builder) { + Ok(tx) => pending_txs.push(tx), + Err(err) => { + error!(target: "rpc", + %err, + "Failed to fill txn with block context" + ); + } + } } FilterChanges::Transactions(pending_txs) } @@ -641,8 +661,8 @@ trait FullTransactionsFilter: fmt::Debug + Send + Sync + Unpin + 'static { impl FullTransactionsFilter for FullTransactionsReceiver where - T: PoolTransaction> + 'static, - TxCompat: TransactionCompat + 'static, + T: PoolTransaction + 'static, + TxCompat: TransactionCompat + 'static, { async fn drain(&self) -> FilterChanges { Self::drain(self).await @@ -716,8 +736,15 @@ pub enum EthFilterError { #[error("query exceeds max block range {0}")] QueryExceedsMaxBlocks(u64), /// Query result is too large. 
- #[error("query exceeds max results {0}")] - QueryExceedsMaxResults(usize), + #[error("query exceeds max results {max_logs}, retry with the range {from_block}-{to_block}")] + QueryExceedsMaxResults { + /// Maximum number of logs allowed per response + max_logs: usize, + /// Start block of the suggested retry range + from_block: u64, + /// End block of the suggested retry range (last successfully processed block) + to_block: u64, + }, /// Error serving request in `eth_` namespace. #[error(transparent)] EthAPIError(#[from] EthApiError), @@ -739,7 +766,7 @@ impl From for jsonrpsee::types::error::ErrorObject<'static> { EthFilterError::EthAPIError(err) => err.into(), err @ (EthFilterError::InvalidBlockRangeParams | EthFilterError::QueryExceedsMaxBlocks(_) | - EthFilterError::QueryExceedsMaxResults(_)) => { + EthFilterError::QueryExceedsMaxResults { .. }) => { rpc_error_with_code(jsonrpsee::types::error::INVALID_PARAMS_CODE, err.to_string()) } } diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index b2ff30b88f2..51a76f4e98f 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -1,13 +1,15 @@ //! Contains RPC handler implementations specific to blocks. -use alloy_rpc_types::{AnyTransactionReceipt, BlockId}; +use alloy_consensus::BlockHeader; +use alloy_rpc_types_eth::{BlockId, TransactionReceipt}; use reth_primitives::TransactionMeta; -use reth_provider::{BlockReaderIdExt, HeaderProvider}; +use reth_primitives_traits::{BlockBody, SignedTransaction}; +use reth_provider::BlockReader; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, - RpcReceipt, + RpcNodeCoreExt, RpcReceipt, }; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; use crate::EthApi; @@ -15,15 +17,14 @@ impl EthBlocks for EthApi, + NetworkTypes: alloy_network::Network, + Provider: BlockReader< + Transaction = reth_primitives::TransactionSigned, + Receipt = reth_primitives::Receipt, + >, >, - Provider: HeaderProvider, + Provider: BlockReader, { - #[inline] - fn provider(&self) -> impl HeaderProvider { - self.inner.provider() - } - async fn block_receipts( &self, block_id: BlockId, @@ -32,22 +33,21 @@ where Self: LoadReceipt, { if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? 
{ - let block_number = block.number; - let base_fee = block.base_fee_per_gas; + let block_number = block.number(); + let base_fee = block.base_fee_per_gas(); let block_hash = block.hash(); - let excess_blob_gas = block.excess_blob_gas; - let timestamp = block.timestamp; - let block = block.unseal(); + let excess_blob_gas = block.excess_blob_gas(); + let timestamp = block.timestamp(); return block .body - .transactions - .into_iter() + .transactions() + .iter() .zip(receipts.iter()) .enumerate() .map(|(idx, (tx, receipt))| { let meta = TransactionMeta { - tx_hash: tx.hash, + tx_hash: *tx.tx_hash(), index: idx as u64, block_hash, block_number, @@ -55,8 +55,7 @@ where excess_blob_gas, timestamp, }; - - ReceiptBuilder::new(&tx, meta, receipt, &receipts) + EthReceiptBuilder::new(tx, meta, receipt, &receipts) .map(|builder| builder.build()) }) .collect::, Self::Error>>() @@ -69,16 +68,7 @@ where impl LoadBlock for EthApi where - Self: LoadPendingBlock + SpawnBlocking, - Provider: BlockReaderIdExt, + Self: LoadPendingBlock + SpawnBlocking + RpcNodeCoreExt, + Provider: BlockReader, { - #[inline] - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } } diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index 396bf9bd08e..2620165b907 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -1,20 +1,26 @@ //! Contains RPC handler implementations specific to endpoints that call/execute within evm. -use reth_evm::ConfigureEvm; -use reth_primitives::Header; -use reth_rpc_eth_api::helpers::{Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}; - use crate::EthApi; +use alloy_consensus::Header; +use reth_evm::ConfigureEvm; +use reth_provider::{BlockReader, ProviderHeader}; +use reth_rpc_eth_api::{ + helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, + FullEthApiTypes, +}; -impl EthCall for EthApi where - Self: Call + LoadPendingBlock +impl EthCall for EthApi +where + Self: EstimateCall + LoadPendingBlock + FullEthApiTypes, + Provider: BlockReader, { } impl Call for EthApi where - Self: LoadState + SpawnBlocking, + Self: LoadState>> + SpawnBlocking, EvmConfig: ConfigureEvm
, + Provider: BlockReader, { #[inline] fn call_gas_limit(&self) -> u64 { @@ -25,9 +31,11 @@ where fn max_simulate_blocks(&self) -> u64 { self.inner.max_simulate_blocks() } +} - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() - } +impl EstimateCall for EthApi +where + Self: Call, + Provider: BlockReader, +{ } diff --git a/crates/rpc/rpc/src/eth/helpers/fees.rs b/crates/rpc/rpc/src/eth/helpers/fees.rs index a792f728951..045d6dcb545 100644 --- a/crates/rpc/rpc/src/eth/helpers/fees.rs +++ b/crates/rpc/rpc/src/eth/helpers/fees.rs @@ -1,37 +1,31 @@ //! Contains RPC handler implementations for fee history. -use reth_chainspec::EthereumHardforks; -use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider}; - +use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_provider::{ + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory, +}; use reth_rpc_eth_api::helpers::{EthFees, LoadBlock, LoadFee}; -use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; +use reth_rpc_eth_types::{FeeHistoryCache, GasPriceOracle}; use crate::EthApi; -impl EthFees for EthApi where - Self: LoadFee +impl EthFees for EthApi +where + Self: LoadFee, + Provider: BlockReader, { } impl LoadFee for EthApi where - Self: LoadBlock, - Provider: BlockReaderIdExt + HeaderProvider + ChainSpecProvider, + Self: LoadBlock, + Provider: BlockReaderIdExt + + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, { #[inline] - fn provider( - &self, - ) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - #[inline] - fn gas_oracle(&self) -> &GasPriceOracle { + fn gas_oracle(&self) -> &GasPriceOracle { self.inner.gas_oracle() } diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 69d55f58bfa..2af82ef511b 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -1,48 +1,126 @@ //! Support for building a pending block with transactions from local view of mempool. -use reth_chainspec::EthereumHardforks; +use alloy_consensus::{constants::EMPTY_WITHDRAWALS, Header, EMPTY_OMMER_ROOT_HASH}; +use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE}; +use alloy_primitives::U256; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; -use reth_primitives::Header; -use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; -use reth_rpc_eth_api::helpers::{LoadPendingBlock, SpawnBlocking}; +use reth_primitives::{ + logs_bloom, + proofs::{calculate_receipt_root_no_memo, calculate_transaction_root}, + BlockBody, Receipt, +}; +use reth_provider::{ + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ProviderBlock, + ProviderReceipt, ProviderTx, StateProviderFactory, +}; +use reth_rpc_eth_api::{ + helpers::{LoadPendingBlock, SpawnBlocking}, + RpcNodeCore, +}; use reth_rpc_eth_types::PendingBlock; -use reth_transaction_pool::TransactionPool; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use revm_primitives::{BlockEnv, B256}; use crate::EthApi; impl LoadPendingBlock for EthApi where - Self: SpawnBlocking, - Provider: BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider - + StateProviderFactory, - Pool: TransactionPool, - EvmConfig: ConfigureEvm
, + Self: SpawnBlocking< + NetworkTypes: alloy_network::Network, + > + RpcNodeCore< + Provider: BlockReaderIdExt< + Transaction = reth_primitives::TransactionSigned, + Block = reth_primitives::Block, + Receipt = reth_primitives::Receipt, + Header = reth_primitives::Header, + > + EvmEnvProvider + + ChainSpecProvider + + StateProviderFactory, + Pool: TransactionPool< + Transaction: PoolTransaction>, + >, + Evm: ConfigureEvm
>, + >, + Provider: BlockReader, { #[inline] - fn provider( + fn pending_block( &self, - ) -> impl BlockReaderIdExt - + EvmEnvProvider - + ChainSpecProvider - + StateProviderFactory { - self.inner.provider() + ) -> &tokio::sync::Mutex< + Option, ProviderReceipt>>, + > { + self.inner.pending_block() } - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } + fn assemble_block( + &self, + block_env: &BlockEnv, + parent_hash: revm_primitives::B256, + state_root: revm_primitives::B256, + transactions: Vec>, + receipts: &[ProviderReceipt], + ) -> reth_provider::ProviderBlock { + let chain_spec = self.provider().chain_spec(); - #[inline] - fn pending_block(&self) -> &tokio::sync::Mutex> { - self.inner.pending_block() + let transactions_root = calculate_transaction_root(&transactions); + let receipts_root = calculate_receipt_root_no_memo(&receipts.iter().collect::>()); + + let logs_bloom = logs_bloom(receipts.iter().flat_map(|r| &r.logs)); + + let timestamp = block_env.timestamp.to::(); + let is_shanghai = chain_spec.is_shanghai_active_at_timestamp(timestamp); + let is_cancun = chain_spec.is_cancun_active_at_timestamp(timestamp); + let is_prague = chain_spec.is_prague_active_at_timestamp(timestamp); + + let header = Header { + parent_hash, + ommers_hash: EMPTY_OMMER_ROOT_HASH, + beneficiary: block_env.coinbase, + state_root, + transactions_root, + receipts_root, + withdrawals_root: is_shanghai.then_some(EMPTY_WITHDRAWALS), + logs_bloom, + timestamp: block_env.timestamp.to::(), + mix_hash: block_env.prevrandao.unwrap_or_default(), + nonce: BEACON_NONCE.into(), + base_fee_per_gas: Some(block_env.basefee.to::()), + number: block_env.number.to::(), + gas_limit: block_env.gas_limit.to::(), + difficulty: U256::ZERO, + gas_used: receipts.last().map(|r| r.cumulative_gas_used).unwrap_or_default(), + blob_gas_used: is_cancun.then(|| { + transactions.iter().map(|tx| tx.blob_gas_used().unwrap_or_default()).sum::() + }), + excess_blob_gas: block_env.get_blob_excess_gas().map(Into::into), + extra_data: Default::default(), + parent_beacon_block_root: is_cancun.then_some(B256::ZERO), + requests_hash: is_prague.then_some(EMPTY_REQUESTS_HASH), + target_blobs_per_block: None, + }; + + // seal the block + reth_primitives::Block { + header, + body: BlockBody { transactions, ommers: vec![], withdrawals: None }, + } } - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm
{ - self.inner.evm_config() + fn assemble_receipt( + &self, + tx: &ProviderTx, + result: revm_primitives::ExecutionResult, + cumulative_gas_used: u64, + ) -> reth_provider::ProviderReceipt { + #[allow(clippy::needless_update)] + Receipt { + tx_type: tx.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.into_logs().into_iter().map(Into::into).collect(), + ..Default::default() + } } } diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 2ac36094494..12fbf095734 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,20 +1,20 @@ //! Builds an RPC receipt response w.r.t. data layout of network. use reth_primitives::{Receipt, TransactionMeta, TransactionSigned}; -use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; -use reth_rpc_eth_types::{EthApiError, EthStateCache, ReceiptBuilder}; +use reth_provider::{BlockReader, ReceiptProvider, TransactionsProvider}; +use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcNodeCoreExt, RpcReceipt}; +use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; use crate::EthApi; impl LoadReceipt for EthApi where - Self: Send + Sync, + Self: RpcNodeCoreExt< + Provider: TransactionsProvider + + ReceiptProvider, + >, + Provider: BlockReader, { - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - async fn build_transaction_receipt( &self, tx: TransactionSigned, @@ -30,6 +30,6 @@ where .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::HeaderNotFound(hash.into()))?; - Ok(ReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) + Ok(EthReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) } } diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index a5818aa494f..3528a966e3f 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -6,16 +6,18 @@ use crate::EthApi; use alloy_dyn_abi::TypedData; use alloy_eips::eip2718::Decodable2718; use alloy_network::{eip2718::Encodable2718, EthereumWallet, TransactionBuilder}; -use alloy_primitives::{eip191_hash_message, Address, B256}; +use alloy_primitives::{eip191_hash_message, Address, PrimitiveSignature as Signature, B256}; use alloy_rpc_types_eth::TransactionRequest; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; -use reth_primitives::{Signature, TransactionSigned}; +use reth_provider::BlockReader; use reth_rpc_eth_api::helpers::{signer::Result, AddDevSigners, EthSigner}; use reth_rpc_eth_types::SignError; impl AddDevSigners for EthApi +where + Provider: BlockReader, { fn with_dev_accounts(&self) { *self.inner.signers().write() = DevSigner::random_signers(20) @@ -32,15 +34,15 @@ pub struct DevSigner { #[allow(dead_code)] impl DevSigner { /// Generates a random dev signer which satisfies [`EthSigner`] trait - pub fn random() -> Box { + pub fn random() -> Box> { let mut signers = Self::random_signers(1); signers.pop().expect("expect to generate at least one signer") } /// Generates provided number of random dev signers /// which satisfy [`EthSigner`] trait - pub fn random_signers(num: u32) -> Vec> { - let mut signers = Vec::new(); + pub fn random_signers(num: u32) -> Vec + 'static>> { + let mut signers = Vec::with_capacity(num as usize); for _ in 0..num { let sk = PrivateKeySigner::random_with(&mut rand::thread_rng()); @@ -48,7 +50,7 @@ impl DevSigner { let addresses = vec![address]; let accounts = 
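`DevSigner` wraps `alloy_signer_local::PrivateKeySigner`, so message signing boils down to EIP-191 prefix hashing plus a prehash signature. A small sketch of that flow using alloy's sync signer API (method names as in current alloy; treat as illustrative):

```rust
use alloy_primitives::eip191_hash_message;
use alloy_signer::SignerSync;
use alloy_signer_local::PrivateKeySigner;

fn main() {
    let signer = PrivateKeySigner::random();
    // `personal_sign`-style flow: prefix-hash the message, sign the prehash.
    let hash = eip191_hash_message(b"Test message");
    let sig = signer.sign_hash_sync(&hash).expect("signing a valid hash succeeds");
    // The address recovered from the prehash round-trips to the signer.
    assert_eq!(sig.recover_address_from_prehash(&hash).unwrap(), signer.address());
}
```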
HashMap::from([(address, sk)]); - signers.push(Box::new(Self { addresses, accounts }) as Box); + signers.push(Box::new(Self { addresses, accounts }) as Box>); } signers } @@ -64,7 +66,7 @@ impl DevSigner { } #[async_trait::async_trait] -impl EthSigner for DevSigner { +impl EthSigner for DevSigner { fn accounts(&self) -> Vec
{ self.addresses.clone() } @@ -80,11 +82,7 @@ impl EthSigner for DevSigner { self.sign_hash(hash, address) } - async fn sign_transaction( - &self, - request: TransactionRequest, - address: &Address, - ) -> Result { + async fn sign_transaction(&self, request: TransactionRequest, address: &Address) -> Result { // create local signer wallet from signing key let signer = self.accounts.get(address).ok_or(SignError::NoAccount)?.clone(); let wallet = EthereumWallet::from(signer); @@ -95,7 +93,7 @@ impl EthSigner for DevSigner { // decode transaction into signed transaction type let encoded = txn_envelope.encoded_2718(); - let txn_signed = TransactionSigned::decode_2718(&mut encoded.as_ref()) + let txn_signed = T::decode_2718(&mut encoded.as_ref()) .map_err(|_| SignError::InvalidTransactionRequest)?; Ok(txn_signed) @@ -109,8 +107,10 @@ impl EthSigner for DevSigner { #[cfg(test)] mod tests { - use alloy_primitives::{Bytes, Parity, U256}; + use alloy_consensus::Transaction; + use alloy_primitives::{Bytes, U256}; use alloy_rpc_types_eth::TransactionInput; + use reth_primitives::TransactionSigned; use revm_primitives::TxKind; use super::*; @@ -193,7 +193,9 @@ mod tests { let data: TypedData = serde_json::from_str(eip_712_example).unwrap(); let signer = build_signer(); let from = *signer.addresses.first().unwrap(); - let sig = signer.sign_typed_data(from, &data).unwrap(); + let sig = + EthSigner::::sign_typed_data(&signer, from, &data) + .unwrap(); let expected = Signature::new( U256::from_str_radix( "5318aee9942b84885761bb20e768372b76e7ee454fc4d39b59ce07338d15a06c", @@ -205,7 +207,7 @@ mod tests { 16, ) .unwrap(), - Parity::Parity(false), + false, ); assert_eq!(sig, expected) } @@ -215,7 +217,9 @@ mod tests { let message = b"Test message"; let signer = build_signer(); let from = *signer.addresses.first().unwrap(); - let sig = signer.sign(from, message).await.unwrap(); + let sig = EthSigner::::sign(&signer, from, message) + .await + .unwrap(); let expected = Signature::new( U256::from_str_radix( "54313da7432e4058b8d22491b2e7dbb19c7186c35c24155bec0820a8a2bfe0c1", @@ -227,7 +231,7 @@ mod tests { 16, ) .unwrap(), - Parity::Parity(true), + true, ); assert_eq!(sig, expected) } @@ -251,7 +255,8 @@ mod tests { nonce: Some(0u64), ..Default::default() }; - let txn_signed = signer.sign_transaction(request, &from).await; + let txn_signed: std::result::Result = + signer.sign_transaction(request, &from).await; assert!(txn_signed.is_ok()); assert_eq!(Bytes::from(message.to_vec()), txn_signed.unwrap().input().0); diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs index 92445bf5ed1..41c4a5b07c3 100644 --- a/crates/rpc/rpc/src/eth/helpers/spec.rs +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -1,38 +1,33 @@ use alloy_primitives::U256; use reth_chainspec::EthereumHardforks; use reth_network_api::NetworkInfo; -use reth_provider::{BlockNumReader, ChainSpecProvider, StageCheckpointReader}; -use reth_rpc_eth_api::helpers::EthApiSpec; -use reth_transaction_pool::TransactionPool; +use reth_provider::{ + BlockNumReader, BlockReader, ChainSpecProvider, ProviderTx, StageCheckpointReader, +}; +use reth_rpc_eth_api::{helpers::EthApiSpec, RpcNodeCore}; use crate::EthApi; impl EthApiSpec for EthApi where - Pool: TransactionPool + 'static, - Provider: ChainSpecProvider - + BlockNumReader - + StageCheckpointReader - + 'static, - Network: NetworkInfo + 'static, - EvmConfig: Send + Sync, + Self: RpcNodeCore< + Provider: ChainSpecProvider + + BlockNumReader + + StageCheckpointReader, + Network: 
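The test updates above track alloy's move from the `Parity` enum to a plain `bool` recovery parity on `PrimitiveSignature`. A minimal sketch of constructing one under the new API:

```rust
use alloy_primitives::{PrimitiveSignature as Signature, U256};

fn main() {
    // r, s, and a bool parity; previously the last argument was
    // `Parity::Parity(false)`.
    let sig = Signature::new(U256::from(1u64), U256::from(2u64), false);
    assert_eq!(sig.r(), U256::from(1u64));
    assert_eq!(sig.s(), U256::from(2u64));
}
```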
NetworkInfo, + >, + Provider: BlockReader, { - fn provider( - &self, - ) -> impl ChainSpecProvider + BlockNumReader + StageCheckpointReader - { - self.inner.provider() - } - - fn network(&self) -> impl NetworkInfo { - self.inner.network() - } + type Transaction = ProviderTx; fn starting_block(&self) -> U256 { self.inner.starting_block() } - fn signers(&self) -> &parking_lot::RwLock>> { + fn signers( + &self, + ) -> &parking_lot::RwLock>>> + { self.inner.signers() } } diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 8a35842798b..99d2856ad83 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -1,17 +1,20 @@ //! Contains RPC handler implementations specific to state. use reth_chainspec::EthereumHardforks; -use reth_provider::{ChainSpecProvider, StateProviderFactory}; +use reth_provider::{BlockReader, ChainSpecProvider, StateProviderFactory}; use reth_transaction_pool::TransactionPool; -use reth_rpc_eth_api::helpers::{EthState, LoadState, SpawnBlocking}; -use reth_rpc_eth_types::EthStateCache; +use reth_rpc_eth_api::{ + helpers::{EthState, LoadState, SpawnBlocking}, + RpcNodeCoreExt, +}; use crate::EthApi; impl EthState for EthApi where Self: LoadState + SpawnBlocking, + Provider: BlockReader, { fn max_proof_window(&self) -> u64 { self.inner.eth_proof_window() @@ -20,36 +23,24 @@ where impl LoadState for EthApi where - Self: Send + Sync, - Provider: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Self: RpcNodeCoreExt< + Provider: BlockReader + + StateProviderFactory + + ChainSpecProvider, + Pool: TransactionPool, + >, + Provider: BlockReader, { - #[inline] - fn provider( - &self, - ) -> impl StateProviderFactory + ChainSpecProvider { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - #[inline] - fn pool(&self) -> impl TransactionPool { - self.inner.pool() - } } #[cfg(test)] mod tests { use super::*; + use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{Address, StorageKey, StorageValue, U256}; use reth_chainspec::MAINNET; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}; use reth_rpc_eth_api::helpers::EthState; use reth_rpc_eth_types::{ @@ -66,19 +57,18 @@ mod tests { let pool = testing_pool(); let evm_config = EthEvmConfig::new(MAINNET.clone()); - let cache = - EthStateCache::spawn(NoopProvider::default(), Default::default(), evm_config.clone()); + let cache = EthStateCache::spawn(NoopProvider::default(), Default::default()); EthApi::new( NoopProvider::default(), pool, NoopNetwork::default(), cache.clone(), - GasPriceOracle::new(NoopProvider::default(), Default::default(), cache.clone()), + GasPriceOracle::new(NoopProvider::default(), Default::default(), cache), ETHEREUM_BLOCK_GAS_LIMIT, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_ETH_PROOF_WINDOW, BlockingTaskPool::build().expect("failed to build tracing pool"), - FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()), + FeeHistoryCache::new(FeeHistoryCacheConfig::default()), evm_config, DEFAULT_PROOF_PERMITS, ) @@ -93,19 +83,18 @@ mod tests { let evm_config = EthEvmConfig::new(mock_provider.chain_spec()); mock_provider.extend_accounts(accounts); - let cache = - EthStateCache::spawn(mock_provider.clone(), Default::default(), evm_config.clone()); + let cache = 
EthStateCache::spawn(mock_provider.clone(), Default::default()); EthApi::new( mock_provider.clone(), pool, (), cache.clone(), - GasPriceOracle::new(mock_provider, Default::default(), cache.clone()), + GasPriceOracle::new(mock_provider, Default::default(), cache), ETHEREUM_BLOCK_GAS_LIMIT, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_ETH_PROOF_WINDOW + 1, BlockingTaskPool::build().expect("failed to build tracing pool"), - FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()), + FeeHistoryCache::new(FeeHistoryCacheConfig::default()), evm_config, DEFAULT_PROOF_PERMITS, ) diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index c40b7acf50d..69b4d9806bf 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -1,18 +1,20 @@ //! Contains RPC handler implementations specific to tracing. use reth_evm::ConfigureEvm; -use reth_primitives::Header; +use reth_provider::{BlockReader, ProviderHeader, ProviderTx}; use reth_rpc_eth_api::helpers::{LoadState, Trace}; use crate::EthApi; impl Trace for EthApi where - Self: LoadState, - EvmConfig: ConfigureEvm
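For downstream code the cache rewiring is mechanical: `EthStateCache::spawn` no longer takes an EVM config, and `FeeHistoryCache::new` no longer takes the cache handle. A compile-style sketch mirroring the test setup above (a tokio runtime is needed because the cache spawns its service task):

```rust
use reth_provider::test_utils::NoopProvider;
use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig};

#[tokio::main]
async fn main() {
    // Previously: EthStateCache::spawn(provider, config, evm_config) and
    // FeeHistoryCache::new(cache.clone(), config).
    let cache = EthStateCache::spawn(NoopProvider::default(), Default::default());
    let fee_history = FeeHistoryCache::new(FeeHistoryCacheConfig::default());
    let _ = (cache, fee_history);
}
```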
, + Self: LoadState< + Provider: BlockReader, + Evm: ConfigureEvm< + Header = ProviderHeader<Self::Provider>, + Transaction = ProviderTx<Self::Provider>, + >, + >, + Provider: BlockReader, { - #[inline] - fn evm_config(&self) -> &impl ConfigureEvm<Header = Header>
{ - self.inner.evm_config() - } } diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 8bd9997f6e8..04ed812fab2 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -1,11 +1,10 @@ //! Contains RPC handler implementations specific to transactions -use reth_provider::{BlockReaderIdExt, TransactionsProvider}; +use reth_provider::{BlockReader, BlockReaderIdExt, ProviderTx, TransactionsProvider}; use reth_rpc_eth_api::{ helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - FullEthApiTypes, + FullEthApiTypes, RpcNodeCoreExt, }; -use reth_rpc_eth_types::EthStateCache; use reth_transaction_pool::TransactionPool; use crate::EthApi; @@ -13,17 +12,11 @@ use crate::EthApi; impl EthTransactions for EthApi where - Self: LoadTransaction, - Pool: TransactionPool + 'static, - Provider: BlockReaderIdExt, + Self: LoadTransaction, + Provider: BlockReader>, { #[inline] - fn provider(&self) -> impl BlockReaderIdExt { - self.inner.provider() - } - - #[inline] - fn signers(&self) -> &parking_lot::RwLock>> { + fn signers(&self) -> &parking_lot::RwLock>>>> { self.inner.signers() } } @@ -31,35 +24,20 @@ where impl LoadTransaction for EthApi where - Self: SpawnBlocking + FullEthApiTypes, - Provider: TransactionsProvider, - Pool: TransactionPool, + Self: SpawnBlocking + + FullEthApiTypes + + RpcNodeCoreExt, + Provider: BlockReader, { - type Pool = Pool; - - #[inline] - fn provider(&self) -> impl TransactionsProvider { - self.inner.provider() - } - - #[inline] - fn cache(&self) -> &EthStateCache { - self.inner.cache() - } - - #[inline] - fn pool(&self) -> &Self::Pool { - self.inner.pool() - } } #[cfg(test)] mod tests { + use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; use alloy_primitives::{hex_literal::hex, Bytes}; use reth_chainspec::ChainSpecProvider; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; use reth_provider::test_utils::NoopProvider; use reth_rpc_eth_api::helpers::EthTransactions; use reth_rpc_eth_types::{ @@ -81,9 +59,8 @@ mod tests { let pool = testing_pool(); let evm_config = EthEvmConfig::new(noop_provider.chain_spec()); - let cache = EthStateCache::spawn(noop_provider, Default::default(), evm_config.clone()); - let fee_history_cache = - FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); + let cache = EthStateCache::spawn(noop_provider, Default::default()); + let fee_history_cache = FeeHistoryCache::new(FeeHistoryCacheConfig::default()); let eth_api = EthApi::new( noop_provider, pool.clone(), diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 982afdcac0a..28c66967e2f 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -1,88 +1,116 @@ //! L1 `eth` API types. 
-use alloy_consensus::Transaction as _; -use alloy_network::{AnyNetwork, Network}; -use alloy_primitives::{Address, TxKind}; -use alloy_rpc_types::{Transaction, TransactionInfo}; -use alloy_serde::WithOtherFields; -use reth_primitives::TransactionSignedEcRecovered; -use reth_rpc_types_compat::{ - transaction::{from_primitive_signature, GasPrice}, - TransactionCompat, -}; +use alloy_consensus::{Signed, Transaction as _, TxEip4844Variant, TxEnvelope}; +use alloy_network::{Ethereum, Network}; +use alloy_primitives::PrimitiveSignature as Signature; +use alloy_rpc_types::TransactionRequest; +use alloy_rpc_types_eth::{Transaction, TransactionInfo}; +use reth_primitives::{RecoveredTx, TransactionSigned}; +use reth_rpc_eth_api::EthApiTypes; +use reth_rpc_eth_types::EthApiError; +use reth_rpc_types_compat::TransactionCompat; + +/// A standalone [`EthApiTypes`] implementation for Ethereum. +#[derive(Debug, Clone, Copy, Default)] +pub struct EthereumEthApiTypes(EthTxBuilder); + +impl EthApiTypes for EthereumEthApiTypes { + type Error = EthApiError; + type NetworkTypes = Ethereum; + type TransactionCompat = EthTxBuilder; + + fn tx_resp_builder(&self) -> &Self::TransactionCompat { + &self.0 + } +} /// Builds RPC transaction response for l1. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, Default)] +#[non_exhaustive] pub struct EthTxBuilder; impl TransactionCompat for EthTxBuilder where Self: Send + Sync, { - type Transaction = ::TransactionResponse; + type Transaction = ::TransactionResponse; + + type Error = EthApiError; - fn fill(tx: TransactionSignedEcRecovered, tx_info: TransactionInfo) -> Self::Transaction { - let signer = tx.signer(); - let signed_tx = tx.into_signed(); + fn fill( + &self, + tx: RecoveredTx, + tx_info: TransactionInfo, + ) -> Result { + let from = tx.signer(); + let hash = tx.hash(); + let TransactionSigned { transaction, signature, .. } = tx.into_signed(); - let to: Option
= match signed_tx.kind() { - TxKind::Create => None, - TxKind::Call(to) => Some(Address(*to)), + let inner: TxEnvelope = match transaction { + reth_primitives::Transaction::Legacy(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip2930(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip1559(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip4844(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + reth_primitives::Transaction::Eip7702(tx) => { + Signed::new_unchecked(tx, signature, hash).into() + } + #[allow(unreachable_patterns)] + _ => unreachable!(), }; let TransactionInfo { - base_fee, block_hash, block_number, index: transaction_index, .. + block_hash, block_number, index: transaction_index, base_fee, .. } = tx_info; - let GasPrice { gas_price, max_fee_per_gas } = - Self::gas_price(&signed_tx, base_fee.map(|fee| fee as u64)); + let effective_gas_price = base_fee + .map(|base_fee| { + inner.effective_tip_per_gas(base_fee as u64).unwrap_or_default() + base_fee + }) + .unwrap_or_else(|| inner.max_fee_per_gas()); - let chain_id = signed_tx.chain_id(); - let blob_versioned_hashes = signed_tx.blob_versioned_hashes(); - let access_list = signed_tx.access_list().cloned(); - let authorization_list = signed_tx.authorization_list().map(|l| l.to_vec()); + Ok(Transaction { + inner, + block_hash, + block_number, + transaction_index, + from, + effective_gas_price: Some(effective_gas_price), + }) + } - let signature = from_primitive_signature( - *signed_tx.signature(), - signed_tx.tx_type(), - signed_tx.chain_id(), - ); + fn build_simulate_v1_transaction( + &self, + request: TransactionRequest, + ) -> Result { + let Ok(tx) = request.build_typed_tx() else { + return Err(EthApiError::TransactionConversionError) + }; - WithOtherFields { - inner: Transaction { - hash: signed_tx.hash(), - nonce: signed_tx.nonce(), - from: signer, - to, - value: signed_tx.value(), - gas_price, - max_fee_per_gas, - max_priority_fee_per_gas: signed_tx.max_priority_fee_per_gas(), - signature: Some(signature), - gas: signed_tx.gas_limit(), - input: signed_tx.input().clone(), - chain_id, - access_list, - transaction_type: Some(signed_tx.tx_type() as u8), - // These fields are set to None because they are not stored as part of the - // transaction - block_hash, - block_number, - transaction_index, - // EIP-4844 fields - max_fee_per_blob_gas: signed_tx.max_fee_per_blob_gas(), - blob_versioned_hashes, - authorization_list, - }, - ..Default::default() - } + // Create an empty signature for the transaction. 
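The `effective_gas_price` computed in `fill` is the EIP-1559 effective price: base fee plus the effective tip, where the tip is capped both by the priority fee and by the remaining fee-cap headroom, falling back to the raw fee cap when no base fee is known (e.g. for pending transactions). A worked standalone version:

```rust
/// Effective gas price as computed in `fill` above.
fn effective_gas_price(max_fee: u128, max_priority_fee: u128, base_fee: Option<u128>) -> u128 {
    match base_fee {
        Some(base_fee) => {
            // `effective_tip_per_gas` is `None` when the fee cap is below the
            // base fee; the handler maps that case to a zero tip.
            let tip = max_priority_fee.min(max_fee.saturating_sub(base_fee));
            base_fee + tip
        }
        None => max_fee,
    }
}

fn main() {
    // cap 30, tip cap 2, base fee 25 -> 25 + min(2, 5) = 27
    assert_eq!(effective_gas_price(30, 2, Some(25)), 27);
    // cap 30, tip cap 10, base fee 25 -> tip clamps to the 5 of headroom
    assert_eq!(effective_gas_price(30, 10, Some(25)), 30);
}
```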
+ let signature = Signature::new(Default::default(), Default::default(), false); + Ok(TransactionSigned::new_unhashed(tx.into(), signature)) } fn otterscan_api_truncate_input(tx: &mut Self::Transaction) { - tx.inner.input = tx.inner.input.slice(..4); - } - - fn tx_type(tx: &Self::Transaction) -> u8 { - tx.inner.transaction_type.unwrap_or(0) + let input = match &mut tx.inner { + TxEnvelope::Eip1559(tx) => &mut tx.tx_mut().input, + TxEnvelope::Eip2930(tx) => &mut tx.tx_mut().input, + TxEnvelope::Legacy(tx) => &mut tx.tx_mut().input, + TxEnvelope::Eip4844(tx) => match tx.tx_mut() { + TxEip4844Variant::TxEip4844(tx) => &mut tx.input, + TxEip4844Variant::TxEip4844WithSidecar(tx) => &mut tx.tx.input, + }, + TxEnvelope::Eip7702(tx) => &mut tx.tx_mut().input, + _ => return, + }; + *input = input.slice(..4); } } diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index 99919110da7..d8a5b95f55e 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -13,6 +13,9 @@ pub use core::EthApi; pub use filter::EthFilter; pub use pubsub::EthPubSub; -pub use helpers::{signer::DevSigner, types::EthTxBuilder}; +pub use helpers::{ + signer::DevSigner, + types::{EthTxBuilder, EthereumEthApiTypes}, +}; -pub use reth_rpc_eth_api::EthApiServer; +pub use reth_rpc_eth_api::{EthApiServer, EthApiTypes, FullEthApiServer, RpcNodeCore}; diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index 7bd1fd03d3b..fc02b0da067 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -1,84 +1,73 @@ //! `eth_` `PubSub` RPC handler implementation -use std::{marker::PhantomData, sync::Arc}; +use std::sync::Arc; use alloy_primitives::TxHash; -use alloy_rpc_types::{ - pubsub::{ - Params, PubSubSyncStatus, SubscriptionKind, SubscriptionResult as EthSubscriptionResult, - SyncStatusMetadata, - }, - FilteredParams, Header, Log, Transaction, +use alloy_rpc_types_eth::{ + pubsub::{Params, PubSubSyncStatus, SubscriptionKind, SyncStatusMetadata}, + FilteredParams, Header, Log, }; -use alloy_serde::WithOtherFields; use futures::StreamExt; use jsonrpsee::{ server::SubscriptionMessage, types::ErrorObject, PendingSubscriptionSink, SubscriptionSink, }; use reth_network_api::NetworkInfo; -use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider}; -use reth_rpc_eth_api::{pubsub::EthPubSubApiServer, FullEthApiTypes, RpcTransaction}; +use reth_primitives::NodePrimitives; +use reth_provider::{BlockNumReader, CanonStateSubscriptions}; +use reth_rpc_eth_api::{ + pubsub::EthPubSubApiServer, EthApiTypes, RpcNodeCore, RpcTransaction, TransactionCompat, +}; use reth_rpc_eth_types::logs_utils; use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err}; use reth_rpc_types_compat::transaction::from_recovered; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; -use reth_transaction_pool::{NewTransactionEvent, TransactionPool}; +use reth_transaction_pool::{NewTransactionEvent, PoolConsensusTx, TransactionPool}; use serde::Serialize; use tokio_stream::{ wrappers::{BroadcastStream, ReceiverStream}, Stream, }; +use tracing::error; /// `Eth` pubsub RPC implementation. /// /// This handles `eth_subscribe` RPC calls. #[derive(Clone)] -pub struct EthPubSub { +pub struct EthPubSub { /// All nested fields bundled together. - inner: Arc>, + inner: Arc>, /// The type that's used to spawn subscription tasks. 
subscription_task_spawner: Box, - _tx_resp_builder: PhantomData, } // === impl EthPubSub === -impl EthPubSub { +impl EthPubSub { /// Creates a new, shareable instance. /// /// Subscription tasks are spawned via [`tokio::task::spawn`] - pub fn new(provider: Provider, pool: Pool, chain_events: Events, network: Network) -> Self { - Self::with_spawner( - provider, - pool, - chain_events, - network, - Box::::default(), - ) + pub fn new(eth_api: Eth, chain_events: Events) -> Self { + Self::with_spawner(eth_api, chain_events, Box::::default()) } /// Creates a new, shareable instance. pub fn with_spawner( - provider: Provider, - pool: Pool, + eth_api: Eth, chain_events: Events, - network: Network, subscription_task_spawner: Box, ) -> Self { - let inner = EthPubSubInner { provider, pool, chain_events, network }; - Self { inner: Arc::new(inner), subscription_task_spawner, _tx_resp_builder: PhantomData } + let inner = EthPubSubInner { eth_api, chain_events }; + Self { inner: Arc::new(inner), subscription_task_spawner } } } #[async_trait::async_trait] -impl EthPubSubApiServer> - for EthPubSub +impl EthPubSubApiServer> for EthPubSub where - Provider: BlockReader + EvmEnvProvider + Clone + 'static, - Pool: TransactionPool + 'static, - Events: CanonStateSubscriptions + Clone + 'static, - Network: NetworkInfo + Clone + 'static, - Eth: FullEthApiTypes + 'static, + Events: CanonStateSubscriptions + 'static, + Eth: RpcNodeCore + + EthApiTypes>> + + 'static, { /// Handler for `eth_subscribe` async fn subscribe( @@ -90,7 +79,7 @@ where let sink = pending.accept().await?; let pubsub = self.inner.clone(); self.subscription_task_spawner.spawn(Box::pin(async move { - let _ = handle_accepted::<_, _, _, _, Eth>(pubsub, sink, kind, params).await; + let _ = handle_accepted(pubsub, sink, kind, params).await; })); Ok(()) @@ -98,27 +87,20 @@ where } /// The actual handler for an accepted [`EthPubSub::subscribe`] call. 
-async fn handle_accepted( - pubsub: Arc>, +async fn handle_accepted( + pubsub: Arc>, accepted_sink: SubscriptionSink, kind: SubscriptionKind, params: Option, ) -> Result<(), ErrorObject<'static>> where - Provider: BlockReader + EvmEnvProvider + Clone + 'static, - Pool: TransactionPool + 'static, - Events: CanonStateSubscriptions + Clone + 'static, - Network: NetworkInfo + Clone + 'static, - Eth: FullEthApiTypes, + Events: CanonStateSubscriptions + 'static, + Eth: RpcNodeCore + + EthApiTypes>>, { match kind { SubscriptionKind::NewHeads => { - let stream = pubsub.new_headers_stream().map(|header| { - EthSubscriptionResult::>::Header(Box::new( - header.into(), - )) - }); - pipe_from_stream(accepted_sink, stream).await + pipe_from_stream(accepted_sink, pubsub.new_headers_stream()).await } SubscriptionKind::Logs => { // if no params are provided, used default filter params @@ -129,22 +111,28 @@ where } _ => FilteredParams::default(), }; - let stream = pubsub.log_stream(filter).map(|log| { - EthSubscriptionResult::>::Log(Box::new(log)) - }); - pipe_from_stream(accepted_sink, stream).await + pipe_from_stream(accepted_sink, pubsub.log_stream(filter)).await } SubscriptionKind::NewPendingTransactions => { if let Some(params) = params { match params { Params::Bool(true) => { // full transaction objects requested - let stream = pubsub.full_pending_transaction_stream().map(|tx| { - EthSubscriptionResult::FullTransaction(Box::new(from_recovered::< - Eth::TransactionCompat, - >( - tx.transaction.to_recovered_transaction(), - ))) + let stream = pubsub.full_pending_transaction_stream().filter_map(|tx| { + let tx_value = match from_recovered( + tx.transaction.to_consensus(), + pubsub.eth_api.tx_resp_builder(), + ) { + Ok(tx) => Some(tx), + Err(err) => { + error!(target = "rpc", + %err, + "Failed to fill transaction with block context" + ); + None + } + }; + std::future::ready(tx_value) }); return pipe_from_stream(accepted_sink, stream).await } @@ -159,17 +147,14 @@ where } } - let stream = pubsub - .pending_transaction_hashes_stream() - .map(EthSubscriptionResult::>::TransactionHash); - pipe_from_stream(accepted_sink, stream).await + pipe_from_stream(accepted_sink, pubsub.pending_transaction_hashes_stream()).await } SubscriptionKind::Syncing => { // get new block subscription let mut canon_state = BroadcastStream::new(pubsub.chain_events.subscribe_to_canonical_state()); // get current sync status - let mut initial_sync_status = pubsub.network.is_syncing(); + let mut initial_sync_status = pubsub.eth_api.network().is_syncing(); let current_sub_res = pubsub.sync_status(initial_sync_status); // send the current status immediately @@ -180,7 +165,7 @@ where } while canon_state.next().await.is_some() { - let current_syncing = pubsub.network.is_syncing(); + let current_syncing = pubsub.eth_api.network().is_syncing(); // Only send a new response if the sync status has changed if current_syncing != initial_sync_status { // Update the sync status on each new block @@ -250,9 +235,7 @@ where } } -impl std::fmt::Debug - for EthPubSub -{ +impl std::fmt::Debug for EthPubSub { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EthPubSub").finish_non_exhaustive() } @@ -260,70 +243,69 @@ impl std::fmt::Debug /// Container type `EthPubSub` #[derive(Clone)] -struct EthPubSubInner { - /// The transaction pool. - pool: Pool, - /// The provider that can interact with the chain. - provider: Provider, +struct EthPubSubInner { + /// The `eth` API. 
+ eth_api: EthApi, /// A type that allows to create new event subscriptions. chain_events: Events, - /// The network. - network: Network, } // == impl EthPubSubInner === -impl EthPubSubInner +impl EthPubSubInner where - Provider: BlockReader + 'static, + Eth: RpcNodeCore, { /// Returns the current sync status for the `syncing` subscription - fn sync_status(&self, is_syncing: bool) -> EthSubscriptionResult { + fn sync_status(&self, is_syncing: bool) -> PubSubSyncStatus { if is_syncing { - let current_block = - self.provider.chain_info().map(|info| info.best_number).unwrap_or_default(); - EthSubscriptionResult::SyncState(PubSubSyncStatus::Detailed(SyncStatusMetadata { + let current_block = self + .eth_api + .provider() + .chain_info() + .map(|info| info.best_number) + .unwrap_or_default(); + PubSubSyncStatus::Detailed(SyncStatusMetadata { syncing: true, starting_block: 0, current_block, highest_block: Some(current_block), - })) + }) } else { - EthSubscriptionResult::SyncState(PubSubSyncStatus::Simple(false)) + PubSubSyncStatus::Simple(false) } } } -impl EthPubSubInner +impl EthPubSubInner where - Pool: TransactionPool + 'static, + Eth: RpcNodeCore, { /// Returns a stream that yields all transaction hashes emitted by the txpool. fn pending_transaction_hashes_stream(&self) -> impl Stream { - ReceiverStream::new(self.pool.pending_transactions_listener()) + ReceiverStream::new(self.eth_api.pool().pending_transactions_listener()) } /// Returns a stream that yields all transactions emitted by the txpool. fn full_pending_transaction_stream( &self, - ) -> impl Stream::Transaction>> { - self.pool.new_pending_pool_transactions_listener() + ) -> impl Stream::Transaction>> { + self.eth_api.pool().new_pending_pool_transactions_listener() } } -impl EthPubSubInner +impl EthPubSubInner where - Provider: BlockReader + EvmEnvProvider + 'static, - Events: CanonStateSubscriptions + 'static, - Network: NetworkInfo + 'static, - Pool: 'static, + Events: CanonStateSubscriptions, { /// Returns a stream that yields all new RPC blocks. - fn new_headers_stream(&self) -> impl Stream { + fn new_headers_stream( + &self, + ) -> impl Stream::BlockHeader>> { self.chain_events.canonical_state_stream().flat_map(|new_chain| { let headers = new_chain.committed().headers().collect::>(); futures::stream::iter( - headers.into_iter().map(reth_rpc_types_compat::block::from_primitive_with_hash), + headers.into_iter().map(|h| Header::from_consensus(h.into(), None, None)), ) }) } diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index 46dbb45d962..6cfeb0934f4 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -1,15 +1,70 @@ //! `Eth` Sim bundle implementation and helpers. 
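+//!
+//! `mev_simBundle` now runs a full simulation against recent state instead of
+//! returning an "unsupported" error: nested bundles are flattened up to
+//! `MAX_NESTED_BUNDLE_DEPTH`, bundle bodies are capped at `MAX_BUNDLE_BODY_SIZE`
+//! items, and simulations are bounded by `DEFAULT_SIM_TIMEOUT` unless the request
+//! overrides it (clamped to `MAX_SIM_TIMEOUT`).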
-use std::sync::Arc; - -use alloy_rpc_types_mev::{SendBundleRequest, SimBundleOverrides, SimBundleResponse}; +use alloy_consensus::BlockHeader; +use alloy_eips::BlockNumberOrTag; +use alloy_primitives::U256; +use alloy_rpc_types_eth::BlockId; +use alloy_rpc_types_mev::{ + BundleItem, Inclusion, Privacy, RefundConfig, SendBundleRequest, SimBundleLogs, + SimBundleOverrides, SimBundleResponse, Validity, +}; use jsonrpsee::core::RpcResult; +use reth_chainspec::EthChainSpec; +use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; +use reth_provider::{ChainSpecProvider, HeaderProvider, ProviderTx}; +use reth_revm::database::StateProviderDatabase; use reth_rpc_api::MevSimApiServer; -use reth_rpc_eth_api::helpers::{Call, EthTransactions, LoadPendingBlock}; -use reth_rpc_eth_types::EthApiError; +use reth_rpc_eth_api::{ + helpers::{Call, EthTransactions, LoadPendingBlock}, + FromEthApiError, RpcNodeCore, +}; +use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; use reth_tasks::pool::BlockingTaskGuard; +use reth_transaction_pool::{PoolConsensusTx, PoolPooledTx, PoolTransaction, TransactionPool}; +use revm::{ + db::CacheDB, + primitives::{Address, EnvWithHandlerCfg, ResultAndState, SpecId, TxEnv}, + DatabaseCommit, DatabaseRef, +}; +use std::{sync::Arc, time::Duration}; use tracing::info; +/// Maximum bundle depth +const MAX_NESTED_BUNDLE_DEPTH: usize = 5; + +/// Maximum body size +const MAX_BUNDLE_BODY_SIZE: usize = 50; + +/// Default simulation timeout +const DEFAULT_SIM_TIMEOUT: Duration = Duration::from_secs(5); + +/// Maximum simulation timeout +const MAX_SIM_TIMEOUT: Duration = Duration::from_secs(30); + +/// Maximum payout cost +const SBUNDLE_PAYOUT_MAX_COST: u64 = 30_000; + +/// A flattened representation of a bundle item containing transaction and associated metadata. +#[derive(Clone, Debug)] +pub struct FlattenedBundleItem { + /// The signed transaction + pub tx: T, + /// The address that signed the transaction + pub signer: Address, + /// Whether the transaction is allowed to revert + pub can_revert: bool, + /// Item-level inclusion constraints + pub inclusion: Inclusion, + /// Optional validity constraints for the bundle item + pub validity: Option, + /// Optional privacy settings for the bundle item + pub privacy: Option, + /// Optional refund percent for the bundle item + pub refund_percent: Option, + /// Optional refund configs for the bundle item + pub refund_configs: Option>, +} + /// `Eth` sim bundle implementation. pub struct EthSimBundle { /// All nested fields bundled together. @@ -21,20 +76,367 @@ impl EthSimBundle { pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { Self { inner: Arc::new(EthSimBundleInner { eth_api, blocking_task_guard }) } } + + /// Access the underlying `Eth` API. + pub fn eth_api(&self) -> &Eth { + &self.inner.eth_api + } } impl EthSimBundle where Eth: EthTransactions + LoadPendingBlock + Call + 'static, { - /// Simulates a bundle of transactions. - pub async fn sim_bundle( + /// Flattens a potentially nested bundle into a list of individual transactions in a + /// `FlattenedBundleItem` with their associated metadata. This handles recursive bundle + /// processing up to `MAX_NESTED_BUNDLE_DEPTH` and `MAX_BUNDLE_BODY_SIZE`, preserving + /// inclusion, validity and privacy settings from parent bundles. 
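+    ///
+    /// The traversal is iterative rather than recursive; conceptually (a simplified
+    /// sketch, with field handling elided):
+    ///
+    /// ```ignore
+    /// let mut stack = vec![(root_bundle, 0, 1)]; // (bundle, next item index, depth)
+    /// while let Some((bundle, idx, depth)) = stack.pop() {
+    ///     // walk bundle.bundle_body[idx..]; on a nested bundle, push the parent
+    ///     // back with idx + 1, then push the child with index 0 and depth + 1
+    /// }
+    /// ```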
+ fn parse_and_flatten_bundle( + &self, + request: &SendBundleRequest, + ) -> Result>>, EthApiError> { + let mut items = Vec::new(); + + // Stack for processing bundles + let mut stack = Vec::new(); + + // Start with initial bundle, index 0, and depth 1 + stack.push((request, 0, 1)); + + while let Some((current_bundle, mut idx, depth)) = stack.pop() { + // Check max depth + if depth > MAX_NESTED_BUNDLE_DEPTH { + return Err(EthApiError::InvalidParams(EthSimBundleError::MaxDepth.to_string())); + } + + // Determine inclusion, validity, and privacy + let inclusion = ¤t_bundle.inclusion; + let validity = ¤t_bundle.validity; + let privacy = ¤t_bundle.privacy; + + // Validate inclusion parameters + let block_number = inclusion.block_number(); + let max_block_number = inclusion.max_block_number().unwrap_or(block_number); + + if max_block_number < block_number || block_number == 0 { + return Err(EthApiError::InvalidParams( + EthSimBundleError::InvalidInclusion.to_string(), + )); + } + + // Validate bundle body size + if current_bundle.bundle_body.len() > MAX_BUNDLE_BODY_SIZE { + return Err(EthApiError::InvalidParams( + EthSimBundleError::BundleTooLarge.to_string(), + )); + } + + // Validate validity and refund config + if let Some(validity) = ¤t_bundle.validity { + // Validate refund entries + if let Some(refunds) = &validity.refund { + let mut total_percent = 0; + for refund in refunds { + if refund.body_idx as usize >= current_bundle.bundle_body.len() { + return Err(EthApiError::InvalidParams( + EthSimBundleError::InvalidValidity.to_string(), + )); + } + if 100 - total_percent < refund.percent { + return Err(EthApiError::InvalidParams( + EthSimBundleError::InvalidValidity.to_string(), + )); + } + total_percent += refund.percent; + } + } + + // Validate refund configs + if let Some(refund_configs) = &validity.refund_config { + let mut total_percent = 0; + for refund_config in refund_configs { + if 100 - total_percent < refund_config.percent { + return Err(EthApiError::InvalidParams( + EthSimBundleError::InvalidValidity.to_string(), + )); + } + total_percent += refund_config.percent; + } + } + } + + let body = ¤t_bundle.bundle_body; + + // Process items in the current bundle + while idx < body.len() { + match &body[idx] { + BundleItem::Tx { tx, can_revert } => { + let recovered_tx = recover_raw_transaction::>(tx) + .map_err(EthApiError::from)?; + let (tx, signer) = recovered_tx.to_components(); + let tx: PoolConsensusTx = + ::Transaction::pooled_into_consensus(tx); + + let refund_percent = + validity.as_ref().and_then(|v| v.refund.as_ref()).and_then(|refunds| { + refunds.iter().find_map(|refund| { + (refund.body_idx as usize == idx).then_some(refund.percent) + }) + }); + let refund_configs = + validity.as_ref().and_then(|v| v.refund_config.clone()); + + // Create FlattenedBundleItem with current inclusion, validity, and privacy + let flattened_item = FlattenedBundleItem { + tx, + signer, + can_revert: *can_revert, + inclusion: inclusion.clone(), + validity: validity.clone(), + privacy: privacy.clone(), + refund_percent, + refund_configs, + }; + + // Add to items + items.push(flattened_item); + + idx += 1; + } + BundleItem::Bundle { bundle } => { + // Push the current bundle and next index onto the stack to resume later + stack.push((current_bundle, idx + 1, depth)); + + // process the nested bundle next + stack.push((bundle, 0, depth + 1)); + break; + } + BundleItem::Hash { hash: _ } => { + // Hash-only items are not allowed + return Err(EthApiError::InvalidParams( + 
EthSimBundleError::InvalidBundle.to_string(), + )); + } + } + } + } + + Ok(items) + } + + async fn sim_bundle( &self, request: SendBundleRequest, overrides: SimBundleOverrides, - ) -> RpcResult { - info!("mev_simBundle called, request: {:?}, overrides: {:?}", request, overrides); - Err(EthApiError::Unsupported("mev_simBundle is not supported").into()) + logs: bool, + ) -> Result { + let SimBundleOverrides { + parent_block, + block_number, + coinbase, + timestamp, + gas_limit, + base_fee, + .. + } = overrides; + + // Parse and validate bundle + // Also, flatten the bundle here so that its easier to process + let flattened_bundle = self.parse_and_flatten_bundle(&request)?; + + let block_id = parent_block.unwrap_or(BlockId::Number(BlockNumberOrTag::Pending)); + let (cfg, mut block_env, current_block) = self.eth_api().evm_env_at(block_id).await?; + + let parent_header = RpcNodeCore::provider(&self.inner.eth_api) + .header_by_number(block_env.number.saturating_to::()) + .map_err(EthApiError::from_eth_err)? // Explicitly map the error + .ok_or_else(|| { + EthApiError::HeaderNotFound((block_env.number.saturating_to::()).into()) + })?; + + // apply overrides + if let Some(block_number) = block_number { + block_env.number = U256::from(block_number); + } + + if let Some(coinbase) = coinbase { + block_env.coinbase = coinbase; + } + + if let Some(timestamp) = timestamp { + block_env.timestamp = U256::from(timestamp); + } + + if let Some(gas_limit) = gas_limit { + block_env.gas_limit = U256::from(gas_limit); + } + + if let Some(base_fee) = base_fee { + block_env.basefee = U256::from(base_fee); + } else if cfg.handler_cfg.spec_id.is_enabled_in(SpecId::LONDON) { + if let Some(base_fee) = parent_header.next_block_base_fee( + RpcNodeCore::provider(&self.inner.eth_api) + .chain_spec() + .base_fee_params_at_block(block_env.number.saturating_to::()), + ) { + block_env.basefee = U256::from(base_fee); + } + } + + let eth_api = self.inner.eth_api.clone(); + + let sim_response = self + .inner + .eth_api + .spawn_with_state_at_block(current_block, move |state| { + // Setup environment + let current_block_number = current_block.as_u64().unwrap(); + let coinbase = block_env.coinbase; + let basefee = block_env.basefee; + let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, TxEnv::default()); + let db = CacheDB::new(StateProviderDatabase::new(state)); + + let initial_coinbase_balance = DatabaseRef::basic_ref(&db, coinbase) + .map_err(EthApiError::from_eth_err)? 
+                    .map(|acc| acc.balance)
+                    .unwrap_or_default();
+
+                let mut coinbase_balance_before_tx = initial_coinbase_balance;
+                let mut total_gas_used = 0;
+                let mut total_profit = U256::ZERO;
+                let mut refundable_value = U256::ZERO;
+                let mut body_logs: Vec<SimBundleLogs> = Vec::new();
+
+                let mut evm = eth_api.evm_config().evm_with_env(db, env);
+
+                for item in &flattened_bundle {
+                    // Check inclusion constraints
+                    let block_number = item.inclusion.block_number();
+                    let max_block_number =
+                        item.inclusion.max_block_number().unwrap_or(block_number);
+
+                    if current_block_number < block_number ||
+                        current_block_number > max_block_number
+                    {
+                        return Err(EthApiError::InvalidParams(
+                            EthSimBundleError::InvalidInclusion.to_string(),
+                        )
+                        .into());
+                    }
+                    eth_api.evm_config().fill_tx_env(evm.tx_mut(), &item.tx, item.signer);
+
+                    let ResultAndState { result, state } =
+                        evm.transact().map_err(EthApiError::from_eth_err)?;
+
+                    if !result.is_success() && !item.can_revert {
+                        return Err(EthApiError::InvalidParams(
+                            EthSimBundleError::BundleTransactionFailed.to_string(),
+                        )
+                        .into());
+                    }
+
+                    let gas_used = result.gas_used();
+                    total_gas_used += gas_used;
+
+                    // coinbase is always present in the result state
+                    let coinbase_balance_after_tx =
+                        state.get(&coinbase).map(|acc| acc.info.balance).unwrap_or_default();
+
+                    let coinbase_diff =
+                        coinbase_balance_after_tx.saturating_sub(coinbase_balance_before_tx);
+                    total_profit += coinbase_diff;
+
+                    // Add to refundable value if this tx does not have a refund percent
+                    if item.refund_percent.is_none() {
+                        refundable_value += coinbase_diff;
+                    }
+
+                    // Update coinbase balance before next tx
+                    coinbase_balance_before_tx = coinbase_balance_after_tx;
+
+                    // Collect logs if requested
+                    // TODO: since we are looping iteratively, we are not collecting bundle
+                    // logs. We should collect bundle logs when we are processing the bundle
+                    // items.
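+                    // Until then, only per-transaction logs are returned and the
+                    // `bundle_logs` field below stays `None`.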
+ if logs { + let tx_logs = result.logs().to_vec(); + let sim_bundle_logs = + SimBundleLogs { tx_logs: Some(tx_logs), bundle_logs: None }; + body_logs.push(sim_bundle_logs); + } + + // Apply state changes + evm.context.evm.db.commit(state); + } + + // After processing all transactions, process refunds + for item in &flattened_bundle { + if let Some(refund_percent) = item.refund_percent { + // Get refund configurations + let refund_configs = item.refund_configs.clone().unwrap_or_else(|| { + vec![RefundConfig { address: item.signer, percent: 100 }] + }); + + // Calculate payout transaction fee + let payout_tx_fee = basefee * + U256::from(SBUNDLE_PAYOUT_MAX_COST) * + U256::from(refund_configs.len() as u64); + + // Add gas used for payout transactions + total_gas_used += SBUNDLE_PAYOUT_MAX_COST * refund_configs.len() as u64; + + // Calculate allocated refundable value (payout value) + let payout_value = + refundable_value * U256::from(refund_percent) / U256::from(100); + + if payout_tx_fee > payout_value { + return Err(EthApiError::InvalidParams( + EthSimBundleError::NegativeProfit.to_string(), + ) + .into()); + } + + // Subtract payout value from total profit + total_profit = total_profit.checked_sub(payout_value).ok_or( + EthApiError::InvalidParams( + EthSimBundleError::NegativeProfit.to_string(), + ), + )?; + + // Adjust refundable value + refundable_value = refundable_value.checked_sub(payout_value).ok_or( + EthApiError::InvalidParams( + EthSimBundleError::NegativeProfit.to_string(), + ), + )?; + } + } + + // Calculate mev gas price + let mev_gas_price = if total_gas_used != 0 { + total_profit / U256::from(total_gas_used) + } else { + U256::ZERO + }; + + Ok(SimBundleResponse { + success: true, + state_block: current_block_number, + error: None, + logs: Some(body_logs), + gas_used: total_gas_used, + mev_gas_price, + profit: total_profit, + refundable_value, + exec_error: None, + revert: None, + }) + }) + .await + .map_err(|_| { + EthApiError::InvalidParams(EthSimBundleError::BundleTimeout.to_string()) + })?; + + Ok(sim_response) } } @@ -48,7 +450,23 @@ where request: SendBundleRequest, overrides: SimBundleOverrides, ) -> RpcResult { - Self::sim_bundle(self, request, overrides).await + info!("mev_simBundle called, request: {:?}, overrides: {:?}", request, overrides); + + let override_timeout = overrides.timeout; + + let timeout = override_timeout + .map(Duration::from_secs) + .filter(|&custom_duration| custom_duration <= MAX_SIM_TIMEOUT) + .unwrap_or(DEFAULT_SIM_TIMEOUT); + + let bundle_res = + tokio::time::timeout(timeout, Self::sim_bundle(self, request, overrides, true)) + .await + .map_err(|_| { + EthApiError::InvalidParams(EthSimBundleError::BundleTimeout.to_string()) + })?; + + bundle_res.map_err(Into::into) } } @@ -74,3 +492,35 @@ impl Clone for EthSimBundle { Self { inner: Arc::clone(&self.inner) } } } + +/// [`EthSimBundle`] specific errors. 
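+///
+/// These are reported to RPC callers as `EthApiError::InvalidParams` wrapping the
+/// error strings defined below.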
+#[derive(Debug, thiserror::Error)]
+pub enum EthSimBundleError {
+    /// Thrown when max depth is reached
+    #[error("max depth reached")]
+    MaxDepth,
+    /// Thrown when a bundle is unmatched
+    #[error("unmatched bundle")]
+    UnmatchedBundle,
+    /// Thrown when a bundle is too large
+    #[error("bundle too large")]
+    BundleTooLarge,
+    /// Thrown when validity is invalid
+    #[error("invalid validity")]
+    InvalidValidity,
+    /// Thrown when inclusion is invalid
+    #[error("invalid inclusion")]
+    InvalidInclusion,
+    /// Thrown when a bundle is invalid
+    #[error("invalid bundle")]
+    InvalidBundle,
+    /// Thrown when a bundle simulation times out
+    #[error("bundle simulation timed out")]
+    BundleTimeout,
+    /// Thrown when a transaction is reverted in a bundle
+    #[error("bundle transaction failed")]
+    BundleTransactionFailed,
+    /// Thrown when a bundle simulation returns negative profit
+    #[error("bundle simulation returned negative profit")]
+    NegativeProfit,
+}
diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs
index eec14981bf5..d957913dffb 100644
--- a/crates/rpc/rpc/src/lib.rs
+++ b/crates/rpc/rpc/src/lib.rs
@@ -36,21 +36,26 @@ mod admin;
 mod debug;
 mod engine;
 pub mod eth;
+mod miner;
 mod net;
 mod otterscan;
 mod reth;
 mod rpc;
 mod trace;
 mod txpool;
+mod validation;
 mod web3;
+
 pub use admin::AdminApi;
 pub use debug::DebugApi;
 pub use engine::{EngineApi, EngineEthApi};
 pub use eth::{EthApi, EthBundle, EthFilter, EthPubSub};
+pub use miner::MinerApi;
 pub use net::NetApi;
 pub use otterscan::OtterscanApi;
 pub use reth::RethApi;
 pub use rpc::RPCApi;
 pub use trace::TraceApi;
 pub use txpool::TxPoolApi;
+pub use validation::{ValidationApi, ValidationApiConfig};
 pub use web3::Web3Api;
diff --git a/crates/rpc/rpc/src/miner.rs b/crates/rpc/rpc/src/miner.rs
new file mode 100644
index 00000000000..ab8fa5e0cd2
--- /dev/null
+++ b/crates/rpc/rpc/src/miner.rs
@@ -0,0 +1,25 @@
+use alloy_primitives::{Bytes, U128};
+use async_trait::async_trait;
+use jsonrpsee::core::RpcResult;
+use reth_rpc_api::MinerApiServer;
+
+/// `miner` API implementation.
+///
+/// This type provides the functionality for handling `miner` related requests.
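+///
+/// These setters are currently no-ops: each request is accepted but ignored, and
+/// each handler returns `Ok(false)`. A minimal sketch of the observable behavior:
+///
+/// ```ignore
+/// let api = MinerApi::default();
+/// assert_eq!(api.set_gas_limit(U128::from(30_000_000u64)).unwrap(), false);
+/// ```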
+#[derive(Clone, Debug, Default)] +pub struct MinerApi {} + +#[async_trait] +impl MinerApiServer for MinerApi { + fn set_extra(&self, _record: Bytes) -> RpcResult { + Ok(false) + } + + fn set_gas_price(&self, _gas_price: U128) -> RpcResult { + Ok(false) + } + + fn set_gas_limit(&self, _gas_price: U128) -> RpcResult { + Ok(false) + } +} diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 45722978f9f..173a2ff3495 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -1,7 +1,8 @@ -use alloy_consensus::Transaction; +use alloy_consensus::{BlockHeader, Transaction}; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_network::{ReceiptResponse, TransactionResponse}; use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; -use alloy_rpc_types::{BlockTransactions, Header, TransactionReceipt}; +use alloy_rpc_types_eth::{BlockTransactions, TransactionReceipt}; use alloy_rpc_types_trace::{ otterscan::{ BlockDetails, ContractCreator, InternalOperation, OperationType, OtsBlockTransactions, @@ -11,11 +12,10 @@ use alloy_rpc_types_trace::{ }; use async_trait::async_trait; use jsonrpsee::{core::RpcResult, types::ErrorObjectOwned}; -use reth_primitives::{BlockId, BlockNumberOrTag}; use reth_rpc_api::{EthApiServer, OtterscanServer}; use reth_rpc_eth_api::{ helpers::{EthTransactions, TraceExt}, - FullEthApiTypes, RpcBlock, RpcReceipt, RpcTransaction, TransactionCompat, + FullEthApiTypes, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction, TransactionCompat, }; use reth_rpc_eth_types::{utils::binary_search, EthApiError}; use reth_rpc_server_types::result::internal_rpc_err; @@ -49,7 +49,7 @@ where &self, block: RpcBlock, receipts: Vec>, - ) -> RpcResult { + ) -> RpcResult>> { // blob fee is burnt, so we don't need to calculate it let total_fees = receipts .iter() @@ -61,18 +61,23 @@ where } #[async_trait] -impl OtterscanServer> for OtterscanApi +impl OtterscanServer, RpcHeader> + for OtterscanApi where Eth: EthApiServer< RpcTransaction, RpcBlock, RpcReceipt, + RpcHeader, > + EthTransactions + TraceExt + 'static, { /// Handler for `{ots,erigon}_getHeaderByNumber` - async fn get_header_by_number(&self, block_number: u64) -> RpcResult> { + async fn get_header_by_number( + &self, + block_number: u64, + ) -> RpcResult>> { self.eth.header_by_number(BlockNumberOrTag::Number(block_number)).await } @@ -165,7 +170,10 @@ where } /// Handler for `ots_getBlockDetails` - async fn get_block_details(&self, block_number: u64) -> RpcResult { + async fn get_block_details( + &self, + block_number: u64, + ) -> RpcResult>> { let block_id = block_number.into(); let block = self.eth.block_by_number(block_id, true); let block_id = block_id.into(); @@ -178,7 +186,10 @@ where } /// Handler for `getBlockDetailsByHash` - async fn get_block_details_by_hash(&self, block_hash: B256) -> RpcResult { + async fn get_block_details_by_hash( + &self, + block_hash: B256, + ) -> RpcResult>> { let block = self.eth.block_by_hash(block_hash, true); let block_id = block_hash.into(); let receipts = self.eth.block_receipts(block_id); @@ -195,7 +206,9 @@ where block_number: u64, page_number: usize, page_size: usize, - ) -> RpcResult>> { + ) -> RpcResult< + OtsBlockTransactions, RpcHeader>, + > { let block_id = block_number.into(); // retrieve full block and its receipts let block = self.eth.block_by_number(block_id, true); @@ -227,7 +240,8 @@ where *transactions = transactions.drain(page_start..page_end).collect::>(); // The input field returns only the 4 bytes method selector instead of 
the entire - // calldata byte blob. + // calldata byte blob + // See also: for tx in transactions.iter_mut() { if tx.input().len() > 4 { Eth::TransactionCompat::otterscan_api_truncate_input(tx); @@ -235,10 +249,10 @@ where } // Crop receipts and transform them into OtsTransactionReceipt - let timestamp = Some(block.header.timestamp); + let timestamp = Some(block.header.timestamp()); let receipts = receipts .drain(page_start..page_end) - .zip(transactions.iter().map(Eth::TransactionCompat::tx_type)) + .zip(transactions.iter().map(Transaction::ty)) .map(|(receipt, tx_ty)| { let inner = OtsReceipt { status: receipt.status(), @@ -261,7 +275,6 @@ where from: receipt.from(), to: receipt.to(), contract_address: receipt.contract_address(), - state_root: receipt.state_root(), authorization_list: receipt .authorization_list() .map(<[SignedAuthorization]>::to_vec), diff --git a/crates/rpc/rpc/src/reth.rs b/crates/rpc/rpc/src/reth.rs index 6d5897df131..c33f97f5301 100644 --- a/crates/rpc/rpc/src/reth.rs +++ b/crates/rpc/rpc/src/reth.rs @@ -1,10 +1,10 @@ use std::{collections::HashMap, future::Future, sync::Arc}; +use alloy_eips::BlockId; use alloy_primitives::{Address, U256}; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_errors::RethResult; -use reth_primitives::BlockId; use reth_provider::{BlockReaderIdExt, ChangeSetReader, StateProviderFactory}; use reth_rpc_api::RethApiServer; use reth_rpc_eth_types::{EthApiError, EthResult}; diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 8ac532ff341..b164e3c19eb 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -1,9 +1,11 @@ +use alloy_consensus::BlockHeader as _; +use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, B256, U256}; -use alloy_rpc_types::{ +use alloy_rpc_types_eth::{ state::{EvmOverrides, StateOverride}, + transaction::TransactionRequest, BlockOverrides, Index, }; -use alloy_rpc_types_eth::transaction::TransactionRequest; use alloy_rpc_types_trace::{ filter::TraceFilter, opcode::{BlockOpcodeGas, TransactionOpcodeGas}, @@ -17,16 +19,14 @@ use reth_consensus_common::calc::{ base_block_reward, base_block_reward_pre_merge, block_reward, ommer_reward, }; use reth_evm::ConfigureEvmEnv; -use reth_primitives::{BlockId, Header}; -use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; +use reth_primitives_traits::{BlockBody, BlockHeader}; +use reth_provider::{BlockNumReader, BlockReader, ChainSpecProvider, HeaderProvider}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::TraceApiServer; -use reth_rpc_eth_api::{ - helpers::{Call, TraceExt}, - FromEthApiError, -}; +use reth_rpc_eth_api::{helpers::TraceExt, FromEthApiError, RpcNodeCore}; use reth_rpc_eth_types::{error::EthApiError, utils::recover_raw_transaction}; use reth_tasks::pool::BlockingTaskGuard; +use reth_transaction_pool::{PoolPooledTx, PoolTransaction, TransactionPool}; use revm::{ db::{CacheDB, DatabaseCommit}, primitives::EnvWithHandlerCfg, @@ -41,21 +41,16 @@ use tokio::sync::{AcquireError, OwnedSemaphorePermit}; /// `trace` API implementation. /// /// This type provides the functionality for handling `trace` related requests. -pub struct TraceApi { - inner: Arc>, +pub struct TraceApi { + inner: Arc>, } // === impl TraceApi === -impl TraceApi { - /// The provider that can interact with the chain. 
- pub fn provider(&self) -> &Provider { - &self.inner.provider - } - +impl TraceApi { /// Create a new instance of the [`TraceApi`] - pub fn new(provider: Provider, eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { - let inner = Arc::new(TraceApiInner { provider, eth_api, blocking_task_guard }); + pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { + let inner = Arc::new(TraceApiInner { eth_api, blocking_task_guard }); Self { inner } } @@ -72,15 +67,17 @@ impl TraceApi { } } +impl TraceApi { + /// Access the underlying provider. + pub fn provider(&self) -> &Eth::Provider { + self.inner.eth_api.provider() + } +} + // === impl TraceApi === -impl TraceApi +impl TraceApi where - Provider: BlockReader - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + 'static, Eth: TraceExt + 'static, { /// Executes the given call and returns a number of possible traces for it. @@ -117,14 +114,15 @@ where trace_types: HashSet, block_id: Option, ) -> Result { - let tx = recover_raw_transaction(tx)?.into_ecrecovered_transaction(); + let tx = recover_raw_transaction::>(&tx)? + .map_transaction(::Transaction::pooled_into_consensus); let (cfg, block, at) = self.eth_api().evm_env_at(block_id.unwrap_or_default()).await?; let env = EnvWithHandlerCfg::new_with_cfg_env( cfg, block, - Call::evm_config(self.eth_api()).tx_env(tx.as_signed(), tx.signer()), + self.eth_api().evm_config().tx_env(tx.as_signed(), tx.signer()), ); let config = TracingInspectorConfig::from_parity_config(&trace_types); @@ -313,14 +311,18 @@ where // add reward traces for all blocks for block in &blocks { - if let Some(base_block_reward) = self.calculate_base_block_reward(&block.header)? { - let mut traces = self.extract_reward_traces( - &block.header, - &block.body.ommers, - base_block_reward, + if let Some(base_block_reward) = + self.calculate_base_block_reward(block.header.header())? + { + all_traces.extend( + self.extract_reward_traces( + block.header.header(), + block.body.ommers(), + base_block_reward, + ) + .into_iter() + .filter(|trace| matcher.matches(&trace.trace)), ); - traces.retain(|trace| matcher.matches(&trace.trace)); - all_traces.extend(traces); } else { // no block reward, means we're past the Paris hardfork and don't expect any rewards // because the blocks in ascending order @@ -391,10 +393,12 @@ where maybe_traces.map(|traces| traces.into_iter().flatten().collect::>()); if let (Some(block), Some(traces)) = (maybe_block, maybe_traces.as_mut()) { - if let Some(base_block_reward) = self.calculate_base_block_reward(&block.header)? { + if let Some(base_block_reward) = + self.calculate_base_block_reward(block.header.header())? 
+ { traces.extend(self.extract_reward_traces( - &block.header, - &block.body.ommers, + block.block.header(), + block.body.ommers(), base_block_reward, )); } @@ -488,7 +492,7 @@ where Ok(Some(BlockOpcodeGas { block_hash: block.hash(), - block_number: block.header.number, + block_number: block.header.number(), transactions, })) } @@ -498,25 +502,28 @@ where /// - if Paris hardfork is activated, no block rewards are given /// - if Paris hardfork is not activated, calculate block rewards with block number only /// - if Paris hardfork is unknown, calculate block rewards with block number and ttd - fn calculate_base_block_reward(&self, header: &Header) -> Result, Eth::Error> { + fn calculate_base_block_reward( + &self, + header: &H, + ) -> Result, Eth::Error> { let chain_spec = self.provider().chain_spec(); - let is_paris_activated = chain_spec.is_paris_active_at_block(header.number); + let is_paris_activated = chain_spec.is_paris_active_at_block(header.number()); Ok(match is_paris_activated { Some(true) => None, - Some(false) => Some(base_block_reward_pre_merge(&chain_spec, header.number)), + Some(false) => Some(base_block_reward_pre_merge(&chain_spec, header.number())), None => { // if Paris hardfork is unknown, we need to fetch the total difficulty at the // block's height and check if it is pre-merge to calculate the base block reward if let Some(header_td) = self .provider() - .header_td_by_number(header.number) + .header_td_by_number(header.number()) .map_err(Eth::Error::from_eth_err)? { base_block_reward( chain_spec.as_ref(), - header.number, - header.difficulty, + header.number(), + header.difficulty(), header_td, ) } else { @@ -529,30 +536,33 @@ where /// Extracts the reward traces for the given block: /// - block reward /// - uncle rewards - fn extract_reward_traces( + fn extract_reward_traces( &self, - header: &Header, - ommers: &[Header], + header: &H, + ommers: Option<&[H]>, base_block_reward: u128, ) -> Vec { - let mut traces = Vec::with_capacity(ommers.len() + 1); + let ommers_cnt = ommers.map(|o| o.len()).unwrap_or_default(); + let mut traces = Vec::with_capacity(ommers_cnt + 1); - let block_reward = block_reward(base_block_reward, ommers.len()); + let block_reward = block_reward(base_block_reward, ommers_cnt); traces.push(reward_trace( header, RewardAction { - author: header.beneficiary, + author: header.beneficiary(), reward_type: RewardType::Block, value: U256::from(block_reward), }, )); + let Some(ommers) = ommers else { return traces }; + for uncle in ommers { - let uncle_reward = ommer_reward(base_block_reward, header.number, uncle.number); + let uncle_reward = ommer_reward(base_block_reward, header.number(), uncle.number()); traces.push(reward_trace( header, RewardAction { - author: uncle.beneficiary, + author: uncle.beneficiary(), reward_type: RewardType::Uncle, value: U256::from(uncle_reward), }, @@ -563,13 +573,8 @@ where } #[async_trait] -impl TraceApiServer for TraceApi +impl TraceApiServer for TraceApi where - Provider: BlockReader - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + 'static, Eth: TraceExt + 'static, { /// Executes the given call and returns a number of possible traces for it. 
@@ -691,20 +696,18 @@ where } } -impl std::fmt::Debug for TraceApi { +impl std::fmt::Debug for TraceApi { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("TraceApi").finish_non_exhaustive() } } -impl Clone for TraceApi { +impl Clone for TraceApi { fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner) } } } -struct TraceApiInner { - /// The provider that can interact with the chain. - provider: Provider, +struct TraceApiInner { /// Access to commonly used code of the `eth` namespace eth_api: Eth, // restrict the number of concurrent calls to `trace_*` @@ -713,10 +716,10 @@ struct TraceApiInner { /// Helper to construct a [`LocalizedTransactionTrace`] that describes a reward to the block /// beneficiary. -fn reward_trace(header: &Header, reward: RewardAction) -> LocalizedTransactionTrace { +fn reward_trace(header: &H, reward: RewardAction) -> LocalizedTransactionTrace { LocalizedTransactionTrace { block_hash: Some(header.hash_slow()), - block_number: Some(header.number), + block_number: Some(header.number()), transaction_hash: None, transaction_position: None, trace: TransactionTrace { diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 47aaac0bbfd..4709c9878fa 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -1,4 +1,5 @@ -use std::{collections::BTreeMap, marker::PhantomData}; +use core::fmt; +use std::collections::BTreeMap; use alloy_consensus::Transaction; use alloy_primitives::Address; @@ -6,12 +7,12 @@ use alloy_rpc_types_txpool::{ TxpoolContent, TxpoolContentFrom, TxpoolInspect, TxpoolInspectSummary, TxpoolStatus, }; use async_trait::async_trait; -use jsonrpsee::core::RpcResult as Result; -use reth_primitives::TransactionSignedEcRecovered; +use jsonrpsee::core::RpcResult; use reth_rpc_api::TxPoolApiServer; -use reth_rpc_eth_api::{FullEthApiTypes, RpcTransaction}; use reth_rpc_types_compat::{transaction::from_recovered, TransactionCompat}; -use reth_transaction_pool::{AllPoolTransactions, PoolTransaction, TransactionPool}; +use reth_transaction_pool::{ + AllPoolTransactions, PoolConsensusTx, PoolTransaction, TransactionPool, +}; use tracing::trace; /// `txpool` API implementation. @@ -21,62 +22,66 @@ use tracing::trace; pub struct TxPoolApi { /// An interface to interact with the pool pool: Pool, - _tx_resp_builder: PhantomData, + tx_resp_builder: Eth, } impl TxPoolApi { /// Creates a new instance of `TxpoolApi`. 
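+    ///
+    /// `tx_resp_builder` is the [`TransactionCompat`] implementation used to convert
+    /// pooled transactions into RPC transaction responses.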
- pub const fn new(pool: Pool) -> Self { - Self { pool, _tx_resp_builder: PhantomData } + pub const fn new(pool: Pool, tx_resp_builder: Eth) -> Self { + Self { pool, tx_resp_builder } } } impl TxPoolApi where - Pool: TransactionPool + 'static, - Eth: FullEthApiTypes, + Pool: TransactionPool> + 'static, + Eth: TransactionCompat>, { - fn content(&self) -> TxpoolContent> { + fn content(&self) -> Result, Eth::Error> { #[inline] fn insert( tx: &Tx, content: &mut BTreeMap>, - ) where - Tx: PoolTransaction>, - RpcTxB: TransactionCompat, + resp_builder: &RpcTxB, + ) -> Result<(), RpcTxB::Error> + where + Tx: PoolTransaction, + RpcTxB: TransactionCompat, { content.entry(tx.sender()).or_default().insert( tx.nonce().to_string(), - from_recovered::(tx.clone().into_consensus().into()), + from_recovered(tx.clone_into_consensus(), resp_builder)?, ); + + Ok(()) } let AllPoolTransactions { pending, queued } = self.pool.all_transactions(); let mut content = TxpoolContent { pending: BTreeMap::new(), queued: BTreeMap::new() }; for pending in pending { - insert::<_, Eth::TransactionCompat>(&pending.transaction, &mut content.pending); + insert::<_, Eth>(&pending.transaction, &mut content.pending, &self.tx_resp_builder)?; } for queued in queued { - insert::<_, Eth::TransactionCompat>(&queued.transaction, &mut content.queued); + insert::<_, Eth>(&queued.transaction, &mut content.queued, &self.tx_resp_builder)?; } - content + Ok(content) } } #[async_trait] -impl TxPoolApiServer> for TxPoolApi +impl TxPoolApiServer for TxPoolApi where - Pool: TransactionPool + 'static, - Eth: FullEthApiTypes + 'static, + Pool: TransactionPool> + 'static, + Eth: TransactionCompat> + 'static, { /// Returns the number of transactions currently pending for inclusion in the next block(s), as /// well as the ones that are being scheduled for future execution only. 
/// Ref: [Here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_status) /// /// Handler for `txpool_status` - async fn txpool_status(&self) -> Result { + async fn txpool_status(&self) -> RpcResult { trace!(target: "rpc::eth", "Serving txpool_status"); let all = self.pool.all_transactions(); Ok(TxpoolStatus { pending: all.pending.len() as u64, queued: all.queued.len() as u64 }) @@ -88,23 +93,23 @@ where /// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_inspect) for more details /// /// Handler for `txpool_inspect` - async fn txpool_inspect(&self) -> Result { + async fn txpool_inspect(&self) -> RpcResult { trace!(target: "rpc::eth", "Serving txpool_inspect"); #[inline] - fn insert>>( + fn insert>( tx: &T, inspect: &mut BTreeMap>, ) { let entry = inspect.entry(tx.sender()).or_default(); - let tx: TransactionSignedEcRecovered = tx.clone().into_consensus().into(); + let tx = tx.clone_into_consensus(); entry.insert( tx.nonce().to_string(), TxpoolInspectSummary { to: tx.to(), value: tx.value(), gas: tx.gas_limit() as u128, - gas_price: tx.transaction.max_fee_per_gas(), + gas_price: tx.max_fee_per_gas(), }, ); } @@ -131,9 +136,9 @@ where async fn txpool_content_from( &self, from: Address, - ) -> Result>> { + ) -> RpcResult> { trace!(target: "rpc::eth", ?from, "Serving txpool_contentFrom"); - Ok(self.content().remove_from(&from)) + Ok(self.content().map_err(Into::into)?.remove_from(&from)) } /// Returns the details of all transactions currently pending for inclusion in the next @@ -141,14 +146,14 @@ where /// /// See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_content) for more details /// Handler for `txpool_content` - async fn txpool_content(&self) -> Result>> { + async fn txpool_content(&self) -> RpcResult> { trace!(target: "rpc::eth", "Serving txpool_content"); - Ok(self.content()) + Ok(self.content().map_err(Into::into)?) 
} } -impl std::fmt::Debug for TxPoolApi { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Debug for TxPoolApi { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TxpoolApi").finish_non_exhaustive() } } diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs new file mode 100644 index 00000000000..a7042126cba --- /dev/null +++ b/crates/rpc/rpc/src/validation.rs @@ -0,0 +1,526 @@ +use alloy_consensus::{ + BlobTransactionValidationError, BlockHeader, EnvKzgSettings, Transaction, TxReceipt, +}; +use alloy_eips::{eip4844::kzg_to_versioned_hash, eip7685::RequestsOrHash}; +use alloy_rpc_types_beacon::relay::{ + BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, + BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, +}; +use alloy_rpc_types_engine::{ + BlobsBundleV1, CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, PayloadError, + PraguePayloadFields, +}; +use async_trait::async_trait; +use jsonrpsee::core::RpcResult; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_consensus::{Consensus, FullConsensus, PostExecutionInput}; +use reth_engine_primitives::PayloadValidator; +use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; +use reth_ethereum_consensus::GAS_LIMIT_BOUND_DIVISOR; +use reth_evm::execute::{BlockExecutorProvider, Executor}; +use reth_primitives::{GotExpected, NodePrimitives, SealedBlockWithSenders, SealedHeader}; +use reth_primitives_traits::{Block as _, BlockBody}; +use reth_provider::{ + BlockExecutionInput, BlockExecutionOutput, BlockReaderIdExt, StateProviderFactory, +}; +use reth_revm::{cached::CachedReads, database::StateProviderDatabase}; +use reth_rpc_api::BlockSubmissionValidationApiServer; +use reth_rpc_server_types::result::internal_rpc_err; +use reth_tasks::TaskSpawner; +use revm_primitives::{Address, B256, U256}; +use serde::{Deserialize, Serialize}; +use std::{collections::HashSet, sync::Arc}; +use tokio::sync::{oneshot, RwLock}; + +/// The type that implements the `validation` rpc namespace trait +#[derive(Clone, Debug, derive_more::Deref)] +pub struct ValidationApi { + #[deref] + inner: Arc>, +} + +impl ValidationApi +where + E: BlockExecutorProvider, +{ + /// Create a new instance of the [`ValidationApi`] + pub fn new( + provider: Provider, + consensus: Arc>, + executor_provider: E, + config: ValidationApiConfig, + task_spawner: Box, + payload_validator: Arc< + dyn PayloadValidator::Block>, + >, + ) -> Self { + let ValidationApiConfig { disallow } = config; + + let inner = Arc::new(ValidationApiInner { + provider, + consensus, + payload_validator, + executor_provider, + disallow, + cached_state: Default::default(), + task_spawner, + }); + + Self { inner } + } + + /// Returns the cached reads for the given head hash. + async fn cached_reads(&self, head: B256) -> CachedReads { + let cache = self.inner.cached_state.read().await; + if cache.0 == head { + cache.1.clone() + } else { + Default::default() + } + } + + /// Updates the cached state for the given head hash. + async fn update_cached_reads(&self, head: B256, cached_state: CachedReads) { + let mut cache = self.inner.cached_state.write().await; + if cache.0 == head { + cache.1.extend(cached_state); + } else { + *cache = (head, cached_state) + } + } +} + +impl ValidationApi +where + Provider: BlockReaderIdExt
::BlockHeader> + + ChainSpecProvider + + StateProviderFactory + + 'static, + E: BlockExecutorProvider, +{ + /// Validates the given block and a [`BidTrace`] against it. + pub async fn validate_message_against_block( + &self, + block: SealedBlockWithSenders<::Block>, + message: BidTrace, + registered_gas_limit: u64, + ) -> Result<(), ValidationApiError> { + self.validate_message_against_header(&block.header, &message)?; + + self.consensus.validate_header_with_total_difficulty(&block.header, U256::MAX)?; + self.consensus.validate_header(&block.header)?; + self.consensus.validate_block_pre_execution(&block)?; + + if !self.disallow.is_empty() { + if self.disallow.contains(&block.beneficiary()) { + return Err(ValidationApiError::Blacklist(block.beneficiary())) + } + if self.disallow.contains(&message.proposer_fee_recipient) { + return Err(ValidationApiError::Blacklist(message.proposer_fee_recipient)) + } + for (sender, tx) in block.senders.iter().zip(block.transactions()) { + if self.disallow.contains(sender) { + return Err(ValidationApiError::Blacklist(*sender)) + } + if let Some(to) = tx.to() { + if self.disallow.contains(&to) { + return Err(ValidationApiError::Blacklist(to)) + } + } + } + } + + let latest_header = + self.provider.latest_header()?.ok_or_else(|| ValidationApiError::MissingLatestBlock)?; + + if latest_header.hash() != block.header.parent_hash() { + return Err(ConsensusError::ParentHashMismatch( + GotExpected { got: block.header.parent_hash(), expected: latest_header.hash() } + .into(), + ) + .into()) + } + self.consensus.validate_header_against_parent(&block.header, &latest_header)?; + self.validate_gas_limit(registered_gas_limit, &latest_header, &block.header)?; + + let latest_header_hash = latest_header.hash(); + let state_provider = self.provider.state_by_block_hash(latest_header_hash)?; + + let mut request_cache = self.cached_reads(latest_header_hash).await; + + let cached_db = request_cache.as_db_mut(StateProviderDatabase::new(&state_provider)); + let executor = self.executor_provider.executor(cached_db); + + let block = block.unseal(); + let mut accessed_blacklisted = None; + let output = executor.execute_with_state_closure( + BlockExecutionInput::new(&block, U256::MAX), + |state| { + if !self.disallow.is_empty() { + for account in state.cache.accounts.keys() { + if self.disallow.contains(account) { + accessed_blacklisted = Some(*account); + } + } + } + }, + )?; + + // update the cached reads + self.update_cached_reads(latest_header_hash, request_cache).await; + + if let Some(account) = accessed_blacklisted { + return Err(ValidationApiError::Blacklist(account)) + } + + self.consensus.validate_block_post_execution( + &block, + PostExecutionInput::new(&output.receipts, &output.requests), + )?; + + self.ensure_payment(&block, &output, &message)?; + + let state_root = + state_provider.state_root(state_provider.hashed_post_state(&output.state))?; + + if state_root != block.header().state_root() { + return Err(ConsensusError::BodyStateRootDiff( + GotExpected { got: state_root, expected: block.header().state_root() }.into(), + ) + .into()) + } + + Ok(()) + } + + /// Ensures that fields of [`BidTrace`] match the fields of the [`SealedHeader`]. 
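+    ///
+    /// Checks, in order: block hash, parent hash, gas limit and gas used; the first
+    /// mismatch is reported as the corresponding `GotExpected` mismatch error.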
+ fn validate_message_against_header( + &self, + header: &SealedHeader<::BlockHeader>, + message: &BidTrace, + ) -> Result<(), ValidationApiError> { + if header.hash() != message.block_hash { + Err(ValidationApiError::BlockHashMismatch(GotExpected { + got: message.block_hash, + expected: header.hash(), + })) + } else if header.parent_hash() != message.parent_hash { + Err(ValidationApiError::ParentHashMismatch(GotExpected { + got: message.parent_hash, + expected: header.parent_hash(), + })) + } else if header.gas_limit() != message.gas_limit { + Err(ValidationApiError::GasLimitMismatch(GotExpected { + got: message.gas_limit, + expected: header.gas_limit(), + })) + } else if header.gas_used() != message.gas_used { + return Err(ValidationApiError::GasUsedMismatch(GotExpected { + got: message.gas_used, + expected: header.gas_used(), + })) + } else { + Ok(()) + } + } + + /// Ensures that the chosen gas limit is the closest possible value for the validator's + /// registered gas limit. + /// + /// Ref: + fn validate_gas_limit( + &self, + registered_gas_limit: u64, + parent_header: &SealedHeader<::BlockHeader>, + header: &SealedHeader<::BlockHeader>, + ) -> Result<(), ValidationApiError> { + let max_gas_limit = + parent_header.gas_limit() + parent_header.gas_limit() / GAS_LIMIT_BOUND_DIVISOR - 1; + let min_gas_limit = + parent_header.gas_limit() - parent_header.gas_limit() / GAS_LIMIT_BOUND_DIVISOR + 1; + + let best_gas_limit = + std::cmp::max(min_gas_limit, std::cmp::min(max_gas_limit, registered_gas_limit)); + + if best_gas_limit != header.gas_limit() { + return Err(ValidationApiError::GasLimitMismatch(GotExpected { + got: header.gas_limit(), + expected: best_gas_limit, + })) + } + + Ok(()) + } + + /// Ensures that the proposer has received [`BidTrace::value`] for this block. + /// + /// Firstly attempts to verify the payment by checking the state changes, otherwise falls back + /// to checking the latest block transaction. 
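+    ///
+    /// The fallback accepts only the last transaction of the block as the payment:
+    /// it must have succeeded, transfer exactly [`BidTrace::value`] to the proposer
+    /// fee recipient with empty calldata, and pay no priority fee.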
+ fn ensure_payment( + &self, + block: &::Block, + output: &BlockExecutionOutput<::Receipt>, + message: &BidTrace, + ) -> Result<(), ValidationApiError> { + let (mut balance_before, balance_after) = if let Some(acc) = + output.state.state.get(&message.proposer_fee_recipient) + { + let balance_before = acc.original_info.as_ref().map(|i| i.balance).unwrap_or_default(); + let balance_after = acc.info.as_ref().map(|i| i.balance).unwrap_or_default(); + + (balance_before, balance_after) + } else { + // account might have balance but considering it zero is fine as long as we know + // that balance have not changed + (U256::ZERO, U256::ZERO) + }; + + if let Some(withdrawals) = block.body().withdrawals() { + for withdrawal in withdrawals { + if withdrawal.address == message.proposer_fee_recipient { + balance_before += withdrawal.amount_wei(); + } + } + } + + if balance_after >= balance_before + message.value { + return Ok(()) + } + + let (receipt, tx) = output + .receipts + .last() + .zip(block.body().transactions().last()) + .ok_or(ValidationApiError::ProposerPayment)?; + + if !receipt.status() { + return Err(ValidationApiError::ProposerPayment) + } + + if tx.to() != Some(message.proposer_fee_recipient) { + return Err(ValidationApiError::ProposerPayment) + } + + if tx.value() != message.value { + return Err(ValidationApiError::ProposerPayment) + } + + if !tx.input().is_empty() { + return Err(ValidationApiError::ProposerPayment) + } + + if let Some(block_base_fee) = block.header().base_fee_per_gas() { + if tx.effective_tip_per_gas(block_base_fee).unwrap_or_default() != 0 { + return Err(ValidationApiError::ProposerPayment) + } + } + + Ok(()) + } + + /// Validates the given [`BlobsBundleV1`] and returns versioned hashes for blobs. + pub fn validate_blobs_bundle( + &self, + mut blobs_bundle: BlobsBundleV1, + ) -> Result, ValidationApiError> { + if blobs_bundle.commitments.len() != blobs_bundle.proofs.len() || + blobs_bundle.commitments.len() != blobs_bundle.blobs.len() + { + return Err(ValidationApiError::InvalidBlobsBundle) + } + + let versioned_hashes = blobs_bundle + .commitments + .iter() + .map(|c| kzg_to_versioned_hash(c.as_slice())) + .collect::>(); + + let sidecar = blobs_bundle.pop_sidecar(blobs_bundle.blobs.len()); + + sidecar.validate(&versioned_hashes, EnvKzgSettings::default().get())?; + + Ok(versioned_hashes) + } + + /// Core logic for validating the builder submission v3 + async fn validate_builder_submission_v3( + &self, + request: BuilderBlockValidationRequestV3, + ) -> Result<(), ValidationApiError> { + let block = self + .payload_validator + .ensure_well_formed_payload( + ExecutionPayload::V3(request.request.execution_payload), + ExecutionPayloadSidecar::v3(CancunPayloadFields { + parent_beacon_block_root: request.parent_beacon_block_root, + versioned_hashes: self.validate_blobs_bundle(request.request.blobs_bundle)?, + }), + )? 
+    /// Core logic for validating the builder submission v3
+    async fn validate_builder_submission_v3(
+        &self,
+        request: BuilderBlockValidationRequestV3,
+    ) -> Result<(), ValidationApiError> {
+        let block = self
+            .payload_validator
+            .ensure_well_formed_payload(
+                ExecutionPayload::V3(request.request.execution_payload),
+                ExecutionPayloadSidecar::v3(CancunPayloadFields {
+                    parent_beacon_block_root: request.parent_beacon_block_root,
+                    versioned_hashes: self.validate_blobs_bundle(request.request.blobs_bundle)?,
+                }),
+            )?
+            .try_seal_with_senders()
+            .map_err(|_| ValidationApiError::InvalidTransactionSignature)?;
+
+        self.validate_message_against_block(
+            block,
+            request.request.message,
+            request.registered_gas_limit,
+        )
+        .await
+    }
+
+    /// Core logic for validating the builder submission v4
+    async fn validate_builder_submission_v4(
+        &self,
+        request: BuilderBlockValidationRequestV4,
+    ) -> Result<(), ValidationApiError> {
+        let block = self
+            .payload_validator
+            .ensure_well_formed_payload(
+                ExecutionPayload::V3(request.request.execution_payload),
+                ExecutionPayloadSidecar::v4(
+                    CancunPayloadFields {
+                        parent_beacon_block_root: request.parent_beacon_block_root,
+                        versioned_hashes: self
+                            .validate_blobs_bundle(request.request.blobs_bundle)?,
+                    },
+                    PraguePayloadFields {
+                        requests: RequestsOrHash::Requests(
+                            request.request.execution_requests.into(),
+                        ),
+                        target_blobs_per_block: request.request.target_blobs_per_block,
+                    },
+                ),
+            )?
+            .try_seal_with_senders()
+            .map_err(|_| ValidationApiError::InvalidTransactionSignature)?;
+
+        self.validate_message_against_block(
+            block,
+            request.request.message,
+            request.registered_gas_limit,
+        )
+        .await
+    }
+}
+
+#[async_trait]
+impl BlockSubmissionValidationApiServer for ValidationApi
+where
+    Provider: BlockReaderIdExt
::BlockHeader> + + ChainSpecProvider + + StateProviderFactory + + Clone + + 'static, + E: BlockExecutorProvider, +{ + async fn validate_builder_submission_v1( + &self, + _request: BuilderBlockValidationRequest, + ) -> RpcResult<()> { + Err(internal_rpc_err("unimplemented")) + } + + async fn validate_builder_submission_v2( + &self, + _request: BuilderBlockValidationRequestV2, + ) -> RpcResult<()> { + Err(internal_rpc_err("unimplemented")) + } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v3( + &self, + request: BuilderBlockValidationRequestV3, + ) -> RpcResult<()> { + let this = self.clone(); + let (tx, rx) = oneshot::channel(); + + self.task_spawner.spawn_blocking(Box::pin(async move { + let result = Self::validate_builder_submission_v3(&this, request) + .await + .map_err(|err| internal_rpc_err(err.to_string())); + let _ = tx.send(result); + })); + + rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))? + } + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v4( + &self, + request: BuilderBlockValidationRequestV4, + ) -> RpcResult<()> { + let this = self.clone(); + let (tx, rx) = oneshot::channel(); + + self.task_spawner.spawn_blocking(Box::pin(async move { + let result = Self::validate_builder_submission_v4(&this, request) + .await + .map_err(|err| internal_rpc_err(err.to_string())); + let _ = tx.send(result); + })); + + rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))? + } +} + +#[derive(Debug)] +pub struct ValidationApiInner { + /// The provider that can interact with the chain. + provider: Provider, + /// Consensus implementation. + consensus: Arc>, + /// Execution payload validator. + payload_validator: Arc::Block>>, + /// Block executor factory. + executor_provider: E, + /// Set of disallowed addresses + disallow: HashSet
<Address>,
+    /// Cached state reads to avoid redundant disk I/O across multiple validation attempts
+    /// targeting the same state. Stores a tuple of (`block_hash`, `cached_reads`) for the
+    /// latest head block state. Uses async `RwLock` to safely handle concurrent validation
+    /// requests.
+    cached_state: RwLock<(B256, CachedReads)>,
+    /// Task spawner for blocking operations
+    task_spawner: Box<dyn TaskSpawner>,
+}
+
+/// Configuration for validation API.
+#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)]
+pub struct ValidationApiConfig {
+    /// Disallowed addresses.
+    pub disallow: HashSet<Address>,
+}
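The `task_spawner` stored above is what the RPC methods earlier in this diff use to offload validation onto a blocking thread, bridging the result back through a oneshot channel. A minimal, runnable sketch of the same pattern, using tokio's own `spawn_blocking` in place of the configured `TaskSpawner`, with `heavy_validation` as a hypothetical stand-in for the real routine:

```rust
use tokio::sync::oneshot;

/// Hypothetical stand-in for the real validation routine.
fn heavy_validation() -> Result<(), String> {
    Ok(())
}

async fn validate_off_thread() -> Result<(), String> {
    let (tx, rx) = oneshot::channel();

    // The real code hands a boxed future to the configured `TaskSpawner`;
    // tokio's `spawn_blocking` demonstrates the same hand-off.
    tokio::task::spawn_blocking(move || {
        // The receiver may be gone if the caller was cancelled; ignore the error.
        let _ = tx.send(heavy_validation());
    });

    // A dropped sender (e.g. a panicked task) surfaces as an internal error,
    // matching the `internal_rpc_err` mapping in the RPC methods.
    rx.await.map_err(|_| "internal blocking task error".to_string())?
}

#[tokio::main]
async fn main() {
    assert!(validate_off_thread().await.is_ok());
}
```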
+
+/// Errors thrown by the validation API.
+#[derive(Debug, thiserror::Error)]
+pub enum ValidationApiError {
+    #[error("block gas limit mismatch: {_0}")]
+    GasLimitMismatch(GotExpected<u64>),
+    #[error("block gas used mismatch: {_0}")]
+    GasUsedMismatch(GotExpected<u64>),
+    #[error("block parent hash mismatch: {_0}")]
+    ParentHashMismatch(GotExpected<B256>),
+    #[error("block hash mismatch: {_0}")]
+    BlockHashMismatch(GotExpected<B256>),
+    #[error("missing latest block in database")]
+    MissingLatestBlock,
+    #[error("could not verify proposer payment")]
+    ProposerPayment,
+    #[error("invalid blobs bundle")]
+    InvalidBlobsBundle,
+    /// When the transaction signature is invalid
+    #[error("invalid transaction signature")]
+    InvalidTransactionSignature,
+    #[error("block accesses blacklisted address: {_0}")]
+    Blacklist(Address),
+    #[error(transparent)]
+    Blob(#[from] BlobTransactionValidationError),
+    #[error(transparent)]
+    Consensus(#[from] ConsensusError),
+    #[error(transparent)]
+    Provider(#[from] ProviderError),
+    #[error(transparent)]
+    Execution(#[from] BlockExecutionError),
+    #[error(transparent)]
+    Payload(#[from] PayloadError),
+}
diff --git a/crates/stages/api/Cargo.toml b/crates/stages/api/Cargo.toml
index 352d3e02476..ffa34afa71e 100644
--- a/crates/stages/api/Cargo.toml
+++ b/crates/stages/api/Cargo.toml
@@ -23,7 +23,9 @@ reth-errors.workspace = true
 reth-stages-types.workspace = true
 reth-static-file-types.workspace = true
 
+# alloy
 alloy-primitives.workspace = true
+alloy-eips.workspace = true
 
 # metrics
 reth-metrics.workspace = true
@@ -46,4 +48,10 @@ tokio-stream.workspace = true
 reth-testing-utils.workspace = true
 
 [features]
-test-utils = []
+test-utils = [
+    "reth-consensus/test-utils",
+    "reth-network-p2p/test-utils",
+    "reth-primitives-traits/test-utils",
+    "reth-provider/test-utils",
+    "reth-stages-types/test-utils"
+]
diff --git a/crates/stages/api/src/error.rs b/crates/stages/api/src/error.rs
index 68e1d00fdae..b63dd20f77c 100644
--- a/crates/stages/api/src/error.rs
+++ b/crates/stages/api/src/error.rs
@@ -1,9 +1,8 @@
 use crate::PipelineEvent;
-use alloy_primitives::{BlockNumber, TxNumber};
+use alloy_eips::eip1898::BlockWithParent;
 use reth_consensus::ConsensusError;
 use reth_errors::{BlockExecutionError, DatabaseError, RethError};
 use reth_network_p2p::error::DownloadError;
-use reth_primitives_traits::SealedHeader;
 use reth_provider::ProviderError;
 use reth_prune::{PruneSegment, PruneSegmentError, PrunerError};
 use reth_static_file_types::StaticFileSegment;
@@ -35,10 +34,10 @@ impl BlockErrorKind {
 #[derive(Error, Debug)]
 pub enum StageError {
     /// The stage encountered an error related to a block.
-    #[error("stage encountered an error in block #{number}: {error}", number = block.number)]
+    #[error("stage encountered an error in block #{number}: {error}", number = block.block.number)]
     Block {
         /// The block that caused the error.
-        block: Box<SealedHeader>,
+        block: Box<BlockWithParent>,
         /// The specific error type, either consensus or execution error.
#[source] error: BlockErrorKind, @@ -49,16 +48,16 @@ pub enum StageError { "stage encountered inconsistent chain: \ downloaded header #{header_number} ({header_hash}) is detached from \ local head #{head_number} ({head_hash}): {error}", - header_number = header.number, - header_hash = header.hash(), - head_number = local_head.number, - head_hash = local_head.hash(), + header_number = header.block.number, + header_hash = header.block.hash, + head_number = local_head.block.number, + head_hash = local_head.block.hash, )] DetachedHead { /// The local head we attempted to attach to. - local_head: Box, + local_head: Box, /// The header we attempted to attach. - header: Box, + header: Box, /// The error that occurred when attempting to attach the header. #[source] error: Box, @@ -93,35 +92,13 @@ pub enum StageError { #[error("invalid download response: {0}")] Download(#[from] DownloadError), /// Database is ahead of static file data. - #[error("missing static file data for block number: {number}", number = block.number)] + #[error("missing static file data for block number: {number}", number = block.block.number)] MissingStaticFileData { /// Starting block with missing data. - block: Box, + block: Box, /// Static File segment segment: StaticFileSegment, }, - /// Unrecoverable inconsistency error related to a transaction number in a static file segment. - #[error( - "inconsistent transaction number for {segment}. db: {database}, static_file: {static_file}" - )] - InconsistentTxNumber { - /// Static File segment where this error was encountered. - segment: StaticFileSegment, - /// Expected database transaction number. - database: TxNumber, - /// Expected static file transaction number. - static_file: TxNumber, - }, - /// Unrecoverable inconsistency error related to a block number in a static file segment. - #[error("inconsistent block number for {segment}. db: {database}, static_file: {static_file}")] - InconsistentBlockNumber { - /// Static File segment where this error was encountered. - segment: StaticFileSegment, - /// Expected database block number. - database: BlockNumber, - /// Expected static file block number. - static_file: BlockNumber, - }, /// The prune checkpoint for the given segment is missing. #[error("missing prune checkpoint for {0}")] MissingPruneCheckpoint(PruneSegment), @@ -156,8 +133,6 @@ impl StageError { Self::MissingDownloadBuffer | Self::MissingSyncGap | Self::ChannelClosed | - Self::InconsistentBlockNumber { .. } | - Self::InconsistentTxNumber { .. } | Self::Internal(_) | Self::Fatal(_) ) @@ -188,4 +163,7 @@ pub enum PipelineError { /// Internal error #[error(transparent)] Internal(#[from] RethError), + /// The pipeline encountered an unwind when `fail_on_unwind` was set to `true`. + #[error("unexpected unwind")] + UnexpectedUnwind, } diff --git a/crates/stages/api/src/pipeline/builder.rs b/crates/stages/api/src/pipeline/builder.rs index 68ca887fe79..45bdc2d8942 100644 --- a/crates/stages/api/src/pipeline/builder.rs +++ b/crates/stages/api/src/pipeline/builder.rs @@ -14,6 +14,7 @@ pub struct PipelineBuilder { /// A receiver for the current chain tip to sync to. tip_tx: Option>, metrics_tx: Option, + fail_on_unwind: bool, } impl PipelineBuilder { @@ -34,7 +35,9 @@ impl PipelineBuilder { /// [`builder`][StageSet::builder] on the set which will convert it to a /// [`StageSetBuilder`][crate::StageSetBuilder]. 
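`StageError` and `ControlFlow` now carry the compact `BlockWithParent` from `alloy-eips` rather than a boxed `SealedHeader`, which is all the unwind logic needs. A sketch of how a call site builds one, mirroring the construction used in the execution stage later in this diff:

```rust
use alloy_eips::{eip1898::BlockWithParent, NumHash};
use alloy_primitives::B256;

fn main() {
    // Hypothetical values standing in for a real header.
    let parent_hash = B256::ZERO;
    let (number, hash) = (5u64, B256::repeat_byte(0x11));

    // Number and hash of the offending block plus its parent hash: enough to
    // report the error and pick an unwind target, without boxing a full header.
    let bad_block = BlockWithParent::new(parent_hash, NumHash::new(number, hash));
    assert_eq!(bad_block.block.number, 5);
}
```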
pub fn add_stages>(mut self, set: Set) -> Self { - for stage in set.builder().build() { + let states = set.builder().build(); + self.stages.reserve_exact(states.len()); + for stage in states { self.stages.push(stage); } self @@ -60,6 +63,12 @@ impl PipelineBuilder { self } + /// Set whether pipeline should fail on unwind. + pub const fn with_fail_on_unwind(mut self, yes: bool) -> Self { + self.fail_on_unwind = yes; + self + } + /// Builds the final [`Pipeline`] using the given database. pub fn build( self, @@ -70,7 +79,7 @@ impl PipelineBuilder { N: ProviderNodeTypes, ProviderFactory: DatabaseProviderFactory, { - let Self { stages, max_block, tip_tx, metrics_tx } = self; + let Self { stages, max_block, tip_tx, metrics_tx, fail_on_unwind } = self; Pipeline { provider_factory, stages, @@ -80,13 +89,20 @@ impl PipelineBuilder { event_sender: Default::default(), progress: Default::default(), metrics_tx, + fail_on_unwind, } } } impl Default for PipelineBuilder { fn default() -> Self { - Self { stages: Vec::new(), max_block: None, tip_tx: None, metrics_tx: None } + Self { + stages: Vec::new(), + max_block: None, + tip_tx: None, + metrics_tx: None, + fail_on_unwind: false, + } } } @@ -95,6 +111,7 @@ impl std::fmt::Debug for PipelineBuilder { f.debug_struct("PipelineBuilder") .field("stages", &self.stages.iter().map(|stage| stage.id()).collect::>()) .field("max_block", &self.max_block) + .field("fail_on_unwind", &self.fail_on_unwind) .finish() } } diff --git a/crates/stages/api/src/pipeline/ctrl.rs b/crates/stages/api/src/pipeline/ctrl.rs index 16185755245..378385e97b7 100644 --- a/crates/stages/api/src/pipeline/ctrl.rs +++ b/crates/stages/api/src/pipeline/ctrl.rs @@ -1,5 +1,5 @@ +use alloy_eips::eip1898::BlockWithParent; use alloy_primitives::BlockNumber; -use reth_primitives_traits::SealedHeader; /// Determines the control flow during pipeline execution. /// @@ -11,7 +11,7 @@ pub enum ControlFlow { /// The block to unwind to. target: BlockNumber, /// The block that caused the unwind. - bad_block: Box, + bad_block: Box, }, /// The pipeline made progress. Continue { diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 14225a59528..2cb98d44f93 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -9,7 +9,7 @@ use reth_primitives_traits::constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH; use reth_provider::{ providers::ProviderNodeTypes, writer::UnifiedStorageWriter, ChainStateBlockReader, ChainStateBlockWriter, DatabaseProviderFactory, ProviderFactory, StageCheckpointReader, - StageCheckpointWriter, StaticFileProviderFactory, + StageCheckpointWriter, }; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; @@ -78,6 +78,9 @@ pub struct Pipeline { /// A receiver for the current chain tip to sync to. tip_tx: Option>, metrics_tx: Option, + /// Whether an unwind should fail the syncing process. Should only be set when downloading + /// blocks from trusted sources and expecting them to be valid. + fail_on_unwind: bool, } impl Pipeline { @@ -164,13 +167,17 @@ impl Pipeline { loop { let next_action = self.run_loop().await?; + if next_action.is_unwind() && self.fail_on_unwind { + return Err(PipelineError::UnexpectedUnwind) + } + // Terminate the loop early if it's reached the maximum user // configured block. 
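The new `fail_on_unwind` flag (set via `with_fail_on_unwind` on the builder) turns any unwind into a hard error, intended for syncing from trusted sources where a bad block indicates a bug rather than a reorg. A self-contained sketch of the check the run loop performs, with simplified stand-in types:

```rust
/// Simplified stand-ins for the pipeline's control-flow types.
#[derive(Debug, PartialEq)]
enum ControlFlow {
    Unwind { target: u64 },
    Continue { block_number: u64 },
}

#[derive(Debug, PartialEq)]
enum PipelineError {
    UnexpectedUnwind,
}

/// With `fail_on_unwind` set, any unwind aborts the pipeline run instead of
/// looping back around, as in the `run` loop above.
fn next_action(flow: ControlFlow, fail_on_unwind: bool) -> Result<ControlFlow, PipelineError> {
    if matches!(flow, ControlFlow::Unwind { .. }) && fail_on_unwind {
        return Err(PipelineError::UnexpectedUnwind);
    }
    Ok(flow)
}

fn main() {
    assert_eq!(
        next_action(ControlFlow::Unwind { target: 10 }, true),
        Err(PipelineError::UnexpectedUnwind)
    );
    assert!(next_action(ControlFlow::Continue { block_number: 11 }, true).is_ok());
    assert!(next_action(ControlFlow::Unwind { target: 10 }, false).is_ok());
}
```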
if next_action.should_continue() && self.progress .minimum_block_number .zip(self.max_block) - .map_or(false, |(progress, target)| progress >= target) + .is_some_and(|(progress, target)| progress >= target) { trace!( target: "sync::pipeline", @@ -216,7 +223,7 @@ impl Pipeline { } ControlFlow::Continue { block_number } => self.progress.update(block_number), ControlFlow::Unwind { target, bad_block } => { - self.unwind(target, Some(bad_block.number))?; + self.unwind(target, Some(bad_block.block.number))?; return Ok(ControlFlow::Unwind { target, bad_block }) } } @@ -249,7 +256,7 @@ impl Pipeline { pub fn move_to_static_files(&self) -> RethResult<()> { // Copies data from database to static files let lowest_static_file_height = - self.static_file_producer.lock().copy_to_static_files()?.min(); + self.static_file_producer.lock().copy_to_static_files()?.min_block_num(); // Deletes data which has been copied to static files. if let Some(prune_tip) = lowest_static_file_height { @@ -351,10 +358,7 @@ impl Pipeline { ))?; } - UnifiedStorageWriter::commit_unwind( - provider_rw, - self.provider_factory.static_file_provider(), - )?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; stage.post_unwind_commit()?; @@ -389,7 +393,7 @@ impl Pipeline { let stage_reached_max_block = prev_checkpoint .zip(self.max_block) - .map_or(false, |(prev_progress, target)| prev_progress.block_number >= target); + .is_some_and(|(prev_progress, target)| prev_progress.block_number >= target); if stage_reached_max_block { warn!( target: "sync::pipeline", @@ -462,10 +466,7 @@ impl Pipeline { result: out.clone(), }); - UnifiedStorageWriter::commit( - provider_rw, - self.provider_factory.static_file_provider(), - )?; + UnifiedStorageWriter::commit(provider_rw)?; stage.post_execute_commit()?; @@ -504,7 +505,7 @@ fn on_stage_error( // We unwind because of a detached head. let unwind_to = - local_head.number.saturating_sub(BEACON_CONSENSUS_REORG_UNWIND_DEPTH).max(1); + local_head.block.number.saturating_sub(BEACON_CONSENSUS_REORG_UNWIND_DEPTH).max(1); Ok(Some(ControlFlow::Unwind { target: unwind_to, bad_block: local_head })) } else if let StageError::Block { block, error } = err { match error { @@ -512,7 +513,7 @@ fn on_stage_error( error!( target: "sync::pipeline", stage = %stage_id, - bad_block = %block.number, + bad_block = %block.block.number, "Stage encountered a validation error: {validation_error}" ); @@ -526,7 +527,7 @@ fn on_stage_error( prev_checkpoint.unwrap_or_default(), )?; - UnifiedStorageWriter::commit(provider_rw, factory.static_file_provider())?; + UnifiedStorageWriter::commit(provider_rw)?; // We unwind because of a validation error. If the unwind itself // fails, we bail entirely, @@ -541,7 +542,7 @@ fn on_stage_error( error!( target: "sync::pipeline", stage = %stage_id, - bad_block = %block.number, + bad_block = %block.block.number, "Stage encountered an execution error: {execution_error}" ); @@ -559,12 +560,12 @@ fn on_stage_error( error!( target: "sync::pipeline", stage = %stage_id, - bad_block = %block.number, + bad_block = %block.block.number, segment = %segment, "Stage is missing static file data." 
); - Ok(Some(ControlFlow::Unwind { target: block.number - 1, bad_block: block })) + Ok(Some(ControlFlow::Unwind { target: block.block.number - 1, bad_block: block })) } else if err.is_fatal() { error!(target: "sync::pipeline", stage = %stage_id, "Stage encountered a fatal error: {err}"); Err(err.into()) @@ -586,6 +587,7 @@ impl std::fmt::Debug for Pipeline { .field("stages", &self.stages.iter().map(|stage| stage.id()).collect::>()) .field("max_block", &self.max_block) .field("event_sender", &self.event_sender) + .field("fail_on_unwind", &self.fail_on_unwind) .finish() } } @@ -601,7 +603,7 @@ mod tests { use reth_errors::ProviderError; use reth_provider::test_utils::{create_test_provider_factory, MockNodeTypesWithDB}; use reth_prune::PruneModes; - use reth_testing_utils::{generators, generators::random_header}; + use reth_testing_utils::generators::{self, random_block_with_parent}; use tokio_stream::StreamExt; #[test] @@ -973,7 +975,7 @@ mod tests { .add_stage( TestStage::new(StageId::Other("B")) .add_exec(Err(StageError::Block { - block: Box::new(random_header( + block: Box::new(random_block_with_parent( &mut generators::rng(), 5, Default::default(), diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 3d4227d8a27..e7114eeb16a 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -24,7 +24,9 @@ reth-evm.workspace = true reth-exex.workspace = true reth-network-p2p.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } -reth-primitives-traits = { workspace = true, features = ["serde-bincode-compat"] } +reth-primitives-traits = { workspace = true, features = [ + "serde-bincode-compat", +] } reth-provider.workspace = true reth-execution-types.workspace = true reth-prune.workspace = true @@ -37,7 +39,9 @@ reth-trie-db = { workspace = true, features = ["metrics"] } reth-testing-utils = { workspace = true, optional = true } +alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true # async tokio = { workspace = true, features = ["sync"] } @@ -66,6 +70,7 @@ reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-downloaders.workspace = true reth-revm.workspace = true reth-static-file.workspace = true +reth-stages-api = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true reth-trie = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } @@ -82,9 +87,6 @@ tempfile.workspace = true # Stage benchmarks criterion = { workspace = true, features = ["async_tokio"] } -# io -serde_json.workspace = true - [target.'cfg(not(target_os = "windows"))'.dev-dependencies] pprof = { workspace = true, features = [ "flamegraph", @@ -101,6 +103,18 @@ test-utils = [ "reth-stages-api/test-utils", "dep:reth-testing-utils", "dep:tempfile", + "reth-chainspec?/test-utils", + "reth-consensus/test-utils", + "reth-evm/test-utils", + "reth-downloaders/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-revm/test-utils", + "reth-codecs/test-utils", + "reth-db-api/test-utils", + "reth-trie-db/test-utils", + "reth-trie/test-utils", + "reth-prune-types/test-utils", ] [[bench]] diff --git a/crates/stages/stages/benches/criterion.rs b/crates/stages/stages/benches/criterion.rs index 7519d81a362..0f876dd7011 100644 --- a/crates/stages/stages/benches/criterion.rs +++ b/crates/stages/stages/benches/criterion.rs @@ -2,12 +2,11 @@ use criterion::{criterion_main, 
measurement::WallTime, BenchmarkGroup, Criterion}; #[cfg(not(target_os = "windows"))] use pprof::criterion::{Output, PProfProfiler}; -use reth_chainspec::ChainSpec; use reth_config::config::{EtlConfig, TransactionLookupConfig}; use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv}; use alloy_primitives::BlockNumber; -use reth_provider::{DatabaseProvider, DatabaseProviderFactory}; +use reth_provider::{test_utils::MockNodeTypesWithDB, DatabaseProvider, DatabaseProviderFactory}; use reth_stages::{ stages::{MerkleStage, SenderRecoveryStage, TransactionLookupStage}, test_utils::TestStageDB, @@ -148,7 +147,8 @@ fn measure_stage( block_interval: RangeInclusive, label: String, ) where - S: Clone + Stage as Database>::TXMut, ChainSpec>>, + S: Clone + + Stage as Database>::TXMut, MockNodeTypesWithDB>>, F: Fn(S, &TestStageDB, StageRange), { let stage_range = ( diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index 4812fb13c39..c1c3ff89d72 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -1,14 +1,15 @@ #![allow(unreachable_pub)] -use alloy_primitives::{Address, Sealable, B256, U256}; +use alloy_primitives::{Address, B256, U256}; use itertools::concat; -use reth_chainspec::ChainSpec; use reth_db::{tables, test_utils::TempDatabase, Database, DatabaseEnv}; use reth_db_api::{ cursor::DbCursorRO, transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, SealedBlock, SealedHeader}; -use reth_provider::{DatabaseProvider, DatabaseProviderFactory, TrieWriter}; +use reth_provider::{ + test_utils::MockNodeTypesWithDB, DatabaseProvider, DatabaseProviderFactory, TrieWriter, +}; use reth_stages::{ stages::{AccountHashingStage, StorageHashingStage}, test_utils::{StorageKind, TestStageDB}, @@ -31,7 +32,8 @@ use reth_trie_db::DatabaseStateRoot; pub(crate) type StageRange = (ExecInput, UnwindInput); pub(crate) fn stage_unwind< - S: Clone + Stage as Database>::TXMut, ChainSpec>>, + S: Clone + + Stage as Database>::TXMut, MockNodeTypesWithDB>>, >( stage: S, db: &TestStageDB, @@ -63,7 +65,8 @@ pub(crate) fn stage_unwind< pub(crate) fn unwind_hashes(stage: S, db: &TestStageDB, range: StageRange) where - S: Clone + Stage as Database>::TXMut, ChainSpec>>, + S: Clone + + Stage as Database>::TXMut, MockNodeTypesWithDB>>, { let (input, unwind) = range; @@ -144,9 +147,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let cloned_second = second_block.clone(); let mut updated_header = cloned_second.header.unseal(); updated_header.state_root = root; - let sealed = updated_header.seal_slow(); - let (header, seal) = sealed.into_parts(); - *second_block = SealedBlock { header: SealedHeader::new(header, seal), ..cloned_second }; + *second_block = SealedBlock { header: SealedHeader::seal(updated_header), ..cloned_second }; let offset = transitions.len() as u64; @@ -179,9 +180,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let cloned_last = last_block.clone(); let mut updated_header = cloned_last.header.unseal(); updated_header.state_root = root; - let sealed = updated_header.seal_slow(); - let (header, seal) = sealed.into_parts(); - *last_block = SealedBlock { header: SealedHeader::new(header, seal), ..cloned_last }; + *last_block = SealedBlock { header: SealedHeader::seal(updated_header), ..cloned_last }; db.insert_blocks(blocks.iter(), StorageKind::Static).unwrap(); diff --git a/crates/stages/stages/src/lib.rs b/crates/stages/stages/src/lib.rs index 38a0f209dbd..ce6a96cf349 100644 
--- a/crates/stages/stages/src/lib.rs +++ b/crates/stages/stages/src/lib.rs @@ -37,7 +37,7 @@ //! # let consensus: Arc = Arc::new(TestConsensus::default()); //! # let headers_downloader = ReverseHeadersDownloaderBuilder::default().build( //! # Arc::new(TestHeadersClient::default()), -//! # consensus.clone() +//! # consensus.clone().as_header_validator() //! # ); //! # let provider_factory = create_test_provider_factory(); //! # let bodies_downloader = BodiesDownloaderBuilder::default().build( diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index a25fcd4e1e5..df5a4c542bf 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -20,8 +20,9 @@ //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::StageConfig; //! # use reth_evm::execute::BlockExecutorProvider; +//! # use reth_primitives::EthPrimitives; //! -//! # fn create(exec: impl BlockExecutorProvider) { +//! # fn create(exec: impl BlockExecutorProvider) { //! //! let provider_factory = create_test_provider_factory(); //! let static_file_producer = @@ -76,7 +77,11 @@ use tokio::sync::watch; /// - [`PruneStage`] (execute) /// - [`FinishStage`] #[derive(Debug)] -pub struct DefaultStages { +pub struct DefaultStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Configuration for the online stages online: OnlineStages, /// Executor factory needs for execution stage @@ -87,13 +92,17 @@ pub struct DefaultStages { prune_modes: PruneModes, } -impl DefaultStages { +impl DefaultStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Create a new set of default stages with default values. #[allow(clippy::too_many_arguments)] pub fn new( provider: Provider, tip: watch::Receiver, - consensus: Arc, + consensus: Arc>, header_downloader: H, body_downloader: B, executor_factory: E, @@ -122,6 +131,8 @@ impl DefaultStages { impl DefaultStages where E: BlockExecutorProvider, + H: HeaderDownloader, + B: BodyDownloader, { /// Appends the default offline stages and default finish stage to the given builder. pub fn add_offline_stages( @@ -164,13 +175,17 @@ where /// These stages *can* be run without network access if the specified downloaders are /// themselves offline. #[derive(Debug)] -pub struct OnlineStages { +pub struct OnlineStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Sync gap provider for the headers stage. provider: Provider, /// The tip for the headers stage. tip: watch::Receiver, /// The consensus engine used to validate incoming data. - consensus: Arc, + consensus: Arc>, /// The block header downloader header_downloader: H, /// The block body downloader @@ -179,12 +194,16 @@ pub struct OnlineStages { stages_config: StageConfig, } -impl OnlineStages { +impl OnlineStages +where + H: HeaderDownloader, + B: BodyDownloader, +{ /// Create a new set of online stages with default values. pub fn new( provider: Provider, tip: watch::Receiver, - consensus: Arc, + consensus: Arc>, header_downloader: H, body_downloader: B, stages_config: StageConfig, @@ -196,7 +215,7 @@ impl OnlineStages { impl OnlineStages where P: HeaderSyncGapProvider + 'static, - H: HeaderDownloader + 'static, + H: HeaderDownloader
+ 'static, B: BodyDownloader + 'static, { /// Create a new builder using the given headers stage. @@ -229,7 +248,7 @@ where provider, header_downloader, tip, - consensus.clone(), + consensus.clone().as_header_validator(), stages_config.etl, )) .add_stage(bodies) @@ -239,7 +258,7 @@ where impl StageSet for OnlineStages where P: HeaderSyncGapProvider + 'static, - H: HeaderDownloader + 'static, + H: HeaderDownloader
+ 'static, B: BodyDownloader + 'static, HeaderStage: Stage, BodyStage: Stage, @@ -250,7 +269,7 @@ where self.provider, self.header_downloader, self.tip, - self.consensus.clone(), + self.consensus.clone().as_header_validator(), self.stages_config.etl.clone(), )) .add_stage(BodyStage::new(self.body_downloader)) diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 2d441dee292..0f311b1bc9e 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -1,29 +1,25 @@ -use std::{ - cmp::Ordering, - task::{ready, Context, Poll}, -}; - +use super::missing_static_data_error; use futures_util::TryStreamExt; -use tracing::*; - -use alloy_primitives::TxNumber; -use reth_db::tables; -use reth_db_api::{ - cursor::{DbCursorRO, DbCursorRW}, - models::{StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals}, - transaction::DbTxMut, -}; +use reth_codecs::Compact; +use reth_db::{tables, transaction::DbTx}; +use reth_db_api::{cursor::DbCursorRO, transaction::DbTxMut}; use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}; use reth_primitives::StaticFileSegment; +use reth_primitives_traits::{Block, BlockBody, BlockHeader}; use reth_provider::{ - providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, DBProvider, ProviderError, StaticFileProviderFactory, StatsReader, + providers::StaticFileWriter, BlockReader, BlockWriter, DBProvider, ProviderError, + StaticFileProviderFactory, StatsReader, StorageLocation, }; use reth_stages_api::{ EntitiesCheckpoint, ExecInput, ExecOutput, Stage, StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput, }; use reth_storage_errors::provider::ProviderResult; +use std::{ + cmp::Ordering, + task::{ready, Context, Poll}, +}; +use tracing::*; /// The body stage downloads block bodies. /// @@ -60,7 +56,7 @@ pub struct BodyStage { /// The body downloader. downloader: D, /// Block response buffer. - buffer: Option>, + buffer: Option>>, } impl BodyStage { @@ -68,11 +64,94 @@ impl BodyStage { pub const fn new(downloader: D) -> Self { Self { downloader, buffer: None } } + + /// Ensures that static files and database are in sync. + fn ensure_consistency( + &self, + provider: &Provider, + unwind_block: Option, + ) -> Result<(), StageError> + where + Provider: DBProvider + BlockReader + StaticFileProviderFactory, + { + // Get id for the next tx_num of zero if there are no transactions. + let next_tx_num = provider + .tx_ref() + .cursor_read::()? + .last()? + .map(|(id, _)| id + 1) + .unwrap_or_default(); + + let static_file_provider = provider.static_file_provider(); + + // Make sure Transactions static file is at the same height. If it's further, this + // input execution was interrupted previously and we need to unwind the static file. + let next_static_file_tx_num = static_file_provider + .get_highest_static_file_tx(StaticFileSegment::Transactions) + .map(|id| id + 1) + .unwrap_or_default(); + + match next_static_file_tx_num.cmp(&next_tx_num) { + // If static files are ahead, we are currently unwinding the stage or we didn't reach + // the database commit in a previous stage run. So, our only solution is to unwind the + // static files and proceed from the database expected height. + Ordering::Greater => { + let highest_db_block = + provider.tx_ref().entries::()? 
as u64; + let mut static_file_producer = + static_file_provider.latest_writer(StaticFileSegment::Transactions)?; + static_file_producer + .prune_transactions(next_static_file_tx_num - next_tx_num, highest_db_block)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. + static_file_producer.commit()?; + } + // If static files are behind, then there was some corruption or loss of files. This + // error will trigger an unwind, that will bring the database to the same height as the + // static files. + Ordering::Less => { + // If we are already in the process of unwind, this might be fine because we will + // fix the inconsistency right away. + if let Some(unwind_to) = unwind_block { + let next_tx_num_after_unwind = provider + .tx_ref() + .get::(unwind_to)? + .map(|b| b.next_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?; + + // This means we need a deeper unwind. + if next_tx_num_after_unwind > next_static_file_tx_num { + return Err(missing_static_data_error( + next_static_file_tx_num.saturating_sub(1), + &static_file_provider, + provider, + StaticFileSegment::Transactions, + )?) + } + } else { + return Err(missing_static_data_error( + next_static_file_tx_num.saturating_sub(1), + &static_file_provider, + provider, + StaticFileSegment::Transactions, + )?) + } + } + Ordering::Equal => {} + } + + Ok(()) + } } -impl Stage for BodyStage +impl Stage for BodyStage where - Provider: DBProvider + StaticFileProviderFactory + StatsReader + BlockReader, + Provider: DBProvider + + StaticFileProviderFactory + + StatsReader + + BlockReader + + BlockWriter>, + D: BodyDownloader>, { /// Return the id of the stage fn id(&self) -> StageId { @@ -115,141 +194,23 @@ where } let (from_block, to_block) = input.next_block_range().into_inner(); - // Cursors used to write bodies, ommers and transactions - let tx = provider.tx_ref(); - let mut block_indices_cursor = tx.cursor_write::()?; - let mut tx_block_cursor = tx.cursor_write::()?; - let mut ommers_cursor = tx.cursor_write::()?; - let mut withdrawals_cursor = tx.cursor_write::()?; - let mut requests_cursor = tx.cursor_write::()?; - - // Get id for the next tx_num of zero if there are no transactions. - let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); - - let static_file_provider = provider.static_file_provider(); - let mut static_file_producer = - static_file_provider.get_writer(from_block, StaticFileSegment::Transactions)?; - - // Make sure Transactions static file is at the same height. If it's further, this - // input execution was interrupted previously and we need to unwind the static file. - let next_static_file_tx_num = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Transactions) - .map(|id| id + 1) - .unwrap_or_default(); - - match next_static_file_tx_num.cmp(&next_tx_num) { - // If static files are ahead, then we didn't reach the database commit in a previous - // stage run. So, our only solution is to unwind the static files and proceed from the - // database expected height. - Ordering::Greater => { - static_file_producer - .prune_transactions(next_static_file_tx_num - next_tx_num, from_block - 1)?; - // Since this is a database <-> static file inconsistency, we commit the change - // straight away. - static_file_producer.commit()?; - } - // If static files are behind, then there was some corruption or loss of files. 
This - // error will trigger an unwind, that will bring the database to the same height as the - // static files. - Ordering::Less => { - return Err(missing_static_data_error( - next_static_file_tx_num.saturating_sub(1), - &static_file_provider, - provider, - )?) - } - Ordering::Equal => {} - } + self.ensure_consistency(provider, None)?; - debug!(target: "sync::stages::bodies", stage_progress = from_block, target = to_block, start_tx_id = next_tx_num, "Commencing sync"); + debug!(target: "sync::stages::bodies", stage_progress = from_block, target = to_block, "Commencing sync"); let buffer = self.buffer.take().ok_or(StageError::MissingDownloadBuffer)?; trace!(target: "sync::stages::bodies", bodies_len = buffer.len(), "Writing blocks"); - let mut highest_block = from_block; - for response in buffer { - // Write block - let block_number = response.block_number(); - - let block_indices = StoredBlockBodyIndices { - first_tx_num: next_tx_num, - tx_count: match &response { - BlockResponse::Full(block) => block.body.transactions.len() as u64, - BlockResponse::Empty(_) => 0, - }, - }; - - // Increment block on static file header. - if block_number > 0 { - let appended_block_number = static_file_producer.increment_block(block_number)?; - - if appended_block_number != block_number { - // This scenario indicates a critical error in the logic of adding new - // items. It should be treated as an `expect()` failure. - return Err(StageError::InconsistentBlockNumber { - segment: StaticFileSegment::Transactions, - database: block_number, - static_file: appended_block_number, - }) - } - } - - match response { - BlockResponse::Full(block) => { - // write transaction block index - if !block.body.transactions.is_empty() { - tx_block_cursor.append(block_indices.last_tx_num(), block.number)?; - } - - // Write transactions - for transaction in block.body.transactions { - let appended_tx_number = static_file_producer - .append_transaction(next_tx_num, &transaction.into())?; - - if appended_tx_number != next_tx_num { - // This scenario indicates a critical error in the logic of adding new - // items. It should be treated as an `expect()` failure. - return Err(StageError::InconsistentTxNumber { - segment: StaticFileSegment::Transactions, - database: next_tx_num, - static_file: appended_tx_number, - }) - } - - // Increment transaction id for each transaction. - next_tx_num += 1; - } - - // Write ommers if any - if !block.body.ommers.is_empty() { - ommers_cursor.append( - block_number, - StoredBlockOmmers { ommers: block.body.ommers }, - )?; - } - - // Write withdrawals if any - if let Some(withdrawals) = block.body.withdrawals { - if !withdrawals.is_empty() { - withdrawals_cursor - .append(block_number, StoredBlockWithdrawals { withdrawals })?; - } - } - - // Write requests if any - if let Some(requests) = block.body.requests { - if !requests.0.is_empty() { - requests_cursor.append(block_number, requests)?; - } - } - } - BlockResponse::Empty(_) => {} - }; - - // insert block meta - block_indices_cursor.append(block_number, block_indices)?; - - highest_block = block_number; - } + let highest_block = buffer.last().map(|r| r.block_number()).unwrap_or(from_block); + + // Write bodies to database. + provider.append_block_bodies( + buffer + .into_iter() + .map(|response| (response.block_number(), response.into_body())) + .collect(), + // We are writing transactions directly to static files. 
+ StorageLocation::StaticFiles, + )?; // The stage is "done" if: // - We got fewer blocks than our target @@ -270,72 +231,8 @@ where ) -> Result { self.buffer.take(); - let static_file_provider = provider.static_file_provider(); - let tx = provider.tx_ref(); - // Cursors to unwind bodies, ommers - let mut body_cursor = tx.cursor_write::()?; - let mut ommers_cursor = tx.cursor_write::()?; - let mut withdrawals_cursor = tx.cursor_write::()?; - let mut requests_cursor = tx.cursor_write::()?; - // Cursors to unwind transitions - let mut tx_block_cursor = tx.cursor_write::()?; - - let mut rev_walker = body_cursor.walk_back(None)?; - while let Some((number, block_meta)) = rev_walker.next().transpose()? { - if number <= input.unwind_to { - break - } - - // Delete the ommers entry if any - if ommers_cursor.seek_exact(number)?.is_some() { - ommers_cursor.delete_current()?; - } - - // Delete the withdrawals entry if any - if withdrawals_cursor.seek_exact(number)?.is_some() { - withdrawals_cursor.delete_current()?; - } - - // Delete the requests entry if any - if requests_cursor.seek_exact(number)?.is_some() { - requests_cursor.delete_current()?; - } - - // Delete all transaction to block values. - if !block_meta.is_empty() && - tx_block_cursor.seek_exact(block_meta.last_tx_num())?.is_some() - { - tx_block_cursor.delete_current()?; - } - - // Delete the current body value - rev_walker.delete_current()?; - } - - let mut static_file_producer = - static_file_provider.latest_writer(StaticFileSegment::Transactions)?; - - // Unwind from static files. Get the current last expected transaction from DB, and match it - // on static file - let db_tx_num = - body_cursor.last()?.map(|(_, block_meta)| block_meta.last_tx_num()).unwrap_or_default(); - let static_file_tx_num: u64 = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Transactions) - .unwrap_or_default(); - - // If there are more transactions on database, then we are missing static file data and we - // need to unwind further. - if db_tx_num > static_file_tx_num { - return Err(missing_static_data_error( - static_file_tx_num, - &static_file_provider, - provider, - )?) - } - - // Unwinds static file - static_file_producer - .prune_transactions(static_file_tx_num.saturating_sub(db_tx_num), input.unwind_to)?; + self.ensure_consistency(provider, Some(input.unwind_to))?; + provider.remove_bodies_above(input.unwind_to, StorageLocation::Both)?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) @@ -344,40 +241,6 @@ where } } -fn missing_static_data_error( - last_tx_num: TxNumber, - static_file_provider: &StaticFileProvider, - provider: &Provider, -) -> Result -where - Provider: BlockReader, -{ - let mut last_block = static_file_provider - .get_highest_static_file_block(StaticFileSegment::Transactions) - .unwrap_or_default(); - - // To be extra safe, we make sure that the last tx num matches the last block from its indices. - // If not, get it. - loop { - if let Some(indices) = provider.block_body_indices(last_block)? { - if indices.last_tx_num() <= last_tx_num { - break - } - } - if last_block == 0 { - break - } - last_block -= 1; - } - - let missing_block = Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); - - Ok(StageError::MissingStaticFileData { - block: missing_block, - segment: StaticFileSegment::Transactions, - }) -} - // TODO(alexey): ideally, we want to measure Bodies stage progress in bytes, but it's hard to know // beforehand how many bytes we need to download. 
So the good solution would be to measure the // progress in gas as a proxy to size. Execution stage uses a similar approach. @@ -396,18 +259,15 @@ where #[cfg(test)] mod tests { + use super::*; + use crate::test_utils::{ + stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, + }; use assert_matches::assert_matches; - use reth_provider::StaticFileProviderFactory; use reth_stages_api::StageUnitCheckpoint; use test_utils::*; - use crate::test_utils::{ - stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, - }; - - use super::*; - stage_test_suite_ext!(BodyTestRunner, body); /// Checks that the stage downloads at most `batch_size` blocks. @@ -622,9 +482,10 @@ mod tests { UnwindStageTestRunner, }, }; - use alloy_primitives::{BlockHash, BlockNumber, TxNumber, B256}; + use alloy_consensus::{BlockHeader, Header}; + use alloy_primitives::{BlockNumber, TxNumber, B256}; use futures_util::Stream; - use reth_db::{static_file::HeaderMask, tables}; + use reth_db::{static_file::HeaderWithHashMask, tables}; use reth_db_api::{ cursor::DbCursorRO, models::{StoredBlockBodyIndices, StoredBlockOmmers}, @@ -637,7 +498,7 @@ mod tests { }, error::DownloadResult, }; - use reth_primitives::{BlockBody, Header, SealedBlock, SealedHeader, StaticFileSegment}; + use reth_primitives::{BlockBody, SealedBlock, SealedHeader, StaticFileSegment}; use reth_provider::{ providers::StaticFileWriter, test_utils::MockNodeTypesWithDB, HeaderProvider, ProviderFactory, StaticFileProviderFactory, TransactionsProvider, @@ -738,9 +599,7 @@ mod tests { body.tx_num_range().try_for_each(|tx_num| { let transaction = random_signed_tx(&mut rng); - static_file_producer - .append_transaction(tx_num, &transaction.into()) - .map(drop) + static_file_producer.append_transaction(tx_num, &transaction).map(drop) })?; if body.tx_count != 0 { @@ -903,6 +762,9 @@ mod tests { } impl BodyDownloader for TestBodyDownloader { + type Header = Header; + type Body = BlockBody; + fn set_download_range( &mut self, range: RangeInclusive, @@ -912,7 +774,7 @@ mod tests { for header in static_file_provider.fetch_range_iter( StaticFileSegment::Headers, *range.start()..*range.end() + 1, - |cursor, number| cursor.get_two::>(number.into()), + |cursor, number| cursor.get_two::>(number.into()), )? 
{ let (header, hash) = header?; self.headers.push_back(SealedHeader::new(header, hash)); @@ -923,7 +785,7 @@ mod tests { } impl Stream for TestBodyDownloader { - type Item = BodyDownloaderResult; + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -931,7 +793,8 @@ mod tests { return Poll::Ready(None) } - let mut response = Vec::default(); + let mut response = + Vec::with_capacity(std::cmp::min(this.headers.len(), this.batch_size as usize)); while let Some(header) = this.headers.pop_front() { if header.is_empty() { response.push(BlockResponse::Empty(header)) diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 7bb6ebc59e0..91afc33efaa 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -1,5 +1,7 @@ use crate::stages::MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD; -use alloy_primitives::{BlockNumber, Sealable}; +use alloy_consensus::{BlockHeader, Header}; +use alloy_eips::{eip1898::BlockWithParent, NumHash}; +use alloy_primitives::BlockNumber; use num_traits::Zero; use reth_config::config::ExecutionConfig; use reth_db::{static_file::HeaderMask, tables}; @@ -8,16 +10,15 @@ use reth_evm::{ execute::{BatchExecutor, BlockExecutorProvider}, metrics::ExecutorMetrics, }; -use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_execution_types::Chain; use reth_exex::{ExExManagerHandle, ExExNotification, ExExNotificationSource}; -use reth_primitives::{Header, SealedHeader, StaticFileSegment}; -use reth_primitives_traits::format_gas_throughput; +use reth_primitives::StaticFileSegment; +use reth_primitives_traits::{format_gas_throughput, Block, BlockBody, NodePrimitives}; use reth_provider::{ - providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, - writer::UnifiedStorageWriter, - BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, - ProviderError, StateChangeWriter, StateWriter, StaticFileProviderFactory, StatsReader, - TransactionVariant, + providers::{StaticFileProvider, StaticFileWriter}, + BlockHashReader, BlockReader, DBProvider, HeaderProvider, LatestStateProviderRef, + OriginalValuesKnown, ProviderError, StateCommitmentProvider, StateWriter, + StaticFileProviderFactory, StatsReader, StorageLocation, TransactionVariant, }; use reth_prune_types::PruneModes; use reth_revm::database::StateProviderDatabase; @@ -35,6 +36,8 @@ use std::{ }; use tracing::*; +use super::missing_static_data_error; + /// The execution stage executes all transactions and /// update history indexes. /// @@ -65,7 +68,10 @@ use tracing::*; /// values to [`tables::PlainStorageState`] // false positive, we cannot derive it if !DB: Debug. #[allow(missing_debug_implementations)] -pub struct ExecutionStage { +pub struct ExecutionStage +where + E: BlockExecutorProvider, +{ /// The stage's internal block executor executor_provider: E, /// The commit thresholds of the execution stage. @@ -80,25 +86,28 @@ pub struct ExecutionStage { /// Input for the post execute commit hook. /// Set after every [`ExecutionStage::execute`] and cleared after /// [`ExecutionStage::post_execute_commit`]. - post_execute_commit_input: Option, + post_execute_commit_input: Option>, /// Input for the post unwind commit hook. /// Set after every [`ExecutionStage::unwind`] and cleared after /// [`ExecutionStage::post_unwind_commit`]. 
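For orientation, a hedged sketch of constructing the now-generic stage, mirroring the test helper near the end of this diff; `ExecutionStageThresholds::default()` and `ExExManagerHandle::empty()` are assumptions not shown here, and the import paths are indicative rather than authoritative:

```rust
use std::sync::Arc;

use reth_chainspec::ChainSpec;
use reth_evm::execute::BasicBlockExecutorProvider;
use reth_evm_ethereum::execute::EthExecutionStrategyFactory;
use reth_exex::ExExManagerHandle;
use reth_prune_types::PruneModes;
use reth_stages::stages::{
    ExecutionStage, ExecutionStageThresholds, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD,
};

fn build_stage(
    chain_spec: Arc<ChainSpec>,
) -> ExecutionStage<BasicBlockExecutorProvider<EthExecutionStrategyFactory>> {
    // Strategy factory wrapped in the generic executor provider, as in the
    // updated tests below.
    let strategy_factory = EthExecutionStrategyFactory::ethereum(chain_spec);
    let executor_provider = BasicBlockExecutorProvider::new(strategy_factory);

    ExecutionStage::new(
        executor_provider,
        ExecutionStageThresholds::default(), // assumption: default thresholds
        MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD,
        PruneModes::none(),
        ExExManagerHandle::empty(), // assumption: no ExExes attached
    )
}
```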
- post_unwind_commit_input: Option, + post_unwind_commit_input: Option>, /// Handle to communicate with `ExEx` manager. - exex_manager_handle: ExExManagerHandle, + exex_manager_handle: ExExManagerHandle, /// Executor metrics. metrics: ExecutorMetrics, } -impl ExecutionStage { +impl ExecutionStage +where + E: BlockExecutorProvider, +{ /// Create new execution stage with specified config. pub fn new( executor_provider: E, thresholds: ExecutionStageThresholds, external_clean_threshold: u64, prune_modes: PruneModes, - exex_manager_handle: ExExManagerHandle, + exex_manager_handle: ExExManagerHandle, ) -> Self { Self { external_clean_threshold, @@ -169,14 +178,105 @@ impl ExecutionStage { } Ok(prune_modes) } + + /// Performs consistency check on static files. + /// + /// This function compares the highest receipt number recorded in the database with that in the + /// static file to detect any discrepancies due to unexpected shutdowns or database rollbacks. + /// **If the height in the static file is higher**, it rolls back (unwinds) the static file. + /// **Conversely, if the height in the database is lower**, it triggers a rollback in the + /// database (by returning [`StageError`]) until the heights in both the database and static + /// file match. + fn ensure_consistency( + &self, + provider: &Provider, + checkpoint: u64, + unwind_to: Option, + ) -> Result<(), StageError> + where + Provider: StaticFileProviderFactory + + DBProvider + + BlockReader + + HeaderProvider
,
+    {
+        // If there's any receipts pruning configured, receipts are written directly to database
+        // and inconsistencies are expected.
+        if self.prune_modes.has_receipts_pruning() {
+            return Ok(())
+        }
+
+        // Get next expected receipt number
+        let tx = provider.tx_ref();
+        let next_receipt_num = tx
+            .cursor_read::<tables::BlockBodyIndices>()?
+            .seek_exact(checkpoint)?
+            .map(|(_, value)| value.next_tx_num())
+            .unwrap_or(0);
+
+        let static_file_provider = provider.static_file_provider();
+
+        // Get next expected receipt number in static files
+        let next_static_file_receipt_num = static_file_provider
+            .get_highest_static_file_tx(StaticFileSegment::Receipts)
+            .map(|num| num + 1)
+            .unwrap_or(0);
+
+        // Check if we had any unexpected shutdown after committing to static files, but
+        // NOT committing to database.
+        match next_static_file_receipt_num.cmp(&next_receipt_num) {
+            // It can be equal when it's a chain of empty blocks, but we still need to update the
+            // last block in the range.
+            Ordering::Greater | Ordering::Equal => {
+                let mut static_file_producer =
+                    static_file_provider.latest_writer(StaticFileSegment::Receipts)?;
+                static_file_producer
+                    .prune_receipts(next_static_file_receipt_num - next_receipt_num, checkpoint)?;
+                // Since this is a database <-> static file inconsistency, we commit the change
+                // straight away.
+                static_file_producer.commit()?;
+            }
+            Ordering::Less => {
+                // If we are already in the process of unwind, this might be fine because we will
+                // fix the inconsistency right away.
+                if let Some(unwind_to) = unwind_to {
+                    let next_receipt_num_after_unwind = provider
+                        .tx_ref()
+                        .get::<tables::BlockBodyIndices>(unwind_to)?
+                        .map(|b| b.next_tx_num())
+                        .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?;
+
+                    if next_receipt_num_after_unwind > next_static_file_receipt_num {
+                        // This means we need a deeper unwind.
+                    } else {
+                        return Ok(())
+                    }
+                }
+
+                return Err(missing_static_data_error(
+                    next_static_file_receipt_num.saturating_sub(1),
+                    &static_file_provider,
+                    provider,
+                    StaticFileSegment::Receipts,
+                )?)
+            }
+        }
+
+        Ok(())
+    }
 }
 
 impl Stage for ExecutionStage
 where
-    E: BlockExecutorProvider,
-    Provider:
-        DBProvider + BlockReader + StaticFileProviderFactory + StatsReader + StateChangeWriter,
-    for<'a> UnifiedStorageWriter<'a, Provider, StaticFileProviderRWRefMut<'a>>: StateWriter,
+    E: BlockExecutorProvider>,
+    Provider: DBProvider
+        + BlockReader<
+            Block = ::Block,
+            Header = ::BlockHeader,
+        > + StaticFileProviderFactory
+        + StatsReader
+        + BlockHashReader
+        + StateWriter::Receipt>
+        + StateCommitmentProvider,
 {
     /// Return the id of the stage
     fn id(&self) -> StageId {
@@ -204,25 +304,9 @@ where
         let prune_modes = self.adjust_prune_modes(provider, start_block, max_block)?;
         let static_file_provider = provider.static_file_provider();
 
-        // We only use static files for Receipts, if there is no receipt pruning of any kind.
-        let static_file_producer = if self.prune_modes.receipts.is_none() &&
-            self.prune_modes.receipts_log_filter.is_empty()
-        {
-            debug!(target: "sync::stages::execution", start = start_block, "Preparing static file producer");
-            let mut producer =
-                prepare_static_file_producer(provider, &static_file_provider, start_block)?;
-            // Since there might be a database <-> static file inconsistency (read
-            // `prepare_static_file_producer` for context), we commit the change straight away.
- producer.commit()?; - Some(producer) - } else { - None - }; + self.ensure_consistency(provider, input.checkpoint().block_number, None)?; - let db = StateProviderDatabase(LatestStateProviderRef::new( - provider.tx_ref(), - provider.static_file_provider(), - )); + let db = StateProviderDatabase(LatestStateProviderRef::new(provider)); let mut executor = self.executor_provider.batch_executor(db); executor.set_tip(max_block); executor.set_prune_modes(prune_modes); @@ -267,21 +351,24 @@ where fetch_block_duration += fetch_block_start.elapsed(); - cumulative_gas += block.gas_used; + cumulative_gas += block.header().gas_used(); // Configure the executor to use the current state. - trace!(target: "sync::stages::execution", number = block_number, txs = block.body.transactions.len(), "Executing block"); + trace!(target: "sync::stages::execution", number = block_number, txs = block.body().transactions().len(), "Executing block"); // Execute the block let execute_start = Instant::now(); self.metrics.metered_one((&block, td).into(), |input| { - let sealed = block.header.clone().seal_slow(); - let (header, seal) = sealed.into_parts(); - - executor.execute_and_verify_one(input).map_err(|error| StageError::Block { - block: Box::new(SealedHeader::new(header, seal)), - error: BlockErrorKind::Execution(error), + executor.execute_and_verify_one(input).map_err(|error| { + let header = block.header(); + StageError::Block { + block: Box::new(BlockWithParent::new( + header.parent_hash(), + NumHash::new(header.number(), header.hash_slow()), + )), + error: BlockErrorKind::Execution(error), + } }) })?; @@ -304,7 +391,7 @@ where } stage_progress = block_number; - stage_checkpoint.progress.processed += block.gas_used; + stage_checkpoint.progress.processed += block.header().gas_used(); // If we have ExExes we need to save the block in memory for later if self.exex_manager_handle.has_exexs() { @@ -325,8 +412,7 @@ where // prepare execution output for writing let time = Instant::now(); - let ExecutionOutcome { bundle, receipts, requests, first_block } = executor.finalize(); - let state = ExecutionOutcome::new(bundle, receipts, first_block, requests); + let state = executor.finalize(); let write_preparation_duration = time.elapsed(); // log the gas per second for the range we just executed @@ -344,7 +430,7 @@ where // the `has_exexs` check here as well if !blocks.is_empty() { let blocks = blocks.into_iter().map(|block| { - let hash = block.header.hash_slow(); + let hash = block.header().hash_slow(); block.seal(hash) }); @@ -363,8 +449,7 @@ where let time = Instant::now(); // write output - let mut writer = UnifiedStorageWriter::new(provider, static_file_producer); - writer.write_to_storage(state, OriginalValuesKnown::Yes)?; + provider.write_state(state, OriginalValuesKnown::Yes, StorageLocation::StaticFiles)?; let db_write_duration = time.elapsed(); debug!( @@ -411,10 +496,13 @@ where }) } + self.ensure_consistency(provider, input.checkpoint.block_number, Some(unwind_to))?; + // Unwind account and storage changesets, as well as receipts. // // This also updates `PlainStorageState` and `PlainAccountState`. - let bundle_state_with_receipts = provider.take_state(range.clone())?; + let bundle_state_with_receipts = + provider.take_state_above(unwind_to, StorageLocation::Both)?; // Prepare the input for post unwind commit hook, where an `ExExNotification` will be sent. 
if self.exex_manager_handle.has_exexs() { @@ -435,25 +523,6 @@ where } } - let static_file_provider = provider.static_file_provider(); - - // Unwind all receipts for transactions in the block range - if self.prune_modes.receipts.is_none() && self.prune_modes.receipts_log_filter.is_empty() { - // We only use static files for Receipts, if there is no receipt pruning of any kind. - - // prepare_static_file_producer does a consistency check that will unwind static files - // if the expected highest receipt in the files is higher than the database. - // Which is essentially what happens here when we unwind this stage. - let _static_file_producer = - prepare_static_file_producer(provider, &static_file_provider, *range.start())?; - } else { - // If there is any kind of receipt pruning/filtering we use the database, since static - // files do not support filters. - // - // If we hit this case, the receipts have already been unwound by the call to - // `take_state`. - } - // Update the checkpoint. let mut stage_checkpoint = input.checkpoint.execution_stage_checkpoint(); if let Some(stage_checkpoint) = stage_checkpoint.as_mut() { @@ -461,7 +530,8 @@ where stage_checkpoint.progress.processed -= provider .block_by_number(block_number)? .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))? - .gas_used; + .header() + .gas_used(); } } let checkpoint = if let Some(stage_checkpoint) = stage_checkpoint { @@ -487,8 +557,8 @@ where } } -fn execution_checkpoint( - provider: &StaticFileProvider, +fn execution_checkpoint( + provider: &StaticFileProvider, start_block: BlockNumber, max_block: BlockNumber, checkpoint: StageCheckpoint, @@ -554,8 +624,8 @@ fn execution_checkpoint( }) } -fn calculate_gas_used_from_headers( - provider: &StaticFileProvider, +fn calculate_gas_used_from_headers( + provider: &StaticFileProvider, range: RangeInclusive, ) -> Result { debug!(target: "sync::stages::execution", ?range, "Calculating gas used from headers"); @@ -579,85 +649,6 @@ fn calculate_gas_used_from_headers( Ok(gas_total) } -/// Returns a `StaticFileProviderRWRefMut` static file producer after performing a consistency -/// check. -/// -/// This function compares the highest receipt number recorded in the database with that in the -/// static file to detect any discrepancies due to unexpected shutdowns or database rollbacks. **If -/// the height in the static file is higher**, it rolls back (unwinds) the static file. -/// **Conversely, if the height in the database is lower**, it triggers a rollback in the database -/// (by returning [`StageError`]) until the heights in both the database and static file match. -fn prepare_static_file_producer<'a, 'b, Provider>( - provider: &'b Provider, - static_file_provider: &'a StaticFileProvider, - start_block: u64, -) -> Result, StageError> -where - Provider: DBProvider + BlockReader + HeaderProvider, - 'b: 'a, -{ - // Get next expected receipt number - let tx = provider.tx_ref(); - let next_receipt_num = tx - .cursor_read::()? - .seek_exact(start_block)? - .map(|(_, value)| value.first_tx_num) - .unwrap_or(0); - - // Get next expected receipt number in static files - let next_static_file_receipt_num = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Receipts) - .map(|num| num + 1) - .unwrap_or(0); - - let mut static_file_producer = - static_file_provider.get_writer(start_block, StaticFileSegment::Receipts)?; - - // Check if we had any unexpected shutdown after committing to static files, but - // NOT committing to database. 
- match next_static_file_receipt_num.cmp(&next_receipt_num) { - // It can be equal when it's a chain of empty blocks, but we still need to update the last - // block in the range. - Ordering::Greater | Ordering::Equal => static_file_producer.prune_receipts( - next_static_file_receipt_num - next_receipt_num, - start_block.saturating_sub(1), - )?, - Ordering::Less => { - let mut last_block = static_file_provider - .get_highest_static_file_block(StaticFileSegment::Receipts) - .unwrap_or(0); - - let last_receipt_num = static_file_provider - .get_highest_static_file_tx(StaticFileSegment::Receipts) - .unwrap_or(0); - - // To be extra safe, we make sure that the last receipt num matches the last block from - // its indices. If not, get it. - loop { - if let Some(indices) = provider.block_body_indices(last_block)? { - if indices.last_tx_num() <= last_receipt_num { - break - } - } - if last_block == 0 { - break - } - last_block -= 1; - } - - let missing_block = - Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); - - return Err(StageError::MissingStaticFileData { - block: missing_block, - segment: StaticFileSegment::Receipts, - }) - } - } - - Ok(static_file_producer) -} - #[cfg(test)] mod tests { use super::*; @@ -667,7 +658,8 @@ mod tests { use assert_matches::assert_matches; use reth_chainspec::ChainSpecBuilder; use reth_db_api::{models::AccountBeforeTx, transaction::DbTxMut}; - use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_evm::execute::BasicBlockExecutorProvider; + use reth_evm_ethereum::execute::EthExecutionStrategyFactory; use reth_execution_errors::BlockValidationError; use reth_primitives::{Account, Bytecode, SealedBlock, StorageEntry}; use reth_provider::{ @@ -678,10 +670,11 @@ mod tests { use reth_stages_api::StageUnitCheckpoint; use std::collections::BTreeMap; - fn stage() -> ExecutionStage { - let executor_provider = EthExecutorProvider::ethereum(Arc::new( + fn stage() -> ExecutionStage> { + let strategy_factory = EthExecutionStrategyFactory::ethereum(Arc::new( ChainSpecBuilder::mainnet().berlin_activated().build(), )); + let executor_provider = BasicBlockExecutorProvider::new(strategy_factory); ExecutionStage::new( executor_provider, ExecutionStageThresholds { @@ -901,7 +894,7 @@ mod tests { // Tests node with database and node with static files for mut mode in modes { - let provider = factory.database_provider_rw().unwrap(); + let mut provider = factory.database_provider_rw().unwrap(); if let Some(mode) = &mut mode { // Simulating a full node where we write receipts to database @@ -910,6 +903,7 @@ mod tests { let mut execution_stage = stage(); execution_stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.clone().unwrap_or_default()); let output = execution_stage.execute(&provider, input).unwrap(); provider.commit().unwrap(); @@ -974,9 +968,10 @@ mod tests { "Post changed of a account" ); - let provider = factory.database_provider_rw().unwrap(); + let mut provider = factory.database_provider_rw().unwrap(); let mut stage = stage(); - stage.prune_modes = mode.unwrap_or_default(); + stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.unwrap_or_default()); let _result = stage .unwind( @@ -1051,6 +1046,7 @@ mod tests { // Test Execution let mut execution_stage = stage(); execution_stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.clone().unwrap_or_default()); let result = execution_stage.execute(&provider, input).unwrap(); provider.commit().unwrap(); 
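// ---------------------------------------------------------------------------
// Editor's sketch (not part of the upstream diff): the `prepare_static_file_producer`
// helper removed above enforces one reconciliation rule between the database and
// the receipts static file. Below, that rule is reduced to plain integers;
// `Reconcile`, `reconcile`, and the parameter names are hypothetical, and only
// the `Ordering`-based branch structure mirrors the removed code.
// ---------------------------------------------------------------------------
use std::cmp::Ordering;

/// Outcome of comparing the next expected receipt number in the database with
/// the next expected receipt number in static files.
#[derive(Debug, PartialEq, Eq)]
enum Reconcile {
    /// Static files are ahead of (or level with) the database: prune the given
    /// number of surplus receipt rows from the static file.
    PruneStaticFiles(u64),
    /// The database is ahead: receipts are missing from static files, so the
    /// stage must report the first block with missing data and unwind.
    MissingStaticData,
}

fn reconcile(next_db_receipt: u64, next_static_receipt: u64) -> Reconcile {
    match next_static_receipt.cmp(&next_db_receipt) {
        // `Equal` still prunes zero rows: for a chain of empty blocks the
        // writer must update the last block of the range even with no rows.
        Ordering::Greater | Ordering::Equal => {
            Reconcile::PruneStaticFiles(next_static_receipt - next_db_receipt)
        }
        Ordering::Less => Reconcile::MissingStaticData,
    }
}
// ---------------------------------------------------------------------------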
@@ -1058,7 +1054,8 @@ mod tests { // Test Unwind provider = factory.database_provider_rw().unwrap(); let mut stage = stage(); - stage.prune_modes = mode.unwrap_or_default(); + stage.prune_modes = mode.clone().unwrap_or_default(); + provider.set_prune_modes(mode.clone().unwrap_or_default()); let result = stage .unwind( diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 14afb37d81d..551c10d7711 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -58,13 +58,16 @@ impl AccountHashingStage { /// /// Proceeds to go to the `BlockTransitionIndex` end, go back `transitions` and change the /// account state in the `AccountChangeSets` table. - pub fn seed< - Tx: DbTx + DbTxMut + 'static, - Spec: Send + Sync + 'static + reth_chainspec::EthereumHardforks, - >( - provider: &reth_provider::DatabaseProvider, + pub fn seed( + provider: &reth_provider::DatabaseProvider, opts: SeedOpts, - ) -> Result, StageError> { + ) -> Result, StageError> + where + N::Primitives: reth_primitives_traits::FullNodePrimitives< + BlockBody = reth_primitives::BlockBody, + BlockHeader = reth_primitives::Header, + >, + { use alloy_primitives::U256; use reth_db_api::models::AccountBeforeTx; use reth_provider::{StaticFileProviderFactory, StaticFileWriter}; @@ -234,7 +237,7 @@ where input.unwind_block_range_with_threshold(self.commit_threshold); // Aggregate all transition changesets and make a list of accounts that have been changed. - provider.unwind_account_hashing(range)?; + provider.unwind_account_hashing_range(range)?; let mut stage_checkpoint = input.checkpoint.account_hashing_stage_checkpoint().unwrap_or_default(); diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index ef070d30c6d..0be84665bee 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -169,7 +169,7 @@ where let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_storage_hashing(BlockNumberAddress::range(range))?; + provider.unwind_storage_hashing_range(BlockNumberAddress::range(range))?; let mut stage_checkpoint = input.checkpoint.storage_hashing_stage_checkpoint().unwrap_or_default(); @@ -359,10 +359,7 @@ mod tests { transaction.hash(), next_tx_num, )?; - tx.put::( - next_tx_num, - transaction.clone().into(), - )?; + tx.put::(next_tx_num, transaction.clone())?; let (addr, _) = accounts.get_mut(rng.gen::() % n_accounts as usize).unwrap(); diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 199e015c2dc..2a104d7eb6b 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -1,8 +1,10 @@ +use alloy_consensus::BlockHeader; +use alloy_eips::{eip1898::BlockWithParent, NumHash}; use alloy_primitives::{BlockHash, BlockNumber, Bytes, B256}; use futures_util::StreamExt; use reth_config::config::EtlConfig; -use reth_consensus::Consensus; -use reth_db::{tables, RawKey, RawTable, RawValue}; +use reth_consensus::HeaderValidator; +use reth_db::{table::Value, tables, transaction::DbTx, RawKey, RawTable, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, transaction::DbTxMut, @@ -10,12 +12,11 @@ use reth_db_api::{ }; use reth_etl::Collector; use reth_network_p2p::headers::{downloader::HeaderDownloader, 
error::HeadersDownloaderError}; -use reth_primitives::{SealedHeader, StaticFileSegment}; -use reth_primitives_traits::serde_bincode_compat; +use reth_primitives::{NodePrimitives, SealedHeader, StaticFileSegment}; +use reth_primitives_traits::{serde_bincode_compat, FullBlockHeader}; use reth_provider::{ - providers::{StaticFileProvider, StaticFileWriter}, - BlockHashReader, DBProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, - StaticFileProviderFactory, + providers::StaticFileWriter, BlockHashReader, DBProvider, HeaderProvider, HeaderSyncGap, + HeaderSyncGapProvider, StaticFileProviderFactory, }; use reth_stages_api::{ BlockErrorKind, CheckpointBlockRange, EntitiesCheckpoint, ExecInput, ExecOutput, @@ -49,9 +50,9 @@ pub struct HeaderStage { /// The tip for the stage. tip: watch::Receiver, /// Consensus client implementation - consensus: Arc, + consensus: Arc>, /// Current sync gap. - sync_gap: Option, + sync_gap: Option>, /// ETL collector with `HeaderHash` -> `BlockNumber` hash_collector: Collector, /// ETL collector with `BlockNumber` -> `BincodeSealedHeader` @@ -71,7 +72,7 @@ where database: Provider, downloader: Downloader, tip: watch::Receiver, - consensus: Arc, + consensus: Arc>, etl_config: EtlConfig, ) -> Self { Self { @@ -90,15 +91,18 @@ where /// /// Writes to static files ( `Header | HeaderTD | HeaderHash` ) and [`tables::HeaderNumbers`] /// database table. - fn write_headers( - &mut self, - provider: &impl DBProvider, - static_file_provider: StaticFileProvider, - ) -> Result { + fn write_headers
<P>(&mut self, provider: &P) -> Result<BlockNumber, StageError> + where + P: DBProvider<Tx: DbTxMut> + StaticFileProviderFactory, + Downloader: HeaderDownloader<Header = <P::Primitives as NodePrimitives>
::BlockHeader>, + ::BlockHeader: Value + FullBlockHeader, + { let total_headers = self.header_collector.len(); info!(target: "sync::stages::headers", total = total_headers, "Writing headers"); + let static_file_provider = provider.static_file_provider(); + // Consistency check of expected headers in static files vs DB is done on provider::sync_gap // when poll_execute_ready is polled. let mut last_header_number = static_file_provider @@ -121,24 +125,27 @@ where info!(target: "sync::stages::headers", progress = %format!("{:.2}%", (index as f64 / total_headers as f64) * 100.0), "Writing headers"); } - let sealed_header: SealedHeader = - bincode::deserialize::>(&header_buf) + let sealed_header: SealedHeader = + bincode::deserialize::>(&header_buf) .map_err(|err| StageError::Fatal(Box::new(err)))? .into(); let (header, header_hash) = sealed_header.split(); - if header.number == 0 { + if header.number() == 0 { continue } - last_header_number = header.number; + last_header_number = header.number(); // Increase total difficulty - td += header.difficulty; + td += header.difficulty(); // Header validation self.consensus.validate_header_with_total_difficulty(&header, td).map_err(|error| { StageError::Block { - block: Box::new(SealedHeader::new(header.clone(), header_hash)), + block: Box::new(BlockWithParent::new( + header.parent_hash(), + NumHash::new(header.number(), header_hash), + )), error: BlockErrorKind::Validation(error), } })?; @@ -155,11 +162,13 @@ where // If we only have the genesis block hash, then we are at first sync, and we can remove it, // add it to the collector and use tx.append on all hashes. - if let Some((hash, block_number)) = cursor_header_numbers.last()? { - if block_number.value()? == 0 { - self.hash_collector.insert(hash.key()?, 0)?; - cursor_header_numbers.delete_current()?; - first_sync = true; + if provider.tx_ref().entries::>()? == 1 { + if let Some((hash, block_number)) = cursor_header_numbers.last()? { + if block_number.value()? == 0 { + self.hash_collector.insert(hash.key()?, 0)?; + cursor_header_numbers.delete_current()?; + first_sync = true; + } } } @@ -191,9 +200,10 @@ where impl Stage for HeaderStage where - P: HeaderSyncGapProvider, - D: HeaderDownloader, Provider: DBProvider + StaticFileProviderFactory, + P: HeaderSyncGapProvider
<Header = <Provider::Primitives as NodePrimitives>::BlockHeader>, + D: HeaderDownloader<Header = <Provider::Primitives as NodePrimitives>
::BlockHeader>, + ::BlockHeader: FullBlockHeader + Value, { /// Return the id of the stage fn id(&self) -> StageId { @@ -230,7 +240,7 @@ where } debug!(target: "sync::stages::headers", ?tip, head = ?gap.local_head.hash(), "Commencing sync"); - let local_head_number = gap.local_head.number; + let local_head_number = gap.local_head.number(); // let the downloader know what to sync self.downloader.update_sync_gap(gap.local_head, gap.target); @@ -239,9 +249,9 @@ where loop { match ready!(self.downloader.poll_next_unpin(cx)) { Some(Ok(headers)) => { - info!(target: "sync::stages::headers", total = headers.len(), from_block = headers.first().map(|h| h.number), to_block = headers.last().map(|h| h.number), "Received headers"); + info!(target: "sync::stages::headers", total = headers.len(), from_block = headers.first().map(|h| h.number()), to_block = headers.last().map(|h| h.number()), "Received headers"); for header in headers { - let header_number = header.number; + let header_number = header.number(); self.hash_collector.insert(header.hash(), header_number)?; self.header_collector.insert( @@ -264,7 +274,11 @@ where } Some(Err(HeadersDownloaderError::DetachedHead { local_head, header, error })) => { error!(target: "sync::stages::headers", %error, "Cannot attach header to head"); - return Poll::Ready(Err(StageError::DetachedHead { local_head, header, error })) + return Poll::Ready(Err(StageError::DetachedHead { + local_head: Box::new(local_head.block_with_parent()), + header: Box::new(header.block_with_parent()), + error, + })) } None => return Poll::Ready(Err(StageError::ChannelClosed)), } @@ -291,7 +305,7 @@ where // Write the headers and related tables to DB from ETL space let to_be_processed = self.hash_collector.len() as u64; - let last_header_number = self.write_headers(provider, provider.static_file_provider())?; + let last_header_number = self.write_headers(provider)?; // Clear ETL collectors self.hash_collector.clear(); @@ -390,7 +404,7 @@ mod tests { use crate::test_utils::{ stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, }; - use alloy_primitives::{Sealable, B256}; + use alloy_primitives::B256; use assert_matches::assert_matches; use reth_execution_types::ExecutionOutcome; use reth_primitives::{BlockBody, SealedBlock, SealedBlockWithSenders}; @@ -439,7 +453,9 @@ mod tests { } } - impl StageTestRunner for HeadersTestRunner { + impl + 'static> StageTestRunner + for HeadersTestRunner + { type S = HeaderStage, D>; fn db(&self) -> &TestStageDB { @@ -457,7 +473,9 @@ mod tests { } } - impl ExecuteStageTestRunner for HeadersTestRunner { + impl + 'static> ExecuteStageTestRunner + for HeadersTestRunner + { type Seed = Vec; fn seed_execution(&mut self, input: ExecInput) -> Result { @@ -503,9 +521,7 @@ mod tests { // validate the header let header = provider.header_by_number(block_num)?; assert!(header.is_some()); - let sealed = header.unwrap().seal_slow(); - let (header, seal) = sealed.into_parts(); - let header = SealedHeader::new(header, seal); + let header = SealedHeader::seal(header.unwrap()); assert_eq!(header.hash(), hash); // validate the header total difficulty @@ -535,7 +551,9 @@ mod tests { } } - impl UnwindStageTestRunner for HeadersTestRunner { + impl + 'static> UnwindStageTestRunner + for HeadersTestRunner + { fn validate_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError> { self.check_no_header_entry_above(input.unwind_to) } diff --git a/crates/stages/stages/src/stages/index_account_history.rs 
b/crates/stages/stages/src/stages/index_account_history.rs index 8b10283fb4b..38c238e5d98 100644 --- a/crates/stages/stages/src/stages/index_account_history.rs +++ b/crates/stages/stages/src/stages/index_account_history.rs @@ -134,7 +134,7 @@ where let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_account_history_indices(range)?; + provider.unwind_account_history_indices_range(range)?; // from HistoryIndex higher than that number. Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_progress) }) diff --git a/crates/stages/stages/src/stages/index_storage_history.rs b/crates/stages/stages/src/stages/index_storage_history.rs index ac645b8dd75..ba61e631230 100644 --- a/crates/stages/stages/src/stages/index_storage_history.rs +++ b/crates/stages/stages/src/stages/index_storage_history.rs @@ -140,7 +140,7 @@ where let (range, unwind_progress, _) = input.unwind_block_range_with_threshold(self.commit_threshold); - provider.unwind_storage_history_indices(BlockNumberAddress::range(range))?; + provider.unwind_storage_history_indices_range(BlockNumberAddress::range(range))?; Ok(UnwindOutput { checkpoint: StageCheckpoint::new(unwind_progress) }) } diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index d1d3496d917..ff4d37cf3f6 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -1,4 +1,5 @@ -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_consensus::BlockHeader; +use alloy_primitives::{BlockNumber, B256}; use reth_codecs::Compact; use reth_consensus::ConsensusError; use reth_db::tables; @@ -135,7 +136,7 @@ where Provider: DBProvider + TrieWriter + StatsReader - + HeaderProvider + + HeaderProvider
+ StageCheckpointReader + StageCheckpointWriter, { @@ -168,7 +169,7 @@ where let target_block = provider .header_by_number(to_block)? .ok_or_else(|| ProviderError::HeaderNotFound(to_block.into()))?; - let target_block_root = target_block.state_root; + let target_block_root = target_block.state_root(); let mut checkpoint = self.get_execution_checkpoint(provider)?; let (trie_root, entities_checkpoint) = if range.is_empty() { @@ -276,10 +277,7 @@ where // Reset the checkpoint self.save_execution_checkpoint(provider, None)?; - let sealed = target_block.seal_slow(); - let (header, seal) = sealed.into_parts(); - - validate_state_root(trie_root, SealedHeader::new(header, seal), to_block)?; + validate_state_root(trie_root, SealedHeader::seal(target_block), to_block)?; Ok(ExecOutput { checkpoint: StageCheckpoint::new(to_block) @@ -332,10 +330,7 @@ where .header_by_number(input.unwind_to)? .ok_or_else(|| ProviderError::HeaderNotFound(input.unwind_to.into()))?; - let sealed = target.seal_slow(); - let (header, seal) = sealed.into_parts(); - - validate_state_root(block_root, SealedHeader::new(header, seal), input.unwind_to)?; + validate_state_root(block_root, SealedHeader::seal(target), input.unwind_to)?; // Validation passed, apply unwind changes to the database. provider.write_trie_updates(&updates)?; @@ -362,7 +357,7 @@ fn validate_state_root( error: BlockErrorKind::Validation(ConsensusError::BodyStateRootDiff( GotExpected { got, expected: expected.state_root }.into(), )), - block: Box::new(expected), + block: Box::new(expected.block_with_parent()), }) } } @@ -538,9 +533,7 @@ mod tests { .into_iter() .map(|(address, account)| (address, (account, std::iter::empty()))), ); - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - let sealed_head = SealedBlock { header: SealedHeader::new(header, seal), body }; + let sealed_head = SealedBlock { header: SealedHeader::seal(header), body }; let head_hash = sealed_head.hash(); let mut blocks = vec![sealed_head]; diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 4b9f9295103..9d7cc685a7e 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -296,8 +296,8 @@ mod tests { ) { // We recreate the static file provider, since consistency heals are done on fetching the // writer for the first time. - let static_file_provider = - StaticFileProvider::read_write(db.factory.static_file_provider().path()).unwrap(); + let mut static_file_provider = db.factory.static_file_provider(); + static_file_provider = StaticFileProvider::read_write(static_file_provider.path()).unwrap(); // Simulate corruption by removing `prune_count` rows from the data file without updating // its offset list and configuration. @@ -314,9 +314,10 @@ mod tests { // We recreate the static file provider, since consistency heals are done on fetching the // writer for the first time. 
+ let mut static_file_provider = db.factory.static_file_provider(); + static_file_provider = StaticFileProvider::read_write(static_file_provider.path()).unwrap(); assert_eq!( - StaticFileProvider::read_write(db.factory.static_file_provider().path()) - .unwrap() + static_file_provider .check_consistency(&db.factory.database_provider_ro().unwrap(), is_full_node,), Ok(expected) ); diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index 8adf2fcad54..7e5d7af46ee 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -1,4 +1,5 @@ -use reth_db::transaction::DbTxMut; +use reth_db::{table::Value, transaction::DbTxMut}; +use reth_primitives::NodePrimitives; use reth_provider::{ BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, @@ -41,7 +42,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory, + + StaticFileProviderFactory>, { fn id(&self) -> StageId { StageId::Prune @@ -130,7 +131,7 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory, + + StaticFileProviderFactory>, { fn id(&self) -> StageId { StageId::PruneSenderRecovery @@ -171,6 +172,7 @@ mod tests { }; use alloy_primitives::B256; use reth_primitives::SealedBlock; + use reth_primitives_traits::SignedTransaction; use reth_provider::{ providers::StaticFileWriter, TransactionsProvider, TransactionsProviderExt, }; diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index a85b0bc60cc..b5506068f48 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -1,13 +1,14 @@ use alloy_primitives::{Address, TxNumber}; use reth_config::config::SenderRecoveryConfig; use reth_consensus::ConsensusError; -use reth_db::{static_file::TransactionMask, tables, RawValue}; +use reth_db::{static_file::TransactionMask, table::Value, tables, RawValue}; use reth_db_api::{ cursor::DbCursorRW, transaction::{DbTx, DbTxMut}, DbTxUnwindExt, }; -use reth_primitives::{GotExpected, StaticFileSegment, TransactionSignedNoHash}; +use reth_primitives::{GotExpected, NodePrimitives, StaticFileSegment}; +use reth_primitives_traits::SignedTransaction; use reth_provider::{ BlockReader, DBProvider, HeaderProvider, ProviderError, PruneCheckpointReader, StaticFileProviderFactory, StatsReader, @@ -29,6 +30,9 @@ const BATCH_SIZE: usize = 100_000; /// Maximum number of senders to recover per rayon worker job. const WORKER_CHUNK_SIZE: usize = 100; +/// Type alias for a sender that transmits the result of sender recovery. +type RecoveryResultSender = mpsc::Sender>>; + /// The sender recovery stage iterates over existing transactions, /// recovers the transaction signer and stores them /// in [`TransactionSenders`][reth_db::tables::TransactionSenders] table. @@ -55,8 +59,8 @@ impl Default for SenderRecoveryStage { impl Stage for SenderRecoveryStage where Provider: DBProvider - + BlockReader - + StaticFileProviderFactory + + BlockReader
+ + StaticFileProviderFactory> + StatsReader + PruneCheckpointReader, { @@ -100,8 +104,10 @@ where .map(|start| start..std::cmp::min(start + BATCH_SIZE as u64, tx_range.end)) .collect::>>(); + let tx_batch_sender = setup_range_recovery(provider); + for range in batch { - recover_range(range, provider, &mut senders_cursor)?; + recover_range(range, provider, tx_batch_sender.clone(), &mut senders_cursor)?; } Ok(ExecOutput { @@ -136,15 +142,17 @@ where fn recover_range( tx_range: Range, provider: &Provider, + tx_batch_sender: mpsc::Sender, RecoveryResultSender)>>, senders_cursor: &mut CURSOR, ) -> Result<(), StageError> where - Provider: DBProvider + HeaderProvider + StaticFileProviderFactory, + Provider: + DBProvider + HeaderProvider
+ StaticFileProviderFactory, CURSOR: DbCursorRW, { - debug!(target: "sync::stages::sender_recovery", ?tx_range, "Recovering senders batch"); + debug!(target: "sync::stages::sender_recovery", ?tx_range, "Sending batch for processing"); - // Preallocate channels + // Preallocate channels for each chunks in the batch let (chunks, receivers): (Vec<_>, Vec<_>) = tx_range .clone() .step_by(WORKER_CHUNK_SIZE) @@ -156,62 +164,9 @@ where }) .unzip(); - let static_file_provider = provider.static_file_provider(); - - // We do not use `tokio::task::spawn_blocking` because, during a shutdown, - // there will be a timeout grace period in which Tokio does not allow spawning - // additional blocking tasks. This would cause this function to return - // `SenderRecoveryStageError::RecoveredSendersMismatch` at the end. - // - // However, using `std::thread::spawn` allows us to utilize the timeout grace - // period to complete some work without throwing errors during the shutdown. - std::thread::spawn(move || { - for (chunk_range, recovered_senders_tx) in chunks { - // Read the raw value, and let the rayon worker to decompress & decode. - let chunk = match static_file_provider.fetch_range_with_predicate( - StaticFileSegment::Transactions, - chunk_range.clone(), - |cursor, number| { - Ok(cursor - .get_one::>>( - number.into(), - )? - .map(|tx| (number, tx))) - }, - |_| true, - ) { - Ok(chunk) => chunk, - Err(err) => { - // We exit early since we could not process this chunk. - let _ = recovered_senders_tx - .send(Err(Box::new(SenderRecoveryStageError::StageError(err.into())))); - break - } - }; - - // Spawn the task onto the global rayon pool - // This task will send the results through the channel after it has read the transaction - // and calculated the sender. - rayon::spawn(move || { - let mut rlp_buf = Vec::with_capacity(128); - for (number, tx) in chunk { - let res = tx - .value() - .map_err(|err| Box::new(SenderRecoveryStageError::StageError(err.into()))) - .and_then(|tx| recover_sender((number, tx), &mut rlp_buf)); - - let is_err = res.is_err(); - - let _ = recovered_senders_tx.send(res); - - // Finish early - if is_err { - break - } - } - }); - } - }); + if let Some(err) = tx_batch_sender.send(chunks).err() { + return Err(StageError::Fatal(err.into())); + } debug!(target: "sync::stages::sender_recovery", ?tx_range, "Appending recovered senders to the database"); @@ -235,8 +190,9 @@ where provider.sealed_header(block_number)?.ok_or_else(|| { ProviderError::HeaderNotFound(block_number.into()) })?; + Err(StageError::Block { - block: Box::new(sealed_header), + block: Box::new(sealed_header.block_with_parent()), error: BlockErrorKind::Validation( ConsensusError::TransactionSignerRecoveryError, ), @@ -269,22 +225,97 @@ where .into(), )); } - Ok(()) } +/// Spawns a thread to handle the recovery of transaction senders for +/// specified chunks of a given batch. It processes incoming ranges, fetching and recovering +/// transactions in parallel using global rayon pool +fn setup_range_recovery( + provider: &Provider, +) -> mpsc::Sender, RecoveryResultSender)>> +where + Provider: DBProvider + + HeaderProvider + + StaticFileProviderFactory>, +{ + let (tx_sender, tx_receiver) = mpsc::channel::, RecoveryResultSender)>>(); + let static_file_provider = provider.static_file_provider(); + + // We do not use `tokio::task::spawn_blocking` because, during a shutdown, + // there will be a timeout grace period in which Tokio does not allow spawning + // additional blocking tasks. 
This would cause this function to return + // `SenderRecoveryStageError::RecoveredSendersMismatch` at the end. + // + // However, using `std::thread::spawn` allows us to utilize the timeout grace + // period to complete some work without throwing errors during the shutdown. + std::thread::spawn(move || { + while let Ok(chunks) = tx_receiver.recv() { + for (chunk_range, recovered_senders_tx) in chunks { + // Read the raw value, and let the rayon worker to decompress & decode. + let chunk = match static_file_provider.fetch_range_with_predicate( + StaticFileSegment::Transactions, + chunk_range.clone(), + |cursor, number| { + Ok(cursor + .get_one::::SignedTx>, + >>(number.into())? + .map(|tx| (number, tx))) + }, + |_| true, + ) { + Ok(chunk) => chunk, + Err(err) => { + // We exit early since we could not process this chunk. + let _ = recovered_senders_tx + .send(Err(Box::new(SenderRecoveryStageError::StageError(err.into())))); + break + } + }; + + // Spawn the task onto the global rayon pool + // This task will send the results through the channel after it has read the + // transaction and calculated the sender. + rayon::spawn(move || { + let mut rlp_buf = Vec::with_capacity(128); + for (number, tx) in chunk { + let res = tx + .value() + .map_err(|err| { + Box::new(SenderRecoveryStageError::StageError(err.into())) + }) + .and_then(|tx| recover_sender((number, tx), &mut rlp_buf)); + + let is_err = res.is_err(); + + let _ = recovered_senders_tx.send(res); + + // Finish early + if is_err { + break + } + } + }); + } + } + }); + tx_sender +} + #[inline] -fn recover_sender( - (tx_id, tx): (TxNumber, TransactionSignedNoHash), +fn recover_sender( + (tx_id, tx): (TxNumber, T), rlp_buf: &mut Vec, ) -> Result<(u64, Address), Box> { + rlp_buf.clear(); // We call [Signature::encode_and_recover_unchecked] because transactions run in the pipeline // are known to be valid - this means that we do not need to check whether or not the `s` // value is greater than `secp256k1n / 2` if past EIP-2. There are transactions // pre-homestead which have large `s` values, so using [Signature::recover_signer] here // would not be backwards-compatible. 
let sender = tx - .encode_and_recover_unchecked(rlp_buf) + .recover_signer_unchecked_with_buf(rlp_buf) .ok_or(SenderRecoveryStageError::FailedRecovery(FailedSenderRecoveryError { tx: tx_id }))?; Ok((tx_id, sender)) @@ -335,10 +366,16 @@ struct FailedSenderRecoveryError { #[cfg(test)] mod tests { + use super::*; + use crate::test_utils::{ + stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind, + TestRunnerError, TestStageDB, UnwindStageTestRunner, + }; use alloy_primitives::{BlockNumber, B256}; use assert_matches::assert_matches; use reth_db_api::cursor::DbCursorRO; use reth_primitives::{SealedBlock, TransactionSigned}; + use reth_primitives_traits::SignedTransaction; use reth_provider::{ providers::StaticFileWriter, DatabaseProviderFactory, PruneCheckpointWriter, StaticFileProviderFactory, TransactionsProvider, @@ -349,12 +386,6 @@ mod tests { self, random_block, random_block_range, BlockParams, BlockRangeParams, }; - use super::*; - use crate::test_utils::{ - stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, StorageKind, - TestRunnerError, TestStageDB, UnwindStageTestRunner, - }; - stage_test_suite_ext!(SenderRecoveryTestRunner, sender_recovery); /// Execute a block range with a single transaction @@ -526,7 +557,7 @@ mod tests { blocks[..=max_pruned_block as usize] .iter() .map(|block| block.body.transactions.len() as u64) - .sum::(), + .sum(), ), prune_mode: PruneMode::Full, }, @@ -541,8 +572,8 @@ mod tests { processed: blocks[..=max_processed_block] .iter() .map(|block| block.body.transactions.len() as u64) - .sum::(), - total: blocks.iter().map(|block| block.body.transactions.len() as u64).sum::() + .sum(), + total: blocks.iter().map(|block| block.body.transactions.len() as u64).sum() } ); } @@ -641,11 +672,9 @@ mod tests { while let Some((_, body)) = body_cursor.next()? { for tx_id in body.tx_num_range() { let transaction: TransactionSigned = provider - .transaction_by_id_no_hash(tx_id)? - .map(|tx| TransactionSigned { - hash: Default::default(), // we don't require the hash - signature: tx.signature, - transaction: tx.transaction, + .transaction_by_id_unhashed(tx_id)? 
+ .map(|tx| { + TransactionSigned::new_unhashed(tx.transaction, tx.signature) }) .expect("no transaction entry"); let signer = diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index 60c958abf86..fab10b0f953 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -1,12 +1,15 @@ +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{TxHash, TxNumber}; use num_traits::Zero; use reth_config::config::{EtlConfig, TransactionLookupConfig}; -use reth_db::{tables, RawKey, RawValue}; +use reth_db::{table::Value, tables, RawKey, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, transaction::{DbTx, DbTxMut}, }; use reth_etl::Collector; +use reth_primitives::NodePrimitives; +use reth_primitives_traits::SignedTransaction; use reth_provider::{ BlockReader, DBProvider, PruneCheckpointReader, PruneCheckpointWriter, StaticFileProviderFactory, StatsReader, TransactionsProvider, TransactionsProviderExt, @@ -60,7 +63,7 @@ where + BlockReader + PruneCheckpointReader + StatsReader - + StaticFileProviderFactory + + StaticFileProviderFactory> + TransactionsProviderExt, { /// Return the id of the stage @@ -206,7 +209,7 @@ where for tx_id in body.tx_num_range() { // First delete the transaction and hash to id mapping if let Some(transaction) = static_file_provider.transaction_by_id(tx_id)? { - if tx_hash_number_cursor.seek_exact(transaction.hash())?.is_some() { + if tx_hash_number_cursor.seek_exact(transaction.trie_hash())?.is_some() { tx_hash_number_cursor.delete_current()?; } } @@ -383,7 +386,7 @@ mod tests { for block in &blocks[..=max_processed_block] { for transaction in &block.body.transactions { if block.number > max_pruned_block { - tx_hash_numbers.push((transaction.hash, tx_hash_number)); + tx_hash_numbers.push((transaction.hash(), tx_hash_number)); } tx_hash_number += 1; } @@ -416,8 +419,8 @@ mod tests { processed: blocks[..=max_processed_block] .iter() .map(|block| block.body.transactions.len() as u64) - .sum::(), - total: blocks.iter().map(|block| block.body.transactions.len() as u64).sum::() + .sum(), + total: blocks.iter().map(|block| block.body.transactions.len() as u64).sum() } ); } diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index caf039faca1..169d556348b 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -1,5 +1,5 @@ //! Utils for `stages`. -use alloy_primitives::BlockNumber; +use alloy_primitives::{BlockNumber, TxNumber}; use reth_config::config::EtlConfig; use reth_db::BlockNumberList; use reth_db_api::{ @@ -10,7 +10,11 @@ use reth_db_api::{ DatabaseError, }; use reth_etl::Collector; -use reth_provider::DBProvider; +use reth_primitives::StaticFileSegment; +use reth_provider::{ + providers::StaticFileProvider, BlockReader, DBProvider, ProviderError, + StaticFileProviderFactory, +}; use reth_stages_api::StageError; use std::{collections::HashMap, hash::Hash, ops::RangeBounds}; use tracing::info; @@ -244,3 +248,39 @@ impl LoadMode { matches!(self, Self::Flush) } } + +/// Called when database is ahead of static files. Attempts to find the first block we are missing +/// transactions for. 
+pub(crate) fn missing_static_data_error( + last_tx_num: TxNumber, + static_file_provider: &StaticFileProvider, + provider: &Provider, + segment: StaticFileSegment, +) -> Result +where + Provider: BlockReader + StaticFileProviderFactory, +{ + let mut last_block = + static_file_provider.get_highest_static_file_block(segment).unwrap_or_default(); + + // To be extra safe, we make sure that the last tx num matches the last block from its indices. + // If not, get it. + loop { + if let Some(indices) = provider.block_body_indices(last_block)? { + if indices.last_tx_num() <= last_tx_num { + break + } + } + if last_block == 0 { + break + } + last_block -= 1; + } + + let missing_block = Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); + + Ok(StageError::MissingStaticFileData { + block: Box::new(missing_block.block_with_parent()), + segment, + }) +} diff --git a/crates/stages/stages/src/test_utils/runner.rs b/crates/stages/stages/src/test_utils/runner.rs index 26f245c1304..c3d25b99536 100644 --- a/crates/stages/stages/src/test_utils/runner.rs +++ b/crates/stages/stages/src/test_utils/runner.rs @@ -1,7 +1,6 @@ use super::TestStageDB; -use reth_chainspec::ChainSpec; use reth_db::{test_utils::TempDatabase, Database, DatabaseEnv}; -use reth_provider::{DatabaseProvider, ProviderError}; +use reth_provider::{test_utils::MockNodeTypesWithDB, DatabaseProvider, ProviderError}; use reth_stages_api::{ ExecInput, ExecOutput, Stage, StageError, StageExt, UnwindInput, UnwindOutput, }; @@ -20,7 +19,7 @@ pub(crate) enum TestRunnerError { /// A generic test runner for stages. pub(crate) trait StageTestRunner { - type S: Stage as Database>::TXMut, ChainSpec>> + type S: Stage as Database>::TXMut, MockNodeTypesWithDB>> + 'static; /// Return a reference to the database. diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 4c43d4cdcd1..5a6c12d8e00 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -15,7 +15,7 @@ use reth_db_api::{ DatabaseError as DbError, }; use reth_primitives::{ - Account, Receipt, SealedBlock, SealedHeader, StaticFileSegment, StorageEntry, + Account, EthPrimitives, Receipt, SealedBlock, SealedHeader, StaticFileSegment, StorageEntry, }; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, @@ -24,7 +24,7 @@ use reth_provider::{ }; use reth_storage_errors::provider::ProviderResult; use reth_testing_utils::generators::ChangeSet; -use std::{collections::BTreeMap, path::Path}; +use std::{collections::BTreeMap, fmt::Debug, path::Path}; use tempfile::TempDir; /// Test database that is used for testing stage implementations. @@ -142,7 +142,7 @@ impl TestStageDB { /// Insert header to static file if `writer` exists, otherwise to DB. pub fn insert_header( - writer: Option<&mut StaticFileProviderRWRefMut<'_>>, + writer: Option<&mut StaticFileProviderRWRefMut<'_, EthPrimitives>>, tx: &TX, header: &SealedHeader, td: U256, @@ -265,9 +265,9 @@ impl TestStageDB { let res = block.body.transactions.iter().try_for_each(|body_tx| { if let Some(txs_writer) = &mut txs_writer { - txs_writer.append_transaction(next_tx_num, &body_tx.clone().into())?; + txs_writer.append_transaction(next_tx_num, body_tx)?; } else { - tx.put::(next_tx_num, body_tx.clone().into())? + tx.put::(next_tx_num, body_tx.clone())? 
} next_tx_num += 1; Ok::<(), ProviderError>(()) diff --git a/crates/stages/types/Cargo.toml b/crates/stages/types/Cargo.toml index 54b14b335cb..0243415942b 100644 --- a/crates/stages/types/Cargo.toml +++ b/crates/stages/types/Cargo.toml @@ -12,17 +12,41 @@ description = "Commonly used types for stages usage in reth." workspace = true [dependencies] -reth-codecs.workspace = true +reth-codecs = { workspace = true, optional = true } reth-trie-common.workspace = true alloy-primitives.workspace = true -modular-bitfield.workspace = true -bytes.workspace = true serde.workspace = true +arbitrary = { workspace = true, features = ["derive"], optional = true } + +bytes = { workspace = true, optional = true } +modular-bitfield = { workspace = true, optional = true } [dev-dependencies] +reth-codecs.workspace = true +alloy-primitives = { workspace = true, features = ["arbitrary", "rand"] } arbitrary = { workspace = true, features = ["derive"] } +modular-bitfield.workspace = true proptest.workspace = true proptest-arbitrary-interop.workspace = true test-fuzz.workspace = true rand.workspace = true +bytes.workspace = true + +[features] +reth-codec = [ + "dep:reth-codecs", + "dep:bytes", + "dep:modular-bitfield", + "reth-trie-common/reth-codec" +] +test-utils = [ + "dep:arbitrary", + "reth-codecs/test-utils", + "reth-trie-common/test-utils" +] +arbitrary = [ + "alloy-primitives/arbitrary", + "reth-codecs/arbitrary", + "reth-trie-common/arbitrary" +] diff --git a/crates/stages/types/src/checkpoints.rs b/crates/stages/types/src/checkpoints.rs index 79e896bf4d9..160c901e1cb 100644 --- a/crates/stages/types/src/checkpoints.rs +++ b/crates/stages/types/src/checkpoints.rs @@ -1,11 +1,9 @@ +use super::StageId; +use alloc::vec::Vec; use alloy_primitives::{Address, BlockNumber, B256}; -use bytes::Buf; -use reth_codecs::{add_arbitrary_tests, Compact}; +use core::ops::RangeInclusive; use reth_trie_common::{hash_builder::HashBuilderState, StoredSubNode}; use serde::{Deserialize, Serialize}; -use std::ops::RangeInclusive; - -use super::StageId; /// Saves the progress of Merkle stage. #[derive(Default, Debug, Clone, PartialEq, Eq)] @@ -32,7 +30,8 @@ impl MerkleCheckpoint { } } -impl Compact for MerkleCheckpoint { +#[cfg(any(test, feature = "reth-codec"))] +impl reth_codecs::Compact for MerkleCheckpoint { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, @@ -56,6 +55,7 @@ impl Compact for MerkleCheckpoint { } fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { + use bytes::Buf; let target_block = buf.get_u64(); let last_account_key = B256::from_slice(&buf[..32]); @@ -75,9 +75,10 @@ impl Compact for MerkleCheckpoint { } /// Saves the progress of AccountHashing stage. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct AccountHashingCheckpoint { /// The next account to start hashing from. pub address: Option
, @@ -88,9 +89,10 @@ pub struct AccountHashingCheckpoint { } /// Saves the progress of StorageHashing stage. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct StorageHashingCheckpoint { /// The next account to start hashing from. pub address: Option
, @@ -103,9 +105,10 @@ pub struct StorageHashingCheckpoint { } /// Saves the progress of Execution stage. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct ExecutionCheckpoint { /// Block range which this checkpoint is valid for. pub block_range: CheckpointBlockRange, @@ -114,9 +117,10 @@ pub struct ExecutionCheckpoint { } /// Saves the progress of Headers stage. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct HeadersCheckpoint { /// Block range which this checkpoint is valid for. pub block_range: CheckpointBlockRange, @@ -125,9 +129,10 @@ pub struct HeadersCheckpoint { } /// Saves the progress of Index History stages. -#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct IndexHistoryCheckpoint { /// Block range which this checkpoint is valid for. pub block_range: CheckpointBlockRange, @@ -136,9 +141,10 @@ pub struct IndexHistoryCheckpoint { } /// Saves the progress of abstract stage iterating over or downloading entities. -#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct EntitiesCheckpoint { /// Number of entities already processed. pub processed: u64, @@ -165,9 +171,10 @@ impl EntitiesCheckpoint { /// Saves the block range. Usually, it's used to check the validity of some stage checkpoint across /// multiple executions. 
-#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct CheckpointBlockRange { /// The first block of the range, inclusive. pub from: BlockNumber, @@ -188,9 +195,10 @@ impl From<&RangeInclusive> for CheckpointBlockRange { } /// Saves the progress of a stage. -#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[derive(Debug, Default, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub struct StageCheckpoint { /// The maximum block processed by the stage. pub block_number: BlockNumber, @@ -255,9 +263,10 @@ impl StageCheckpoint { // TODO(alexey): add a merkle checkpoint. Currently it's hard because [`MerkleCheckpoint`] // is not a Copy type. /// Stage-specific checkpoint metrics. -#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary))] -#[add_arbitrary_tests(compact)] +#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)] +#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))] +#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] pub enum StageUnitCheckpoint { /// Saves the progress of AccountHashing stage. Account(AccountHashingCheckpoint), @@ -386,6 +395,7 @@ stage_unit_checkpoints!( mod tests { use super::*; use rand::Rng; + use reth_codecs::Compact; #[test] fn merkle_checkpoint_roundtrip() { diff --git a/crates/stages/types/src/execution.rs b/crates/stages/types/src/execution.rs index 61f7313a380..a334951abef 100644 --- a/crates/stages/types/src/execution.rs +++ b/crates/stages/types/src/execution.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use core::time::Duration; /// The thresholds at which the execution stage writes state changes to the database. 
/// diff --git a/crates/stages/types/src/lib.rs b/crates/stages/types/src/lib.rs index 0132c8b410d..4e01bf7dbf4 100644 --- a/crates/stages/types/src/lib.rs +++ b/crates/stages/types/src/lib.rs @@ -8,6 +8,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +extern crate alloc; + mod id; use alloy_primitives::{BlockHash, BlockNumber}; pub use id::StageId; @@ -65,7 +67,7 @@ impl From for PipelineTarget { } } -impl std::fmt::Display for PipelineTarget { +impl core::fmt::Display for PipelineTarget { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Sync(block) => { diff --git a/crates/static-file/static-file/Cargo.toml b/crates/static-file/static-file/Cargo.toml index 8fa89e12e0f..89f60687895 100644 --- a/crates/static-file/static-file/Cargo.toml +++ b/crates/static-file/static-file/Cargo.toml @@ -13,17 +13,16 @@ workspace = true [dependencies] # reth -reth-chainspec.workspace = true +reth-codecs.workspace = true reth-db.workspace = true reth-db-api.workspace = true reth-provider.workspace = true reth-storage-errors.workspace = true -reth-nippy-jar.workspace = true reth-tokio-util.workspace = true reth-prune-types.workspace = true +reth-primitives-traits.workspace = true reth-static-file-types.workspace = true reth-stages-types.workspace = true -reth-node-types.workspace = true alloy-primitives.workspace = true diff --git a/crates/static-file/static-file/src/lib.rs b/crates/static-file/static-file/src/lib.rs index 1bfe4134e95..6c95baaae92 100644 --- a/crates/static-file/static-file/src/lib.rs +++ b/crates/static-file/static-file/src/lib.rs @@ -7,14 +7,12 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod event; pub mod segments; mod static_file_producer; -pub use event::StaticFileProducerEvent; pub use static_file_producer::{ StaticFileProducer, StaticFileProducerInner, StaticFileProducerResult, - StaticFileProducerWithResult, StaticFileTargets, + StaticFileProducerWithResult, }; // Re-export for convenience. 
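// ---------------------------------------------------------------------------
// Editor's sketch (not part of the upstream diff): the checkpoint structs in
// `crates/stages/types` above all swap a hard `derive(Compact)` for derives
// gated behind the crate's new `reth-codec` and `test-utils` features, while
// serde support stays unconditional. `ExampleCheckpoint` is a hypothetical
// type showing the pattern in isolation.
// ---------------------------------------------------------------------------
use serde::{Deserialize, Serialize};

#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(any(test, feature = "test-utils"), derive(arbitrary::Arbitrary))]
#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::Compact))]
#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))]
struct ExampleCheckpoint {
    /// Number of entities already processed.
    processed: u64,
    /// Total number of entities to process.
    total: u64,
}
// With this gating, downstream consumers that only (de)serialize checkpoints
// no longer pull in `reth-codecs`, `bytes`, or `modular-bitfield`.
// ---------------------------------------------------------------------------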
diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs index 54d5bee65cf..dff80a23f83 100644 --- a/crates/static-file/static-file/src/segments/headers.rs +++ b/crates/static-file/static-file/src/segments/headers.rs @@ -1,11 +1,10 @@ use crate::segments::Segment; use alloy_primitives::BlockNumber; -use reth_db::tables; +use reth_codecs::Compact; +use reth_db::{table::Value, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; -use reth_provider::{ - providers::{StaticFileProvider, StaticFileWriter}, - DBProvider, -}; +use reth_primitives_traits::NodePrimitives; +use reth_provider::{providers::StaticFileWriter, DBProvider, StaticFileProviderFactory}; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; @@ -14,7 +13,11 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Headers; -impl Segment for Headers { +impl Segment for Headers +where + Provider: StaticFileProviderFactory> + + DBProvider, +{ fn segment(&self) -> StaticFileSegment { StaticFileSegment::Headers } @@ -22,13 +25,16 @@ impl Segment for Headers { fn copy_to_static_files( &self, provider: Provider, - static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()> { + let static_file_provider = provider.static_file_provider(); let mut static_file_writer = static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Headers)?; - let mut headers_cursor = provider.tx_ref().cursor_read::()?; + let mut headers_cursor = provider + .tx_ref() + .cursor_read::::BlockHeader>>( + )?; let headers_walker = headers_cursor.walk_range(block_range.clone())?; let mut header_td_cursor = @@ -49,9 +55,7 @@ impl Segment for Headers { debug_assert_eq!(header_block, header_td_block); debug_assert_eq!(header_td_block, canonical_header_block); - let _static_file_block = - static_file_writer.append_header(&header, header_td.0, &canonical_header)?; - debug_assert_eq!(_static_file_block, header_block); + static_file_writer.append_header(&header, header_td.0, &canonical_header)?; } Ok(()) diff --git a/crates/static-file/static-file/src/segments/mod.rs b/crates/static-file/static-file/src/segments/mod.rs index 3d961c7b119..fc79effdd5a 100644 --- a/crates/static-file/static-file/src/segments/mod.rs +++ b/crates/static-file/static-file/src/segments/mod.rs @@ -10,22 +10,22 @@ mod receipts; pub use receipts::Receipts; use alloy_primitives::BlockNumber; -use reth_provider::providers::StaticFileProvider; +use reth_provider::StaticFileProviderFactory; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; /// A segment represents moving some portion of the data to static files. -pub trait Segment: Send + Sync { +pub trait Segment: Send + Sync { /// Returns the [`StaticFileSegment`]. fn segment(&self) -> StaticFileSegment; - /// Move data to static files for the provided block range. [`StaticFileProvider`] will handle + /// Move data to static files for the provided block range. + /// [`StaticFileProvider`](reth_provider::providers::StaticFileProvider) will handle /// the management of and writing to files. 
fn copy_to_static_files( &self, provider: Provider, - static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()>; } diff --git a/crates/static-file/static-file/src/segments/receipts.rs b/crates/static-file/static-file/src/segments/receipts.rs index 4e2185a598a..bd808b4d839 100644 --- a/crates/static-file/static-file/src/segments/receipts.rs +++ b/crates/static-file/static-file/src/segments/receipts.rs @@ -3,8 +3,7 @@ use alloy_primitives::BlockNumber; use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_provider::{ - providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, DBProvider, + providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory, }; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::{ProviderError, ProviderResult}; @@ -14,7 +13,9 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Receipts; -impl Segment for Receipts { +impl Segment + for Receipts +{ fn segment(&self) -> StaticFileSegment { StaticFileSegment::Receipts } @@ -22,15 +23,14 @@ impl Segment for Receipts { fn copy_to_static_files( &self, provider: Provider, - static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()> { + let static_file_provider = provider.static_file_provider(); let mut static_file_writer = static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Receipts)?; for block in block_range { - let _static_file_block = static_file_writer.increment_block(block)?; - debug_assert_eq!(_static_file_block, block); + static_file_writer.increment_block(block)?; let block_body_indices = provider .block_body_indices(block)? diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs index 52e0ca8b575..5b686cfe109 100644 --- a/crates/static-file/static-file/src/segments/transactions.rs +++ b/crates/static-file/static-file/src/segments/transactions.rs @@ -1,10 +1,11 @@ use crate::segments::Segment; use alloy_primitives::BlockNumber; -use reth_db::tables; +use reth_codecs::Compact; +use reth_db::{table::Value, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ - providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, DBProvider, + providers::StaticFileWriter, BlockReader, DBProvider, StaticFileProviderFactory, }; use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::{ProviderError, ProviderResult}; @@ -14,7 +15,12 @@ use std::ops::RangeInclusive; #[derive(Debug, Default)] pub struct Transactions; -impl Segment for Transactions { +impl Segment for Transactions +where + Provider: StaticFileProviderFactory> + + DBProvider + + BlockReader, +{ fn segment(&self) -> StaticFileSegment { StaticFileSegment::Transactions } @@ -24,22 +30,22 @@ impl Segment for Transactions { fn copy_to_static_files( &self, provider: Provider, - static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()> { + let static_file_provider = provider.static_file_provider(); let mut static_file_writer = static_file_provider .get_writer(*block_range.start(), StaticFileSegment::Transactions)?; for block in block_range { - let _static_file_block = static_file_writer.increment_block(block)?; - debug_assert_eq!(_static_file_block, block); + static_file_writer.increment_block(block)?; let block_body_indices = provider 
.block_body_indices(block)? .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; - let mut transactions_cursor = - provider.tx_ref().cursor_read::()?; + let mut transactions_cursor = provider.tx_ref().cursor_read::::SignedTx, + >>()?; let transactions_walker = transactions_cursor.walk_range(block_body_indices.tx_num_range())?; diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 2c442aedfa3..30a72561b23 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -4,13 +4,16 @@ use crate::{segments, segments::Segment, StaticFileProducerEvent}; use alloy_primitives::BlockNumber; use parking_lot::Mutex; use rayon::prelude::*; +use reth_codecs::Compact; +use reth_db::table::Value; +use reth_primitives_traits::NodePrimitives; use reth_provider::{ providers::StaticFileWriter, BlockReader, ChainStateBlockReader, DBProvider, DatabaseProviderFactory, StageCheckpointReader, StaticFileProviderFactory, }; use reth_prune_types::PruneModes; use reth_stages_types::StageId; -use reth_static_file_types::HighestStaticFiles; +use reth_static_file_types::{HighestStaticFiles, StaticFileTargets}; use reth_storage_errors::provider::ProviderResult; use reth_tokio_util::{EventSender, EventStream}; use std::{ @@ -66,40 +69,6 @@ pub struct StaticFileProducerInner { event_sender: EventSender, } -/// Static File targets, per data segment, measured in [`BlockNumber`]. -#[derive(Debug, Clone, Eq, PartialEq)] -pub struct StaticFileTargets { - headers: Option>, - receipts: Option>, - transactions: Option>, -} - -impl StaticFileTargets { - /// Returns `true` if any of the targets are [Some]. - pub const fn any(&self) -> bool { - self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some() - } - - // Returns `true` if all targets are either [`None`] or has beginning of the range equal to the - // highest static_file. - fn is_contiguous_to_highest_static_files(&self, static_files: HighestStaticFiles) -> bool { - [ - (self.headers.as_ref(), static_files.headers), - (self.receipts.as_ref(), static_files.receipts), - (self.transactions.as_ref(), static_files.transactions), - ] - .iter() - .all(|(target_block_range, highest_static_fileted_block)| { - target_block_range.map_or(true, |target_block_range| { - *target_block_range.start() == - highest_static_fileted_block.map_or(0, |highest_static_fileted_block| { - highest_static_fileted_block + 1 - }) - }) - }) - } -} - impl StaticFileProducerInner { fn new(provider: Provider, prune_modes: PruneModes) -> Self { Self { provider, prune_modes, event_sender: Default::default() } @@ -119,7 +88,12 @@ where impl StaticFileProducerInner where Provider: StaticFileProviderFactory - + DatabaseProviderFactory, + + DatabaseProviderFactory< + Provider: StaticFileProviderFactory< + Primitives: NodePrimitives, + > + StageCheckpointReader + + BlockReader, + >, { /// Listen for events on the `static_file_producer`. 
pub fn events(&self) -> EventStream<StaticFileProducerEvent> { @@ -170,7 +144,7 @@ where // Create a new database transaction on every segment to prevent long-lived read-only // transactions let provider = self.provider.database_provider_ro()?.disable_long_read_transaction_safety(); - segment.copy_to_static_files(provider, self.provider.static_file_provider(), block_range.clone())?; + segment.copy_to_static_files(provider, block_range.clone())?; let elapsed = start.elapsed(); // TODO(alexey): track in metrics debug!(target: "static_file", segment = %segment.segment(), ?block_range, ?elapsed, "Finished StaticFileProducer segment"); diff --git a/crates/static-file/static-file/src/event.rs b/crates/static-file/types/src/event.rs similarity index 87% rename from crates/static-file/static-file/src/event.rs rename to crates/static-file/types/src/event.rs index a11333ce53a..1e5d2cb6032 100644 --- a/crates/static-file/static-file/src/event.rs +++ b/crates/static-file/types/src/event.rs @@ -1,7 +1,7 @@ use crate::StaticFileTargets; use std::time::Duration; -/// An event emitted by a [`StaticFileProducer`][crate::StaticFileProducer]. +/// An event emitted by the static file producer. #[derive(Debug, PartialEq, Eq, Clone)] pub enum StaticFileProducerEvent { /// Emitted when static file producer started running. diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 38093113886..7a9980b3559 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -9,11 +9,14 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod compression; +mod event; mod segment; use alloy_primitives::BlockNumber; pub use compression::Compression; +pub use event::StaticFileProducerEvent; pub use segment::{SegmentConfig, SegmentHeader, SegmentRangeInclusive, StaticFileSegment}; +use std::ops::RangeInclusive; /// Default static file block count. pub const DEFAULT_BLOCKS_PER_STATIC_FILE: u64 = 500_000; @@ -52,16 +55,53 @@ impl HighestStaticFiles { } /// Returns the minimum block of all segments. - pub fn min(&self) -> Option<BlockNumber> { + pub fn min_block_num(&self) -> Option<BlockNumber> { [self.headers, self.transactions, self.receipts].iter().filter_map(|&option| option).min() } /// Returns the maximum block of all segments. - pub fn max(&self) -> Option<BlockNumber> { + pub fn max_block_num(&self) -> Option<BlockNumber> { [self.headers, self.transactions, self.receipts].iter().filter_map(|&option| option).max() } } +/// Static File targets, per data segment, measured in [`BlockNumber`]. +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct StaticFileTargets { + /// Targeted range of headers. + pub headers: Option<RangeInclusive<BlockNumber>>, + /// Targeted range of receipts. + pub receipts: Option<RangeInclusive<BlockNumber>>, + /// Targeted range of transactions. + pub transactions: Option<RangeInclusive<BlockNumber>>, +} + +impl StaticFileTargets { + /// Returns `true` if any of the targets are [Some]. + pub const fn any(&self) -> bool { + self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some() + } + + /// Returns `true` if all targets are either [`None`] or begin immediately after the + /// highest static file block.
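As a worked example of the contiguity check implemented next: with the highest headers static file ending at block 499_999, a headers target of `500_000..=999_999` is contiguous (`500_000 == 499_999 + 1`), while `600_000..=999_999` would leave a gap and fail; segments with a `None` target pass trivially. A hedged sketch using the public fields above:

    let targets = StaticFileTargets {
        headers: Some(500_000..=999_999),
        receipts: None,
        transactions: None,
    };
    let highest = HighestStaticFiles { headers: Some(499_999), receipts: None, transactions: None };
    // `None` receipts/transactions targets count as contiguous.
    assert!(targets.is_contiguous_to_highest_static_files(highest));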
+ pub fn is_contiguous_to_highest_static_files(&self, static_files: HighestStaticFiles) -> bool { + [ + (self.headers.as_ref(), static_files.headers), + (self.receipts.as_ref(), static_files.receipts), + (self.transactions.as_ref(), static_files.transactions), + ] + .iter() + .all(|(target_block_range, highest_static_fileted_block)| { + target_block_range.is_none_or(|target_block_range| { + *target_block_range.start() == + highest_static_fileted_block.map_or(0, |highest_static_fileted_block| { + highest_static_fileted_block + 1 + }) + }) + }) + } +} + /// Each static file has a fixed number of blocks. This gives out the range where the requested /// block is positioned. Used for segment filename. pub const fn find_fixed_range( @@ -71,3 +111,81 @@ pub const fn find_fixed_range( let start = (block / blocks_per_static_file) * blocks_per_static_file; SegmentRangeInclusive::new(start, start + blocks_per_static_file - 1) } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_highest_static_files_highest() { + let files = + HighestStaticFiles { headers: Some(100), receipts: Some(200), transactions: None }; + + // Test for headers segment + assert_eq!(files.highest(StaticFileSegment::Headers), Some(100)); + + // Test for receipts segment + assert_eq!(files.highest(StaticFileSegment::Receipts), Some(200)); + + // Test for transactions segment + assert_eq!(files.highest(StaticFileSegment::Transactions), None); + } + + #[test] + fn test_highest_static_files_as_mut() { + let mut files = HighestStaticFiles::default(); + + // Modify headers value + *files.as_mut(StaticFileSegment::Headers) = Some(150); + assert_eq!(files.headers, Some(150)); + + // Modify receipts value + *files.as_mut(StaticFileSegment::Receipts) = Some(250); + assert_eq!(files.receipts, Some(250)); + + // Modify transactions value + *files.as_mut(StaticFileSegment::Transactions) = Some(350); + assert_eq!(files.transactions, Some(350)); + } + + #[test] + fn test_highest_static_files_min() { + let files = + HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: None }; + + // Minimum value among the available segments + assert_eq!(files.min_block_num(), Some(100)); + + let empty_files = HighestStaticFiles::default(); + // No values, should return None + assert_eq!(empty_files.min_block_num(), None); + } + + #[test] + fn test_highest_static_files_max() { + let files = + HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: Some(500) }; + + // Maximum value among the available segments + assert_eq!(files.max_block_num(), Some(500)); + + let empty_files = HighestStaticFiles::default(); + // No values, should return None + assert_eq!(empty_files.max_block_num(), None); + } + + #[test] + fn test_find_fixed_range() { + // Test with default block size + let block: BlockNumber = 600_000; + let range = find_fixed_range(block, DEFAULT_BLOCKS_PER_STATIC_FILE); + assert_eq!(range.start(), 500_000); + assert_eq!(range.end(), 999_999); + + // Test with a custom block size + let block: BlockNumber = 1_200_000; + let range = find_fixed_range(block, 1_000_000); + assert_eq!(range.start(), 1_000_000); + assert_eq!(range.end(), 1_999_999); + } +} diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 640ec8c9561..76a3721629a 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -26,7 +26,10 @@ op-alloy-consensus = { workspace = true, optional = true } # misc bytes.workspace = true -modular-bitfield = { workspace = true, optional = 
true } +modular-bitfield.workspace = true +visibility = { version = "0.1.1", optional = true} +serde.workspace = true +arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] alloy-eips = { workspace = true, default-features = false, features = [ @@ -39,25 +42,51 @@ alloy-primitives = { workspace = true, features = [ "rand", ] } alloy-consensus = { workspace = true, features = ["arbitrary"] } -alloy-rlp.workspace = true -rand.workspace = true test-fuzz.workspace = true serde_json.workspace = true arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true -serde.workspace = true +rstest.workspace = true [features] default = ["std", "alloy"] -std = ["alloy-primitives/std", "bytes/std"] +std = [ + "alloy-primitives/std", + "bytes/std", + "alloy-consensus?/std", + "alloy-eips?/std", + "alloy-genesis?/std", + "alloy-trie?/std", + "serde/std" +] alloy = [ "dep:alloy-consensus", "dep:alloy-eips", "dep:alloy-genesis", - "dep:modular-bitfield", "dep:alloy-trie", ] -optimism = ["alloy", "dep:op-alloy-consensus"] -test-utils = [] +op = ["alloy", "dep:op-alloy-consensus"] +test-utils = [ + "std", + "alloy", + "arbitrary", + "dep:visibility", + "dep:arbitrary" +] +serde = [ + "alloy-consensus?/serde", + "alloy-eips?/serde", + "alloy-primitives/serde", + "alloy-trie?/serde", + "bytes/serde", + "op-alloy-consensus?/serde", +] +arbitrary = [ + "alloy-consensus?/arbitrary", + "alloy-eips?/arbitrary", + "alloy-primitives/arbitrary", + "alloy-trie?/arbitrary", + "op-alloy-consensus?/arbitrary" +] diff --git a/crates/storage/codecs/derive/src/arbitrary.rs b/crates/storage/codecs/derive/src/arbitrary.rs index 8aa44062e21..753bb1e33a5 100644 --- a/crates/storage/codecs/derive/src/arbitrary.rs +++ b/crates/storage/codecs/derive/src/arbitrary.rs @@ -18,10 +18,26 @@ pub fn maybe_generate_tests( let mut traits = vec![]; let mut roundtrips = vec![]; let mut additional_tests = vec![]; + let mut is_crate = false; - for arg in args { + let mut iter = args.into_iter().peekable(); + + // we check if there's a crate argument which is used from inside the codecs crate directly + if let Some(arg) = iter.peek() { + if arg.to_string() == "crate" { + is_crate = true; + iter.next(); + } + } + + for arg in iter { if arg.to_string() == "compact" { - traits.push(quote! { use super::Compact; }); + let path = if is_crate { + quote! { use crate::Compact; } + } else { + quote! { use reth_codecs::Compact; } + }; + traits.push(path); roundtrips.push(quote! { { let mut buf = vec![]; diff --git a/crates/storage/codecs/derive/src/compact/flags.rs b/crates/storage/codecs/derive/src/compact/flags.rs index 3242a611eb3..798c9ad53b4 100644 --- a/crates/storage/codecs/derive/src/compact/flags.rs +++ b/crates/storage/codecs/derive/src/compact/flags.rs @@ -1,9 +1,11 @@ use super::*; +use syn::Attribute; /// Generates the flag fieldset struct that is going to be used to store the length of fields and /// their potential presence. 
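Stepping back to the `arbitrary.rs` change above: the leading `crate` argument only switches which path the generated tests import, while the roundtrip they enforce stays the same. Written out as a standalone helper (hypothetical function; its body mirrors the generated `fuzz_test_*` tests):

    // #[add_arbitrary_tests(compact)] in a downstream crate injects `use reth_codecs::Compact;`
    // #[add_arbitrary_tests(crate, compact)] inside reth-codecs injects `use crate::Compact;`
    fn assert_compact_roundtrip<T>(obj: T)
    where
        T: reth_codecs::Compact + Clone + PartialEq + core::fmt::Debug,
    {
        let mut buf = vec![];
        let len = obj.clone().to_compact(&mut buf);
        let (same_obj, _) = T::from_compact(buf.as_ref(), len);
        assert_eq!(obj, same_obj);
    }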
pub(crate) fn generate_flag_struct( ident: &Ident, + attrs: &[Attribute], has_lifetime: bool, fields: &FieldList, is_zstd: bool, @@ -13,6 +15,8 @@ pub(crate) fn generate_flag_struct( let flags_ident = format_ident!("{ident}Flags"); let mod_flags_ident = format_ident!("{ident}_flags"); + let reth_codecs = parse_reth_codecs_path(attrs).unwrap(); + let mut field_flags = vec![]; let total_bits = if is_enum { @@ -88,8 +92,9 @@ pub(crate) fn generate_flag_struct( pub use #mod_flags_ident::#flags_ident; #[allow(non_snake_case)] mod #mod_flags_ident { - use bytes::Buf; - use modular_bitfield::prelude::*; + use #reth_codecs::__private::Buf; + use #reth_codecs::__private::modular_bitfield; + use #reth_codecs::__private::modular_bitfield::prelude::*; #[doc = #docs] #[bitfield] diff --git a/crates/storage/codecs/derive/src/compact/generator.rs b/crates/storage/codecs/derive/src/compact/generator.rs index 1fb6d40fa2b..63fef05ad70 100644 --- a/crates/storage/codecs/derive/src/compact/generator.rs +++ b/crates/storage/codecs/derive/src/compact/generator.rs @@ -2,10 +2,12 @@ use super::*; use convert_case::{Case, Casing}; +use syn::{Attribute, LitStr}; /// Generates code to implement the `Compact` trait for a data type. pub fn generate_from_to( ident: &Ident, + attrs: &[Attribute], has_lifetime: bool, fields: &FieldList, is_zstd: bool, @@ -20,6 +22,8 @@ pub fn generate_from_to( let fuzz = format_ident!("fuzz_test_{snake_case_ident}"); let test = format_ident!("fuzz_{snake_case_ident}"); + let reth_codecs = parse_reth_codecs_path(attrs).unwrap(); + let lifetime = if has_lifetime { quote! { 'a } } else { @@ -28,11 +32,11 @@ pub fn generate_from_to( let impl_compact = if has_lifetime { quote! { - impl<#lifetime> Compact for #ident<#lifetime> + impl<#lifetime> #reth_codecs::Compact for #ident<#lifetime> } } else { quote! { - impl Compact for #ident + impl #reth_codecs::Compact for #ident } }; @@ -53,6 +57,7 @@ pub fn generate_from_to( #[allow(dead_code)] #[test_fuzz::test_fuzz] fn #fuzz(obj: #ident) { + use #reth_codecs::Compact; let mut buf = vec![]; let len = obj.clone().to_compact(&mut buf); let (same_obj, buf) = #ident::from_compact(buf.as_ref(), len); @@ -191,7 +196,7 @@ fn generate_to_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> Vec< } // Just because a type supports compression, doesn't mean all its values are to be compressed. - // We skip the smaller ones, and thus require a flag `__zstd` to specify if this value is + // We skip the smaller ones, and thus require a flag` __zstd` to specify if this value is // compressed or not. if is_zstd { lines.push(quote! { @@ -232,3 +237,25 @@ fn generate_to_compact(fields: &FieldList, ident: &Ident, is_zstd: bool) -> Vec< lines } + +/// Function to extract the crate path from `reth_codecs(crate = "...")` attribute. 
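For reference, the attribute parsed by the helper below is applied as in the derives throughout this diff (hypothetical `Example` struct; any key other than `crate = "..."` is rejected with "unsupported attribute"):

    #[derive(Compact)]
    #[reth_codecs(crate = "crate")] // derive emits `crate::Compact` instead of `reth_codecs::Compact`
    pub(crate) struct Example {
        value: u64,
    }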
+pub(crate) fn parse_reth_codecs_path(attrs: &[Attribute]) -> syn::Result { + // let default_crate_path: syn::Path = syn::parse_str("reth-codecs").unwrap(); + let mut reth_codecs_path: syn::Path = syn::parse_quote!(reth_codecs); + for attr in attrs { + if attr.path().is_ident("reth_codecs") { + attr.parse_nested_meta(|meta| { + if meta.path.is_ident("crate") { + let value = meta.value()?; + let lit: LitStr = value.parse()?; + reth_codecs_path = syn::parse_str(&lit.value())?; + Ok(()) + } else { + Err(meta.error("unsupported attribute")) + } + })?; + } + } + + Ok(reth_codecs_path) +} diff --git a/crates/storage/codecs/derive/src/compact/mod.rs b/crates/storage/codecs/derive/src/compact/mod.rs index e5a79b3fe53..1c1723d2ec9 100644 --- a/crates/storage/codecs/derive/src/compact/mod.rs +++ b/crates/storage/codecs/derive/src/compact/mod.rs @@ -43,13 +43,13 @@ pub enum FieldTypes { pub fn derive(input: TokenStream, is_zstd: bool) -> TokenStream { let mut output = quote! {}; - let DeriveInput { ident, data, generics, .. } = parse_macro_input!(input); + let DeriveInput { ident, data, generics, attrs, .. } = parse_macro_input!(input); let has_lifetime = has_lifetime(&generics); let fields = get_fields(&data); - output.extend(generate_flag_struct(&ident, has_lifetime, &fields, is_zstd)); - output.extend(generate_from_to(&ident, has_lifetime, &fields, is_zstd)); + output.extend(generate_flag_struct(&ident, &attrs, has_lifetime, &fields, is_zstd)); + output.extend(generate_from_to(&ident, &attrs, has_lifetime, &fields, is_zstd)); output.into() } @@ -233,10 +233,10 @@ mod tests { // Generate code that will impl the `Compact` trait. let mut output = quote! {}; - let DeriveInput { ident, data, .. } = parse2(f_struct).unwrap(); + let DeriveInput { ident, data, attrs, .. } = parse2(f_struct).unwrap(); let fields = get_fields(&data); - output.extend(generate_flag_struct(&ident, false, &fields, false)); - output.extend(generate_from_to(&ident, false, &fields, false)); + output.extend(generate_flag_struct(&ident, &attrs, false, &fields, false)); + output.extend(generate_from_to(&ident, &attrs, false, &fields, false)); // Expected output in a TokenStream format. Commas matter! let should_output = quote! { @@ -255,8 +255,9 @@ mod tests { #[allow(non_snake_case)] mod TestStruct_flags { - use bytes::Buf; - use modular_bitfield::prelude::*; + use reth_codecs::__private::Buf; + use reth_codecs::__private::modular_bitfield; + use reth_codecs::__private::modular_bitfield::prelude::*; #[doc = "Fieldset that facilitates compacting the parent type. 
Used bytes: 2 | Unused bits: 1"] #[bitfield] #[derive(Clone, Copy, Debug, Default)] @@ -285,6 +286,7 @@ mod tests { #[allow(dead_code)] #[test_fuzz::test_fuzz] fn fuzz_test_test_struct(obj: TestStruct) { + use reth_codecs::Compact; let mut buf = vec![]; let len = obj.clone().to_compact(&mut buf); let (same_obj, buf) = TestStruct::from_compact(buf.as_ref(), len); @@ -295,7 +297,7 @@ mod tests { pub fn fuzz_test_struct() { fuzz_test_test_struct(TestStruct::default()) } - impl Compact for TestStruct { + impl reth_codecs::Compact for TestStruct { fn to_compact(&self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]> { let mut flags = TestStructFlags::default(); let mut total_length = 0; diff --git a/crates/storage/codecs/derive/src/lib.rs b/crates/storage/codecs/derive/src/lib.rs index 4ffdbfd6ef6..0b4015830f5 100644 --- a/crates/storage/codecs/derive/src/lib.rs +++ b/crates/storage/codecs/derive/src/lib.rs @@ -49,14 +49,14 @@ mod compact; /// own encoding and do not rely on the bitflag struct. /// - `Bytes` fields and any types containing a `Bytes` field should be placed last to ensure /// efficient decoding. -#[proc_macro_derive(Compact, attributes(maybe_zero))] +#[proc_macro_derive(Compact, attributes(maybe_zero, reth_codecs))] pub fn derive(input: TokenStream) -> TokenStream { let is_zstd = false; compact::derive(input, is_zstd) } /// Adds `zstd` compression to derived [`Compact`]. -#[proc_macro_derive(CompactZstd, attributes(maybe_zero))] +#[proc_macro_derive(CompactZstd, attributes(maybe_zero, reth_codecs))] pub fn derive_zstd(input: TokenStream) -> TokenStream { let is_zstd = true; compact::derive(input, is_zstd) diff --git a/crates/storage/codecs/src/alloy/access_list.rs b/crates/storage/codecs/src/alloy/access_list.rs index 306b64d7e4b..304b6bd388c 100644 --- a/crates/storage/codecs/src/alloy/access_list.rs +++ b/crates/storage/codecs/src/alloy/access_list.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AccessList`] + use crate::Compact; use alloc::vec::Vec; use alloy_eips::eip2930::{AccessList, AccessListItem}; diff --git a/crates/storage/codecs/src/alloy/authorization_list.rs b/crates/storage/codecs/src/alloy/authorization_list.rs index 3efe1359062..15285f36047 100644 --- a/crates/storage/codecs/src/alloy/authorization_list.rs +++ b/crates/storage/codecs/src/alloy/authorization_list.rs @@ -1,19 +1,25 @@ -use core::ops::Deref; +//! Compact implementation for [`AlloyAuthorization`] use crate::Compact; use alloy_eips::eip7702::{Authorization as AlloyAuthorization, SignedAuthorization}; use alloy_primitives::{Address, U256}; use bytes::Buf; +use core::ops::Deref; use reth_codecs_derive::add_arbitrary_tests; /// Authorization acts as bridge which simplifies Compact implementation for AlloyAuthorization. 
/// /// Notice: Make sure this struct is 1:1 with `alloy_eips::eip7702::Authorization` #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct Authorization { - chain_id: U256, + chain_id: u64, address: Address, nonce: u64, } @@ -44,11 +50,9 @@ impl Compact for SignedAuthorization { where B: bytes::BufMut + AsMut<[u8]>, { - let signature = self.signature(); - let (v, r, s) = (signature.v(), signature.r(), signature.s()); - buf.put_u8(v.y_parity_byte()); - buf.put_slice(r.as_le_slice()); - buf.put_slice(s.as_le_slice()); + buf.put_u8(self.y_parity()); + buf.put_slice(self.r().as_le_slice()); + buf.put_slice(self.s().as_le_slice()); // to_compact doesn't write the len to buffer. // By placing it as last, we don't need to store it either. @@ -56,17 +60,15 @@ impl Compact for SignedAuthorization { } fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) { - let y = alloy_primitives::Parity::Parity(buf.get_u8() == 1); + let y_parity = buf.get_u8(); let r = U256::from_le_slice(&buf[0..32]); buf.advance(32); let s = U256::from_le_slice(&buf[0..32]); buf.advance(32); - let signature = alloy_primitives::Signature::from_rs_and_parity(r, s, y) - .expect("invalid authorization signature"); let (auth, buf) = AlloyAuthorization::from_compact(buf, len); - (auth.into_signed(signature), buf) + (Self::new_unchecked(auth, y_parity, r, s), buf) } } @@ -78,17 +80,16 @@ mod tests { #[test] fn test_roundtrip_compact_authorization_list_item() { let authorization = AlloyAuthorization { - chain_id: U256::from(1), + chain_id: 1u64, address: address!("dac17f958d2ee523a2206206994597c13d831ec7"), nonce: 1, } .into_signed( - alloy_primitives::Signature::from_rs_and_parity( + alloy_primitives::PrimitiveSignature::new( b256!("1fd474b1f9404c0c5df43b7620119ffbc3a1c3f942c73b6e14e9f55255ed9b1d").into(), b256!("29aca24813279a901ec13b5f7bb53385fa1fc627b946592221417ff74a49600d").into(), false, ) - .unwrap(), ); let mut compacted_authorization = Vec::::new(); let len = authorization.to_compact(&mut compacted_authorization); diff --git a/crates/storage/codecs/src/alloy/genesis_account.rs b/crates/storage/codecs/src/alloy/genesis_account.rs index 938ad1375b1..a35d4947db7 100644 --- a/crates/storage/codecs/src/alloy/genesis_account.rs +++ b/crates/storage/codecs/src/alloy/genesis_account.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyGenesisAccount`] + use crate::Compact; use alloc::vec::Vec; use alloy_genesis::GenesisAccount as AlloyGenesisAccount; @@ -9,6 +11,7 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with `alloy_genesis::GenesisAccount` #[derive(Debug, Clone, PartialEq, Eq, Compact)] +#[reth_codecs(crate = "crate")] pub(crate) struct GenesisAccountRef<'a> { /// The nonce of the account at genesis. nonce: Option, @@ -22,9 +25,16 @@ pub(crate) struct GenesisAccountRef<'a> { private_key: Option<&'a B256>, } +/// Acts as bridge which simplifies Compact implementation for +/// `AlloyGenesisAccount`. 
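Byte layout produced by the `SignedAuthorization` impl above, for orientation (lengths in bytes):

    // [ y_parity: 1 ][ r: 32, little-endian ][ s: 32, little-endian ][ inner Authorization, Compact ]
    // `from_compact` reads the same fields back and reassembles the value with
    // `SignedAuthorization::new_unchecked(auth, y_parity, r, s)`. The signature part is
    // fixed-size, so, as the code comments note, no length needs to be stored.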
#[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct GenesisAccount { /// The nonce of the account at genesis. nonce: Option, @@ -39,15 +49,23 @@ pub(crate) struct GenesisAccount { } #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct StorageEntries { entries: Vec, } #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct StorageEntry { key: B256, value: B256, diff --git a/crates/storage/codecs/src/alloy/header.rs b/crates/storage/codecs/src/alloy/header.rs index 3a17ed1fdcd..418b8b9032b 100644 --- a/crates/storage/codecs/src/alloy/header.rs +++ b/crates/storage/codecs/src/alloy/header.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyHeader`] + use crate::Compact; use alloy_consensus::Header as AlloyHeader; use alloy_primitives::{Address, BlockNumber, Bloom, Bytes, B256, U256}; @@ -10,8 +12,13 @@ use alloy_primitives::{Address, BlockNumber, Bloom, Bytes, B256, U256}; /// will automatically apply to this type. /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::Header`] -#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(serde::Serialize, serde::Deserialize, arbitrary::Arbitrary) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[reth_codecs(crate = "crate")] pub(crate) struct Header { parent_hash: B256, ommers_hash: B256, @@ -42,10 +49,16 @@ pub(crate) struct Header { /// used as a field of [`Header`] for backwards compatibility. /// /// More information: & [`reth_codecs_derive::Compact`]. -#[cfg_attr(test, derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(serde::Serialize, serde::Deserialize, arbitrary::Arbitrary) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] +#[reth_codecs(crate = "crate")] pub(crate) struct HeaderExt { - requests_root: Option, + requests_hash: Option, + target_blobs_per_block: Option, } impl HeaderExt { @@ -53,7 +66,7 @@ impl HeaderExt { /// /// Required since [`Header`] uses `Option` as a field. 
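A hedged illustration of the `into_option` semantics defined next (field values are arbitrary examples):

    // A fully empty extension collapses to `None`, so pre-existing headers keep their encoding:
    let empty = HeaderExt { requests_hash: None, target_blobs_per_block: None };
    assert!(empty.into_option().is_none());
    // Any populated field keeps the extension alive:
    let ext = HeaderExt { requests_hash: Some(B256::ZERO), target_blobs_per_block: None };
    assert!(ext.into_option().is_some());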
const fn into_option(self) -> Option { - if self.requests_root.is_some() { + if self.requests_hash.is_some() || self.target_blobs_per_block.is_some() { Some(self) } else { None @@ -66,7 +79,7 @@ impl Compact for AlloyHeader { where B: bytes::BufMut + AsMut<[u8]>, { - let extra_fields = HeaderExt { requests_root: self.requests_root }; + let extra_fields = HeaderExt { requests_hash: self.requests_hash, target_blobs_per_block: self.target_blobs_per_block }; let header = Header { parent_hash: self.parent_hash, @@ -116,8 +129,9 @@ impl Compact for AlloyHeader { blob_gas_used: header.blob_gas_used, excess_blob_gas: header.excess_blob_gas, parent_beacon_block_root: header.parent_beacon_block_root, - requests_root: header.extra_fields.and_then(|h| h.requests_root), + requests_hash: header.extra_fields.as_ref().and_then(|h| h.requests_hash), extra_data: header.extra_data, + target_blobs_per_block: header.extra_fields.as_ref().and_then(|h| h.target_blobs_per_block), }; (alloy_header, buf) } @@ -126,12 +140,13 @@ impl Compact for AlloyHeader { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_primitives::{address, b256, bloom, bytes, hex}; /// Holesky block #1947953 const HOLESKY_BLOCK: Header = Header { parent_hash: b256!("8605e0c46689f66b3deed82598e43d5002b71a929023b665228728f0c6e62a95"), - ommers_hash: b256!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: address!("c6e2459991bfe27cca6d86722f35da23a1e4cb97"), state_root: b256!("edad188ca5647d62f4cca417c11a1afbadebce30d23260767f6f587e9b3b9993"), transactions_root: b256!("4daf25dc08a841aa22aa0d3cb3e1f159d4dcaf6a6063d4d36bfac11d3fdb63ee"), @@ -175,7 +190,7 @@ mod tests { #[test] fn test_extra_fields() { let mut header = HOLESKY_BLOCK; - header.extra_fields = Some(HeaderExt { requests_root: Some(B256::random()) }); + header.extra_fields = Some(HeaderExt { requests_hash: Some(B256::random()), target_blobs_per_block: Some(3) }); let mut encoded_header = vec![]; let len = header.to_compact(&mut encoded_header); diff --git a/crates/storage/codecs/src/alloy/mod.rs b/crates/storage/codecs/src/alloy/mod.rs index 942258d0647..697bac901e4 100644 --- a/crates/storage/codecs/src/alloy/mod.rs +++ b/crates/storage/codecs/src/alloy/mod.rs @@ -1,20 +1,34 @@ -mod access_list; -mod authorization_list; -mod genesis_account; -mod header; -mod log; -mod request; -mod signature; -mod transaction; -mod trie; -mod txkind; -mod withdrawal; +//! Implements Compact for alloy types. + +/// Will make it a pub mod if test-utils is enabled +macro_rules! 
cond_mod { + ($($mod_name:ident),*) => { + $( + #[cfg(feature = "test-utils")] + pub mod $mod_name; + #[cfg(not(feature = "test-utils"))] + pub(crate) mod $mod_name; + )* + }; +} + +cond_mod!( + access_list, + authorization_list, + genesis_account, + header, + log, + signature, + transaction, + trie, + txkind, + withdrawal +); #[cfg(test)] mod tests { use crate::{ alloy::{ - authorization_list::Authorization, genesis_account::{GenesisAccount, GenesisAccountRef, StorageEntries, StorageEntry}, header::{Header, HeaderExt}, transaction::{ @@ -38,7 +52,6 @@ mod tests { validate_bitflag_backwards_compat!(StorageEntries, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageEntry, UnusedBits::Zero); - validate_bitflag_backwards_compat!(Authorization, UnusedBits::NotZero); validate_bitflag_backwards_compat!(GenesisAccountRef<'_>, UnusedBits::NotZero); validate_bitflag_backwards_compat!(GenesisAccount, UnusedBits::NotZero); validate_bitflag_backwards_compat!(TxEip1559, UnusedBits::NotZero); diff --git a/crates/storage/codecs/src/alloy/request.rs b/crates/storage/codecs/src/alloy/request.rs deleted file mode 100644 index 2447160beb6..00000000000 --- a/crates/storage/codecs/src/alloy/request.rs +++ /dev/null @@ -1,40 +0,0 @@ -//! Native Compact codec impl for EIP-7685 requests. - -use crate::Compact; -use alloy_consensus::Request; -use alloy_eips::eip7685::{Decodable7685, Encodable7685}; -use alloy_primitives::Bytes; -use bytes::BufMut; - -impl Compact for Request { - fn to_compact(&self, buf: &mut B) -> usize - where - B: BufMut + AsMut<[u8]>, - { - let encoded: Bytes = self.encoded_7685().into(); - encoded.to_compact(buf) - } - - fn from_compact(buf: &[u8], _: usize) -> (Self, &[u8]) { - let (raw, buf) = Bytes::from_compact(buf, buf.len()); - - (Self::decode_7685(&mut raw.as_ref()).expect("invalid eip-7685 request in db"), buf) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use proptest::proptest; - use proptest_arbitrary_interop::arb; - - proptest! { - #[test] - fn roundtrip(request in arb::()) { - let mut buf = Vec::::new(); - request.to_compact(&mut buf); - let (decoded, _) = Request::from_compact(&buf, buf.len()); - assert_eq!(request, decoded); - } - } -} diff --git a/crates/storage/codecs/src/alloy/signature.rs b/crates/storage/codecs/src/alloy/signature.rs index 70290ea96c1..b8fd19cf35a 100644 --- a/crates/storage/codecs/src/alloy/signature.rs +++ b/crates/storage/codecs/src/alloy/signature.rs @@ -1,6 +1,7 @@ -use alloy_primitives::{Parity, Signature, U256}; +//! Compact implementation for [`Signature`] use crate::Compact; +use alloy_primitives::{PrimitiveSignature as Signature, U256}; impl Compact for Signature { fn to_compact(&self, buf: &mut B) -> usize @@ -9,7 +10,7 @@ impl Compact for Signature { { buf.put_slice(&self.r().as_le_bytes()); buf.put_slice(&self.s().as_le_bytes()); - self.v().y_parity() as usize + self.v() as usize } fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { @@ -18,6 +19,6 @@ impl Compact for Signature { let r = U256::from_le_slice(&buf[0..32]); let s = U256::from_le_slice(&buf[32..64]); buf.advance(64); - (Self::new(r, s, Parity::Parity(identifier != 0)), buf) + (Self::new(r, s, identifier != 0), buf) } } diff --git a/crates/storage/codecs/src/alloy/transaction/eip1559.rs b/crates/storage/codecs/src/alloy/transaction/eip1559.rs index 8e7594951fa..6d910a6900c 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip1559.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip1559.rs @@ -1,3 +1,5 @@ +//! 
Compact implementation for [`AlloyTxEip1559`] + use crate::Compact; use alloy_consensus::TxEip1559 as AlloyTxEip1559; use alloy_eips::eip2930::AccessList; @@ -11,8 +13,13 @@ use alloy_primitives::{Bytes, ChainId, TxKind, U256}; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip1559`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Compact, Default)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[cfg_attr(test, crate::add_arbitrary_tests(compact))] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(any(test, feature = "test-utils"), crate::add_arbitrary_tests(crate, compact))] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] pub(crate) struct TxEip1559 { chain_id: ChainId, nonce: u64, diff --git a/crates/storage/codecs/src/alloy/transaction/eip2930.rs b/crates/storage/codecs/src/alloy/transaction/eip2930.rs index e0c78a3e4c0..aeb08f361be 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip2930.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip2930.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyTxEip2930`] + use crate::Compact; use alloy_consensus::TxEip2930 as AlloyTxEip2930; use alloy_eips::eip2930::AccessList; @@ -13,8 +15,13 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip2930`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxEip2930 { chain_id: ChainId, nonce: u64, diff --git a/crates/storage/codecs/src/alloy/transaction/eip4844.rs b/crates/storage/codecs/src/alloy/transaction/eip4844.rs index 27c6b924090..fac9ab9a1b2 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip4844.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip4844.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyTxEip4844`] + use crate::{Compact, CompactPlaceholder}; use alloc::vec::Vec; use alloy_consensus::TxEip4844 as AlloyTxEip4844; @@ -14,8 +16,10 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip4844`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[cfg_attr(any(test, feature = "test-utils"), derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxEip4844 { chain_id: ChainId, nonce: u64, @@ -25,6 +29,13 @@ pub(crate) struct TxEip4844 { /// TODO(debt): this should be removed if we break the DB. 
/// Makes sure that the Compact bitflag struct has one bit after the above field: /// + #[cfg_attr( + feature = "test-utils", + serde( + serialize_with = "serialize_placeholder", + deserialize_with = "deserialize_placeholder" + ) + )] placeholder: Option, to: Address, value: U256, @@ -75,6 +86,54 @@ impl Compact for AlloyTxEip4844 { } } +#[cfg(any(test, feature = "test-utils"))] +impl<'a> arbitrary::Arbitrary<'a> for TxEip4844 { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + Ok(Self { + chain_id: ChainId::arbitrary(u)?, + nonce: u64::arbitrary(u)?, + gas_limit: u64::arbitrary(u)?, + max_fee_per_gas: u128::arbitrary(u)?, + max_priority_fee_per_gas: u128::arbitrary(u)?, + // Should always be Some for TxEip4844 + placeholder: Some(()), + to: Address::arbitrary(u)?, + value: U256::arbitrary(u)?, + access_list: AccessList::arbitrary(u)?, + blob_versioned_hashes: Vec::::arbitrary(u)?, + max_fee_per_blob_gas: u128::arbitrary(u)?, + input: Bytes::arbitrary(u)?, + }) + } +} + +#[cfg(feature = "test-utils")] +fn serialize_placeholder(value: &Option<()>, serializer: S) -> Result +where + S: serde::Serializer, +{ + // Required otherwise `serde_json` will serialize it as null and would be `None` when decoding + // it again. + match value { + Some(()) => serializer.serialize_str("placeholder"), // Custom serialization + None => serializer.serialize_none(), + } +} + +#[cfg(feature = "test-utils")] +fn deserialize_placeholder<'de, D>(deserializer: D) -> Result, D::Error> +where + D: serde::Deserializer<'de>, +{ + use serde::de::Deserialize; + let s: Option = Option::deserialize(deserializer)?; + match s.as_deref() { + Some("placeholder") => Ok(Some(())), + None => Ok(None), + _ => Err(serde::de::Error::custom("unexpected value")), + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/storage/codecs/src/alloy/transaction/eip7702.rs b/crates/storage/codecs/src/alloy/transaction/eip7702.rs index e714be1c3f6..eab10af0b66 100644 --- a/crates/storage/codecs/src/alloy/transaction/eip7702.rs +++ b/crates/storage/codecs/src/alloy/transaction/eip7702.rs @@ -1,3 +1,5 @@ +//! Compact implementation for [`AlloyTxEip7702`] + use crate::Compact; use alloc::vec::Vec; use alloy_consensus::TxEip7702 as AlloyTxEip7702; @@ -14,8 +16,13 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`alloy_consensus::TxEip7702`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxEip7702 { chain_id: ChainId, nonce: u64, diff --git a/crates/storage/codecs/src/alloy/transaction/legacy.rs b/crates/storage/codecs/src/alloy/transaction/legacy.rs index 27e799a790e..60250ba64af 100644 --- a/crates/storage/codecs/src/alloy/transaction/legacy.rs +++ b/crates/storage/codecs/src/alloy/transaction/legacy.rs @@ -1,11 +1,18 @@ +//! Compact implementation for [`AlloyTxLegacy`] + use crate::Compact; use alloy_consensus::TxLegacy as AlloyTxLegacy; use alloy_primitives::{Bytes, ChainId, TxKind, U256}; /// Legacy transaction. 
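Recapping the EIP-4844 placeholder handling above: the custom (de)serializers exist purely so that JSON roundtrips preserve the marker field the bitflags rely on:

    // Some(())  ->  "placeholder"  ->  Some(())
    // None      ->  null           ->  None
    // Without them, serde_json would emit `null` for `Some(())` and read it back as `None`,
    // silently flipping the placeholder bit in the Compact encoding.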
#[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[cfg_attr(test, crate::add_arbitrary_tests(compact))] +#[reth_codecs(crate = "crate")] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize), + crate::add_arbitrary_tests(crate, compact) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] pub(crate) struct TxLegacy { /// Added as EIP-155: Simple replay attack protection chain_id: Option, diff --git a/crates/storage/codecs/src/alloy/transaction/mod.rs b/crates/storage/codecs/src/alloy/transaction/mod.rs index 5b1d173a528..5c829ddf56b 100644 --- a/crates/storage/codecs/src/alloy/transaction/mod.rs +++ b/crates/storage/codecs/src/alloy/transaction/mod.rs @@ -1,10 +1,19 @@ -pub(crate) mod eip1559; -pub(crate) mod eip2930; -pub(crate) mod eip4844; -pub(crate) mod eip7702; -pub(crate) mod legacy; -#[cfg(feature = "optimism")] -pub(crate) mod optimism; +//! Compact implementation for transaction types + +cond_mod!( + eip1559, + eip2930, + eip4844, + eip7702, + legacy, + txtype +); + + +#[cfg(all(feature = "test-utils", feature = "op"))] +pub mod optimism; +#[cfg(all(not(feature = "test-utils"), feature = "op"))] +mod optimism; #[cfg(test)] mod tests { @@ -15,9 +24,13 @@ mod tests { // this check is to ensure we do not inadvertently add too many fields to a struct which would // expand the flags field and break backwards compatibility - use crate::alloy::transaction::{ - eip1559::TxEip1559, eip2930::TxEip2930, eip4844::TxEip4844, eip7702::TxEip7702, - legacy::TxLegacy, + use alloy_primitives::hex; + use crate::{ + alloy::{header::Header, transaction::{ + eip1559::TxEip1559, eip2930::TxEip2930, eip4844::TxEip4844, eip7702::TxEip7702, + legacy::TxLegacy, + }}, + test_utils::test_decode, }; #[test] @@ -29,9 +42,59 @@ mod tests { assert_eq!(TxEip7702::bitflag_encoded_bytes(), 4); } - #[cfg(feature = "optimism")] + #[cfg(feature = "op")] #[test] fn test_ensure_backwards_compatibility_optimism() { assert_eq!(crate::alloy::transaction::optimism::TxDeposit::bitflag_encoded_bytes(), 2); } + + #[test] + fn test_decode_header() { + test_decode::
(&hex!( + "01000000fbbb564baeafd064b979c2ac032df5cd987098066a8c6969514dfb8ecfbf043e667fa19efcc00d1dd197c309a3cc42dec820cd627af8f7f38f3274f842406891b22624431d0ea858422db8415b1181f8d19befbd21287debaf98a94e84b3ec20be846f35abfbf743ee3eda4fdda6a6f9124d295da97e26eaa1cedd09936f0a3c560b6bc10316dba5e82abd21afcf519a985feb09a6ce7fba2e8163b10f06c99828b8049c29b993d88d1d112dca60a03ebd8ebc6d69a7e1f301ca6d67c21fe0949d67bca251edf36c96a2cf7c84d98fc60a53988ac95820f434eb35280d98c8ba4d7484e7ee8fefd63591ad4c937ccaaea23871d05c77bac754c5759b34cf9b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + )); + } + + #[test] + fn test_decode_eip1559() { + test_decode::(&hex!( + "88086110b81b05bc5bb59ec3e4cd44e895a9dcb2656d5003e2f64ecb2e15443898cc1cc19af19ca96fc2b4eafc4abc26e4bbd70a3ddb10b7530b65eea128f4095c97164f712c04239902c1b08acf3949d4687123cdd72d5c73df113d2dc6ed7e519f410ace5553ca805975240a208b57013532de78c5cb407423ea11921ab11b13e93ef35d4d01c9a23166c4d627987545fe4675528d0ab111b0a1dc83fba0a4e1cd5c826a94db3f" + )); + } + + #[test] + fn test_decode_eip2930() { + test_decode::(&hex!( + "7810833fce14e3e2921e94fd3727eb71e91551d2c1e029697a654bfab510f3963aa57074015e152065d1c807f8830079fb0aeadc251d248eaec7147e78580ed638c4e667827775e24270edd5aad475776533ece65373afa71722bfeba3c900" + )); + } + + #[test] + fn test_decode_eip4844() { + test_decode::(&hex!( + "88086110025c359180ea680b5007c856f9e1ad4d1be7a5019feb42133f4fc4bdf74da1b457ab787462385a28a1bf8edb401adabf3ff21ac18f695e30180348ea67246fc4dc25e88add12b7c317651a0ce08946d98dbbe5b38883aa758a0f247e23b0fe3ac1bcc43d7212c984d6ccc770d70135890c9a07d715cacb9032c90d539d0b3d209a8d600178bcfb416fd489e5d5dd56d9cfc6addae810ae70bdaee65672b871dc2b3f35ec00dbaa0d872f78cb58b3199984c608c8ba" + )); + } + + #[test] + fn test_decode_eip7702() { + test_decode::(&hex!( + "8808210881415c034feba383d7a6efd3f2601309b33a6d682ad47168cac0f7a5c5136a33370e5e7ca7f570d5530d7a0d18bf5eac33583fdc27b6580f61e8cbd34d6de596f925c1f353188feb2c1e9e20de82a80b57f0be425d8c5896280d4f5f66cdcfba256d0c9ac8abd833859a62ec019501b4585fa176f048de4f88b93bdefecfcaf4d8f0dd04767bc683a4569c893632e44ba9d53f90d758125c9b24c0192a649166520cd5eecbc110b53eda400cf184b8ef9932c81d0deb2ea27dfa863392a87bfd53af3ec67379f20992501e76e387cbe3933861beead1b49649383cf8b2a2d5c6d04b7edc376981ed9b12cf7199fe7fabf5198659e001bed40922969b82a6cd000000000000" + )); + } + + #[test] + fn test_decode_legacy() { + test_decode::(&hex!( + "112210080a8ba06a8d108540bb3140e9f71a0812c46226f9ea77ae880d98d19fe27e5911801175c3b32620b2e887af0296af343526e439b775ee3b1c06750058e9e5fc4cd5965c3010f86184" + )); + } + + #[cfg(feature = "op")] + #[test] + fn test_decode_deposit() { + test_decode::(&hex!( + "8108ac8f15983d59b6ae4911a00ff7bfcd2e53d2950926f8c82c12afad02861c46fcb293e776204052725e1c08ff2e9ff602ca916357601fa972a14094891fe3598b718758f22c46f163c18bcaa6296ce87e5267ef3fd932112842fbbf79011548cdf067d93ce6098dfc0aaf5a94531e439f30d6dfd0c6" + )); + } } diff --git a/crates/storage/codecs/src/alloy/transaction/optimism.rs b/crates/storage/codecs/src/alloy/transaction/optimism.rs index f4fdcf5ee44..631f5c406ee 100644 --- 
a/crates/storage/codecs/src/alloy/transaction/optimism.rs +++ b/crates/storage/codecs/src/alloy/transaction/optimism.rs @@ -1,7 +1,11 @@ +//! Compact implementation for [`AlloyTxDeposit`] + +use alloy_consensus::constants::EIP7702_TX_TYPE_ID; use crate::Compact; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; -use op_alloy_consensus::TxDeposit as AlloyTxDeposit; +use op_alloy_consensus::{OpTxType, TxDeposit as AlloyTxDeposit}; use reth_codecs_derive::add_arbitrary_tests; +use crate::txtype::{COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, COMPACT_IDENTIFIER_LEGACY}; /// Deposit transactions, also known as deposits are initiated on L1, and executed on L2. /// @@ -12,8 +16,13 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with [`op_alloy_consensus::TxDeposit`] #[derive(Debug, Clone, PartialEq, Eq, Hash, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] +#[reth_codecs(crate = "crate")] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct TxDeposit { source_hash: B256, from: Address, @@ -58,3 +67,51 @@ impl Compact for AlloyTxDeposit { (alloy_tx, buf) } } + + +impl crate::Compact for OpTxType { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + use crate::txtype::*; + + match self { + Self::Legacy => COMPACT_IDENTIFIER_LEGACY, + Self::Eip2930 => COMPACT_IDENTIFIER_EIP2930, + Self::Eip1559 => COMPACT_IDENTIFIER_EIP1559, + Self::Eip7702 => { + buf.put_u8(EIP7702_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + Self::Deposit => { + buf.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + } + } + + // For backwards compatibility purposes only 2 bits of the type are encoded in the identifier + // parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type + // is read from the buffer as a single byte. + fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { + use bytes::Buf; + ( + match identifier { + COMPACT_IDENTIFIER_LEGACY => Self::Legacy, + COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930, + COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559, + COMPACT_EXTENDED_IDENTIFIER_FLAG => { + let extended_identifier = buf.get_u8(); + match extended_identifier { + EIP7702_TX_TYPE_ID => Self::Eip7702, + op_alloy_consensus::DEPOSIT_TX_TYPE_ID => Self::Deposit, + _ => panic!("Unsupported TxType identifier: {extended_identifier}"), + } + } + _ => panic!("Unknown identifier for TxType: {identifier}"), + }, + buf, + ) + } +} \ No newline at end of file diff --git a/crates/storage/codecs/src/alloy/transaction/txtype.rs b/crates/storage/codecs/src/alloy/transaction/txtype.rs new file mode 100644 index 00000000000..63f80bfaf20 --- /dev/null +++ b/crates/storage/codecs/src/alloy/transaction/txtype.rs @@ -0,0 +1,97 @@ +//! 
Compact implementation for [`TxType`] + +use crate::txtype::{COMPACT_EXTENDED_IDENTIFIER_FLAG, COMPACT_IDENTIFIER_EIP1559, COMPACT_IDENTIFIER_EIP2930, COMPACT_IDENTIFIER_LEGACY}; +use alloy_consensus::constants::{EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}; +use alloy_consensus::TxType; + +impl crate::Compact for TxType { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + use crate::txtype::*; + + match self { + Self::Legacy => COMPACT_IDENTIFIER_LEGACY, + Self::Eip2930 => COMPACT_IDENTIFIER_EIP2930, + Self::Eip1559 => COMPACT_IDENTIFIER_EIP1559, + Self::Eip4844 => { + buf.put_u8(EIP4844_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + Self::Eip7702 => { + buf.put_u8(EIP7702_TX_TYPE_ID); + COMPACT_EXTENDED_IDENTIFIER_FLAG + } + } + } + + // For backwards compatibility purposes only 2 bits of the type are encoded in the identifier + // parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type + // is read from the buffer as a single byte. + fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { + use bytes::Buf; + ( + match identifier { + COMPACT_IDENTIFIER_LEGACY => Self::Legacy, + COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930, + COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559, + COMPACT_EXTENDED_IDENTIFIER_FLAG => { + let extended_identifier = buf.get_u8(); + match extended_identifier { + EIP4844_TX_TYPE_ID => Self::Eip4844, + EIP7702_TX_TYPE_ID => Self::Eip7702, + _ => panic!("Unsupported TxType identifier: {extended_identifier}"), + } + } + _ => panic!("Unknown identifier for TxType: {identifier}"), + }, + buf, + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rstest::rstest; + + use alloy_consensus::constants::{EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}; + use crate::Compact; + + + #[rstest] + #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] + #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + fn test_txtype_to_compact( + #[case] tx_type: TxType, + #[case] expected_identifier: usize, + #[case] expected_buf: Vec, + ) { + let mut buf = vec![]; + let identifier = tx_type.to_compact(&mut buf); + + assert_eq!(identifier, expected_identifier, "Unexpected identifier for TxType {tx_type:?}",); + assert_eq!(buf, expected_buf, "Unexpected buffer for TxType {tx_type:?}",); + } + + #[rstest] + #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] + #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] + #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] + #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] + #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] + fn test_txtype_from_compact( + #[case] expected_type: TxType, + #[case] identifier: usize, + #[case] buf: Vec, + ) { + let (actual_type, remaining_buf) = TxType::from_compact(&buf, identifier); + + assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}"); + assert!(remaining_buf.is_empty(), "Buffer not fully consumed for identifier {identifier}"); + } +} \ No newline at end of file diff --git a/crates/storage/codecs/src/alloy/withdrawal.rs b/crates/storage/codecs/src/alloy/withdrawal.rs index 16324c280cc..09e80d1faa7 100644 --- a/crates/storage/codecs/src/alloy/withdrawal.rs 
+++ b/crates/storage/codecs/src/alloy/withdrawal.rs @@ -1,5 +1,8 @@ +//! Compact implementation for [`AlloyWithdrawal`] + use crate::Compact; -use alloy_eips::eip4895::Withdrawal as AlloyWithdrawal; +use alloc::vec::Vec; +use alloy_eips::eip4895::{Withdrawal as AlloyWithdrawal, Withdrawals}; use alloy_primitives::Address; use reth_codecs_derive::add_arbitrary_tests; @@ -7,8 +10,13 @@ use reth_codecs_derive::add_arbitrary_tests; /// /// Notice: Make sure this struct is 1:1 with `alloy_eips::eip4895::Withdrawal` #[derive(Debug, Clone, PartialEq, Eq, Default, Compact)] -#[cfg_attr(test, derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize))] -#[add_arbitrary_tests(compact)] +#[cfg_attr( + any(test, feature = "test-utils"), + derive(arbitrary::Arbitrary, serde::Serialize, serde::Deserialize) +)] +#[reth_codecs(crate = "crate")] +#[cfg_attr(feature = "test-utils", allow(unreachable_pub), visibility::make(pub))] +#[add_arbitrary_tests(crate, compact)] pub(crate) struct Withdrawal { /// Monotonically increasing identifier issued by consensus layer. index: u64, @@ -46,6 +54,22 @@ impl Compact for AlloyWithdrawal { } } +impl Compact for Withdrawals { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + self.as_ref().to_compact(buf) + } + + fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { + let (withdrawals, new_buf) = Vec::from_compact(buf, buf.len()); + buf = new_buf; + let alloy_withdrawals = Self::new(withdrawals); + (alloy_withdrawals, buf) + } +} + #[cfg(test)] mod tests { use super::*; @@ -54,12 +78,20 @@ mod tests { proptest! { #[test] - fn roundtrip(withdrawal in arb::()) { + fn roundtrip_withdrawal(withdrawal in arb::()) { let mut compacted_withdrawal = Vec::::new(); let len = withdrawal.to_compact(&mut compacted_withdrawal); let (decoded, _) = AlloyWithdrawal::from_compact(&compacted_withdrawal, len); assert_eq!(withdrawal, decoded) } + + #[test] + fn roundtrip_withdrawals(withdrawals in arb::()) { + let mut compacted_withdrawals = Vec::::new(); + let len = withdrawals.to_compact(&mut compacted_withdrawals); + let (decoded, _) = Withdrawals::from_compact(&compacted_withdrawals, len); + assert_eq!(withdrawals, decoded); + } } // each value in the database has an extra field named flags that encodes metadata about other diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 8608c5eb8c1..8c6ba5e4c76 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -17,20 +17,33 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] +extern crate alloc; + pub use reth_codecs_derive::*; +use serde as _; use alloy_primitives::{Address, Bloom, Bytes, FixedBytes, U256}; use bytes::{Buf, BufMut}; -extern crate alloc; use alloc::vec::Vec; +#[cfg(feature = "test-utils")] +pub mod alloy; + +#[cfg(not(feature = "test-utils"))] #[cfg(any(test, feature = "alloy"))] mod alloy; +pub mod txtype; + #[cfg(any(test, feature = "test-utils"))] pub mod test_utils; +// Used by generated code and doc tests. Not public API. +#[doc(hidden)] +#[path = "private.rs"] +pub mod __private; + /// Trait that implements the `Compact` codec. 
/// /// When deriving the trait for custom structs, be aware of certain limitations/recommendations: @@ -48,6 +61,12 @@ pub mod test_utils; /// Regarding the `specialized_to/from_compact` methods: Mainly used as a workaround for not being /// able to specialize an impl over certain types like `Vec`/`Option` where `T` is a fixed /// size array like `Vec`. +/// +/// ## Caution +/// +/// Due to the bitfields, every type change on the rust type (e.g. `U256` to `u64`) is a breaking +/// change and will lead to a new, incompatible [`Compact`] implementation. Implementers must take +/// special care when changing or rearranging fields. pub trait Compact: Sized { /// Takes a buffer which can be written to. *Ideally*, it returns the length written to. fn to_compact(&self, buf: &mut B) -> usize @@ -78,6 +97,21 @@ pub trait Compact: Sized { } } +impl Compact for alloc::string::String { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + self.as_bytes().to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (vec, buf) = Vec::::from_compact(buf, len); + let string = Self::from_utf8(vec).unwrap(); // Safe conversion + (string, buf) + } +} + impl Compact for &T { fn to_compact(&self, buf: &mut B) -> usize where @@ -484,7 +518,7 @@ mod tests { #[test] fn compact_address() { - let mut buf = vec![]; + let mut buf = Vec::with_capacity(21); assert_eq!(Address::ZERO.to_compact(&mut buf), 20); assert_eq!(buf, vec![0; 20]); @@ -636,7 +670,8 @@ mod tests { } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Compact, arbitrary::Arbitrary)] - #[add_arbitrary_tests(compact)] + #[add_arbitrary_tests(crate, compact)] + #[reth_codecs(crate = "crate")] struct TestStruct { f_u64: u64, f_u256: U256, @@ -688,7 +723,8 @@ mod tests { #[derive( Debug, PartialEq, Clone, Default, Serialize, Deserialize, Compact, arbitrary::Arbitrary, )] - #[add_arbitrary_tests(compact)] + #[add_arbitrary_tests(crate, compact)] + #[reth_codecs(crate = "crate")] enum TestEnum { #[default] Var0, diff --git a/crates/storage/codecs/src/private.rs b/crates/storage/codecs/src/private.rs new file mode 100644 index 00000000000..6f54d9c9ca8 --- /dev/null +++ b/crates/storage/codecs/src/private.rs @@ -0,0 +1,3 @@ +pub use modular_bitfield; + +pub use bytes::Buf; diff --git a/crates/storage/codecs/src/test_utils.rs b/crates/storage/codecs/src/test_utils.rs index bb377c69167..b845645cb1a 100644 --- a/crates/storage/codecs/src/test_utils.rs +++ b/crates/storage/codecs/src/test_utils.rs @@ -79,3 +79,12 @@ impl UnusedBits { matches!(self, Self::NotZero) } } + +/// Tests decoding and re-encoding to ensure correctness. +pub fn test_decode(buf: &[u8]) { + let (decoded, _) = T::from_compact(buf, buf.len()); + let mut encoded = Vec::with_capacity(buf.len()); + + decoded.to_compact(&mut encoded); + assert_eq!(buf, &encoded[..]); +} diff --git a/crates/storage/codecs/src/txtype.rs b/crates/storage/codecs/src/txtype.rs new file mode 100644 index 00000000000..ce392b59cd0 --- /dev/null +++ b/crates/storage/codecs/src/txtype.rs @@ -0,0 +1,15 @@ +//! Commonly used constants for transaction types. 
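How the two-bit identifier and the extension byte combine, per the constants that follow and the `TxType`/`OpTxType` impls above:

    // tx type    identifier returned by to_compact      extra buffer byte
    // Legacy     COMPACT_IDENTIFIER_LEGACY (0)           (none)
    // Eip2930    COMPACT_IDENTIFIER_EIP2930 (1)          (none)
    // Eip1559    COMPACT_IDENTIFIER_EIP1559 (2)          (none)
    // Eip4844    COMPACT_EXTENDED_IDENTIFIER_FLAG (3)    EIP4844_TX_TYPE_ID
    // Eip7702    COMPACT_EXTENDED_IDENTIFIER_FLAG (3)    EIP7702_TX_TYPE_ID
    // (on OP, Deposit likewise uses flag 3 with DEPOSIT_TX_TYPE_ID)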
+ +/// Identifier parameter for legacy transaction +pub const COMPACT_IDENTIFIER_LEGACY: usize = 0; + +/// Identifier parameter for EIP-2930 transaction +pub const COMPACT_IDENTIFIER_EIP2930: usize = 1; + +/// Identifier parameter for EIP-1559 transaction +pub const COMPACT_IDENTIFIER_EIP1559: usize = 2; + +/// For backwards compatibility purposes only 2 bits of the type are encoded in the identifier +/// parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type is +/// read from the buffer as a single byte. +pub const COMPACT_EXTENDED_IDENTIFIER_FLAG: usize = 3; diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index d674f9d7b68..c8d748b96f5 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -16,18 +16,20 @@ workspace = true reth-codecs.workspace = true reth-db-models.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde", "reth-codec"] } reth-prune-types.workspace = true -reth-stages-types.workspace = true +reth-stages-types = { workspace = true, features = ["reth-codec"] } reth-storage-errors.workspace = true reth-trie-common.workspace = true # ethereum alloy-primitives.workspace = true alloy-genesis.workspace = true +alloy-consensus.workspace = true # codecs modular-bitfield.workspace = true +roaring = "0.10.2" parity-scale-codec = { version = "3.2.1", features = ["bytes"] } serde = { workspace = true, default-features = false } @@ -56,11 +58,28 @@ proptest.workspace = true proptest-arbitrary-interop.workspace = true [features] -test-utils = ["arbitrary"] +test-utils = [ + "arbitrary", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-codecs/test-utils", + "reth-db-models/test-utils", + "reth-trie-common/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", +] arbitrary = [ "reth-primitives/arbitrary", "reth-db-models/arbitrary", "dep:arbitrary", "dep:proptest", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-primitives/arbitrary", + "parity-scale-codec/arbitrary", + "reth-codecs/arbitrary", + "reth-prune-types/arbitrary", + "reth-stages-types/arbitrary", + "alloy-consensus/arbitrary", ] -optimism = ["reth-primitives/optimism"] +optimism = ["reth-primitives/optimism", "reth-codecs/op"] diff --git a/crates/storage/db-api/src/cursor.rs b/crates/storage/db-api/src/cursor.rs index 585aa4947a2..9297f738ab5 100644 --- a/crates/storage/db-api/src/cursor.rs +++ b/crates/storage/db-api/src/cursor.rs @@ -152,12 +152,7 @@ where impl> Iterator for Walker<'_, T, CURSOR> { type Item = Result, DatabaseError>; fn next(&mut self) -> Option { - let start = self.start.take(); - if start.is_some() { - return start - } - - self.cursor.next().transpose() + self.start.take().or_else(|| self.cursor.next().transpose()) } } diff --git a/crates/storage/db-api/src/mock.rs b/crates/storage/db-api/src/mock.rs index e972821d8fe..5580727fdbe 100644 --- a/crates/storage/db-api/src/mock.rs +++ b/crates/storage/db-api/src/mock.rs @@ -7,7 +7,7 @@ use crate::{ ReverseWalker, Walker, }, database::Database, - table::{DupSort, Table, TableImporter}, + table::{DupSort, Encode, Table, TableImporter}, transaction::{DbTx, DbTxMut}, DatabaseError, }; @@ -49,6 +49,13 @@ impl DbTx for TxMock { Ok(None) } + fn get_by_encoded_key( + &self, + _key: &::Encoded, + ) -> Result, DatabaseError> { + Ok(None) + } + fn 
commit(self) -> Result { Ok(true) } diff --git a/crates/storage/db-api/src/models/blocks.rs b/crates/storage/db-api/src/models/blocks.rs index 7268d82dd3c..7c4b37b254d 100644 --- a/crates/storage/db-api/src/models/blocks.rs +++ b/crates/storage/db-api/src/models/blocks.rs @@ -1,19 +1,37 @@ //! Block related models and types. +use alloy_consensus::Header; use alloy_primitives::B256; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::Header; use serde::{Deserialize, Serialize}; /// The storage representation of a block's ommers. /// /// It is stored as the headers of the block's uncles. -#[derive(Debug, Default, Eq, PartialEq, Clone, Serialize, Deserialize, Compact)] +#[derive(Debug, Default, Eq, PartialEq, Clone, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] -pub struct StoredBlockOmmers { +pub struct StoredBlockOmmers { /// The block headers of this block's uncles. - pub ommers: Vec
, + pub ommers: Vec, +} + +impl Compact for StoredBlockOmmers { + fn to_compact(&self, buf: &mut B) -> usize + where + B: bytes::BufMut + AsMut<[u8]>, + { + let mut buffer = bytes::BytesMut::new(); + self.ommers.to_compact(&mut buffer); + let total_length = buffer.len(); + buf.put(buffer); + total_length + } + + fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { + let (ommers, new_buf) = Vec::from_compact(buf, buf.len()); + (Self { ommers }, new_buf) + } } /// Hash of the block header. @@ -31,4 +49,18 @@ mod tests { ommer.ommers.push(Header::default()); assert_eq!(ommer.clone(), StoredBlockOmmers::decompress(&ommer.compress()).unwrap()); } + + #[test] + fn fuzz_stored_block_ommers() { + fuzz_test_stored_block_ommers(StoredBlockOmmers::default()) + } + + #[test_fuzz::test_fuzz] + fn fuzz_test_stored_block_ommers(obj: StoredBlockOmmers) { + use reth_codecs::Compact; + let mut buf = vec![]; + let len = obj.to_compact(&mut buf); + let (same_obj, _) = StoredBlockOmmers::from_compact(buf.as_ref(), len); + assert_eq!(obj, same_obj); + } } diff --git a/crates/storage/db-api/src/models/integer_list.rs b/crates/storage/db-api/src/models/integer_list.rs index 480b52a9e2c..5301ec303e5 100644 --- a/crates/storage/db-api/src/models/integer_list.rs +++ b/crates/storage/db-api/src/models/integer_list.rs @@ -4,7 +4,159 @@ use crate::{ table::{Compress, Decompress}, DatabaseError, }; -use reth_primitives_traits::IntegerList; +use bytes::BufMut; +use core::fmt; +use derive_more::Deref; +use roaring::RoaringTreemap; + +/// A data structure that uses Roaring Bitmaps to efficiently store a list of integers. +/// +/// This structure provides excellent compression while allowing direct access to individual +/// elements without the need for full decompression. +/// +/// Key features: +/// - Efficient compression: the underlying Roaring Bitmaps significantly reduce memory usage. +/// - Direct access: elements can be accessed or queried without needing to decode the entire list. +/// - [`RoaringTreemap`] backing: internally backed by [`RoaringTreemap`], which supports 64-bit +/// integers. +#[derive(Clone, PartialEq, Default, Deref)] +pub struct IntegerList(pub RoaringTreemap); + +impl fmt::Debug for IntegerList { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("IntegerList")?; + f.debug_list().entries(self.0.iter()).finish() + } +} + +impl IntegerList { + /// Creates a new empty [`IntegerList`]. + pub fn empty() -> Self { + Self(RoaringTreemap::new()) + } + + /// Creates an [`IntegerList`] from a list of integers. + /// + /// Returns an error if the list is not pre-sorted. + pub fn new(list: impl IntoIterator) -> Result { + RoaringTreemap::from_sorted_iter(list) + .map(Self) + .map_err(|_| IntegerListError::UnsortedInput) + } + + /// Creates an [`IntegerList`] from a pre-sorted list of integers. + /// + /// # Panics + /// + /// Panics if the list is not pre-sorted. + #[inline] + #[track_caller] + pub fn new_pre_sorted(list: impl IntoIterator) -> Self { + Self::new(list).expect("IntegerList must be pre-sorted and non-empty") + } + + /// Appends a list of integers to the current list. + pub fn append(&mut self, list: impl IntoIterator) -> Result { + self.0.append(list).map_err(|_| IntegerListError::UnsortedInput) + } + + /// Pushes a new integer to the list. + pub fn push(&mut self, value: u64) -> Result<(), IntegerListError> { + self.0.push(value).then_some(()).ok_or(IntegerListError::UnsortedInput) + } + + /// Clears the list. 
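The constructors above enforce strictly increasing input; a quick sketch of that contract (`IntegerList` is re-exported from `reth_db_api::models` later in this diff):

```rust
use reth_db_api::models::IntegerList;

// Sketch: sorted input is accepted, unsorted input is rejected, and
// `push` only accepts values greater than the current maximum.
fn integer_list_contract() {
    let list = IntegerList::new([1u64, 2, 3]).expect("sorted input");
    assert_eq!(list.iter().collect::<Vec<_>>(), vec![1, 2, 3]);
    assert!(IntegerList::new([3u64, 1, 2]).is_err()); // UnsortedInput

    let mut growable = IntegerList::empty();
    growable.push(10).expect("greater than current max");
    assert!(growable.push(5).is_err()); // out of order -> UnsortedInput
}
```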
+ pub fn clear(&mut self) { + self.0.clear(); + } + + /// Serializes a [`IntegerList`] into a sequence of bytes. + pub fn to_bytes(&self) -> Vec { + let mut vec = Vec::with_capacity(self.0.serialized_size()); + self.0.serialize_into(&mut vec).expect("not able to encode IntegerList"); + vec + } + + /// Serializes a [`IntegerList`] into a sequence of bytes. + pub fn to_mut_bytes(&self, buf: &mut B) { + self.0.serialize_into(buf.writer()).unwrap(); + } + + /// Deserializes a sequence of bytes into a proper [`IntegerList`]. + pub fn from_bytes(data: &[u8]) -> Result { + RoaringTreemap::deserialize_from(data) + .map(Self) + .map_err(|_| IntegerListError::FailedToDeserialize) + } +} + +impl serde::Serialize for IntegerList { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + use serde::ser::SerializeSeq; + + let mut seq = serializer.serialize_seq(Some(self.len() as usize))?; + for e in &self.0 { + seq.serialize_element(&e)?; + } + seq.end() + } +} + +struct IntegerListVisitor; + +impl<'de> serde::de::Visitor<'de> for IntegerListVisitor { + type Value = IntegerList; + + fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("a usize array") + } + + fn visit_seq(self, mut seq: E) -> Result + where + E: serde::de::SeqAccess<'de>, + { + let mut list = IntegerList::empty(); + while let Some(item) = seq.next_element()? { + list.push(item).map_err(serde::de::Error::custom)?; + } + Ok(list) + } +} + +impl<'de> serde::Deserialize<'de> for IntegerList { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + deserializer.deserialize_byte_buf(IntegerListVisitor) + } +} + +#[cfg(any(test, feature = "arbitrary"))] +use arbitrary::{Arbitrary, Unstructured}; + +#[cfg(any(test, feature = "arbitrary"))] +impl<'a> Arbitrary<'a> for IntegerList { + fn arbitrary(u: &mut Unstructured<'a>) -> Result { + let mut nums: Vec = Vec::arbitrary(u)?; + nums.sort_unstable(); + Self::new(nums).map_err(|_| arbitrary::Error::IncorrectFormat) + } +} + +/// Primitives error type. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum IntegerListError { + /// The provided input is unsorted. + #[display("the provided input is unsorted")] + UnsortedInput, + /// Failed to deserialize data into type. 
+ #[display("failed to deserialize data into type")] + FailedToDeserialize, +} impl Compress for IntegerList { type Compressed = Vec; @@ -23,3 +175,30 @@ impl Decompress for IntegerList { Self::from_bytes(value).map_err(|_| DatabaseError::Decode) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn empty_list() { + assert_eq!(IntegerList::empty().len(), 0); + assert_eq!(IntegerList::new_pre_sorted(std::iter::empty()).len(), 0); + } + + #[test] + fn test_integer_list() { + let original_list = [1, 2, 3]; + let ef_list = IntegerList::new(original_list).unwrap(); + assert_eq!(ef_list.iter().collect::>(), original_list); + } + + #[test] + fn test_integer_list_serialization() { + let original_list = [1, 2, 3]; + let ef_list = IntegerList::new(original_list).unwrap(); + + let blist = ef_list.to_bytes(); + assert_eq!(IntegerList::from_bytes(&blist).unwrap(), ef_list) + } +} diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 0f35a558a35..7ded84e1720 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -4,12 +4,12 @@ use crate::{ table::{Compress, Decode, Decompress, Encode}, DatabaseError, }; +use alloy_consensus::Header; use alloy_genesis::GenesisAccount; use alloy_primitives::{Address, Bytes, Log, B256, U256}; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::{ - Account, Bytecode, Header, Receipt, Requests, StorageEntry, TransactionSignedNoHash, TxType, -}; +use reth_primitives::{Receipt, StorageEntry, TransactionSigned, TxType}; +use reth_primitives_traits::{Account, Bytecode}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; use reth_trie_common::{StoredNibbles, StoredNibblesSubKey, *}; @@ -23,6 +23,7 @@ pub mod storage_sharded_key; pub use accounts::*; pub use blocks::*; +pub use integer_list::IntegerList; pub use reth_db_models::{ AccountBeforeTx, ClientVersion, StoredBlockBodyIndices, StoredBlockWithdrawals, }; @@ -188,9 +189,9 @@ impl Decode for ClientVersion { /// Implements compression for Compact type. macro_rules! impl_compression_for_compact { - ($($name:tt),+) => { + ($($name:ident$(<$($generic:ident),*>)?),+) => { $( - impl Compress for $name { + impl$(<$($generic: core::fmt::Debug + Send + Sync + Compact),*>)? Compress for $name$(<$($generic),*>)? { type Compressed = Vec; fn compress_to_buf>(self, buf: &mut B) { @@ -198,8 +199,8 @@ macro_rules! impl_compression_for_compact { } } - impl Decompress for $name { - fn decompress(value: &[u8]) -> Result<$name, $crate::DatabaseError> { + impl$(<$($generic: core::fmt::Debug + Send + Sync + Compact),*>)? Decompress for $name$(<$($generic),*>)? 
{ + fn decompress(value: &[u8]) -> Result<$name$(<$($generic),*>)?, $crate::DatabaseError> { let (obj, _) = Compact::from_compact(value, value.len()); Ok(obj) } @@ -221,16 +222,15 @@ impl_compression_for_compact!( StoredNibblesSubKey, StorageTrieEntry, StoredBlockBodyIndices, - StoredBlockOmmers, + StoredBlockOmmers, StoredBlockWithdrawals, Bytecode, AccountBeforeTx, - TransactionSignedNoHash, + TransactionSigned, CompactU256, StageCheckpoint, PruneCheckpoint, ClientVersion, - Requests, // Non-DB GenesisAccount ); @@ -314,7 +314,7 @@ mod tests { fn test_ensure_backwards_compatibility() { use super::*; use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; - use reth_primitives::{Account, Receipt, ReceiptWithBloom, Withdrawals}; + use reth_primitives::{Account, Receipt}; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, @@ -335,14 +335,11 @@ mod tests { assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); assert_eq!(Receipt::bitflag_encoded_bytes(), 1); - assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); validate_bitflag_backwards_compat!(Account, UnusedBits::NotZero); validate_bitflag_backwards_compat!(AccountHashingCheckpoint, UnusedBits::NotZero); @@ -358,14 +355,10 @@ mod tests { validate_bitflag_backwards_compat!(PruneMode, UnusedBits::Zero); validate_bitflag_backwards_compat!(PruneSegment, UnusedBits::Zero); validate_bitflag_backwards_compat!(Receipt, UnusedBits::Zero); - validate_bitflag_backwards_compat!(ReceiptWithBloom, UnusedBits::Zero); validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); - validate_bitflag_backwards_compat!(StoredBlockOmmers, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockWithdrawals, UnusedBits::Zero); validate_bitflag_backwards_compat!(StorageHashingCheckpoint, UnusedBits::NotZero); - validate_bitflag_backwards_compat!(Withdrawals, UnusedBits::Zero); - validate_bitflag_backwards_compat!(Requests, UnusedBits::Zero); } } diff --git a/crates/storage/db-api/src/models/storage_sharded_key.rs b/crates/storage/db-api/src/models/storage_sharded_key.rs index 5fd79ba655c..a7a1ffb71be 100644 --- a/crates/storage/db-api/src/models/storage_sharded_key.rs +++ b/crates/storage/db-api/src/models/storage_sharded_key.rs @@ -12,6 +12,10 @@ use super::ShardedKey; /// Number of indices in one shard. pub const NUM_OF_INDICES_IN_SHARD: usize = 2_000; +/// The size of [`StorageShardedKey`] encode bytes. +/// The fields are: 20-byte address, 32-byte key, and 8-byte block number +const STORAGE_SHARD_KEY_BYTES_SIZE: usize = 20 + 32 + 8; + /// Sometimes data can be too big to be saved for a single key. This helps out by dividing the data /// into different shards. 
Example: /// @@ -53,7 +57,8 @@ impl Encode for StorageShardedKey { type Encoded = Vec; fn encode(self) -> Self::Encoded { - let mut buf: Vec = Encode::encode(self.address).into(); + let mut buf: Vec = Vec::with_capacity(STORAGE_SHARD_KEY_BYTES_SIZE); + buf.extend_from_slice(&Encode::encode(self.address)); buf.extend_from_slice(&Encode::encode(self.sharded_key.key)); buf.extend_from_slice(&self.sharded_key.highest_block_number.to_be_bytes()); buf @@ -62,6 +67,9 @@ impl Encode for StorageShardedKey { impl Decode for StorageShardedKey { fn decode(value: &[u8]) -> Result { + if value.len() != STORAGE_SHARD_KEY_BYTES_SIZE { + return Err(DatabaseError::Decode) + } let tx_num_index = value.len() - 8; let highest_tx_number = u64::from_be_bytes( diff --git a/crates/storage/db-api/src/table.rs b/crates/storage/db-api/src/table.rs index 963457af05c..a4d3f87b40b 100644 --- a/crates/storage/db-api/src/table.rs +++ b/crates/storage/db-api/src/table.rs @@ -88,6 +88,9 @@ pub trait Table: Send + Sync + Debug + 'static { /// The table's name. const NAME: &'static str; + /// Whether the table is also a `DUPSORT` table. + const DUPSORT: bool; + /// Key element of `Table`. /// /// Sorting should be taken into account when encoding this. @@ -97,6 +100,15 @@ pub trait Table: Send + Sync + Debug + 'static { type Value: Value; } +/// Trait that provides object-safe access to the table's metadata. +pub trait TableInfo: Send + Sync + Debug + 'static { + /// The table's name. + fn name(&self) -> &'static str; + + /// Whether the table is a `DUPSORT` table. + fn is_dupsort(&self) -> bool; +} + /// Tuple with `T::Key` and `T::Value`. pub type TableRow = (::Key, ::Value); diff --git a/crates/storage/db-api/src/transaction.rs b/crates/storage/db-api/src/transaction.rs index f39cf92fb61..6dc79670a65 100644 --- a/crates/storage/db-api/src/transaction.rs +++ b/crates/storage/db-api/src/transaction.rs @@ -1,6 +1,6 @@ use crate::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, - table::{DupSort, Table}, + table::{DupSort, Encode, Table}, DatabaseError, }; @@ -11,8 +11,15 @@ pub trait DbTx: Send + Sync { /// `DupCursor` type for this read-only transaction type DupCursor: DbDupCursorRO + DbCursorRO + Send + Sync; - /// Get value + /// Get value by an owned key fn get(&self, key: T::Key) -> Result, DatabaseError>; + /// Get value by a reference to the encoded key, especially useful for "raw" keys + /// that encode to themselves like Address and B256. Doesn't need to clone a + /// reference key like `get`. 
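A small sketch of when `get_by_encoded_key` pays off, using the `TransactionHashNumbers` table, whose `TxHash` key encodes to itself (the lookup helper is hypothetical):

```rust
use alloy_primitives::{TxHash, TxNumber};
use reth_db::TransactionHashNumbers;
use reth_db_api::{transaction::DbTx, DatabaseError};

// Sketch: `get` takes `T::Key` by value and would force the caller to copy
// the hash; the borrowed form passes the reference straight through.
fn lookup_tx_number<TX: DbTx>(
    tx: &TX,
    hash: &TxHash,
) -> Result<Option<TxNumber>, DatabaseError> {
    tx.get_by_encoded_key::<TransactionHashNumbers>(hash)
}
```

The benchmark added in `crates/storage/db/benches/get.rs` further down measures exactly this difference.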
+ fn get_by_encoded_key( + &self, + key: &::Encoded, + ) -> Result, DatabaseError>; /// Commit for read only transaction will consume and free transaction and allows /// freeing of memory pages fn commit(self) -> Result; diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 7fc48796986..28dbc33e90d 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -24,6 +24,7 @@ reth-fs-util.workspace = true reth-node-types.workspace = true # eth +alloy-consensus.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true @@ -42,6 +43,7 @@ tracing.workspace = true [dev-dependencies] reth-primitives-traits.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } +alloy-consensus.workspace = true [lints] workspace = true diff --git a/crates/storage/db-common/src/db_tool/mod.rs b/crates/storage/db-common/src/db_tool/mod.rs index 67a5dd62762..3420f2089fd 100644 --- a/crates/storage/db-common/src/db_tool/mod.rs +++ b/crates/storage/db-common/src/db_tool/mod.rs @@ -12,7 +12,7 @@ use reth_db_api::{ }; use reth_fs_util as fs; use reth_node_types::NodeTypesWithDB; -use reth_provider::{providers::ProviderNodeTypes, ChainSpecProvider, ProviderFactory}; +use reth_provider::{providers::ProviderNodeTypes, ChainSpecProvider, DBProvider, ProviderFactory}; use std::{path::Path, rc::Rc, sync::Arc}; use tracing::info; diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 3962dfd6980..95b2a5d5c4a 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -1,5 +1,6 @@ //! Reth genesis initialization utility functions. +use alloy_consensus::BlockHeader; use alloy_genesis::GenesisAccount; use alloy_primitives::{Address, B256, U256}; use reth_chainspec::EthChainSpec; @@ -8,15 +9,15 @@ use reth_config::config::EtlConfig; use reth_db::tables; use reth_db_api::{transaction::DbTxMut, DatabaseError}; use reth_etl::Collector; -use reth_primitives::{Account, Bytecode, GotExpected, Receipts, StaticFileSegment, StorageEntry}; +use reth_primitives::{ + Account, Bytecode, GotExpected, NodePrimitives, Receipts, StaticFileSegment, StorageEntry, +}; use reth_provider::{ - errors::provider::ProviderResult, - providers::{StaticFileProvider, StaticFileWriter}, - writer::UnifiedStorageWriter, + errors::provider::ProviderResult, providers::StaticFileWriter, writer::UnifiedStorageWriter, BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome, HashingWriter, HeaderProvider, HistoryWriter, - OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointWriter, StateChangeWriter, - StateWriter, StaticFileProviderFactory, TrieWriter, + OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter, + StateWriter, StaticFileProviderFactory, StorageLocation, TrieWriter, }; use reth_stages_types::{StageCheckpoint, StageId}; use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress}; @@ -42,17 +43,20 @@ pub const AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP: usize = 285_228; /// Soft limit for the number of flushed updates after which to log progress summary. const SOFT_LIMIT_COUNT_FLUSHED_UPDATES: usize = 1_000_000; -/// Database initialization error type. +/// Storage initialization error type. 
#[derive(Debug, thiserror::Error, PartialEq, Eq, Clone)] -pub enum InitDatabaseError { +pub enum InitStorageError { + /// Genesis header found on static files but the database is empty. + #[error("static files found, but the database is uninitialized. If attempting to re-syncing, delete both.")] + UninitializedDatabase, /// An existing genesis block was found in the database, and its hash did not match the hash of /// the chainspec. - #[error("genesis hash in the database does not match the specified chainspec: chainspec is {chainspec_hash}, database is {database_hash}")] + #[error("genesis hash in the storage does not match the specified chainspec: chainspec is {chainspec_hash}, database is {storage_hash}")] GenesisHashMismatch { /// Expected genesis hash. chainspec_hash: B256, /// Actual genesis hash. - database_hash: B256, + storage_hash: B256, }, /// Provider error. #[error(transparent)] @@ -62,22 +66,29 @@ pub enum InitDatabaseError { StateRootMismatch(GotExpected), } -impl From for InitDatabaseError { +impl From for InitStorageError { fn from(error: DatabaseError) -> Self { Self::Provider(ProviderError::Database(error)) } } /// Write the genesis block if it has not already been written -pub fn init_genesis(factory: &PF) -> Result +pub fn init_genesis(factory: &PF) -> Result where - PF: DatabaseProviderFactory + StaticFileProviderFactory + ChainSpecProvider + BlockHashReader, - PF::ProviderRW: StageCheckpointWriter + PF: DatabaseProviderFactory + + StaticFileProviderFactory> + + ChainSpecProvider + + StageCheckpointReader + + BlockHashReader, + PF::ProviderRW: StaticFileProviderFactory + + StageCheckpointWriter + HistoryWriter + HeaderProvider + HashingWriter - + StateChangeWriter + + StateWriter + + StateWriter + AsRef, + PF::ChainSpec: EthChainSpec
::BlockHeader>, { let chain = factory.chain_spec(); @@ -89,16 +100,27 @@ where Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => {} Ok(Some(block_hash)) => { if block_hash == hash { + // Some users will at times attempt to re-sync from scratch by just deleting the + // database. Since `factory.block_hash` will only query the static files, we need to + // make sure that our database has been written to, and throw error if it's empty. + if factory.get_stage_checkpoint(StageId::Headers)?.is_none() { + error!(target: "reth::storage", "Genesis header found on static files, but database is uninitialized."); + return Err(InitStorageError::UninitializedDatabase) + } + debug!("Genesis already written, skipping."); return Ok(hash) } - return Err(InitDatabaseError::GenesisHashMismatch { + return Err(InitStorageError::GenesisHashMismatch { chainspec_hash: hash, - database_hash: block_hash, + storage_hash: block_hash, }) } - Err(e) => return Err(dbg!(e).into()), + Err(e) => { + debug!(?e); + return Err(e.into()); + } } debug!("Writing genesis block."); @@ -111,8 +133,7 @@ where insert_genesis_history(&provider_rw, alloc.iter())?; // Insert header - let static_file_provider = factory.static_file_provider(); - insert_genesis_header(&provider_rw, &static_file_provider, &chain)?; + insert_genesis_header(&provider_rw, &chain)?; insert_genesis_state(&provider_rw, alloc.iter())?; @@ -121,6 +142,7 @@ where provider_rw.save_stage_checkpoint(stage, Default::default())?; } + let static_file_provider = provider_rw.static_file_provider(); // Static file segments start empty, so we need to initialize the genesis block. let segment = StaticFileSegment::Receipts; static_file_provider.latest_writer(segment)?.increment_block(0)?; @@ -130,7 +152,7 @@ where // `commit_unwind`` will first commit the DB and then the static file provider, which is // necessary on `init_genesis`. - UnifiedStorageWriter::commit_unwind(provider_rw, static_file_provider)?; + UnifiedStorageWriter::commit_unwind(provider_rw)?; Ok(hash) } @@ -141,7 +163,11 @@ pub fn insert_genesis_state<'a, 'b, Provider>( alloc: impl Iterator, ) -> ProviderResult<()> where - Provider: DBProvider + StateChangeWriter + HeaderProvider + AsRef, + Provider: StaticFileProviderFactory + + DBProvider + + HeaderProvider + + StateWriter + + AsRef, { insert_state(provider, alloc, 0) } @@ -153,7 +179,11 @@ pub fn insert_state<'a, 'b, Provider>( block: u64, ) -> ProviderResult<()> where - Provider: DBProvider + StateChangeWriter + HeaderProvider + AsRef, + Provider: StaticFileProviderFactory + + DBProvider + + HeaderProvider + + StateWriter + + AsRef, { let capacity = alloc.size_hint().1.unwrap_or(0); let mut state_init: BundleStateInit = HashMap::with_capacity(capacity); @@ -220,8 +250,7 @@ where Vec::new(), ); - let mut storage_writer = UnifiedStorageWriter::from_database(&provider); - storage_writer.write_to_storage(execution_outcome, OriginalValuesKnown::Yes)?; + provider.write_state(execution_outcome, OriginalValuesKnown::Yes, StorageLocation::Database)?; trace!(target: "reth::cli", "Inserted state"); @@ -293,18 +322,19 @@ where /// Inserts header for the genesis state. pub fn insert_genesis_header( provider: &Provider, - static_file_provider: &StaticFileProvider, chain: &Spec, ) -> ProviderResult<()> where - Provider: DBProvider, - Spec: EthChainSpec, + Provider: StaticFileProviderFactory> + + DBProvider, + Spec: EthChainSpec
::BlockHeader>, { let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash()); + let static_file_provider = provider.static_file_provider(); match static_file_provider.block_hash(0) { Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => { - let (difficulty, hash) = (header.difficulty, block_hash); + let (difficulty, hash) = (header.difficulty(), block_hash); let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; writer.append_header(header, difficulty, &hash)?; } @@ -330,7 +360,8 @@ pub fn init_from_state_dump( etl_config: EtlConfig, ) -> eyre::Result where - Provider: DBProvider + Provider: StaticFileProviderFactory + + DBProvider + BlockNumReader + BlockHashReader + ChainSpecProvider @@ -338,8 +369,8 @@ where + HistoryWriter + HeaderProvider + HashingWriter - + StateChangeWriter + TrieWriter + + StateWriter + AsRef, { let block = provider_rw.last_block_number()?; @@ -347,7 +378,7 @@ where let expected_state_root = provider_rw .header_by_number(block)? .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))? - .state_root; + .state_root(); // first line can be state root let dump_state_root = parse_state_root(&mut reader)?; @@ -357,7 +388,7 @@ where ?expected_state_root, "State root from state dump does not match state root in current header." ); - return Err(InitDatabaseError::StateRootMismatch(GotExpected { + return Err(InitStorageError::StateRootMismatch(GotExpected { got: dump_state_root, expected: expected_state_root, }) @@ -390,7 +421,7 @@ where "Computed state root does not match state root in state dump" ); - return Err(InitDatabaseError::StateRootMismatch(GotExpected { + return Err(InitStorageError::StateRootMismatch(GotExpected { got: computed_state_root, expected: expected_state_root, }) @@ -454,11 +485,12 @@ fn dump_state( block: u64, ) -> Result<(), eyre::Error> where - Provider: DBProvider + Provider: StaticFileProviderFactory + + DBProvider + HeaderProvider + HashingWriter + HistoryWriter - + StateChangeWriter + + StateWriter + AsRef, { let accounts_len = collector.len(); @@ -581,18 +613,19 @@ struct GenesisAccountWithAddress { #[cfg(test)] mod tests { use super::*; + use alloy_consensus::constants::{ + HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, + }; use alloy_genesis::Genesis; use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA}; use reth_db::DatabaseEnv; use reth_db_api::{ cursor::DbCursorRO, - models::{storage_sharded_key::StorageShardedKey, ShardedKey}, + models::{storage_sharded_key::StorageShardedKey, IntegerList, ShardedKey}, table::{Table, TableRow}, transaction::DbTx, Database, }; - use reth_primitives::{HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; - use reth_primitives_traits::IntegerList; use reth_provider::{ test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, ProviderFactory, @@ -601,7 +634,7 @@ mod tests { fn collect_table_entries( tx: &::TX, - ) -> Result>, InitDatabaseError> + ) -> Result>, InitStorageError> where DB: Database, T: Table, @@ -651,9 +684,9 @@ mod tests { assert_eq!( genesis_hash.unwrap_err(), - InitDatabaseError::GenesisHashMismatch { + InitStorageError::GenesisHashMismatch { chainspec_hash: MAINNET_GENESIS_HASH, - database_hash: SEPOLIA_GENESIS_HASH + storage_hash: SEPOLIA_GENESIS_HASH } ) } diff --git a/crates/storage/db-models/Cargo.toml b/crates/storage/db-models/Cargo.toml index 9bcd54f3860..0997c08b784 100644 --- a/crates/storage/db-models/Cargo.toml +++ 
b/crates/storage/db-models/Cargo.toml @@ -14,10 +14,11 @@ workspace = true [dependencies] # reth reth-codecs.workspace = true -reth-primitives = { workspace = true, features = ["reth-codec"] } +reth-primitives-traits = { workspace = true, features = ["serde", "reth-codec"] } # ethereum alloy-primitives.workspace = true +alloy-eips.workspace = true # codecs modular-bitfield.workspace = true @@ -32,18 +33,25 @@ proptest = { workspace = true, optional = true } [dev-dependencies] # reth -reth-primitives = { workspace = true, features = ["arbitrary"] } +reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-codecs.workspace = true - arbitrary = { workspace = true, features = ["derive"] } -proptest-arbitrary-interop.workspace = true + proptest.workspace = true +proptest-arbitrary-interop.workspace = true test-fuzz.workspace = true [features] -test-utils = ["arbitrary"] +test-utils = [ + "reth-primitives-traits/test-utils", + "arbitrary", + "reth-codecs/test-utils", +] arbitrary = [ - "reth-primitives/arbitrary", + "reth-primitives-traits/arbitrary", "dep:arbitrary", "dep:proptest", + "alloy-primitives/arbitrary", + "alloy-eips/arbitrary", + "reth-codecs/arbitrary", ] diff --git a/crates/storage/db-models/src/accounts.rs b/crates/storage/db-models/src/accounts.rs index b0099d22d5f..29a5cf30592 100644 --- a/crates/storage/db-models/src/accounts.rs +++ b/crates/storage/db-models/src/accounts.rs @@ -2,13 +2,13 @@ use reth_codecs::{add_arbitrary_tests, Compact}; use serde::Serialize; use alloy_primitives::{bytes::Buf, Address}; -use reth_primitives::Account; +use reth_primitives_traits::Account; /// Account as it is saved in the database. /// /// [`Address`] is the subkey. #[derive(Debug, Default, Clone, Eq, PartialEq, Serialize)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary, serde::Deserialize))] #[add_arbitrary_tests(compact)] pub struct AccountBeforeTx { /// Address for the account. Acts as `DupSort::SubKey`. diff --git a/crates/storage/db-models/src/blocks.rs b/crates/storage/db-models/src/blocks.rs index 3e740a2e1aa..be7661c8b12 100644 --- a/crates/storage/db-models/src/blocks.rs +++ b/crates/storage/db-models/src/blocks.rs @@ -1,8 +1,8 @@ use std::ops::Range; +use alloy_eips::eip4895::Withdrawals; use alloy_primitives::TxNumber; use reth_codecs::{add_arbitrary_tests, Compact}; -use reth_primitives::Withdrawals; use serde::{Deserialize, Serialize}; /// Total number of transactions. @@ -12,7 +12,7 @@ pub type NumTransactions = u64; /// /// It has the pointer to the transaction Number of the first /// transaction in the block and the total number of transactions. 
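A sketch of how these indices are typically consumed, assuming the existing `tx_num_range` helper on this type (values made up):

```rust
use reth_db_api::models::StoredBlockBodyIndices;

// Sketch: the indices describe a contiguous run of transaction numbers;
// with the new `Copy` derive they can be passed by value for free.
fn body_indices_sketch() {
    let indices = StoredBlockBodyIndices { first_tx_num: 100, tx_count: 3 };
    let by_value = indices; // Copy: `indices` stays usable
    assert_eq!(by_value.tx_num_range(), 100..103);
    assert_eq!(indices.tx_num_range().count(), 3);
}
```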
-#[derive(Debug, Default, Eq, PartialEq, Clone, Serialize, Deserialize, Compact)] +#[derive(Debug, Default, Eq, PartialEq, Clone, Copy, Serialize, Deserialize, Compact)] #[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct StoredBlockBodyIndices { diff --git a/crates/storage/db-models/src/client_version.rs b/crates/storage/db-models/src/client_version.rs index de074ac88c6..a28e7385f65 100644 --- a/crates/storage/db-models/src/client_version.rs +++ b/crates/storage/db-models/src/client_version.rs @@ -28,20 +28,16 @@ impl Compact for ClientVersion { where B: bytes::BufMut + AsMut<[u8]>, { - self.version.as_bytes().to_compact(buf); - self.git_sha.as_bytes().to_compact(buf); - self.build_timestamp.as_bytes().to_compact(buf) + self.version.to_compact(buf); + self.git_sha.to_compact(buf); + self.build_timestamp.to_compact(buf) } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (version, buf) = Vec::::from_compact(buf, len); - let (git_sha, buf) = Vec::::from_compact(buf, len); - let (build_timestamp, buf) = Vec::::from_compact(buf, len); - let client_version = Self { - version: unsafe { String::from_utf8_unchecked(version) }, - git_sha: unsafe { String::from_utf8_unchecked(git_sha) }, - build_timestamp: unsafe { String::from_utf8_unchecked(build_timestamp) }, - }; + let (version, buf) = String::from_compact(buf, len); + let (git_sha, buf) = String::from_compact(buf, len); + let (build_timestamp, buf) = String::from_compact(buf, len); + let client_version = Self { version, git_sha, build_timestamp }; (client_version, buf) } } diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index a075f772463..fd313a40ae5 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -15,17 +15,18 @@ workspace = true # reth reth-db-api.workspace = true reth-primitives = { workspace = true, features = ["reth-codec"] } -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["serde", "reth-codec"] } reth-fs-util.workspace = true reth-storage-errors.workspace = true reth-nippy-jar.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true +reth-trie-common = { workspace = true, features = ["serde"] } reth-tracing.workspace = true -reth-trie-common.workspace = true # ethereum alloy-primitives.workspace = true +alloy-consensus.workspace = true # mdbx reth-libmdbx = { workspace = true, optional = true, features = [ @@ -47,9 +48,8 @@ page_size = { version = "0.6.0", optional = true } thiserror.workspace = true tempfile = { workspace = true, optional = true } derive_more.workspace = true -paste.workspace = true rustc-hash = { workspace = true, optional = true } -sysinfo = { version = "0.31", default-features = false, features = ["system"] } +sysinfo = { version = "0.32", default-features = false, features = ["system"] } parking_lot = { workspace = true, optional = true } # arbitrary utils @@ -58,7 +58,6 @@ strum = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] # reth libs with arbitrary reth-primitives = { workspace = true, features = ["arbitrary"] } -rand.workspace = true serde_json.workspace = true tempfile.workspace = true test-fuzz.workspace = true @@ -90,10 +89,30 @@ mdbx = [ "dep:strum", "dep:rustc-hash", ] -test-utils = ["dep:tempfile", "arbitrary", "parking_lot"] +test-utils = [ + "dep:tempfile", + "arbitrary", + "parking_lot", + "reth-primitives/test-utils", + 
"reth-primitives-traits/test-utils", + "reth-db-api/test-utils", + "reth-nippy-jar/test-utils", + "reth-trie-common/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", +] bench = [] -arbitrary = ["reth-primitives/arbitrary", "reth-db-api/arbitrary"] -optimism = [] +arbitrary = [ + "reth-primitives/arbitrary", + "reth-db-api/arbitrary", + "reth-primitives-traits/arbitrary", + "reth-trie-common/arbitrary", + "alloy-primitives/arbitrary", + "reth-prune-types/arbitrary", + "reth-stages-types/arbitrary", + "alloy-consensus/arbitrary", +] +optimism = ["reth-primitives/optimism", "reth-db-api/optimism"] disable-lock = [] [[bench]] @@ -110,3 +129,8 @@ harness = false name = "iai" required-features = ["test-utils"] harness = false + +[[bench]] +name = "get" +required-features = ["test-utils"] +harness = false diff --git a/crates/storage/db/benches/get.rs b/crates/storage/db/benches/get.rs new file mode 100644 index 00000000000..04eda02e05e --- /dev/null +++ b/crates/storage/db/benches/get.rs @@ -0,0 +1,52 @@ +#![allow(missing_docs)] + +use alloy_primitives::TxHash; +use criterion::{criterion_group, criterion_main, Criterion}; +use pprof::criterion::{Output, PProfProfiler}; +use reth_db::{test_utils::create_test_rw_db_with_path, Database, TransactionHashNumbers}; +use reth_db_api::transaction::DbTx; +use std::{fs, sync::Arc}; + +mod utils; +use utils::BENCH_DB_PATH; + +criterion_group! { + name = benches; + config = Criterion::default().with_profiler(PProfProfiler::new(1, Output::Flamegraph(None))); + targets = get +} +criterion_main!(benches); + +// Small benchmark showing that [get_by_encoded_key] is slightly faster than [get] +// for a reference key, as [get] requires copying or cloning the key first. +fn get(c: &mut Criterion) { + let mut group = c.benchmark_group("Get"); + + // Random keys to get + let mut keys = Vec::new(); + for _ in 0..10_000_000 { + let key = TxHash::random(); + keys.push(key); + } + + // We don't bother mock the DB to reduce noise from DB I/O, value decoding, etc. + let _ = fs::remove_dir_all(BENCH_DB_PATH); + let db = Arc::try_unwrap(create_test_rw_db_with_path(BENCH_DB_PATH)).unwrap(); + let tx = db.tx().expect("tx"); + + group.bench_function("get", |b| { + b.iter(|| { + for key in &keys { + tx.get::(*key).unwrap(); + } + }) + }); + + group.bench_function("get_by_encoded_key", |b| { + b.iter(|| { + for key in &keys { + tx.get_by_encoded_key::(key).unwrap(); + } + }) + }); +} diff --git a/crates/storage/db/benches/utils.rs b/crates/storage/db/benches/utils.rs index 9700ef94b24..62c4dfe6ecb 100644 --- a/crates/storage/db/benches/utils.rs +++ b/crates/storage/db/benches/utils.rs @@ -26,13 +26,11 @@ where T::Key: Default + Clone + for<'de> serde::Deserialize<'de>, T::Value: Default + Clone + for<'de> serde::Deserialize<'de>, { + let path = + format!("{}/../../../testdata/micro/db/{}.json", env!("CARGO_MANIFEST_DIR"), T::NAME); let list: Vec> = serde_json::from_reader(std::io::BufReader::new( - std::fs::File::open(format!( - "{}/../../../testdata/micro/db/{}.json", - env!("CARGO_MANIFEST_DIR"), - T::NAME - )) - .expect("Test vectors not found. They can be generated from the workspace by calling `cargo run --bin reth -- test-vectors tables`."), + std::fs::File::open(&path) + .unwrap_or_else(|_| panic!("Test vectors not found. 
They can be generated from the workspace by calling `cargo run --bin reth --features dev -- test-vectors tables`: {:?}", path)) )) .unwrap(); diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 65b804e6a58..8c3d3630889 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -3,9 +3,9 @@ use crate::{ lockfile::StorageLock, metrics::DatabaseEnvMetrics, - tables::{self, TableType, Tables}, + tables::{self, Tables}, utils::default_page_size, - DatabaseError, + DatabaseError, TableSet, }; use eyre::Context; use metrics::{gauge, Label}; @@ -23,7 +23,7 @@ use reth_libmdbx::{ use reth_storage_errors::db::LogLevel; use reth_tracing::tracing::error; use std::{ - ops::Deref, + ops::{Deref, Range}, path::Path, sync::Arc, time::{SystemTime, UNIX_EPOCH}, @@ -33,8 +33,14 @@ use tx::Tx; pub mod cursor; pub mod tx; -const GIGABYTE: usize = 1024 * 1024 * 1024; -const TERABYTE: usize = GIGABYTE * 1024; +/// 1 KB in bytes +pub const KILOBYTE: usize = 1024; +/// 1 MB in bytes +pub const MEGABYTE: usize = KILOBYTE * 1024; +/// 1 GB in bytes +pub const GIGABYTE: usize = MEGABYTE * 1024; +/// 1 TB in bytes +pub const TERABYTE: usize = GIGABYTE * 1024; /// MDBX allows up to 32767 readers (`MDBX_READERS_LIMIT`), but we limit it to slightly below that const DEFAULT_MAX_READERS: u64 = 32_000; @@ -60,10 +66,12 @@ impl DatabaseEnvKind { } /// Arguments for database initialization. -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug)] pub struct DatabaseArguments { /// Client version that accesses the database. client_version: ClientVersion, + /// Database geometry settings. + geometry: Geometry>, /// Database log level. If [None], the default value is used. log_level: Option, /// Maximum duration of a read transaction. If [None], the default value is used. @@ -91,17 +99,45 @@ pub struct DatabaseArguments { exclusive: Option, } +impl Default for DatabaseArguments { + fn default() -> Self { + Self::new(ClientVersion::default()) + } +} + impl DatabaseArguments { /// Create new database arguments with given client version. - pub const fn new(client_version: ClientVersion) -> Self { + pub fn new(client_version: ClientVersion) -> Self { Self { client_version, + geometry: Geometry { + size: Some(0..(4 * TERABYTE)), + growth_step: Some(4 * GIGABYTE as isize), + shrink_threshold: Some(0), + page_size: Some(PageSize::Set(default_page_size())), + }, log_level: None, max_read_transaction_duration: None, exclusive: None, } } + /// Sets the upper size limit of the db environment, the maximum database size in bytes. + pub const fn with_geometry_max_size(mut self, max_size: Option) -> Self { + if let Some(max_size) = max_size { + self.geometry.size = Some(0..max_size); + } + self + } + + /// Configures the database growth step in bytes. + pub const fn with_growth_step(mut self, growth_step: Option) -> Self { + if let Some(growth_step) = growth_step { + self.geometry.growth_step = Some(growth_step as isize); + } + self + } + /// Set the log level. pub const fn with_log_level(mut self, log_level: Option) -> Self { self.log_level = log_level; @@ -278,15 +314,7 @@ impl DatabaseEnv { // environment creation. 
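A sketch of the new geometry knobs (re-export paths assumed; passing `None` keeps the defaults of a 4 TB size cap and a 4 GB growth step):

```rust
use reth_db::mdbx::{DatabaseArguments, GIGABYTE, TERABYTE};
use reth_db_api::models::ClientVersion;

// Sketch: cap the environment at 8 TB and grow it in 2 GB increments.
fn custom_geometry() -> DatabaseArguments {
    DatabaseArguments::new(ClientVersion::default())
        .with_geometry_max_size(Some(8 * TERABYTE))
        .with_growth_step(Some(2 * GIGABYTE))
}
```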
debug_assert!(Tables::ALL.len() <= 256, "number of tables exceed max dbs"); inner_env.set_max_dbs(256); - inner_env.set_geometry(Geometry { - // Maximum database size of 4 terabytes - size: Some(0..(4 * TERABYTE)), - // We grow the database in increments of 4 gigabytes - growth_step: Some(4 * GIGABYTE as isize), - // The database never shrinks - shrink_threshold: Some(0), - page_size: Some(PageSize::Set(default_page_size())), - }); + inner_env.set_geometry(args.geometry); fn is_current_process(id: u32) -> bool { #[cfg(unix)] @@ -416,15 +444,18 @@ impl DatabaseEnv { self } - /// Creates all the defined tables, if necessary. + /// Creates all the tables defined in [`Tables`], if necessary. pub fn create_tables(&self) -> Result<(), DatabaseError> { + self.create_tables_for::() + } + + /// Creates all the tables defined in the given [`TableSet`], if necessary. + pub fn create_tables_for(&self) -> Result<(), DatabaseError> { let tx = self.inner.begin_rw_txn().map_err(|e| DatabaseError::InitTx(e.into()))?; - for table in Tables::ALL { - let flags = match table.table_type() { - TableType::Table => DatabaseFlags::default(), - TableType::DupSort => DatabaseFlags::DUP_SORT, - }; + for table in TS::tables() { + let flags = + if table.is_dupsort() { DatabaseFlags::DUP_SORT } else { DatabaseFlags::default() }; tx.create_db(Some(table.name()), flags) .map_err(|e| DatabaseError::CreateTable(e.into()))?; @@ -475,15 +506,15 @@ mod tests { test_utils::*, AccountChangeSets, }; + use alloy_consensus::Header; use alloy_primitives::{Address, B256, U256}; use reth_db_api::{ cursor::{DbDupCursorRO, DbDupCursorRW, ReverseWalker, Walker}, - models::{AccountBeforeTx, ShardedKey}, + models::{AccountBeforeTx, IntegerList, ShardedKey}, table::{Encode, Table}, }; use reth_libmdbx::Error; - use reth_primitives::{Account, Header, StorageEntry}; - use reth_primitives_traits::IntegerList; + use reth_primitives_traits::{Account, StorageEntry}; use reth_storage_errors::db::{DatabaseWriteError, DatabaseWriteOperation}; use std::str::FromStr; use tempfile::TempDir; diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index 2ff2789ea69..09be53a5ffb 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -283,8 +283,15 @@ impl DbTx for Tx { type DupCursor = Cursor; fn get(&self, key: T::Key) -> Result::Value>, DatabaseError> { + self.get_by_encoded_key::(&key.encode()) + } + + fn get_by_encoded_key( + &self, + key: &::Encoded, + ) -> Result, DatabaseError> { self.execute_with_operation_metric::(Operation::Get, None, |tx| { - tx.get(self.get_dbi::()?, key.encode().as_ref()) + tx.get(self.get_dbi::()?, key.as_ref()) .map_err(|e| DatabaseError::Read(e.into()))? .map(decode_one::) .transpose() diff --git a/crates/storage/db/src/lockfile.rs b/crates/storage/db/src/lockfile.rs index 6dc063a167a..b28a83f11ca 100644 --- a/crates/storage/db/src/lockfile.rs +++ b/crates/storage/db/src/lockfile.rs @@ -30,31 +30,35 @@ impl StorageLock { /// Note: In-process exclusivity is not on scope. If called from the same process (or another /// with the same PID), it will succeed. pub fn try_acquire(path: &Path) -> Result { - let file_path = path.join(LOCKFILE_NAME); - #[cfg(feature = "disable-lock")] { + let file_path = path.join(LOCKFILE_NAME); // Too expensive for ef-tests to write/read lock to/from disk. 
Ok(Self(Arc::new(StorageLockInner { file_path }))) } #[cfg(not(feature = "disable-lock"))] - { - if let Some(process_lock) = ProcessUID::parse(&file_path)? { - if process_lock.pid != (process::id() as usize) && process_lock.is_active() { - error!( - target: "reth::db::lockfile", - path = ?file_path, - pid = process_lock.pid, - start_time = process_lock.start_time, - "Storage lock already taken." - ); - return Err(StorageLockError::Taken(process_lock.pid)) - } - } + Self::try_acquire_file_lock(path) + } - Ok(Self(Arc::new(StorageLockInner::new(file_path)?))) + /// Acquire a file write lock. + #[cfg(any(test, not(feature = "disable-lock")))] + fn try_acquire_file_lock(path: &Path) -> Result { + let file_path = path.join(LOCKFILE_NAME); + if let Some(process_lock) = ProcessUID::parse(&file_path)? { + if process_lock.pid != (process::id() as usize) && process_lock.is_active() { + error!( + target: "reth::db::lockfile", + path = ?file_path, + pid = process_lock.pid, + start_time = process_lock.start_time, + "Storage lock already taken." + ); + return Err(StorageLockError::Taken(process_lock.pid)) + } } + + Ok(Self(Arc::new(StorageLockInner::new(file_path)?))) } } @@ -106,6 +110,7 @@ impl ProcessUID { let pid2 = sysinfo::Pid::from(pid); system.refresh_processes_specifics( sysinfo::ProcessesToUpdate::Some(&[pid2]), + true, ProcessRefreshKind::new(), ); system.process(pid2).map(|process| Self { pid, start_time: process.start_time() }) @@ -164,10 +169,10 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); - let lock = StorageLock::try_acquire(temp_dir.path()).unwrap(); + let lock = StorageLock::try_acquire_file_lock(temp_dir.path()).unwrap(); // Same process can re-acquire the lock - assert_eq!(Ok(lock.clone()), StorageLock::try_acquire(temp_dir.path())); + assert_eq!(Ok(lock.clone()), StorageLock::try_acquire_file_lock(temp_dir.path())); // A lock of a non existent PID can be acquired. let lock_file = temp_dir.path().join(LOCKFILE_NAME); @@ -177,18 +182,21 @@ mod tests { fake_pid += 1; } ProcessUID { pid: fake_pid, start_time: u64::MAX }.write(&lock_file).unwrap(); - assert_eq!(Ok(lock.clone()), StorageLock::try_acquire(temp_dir.path())); + assert_eq!(Ok(lock.clone()), StorageLock::try_acquire_file_lock(temp_dir.path())); let mut pid_1 = ProcessUID::new(1).unwrap(); // If a parsed `ProcessUID` exists, the lock can NOT be acquired. pid_1.write(&lock_file).unwrap(); - assert_eq!(Err(StorageLockError::Taken(1)), StorageLock::try_acquire(temp_dir.path())); + assert_eq!( + Err(StorageLockError::Taken(1)), + StorageLock::try_acquire_file_lock(temp_dir.path()) + ); // A lock of a different but existing PID can be acquired ONLY IF the start_time differs. pid_1.start_time += 1; pid_1.write(&lock_file).unwrap(); - assert_eq!(Ok(lock), StorageLock::try_acquire(temp_dir.path())); + assert_eq!(Ok(lock), StorageLock::try_acquire_file_lock(temp_dir.path())); } #[test] @@ -198,7 +206,7 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let lock_file = temp_dir.path().join(LOCKFILE_NAME); - let lock = StorageLock::try_acquire(temp_dir.path()).unwrap(); + let lock = StorageLock::try_acquire_file_lock(temp_dir.path()).unwrap(); assert!(lock_file.exists()); drop(lock); diff --git a/crates/storage/db/src/mdbx.rs b/crates/storage/db/src/mdbx.rs index d6947e10bd2..c0e11079f3a 100644 --- a/crates/storage/db/src/mdbx.rs +++ b/crates/storage/db/src/mdbx.rs @@ -1,6 +1,6 @@ //! Bindings for [MDBX](https://libmdbx.dqdkfa.ru/). 
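Returning to the lock-file refactor above, a usage sketch (the module paths and the `StorageLockError` location are assumptions):

```rust
use reth_db::lockfile::StorageLock;
use reth_storage_errors::lockfile::StorageLockError;
use std::path::Path;

// Sketch: the lock is keyed on PID + process start time, so the same
// process can re-acquire it without error.
fn acquire(datadir: &Path) -> Result<StorageLock, StorageLockError> {
    let lock = StorageLock::try_acquire(datadir)?;
    assert_eq!(lock, StorageLock::try_acquire(datadir)?); // same PID: Ok
    Ok(lock)
}
```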
-use crate::is_database_empty; +use crate::{is_database_empty, TableSet, Tables}; use eyre::Context; use std::path::Path; @@ -28,12 +28,21 @@ pub fn create_db>(path: P, args: DatabaseArguments) -> eyre::Resu Ok(DatabaseEnv::open(rpath, DatabaseEnvKind::RW, args)?) } -/// Opens up an existing database or creates a new one at the specified path. Creates tables if -/// necessary. Read/Write mode. +/// Opens up an existing database or creates a new one at the specified path. Creates tables defined +/// in [`Tables`] if necessary. Read/Write mode. pub fn init_db>(path: P, args: DatabaseArguments) -> eyre::Result { + init_db_for::(path, args) +} + +/// Opens up an existing database or creates a new one at the specified path. Creates tables defined +/// in the given [`TableSet`] if necessary. Read/Write mode. +pub fn init_db_for, TS: TableSet>( + path: P, + args: DatabaseArguments, +) -> eyre::Result { let client_version = args.client_version().clone(); let db = create_db(path, args)?; - db.create_tables()?; + db.create_tables_for::()?; db.record_client_version(client_version)?; Ok(db) } diff --git a/crates/storage/db/src/metrics.rs b/crates/storage/db/src/metrics.rs index fecd691ee5d..ed265d6e3aa 100644 --- a/crates/storage/db/src/metrics.rs +++ b/crates/storage/db/src/metrics.rs @@ -104,10 +104,11 @@ impl DatabaseEnvMetrics { value_size: Option, f: impl FnOnce() -> R, ) -> R { - self.operations - .get(&(table, operation)) - .expect("operation & table metric handle not found") - .record(value_size, f) + if let Some(metrics) = self.operations.get(&(table, operation)) { + metrics.record(value_size, f) + } else { + f() + } } /// Record metrics for opening a database transaction. @@ -347,7 +348,7 @@ impl OperationMetrics { // Record duration only for large values to prevent the performance hit of clock syscall // on small operations - if value_size.map_or(false, |size| size > LARGE_VALUE_THRESHOLD_BYTES) { + if value_size.is_some_and(|size| size > LARGE_VALUE_THRESHOLD_BYTES) { let start = Instant::now(); let result = f(); self.large_value_duration_seconds.record(start.elapsed()); diff --git a/crates/storage/db/src/static_file/mask.rs b/crates/storage/db/src/static_file/mask.rs index f5d35a193d7..38831ea34ca 100644 --- a/crates/storage/db/src/static_file/mask.rs +++ b/crates/storage/db/src/static_file/mask.rs @@ -1,38 +1,5 @@ use reth_db_api::table::Decompress; -/// Generic Mask helper struct for selecting specific column values to read and decompress. -/// -/// #### Explanation: -/// -/// A `NippyJar` static file row can contain multiple column values. To specify the column values -/// to be read, a mask is utilized. -/// -/// For example, a static file with three columns, if the first and last columns are queried, the -/// mask `0b101` would be passed. To select only the second column, the mask `0b010` would be used. -/// -/// Since each static file has its own column distribution, different wrapper types are necessary. -/// For instance, `B256` might be the third column in the `Header` segment, while being the second -/// column in another segment. Hence, `Mask` would only be applicable to one of these -/// scenarios. -/// -/// Alongside, the column selector traits (eg. [`ColumnSelectorOne`]) this provides a structured way -/// to tie the types to be decoded to the mask necessary to query them. -#[derive(Debug)] -pub struct Mask(std::marker::PhantomData<(FIRST, SECOND, THIRD)>); - -macro_rules! add_segments { - ($($segment:tt),+) => { - paste::paste! 
{ - $( - #[doc = concat!("Mask for ", stringify!($segment), " static file segment. See [`Mask`] for more.")] - #[derive(Debug)] - pub struct [<$segment Mask>](Mask); - )+ - } - }; -} -add_segments!(Header, Receipt, Transaction); - /// Trait for specifying a mask to select one column value. pub trait ColumnSelectorOne { /// First desired column value @@ -66,21 +33,45 @@ pub trait ColumnSelectorThree { #[macro_export] /// Add mask to select `N` column values from a specific static file segment row. macro_rules! add_static_file_mask { - ($mask_struct:tt, $type1:ty, $mask:expr) => { - impl ColumnSelectorOne for $mask_struct<$type1> { + ($(#[$attr:meta])* $mask_struct:ident $(<$generic:ident>)?, $type1:ty, $mask:expr) => { + $(#[$attr])* + #[derive(Debug)] + pub struct $mask_struct$(<$generic>)?$((std::marker::PhantomData<$generic>))?; + + impl$(<$generic>)? ColumnSelectorOne for $mask_struct$(<$generic>)? + where + $type1: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + { type FIRST = $type1; const MASK: usize = $mask; } }; - ($mask_struct:tt, $type1:ty, $type2:ty, $mask:expr) => { - impl ColumnSelectorTwo for $mask_struct<$type1, $type2> { + ($(#[$attr:meta])* $mask_struct:ident $(<$generic:ident>)?, $type1:ty, $type2:ty, $mask:expr) => { + $(#[$attr])* + #[derive(Debug)] + pub struct $mask_struct$(<$generic>)?$((std::marker::PhantomData<$generic>))?; + + impl$(<$generic>)? ColumnSelectorTwo for $mask_struct$(<$generic>)? + where + $type1: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + $type2: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + { type FIRST = $type1; type SECOND = $type2; const MASK: usize = $mask; } }; - ($mask_struct:tt, $type1:ty, $type2:ty, $type3:ty, $mask:expr) => { - impl ColumnSelectorThree for $mask_struct<$type1, $type2, $type3> { + ($(#[$attr:meta])* $mask_struct:ident $(<$generic:ident>)?, $type1:ty, $type2:ty, $type3:ty, $mask:expr) => { + $(#[$attr])* + #[derive(Debug)] + pub struct $mask_struct$(<$generic>)?$((std::marker::PhantomData<$generic>))?; + + impl$(<$generic>)? ColumnSelectorThree for $mask_struct$(<$generic>)? + where + $type1: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + $type2: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + $type3: Send + Sync + std::fmt::Debug + reth_db_api::table::Decompress, + { type FIRST = $type1; type SECOND = $type2; type THIRD = $type3; diff --git a/crates/storage/db/src/static_file/masks.rs b/crates/storage/db/src/static_file/masks.rs index ac2811a44d7..17833e7ee29 100644 --- a/crates/storage/db/src/static_file/masks.rs +++ b/crates/storage/db/src/static_file/masks.rs @@ -1,23 +1,44 @@ -use super::{ReceiptMask, TransactionMask}; use crate::{ add_static_file_mask, - static_file::mask::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask}, - HeaderTerminalDifficulties, RawValue, Receipts, Transactions, + static_file::mask::{ColumnSelectorOne, ColumnSelectorTwo}, + HeaderTerminalDifficulties, }; use alloy_primitives::BlockHash; use reth_db_api::table::Table; -use reth_primitives::Header; // HEADER MASKS -add_static_file_mask!(HeaderMask, Header, 0b001); -add_static_file_mask!(HeaderMask, ::Value, 0b010); -add_static_file_mask!(HeaderMask, BlockHash, 0b100); -add_static_file_mask!(HeaderMask, Header, BlockHash, 0b101); -add_static_file_mask!(HeaderMask, ::Value, BlockHash, 0b110); +add_static_file_mask! { + #[doc = "Mask for selecting a single header from Headers static file segment"] + HeaderMask, H, 0b001 +} +add_static_file_mask! 
{ + #[doc = "Mask for selecting a total difficulty value from Headers static file segment"] + TotalDifficultyMask, ::Value, 0b010 +} +add_static_file_mask! { + #[doc = "Mask for selecting a block hash value from Headers static file segment"] + BlockHashMask, BlockHash, 0b100 +} +add_static_file_mask! { + #[doc = "Mask for selecting a header along with block hash from Headers static file segment"] + HeaderWithHashMask, H, BlockHash, 0b101 +} +add_static_file_mask! { + #[doc = "Mask for selecting a total difficulty along with block hash from Headers static file segment"] + TDWithHashMask, + ::Value, + BlockHash, + 0b110 +} // RECEIPT MASKS -add_static_file_mask!(ReceiptMask, ::Value, 0b1); +add_static_file_mask! { + #[doc = "Mask for selecting a single receipt from Receipts static file segment"] + ReceiptMask, R, 0b1 +} // TRANSACTION MASKS -add_static_file_mask!(TransactionMask, ::Value, 0b1); -add_static_file_mask!(TransactionMask, RawValue<::Value>, 0b1); +add_static_file_mask! { + #[doc = "Mask for selecting a single transaction from Transactions static file segment"] + TransactionMask, T, 0b1 +} diff --git a/crates/storage/db/src/static_file/mod.rs b/crates/storage/db/src/static_file/mod.rs index f27a574f640..8491bd6ed77 100644 --- a/crates/storage/db/src/static_file/mod.rs +++ b/crates/storage/db/src/static_file/mod.rs @@ -17,6 +17,7 @@ use reth_primitives::{ }; mod masks; +pub use masks::*; /// Alias type for a map of [`StaticFileSegment`] and sorted lists of existing static file ranges. type SortedStaticFiles = @@ -38,7 +39,7 @@ pub fn iter_static_files(path: impl AsRef) -> Result>(); for entry in entries { - if entry.metadata().map_or(false, |metadata| metadata.is_file()) { + if entry.metadata().is_ok_and(|metadata| metadata.is_file()) { if let Some((segment, _)) = StaticFileSegment::parse_filename(&entry.file_name().to_string_lossy()) { diff --git a/crates/storage/db/src/tables/codecs/fuzz/inputs.rs b/crates/storage/db/src/tables/codecs/fuzz/inputs.rs index bb26e8b9e21..da15c112e62 100644 --- a/crates/storage/db/src/tables/codecs/fuzz/inputs.rs +++ b/crates/storage/db/src/tables/codecs/fuzz/inputs.rs @@ -1,6 +1,6 @@ //! Curates the input coming from the fuzzer for certain types. -use reth_primitives_traits::IntegerList; +use reth_db_api::models::IntegerList; use serde::{Deserialize, Serialize}; /// Makes sure that the list provided by the fuzzer is not empty and pre-sorted diff --git a/crates/storage/db/src/tables/codecs/fuzz/mod.rs b/crates/storage/db/src/tables/codecs/fuzz/mod.rs index e64a3841df4..f6b68897e34 100644 --- a/crates/storage/db/src/tables/codecs/fuzz/mod.rs +++ b/crates/storage/db/src/tables/codecs/fuzz/mod.rs @@ -16,9 +16,6 @@ macro_rules! 
impl_fuzzer_with_input { pub mod $name { use reth_db_api::table; - #[allow(unused_imports)] - - #[allow(unused_imports)] use reth_primitives_traits::*;
diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 83a063903e0..961fd41e97c 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -19,21 +19,20 @@ pub use raw::{RawDupSort, RawKey, RawTable, RawValue, TableRawRow}; #[cfg(feature = "mdbx")] pub(crate) mod utils; +use alloy_consensus::Header; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256}; use reth_db_api::{ models::{ accounts::BlockNumberAddress, blocks::{HeaderHash, StoredBlockOmmers}, storage_sharded_key::StorageShardedKey, - AccountBeforeTx, ClientVersion, CompactU256, ShardedKey, StoredBlockBodyIndices, - StoredBlockWithdrawals, + AccountBeforeTx, ClientVersion, CompactU256, IntegerList, ShardedKey, + StoredBlockBodyIndices, StoredBlockWithdrawals, }, - table::{Decode, DupSort, Encode, Table}, + table::{Decode, DupSort, Encode, Table, TableInfo}, }; -use reth_primitives::{ - Account, Bytecode, Header, Receipt, Requests, StorageEntry, TransactionSignedNoHash, -}; -use reth_primitives_traits::IntegerList; +use reth_primitives::{Receipt, StorageEntry, TransactionSigned}; +use reth_primitives_traits::{Account, Bytecode}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; use reth_trie_common::{BranchNodeCompact, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey}; @@ -99,6 +98,13 @@ pub trait TableViewer<R> { } } +/// General trait for defining the set of tables +/// Used to initialize database +pub trait TableSet { + /// Returns an iterator over the tables + fn tables() -> Box<dyn Iterator<Item = Box<dyn TableInfo>>>; +} + /// Defines all the tables in the database. #[macro_export] macro_rules! tables { @@ -108,29 +114,42 @@ macro_rules! tables { (@view $name:ident $v:ident) => { $v.view::<$name>() }; (@view $name:ident $v:ident $_subkey:ty) => { $v.view_dupsort::<$name>() }; - ($( $(#[$attr:meta])* table $name:ident; )*) => { + (@value_doc $key:ty, $value:ty) => { + concat!("[`", stringify!($value), "`]") + }; + // Don't generate links if we have generics + (@value_doc $key:ty, $value:ty, $($generic:ident),*) => { + concat!("`", stringify!($value), "`") + }; + + ($($(#[$attr:meta])* table $name:ident$(<$($generic:ident $(= $default:ty)?),*>)? { type Key = $key:ty; type Value = $value:ty; $(type SubKey = $subkey:ty;)? } )*) => { // Table marker types. $( $(#[$attr])* /// - #[doc = concat!("Marker type representing a database table mapping [`", stringify!($key), "`] to [`", stringify!($value), "`].")] + #[doc = concat!("Marker type representing a database table mapping [`", stringify!($key), "`] to ", tables!(@value_doc $key, $value, $($($generic),*)?), ".")] $( #[doc = concat!("\n\nThis table's `DUPSORT` subkey is [`", stringify!($subkey), "`].")] )? - pub struct $name { - _private: (), + pub struct $name$(<$($generic $( = $default)?),*>)? { + _private: std::marker::PhantomData<($($($generic,)*)?)>, } // Ideally this implementation wouldn't exist, but it is necessary to derive `Debug` // when a type is generic over `T: Table`. See: https://github.com/rust-lang/rust/issues/26925 - impl fmt::Debug for $name { + impl$(<$($generic),*>)? fmt::Debug for $name$(<$($generic),*>)? { fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result { unreachable!("this type cannot be instantiated") } } - impl reth_db_api::table::Table for $name { + impl$(<$($generic),*>)?
reth_db_api::table::Table for $name$(<$($generic),*>)? + where + $value: reth_db_api::table::Value + 'static + $($(,$generic: Send + Sync)*)? + { const NAME: &'static str = table_names::$name; + const DUPSORT: bool = tables!(@bool $($subkey)?); type Key = $key; type Value = $value; @@ -231,6 +250,22 @@ macro_rules! tables { } } + impl TableInfo for Tables { + fn name(&self) -> &'static str { + self.name() + } + + fn is_dupsort(&self) -> bool { + self.is_dupsort() + } + } + + impl TableSet for Tables { + fn tables() -> Box>> { + Box::new(Self::ALL.iter().map(|table| Box::new(*table) as Box)) + } + } + // Need constants to match on in the `FromStr` implementation. #[allow(non_upper_case_globals)] mod table_names { @@ -250,7 +285,7 @@ macro_rules! tables { /// use reth_db_api::table::Table; /// /// let table = Tables::Headers; - /// let result = tables_to_generic!(table, |GenericTable| GenericTable::NAME); + /// let result = tables_to_generic!(table, |GenericTable| ::NAME); /// assert_eq!(result, table.name()); /// ``` #[macro_export] @@ -271,53 +306,96 @@ macro_rules! tables { tables! { /// Stores the header hashes belonging to the canonical chain. - table CanonicalHeaders; + table CanonicalHeaders { + type Key = BlockNumber; + type Value = HeaderHash; + } /// Stores the total difficulty from a block header. - table HeaderTerminalDifficulties; + table HeaderTerminalDifficulties { + type Key = BlockNumber; + type Value = CompactU256; + } /// Stores the block number corresponding to a header. - table HeaderNumbers; + table HeaderNumbers { + type Key = BlockHash; + type Value = BlockNumber; + } /// Stores header bodies. - table Headers; + table Headers { + type Key = BlockNumber; + type Value = H; + } /// Stores block indices that contains indexes of transaction and the count of them. /// /// More information about stored indices can be found in the [`StoredBlockBodyIndices`] struct. - table BlockBodyIndices; + table BlockBodyIndices { + type Key = BlockNumber; + type Value = StoredBlockBodyIndices; + } /// Stores the uncles/ommers of the block. - table BlockOmmers; + table BlockOmmers { + type Key = BlockNumber; + type Value = StoredBlockOmmers; + } /// Stores the block withdrawals. - table BlockWithdrawals; + table BlockWithdrawals { + type Key = BlockNumber; + type Value = StoredBlockWithdrawals; + } /// Canonical only Stores the transaction body for canonical transactions. - table Transactions; + table Transactions { + type Key = TxNumber; + type Value = T; + } /// Stores the mapping of the transaction hash to the transaction number. - table TransactionHashNumbers; + table TransactionHashNumbers { + type Key = TxHash; + type Value = TxNumber; + } /// Stores the mapping of transaction number to the blocks number. /// /// The key is the highest transaction ID in the block. - table TransactionBlocks; + table TransactionBlocks { + type Key = TxNumber; + type Value = BlockNumber; + } /// Canonical only Stores transaction receipts. - table Receipts; + table Receipts { + type Key = TxNumber; + type Value = R; + } /// Stores all smart contract bytecodes. /// There will be multiple accounts that have same bytecode /// So we would need to introduce reference counter. /// This will be small optimization on state. - table Bytecodes; + table Bytecodes { + type Key = B256; + type Value = Bytecode; + } /// Stores the current state of an [`Account`]. - table PlainAccountState; + table PlainAccountState { + type Key = Address; + type Value = Account; + } /// Stores the current value of a storage key. 
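// NOTE: illustrative sketch, not part of this changeset. The `TableSet` and
// `TableInfo` traits above let database initialization iterate over a declared
// set of tables instead of hard-coding the `Tables` enum. A minimal,
// self-contained model of the pattern (all `*Sketch` names are hypothetical):
trait TableInfoSketch {
    fn name(&self) -> &'static str;
    fn is_dupsort(&self) -> bool;
}
trait TableSetSketch {
    fn tables() -> Box<dyn Iterator<Item = Box<dyn TableInfoSketch>>>;
}
fn init_tables<TS: TableSetSketch>(mut create_table: impl FnMut(&'static str, bool)) {
    // Create every declared table, honoring its DUPSORT flag.
    for table in TS::tables() {
        create_table(table.name(), table.is_dupsort());
    }
}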
- table PlainStorageState; + table PlainStorageState { + type Key = Address; + type Value = StorageEntry; + type SubKey = B256; + } /// Stores pointers to block changeset with changes for each account key. /// @@ -337,7 +415,10 @@ tables! { /// * If there were no shard we would get `None` entry or entry of different storage key. /// /// Code example can be found in `reth_provider::HistoricalStateProviderRef` - table AccountsHistory<Key = ShardedKey<Address>, Value = BlockNumberList>; + table AccountsHistory { + type Key = ShardedKey<Address>
; + type Value = BlockNumberList; + } /// Stores pointers to block number changeset with changes for each storage key. /// @@ -357,58 +438,98 @@ tables! { /// * If there were no shard we would get `None` entry or entry of different storage key. /// /// Code example can be found in `reth_provider::HistoricalStateProviderRef` - table StoragesHistory; + table StoragesHistory { + type Key = StorageShardedKey; + type Value = BlockNumberList; + } /// Stores the state of an account before a certain transaction changed it. /// Change on state can be: account is created, selfdestructed, touched while empty /// or changed balance,nonce. - table AccountChangeSets; + table AccountChangeSets { + type Key = BlockNumber; + type Value = AccountBeforeTx; + type SubKey = Address; + } /// Stores the state of a storage key before a certain transaction changed it. /// If [`StorageEntry::value`] is zero, this means storage was not existing /// and needs to be removed. - table StorageChangeSets; + table StorageChangeSets { + type Key = BlockNumberAddress; + type Value = StorageEntry; + type SubKey = B256; + } /// Stores the current state of an [`Account`] indexed with `keccak256Address` /// This table is in preparation for merklization and calculation of state root. /// We are saving whole account data as it is needed for partial update when /// part of storage is changed. Benefit for merklization is that hashed addresses are sorted. - table HashedAccounts; + table HashedAccounts { + type Key = B256; + type Value = Account; + } /// Stores the current storage values indexed with `keccak256Address` and /// hash of storage key `keccak256key`. /// This table is in preparation for merklization and calculation of state root. /// Benefit for merklization is that hashed addresses/keys are sorted. - table HashedStorages; + table HashedStorages { + type Key = B256; + type Value = StorageEntry; + type SubKey = B256; + } /// Stores the current state's Merkle Patricia Tree. - table AccountsTrie; + table AccountsTrie { + type Key = StoredNibbles; + type Value = BranchNodeCompact; + } /// From HashedAddress => NibblesSubKey => Intermediate value - table StoragesTrie; + table StoragesTrie { + type Key = B256; + type Value = StorageTrieEntry; + type SubKey = StoredNibblesSubKey; + } /// Stores the transaction sender for each canonical transaction. /// It is needed to speed up execution stage and allows fetching signer without doing /// transaction signed recovery - table TransactionSenders; + table TransactionSenders { + type Key = TxNumber; + type Value = Address; + } /// Stores the highest synced block number and stage-specific checkpoint of each stage. - table StageCheckpoints; + table StageCheckpoints { + type Key = StageId; + type Value = StageCheckpoint; + } /// Stores arbitrary data to keep track of a stage first-sync progress. - table StageCheckpointProgresses>; + table StageCheckpointProgresses { + type Key = StageId; + type Value = Vec; + } /// Stores the highest pruned block number and prune mode of each prune segment. - table PruneCheckpoints; + table PruneCheckpoints { + type Key = PruneSegment; + type Value = PruneCheckpoint; + } /// Stores the history of client versions that have accessed the database with write privileges by unix timestamp in seconds. - table VersionHistory; - - /// Stores EIP-7685 EL -> CL requests, indexed by block number. - table BlockRequests; + table VersionHistory { + type Key = u64; + type Value = ClientVersion; + } /// Stores generic chain state info, like the last finalized block. 
- table ChainState; + table ChainState { + type Key = ChainStateKey; + type Value = BlockNumber; + } } /// Keys for the `ChainState` table.
diff --git a/crates/storage/db/src/tables/raw.rs b/crates/storage/db/src/tables/raw.rs index 6b6de41613e..453116ee5e3 100644 --- a/crates/storage/db/src/tables/raw.rs +++ b/crates/storage/db/src/tables/raw.rs @@ -14,6 +14,7 @@ pub struct RawTable<T: Table> { impl<T: Table> Table for RawTable<T> { const NAME: &'static str = T::NAME; + const DUPSORT: bool = false; type Key = RawKey<T::Key>; type Value = RawValue<T::Value>; @@ -28,6 +29,7 @@ pub struct RawDupSort<T: DupSort> { impl<T: DupSort> Table for RawDupSort<T> { const NAME: &'static str = T::NAME; + const DUPSORT: bool = true; type Key = RawKey<T::Key>; type Value = RawValue<T::Value>;
diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml index 52c93ae4ef0..2e864e09d43 100644 --- a/crates/storage/errors/Cargo.toml +++ b/crates/storage/errors/Cargo.toml @@ -12,8 +12,9 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-fs-util.workspace = true +reth-static-file-types.workspace = true # ethereum alloy-eips.workspace = true @@ -25,4 +26,10 @@ derive_more.workspace = true [features] default = ["std"] -std = [] +std = [ "alloy-eips/std", "alloy-primitives/std", "alloy-rlp/std", "derive_more/std", "reth-primitives-traits/std" ]
diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index d60a2adb92b..d4b69cffb08 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -1,13 +1,10 @@ use crate::{db::DatabaseError, lockfile::StorageLockError, writer::UnifiedStorageWriterError}; -use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256, U256}; -use derive_more::Display; -use reth_primitives::{GotExpected, StaticFileSegment, TxHashOrNumber}; - -#[cfg(feature = "std")] -use std::path::PathBuf; - use alloc::{boxed::Box, string::String}; +use alloy_eips::{BlockHashOrNumber, HashOrNumber}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxNumber, B256}; +use derive_more::Display; +use reth_primitives_traits::GotExpected; +use reth_static_file_types::StaticFileSegment; /// Provider result type. pub type ProviderResult<Ok> = Result<Ok, ProviderError>; @@ -66,12 +63,12 @@ pub enum ProviderError { /// when required header related data was not found but was required. #[display("no header found for {_0:?}")] HeaderNotFound(BlockHashOrNumber), - /// The specific transaction is missing. + /// The specific transaction identified by hash or id is missing. #[display("no transaction found for {_0:?}")] - TransactionNotFound(TxHashOrNumber), - /// The specific receipt is missing + TransactionNotFound(HashOrNumber), + /// The specific receipt for a transaction identified by hash or id is missing #[display("no receipt found for {_0:?}")] - ReceiptNotFound(TxHashOrNumber), + ReceiptNotFound(HashOrNumber), /// Unable to find the best block. #[display("best block does not exist")] BestBlockNotFound, @@ -81,15 +78,6 @@ pub enum ProviderError { /// Unable to find the safe block. #[display("safe block does not exist")] SafeBlockNotFound,
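// NOTE: illustrative sketch, not part of this changeset. `TransactionNotFound`
// and `ReceiptNotFound` now carry alloy's `HashOrNumber`, which covers both
// lookup styles with one type; `From` conversions exist for both forms:
fn hash_or_number_examples() {
    use alloy_eips::HashOrNumber;
    use alloy_primitives::B256;
    let by_number: HashOrNumber = 42u64.into(); // a TxNumber is a u64
    let by_hash: HashOrNumber = B256::ZERO.into(); // a transaction hash
    let _ = (by_number, by_hash);
}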
- #[display("stored block indices does not match transaction count")] - BlockBodyTransactionCount, /// Thrown when the cache service task dropped. #[display("cache service task stopped")] CacheServiceUnavailable, @@ -120,7 +108,7 @@ pub enum ProviderError { /// Static File is not found at specified path. #[cfg(feature = "std")] #[display("not able to find {_0} static file at {_1:?}")] - MissingStaticFilePath(StaticFileSegment, PathBuf), + MissingStaticFilePath(StaticFileSegment, std::path::PathBuf), /// Static File is not found for requested block. #[display("not able to find {_0} static file for block number {_1}")] MissingStaticFileBlock(StaticFileSegment, BlockNumber), @@ -133,12 +121,12 @@ pub enum ProviderError { /// Trying to insert data from an unexpected block number. #[display("trying to append data to {_0} as block #{_1} but expected block #{_2}")] UnexpectedStaticFileBlockNumber(StaticFileSegment, BlockNumber, BlockNumber), + /// Trying to insert data from an unexpected block number. + #[display("trying to append row to {_0} at index #{_1} but expected index #{_2}")] + UnexpectedStaticFileTxNumber(StaticFileSegment, TxNumber, TxNumber), /// Static File Provider was initialized as read-only. #[display("cannot get a writer on a read-only environment.")] ReadOnlyStaticFileAccess, - /// Error encountered when the block number conversion from U256 to u64 causes an overflow. - #[display("failed to convert block number U256 to u64: {_0}")] - BlockNumberOverflow(U256), /// Consistent view error. #[display("failed to initialize consistent view: {_0}")] ConsistentView(Box), @@ -146,6 +134,8 @@ pub enum ProviderError { StorageLockError(StorageLockError), /// Storage writer error. UnifiedStorageWriterError(UnifiedStorageWriterError), + /// Received invalid output from configured storage implementation. 
+ InvalidStorageOutput, } impl From for ProviderError { @@ -176,7 +166,6 @@ impl core::error::Error for ProviderError { fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { match self { Self::Database(source) => core::error::Error::source(source), - Self::Rlp(source) => core::error::Error::source(source), Self::StorageLockError(source) => core::error::Error::source(source), Self::UnifiedStorageWriterError(source) => core::error::Error::source(source), _ => Option::None, diff --git a/crates/storage/errors/src/writer.rs b/crates/storage/errors/src/writer.rs index 10d4ad96ed3..3e060d7005d 100644 --- a/crates/storage/errors/src/writer.rs +++ b/crates/storage/errors/src/writer.rs @@ -1,5 +1,5 @@ use crate::db::DatabaseError; -use reth_primitives::StaticFileSegment; +use reth_static_file_types::StaticFileSegment; /// `UnifiedStorageWriter` related errors /// `StorageWriter` related errors diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index fa10a73cb33..4679f4fe914 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -15,7 +15,7 @@ workspace = true reth-mdbx-sys.workspace = true bitflags.workspace = true -byteorder = "1" +byteorder.workspace = true derive_more.workspace = true indexmap = "2" parking_lot.workspace = true diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 index 7b182325b31..0934fea1c16 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/man1/mdbx_chk.1 @@ -27,7 +27,7 @@ mdbx_chk \- MDBX checking tool .SH DESCRIPTION The .B mdbx_chk -utility intended to check an MDBX database file. +utility is intended to check an MDBX database file. .SH OPTIONS .TP .BR \-V @@ -55,7 +55,7 @@ check, including full check of all meta-pages and actual size of database file. .BR \-w Open environment in read-write mode and lock for writing while checking. This could be impossible if environment already used by another process(s) -in an incompatible read-write mode. This allow rollback to last steady commit +in an incompatible read-write mode. This allows rollback to last steady commit (in case environment was not closed properly) and then check transaction IDs of meta-pages. Otherwise, without \fB\-w\fP option environment will be opened in read-only mode. @@ -90,7 +90,7 @@ then forcibly loads ones by sequential access and tries to lock database pages i .TP .BR \-n Open MDBX environment(s) which do not use subdirectories. -This is legacy option. For now MDBX handles this automatically. +This is a legacy option. For now MDBX handles this automatically. .SH DIAGNOSTICS Exit status is zero if no errors occur. Errors result in a non-zero exit status diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h index 43960abfb4c..2665931de52 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h @@ -136,7 +136,7 @@ are only a few cases of changing data. 
| _DELETING_||| |Key is absent → Error since no such key |\ref mdbx_del() or \ref mdbx_replace()|Error \ref MDBX_NOTFOUND| |Key exist → Delete by key |\ref mdbx_del() with the parameter `data = NULL`|Deletion| -|Key exist → Delete by key with with data matching check|\ref mdbx_del() with the parameter `data` filled with the value which should be match for deletion|Deletion or \ref MDBX_NOTFOUND if the value does not match| +|Key exist → Delete by key with data matching check|\ref mdbx_del() with the parameter `data` filled with the value which should be match for deletion|Deletion or \ref MDBX_NOTFOUND if the value does not match| |Delete at the current cursor position |\ref mdbx_cursor_del() with \ref MDBX_CURRENT flag|Deletion| |Extract (read & delete) value by the key |\ref mdbx_replace() with zero flag and parameter `new_data = NULL`|Returning a deleted value| @@ -1413,7 +1413,7 @@ enum MDBX_env_flags_t { * \ref mdbx_env_set_syncbytes() and \ref mdbx_env_set_syncperiod() functions * could be very useful with `MDBX_SAFE_NOSYNC` flag. * - * The number and volume of of disk IOPs with MDBX_SAFE_NOSYNC flag will + * The number and volume of disk IOPs with MDBX_SAFE_NOSYNC flag will * exactly the as without any no-sync flags. However, you should expect a * larger process's [work set](https://bit.ly/2kA2tFX) and significantly worse * a [locality of reference](https://bit.ly/2mbYq2J), due to the more @@ -2079,7 +2079,7 @@ enum MDBX_option_t { * for all processes interacting with the database. * * \details This defines the number of slots in the lock table that is used to - * track readers in the the environment. The default is about 100 for 4K + * track readers in the environment. The default is about 100 for 4K * system page size. Starting a read-only transaction normally ties a lock * table slot to the current thread until the environment closes or the thread * exits. If \ref MDBX_NOTLS is in use, \ref mdbx_txn_begin() instead ties the @@ -3343,7 +3343,7 @@ mdbx_limits_txnsize_max(intptr_t pagesize); * \ingroup c_settings * * \details This defines the number of slots in the lock table that is used to - * track readers in the the environment. The default is about 100 for 4K system + * track readers in the environment. The default is about 100 for 4K system * page size. Starting a read-only transaction normally ties a lock table slot * to the current thread until the environment closes or the thread exits. If * \ref MDBX_NOTLS is in use, \ref mdbx_txn_begin() instead ties the slot to the @@ -5264,7 +5264,7 @@ LIBMDBX_API int mdbx_dbi_sequence(MDBX_txn *txn, MDBX_dbi dbi, uint64_t *result, * This returns a comparison as if the two data items were keys in the * specified database. * - * \warning There ss a Undefined behavior if one of arguments is invalid. + * \warning There is a Undefined behavior if one of arguments is invalid. * * \param [in] txn A transaction handle returned by \ref mdbx_txn_begin(). * \param [in] dbi A database handle returned by \ref mdbx_dbi_open(). diff --git a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ index dbe94755087..767f3791280 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ +++ b/crates/storage/libmdbx-rs/mdbx-sys/libmdbx/mdbx.h++ @@ -851,7 +851,7 @@ struct LIBMDBX_API_TYPE slice : public ::MDBX_val { /// \brief Checks whether the content of the slice is printable. 
/// \param [in] disable_utf8 By default if `disable_utf8` is `false` function /// checks that content bytes are printable ASCII-7 characters or a valid UTF8 - /// sequences. Otherwise, if if `disable_utf8` is `true` function checks that + /// sequences. Otherwise, if `disable_utf8` is `true` function checks that /// content bytes are printable extended 8-bit ASCII codes. MDBX_NOTHROW_PURE_FUNCTION bool is_printable(bool disable_utf8 = false) const noexcept;
diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index 3deff0c249b..26cfef54d8d 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -59,19 +59,18 @@ where } /// Returns an iterator over the raw key value slices. - #[allow(clippy::needless_lifetimes)] - pub fn iter_slices<'a>(&'a self) -> IntoIter<'a, K, Cow<'a, [u8]>, Cow<'a, [u8]>> { + pub fn iter_slices<'a>(self) -> IntoIter<K, Cow<'a, [u8]>, Cow<'a, [u8]>> { self.into_iter() } /// Returns an iterator over database items. #[allow(clippy::should_implement_trait)] - pub fn into_iter<Key, Value>(&self) -> IntoIter<'_, K, Key, Value> + pub fn into_iter<Key, Value>(self) -> IntoIter<K, Key, Value> where Key: TableObject, Value: TableObject, { - IntoIter::new(self.clone(), MDBX_NEXT, MDBX_NEXT) + IntoIter::new(self, MDBX_NEXT, MDBX_NEXT) } /// Retrieves a key/data pair from the cursor. Depending on the cursor op, @@ -508,7 +507,7 @@ unsafe impl<K> Sync for Cursor<K> where K: TransactionKind {} /// An iterator over the key/value pairs in an MDBX database. #[derive(Debug)] -pub enum IntoIter<'cur, K, Key, Value> +pub enum IntoIter<K, Key, Value> where K: TransactionKind, Key: TableObject, @@ -535,11 +534,11 @@ where /// The next and subsequent operations to perform. next_op: ffi::MDBX_cursor_op, - _marker: PhantomData<(&'cur (), Key, Value)>, + _marker: PhantomData<(Key, Value)>, }, } -impl<K, Key, Value> IntoIter<'_, K, Key, Value> +impl<K, Key, Value> IntoIter<K, Key, Value> where K: TransactionKind, Key: TableObject, @@ -547,11 +546,11 @@ where { /// Creates a new iterator backed by the given cursor. fn new(cursor: Cursor<K>, op: ffi::MDBX_cursor_op, next_op: ffi::MDBX_cursor_op) -> Self { - IntoIter::Ok { cursor, op, next_op, _marker: Default::default() } + Self::Ok { cursor, op, next_op, _marker: Default::default() } } } -impl<K, Key, Value> Iterator for IntoIter<'_, K, Key, Value> +impl<K, Key, Value> Iterator for IntoIter<K, Key, Value> where K: TransactionKind, Key: TableObject, @@ -747,13 +746,13 @@ where } } -impl<'cur, K, Key, Value> Iterator for IterDup<'cur, K, Key, Value> +impl<K, Key, Value> Iterator for IterDup<'_, K, Key, Value> where K: TransactionKind, Key: TableObject, Value: TableObject, { - type Item = IntoIter<'cur, K, Key, Value>; + type Item = IntoIter<K, Key, Value>; fn next(&mut self) -> Option<Self::Item> { match self {
diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index edf9321ace4..6a0b210401e 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -4,7 +4,7 @@ use crate::{ flags::EnvironmentFlags, transaction::{RO, RW}, txn_manager::{TxnManager, TxnManagerMessage, TxnPtr}, - Transaction, TransactionKind, + Mode, SyncMode, Transaction, TransactionKind, }; use byteorder::{ByteOrder, NativeEndian}; use mem::size_of; @@ -72,14 +72,14 @@ impl Environment { /// Returns true if the environment was opened in [`crate::Mode::ReadWrite`] mode. #[inline] - pub fn is_read_write(&self) -> bool { - self.inner.env_kind.is_write_map() + pub fn is_read_write(&self) -> Result<bool> { + Ok(!self.is_read_only()?)
} /// Returns true if the environment was opened in [`crate::Mode::ReadOnly`] mode. #[inline] - pub fn is_read_only(&self) -> bool { - !self.inner.env_kind.is_write_map() + pub fn is_read_only(&self) -> Result<bool> { + Ok(matches!(self.info()?.mode(), Mode::ReadOnly)) } /// Returns the transaction manager. @@ -425,6 +425,23 @@ impl Info { fsync: self.0.mi_pgop_stat.fsync, } } + + /// Return the mode of the database + #[inline] + pub const fn mode(&self) -> Mode { + let mode = self.0.mi_mode; + if (mode & ffi::MDBX_RDONLY) != 0 { + Mode::ReadOnly + } else if (mode & ffi::MDBX_UTTERLY_NOSYNC) != 0 { + Mode::ReadWrite { sync_mode: SyncMode::UtterlyNoSync } + } else if (mode & ffi::MDBX_NOMETASYNC) != 0 { + Mode::ReadWrite { sync_mode: SyncMode::NoMetaSync } + } else if (mode & ffi::MDBX_SAFE_NOSYNC) != 0 { + Mode::ReadWrite { sync_mode: SyncMode::SafeNoSync } + } else { + Mode::ReadWrite { sync_mode: SyncMode::Durable } + } + } } impl fmt::Debug for Environment { @@ -472,8 +489,10 @@ pub struct PageOps { pub mincore: u64, } +/// Represents the geometry settings for the database environment #[derive(Clone, Debug, PartialEq, Eq)] pub struct Geometry<R> { + /// The size range in bytes. pub size: Option<R>, pub growth_step: Option<isize>, pub shrink_threshold: Option<isize>, @@ -781,15 +800,14 @@ impl EnvironmentBuilder { } /// Sets the interprocess/shared threshold to force flush the data buffers to disk, if - /// [`SyncMode::SafeNoSync`](crate::flags::SyncMode::SafeNoSync) is used. + /// [`SyncMode::SafeNoSync`] is used. pub fn set_sync_bytes(&mut self, v: usize) -> &mut Self { self.sync_bytes = Some(v as u64); self } /// Sets the interprocess/shared relative period since the last unsteady commit to force flush - /// the data buffers to disk, if [`SyncMode::SafeNoSync`](crate::flags::SyncMode::SafeNoSync) is - /// used. + /// the data buffers to disk, if [`SyncMode::SafeNoSync`] is used. pub fn set_sync_period(&mut self, v: Duration) -> &mut Self { // For this option, mdbx uses units of 1/65536 of a second. let as_mdbx_units = (v.as_secs_f64() * 65536f64) as u64;
diff --git a/crates/storage/libmdbx-rs/src/error.rs b/crates/storage/libmdbx-rs/src/error.rs index a70488b0826..a8c74556597 100644 --- a/crates/storage/libmdbx-rs/src/error.rs +++ b/crates/storage/libmdbx-rs/src/error.rs @@ -97,7 +97,7 @@ pub enum Error { #[error("invalid parameter specified")] DecodeError, /// The environment opened in read-only. - #[error("the environment opened in read-only")] + #[error("the environment opened in read-only, check for more")] Access, /// Database is too large for the current system. #[error("database is too large for the current system")] TooLarge, @@ -238,7 +238,10 @@ mod tests { #[test] fn test_description() { - assert_eq!("the environment opened in read-only", Error::from_err_code(13).to_string()); + assert_eq!( + "the environment opened in read-only, check for more", + Error::from_err_code(13).to_string() + ); assert_eq!("file is not an MDBX file", Error::Invalid.to_string()); }
diff --git a/crates/storage/libmdbx-rs/src/flags.rs b/crates/storage/libmdbx-rs/src/flags.rs index d733327cefa..1457195be78 100644 --- a/crates/storage/libmdbx-rs/src/flags.rs +++ b/crates/storage/libmdbx-rs/src/flags.rs @@ -56,7 +56,7 @@ pub enum SyncMode { /// flag could be used with [`Environment::sync()`](crate::Environment::sync) as alternatively /// for batch committing or nested transaction (in some cases).
/// - /// The number and volume of of disk IOPs with [`SyncMode::SafeNoSync`] flag will exactly the + /// The number and volume of disk IOPs with [`SyncMode::SafeNoSync`] flag will exactly the /// as without any no-sync flags. However, you should expect a larger process's work set /// and significantly worse a locality of reference, due to the more intensive allocation /// of previously unused pages and increase the size of the database. diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 88236ebe991..84b2dabc90a 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -6,7 +6,7 @@ use crate::{ txn_manager::{TxnManagerMessage, TxnPtr}, Cursor, Error, Stat, TableObject, }; -use ffi::{mdbx_txn_renew, MDBX_txn_flags_t, MDBX_TXN_RDONLY, MDBX_TXN_READWRITE}; +use ffi::{MDBX_txn_flags_t, MDBX_TXN_RDONLY, MDBX_TXN_READWRITE}; use indexmap::IndexSet; use parking_lot::{Mutex, MutexGuard}; use std::{ @@ -18,6 +18,9 @@ use std::{ time::Duration, }; +#[cfg(feature = "read-tx-timeouts")] +use ffi::mdbx_txn_renew; + mod private { use super::*; diff --git a/crates/storage/libmdbx-rs/src/txn_manager.rs b/crates/storage/libmdbx-rs/src/txn_manager.rs index 716e8ee62bd..ae4a93724c4 100644 --- a/crates/storage/libmdbx-rs/src/txn_manager.rs +++ b/crates/storage/libmdbx-rs/src/txn_manager.rs @@ -5,7 +5,10 @@ use crate::{ }; use std::{ ptr, - sync::mpsc::{sync_channel, Receiver, SyncSender}, + sync::{ + mpsc::{sync_channel, Receiver, SyncSender}, + Arc, + }, }; #[derive(Copy, Clone, Debug)] @@ -28,7 +31,7 @@ pub(crate) enum TxnManagerMessage { pub(crate) struct TxnManager { sender: SyncSender, #[cfg(feature = "read-tx-timeouts")] - read_transactions: Option>, + read_transactions: Option>, } impl TxnManager { @@ -289,11 +292,11 @@ mod read_transactions { // Sleep not more than `READ_TRANSACTIONS_CHECK_INTERVAL`, but at least until // the closest deadline of an active read transaction - let duration_until_closest_deadline = - self.max_duration - max_active_transaction_duration.unwrap_or_default(); - std::thread::sleep( - READ_TRANSACTIONS_CHECK_INTERVAL.min(duration_until_closest_deadline), + let sleep_duration = READ_TRANSACTIONS_CHECK_INTERVAL.min( + self.max_duration - max_active_transaction_duration.unwrap_or_default(), ); + trace!(target: "libmdbx", ?sleep_duration, elapsed = ?now.elapsed(), "Putting transaction monitor to sleep"); + std::thread::sleep(sleep_duration); } }; std::thread::Builder::new() diff --git a/crates/storage/libmdbx-rs/tests/environment.rs b/crates/storage/libmdbx-rs/tests/environment.rs index 99453ef113a..007418f76bb 100644 --- a/crates/storage/libmdbx-rs/tests/environment.rs +++ b/crates/storage/libmdbx-rs/tests/environment.rs @@ -128,6 +128,18 @@ fn test_info() { // assert_eq!(info.last_pgno(), 1); // assert_eq!(info.last_txnid(), 0); assert_eq!(info.num_readers(), 0); + assert!(matches!(info.mode(), Mode::ReadWrite { sync_mode: SyncMode::Durable })); + assert!(env.is_read_write().unwrap()); + + drop(env); + let env = Environment::builder() + .set_geometry(Geometry { size: Some(map_size..), ..Default::default() }) + .set_flags(EnvironmentFlags { mode: Mode::ReadOnly, ..Default::default() }) + .open(dir.path()) + .unwrap(); + let info = env.info().unwrap(); + assert!(matches!(info.mode(), Mode::ReadOnly)); + assert!(env.is_read_only().unwrap()); } #[test] diff --git a/crates/storage/nippy-jar/Cargo.toml b/crates/storage/nippy-jar/Cargo.toml index 9f212bf44e8..56f140afbda 
100644 --- a/crates/storage/nippy-jar/Cargo.toml +++ b/crates/storage/nippy-jar/Cargo.toml @@ -34,7 +34,6 @@ derive_more.workspace = true rand = { workspace = true, features = ["small_rng"] } tempfile.workspace = true - [features] default = [] test-utils = [] diff --git a/crates/storage/nippy-jar/src/compression/mod.rs b/crates/storage/nippy-jar/src/compression/mod.rs index 28a92fe909f..f9bf8110eeb 100644 --- a/crates/storage/nippy-jar/src/compression/mod.rs +++ b/crates/storage/nippy-jar/src/compression/mod.rs @@ -44,7 +44,9 @@ pub trait Compression: Serialize + for<'a> Deserialize<'a> { #[derive(Debug, Serialize, Deserialize)] #[cfg_attr(test, derive(PartialEq))] pub enum Compressors { + /// Zstandard compression algorithm with custom settings. Zstd(Zstd), + /// LZ4 compression algorithm with custom settings. Lz4(Lz4), } diff --git a/crates/storage/nippy-jar/src/compression/zstd.rs b/crates/storage/nippy-jar/src/compression/zstd.rs index 500247d1767..896a65bd708 100644 --- a/crates/storage/nippy-jar/src/compression/zstd.rs +++ b/crates/storage/nippy-jar/src/compression/zstd.rs @@ -12,10 +12,13 @@ pub use zstd::{bulk::Decompressor, dict::DecoderDictionary}; type RawDictionary = Vec; +/// Represents the state of a Zstandard compression operation. #[derive(Debug, Default, PartialEq, Eq, Serialize, Deserialize)] pub enum ZstdState { + /// The compressor is pending a dictionary. #[default] PendingDictionary, + /// The compressor is ready to perform compression. Ready, } @@ -51,6 +54,7 @@ impl Zstd { } } + /// Sets the compression level for the Zstd compression instance. pub const fn with_level(mut self, level: i32) -> Self { self.level = level; self @@ -209,7 +213,7 @@ impl Compression for Zstd { return Err(NippyJarError::ColumnLenMismatch(self.columns, columns.len())) } - let mut dictionaries = vec![]; + let mut dictionaries = Vec::with_capacity(columns.len()); for column in columns { // ZSTD requires all training data to be continuous in memory, alongside the size of // each entry diff --git a/crates/storage/nippy-jar/src/consistency.rs b/crates/storage/nippy-jar/src/consistency.rs index 1093fb5546a..952980ef6ef 100644 --- a/crates/storage/nippy-jar/src/consistency.rs +++ b/crates/storage/nippy-jar/src/consistency.rs @@ -28,6 +28,11 @@ pub struct NippyJarChecker { } impl NippyJarChecker { + /// Creates a new instance of [`NippyJarChecker`] with the provided [`NippyJar`]. + /// + /// This method initializes the checker without any associated file handles for + /// the data or offsets files. The [`NippyJar`] passed in contains all necessary + /// configurations for handling data. pub const fn new(jar: NippyJar) -> Self { Self { jar, data_file: None, offsets_file: None } } diff --git a/crates/storage/nippy-jar/src/cursor.rs b/crates/storage/nippy-jar/src/cursor.rs index 26776482729..376411ac265 100644 --- a/crates/storage/nippy-jar/src/cursor.rs +++ b/crates/storage/nippy-jar/src/cursor.rs @@ -25,9 +25,10 @@ impl std::fmt::Debug for NippyJarCursor<'_, H> { } impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { + /// Creates a new instance of [`NippyJarCursor`] for the given [`NippyJar`]. pub fn new(jar: &'a NippyJar) -> Result { let max_row_size = jar.max_row_size; - Ok(NippyJarCursor { + Ok(Self { jar, reader: Arc::new(jar.open_data_reader()?), // Makes sure that we have enough buffer capacity to decompress any row of data. 
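// NOTE: illustrative sketch, not part of this changeset. `with_reader` exists
// so that several cursors can share one mmap-backed `DataReader` through an
// `Arc`, instead of each cursor reopening and remapping the data file. The
// ownership pattern, reduced to stand-in types (not the nippy-jar API):
mod reader_sharing_sketch {
    use std::sync::Arc;

    pub struct DataReaderSketch; // stand-in: would own the data-file mmap

    pub struct CursorSketch {
        pub reader: Arc<DataReaderSketch>, // shared, cheap to clone
    }

    pub fn two_cursors(reader: Arc<DataReaderSketch>) -> (CursorSketch, CursorSketch) {
        // Both cursors read through the same mapping; no file handle is duplicated.
        (CursorSketch { reader: reader.clone() }, CursorSketch { reader })
    }
}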
@@ -36,12 +37,14 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { }) } + /// Creates a new instance of [`NippyJarCursor`] with the specified [`NippyJar`] and data + /// reader. pub fn with_reader( jar: &'a NippyJar, reader: Arc, ) -> Result { let max_row_size = jar.max_row_size; - Ok(NippyJarCursor { + Ok(Self { jar, reader, // Makes sure that we have enough buffer capacity to decompress any row of data. diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index fc096cf848c..385e39357a0 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -4,53 +4,92 @@ use thiserror::Error; /// Errors associated with [`crate::NippyJar`]. #[derive(Error, Debug)] pub enum NippyJarError { + /// An internal error occurred, wrapping any type of error. #[error(transparent)] Internal(#[from] Box), + + /// An error occurred while disconnecting, wrapping a standard I/O error. #[error(transparent)] Disconnect(#[from] std::io::Error), + + /// An error related to the file system occurred, wrapping a file system path error. #[error(transparent)] FileSystem(#[from] reth_fs_util::FsPathError), + + /// A custom error message provided by the user. #[error("{0}")] Custom(String), + + /// An error occurred during serialization/deserialization with Bincode. #[error(transparent)] Bincode(#[from] Box), + + /// An error occurred with the Elias-Fano encoding/decoding process. #[error(transparent)] EliasFano(#[from] anyhow::Error), + + /// Compression was enabled, but the compressor is not ready yet. #[error("compression was enabled, but it's not ready yet")] CompressorNotReady, + + /// Decompression was enabled, but the decompressor is not ready yet. #[error("decompression was enabled, but it's not ready yet")] DecompressorNotReady, + + /// The number of columns does not match the expected length. #[error("number of columns does not match: {0} != {1}")] ColumnLenMismatch(usize, usize), + + /// An unexpected missing value was encountered at a specific row and column. #[error("unexpected missing value: row:col {0}:{1}")] UnexpectedMissingValue(u64, u64), + + /// The size of an offset exceeds the maximum allowed size of 8 bytes. #[error("the size of an offset must be at most 8 bytes, got {offset_size}")] OffsetSizeTooBig { /// The read offset size in number of bytes. offset_size: u8, }, + + /// The size of an offset is less than the minimum allowed size of 1 byte. #[error("the size of an offset must be at least 1 byte, got {offset_size}")] OffsetSizeTooSmall { /// The read offset size in number of bytes. offset_size: u8, }, + + /// An attempt was made to read an offset that is out of bounds. #[error("attempted to read an out of bounds offset: {index}")] OffsetOutOfBounds { /// The index of the offset that was being read. index: usize, }, + + /// The output buffer is too small for the compression or decompression operation. #[error("compression or decompression requires a bigger destination output")] OutputTooSmall, + + /// A dictionary is not loaded when it is required for operations. #[error("dictionary is not loaded.")] DictionaryNotLoaded, + + /// It's not possible to generate a compressor after loading a dictionary. #[error("it's not possible to generate a compressor after loading a dictionary.")] CompressorNotAllowed, + + /// The number of offsets is smaller than the requested prune size. 
#[error("number of offsets ({0}) is smaller than prune request ({1}).")] InvalidPruning(u64, u64), + + /// The jar has been frozen and cannot be modified. #[error("jar has been frozen and cannot be modified.")] FrozenJar, + + /// The file is in an inconsistent state. #[error("File is in an inconsistent state.")] InconsistentState, - #[error("Missing file: {0}.")] + + /// A specified file is missing. + #[error("Missing file: {}", .0.display())] MissingFile(PathBuf), } diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index bdc950aa38a..98eddf22ee9 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -10,7 +10,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![allow(missing_docs)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use memmap2::Mmap; @@ -21,13 +20,9 @@ use std::{ ops::Range, path::{Path, PathBuf}, }; - -// Windows specific extension for std::fs -#[cfg(windows)] -use std::os::windows::prelude::OpenOptionsExt; - use tracing::*; +/// Compression algorithms supported by `NippyJar`. pub mod compression; #[cfg(test)] use compression::Compression; @@ -55,10 +50,13 @@ pub use writer::NippyJarWriter; mod consistency; pub use consistency::NippyJarChecker; +/// The version number of the Nippy Jar format. const NIPPY_JAR_VERSION: usize = 1; - +/// The file extension used for index files. const INDEX_FILE_EXTENSION: &str = "idx"; +/// The file extension used for offsets files. const OFFSETS_FILE_EXTENSION: &str = "off"; +/// The file extension used for configuration files. pub const CONFIG_FILE_EXTENSION: &str = "conf"; /// A [`RefRow`] is a list of column value slices pointing to either an internal buffer or a diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs index 9bf9bf52644..3a1003bee76 100644 --- a/crates/storage/nippy-jar/src/writer.rs +++ b/crates/storage/nippy-jar/src/writer.rs @@ -354,6 +354,10 @@ impl NippyJarWriter { Ok(()) } + /// Commits changes to the data file and offsets without synchronizing all data to disk. + /// + /// This function flushes the buffered data to the data file and commits the offsets, + /// but it does not guarantee that all data is synchronized to persistent storage. #[cfg(feature = "test-utils")] pub fn commit_without_sync_all(&mut self) -> Result<(), NippyJarError> { self.data_file.flush()?; @@ -412,41 +416,49 @@ impl NippyJarWriter { Ok(()) } + /// Returns the maximum row size for the associated [`NippyJar`]. #[cfg(test)] pub const fn max_row_size(&self) -> usize { self.jar.max_row_size } + /// Returns the column index of the current checker instance. #[cfg(test)] pub const fn column(&self) -> usize { self.column } + /// Returns a reference to the offsets vector. #[cfg(test)] pub fn offsets(&self) -> &[u64] { &self.offsets } + /// Returns a mutable reference to the offsets vector. #[cfg(test)] pub fn offsets_mut(&mut self) -> &mut Vec { &mut self.offsets } + /// Returns the path to the offsets file for the associated [`NippyJar`]. #[cfg(test)] pub fn offsets_path(&self) -> std::path::PathBuf { self.jar.offsets_path() } + /// Returns the path to the data file for the associated [`NippyJar`]. #[cfg(test)] pub fn data_path(&self) -> &Path { self.jar.data_path() } + /// Returns a mutable reference to the buffered writer for the data file. 
#[cfg(any(test, feature = "test-utils"))] pub fn data_file(&mut self) -> &mut BufWriter { &mut self.data_file } + /// Returns a reference to the associated [`NippyJar`] instance. #[cfg(any(test, feature = "test-utils"))] pub const fn jar(&self) -> &NippyJar { &self.jar diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 00e1c9f098d..84808ed7c38 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -17,6 +17,7 @@ reth-chainspec.workspace = true reth-blockchain-tree-api.workspace = true reth-execution-types.workspace = true reth-primitives = { workspace = true, features = ["reth-codec", "secp256k1"] } +reth-primitives-traits = { workspace = true, features = ["reth-codec"] } reth-fs-util.workspace = true reth-errors.workspace = true reth-storage-errors.workspace = true @@ -38,6 +39,7 @@ reth-node-types.workspace = true alloy-eips.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine.workspace = true +alloy-consensus.workspace = true revm.workspace = true # optimism @@ -65,7 +67,6 @@ strum.workspace = true # test-utils reth-ethereum-engine-primitives = { workspace = true, optional = true } -alloy-consensus = { workspace = true, optional = true } # parallel utils rayon.workspace = true @@ -91,13 +92,45 @@ optimism = [ "reth-primitives/optimism", "reth-execution-types/optimism", "reth-optimism-primitives", + "reth-codecs/op", + "reth-db/optimism", + "reth-db-api/optimism", + "revm/optimism", + "reth-optimism-primitives/optimism", +] +serde = [ + "dashmap/serde", + "notify/serde", + "parking_lot/serde", + "rand/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-rpc-types-engine/serde", + "revm/serde", + "reth-codecs/serde", + "reth-optimism-primitives?/serde", + "reth-primitives-traits/serde", + "reth-execution-types/serde", + "reth-trie-db/serde", + "reth-trie/serde", ] -serde = ["reth-execution-types/serde"] test-utils = [ "reth-db/test-utils", "reth-nippy-jar/test-utils", "reth-trie/test-utils", "reth-chain-state/test-utils", "reth-ethereum-engine-primitives", - "alloy-consensus", + "reth-chainspec/test-utils", + "reth-evm/test-utils", + "reth-network-p2p/test-utils", + "reth-primitives/test-utils", + "reth-primitives-traits/test-utils", + "reth-codecs/test-utils", + "reth-db-api/test-utils", + "reth-trie-db/test-utils", + "revm/test-utils", + "reth-prune-types/test-utils", + "reth-stages-types/test-utils", + "reth-optimism-primitives?/arbitrary", ] diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index 09b892562fb..3e1ba2a4b8f 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -76,3 +76,105 @@ where } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_storage_reverts_iter_empty() { + // Create empty sample data for reverts and wiped entries. + let reverts: Vec<(B256, RevertToSlot)> = vec![]; + let wiped: Vec<(B256, U256)> = vec![]; + + // Create the iterator with the empty data. + let iter = StorageRevertsIter::new(reverts, wiped); + + // Iterate and collect results into a vector for verification. + let results: Vec<_> = iter.collect(); + + // Verify that the results are empty. + assert_eq!(results, vec![]); + } + + #[test] + fn test_storage_reverts_iter_reverts_only() { + // Create sample data for only reverts. 
+ let reverts = vec![ + (B256::from_slice(&[4; 32]), RevertToSlot::Destroyed), + (B256::from_slice(&[5; 32]), RevertToSlot::Some(U256::from(40))), + ]; + + // Create the iterator with only reverts and no wiped entries. + let iter = StorageRevertsIter::new(reverts, vec![]); + + // Iterate and collect results into a vector for verification. + let results: Vec<_> = iter.collect(); + + // Verify the output order and values. + assert_eq!( + results, + vec![ + (B256::from_slice(&[4; 32]), U256::ZERO), // Revert slot previous value + (B256::from_slice(&[5; 32]), U256::from(40)), // Only revert present. + ] + ); + } + + #[test] + fn test_storage_reverts_iter_wiped_only() { + // Create sample data for only wiped entries. + let wiped = vec![ + (B256::from_slice(&[6; 32]), U256::from(50)), + (B256::from_slice(&[7; 32]), U256::from(60)), + ]; + + // Create the iterator with only wiped entries and no reverts. + let iter = StorageRevertsIter::new(vec![], wiped); + + // Iterate and collect results into a vector for verification. + let results: Vec<_> = iter.collect(); + + // Verify the output order and values. + assert_eq!( + results, + vec![ + (B256::from_slice(&[6; 32]), U256::from(50)), // Only wiped present. + (B256::from_slice(&[7; 32]), U256::from(60)), // Only wiped present. + ] + ); + } + + #[test] + fn test_storage_reverts_iter_interleaved() { + // Create sample data for interleaved reverts and wiped entries. + let reverts = vec![ + (B256::from_slice(&[8; 32]), RevertToSlot::Some(U256::from(70))), + (B256::from_slice(&[9; 32]), RevertToSlot::Some(U256::from(80))), + // Some higher key than wiped + (B256::from_slice(&[15; 32]), RevertToSlot::Some(U256::from(90))), + ]; + + let wiped = vec![ + (B256::from_slice(&[8; 32]), U256::from(75)), // Same key as revert + (B256::from_slice(&[10; 32]), U256::from(85)), // Wiped with new key + ]; + + // Create the iterator with the sample data. + let iter = StorageRevertsIter::new(reverts, wiped); + + // Iterate and collect results into a vector for verification. + let results: Vec<_> = iter.collect(); + + // Verify the output order and values. + assert_eq!( + results, + vec![ + (B256::from_slice(&[8; 32]), U256::from(70)), // Revert takes priority. + (B256::from_slice(&[9; 32]), U256::from(80)), // Only revert present. + (B256::from_slice(&[10; 32]), U256::from(85)), // Wiped entry. 
+ (B256::from_slice(&[15; 32]), U256::from(90)), // WGreater revert entry + ] + ); + } +} diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 894a41620c5..deccdea2831 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -46,6 +46,9 @@ pub use reth_chain_state::{ CanonStateNotifications, CanonStateSubscriptions, }; +// reexport traits to avoid breaking changes +pub use reth_storage_api::{HistoryWriter, StatsReader}; + pub(crate) fn to_range>(bounds: R) -> std::ops::Range { let start = match bounds.start_bound() { std::ops::Bound::Included(&v) => v, diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 9e6f32b33a3..68f1498eccb 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1,13 +1,19 @@ +#![allow(unused)] use crate::{ - providers::StaticFileProvider, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, - BlockReader, BlockReaderIdExt, BlockSource, CanonChainTracker, CanonStateNotifications, - CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, - DatabaseProviderFactory, DatabaseProviderRO, EvmEnvProvider, HeaderProvider, ProviderError, - ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - RequestsProvider, StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, + providers::{ConsistentProvider, StaticFileProvider}, + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + BlockSource, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, + ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProvider, + DatabaseProviderFactory, EvmEnvProvider, FullProvider, HashedPostStateProvider, HeaderProvider, + ProviderError, ProviderFactory, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, + StageCheckpointReader, StateProviderBox, StateProviderFactory, StateReader, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber}; +use alloy_consensus::Header; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, +}; use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; use alloy_rpc_types_engine::ForkchoiceState; use reth_chain_state::{ @@ -15,33 +21,37 @@ use reth_chain_state::{ MemoryOverlayStateProvider, }; use reth_chainspec::{ChainInfo, EthereumHardforks}; -use reth_db::models::BlockNumberAddress; +use reth_db::{models::BlockNumberAddress, transaction::DbTx, Database}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; -use reth_node_types::NodeTypesWithDB; +use reth_execution_types::ExecutionOutcome; +use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ - Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, - Withdrawal, Withdrawals, + Account, Block, BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, SealedBlock, + SealedBlockFor, 
SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionMeta, + TransactionSigned, }; +use reth_primitives_traits::BlockBody as _; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::StorageChangeSetReader; +use reth_storage_api::{ + DBProvider, NodePrimitivesProvider, StateCommitmentProvider, StorageChangeSetReader, +}; use reth_storage_errors::provider::ProviderResult; +use reth_trie::HashedPostState; +use reth_trie_db::StateCommitment; use revm::{ - db::states::PlainStorageRevert, + db::BundleState, primitives::{BlockEnv, CfgEnvWithHandlerCfg}, }; use std::{ - collections::{hash_map, HashMap}, - ops::{Add, Bound, RangeBounds, RangeInclusive, Sub}, + ops::{Add, RangeBounds, RangeInclusive, Sub}, sync::Arc, time::Instant, }; use tracing::trace; -use super::ProviderNodeTypes; +use crate::providers::ProviderNodeTypes; /// The main type for interacting with the blockchain. /// @@ -50,11 +60,11 @@ use super::ProviderNodeTypes; /// type that holds an instance of the database and the blockchain tree. #[derive(Debug)] pub struct BlockchainProvider2 { - /// Provider type used to access the database. - database: ProviderFactory, + /// Provider factory used to access the database. + pub(crate) database: ProviderFactory, /// Tracks the chain info wrt forkchoice updates and in memory canonical /// state. - pub(super) canonical_in_memory_state: CanonicalInMemoryState, + pub(crate) canonical_in_memory_state: CanonicalInMemoryState, } impl Clone for BlockchainProvider2 { @@ -67,15 +77,15 @@ impl Clone for BlockchainProvider2 { } impl BlockchainProvider2 { - /// Create a new provider using only the database, fetching the latest header from - /// the database to initialize the provider. - pub fn new(database: ProviderFactory) -> ProviderResult { - let provider = database.provider()?; - let best: ChainInfo = provider.chain_info()?; + /// Create a new [`BlockchainProvider2`] using only the storage, fetching the latest + /// header from the database to initialize the provider. + pub fn new(storage: ProviderFactory) -> ProviderResult { + let provider = storage.provider()?; + let best = provider.chain_info()?; match provider.header_by_number(best.best_number)? { Some(header) => { drop(provider); - Ok(Self::with_latest(database, SealedHeader::new(header, best.best_hash))?) + Ok(Self::with_latest(storage, SealedHeader::new(header, best.best_hash))?) } None => Err(ProviderError::HeaderNotFound(best.best_number.into())), } @@ -86,8 +96,11 @@ impl BlockchainProvider2 { /// /// This returns a `ProviderResult` since it tries the retrieve the last finalized header from /// `database`. - pub fn with_latest(database: ProviderFactory, latest: SealedHeader) -> ProviderResult { - let provider = database.provider()?; + pub fn with_latest( + storage: ProviderFactory, + latest: SealedHeader>, + ) -> ProviderResult { + let provider = storage.provider()?; let finalized_header = provider .last_finalized_block_number()? .map(|num| provider.sealed_header(num)) @@ -104,7 +117,7 @@ impl BlockchainProvider2 { .transpose()? .flatten(); Ok(Self { - database, + database: storage, canonical_in_memory_state: CanonicalInMemoryState::with_head( latest, finalized_header, @@ -114,514 +127,41 @@ impl BlockchainProvider2 { } /// Gets a clone of `canonical_in_memory_state`. 
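// NOTE: illustrative pseudo-usage, not part of this changeset. Construction
// assumes a `factory: ProviderFactory<N>` already exists; `new` reads the
// current chain tip from the database to seed the in-memory state, and
// `consistent_provider` (added below) yields one snapshot spanning both the
// in-memory overlay and the database:
//
//     let provider = BlockchainProvider2::new(factory)?;
//     let snapshot = provider.consistent_provider()?;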
- pub fn canonical_in_memory_state(&self) -> CanonicalInMemoryState { + pub fn canonical_in_memory_state(&self) -> CanonicalInMemoryState { self.canonical_in_memory_state.clone() } - // Helper function to convert range bounds - fn convert_range_bounds( - &self, - range: impl RangeBounds, - end_unbounded: impl FnOnce() -> T, - ) -> (T, T) - where - T: Copy + Add + Sub + From, - { - let start = match range.start_bound() { - Bound::Included(&n) => n, - Bound::Excluded(&n) => n + T::from(1u8), - Bound::Unbounded => T::from(0u8), - }; - - let end = match range.end_bound() { - Bound::Included(&n) => n, - Bound::Excluded(&n) => n - T::from(1u8), - Bound::Unbounded => end_unbounded(), - }; - - (start, end) - } - - /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. - /// - /// If the range is empty, or there are no blocks for the given range, then this returns `None`. - pub fn get_state( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - if range.is_empty() { - return Ok(None) - } - let start_block_number = *range.start(); - let end_block_number = *range.end(); - - // We are not removing block meta as it is used to get block changesets. - let mut block_bodies = Vec::new(); - for block_num in range.clone() { - let block_body = self - .block_body_indices(block_num)? - .ok_or(ProviderError::BlockBodyIndicesNotFound(block_num))?; - block_bodies.push((block_num, block_body)) - } - - // get transaction receipts - let Some(from_transaction_num) = block_bodies.first().map(|body| body.1.first_tx_num()) - else { - return Ok(None) - }; - let Some(to_transaction_num) = block_bodies.last().map(|body| body.1.last_tx_num()) else { - return Ok(None) - }; - - let mut account_changeset = Vec::new(); - for block_num in range.clone() { - let changeset = - self.account_block_changeset(block_num)?.into_iter().map(|elem| (block_num, elem)); - account_changeset.extend(changeset); - } - - let mut storage_changeset = Vec::new(); - for block_num in range { - let changeset = self.storage_changeset(block_num)?; - storage_changeset.extend(changeset); - } - - let (state, reverts) = - self.populate_bundle_state(account_changeset, storage_changeset, end_block_number)?; - - let mut receipt_iter = - self.receipts_by_tx_range(from_transaction_num..=to_transaction_num)?.into_iter(); - - let mut receipts = Vec::with_capacity(block_bodies.len()); - // loop break if we are at the end of the blocks. - for (_, block_body) in block_bodies { - let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); - for tx_num in block_body.tx_num_range() { - let receipt = receipt_iter - .next() - .ok_or_else(|| ProviderError::ReceiptNotFound(tx_num.into()))?; - block_receipts.push(Some(receipt)); - } - receipts.push(block_receipts); - } - - Ok(Some(ExecutionOutcome::new_init( - state, - reverts, - // We skip new contracts since we never delete them from the database - Vec::new(), - receipts.into(), - start_block_number, - Vec::new(), - ))) - } - - /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the - /// [`reth_db::PlainAccountState`] and [`reth_db::PlainStorageState`] tables, based on the given - /// storage and account changesets. 
- fn populate_bundle_state( - &self, - account_changeset: Vec<(u64, AccountBeforeTx)>, - storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>, - block_range_end: BlockNumber, - ) -> ProviderResult<(BundleStateInit, RevertsInit)> { - let mut state: BundleStateInit = HashMap::new(); - let mut reverts: RevertsInit = HashMap::new(); - let state_provider = self.state_by_block_number_or_tag(block_range_end.into())?; - - // add account changeset changes - for (block_number, account_before) in account_changeset.into_iter().rev() { - let AccountBeforeTx { info: old_info, address } = account_before; - match state.entry(address) { - hash_map::Entry::Vacant(entry) => { - let new_info = state_provider.basic_account(address)?; - entry.insert((old_info, new_info, HashMap::new())); - } - hash_map::Entry::Occupied(mut entry) => { - // overwrite old account state. - entry.get_mut().0 = old_info; - } - } - // insert old info into reverts. - reverts.entry(block_number).or_default().entry(address).or_default().0 = Some(old_info); - } - - // add storage changeset changes - for (block_and_address, old_storage) in storage_changeset.into_iter().rev() { - let BlockNumberAddress((block_number, address)) = block_and_address; - // get account state or insert from plain state. - let account_state = match state.entry(address) { - hash_map::Entry::Vacant(entry) => { - let present_info = state_provider.basic_account(address)?; - entry.insert((present_info, present_info, HashMap::new())) - } - hash_map::Entry::Occupied(entry) => entry.into_mut(), - }; - - // match storage. - match account_state.2.entry(old_storage.key) { - hash_map::Entry::Vacant(entry) => { - let new_storage_value = - state_provider.storage(address, old_storage.key)?.unwrap_or_default(); - entry.insert((old_storage.value, new_storage_value)); - } - hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().0 = old_storage.value; - } - }; - - reverts - .entry(block_number) - .or_default() - .entry(address) - .or_default() - .1 - .push(old_storage); - } - - Ok((state, reverts)) - } - - /// Fetches a range of data from both in-memory state and persistent storage while a predicate - /// is met. - /// - /// Creates a snapshot of the in-memory chain state and database provider to prevent - /// inconsistencies. Splits the range into in-memory and storage sections, prioritizing - /// recent in-memory blocks in case of overlaps. - /// - /// * `fetch_db_range` function (`F`) provides access to the database provider, allowing the - /// user to retrieve the required items from the database using [`RangeInclusive`]. - /// * `map_block_state_item` function (`G`) provides each block of the range in the in-memory - /// state, allowing for selection or filtering for the desired data. - fn get_in_memory_or_storage_by_block_range_while( - &self, - range: impl RangeBounds, - fetch_db_range: F, - map_block_state_item: G, - mut predicate: P, - ) -> ProviderResult> - where - F: FnOnce( - &DatabaseProviderRO, - RangeInclusive, - &mut P, - ) -> ProviderResult>, - G: Fn(Arc, &mut P) -> Option, - P: FnMut(&T) -> bool, - { - // Each one provides a snapshot at the time of instantiation, but its order matters. - // - // If we acquire first the database provider, it's possible that before the in-memory chain - // snapshot is instantiated, it will flush blocks to disk. This would - // mean that our database provider would not have access to the flushed blocks (since it's - // working under an older view), while the in-memory state may have deleted them - // entirely. 
Resulting in gaps on the range. - let mut in_memory_chain = - self.canonical_in_memory_state.canonical_chain().collect::>(); - let db_provider = self.database_provider_ro()?; - - let (start, end) = self.convert_range_bounds(range, || { - // the first block is the highest one. - in_memory_chain - .first() - .map(|b| b.number()) - .unwrap_or_else(|| db_provider.last_block_number().unwrap_or_default()) - }); - - if start > end { - return Ok(vec![]) - } - - // Split range into storage_range and in-memory range. If the in-memory range is not - // necessary drop it early. - // - // The last block of `in_memory_chain` is the lowest block number. - let (in_memory, storage_range) = match in_memory_chain.last().as_ref().map(|b| b.number()) { - Some(lowest_memory_block) if lowest_memory_block <= end => { - let highest_memory_block = - in_memory_chain.first().as_ref().map(|b| b.number()).expect("qed"); - - // Database will for a time overlap with in-memory-chain blocks. In - // case of a re-org, it can mean that the database blocks are of a forked chain, and - // so, we should prioritize the in-memory overlapped blocks. - let in_memory_range = - lowest_memory_block.max(start)..=end.min(highest_memory_block); - - // If requested range is in the middle of the in-memory range, remove the necessary - // lowest blocks - in_memory_chain.truncate( - in_memory_chain - .len() - .saturating_sub(start.saturating_sub(lowest_memory_block) as usize), - ); - - let storage_range = - (lowest_memory_block > start).then(|| start..=lowest_memory_block - 1); - - (Some((in_memory_chain, in_memory_range)), storage_range) - } - _ => { - // Drop the in-memory chain so we don't hold blocks in memory. - drop(in_memory_chain); - - (None, Some(start..=end)) - } - }; - - let mut items = Vec::with_capacity((end - start + 1) as usize); - - if let Some(storage_range) = storage_range { - let mut db_items = fetch_db_range(&db_provider, storage_range.clone(), &mut predicate)?; - items.append(&mut db_items); - - // The predicate was not met, if the number of items differs from the expected. So, we - // return what we have. - if items.len() as u64 != storage_range.end() - storage_range.start() + 1 { - return Ok(items) - } - } - - if let Some((in_memory_chain, in_memory_range)) = in_memory { - for (num, block) in in_memory_range.zip(in_memory_chain.into_iter().rev()) { - debug_assert!(num == block.number()); - if let Some(item) = map_block_state_item(block, &mut predicate) { - items.push(item); - } else { - break - } - } - } - - Ok(items) + /// Returns a provider with a created `DbTx` inside, which allows fetching data from the + /// database using different types of providers. Example: [`HeaderProvider`] + /// [`BlockHashReader`]. This may fail if the inner read database transaction fails to open. + #[track_caller] + pub fn consistent_provider(&self) -> ProviderResult> { + ConsistentProvider::new(self.database.clone(), self.canonical_in_memory_state()) } /// This uses a given [`BlockState`] to initialize a state provider for that block. fn block_state_provider( &self, - state: &BlockState, - ) -> ProviderResult { + state: &BlockState, + ) -> ProviderResult> { let anchor_hash = state.anchor().hash; let latest_historical = self.database.history_by_block_hash(anchor_hash)?; Ok(state.state_provider(latest_historical)) } - /// Fetches data from either in-memory state or persistent storage for a range of transactions. + /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. 
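The range-splitting rule in the code above is the subtle part: overlapping blocks are always served from memory (the database may briefly hold blocks from a forked chain after a re-org), and the storage range only covers blocks strictly below the lowest in-memory block. A simplified sketch of just that split, with plain integers standing in for block numbers and no reth types involved; `split_range` is a hypothetical name for illustration:

```rust
use std::ops::RangeInclusive;

/// Splits an inclusive block range into a storage part and an in-memory part.
/// `mem` delimits the blocks currently held in memory as (lowest, highest);
/// overlapping blocks are served from memory, mirroring the logic above.
fn split_range(
    start: u64,
    end: u64,
    mem: Option<(u64, u64)>,
) -> (Option<RangeInclusive<u64>>, Option<RangeInclusive<u64>>) {
    match mem {
        Some((lowest_mem, highest_mem)) if lowest_mem <= end => {
            // Memory wins on overlap; clamp to what is actually in memory.
            let in_memory = lowest_mem.max(start)..=end.min(highest_mem);
            // Storage only serves blocks strictly below the lowest in-memory block.
            let storage = (lowest_mem > start).then(|| start..=lowest_mem - 1);
            (storage, Some(in_memory))
        }
        // Nothing relevant in memory: everything comes from storage.
        _ => (Some(start..=end), None),
    }
}

fn main() {
    // Blocks 8..=12 are in memory; a request for 5..=10 is split at 8.
    assert_eq!(split_range(5, 10, Some((8, 12))), (Some(5..=7), Some(8..=10)));
    // A request entirely below memory is served from storage alone.
    assert_eq!(split_range(1, 4, Some((8, 12))), (Some(1..=4), None));
}
```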
/// - /// * `fetch_from_db`: has a [`DatabaseProviderRO`] and the storage specific range. - /// * `fetch_from_block_state`: has a [`RangeInclusive`] of elements that should be fetched from - /// [`BlockState`]. [`RangeInclusive`] is necessary to handle partial look-ups of a block. - fn get_in_memory_or_storage_by_tx_range( - &self, - range: impl RangeBounds, - fetch_from_db: S, - fetch_from_block_state: M, - ) -> ProviderResult> - where - S: FnOnce( - DatabaseProviderRO, - RangeInclusive, - ) -> ProviderResult>, - M: Fn(RangeInclusive, Arc) -> ProviderResult>, - { - let in_mem_chain = self.canonical_in_memory_state.canonical_chain().collect::>(); - let provider = self.database.provider()?; - - // Get the last block number stored in the storage which does NOT overlap with in-memory - // chain. - let last_database_block_number = in_mem_chain - .last() - .map(|b| Ok(b.anchor().number)) - .unwrap_or_else(|| provider.last_block_number())?; - - // Get the next tx number for the last block stored in the storage, which marks the start of - // the in-memory state. - let last_block_body_index = provider - .block_body_indices(last_database_block_number)? - .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; - let mut in_memory_tx_num = last_block_body_index.next_tx_num(); - - let (start, end) = self.convert_range_bounds(range, || { - in_mem_chain - .iter() - .map(|b| b.block_ref().block().body.transactions.len() as u64) - .sum::() + - last_block_body_index.last_tx_num() - }); - - if start > end { - return Ok(vec![]) - } - - let mut tx_range = start..=end; - - // If the range is entirely before the first in-memory transaction number, fetch from - // storage - if *tx_range.end() < in_memory_tx_num { - return fetch_from_db(provider, tx_range); - } - - let mut items = Vec::with_capacity((tx_range.end() - tx_range.start() + 1) as usize); - - // If the range spans storage and memory, get elements from storage first. - if *tx_range.start() < in_memory_tx_num { - // Determine the range that needs to be fetched from storage. - let db_range = *tx_range.start()..=in_memory_tx_num.saturating_sub(1); - - // Set the remaining transaction range for in-memory - tx_range = in_memory_tx_num..=*tx_range.end(); - - items.extend(fetch_from_db(provider, db_range)?); - } - - // Iterate from the lowest block to the highest in-memory chain - for block_state in in_mem_chain.into_iter().rev() { - let block_tx_count = block_state.block_ref().block().body.transactions.len(); - let remaining = (tx_range.end() - tx_range.start() + 1) as usize; - - // If the transaction range start is equal or higher than the next block first - // transaction, advance - if *tx_range.start() >= in_memory_tx_num + block_tx_count as u64 { - in_memory_tx_num += block_tx_count as u64; - continue - } - - // This should only be more than 0 once, in case of a partial range inside a block. - let skip = (tx_range.start() - in_memory_tx_num) as usize; - - items.extend(fetch_from_block_state( - skip..=skip + (remaining.min(block_tx_count - skip) - 1), - block_state, - )?); - - in_memory_tx_num += block_tx_count as u64; - - // Break if the range has been fully processed - if in_memory_tx_num > *tx_range.end() { - break - } - - // Set updated range - tx_range = in_memory_tx_num..=*tx_range.end(); - } - - Ok(items) - } - - /// Fetches data from either in-memory state or persistent storage by transaction - /// [`HashOrNumber`]. 
- fn get_in_memory_or_storage_by_tx( - &self, - id: HashOrNumber, - fetch_from_db: S, - fetch_from_block_state: M, - ) -> ProviderResult> - where - S: FnOnce(DatabaseProviderRO) -> ProviderResult>, - M: Fn(usize, TxNumber, Arc) -> ProviderResult>, - { - // Order of instantiation matters. More information on: - // `get_in_memory_or_storage_by_block_range_while`. - let in_mem_chain = self.canonical_in_memory_state.canonical_chain().collect::>(); - let provider = self.database.provider()?; - - // Get the last block number stored in the database which does NOT overlap with in-memory - // chain. - let last_database_block_number = in_mem_chain - .last() - .map(|b| Ok(b.anchor().number)) - .unwrap_or_else(|| provider.last_block_number())?; - - // Get the next tx number for the last block stored in the database and consider it the - // first tx number of the in-memory state - let last_block_body_index = provider - .block_body_indices(last_database_block_number)? - .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; - let mut in_memory_tx_num = last_block_body_index.next_tx_num(); - - // If the transaction number is less than the first in-memory transaction number, make a - // database lookup - if let HashOrNumber::Number(id) = id { - if id < in_memory_tx_num { - return fetch_from_db(provider) - } - } - - // Iterate from the lowest block to the highest - for block_state in in_mem_chain.into_iter().rev() { - let executed_block = block_state.block_ref(); - let block = executed_block.block(); - - for tx_index in 0..block.body.transactions.len() { - match id { - HashOrNumber::Hash(tx_hash) => { - if tx_hash == block.body.transactions[tx_index].hash() { - return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) - } - } - HashOrNumber::Number(id) => { - if id == in_memory_tx_num { - return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) - } - } - } - - in_memory_tx_num += 1; - } - } - - // Not found in-memory, so check database. - if let HashOrNumber::Hash(_) = id { - return fetch_from_db(provider) - } - - Ok(None) - } - - /// Fetches data from either in-memory state or persistent storage by [`BlockHashOrNumber`]. - fn get_in_memory_or_storage_by_block( + /// If the range is empty, or there are no blocks for the given range, then this returns `None`. + pub fn get_state( &self, - id: BlockHashOrNumber, - fetch_from_db: S, - fetch_from_block_state: M, - ) -> ProviderResult - where - S: FnOnce(DatabaseProviderRO) -> ProviderResult, - M: Fn(Arc) -> ProviderResult, - { - let block_state = match id { - BlockHashOrNumber::Hash(block_hash) => { - self.canonical_in_memory_state.state_by_hash(block_hash) - } - BlockHashOrNumber::Number(block_number) => { - self.canonical_in_memory_state.state_by_number(block_number) - } - }; - - if let Some(block_state) = block_state { - return fetch_from_block_state(block_state) - } - fetch_from_db(self.database_provider_ro()?) + range: RangeInclusive, + ) -> ProviderResult>>> { + self.consistent_provider()?.get_state(range) } } -impl BlockchainProvider2 { - /// Ensures that the given block number is canonical (synced) - /// - /// This is a helper for guarding the `HistoricalStateProvider` against block numbers that are - /// out of range and would lead to invalid results, mainly during initial sync. 
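The guard whose doc comment begins above (and continues below) trades precision for speed: rather than consulting a sync table, it only compares the requested number against the best synced block, which is enough to reject out-of-range historical queries during initial sync. A minimal standalone sketch of that check; the free function and `GuardError` are illustrative stand-ins, not the reth types:

```rust
#[derive(Debug, PartialEq)]
enum GuardError {
    HeaderNotFound(u64),
}

/// Rejects block numbers above the best synced block. This is a cheap
/// upper-bound check, not a proof that the block is canonical.
fn ensure_canonical_block(block_number: u64, best_block_number: u64) -> Result<(), GuardError> {
    if block_number > best_block_number {
        Err(GuardError::HeaderNotFound(block_number))
    } else {
        Ok(())
    }
}

fn main() {
    assert_eq!(ensure_canonical_block(5, 10), Ok(()));
    assert_eq!(ensure_canonical_block(11, 10), Err(GuardError::HeaderNotFound(11)));
}
```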
- /// - /// Verifying the `block_number` would be expensive since we need to lookup sync table - /// Instead, we ensure that the `block_number` is within the range of the - /// [`Self::best_block_number`] which is updated when a block is synced. - #[inline] - fn ensure_canonical_block(&self, block_number: BlockNumber) -> ProviderResult<()> { - let latest = self.best_block_number()?; - if block_number > latest { - Err(ProviderError::HeaderNotFound(block_number.into())) - } else { - Ok(()) - } - } +impl NodePrimitivesProvider for BlockchainProvider2 { + type Primitives = N::Primitives; } impl DatabaseProviderFactory for BlockchainProvider2 { @@ -638,112 +178,68 @@ impl DatabaseProviderFactory for BlockchainProvider2 { } } +impl StateCommitmentProvider for BlockchainProvider2 { + type StateCommitment = N::StateCommitment; +} + impl StaticFileProviderFactory for BlockchainProvider2 { - fn static_file_provider(&self) -> StaticFileProvider { + fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } } impl HeaderProvider for BlockchainProvider2 { - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - (*block_hash).into(), - |db_provider| db_provider.header(block_hash), - |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), - ) + type Header = HeaderTy; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + self.consistent_provider()?.header(block_hash) } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - num.into(), - |db_provider| db_provider.header_by_number(num), - |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), - ) + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.consistent_provider()?.header_by_number(num) } fn header_td(&self, hash: &BlockHash) -> ProviderResult> { - if let Some(num) = self.block_number(*hash)? { - self.header_td_by_number(num) - } else { - Ok(None) - } + self.consistent_provider()?.header_td(hash) } fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - let number = if self.canonical_in_memory_state.hash_by_number(number).is_some() { - // If the block exists in memory, we should return a TD for it. - // - // The canonical in memory state should only store post-merge blocks. Post-merge blocks - // have zero difficulty. This means we can use the total difficulty for the last - // finalized block number if present (so that we are not affected by reorgs), if not the - // last number in the database will be used. - if let Some(last_finalized_num_hash) = - self.canonical_in_memory_state.get_finalized_num_hash() - { - last_finalized_num_hash.number - } else { - self.last_block_number()? 
- } - } else { - // Otherwise, return what we have on disk for the input block - number - }; - self.database.header_td_by_number(number) + self.consistent_provider()?.header_td_by_number(number) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.headers_range(range), - |block_state, _| Some(block_state.block_ref().block().header.header().clone()), - |_| true, - ) + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.consistent_provider()?.headers_range(range) } - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - number.into(), - |db_provider| db_provider.sealed_header(number), - |block_state| Ok(Some(block_state.block_ref().block().header.clone())), - ) + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { + self.consistent_provider()?.sealed_header(number) } fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.sealed_headers_range(range), - |block_state, _| Some(block_state.block_ref().block().header.clone()), - |_| true, - ) + ) -> ProviderResult>> { + self.consistent_provider()?.sealed_headers_range(range) } fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), - |block_state, predicate| { - let header = &block_state.block_ref().block().header; - predicate(header).then(|| header.clone()) - }, - predicate, - ) + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { + self.consistent_provider()?.sealed_headers_while(range, predicate) } } impl BlockHashReader for BlockchainProvider2 { fn block_hash(&self, number: u64) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - number.into(), - |db_provider| db_provider.block_hash(number), - |block_state| Ok(Some(block_state.hash())), - ) + self.consistent_provider()?.block_hash(number) } fn canonical_hashes_range( @@ -751,15 +247,7 @@ impl BlockHashReader for BlockchainProvider2 { start: BlockNumber, end: BlockNumber, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - start..end, - |db_provider, inclusive_range, _| { - db_provider - .canonical_hashes_range(*inclusive_range.start(), *inclusive_range.end() + 1) - }, - |block_state, _| Some(block_state.hash()), - |_| true, - ) + self.consistent_provider()?.canonical_hashes_range(start, end) } } @@ -777,11 +265,7 @@ impl BlockNumReader for BlockchainProvider2 { } fn block_number(&self, hash: B256) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - hash.into(), - |db_provider| db_provider.block_number(hash), - |block_state| Ok(Some(block_state.number())), - ) + self.consistent_provider()?.block_number(hash) } } @@ -800,89 +284,45 @@ impl BlockIdReader for BlockchainProvider2 { } impl BlockReader for BlockchainProvider2 { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { - match source { - BlockSource::Any | BlockSource::Canonical => { - // Note: it's fine to return the unsealed block because the caller already has - // the hash - self.get_in_memory_or_storage_by_block( - hash.into(), - |db_provider| 
db_provider.find_block_by_hash(hash, source), - |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), - ) - } - BlockSource::Pending => { - Ok(self.canonical_in_memory_state.pending_block().map(|block| block.unseal())) - } - } + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { + self.consistent_provider()?.find_block_by_hash(hash, source) } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.block(id), - |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), - ) + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + self.consistent_provider()?.block(id) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block()) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block_with_senders()) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.ommers(id), - |block_state| { - if self.chain_spec().final_paris_total_difficulty(block_state.number()).is_some() { - return Ok(Some(Vec::new())) - } - - Ok(Some(block_state.block_ref().block().body.ommers.clone())) - }, - ) + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + self.consistent_provider()?.ommers(id) } fn block_body_indices( &self, number: BlockNumber, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - number.into(), - |db_provider| db_provider.block_body_indices(number), - |block_state| { - // Find the last block indices on database - let last_storage_block_number = block_state.anchor().number; - let mut stored_indices = self - .database - .block_body_indices(last_storage_block_number)? - .ok_or(ProviderError::BlockBodyIndicesNotFound(last_storage_block_number))?; - - // Prepare our block indices - stored_indices.first_tx_num = stored_indices.next_tx_num(); - stored_indices.tx_count = 0; - - // Iterate from the lowest block in memory until our target block - for state in block_state.chain().into_iter().rev() { - let block_tx_count = state.block_ref().block.body.transactions.len() as u64; - if state.block_ref().block().number == number { - stored_indices.tx_count = block_tx_count; - } else { - stored_indices.first_tx_num += block_tx_count; - } - } - - Ok(Some(stored_indices)) - }, - ) + self.consistent_provider()?.block_body_indices(number) } /// Returns the block with senders with matching number or hash from database. 
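The deleted `block_body_indices` branch above reconstructs indices for an in-memory block by starting from the last persisted block's `next_tx_num` and accumulating transaction counts up the in-memory chain until the target block. A simplified sketch of that accumulation over plain `(block_number, tx_count)` pairs, assuming the chain slice is ordered lowest block first; the names here are illustrative, not reth's:

```rust
#[derive(Debug, PartialEq)]
struct BodyIndices {
    first_tx_num: u64,
    tx_count: u64,
}

/// Computes the body indices of `target` given the first tx number after the
/// last persisted block and the in-memory chain as (block number, tx count)
/// pairs, ordered from lowest to highest block.
fn in_memory_body_indices(
    next_persisted_tx_num: u64,
    chain: &[(u64, u64)],
    target: u64,
) -> Option<BodyIndices> {
    let mut first_tx_num = next_persisted_tx_num;
    for &(number, tx_count) in chain {
        if number == target {
            return Some(BodyIndices { first_tx_num, tx_count });
        }
        // Blocks below the target only shift the starting transaction number.
        first_tx_num += tx_count;
    }
    None
}

fn main() {
    // Blocks 8, 9, 10 hold 2, 0 and 3 transactions; persisted txs end at 100.
    let chain = [(8, 2), (9, 0), (10, 3)];
    assert_eq!(
        in_memory_body_indices(100, &chain, 10),
        Some(BodyIndices { first_tx_num: 102, tx_count: 3 })
    );
}
```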
@@ -895,272 +335,132 @@ impl BlockReader for BlockchainProvider2 { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.block_with_senders(id, transaction_kind), - |block_state| Ok(Some(block_state.block_with_senders())), - ) + ) -> ProviderResult>> { + self.consistent_provider()?.block_with_senders(id, transaction_kind) } fn sealed_block_with_senders( &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.sealed_block_with_senders(id, transaction_kind), - |block_state| Ok(Some(block_state.sealed_block_with_senders())), - ) + ) -> ProviderResult>> { + self.consistent_provider()?.sealed_block_with_senders(id, transaction_kind) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.block_range(range), - |block_state, _| Some(block_state.block_ref().block().clone().unseal()), - |_| true, - ) + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + self.consistent_provider()?.block_range(range) } fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.block_with_senders_range(range), - |block_state, _| Some(block_state.block_with_senders()), - |_| true, - ) + ) -> ProviderResult>> { + self.consistent_provider()?.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.sealed_block_with_senders_range(range), - |block_state, _| Some(block_state.sealed_block_with_senders()), - |_| true, - ) + ) -> ProviderResult>> { + self.consistent_provider()?.sealed_block_with_senders_range(range) } } impl TransactionsProvider for BlockchainProvider2 { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - tx_hash.into(), - |db_provider| db_provider.transaction_id(tx_hash), - |_, tx_number, _| Ok(Some(tx_number)), - ) + self.consistent_provider()?.transaction_id(tx_hash) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_by_id(id), - |tx_index, _, block_state| { - Ok(block_state.block_ref().block().body.transactions.get(tx_index).cloned()) - }, - ) + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + self.consistent_provider()?.transaction_by_id(id) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_by_id_no_hash(id), - |tx_index, _, block_state| { - Ok(block_state - .block_ref() - .block() - .body - .transactions - .get(tx_index) - .cloned() - .map(Into::into)) - }, - ) + ) -> ProviderResult> { + self.consistent_provider()?.transaction_by_id_unhashed(id) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { - if let Some(tx) = self.canonical_in_memory_state.transaction_by_hash(hash) { - return Ok(Some(tx)) - } - - self.database.transaction_by_hash(hash) + fn transaction_by_hash(&self, hash: TxHash) -> 
ProviderResult> { + self.consistent_provider()?.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { - if let Some((tx, meta)) = - self.canonical_in_memory_state.transaction_by_hash_with_meta(tx_hash) - { - return Ok(Some((tx, meta))) - } - - self.database.transaction_by_hash_with_meta(tx_hash) + ) -> ProviderResult> { + self.consistent_provider()?.transaction_by_hash_with_meta(tx_hash) } fn transaction_block(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_block(id), - |_, _, block_state| Ok(Some(block_state.block_ref().block().number)), - ) + self.consistent_provider()?.transaction_block(id) } fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block( - id, - |provider| provider.transactions_by_block(id), - |block_state| Ok(Some(block_state.block_ref().block().body.transactions.clone())), - ) + ) -> ProviderResult>> { + self.consistent_provider()?.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block_range_while( - range, - |db_provider, range, _| db_provider.transactions_by_block_range(range), - |block_state, _| Some(block_state.block_ref().block().body.transactions.clone()), - |_| true, - ) + ) -> ProviderResult>> { + self.consistent_provider()?.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx_range( - range, - |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), - |index_range, block_state| { - Ok(block_state.block_ref().block().body.transactions[index_range] - .iter() - .cloned() - .map(Into::into) - .collect()) - }, - ) + ) -> ProviderResult> { + self.consistent_provider()?.transactions_by_tx_range(range) } fn senders_by_tx_range( &self, range: impl RangeBounds, ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx_range( - range, - |db_provider, db_range| db_provider.senders_by_tx_range(db_range), - |index_range, block_state| Ok(block_state.block_ref().senders[index_range].to_vec()), - ) + self.consistent_provider()?.senders_by_tx_range(range) } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.transaction_sender(id), - |tx_index, _, block_state| Ok(block_state.block_ref().senders.get(tx_index).copied()), - ) + self.consistent_provider()?.transaction_sender(id) } } impl ReceiptProvider for BlockchainProvider2 { - fn receipt(&self, id: TxNumber) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx( - id.into(), - |provider| provider.receipt(id), - |tx_index, _, block_state| { - Ok(block_state.executed_block_receipts().get(tx_index).cloned()) - }, - ) - } + type Receipt = ReceiptTy; - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { - for block_state in self.canonical_in_memory_state.canonical_chain() { - let executed_block = block_state.block_ref(); - let block = executed_block.block(); - let receipts = block_state.executed_block_receipts(); - - // assuming 1:1 correspondence between transactions and receipts - debug_assert_eq!( - block.body.transactions.len(), - receipts.len(), - "Mismatch between transaction and receipt count" - ); - - if let Some(tx_index) = block.body.transactions.iter().position(|tx| tx.hash() == hash) - { - // safe to 
use tx_index for receipts due to 1:1 correspondence - return Ok(receipts.get(tx_index).cloned()); - } - } + fn receipt(&self, id: TxNumber) -> ProviderResult> { + self.consistent_provider()?.receipt(id) + } - self.database.receipt_by_hash(hash) + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + self.consistent_provider()?.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { - self.get_in_memory_or_storage_by_block( - block, - |db_provider| db_provider.receipts_by_block(block), - |block_state| Ok(Some(block_state.executed_block_receipts())), - ) + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { + self.consistent_provider()?.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { - self.get_in_memory_or_storage_by_tx_range( - range, - |db_provider, db_range| db_provider.receipts_by_tx_range(db_range), - |index_range, block_state| { - Ok(block_state.executed_block_receipts().drain(index_range).collect()) - }, - ) + ) -> ProviderResult> { + self.consistent_provider()?.receipts_by_tx_range(range) } } impl ReceiptProviderIdExt for BlockchainProvider2 { - fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { - match block { - BlockId::Hash(rpc_block_hash) => { - let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; - if receipts.is_none() && !rpc_block_hash.require_canonical.unwrap_or(false) { - let block_state = self - .canonical_in_memory_state - .state_by_hash(rpc_block_hash.block_hash) - .ok_or(ProviderError::StateForHashNotFound(rpc_block_hash.block_hash))?; - receipts = Some(block_state.executed_block_receipts()); - } - Ok(receipts) - } - BlockId::Number(num_tag) => match num_tag { - BlockNumberOrTag::Pending => Ok(self - .canonical_in_memory_state - .pending_state() - .map(|block_state| block_state.executed_block_receipts())), - _ => { - if let Some(num) = self.convert_block_number(num_tag)? 
{ - self.receipts_by_block(num.into()) - } else { - Ok(None) - } - } - }, - } + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + self.consistent_provider()?.receipts_by_block_id(block) } } @@ -1170,99 +470,40 @@ impl WithdrawalsProvider for BlockchainProvider2 { id: BlockHashOrNumber, timestamp: u64, ) -> ProviderResult> { - if !self.chain_spec().is_shanghai_active_at_timestamp(timestamp) { - return Ok(None) - } - - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.withdrawals_by_block(id, timestamp), - |block_state| Ok(block_state.block_ref().block().body.withdrawals.clone()), - ) + self.consistent_provider()?.withdrawals_by_block(id, timestamp) } fn latest_withdrawal(&self) -> ProviderResult> { - let best_block_num = self.best_block_number()?; - - self.get_in_memory_or_storage_by_block( - best_block_num.into(), - |db_provider| db_provider.latest_withdrawal(), - |block_state| { - Ok(block_state - .block_ref() - .block() - .body - .withdrawals - .clone() - .and_then(|mut w| w.pop())) - }, - ) - } -} - -impl RequestsProvider for BlockchainProvider2 { - fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - if !self.chain_spec().is_prague_active_at_timestamp(timestamp) { - return Ok(None) - } - - self.get_in_memory_or_storage_by_block( - id, - |db_provider| db_provider.requests_by_block(id, timestamp), - |block_state| Ok(block_state.block_ref().block().body.requests.clone()), - ) + self.consistent_provider()?.latest_withdrawal() } } impl StageCheckpointReader for BlockchainProvider2 { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { - self.database.provider()?.get_stage_checkpoint(id) + self.consistent_provider()?.get_stage_checkpoint(id) } fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { - self.database.provider()?.get_stage_checkpoint_progress(id) + self.consistent_provider()?.get_stage_checkpoint_progress(id) } fn get_all_checkpoints(&self) -> ProviderResult> { - self.database.provider()?.get_all_checkpoints() + self.consistent_provider()?.get_all_checkpoints() } } -impl EvmEnvProvider for BlockchainProvider2 { - fn fill_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
<Header = Header>,
-    {
-        let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?;
-        let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?;
-        self.fill_env_with_header(cfg, block_env, &header, evm_config)
-    }
-
+impl<N: ProviderNodeTypes> EvmEnvProvider<HeaderTy<N>> for BlockchainProvider2<N> {
     fn fill_env_with_header(
         &self,
         cfg: &mut CfgEnvWithHandlerCfg,
         block_env: &mut BlockEnv,
-        header: &Header,
+        header: &HeaderTy<N>,
         evm_config: EvmConfig,
     ) -> ProviderResult<()>
     where
-        EvmConfig: ConfigureEvmEnv<Header = Header>,
+        EvmConfig: ConfigureEvmEnv<Header = HeaderTy<N>>,
     {
-        let total_difficulty = self
-            .header_td_by_number(header.number)?
-            .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?;
-        evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty);
-        Ok(())
+        self.consistent_provider()?.fill_env_with_header(cfg, block_env, header, evm_config)
     }

     fn fill_cfg_env_at(
@@ -1272,27 +513,21 @@ impl<N: ProviderNodeTypes> EvmEnvProvider for BlockchainProvider2<N> {
         evm_config: EvmConfig,
     ) -> ProviderResult<()>
     where
-        EvmConfig: ConfigureEvmEnv<Header = Header>,
+        EvmConfig: ConfigureEvmEnv<Header = HeaderTy<N>>,
     {
-        let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?;
-        let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?;
-        self.fill_cfg_env_with_header(cfg, &header, evm_config)
+        self.consistent_provider()?.fill_cfg_env_at(cfg, at, evm_config)
     }

     fn fill_cfg_env_with_header(
         &self,
         cfg: &mut CfgEnvWithHandlerCfg,
-        header: &Header,
+        header: &HeaderTy<N>,
         evm_config: EvmConfig,
     ) -> ProviderResult<()>
     where
-        EvmConfig: ConfigureEvmEnv<Header = Header>,
+        EvmConfig: ConfigureEvmEnv<Header = HeaderTy<N>
>, { - let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - evm_config.fill_cfg_env(cfg, header, total_difficulty); - Ok(()) + self.consistent_provider()?.fill_cfg_env_with_header(cfg, header, evm_config) } } @@ -1301,11 +536,11 @@ impl PruneCheckpointReader for BlockchainProvider2 { &self, segment: PruneSegment, ) -> ProviderResult> { - self.database.provider()?.get_prune_checkpoint(segment) + self.consistent_provider()?.get_prune_checkpoint(segment) } fn get_prune_checkpoints(&self) -> ProviderResult> { - self.database.provider()?.get_prune_checkpoints() + self.consistent_provider()?.get_prune_checkpoints() } } @@ -1336,8 +571,9 @@ impl StateProviderFactory for BlockchainProvider2 { block_number: BlockNumber, ) -> ProviderResult { trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); - self.ensure_canonical_block(block_number)?; - let hash = self + let provider = self.consistent_provider()?; + provider.ensure_canonical_block(block_number)?; + let hash = provider .block_hash(block_number)? .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?; self.history_by_block_hash(hash) @@ -1346,14 +582,11 @@ impl StateProviderFactory for BlockchainProvider2 { fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); - self.get_in_memory_or_storage_by_block( + self.consistent_provider()?.get_in_memory_or_storage_by_block( block_hash.into(), - |_| { - // TODO(joshie): port history_by_block_hash to DatabaseProvider and use db_provider - self.database.history_by_block_hash(block_hash) - }, + |_| self.database.history_by_block_hash(block_hash), |block_state| { - let state_provider = self.block_state_provider(&block_state)?; + let state_provider = self.block_state_provider(block_state)?; Ok(Box::new(state_provider)) }, ) @@ -1428,10 +661,17 @@ impl StateProviderFactory for BlockchainProvider2 { } } -impl CanonChainTracker for BlockchainProvider2 -where - Self: BlockReader, -{ +impl HashedPostStateProvider for BlockchainProvider2 { + fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::<::KeyHasher>( + bundle_state.state(), + ) + } +} + +impl CanonChainTracker for BlockchainProvider2 { + type Header = HeaderTy; + fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { // update timestamp self.canonical_in_memory_state.on_forkchoice_update_received(); @@ -1449,134 +689,74 @@ where self.canonical_in_memory_state.last_exchanged_transition_configuration_timestamp() } - fn set_canonical_head(&self, header: SealedHeader) { + fn set_canonical_head(&self, header: SealedHeader) { self.canonical_in_memory_state.set_canonical_head(header); } - fn set_safe(&self, header: SealedHeader) { + fn set_safe(&self, header: SealedHeader) { self.canonical_in_memory_state.set_safe(header); } - fn set_finalized(&self, header: SealedHeader) { + fn set_finalized(&self, header: SealedHeader) { self.canonical_in_memory_state.set_finalized(header); } } -impl BlockReaderIdExt for BlockchainProvider2 +impl BlockReaderIdExt for BlockchainProvider2 where - Self: BlockReader + ReceiptProviderIdExt, + Self: ReceiptProviderIdExt, { - fn block_by_id(&self, id: BlockId) -> ProviderResult> { - match id { - BlockId::Number(num) => self.block_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: should we only apply this for 
the RPCs that are listed in EIP-1898? - // so not at the provider level? - // if we decide to do this at a higher level, then we can make this an automatic - // trait impl - if Some(true) == hash.require_canonical { - // check the database, canonical blocks are only stored in the database - self.find_block_by_hash(hash.block_hash, BlockSource::Canonical) - } else { - self.block_by_hash(hash.block_hash) - } - } - } + fn block_by_id(&self, id: BlockId) -> ProviderResult> { + self.consistent_provider()?.block_by_id(id) } - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { - Ok(match id { - BlockNumberOrTag::Latest => { - Some(self.canonical_in_memory_state.get_canonical_head().unseal()) - } - BlockNumberOrTag::Finalized => { - self.canonical_in_memory_state.get_finalized_header().map(|h| h.unseal()) - } - BlockNumberOrTag::Safe => { - self.canonical_in_memory_state.get_safe_header().map(|h| h.unseal()) - } - BlockNumberOrTag::Earliest => self.header_by_number(0)?, - BlockNumberOrTag::Pending => self.canonical_in_memory_state.pending_header(), - - BlockNumberOrTag::Number(num) => self.header_by_number(num)?, - }) + fn header_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult> { + self.consistent_provider()?.header_by_number_or_tag(id) } fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, - ) -> ProviderResult> { - match id { - BlockNumberOrTag::Latest => { - Ok(Some(self.canonical_in_memory_state.get_canonical_head())) - } - BlockNumberOrTag::Finalized => { - Ok(self.canonical_in_memory_state.get_finalized_header()) - } - BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()), - BlockNumberOrTag::Earliest => self.header_by_number(0)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), - BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()), - BlockNumberOrTag::Number(num) => self.header_by_number(num)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), - } + ) -> ProviderResult>> { + self.consistent_provider()?.sealed_header_by_number_or_tag(id) } - fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { - Ok(match id { - BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }), - }) + fn sealed_header_by_id( + &self, + id: BlockId, + ) -> ProviderResult>> { + self.consistent_provider()?.sealed_header_by_id(id) } - fn header_by_id(&self, id: BlockId) -> ProviderResult> { - Ok(match id { - BlockId::Number(num) => self.header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?, - }) + fn header_by_id(&self, id: BlockId) -> ProviderResult> { + self.consistent_provider()?.header_by_id(id) } - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { - match id { - BlockId::Number(num) => self.ommers_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: EIP-1898 question, see above - // here it is not handled - self.ommers(BlockHashOrNumber::Hash(hash.block_hash)) - } - } + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { + self.consistent_provider()?.ommers_by_id(id) } } -impl CanonStateSubscriptions for BlockchainProvider2 
{ - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { +impl> CanonStateSubscriptions + for BlockchainProvider2 +{ + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.canonical_in_memory_state.subscribe_canon_state() } } -impl ForkChoiceSubscriptions for BlockchainProvider2 { - fn subscribe_safe_block(&self) -> ForkChoiceNotifications { +impl ForkChoiceSubscriptions for BlockchainProvider2 { + type Header = HeaderTy; + + fn subscribe_safe_block(&self) -> ForkChoiceNotifications { let receiver = self.canonical_in_memory_state.subscribe_safe_block(); ForkChoiceNotifications(receiver) } - fn subscribe_finalized_block(&self) -> ForkChoiceNotifications { + fn subscribe_finalized_block(&self) -> ForkChoiceNotifications { let receiver = self.canonical_in_memory_state.subscribe_finalized_block(); ForkChoiceNotifications(receiver) } @@ -1587,49 +767,7 @@ impl StorageChangeSetReader for BlockchainProvider2 { &self, block_number: BlockNumber, ) -> ProviderResult> { - if let Some(state) = self.canonical_in_memory_state.state_by_number(block_number) { - let changesets = state - .block() - .execution_output - .bundle - .reverts - .clone() - .into_plain_state_reverts() - .storage - .into_iter() - .flatten() - .flat_map(|revert: PlainStorageRevert| { - revert.storage_revert.into_iter().map(move |(key, value)| { - ( - BlockNumberAddress((block_number, revert.address)), - StorageEntry { key: key.into(), value: value.to_previous_value() }, - ) - }) - }) - .collect(); - Ok(changesets) - } else { - // Perform checks on whether or not changesets exist for the block. - let provider = self.database.provider()?; - - // No prune checkpoint means history should exist and we should `unwrap_or(true)` - let storage_history_exists = provider - .get_prune_checkpoint(PruneSegment::StorageHistory)? - .and_then(|checkpoint| { - // return true if the block number is ahead of the prune checkpoint. - // - // The checkpoint stores the highest pruned block number, so we should make - // sure the block_number is strictly greater. - checkpoint.block_number.map(|checkpoint| block_number > checkpoint) - }) - .unwrap_or(true); - - if !storage_history_exists { - return Err(ProviderError::StateAtBlockPruned(block_number)) - } - - provider.storage_changeset(block_number) - } + self.consistent_provider()?.storage_changeset(block_number) } } @@ -1638,54 +776,20 @@ impl ChangeSetReader for BlockchainProvider2 { &self, block_number: BlockNumber, ) -> ProviderResult> { - if let Some(state) = self.canonical_in_memory_state.state_by_number(block_number) { - let changesets = state - .block_ref() - .execution_output - .bundle - .reverts - .clone() - .into_plain_state_reverts() - .accounts - .into_iter() - .flatten() - .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }) - .collect(); - Ok(changesets) - } else { - // Perform checks on whether or not changesets exist for the block. - let provider = self.database.provider()?; - // No prune checkpoint means history should exist and we should `unwrap_or(true)` - let account_history_exists = provider - .get_prune_checkpoint(PruneSegment::AccountHistory)? - .and_then(|checkpoint| { - // return true if the block number is ahead of the prune checkpoint. - // - // The checkpoint stores the highest pruned block number, so we should make - // sure the block_number is strictly greater. 
- checkpoint.block_number.map(|checkpoint| block_number > checkpoint) - }) - .unwrap_or(true); - - if !account_history_exists { - return Err(ProviderError::StateAtBlockPruned(block_number)) - } - - provider.account_block_changeset(block_number) - } + self.consistent_provider()?.account_block_changeset(block_number) } } impl AccountReader for BlockchainProvider2 { /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { - // use latest state provider - let state_provider = self.latest()?; - state_provider.basic_account(address) + self.consistent_provider()?.basic_account(address) } } impl StateReader for BlockchainProvider2 { + type Receipt = ReceiptTy; + /// Re-constructs the [`ExecutionOutcome`] from in-memory and database state, if necessary. /// /// If data for the block does not exist, this will return [`None`]. @@ -1695,24 +799,16 @@ impl StateReader for BlockchainProvider2 { /// inconsistent. Currently this can safely be called within the blockchain tree thread, /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the /// first place. - fn get_state(&self, block: BlockNumber) -> ProviderResult> { - if let Some(state) = self.canonical_in_memory_state.state_by_number(block) { - let state = state.block_ref().execution_outcome().clone(); - Ok(Some(state)) - } else { - self.get_state(block..=block) - } + fn get_state( + &self, + block: BlockNumber, + ) -> ProviderResult>> { + StateReader::get_state(&self.consistent_provider()?, block) } } #[cfg(test)] mod tests { - use std::{ - ops::{Range, RangeBounds}, - sync::Arc, - time::Instant, - }; - use crate::{ providers::BlockchainProvider2, test_utils::{ @@ -1723,7 +819,7 @@ mod tests { BlockWriter, CanonChainTracker, ProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; - use alloy_eips::{BlockHashOrNumber, BlockNumHash, BlockNumberOrTag}; + use alloy_eips::{eip4895::Withdrawals, BlockHashOrNumber, BlockNumHash, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, TxNumber, B256}; use itertools::Itertools; use rand::Rng; @@ -1741,21 +837,24 @@ mod tests { use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_errors::ProviderError; use reth_execution_types::{Chain, ExecutionOutcome}; - use reth_primitives::{ - Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash, Withdrawals, - }; + use reth_primitives::{BlockExt, Receipt, SealedBlock, StaticFileSegment}; + use reth_primitives_traits::{BlockBody as _, SignedTransaction}; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DatabaseProviderFactory, HeaderProvider, ReceiptProvider, - ReceiptProviderIdExt, RequestsProvider, StateProviderFactory, TransactionVariant, - TransactionsProvider, WithdrawalsProvider, + ReceiptProviderIdExt, StateProviderFactory, TransactionVariant, TransactionsProvider, + WithdrawalsProvider, }; use reth_testing_utils::generators::{ self, random_block, random_block_range, random_changeset_range, random_eoa_accounts, random_receipt, BlockParams, BlockRangeParams, }; use revm::db::BundleState; - use std::ops::Bound; + use std::{ + ops::{Bound, Range, RangeBounds}, + sync::Arc, + time::Instant, + }; const TEST_BLOCKS_COUNT: usize = 5; @@ -1835,14 +934,18 @@ mod tests { .unwrap_or_default(); // Insert blocks into the database - for block in &database_blocks { + for (block, receipts) in database_blocks.iter().zip(&receipts) { // TODO: this should be moved inside 
`insert_historical_block`: let mut transactions_writer = static_file_provider.latest_writer(StaticFileSegment::Transactions)?; + let mut receipts_writer = + static_file_provider.latest_writer(StaticFileSegment::Receipts)?; transactions_writer.increment_block(block.number)?; - for tx in block.body.transactions() { - let tx: TransactionSignedNoHash = tx.clone().into(); - transactions_writer.append_transaction(tx_num, &tx)?; + receipts_writer.increment_block(block.number)?; + + for (tx, receipt) in block.body.transactions().iter().zip(receipts) { + transactions_writer.append_transaction(tx_num, tx)?; + receipts_writer.append_receipt(tx_num, receipt)?; tx_num += 1; } @@ -1851,21 +954,8 @@ mod tests { )?; } - // Insert receipts into the static files - UnifiedStorageWriter::new( - &provider_rw, - Some(factory.static_file_provider().latest_writer(StaticFileSegment::Receipts)?), - ) - .append_receipts_from_blocks( - // The initial block number is required - database_blocks.first().map(|b| b.number).unwrap_or_default(), - receipts[..database_blocks.len()] - .iter() - .map(|vec| vec.clone().into_iter().map(Some).collect::>()), - )?; - // Commit to both storages: database and static files - UnifiedStorageWriter::commit(provider_rw, factory.static_file_provider())?; + UnifiedStorageWriter::commit(provider_rw)?; let provider = BlockchainProvider2::new(factory)?; @@ -1951,10 +1041,9 @@ mod tests { // Push to disk let provider_rw = hook_provider.database_provider_rw().unwrap(); UnifiedStorageWriter::from(&provider_rw, &hook_provider.static_file_provider()) - .save_blocks(&[lowest_memory_block]) - .unwrap(); - UnifiedStorageWriter::commit(provider_rw, hook_provider.static_file_provider()) + .save_blocks(vec![lowest_memory_block]) .unwrap(); + UnifiedStorageWriter::commit(provider_rw).unwrap(); // Remove from memory hook_provider.canonical_in_memory_state.remove_persisted_blocks(num_hash); @@ -2399,7 +1488,7 @@ mod tests { assert_eq!( provider .withdrawals_by_block( - reth_primitives::BlockHashOrNumber::Number(15), + alloy_eips::BlockHashOrNumber::Number(15), shainghai_timestamp ) .expect("could not call withdrawals by block"), @@ -2411,7 +1500,7 @@ mod tests { assert_eq!( provider .withdrawals_by_block( - reth_primitives::BlockHashOrNumber::Number(block.number), + alloy_eips::BlockHashOrNumber::Number(block.number), shainghai_timestamp )? 
.unwrap(), @@ -2849,37 +1938,6 @@ mod tests { Ok(()) } - #[test] - fn test_requests_provider() -> eyre::Result<()> { - let mut rng = generators::rng(); - let chain_spec = Arc::new(ChainSpecBuilder::mainnet().prague_activated().build()); - let (provider, database_blocks, in_memory_blocks, _) = - provider_with_chain_spec_and_random_blocks( - &mut rng, - chain_spec.clone(), - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams { requests_count: Some(1..2), ..Default::default() }, - )?; - - let database_block = database_blocks.first().unwrap().clone(); - let in_memory_block = in_memory_blocks.last().unwrap().clone(); - - let prague_timestamp = - chain_spec.hardforks.fork(EthereumHardfork::Prague).as_timestamp().unwrap(); - - assert_eq!( - provider.requests_by_block(database_block.number.into(), prague_timestamp,)?, - database_block.body.requests.clone() - ); - assert_eq!( - provider.requests_by_block(in_memory_block.number.into(), prague_timestamp,)?, - in_memory_block.body.requests.clone() - ); - - Ok(()) - } - #[test] fn test_state_provider_factory() -> eyre::Result<()> { let mut rng = generators::rng(); @@ -3229,9 +2287,7 @@ mod tests { (transactions_by_tx_range, |block: &SealedBlock, _: &Vec>| block .body .transactions - .iter() - .map(|tx| Into::::into(tx.clone())) - .collect::>()), + .clone()), (receipts_by_tx_range, |block: &SealedBlock, receipts: &Vec>| receipts [block.number as usize] .clone()) @@ -3323,7 +2379,7 @@ mod tests { (block_range, |block: &SealedBlock| block.clone().unseal()), (block_with_senders_range, |block: &SealedBlock| block .clone() - .unseal() + .unseal::() .with_senders_unchecked(vec![])), (sealed_block_with_senders_range, |block: &SealedBlock| block .clone() @@ -3518,7 +2574,7 @@ mod tests { block_with_senders, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Number(block.number), TransactionVariant::WithHash), - block.clone().unseal().with_recovered_senders() + block.clone().unseal::().with_recovered_senders() ), (BlockHashOrNumber::Number(u64::MAX), TransactionVariant::WithHash) ), @@ -3527,7 +2583,7 @@ mod tests { block_with_senders, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Hash(block.hash()), TransactionVariant::WithHash), - block.clone().unseal().with_recovered_senders() + block.clone().unseal::().with_recovered_senders() ), (BlockHashOrNumber::Hash(B256::random()), TransactionVariant::WithHash) ), @@ -3537,7 +2593,12 @@ mod tests { |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Number(block.number), TransactionVariant::WithHash), Some( - block.clone().unseal().with_recovered_senders().unwrap().seal(block.hash()) + block + .clone() + .unseal::() + .with_recovered_senders() + .unwrap() + .seal(block.hash()) ) ), (BlockHashOrNumber::Number(u64::MAX), TransactionVariant::WithHash) @@ -3548,7 +2609,12 @@ mod tests { |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Hash(block.hash()), TransactionVariant::WithHash), Some( - block.clone().unseal().with_recovered_senders().unwrap().seal(block.hash()) + block + .clone() + .unseal::() + .with_recovered_senders() + .unwrap() + .seal(block.hash()) ) ), (BlockHashOrNumber::Hash(B256::random()), TransactionVariant::WithHash) @@ -3573,12 +2639,10 @@ mod tests { ), ( ONE, - transaction_by_id_no_hash, + transaction_by_id_unhashed, |block: &SealedBlock, tx_num: TxNumber, _: B256, _: &Vec>| ( tx_num, - Some(Into::::into( - block.body.transactions[test_tx_index].clone() - )) + 
Some(block.body.transactions[test_tx_index].clone()) ), u64::MAX ), diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index be6549033cd..619296b57f3 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -6,10 +6,11 @@ use alloy_primitives::{ Address, BlockNumber, Bytes, B256, }; use reth_primitives::{Account, Bytecode}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_storage_api::{HashedPostStateProvider, StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, TrieInput, }; /// A state provider that resolves to data from either a wrapped [`crate::ExecutionOutcome`] @@ -86,7 +87,7 @@ impl StateRootProvider { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - let mut state = HashedPostState::from_bundle_state(&bundle_state.state); + let mut state = self.hashed_post_state(bundle_state); state.extend(hashed_state); self.state_provider.state_root(state) } @@ -100,7 +101,7 @@ impl StateRootProvider hashed_state: HashedPostState, ) -> ProviderResult<(B256, TrieUpdates)> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - let mut state = HashedPostState::from_bundle_state(&bundle_state.state); + let mut state = self.hashed_post_state(bundle_state); state.extend(hashed_state); self.state_provider.state_root_with_updates(state) } @@ -110,7 +111,7 @@ impl StateRootProvider mut input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + input.prepend(self.hashed_post_state(bundle_state)); self.state_provider.state_root_from_nodes_with_updates(input) } } @@ -138,6 +139,17 @@ impl StorageRootProvider storage.extend(&hashed_storage); self.state_provider.storage_proof(address, slot, storage) } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + let mut storage = self.get_hashed_storage(address); + storage.extend(&hashed_storage); + self.state_provider.storage_multiproof(address, slots, storage) + } } impl StateProofProvider @@ -150,7 +162,7 @@ impl StateProofProvider slots: &[B256], ) -> ProviderResult { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + input.prepend(self.hashed_post_state(bundle_state)); self.state_provider.proof(input, address, slots) } @@ -160,7 +172,7 @@ impl StateProofProvider targets: HashMap>, ) -> ProviderResult { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + input.prepend(self.hashed_post_state(bundle_state)); self.state_provider.multiproof(input, targets) } @@ -170,11 +182,19 @@ impl StateProofProvider target: HashedPostState, ) -> ProviderResult> { let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - 
input.prepend(HashedPostState::from_bundle_state(&bundle_state.state)); + input.prepend(self.hashed_post_state(bundle_state)); self.state_provider.witness(input, target) } } +impl HashedPostStateProvider + for BundleStateProvider +{ + fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> HashedPostState { + self.state_provider.hashed_post_state(bundle_state) + } +} + impl StateProvider for BundleStateProvider { fn storage( &self, diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs new file mode 100644 index 00000000000..927a78fe19e --- /dev/null +++ b/crates/storage/provider/src/providers/consistent.rs @@ -0,0 +1,1892 @@ +use super::{DatabaseProviderRO, ProviderFactory, ProviderNodeTypes}; +use crate::{ + providers::StaticFileProvider, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, + BlockReader, BlockReaderIdExt, BlockSource, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, + HeaderProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, + StageCheckpointReader, StateReader, StaticFileProviderFactory, TransactionVariant, + TransactionsProvider, WithdrawalsProvider, +}; +use alloy_consensus::BlockHeader; +use alloy_eips::{ + eip2718::Encodable2718, + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, HashOrNumber, +}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; +use reth_chain_state::{BlockState, CanonicalInMemoryState, MemoryOverlayStateProviderRef}; +use reth_chainspec::{ChainInfo, EthereumHardforks}; +use reth_db::models::BlockNumberAddress; +use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; +use reth_evm::ConfigureEvmEnv; +use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; +use reth_node_types::{BlockTy, HeaderTy, ReceiptTy, TxTy}; +use reth_primitives::{ + Account, BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, + TransactionMeta, +}; +use reth_primitives_traits::BlockBody; +use reth_prune_types::{PruneCheckpoint, PruneSegment}; +use reth_stages_types::{StageCheckpoint, StageId}; +use reth_storage_api::{ + DatabaseProviderFactory, NodePrimitivesProvider, StateProvider, StorageChangeSetReader, +}; +use reth_storage_errors::provider::ProviderResult; +use revm::{ + db::states::PlainStorageRevert, + primitives::{BlockEnv, CfgEnvWithHandlerCfg}, +}; +use std::{ + collections::{hash_map, HashMap}, + ops::{Add, Bound, RangeBounds, RangeInclusive, Sub}, + sync::Arc, +}; +use tracing::trace; + +/// Type that interacts with a snapshot view of the blockchain (storage and in-memory) at time of +/// instantiation, EXCEPT for pending, safe and finalized block which might change while holding +/// this provider. +/// +/// CAUTION: Avoid holding this provider for too long or the inner database transaction will +/// time-out. +#[derive(Debug)] +#[doc(hidden)] // triggers ICE for `cargo docs` +pub struct ConsistentProvider { + /// Storage provider. + storage_provider: as DatabaseProviderFactory>::Provider, + /// Head block at time of [`Self`] creation + head_block: Option>>, + /// In-memory canonical state. This is not a snapshot, and can change! Use with caution. 
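// Illustrative sketch (not part of the diff) of the delegation pattern introduced
// in `bundle_state_provider.rs` above: callers no longer hard-code
// `HashedPostState::from_bundle_state` and instead ask the wrapped provider to
// build the hashed state, so its configured key hasher is respected.
// `overlay_root` is a hypothetical helper; the trait bounds are taken from the diff.
fn overlay_root<P>(provider: &P, overlay: HashedPostState) -> ProviderResult<B256>
where
    P: StateRootProvider + HashedPostStateProvider,
{
    // A default `BundleState` stands in for real `ExecutionOutcome` state.
    let bundle = revm::db::BundleState::default();
    let mut state = provider.hashed_post_state(&bundle);
    state.extend(overlay);
    provider.state_root(state)
}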
+ canonical_in_memory_state: CanonicalInMemoryState, +} + +impl ConsistentProvider { + /// Create a new provider using [`ProviderFactory`] and [`CanonicalInMemoryState`], + /// + /// Underneath it will take a snapshot by fetching [`CanonicalInMemoryState::head_state`] and + /// [`ProviderFactory::database_provider_ro`] effectively maintaining one single snapshotted + /// view of memory and database. + pub fn new( + storage_provider_factory: ProviderFactory, + state: CanonicalInMemoryState, + ) -> ProviderResult { + // Each one provides a snapshot at the time of instantiation, but its order matters. + // + // If we acquire first the database provider, it's possible that before the in-memory chain + // snapshot is instantiated, it will flush blocks to disk. This would + // mean that our database provider would not have access to the flushed blocks (since it's + // working under an older view), while the in-memory state may have deleted them + // entirely. Resulting in gaps on the range. + let head_block = state.head_state(); + let storage_provider = storage_provider_factory.database_provider_ro()?; + Ok(Self { storage_provider, head_block, canonical_in_memory_state: state }) + } + + // Helper function to convert range bounds + fn convert_range_bounds( + &self, + range: impl RangeBounds, + end_unbounded: impl FnOnce() -> T, + ) -> (T, T) + where + T: Copy + Add + Sub + From, + { + let start = match range.start_bound() { + Bound::Included(&n) => n, + Bound::Excluded(&n) => n + T::from(1u8), + Bound::Unbounded => T::from(0u8), + }; + + let end = match range.end_bound() { + Bound::Included(&n) => n, + Bound::Excluded(&n) => n - T::from(1u8), + Bound::Unbounded => end_unbounded(), + }; + + (start, end) + } + + /// Storage provider for latest block + fn latest_ref<'a>(&'a self) -> ProviderResult> { + trace!(target: "providers::blockchain", "Getting latest block state provider"); + + // use latest state provider if the head state exists + if let Some(state) = &self.head_block { + trace!(target: "providers::blockchain", "Using head state for latest state provider"); + Ok(self.block_state_provider_ref(state)?.boxed()) + } else { + trace!(target: "providers::blockchain", "Using database state for latest state provider"); + Ok(self.storage_provider.latest()) + } + } + + fn history_by_block_hash_ref<'a>( + &'a self, + block_hash: BlockHash, + ) -> ProviderResult> { + trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); + + self.get_in_memory_or_storage_by_block( + block_hash.into(), + |_| self.storage_provider.history_by_block_hash(block_hash), + |block_state| { + let state_provider = self.block_state_provider_ref(block_state)?; + Ok(Box::new(state_provider)) + }, + ) + } + + /// Returns a state provider indexed by the given block number or tag. + fn state_by_block_number_ref<'a>( + &'a self, + number: BlockNumber, + ) -> ProviderResult> { + let hash = + self.block_hash(number)?.ok_or_else(|| ProviderError::HeaderNotFound(number.into()))?; + self.history_by_block_hash_ref(hash) + } + + /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. + /// + /// If the range is empty, or there are no blocks for the given range, then this returns `None`. + pub fn get_state( + &self, + range: RangeInclusive, + ) -> ProviderResult>>> { + if range.is_empty() { + return Ok(None) + } + let start_block_number = *range.start(); + let end_block_number = *range.end(); + + // We are not removing block meta as it is used to get block changesets. 
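// Standalone sketch of the arithmetic in `convert_range_bounds` above; this is
// not the actual method, just the same bound conversion on a concrete `u64` range.
use std::ops::{Bound, RangeBounds};
fn to_inclusive(range: impl RangeBounds<u64>, end_unbounded: impl FnOnce() -> u64) -> (u64, u64) {
    let start = match range.start_bound() {
        Bound::Included(&n) => n,
        Bound::Excluded(&n) => n + 1,
        Bound::Unbounded => 0,
    };
    let end = match range.end_bound() {
        Bound::Included(&n) => n,
        Bound::Excluded(&n) => n - 1,
        Bound::Unbounded => end_unbounded(),
    };
    (start, end)
}
// e.g. `to_inclusive(0..5, || 0)` yields `(0, 4)` and `to_inclusive(2.., || 9)` yields `(2, 9)`.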
+ let mut block_bodies = Vec::new(); + for block_num in range.clone() { + let block_body = self + .block_body_indices(block_num)? + .ok_or(ProviderError::BlockBodyIndicesNotFound(block_num))?; + block_bodies.push((block_num, block_body)) + } + + // get transaction receipts + let Some(from_transaction_num) = block_bodies.first().map(|body| body.1.first_tx_num()) + else { + return Ok(None) + }; + let Some(to_transaction_num) = block_bodies.last().map(|body| body.1.last_tx_num()) else { + return Ok(None) + }; + + let mut account_changeset = Vec::new(); + for block_num in range.clone() { + let changeset = + self.account_block_changeset(block_num)?.into_iter().map(|elem| (block_num, elem)); + account_changeset.extend(changeset); + } + + let mut storage_changeset = Vec::new(); + for block_num in range { + let changeset = self.storage_changeset(block_num)?; + storage_changeset.extend(changeset); + } + + let (state, reverts) = + self.populate_bundle_state(account_changeset, storage_changeset, end_block_number)?; + + let mut receipt_iter = + self.receipts_by_tx_range(from_transaction_num..=to_transaction_num)?.into_iter(); + + let mut receipts = Vec::with_capacity(block_bodies.len()); + // loop break if we are at the end of the blocks. + for (_, block_body) in block_bodies { + let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); + for tx_num in block_body.tx_num_range() { + let receipt = receipt_iter + .next() + .ok_or_else(|| ProviderError::ReceiptNotFound(tx_num.into()))?; + block_receipts.push(Some(receipt)); + } + receipts.push(block_receipts); + } + + Ok(Some(ExecutionOutcome::new_init( + state, + reverts, + // We skip new contracts since we never delete them from the database + Vec::new(), + receipts.into(), + start_block_number, + Vec::new(), + ))) + } + + /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the + /// [`reth_db::PlainAccountState`] and [`reth_db::PlainStorageState`] tables, based on the given + /// storage and account changesets. + fn populate_bundle_state( + &self, + account_changeset: Vec<(u64, AccountBeforeTx)>, + storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>, + block_range_end: BlockNumber, + ) -> ProviderResult<(BundleStateInit, RevertsInit)> { + let mut state: BundleStateInit = HashMap::new(); + let mut reverts: RevertsInit = HashMap::new(); + let state_provider = self.state_by_block_number_ref(block_range_end)?; + + // add account changeset changes + for (block_number, account_before) in account_changeset.into_iter().rev() { + let AccountBeforeTx { info: old_info, address } = account_before; + match state.entry(address) { + hash_map::Entry::Vacant(entry) => { + let new_info = state_provider.basic_account(address)?; + entry.insert((old_info, new_info, HashMap::new())); + } + hash_map::Entry::Occupied(mut entry) => { + // overwrite old account state. + entry.get_mut().0 = old_info; + } + } + // insert old info into reverts. + reverts.entry(block_number).or_default().entry(address).or_default().0 = Some(old_info); + } + + // add storage changeset changes + for (block_and_address, old_storage) in storage_changeset.into_iter().rev() { + let BlockNumberAddress((block_number, address)) = block_and_address; + // get account state or insert from plain state. 
+ let account_state = match state.entry(address) { + hash_map::Entry::Vacant(entry) => { + let present_info = state_provider.basic_account(address)?; + entry.insert((present_info, present_info, HashMap::new())) + } + hash_map::Entry::Occupied(entry) => entry.into_mut(), + }; + + // match storage. + match account_state.2.entry(old_storage.key) { + hash_map::Entry::Vacant(entry) => { + let new_storage_value = + state_provider.storage(address, old_storage.key)?.unwrap_or_default(); + entry.insert((old_storage.value, new_storage_value)); + } + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().0 = old_storage.value; + } + }; + + reverts + .entry(block_number) + .or_default() + .entry(address) + .or_default() + .1 + .push(old_storage); + } + + Ok((state, reverts)) + } + + /// Fetches a range of data from both in-memory state and persistent storage while a predicate + /// is met. + /// + /// Creates a snapshot of the in-memory chain state and database provider to prevent + /// inconsistencies. Splits the range into in-memory and storage sections, prioritizing + /// recent in-memory blocks in case of overlaps. + /// + /// * `fetch_db_range` function (`F`) provides access to the database provider, allowing the + /// user to retrieve the required items from the database using [`RangeInclusive`]. + /// * `map_block_state_item` function (`G`) provides each block of the range in the in-memory + /// state, allowing for selection or filtering for the desired data. + fn get_in_memory_or_storage_by_block_range_while( + &self, + range: impl RangeBounds, + fetch_db_range: F, + map_block_state_item: G, + mut predicate: P, + ) -> ProviderResult> + where + F: FnOnce( + &DatabaseProviderRO, + RangeInclusive, + &mut P, + ) -> ProviderResult>, + G: Fn(&BlockState, &mut P) -> Option, + P: FnMut(&T) -> bool, + { + // Each one provides a snapshot at the time of instantiation, but its order matters. + // + // If we acquire first the database provider, it's possible that before the in-memory chain + // snapshot is instantiated, it will flush blocks to disk. This would + // mean that our database provider would not have access to the flushed blocks (since it's + // working under an older view), while the in-memory state may have deleted them + // entirely. Resulting in gaps on the range. + let mut in_memory_chain = + self.head_block.as_ref().map(|b| b.chain().collect::>()).unwrap_or_default(); + let db_provider = &self.storage_provider; + + let (start, end) = self.convert_range_bounds(range, || { + // the first block is the highest one. + in_memory_chain + .first() + .map(|b| b.number()) + .unwrap_or_else(|| db_provider.last_block_number().unwrap_or_default()) + }); + + if start > end { + return Ok(vec![]) + } + + // Split range into storage_range and in-memory range. If the in-memory range is not + // necessary drop it early. + // + // The last block of `in_memory_chain` is the lowest block number. + let (in_memory, storage_range) = match in_memory_chain.last().as_ref().map(|b| b.number()) { + Some(lowest_memory_block) if lowest_memory_block <= end => { + let highest_memory_block = + in_memory_chain.first().as_ref().map(|b| b.number()).expect("qed"); + + // Database will for a time overlap with in-memory-chain blocks. In + // case of a re-org, it can mean that the database blocks are of a forked chain, and + // so, we should prioritize the in-memory overlapped blocks. 
+ let in_memory_range = + lowest_memory_block.max(start)..=end.min(highest_memory_block); + + // If requested range is in the middle of the in-memory range, remove the necessary + // lowest blocks + in_memory_chain.truncate( + in_memory_chain + .len() + .saturating_sub(start.saturating_sub(lowest_memory_block) as usize), + ); + + let storage_range = + (lowest_memory_block > start).then(|| start..=lowest_memory_block - 1); + + (Some((in_memory_chain, in_memory_range)), storage_range) + } + _ => { + // Drop the in-memory chain so we don't hold blocks in memory. + drop(in_memory_chain); + + (None, Some(start..=end)) + } + }; + + let mut items = Vec::with_capacity((end - start + 1) as usize); + + if let Some(storage_range) = storage_range { + let mut db_items = fetch_db_range(db_provider, storage_range.clone(), &mut predicate)?; + items.append(&mut db_items); + + // The predicate was not met, if the number of items differs from the expected. So, we + // return what we have. + if items.len() as u64 != storage_range.end() - storage_range.start() + 1 { + return Ok(items) + } + } + + if let Some((in_memory_chain, in_memory_range)) = in_memory { + for (num, block) in in_memory_range.zip(in_memory_chain.into_iter().rev()) { + debug_assert!(num == block.number()); + if let Some(item) = map_block_state_item(block, &mut predicate) { + items.push(item); + } else { + break + } + } + } + + Ok(items) + } + + /// This uses a given [`BlockState`] to initialize a state provider for that block. + fn block_state_provider_ref( + &self, + state: &BlockState, + ) -> ProviderResult> { + let anchor_hash = state.anchor().hash; + let latest_historical = self.history_by_block_hash_ref(anchor_hash)?; + let in_memory = state.chain().map(|block_state| block_state.block()).collect(); + Ok(MemoryOverlayStateProviderRef::new(latest_historical, in_memory)) + } + + /// Fetches data from either in-memory state or persistent storage for a range of transactions. + /// + /// * `fetch_from_db`: has a `DatabaseProviderRO` and the storage specific range. + /// * `fetch_from_block_state`: has a [`RangeInclusive`] of elements that should be fetched from + /// [`BlockState`]. [`RangeInclusive`] is necessary to handle partial look-ups of a block. + fn get_in_memory_or_storage_by_tx_range( + &self, + range: impl RangeBounds, + fetch_from_db: S, + fetch_from_block_state: M, + ) -> ProviderResult> + where + S: FnOnce( + &DatabaseProviderRO, + RangeInclusive, + ) -> ProviderResult>, + M: Fn(RangeInclusive, &BlockState) -> ProviderResult>, + { + let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); + let provider = &self.storage_provider; + + // Get the last block number stored in the storage which does NOT overlap with in-memory + // chain. + let last_database_block_number = in_mem_chain + .last() + .map(|b| Ok(b.anchor().number)) + .unwrap_or_else(|| provider.last_block_number())?; + + // Get the next tx number for the last block stored in the storage, which marks the start of + // the in-memory state. + let last_block_body_index = provider + .block_body_indices(last_database_block_number)? 
+ .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; + let mut in_memory_tx_num = last_block_body_index.next_tx_num(); + + let (start, end) = self.convert_range_bounds(range, || { + in_mem_chain + .iter() + .map(|b| b.block_ref().block().body.transactions().len() as u64) + .sum::() + + last_block_body_index.last_tx_num() + }); + + if start > end { + return Ok(vec![]) + } + + let mut tx_range = start..=end; + + // If the range is entirely before the first in-memory transaction number, fetch from + // storage + if *tx_range.end() < in_memory_tx_num { + return fetch_from_db(provider, tx_range); + } + + let mut items = Vec::with_capacity((tx_range.end() - tx_range.start() + 1) as usize); + + // If the range spans storage and memory, get elements from storage first. + if *tx_range.start() < in_memory_tx_num { + // Determine the range that needs to be fetched from storage. + let db_range = *tx_range.start()..=in_memory_tx_num.saturating_sub(1); + + // Set the remaining transaction range for in-memory + tx_range = in_memory_tx_num..=*tx_range.end(); + + items.extend(fetch_from_db(provider, db_range)?); + } + + // Iterate from the lowest block to the highest in-memory chain + for block_state in in_mem_chain.iter().rev() { + let block_tx_count = block_state.block_ref().block().body.transactions().len(); + let remaining = (tx_range.end() - tx_range.start() + 1) as usize; + + // If the transaction range start is equal or higher than the next block first + // transaction, advance + if *tx_range.start() >= in_memory_tx_num + block_tx_count as u64 { + in_memory_tx_num += block_tx_count as u64; + continue + } + + // This should only be more than 0 once, in case of a partial range inside a block. + let skip = (tx_range.start() - in_memory_tx_num) as usize; + + items.extend(fetch_from_block_state( + skip..=skip + (remaining.min(block_tx_count - skip) - 1), + block_state, + )?); + + in_memory_tx_num += block_tx_count as u64; + + // Break if the range has been fully processed + if in_memory_tx_num > *tx_range.end() { + break + } + + // Set updated range + tx_range = in_memory_tx_num..=*tx_range.end(); + } + + Ok(items) + } + + /// Fetches data from either in-memory state or persistent storage by transaction + /// [`HashOrNumber`]. + fn get_in_memory_or_storage_by_tx( + &self, + id: HashOrNumber, + fetch_from_db: S, + fetch_from_block_state: M, + ) -> ProviderResult> + where + S: FnOnce(&DatabaseProviderRO) -> ProviderResult>, + M: Fn(usize, TxNumber, &BlockState) -> ProviderResult>, + { + let in_mem_chain = self.head_block.iter().flat_map(|b| b.chain()).collect::>(); + let provider = &self.storage_provider; + + // Get the last block number stored in the database which does NOT overlap with in-memory + // chain. + let last_database_block_number = in_mem_chain + .last() + .map(|b| Ok(b.anchor().number)) + .unwrap_or_else(|| provider.last_block_number())?; + + // Get the next tx number for the last block stored in the database and consider it the + // first tx number of the in-memory state + let last_block_body_index = provider + .block_body_indices(last_database_block_number)? 
+ .ok_or(ProviderError::BlockBodyIndicesNotFound(last_database_block_number))?; + let mut in_memory_tx_num = last_block_body_index.next_tx_num(); + + // If the transaction number is less than the first in-memory transaction number, make a + // database lookup + if let HashOrNumber::Number(id) = id { + if id < in_memory_tx_num { + return fetch_from_db(provider) + } + } + + // Iterate from the lowest block to the highest + for block_state in in_mem_chain.iter().rev() { + let executed_block = block_state.block_ref(); + let block = executed_block.block(); + + for tx_index in 0..block.body.transactions().len() { + match id { + HashOrNumber::Hash(tx_hash) => { + if tx_hash == block.body.transactions()[tx_index].trie_hash() { + return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) + } + } + HashOrNumber::Number(id) => { + if id == in_memory_tx_num { + return fetch_from_block_state(tx_index, in_memory_tx_num, block_state) + } + } + } + + in_memory_tx_num += 1; + } + } + + // Not found in-memory, so check database. + if let HashOrNumber::Hash(_) = id { + return fetch_from_db(provider) + } + + Ok(None) + } + + /// Fetches data from either in-memory state or persistent storage by [`BlockHashOrNumber`]. + pub(crate) fn get_in_memory_or_storage_by_block( + &self, + id: BlockHashOrNumber, + fetch_from_db: S, + fetch_from_block_state: M, + ) -> ProviderResult + where + S: FnOnce(&DatabaseProviderRO) -> ProviderResult, + M: Fn(&BlockState) -> ProviderResult, + { + if let Some(Some(block_state)) = self.head_block.as_ref().map(|b| b.block_on_chain(id)) { + return fetch_from_block_state(block_state) + } + fetch_from_db(&self.storage_provider) + } +} + +impl ConsistentProvider { + /// Ensures that the given block number is canonical (synced) + /// + /// This is a helper for guarding the `HistoricalStateProvider` against block numbers that are + /// out of range and would lead to invalid results, mainly during initial sync. + /// + /// Verifying the `block_number` would be expensive since we need to lookup sync table + /// Instead, we ensure that the `block_number` is within the range of the + /// [`Self::best_block_number`] which is updated when a block is synced. + #[inline] + pub(crate) fn ensure_canonical_block(&self, block_number: BlockNumber) -> ProviderResult<()> { + let latest = self.best_block_number()?; + if block_number > latest { + Err(ProviderError::HeaderNotFound(block_number.into())) + } else { + Ok(()) + } + } +} + +impl NodePrimitivesProvider for ConsistentProvider { + type Primitives = N::Primitives; +} + +impl StaticFileProviderFactory for ConsistentProvider { + fn static_file_provider(&self) -> StaticFileProvider { + self.storage_provider.static_file_provider() + } +} + +impl HeaderProvider for ConsistentProvider { + type Header = HeaderTy; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + (*block_hash).into(), + |db_provider| db_provider.header(block_hash), + |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), + ) + } + + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + num.into(), + |db_provider| db_provider.header_by_number(num), + |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), + ) + } + + fn header_td(&self, hash: &BlockHash) -> ProviderResult> { + if let Some(num) = self.block_number(*hash)? 
{ + self.header_td_by_number(num) + } else { + Ok(None) + } + } + + fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { + let number = if self.head_block.as_ref().map(|b| b.block_on_chain(number.into())).is_some() + { + // If the block exists in memory, we should return a TD for it. + // + // The canonical in memory state should only store post-merge blocks. Post-merge blocks + // have zero difficulty. This means we can use the total difficulty for the last + // finalized block number if present (so that we are not affected by reorgs), if not the + // last number in the database will be used. + if let Some(last_finalized_num_hash) = + self.canonical_in_memory_state.get_finalized_num_hash() + { + last_finalized_num_hash.number + } else { + self.last_block_number()? + } + } else { + // Otherwise, return what we have on disk for the input block + number + }; + self.storage_provider.header_td_by_number(number) + } + + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.headers_range(range), + |block_state, _| Some(block_state.block_ref().block().header.header().clone()), + |_| true, + ) + } + + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.sealed_header(number), + |block_state| Ok(Some(block_state.block_ref().block().header.clone())), + ) + } + + fn sealed_headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.sealed_headers_range(range), + |block_state, _| Some(block_state.block_ref().block().header.clone()), + |_| true, + ) + } + + fn sealed_headers_while( + &self, + range: impl RangeBounds, + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), + |block_state, predicate| { + let header = &block_state.block_ref().block().header; + predicate(header).then(|| header.clone()) + }, + predicate, + ) + } +} + +impl BlockHashReader for ConsistentProvider { + fn block_hash(&self, number: u64) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.block_hash(number), + |block_state| Ok(Some(block_state.hash())), + ) + } + + fn canonical_hashes_range( + &self, + start: BlockNumber, + end: BlockNumber, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + start..end, + |db_provider, inclusive_range, _| { + db_provider + .canonical_hashes_range(*inclusive_range.start(), *inclusive_range.end() + 1) + }, + |block_state, _| Some(block_state.hash()), + |_| true, + ) + } +} + +impl BlockNumReader for ConsistentProvider { + fn chain_info(&self) -> ProviderResult { + let best_number = self.best_block_number()?; + Ok(ChainInfo { best_hash: self.block_hash(best_number)?.unwrap_or_default(), best_number }) + } + + fn best_block_number(&self) -> ProviderResult { + self.head_block.as_ref().map(|b| Ok(b.number())).unwrap_or_else(|| self.last_block_number()) + } + + fn last_block_number(&self) -> ProviderResult { + self.storage_provider.last_block_number() + } + + fn block_number(&self, hash: B256) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + hash.into(), + |db_provider| 
db_provider.block_number(hash), + |block_state| Ok(Some(block_state.number())), + ) + } +} + +impl BlockIdReader for ConsistentProvider { + fn pending_block_num_hash(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.pending_block_num_hash()) + } + + fn safe_block_num_hash(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.get_safe_num_hash()) + } + + fn finalized_block_num_hash(&self) -> ProviderResult> { + Ok(self.canonical_in_memory_state.get_finalized_num_hash()) + } +} + +impl BlockReader for ConsistentProvider { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { + match source { + BlockSource::Any | BlockSource::Canonical => { + // Note: it's fine to return the unsealed block because the caller already has + // the hash + self.get_in_memory_or_storage_by_block( + hash.into(), + |db_provider| db_provider.find_block_by_hash(hash, source), + |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), + ) + } + BlockSource::Pending => { + Ok(self.canonical_in_memory_state.pending_block().map(|block| block.unseal())) + } + } + } + + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.block(id), + |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), + ) + } + + fn pending_block(&self) -> ProviderResult>> { + Ok(self.canonical_in_memory_state.pending_block()) + } + + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { + Ok(self.canonical_in_memory_state.pending_block_with_senders()) + } + + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { + Ok(self.canonical_in_memory_state.pending_block_and_receipts()) + } + + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.ommers(id), + |block_state| { + if self.chain_spec().final_paris_total_difficulty(block_state.number()).is_some() { + return Ok(Some(Vec::new())) + } + + Ok(block_state.block_ref().block().body.ommers().map(|o| o.to_vec())) + }, + ) + } + + fn block_body_indices( + &self, + number: BlockNumber, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.block_body_indices(number), + |block_state| { + // Find the last block indices on database + let last_storage_block_number = block_state.anchor().number; + let mut stored_indices = self + .storage_provider + .block_body_indices(last_storage_block_number)? + .ok_or(ProviderError::BlockBodyIndicesNotFound(last_storage_block_number))?; + + // Prepare our block indices + stored_indices.first_tx_num = stored_indices.next_tx_num(); + stored_indices.tx_count = 0; + + // Iterate from the lowest block in memory until our target block + for state in block_state.chain().collect::>().into_iter().rev() { + let block_tx_count = state.block_ref().block.body.transactions().len() as u64; + if state.block_ref().block().number() == number { + stored_indices.tx_count = block_tx_count; + } else { + stored_indices.first_tx_num += block_tx_count; + } + } + + Ok(Some(stored_indices)) + }, + ) + } + + /// Returns the block with senders with matching number or hash from database. + /// + /// **NOTE: If [`TransactionVariant::NoHash`] is provided then the transactions have invalid + /// hashes, since they would need to be calculated on the spot, and we want fast querying.** + /// + /// Returns `None` if block is not found. 
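// Numeric sketch (made-up values) of the in-memory `block_body_indices`
// computation above: if the last stored block has `first_tx_num = 10` and
// `tx_count = 5`, and two in-memory blocks follow with 3 and 2 transactions,
// the second in-memory block gets `first_tx_num = 10 + 5 + 3 = 18`, `tx_count = 2`.
fn indices_example() {
    let (db_first_tx, db_tx_count) = (10u64, 5u64);
    let mem_tx_counts = [3u64, 2]; // lowest in-memory block first
    let first_tx_num = db_first_tx + db_tx_count + mem_tx_counts[0];
    assert_eq!((first_tx_num, mem_tx_counts[1]), (18, 2));
}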
+ fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.block_with_senders(id, transaction_kind), + |block_state| Ok(Some(block_state.block_with_senders())), + ) + } + + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.sealed_block_with_senders(id, transaction_kind), + |block_state| Ok(Some(block_state.sealed_block_with_senders())), + ) + } + + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.block_range(range), + |block_state, _| Some(block_state.block_ref().block().clone().unseal()), + |_| true, + ) + } + + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.block_with_senders_range(range), + |block_state, _| Some(block_state.block_with_senders()), + |_| true, + ) + } + + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.sealed_block_with_senders_range(range), + |block_state, _| Some(block_state.sealed_block_with_senders()), + |_| true, + ) + } +} + +impl TransactionsProvider for ConsistentProvider { + type Transaction = TxTy; + + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + tx_hash.into(), + |db_provider| db_provider.transaction_id(tx_hash), + |_, tx_number, _| Ok(Some(tx_number)), + ) + } + + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_by_id(id), + |tx_index, _, block_state| { + Ok(block_state + .block_ref() + .block() + .body + .transactions() + .get(tx_index) + .cloned() + .map(Into::into)) + }, + ) + } + + fn transaction_by_id_unhashed( + &self, + id: TxNumber, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_by_id_unhashed(id), + |tx_index, _, block_state| { + Ok(block_state + .block_ref() + .block() + .body + .transactions() + .get(tx_index) + .cloned() + .map(Into::into)) + }, + ) + } + + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + if let Some(tx) = self.head_block.as_ref().and_then(|b| b.transaction_on_chain(hash)) { + return Ok(Some(tx)) + } + + self.storage_provider.transaction_by_hash(hash) + } + + fn transaction_by_hash_with_meta( + &self, + tx_hash: TxHash, + ) -> ProviderResult> { + if let Some((tx, meta)) = + self.head_block.as_ref().and_then(|b| b.transaction_meta_on_chain(tx_hash)) + { + return Ok(Some((tx, meta))) + } + + self.storage_provider.transaction_by_hash_with_meta(tx_hash) + } + + fn transaction_block(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_block(id), + |_, _, block_state| Ok(Some(block_state.block_ref().block().number())), + ) + } + + fn transactions_by_block( + &self, + id: BlockHashOrNumber, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block( + id, + |provider| provider.transactions_by_block(id), + |block_state| 
Ok(Some(block_state.block_ref().block().body.transactions().to_vec())), + ) + } + + fn transactions_by_block_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.transactions_by_block_range(range), + |block_state, _| Some(block_state.block_ref().block().body.transactions().to_vec()), + |_| true, + ) + } + + fn transactions_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), + |index_range, block_state| { + Ok(block_state.block_ref().block().body.transactions()[index_range].to_vec()) + }, + ) + } + + fn senders_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.senders_by_tx_range(db_range), + |index_range, block_state| Ok(block_state.block_ref().senders[index_range].to_vec()), + ) + } + + fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.transaction_sender(id), + |tx_index, _, block_state| Ok(block_state.block_ref().senders.get(tx_index).copied()), + ) + } +} + +impl ReceiptProvider for ConsistentProvider { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx( + id.into(), + |provider| provider.receipt(id), + |tx_index, _, block_state| { + Ok(block_state.executed_block_receipts().get(tx_index).cloned()) + }, + ) + } + + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + for block_state in self.head_block.iter().flat_map(|b| b.chain()) { + let executed_block = block_state.block_ref(); + let block = executed_block.block(); + let receipts = block_state.executed_block_receipts(); + + // assuming 1:1 correspondence between transactions and receipts + debug_assert_eq!( + block.body.transactions().len(), + receipts.len(), + "Mismatch between transaction and receipt count" + ); + + if let Some(tx_index) = + block.body.transactions().iter().position(|tx| tx.trie_hash() == hash) + { + // safe to use tx_index for receipts due to 1:1 correspondence + return Ok(receipts.get(tx_index).cloned()); + } + } + + self.storage_provider.receipt_by_hash(hash) + } + + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { + self.get_in_memory_or_storage_by_block( + block, + |db_provider| db_provider.receipts_by_block(block), + |block_state| Ok(Some(block_state.executed_block_receipts())), + ) + } + + fn receipts_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + self.get_in_memory_or_storage_by_tx_range( + range, + |db_provider, db_range| db_provider.receipts_by_tx_range(db_range), + |index_range, block_state| { + Ok(block_state.executed_block_receipts().drain(index_range).collect()) + }, + ) + } +} + +impl ReceiptProviderIdExt for ConsistentProvider { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + match block { + BlockId::Hash(rpc_block_hash) => { + let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; + if receipts.is_none() && !rpc_block_hash.require_canonical.unwrap_or(false) { + if let Some(state) = self + .head_block + .as_ref() + .and_then(|b| b.block_on_chain(rpc_block_hash.block_hash.into())) + { + receipts = Some(state.executed_block_receipts()); + } + } + 
Ok(receipts) + } + BlockId::Number(num_tag) => match num_tag { + BlockNumberOrTag::Pending => Ok(self + .canonical_in_memory_state + .pending_state() + .map(|block_state| block_state.executed_block_receipts())), + _ => { + if let Some(num) = self.convert_block_number(num_tag)? { + self.receipts_by_block(num.into()) + } else { + Ok(None) + } + } + }, + } + } +} + +impl WithdrawalsProvider for ConsistentProvider { + fn withdrawals_by_block( + &self, + id: BlockHashOrNumber, + timestamp: u64, + ) -> ProviderResult> { + if !self.chain_spec().is_shanghai_active_at_timestamp(timestamp) { + return Ok(None) + } + + self.get_in_memory_or_storage_by_block( + id, + |db_provider| db_provider.withdrawals_by_block(id, timestamp), + |block_state| Ok(block_state.block_ref().block().body.withdrawals().cloned()), + ) + } + + fn latest_withdrawal(&self) -> ProviderResult> { + let best_block_num = self.best_block_number()?; + + self.get_in_memory_or_storage_by_block( + best_block_num.into(), + |db_provider| db_provider.latest_withdrawal(), + |block_state| { + Ok(block_state + .block_ref() + .block() + .body + .withdrawals() + .cloned() + .and_then(|mut w| w.pop())) + }, + ) + } +} + +impl StageCheckpointReader for ConsistentProvider { + fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { + self.storage_provider.get_stage_checkpoint(id) + } + + fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { + self.storage_provider.get_stage_checkpoint_progress(id) + } + + fn get_all_checkpoints(&self) -> ProviderResult> { + self.storage_provider.get_all_checkpoints() + } +} + +impl EvmEnvProvider> for ConsistentProvider { + fn fill_env_with_header( + &self, + cfg: &mut CfgEnvWithHandlerCfg, + block_env: &mut BlockEnv, + header: &HeaderTy, + evm_config: EvmConfig, + ) -> ProviderResult<()> + where + EvmConfig: ConfigureEvmEnv
<Header = HeaderTy<N>>, + { + let total_difficulty = self + .header_td_by_number(header.number())? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; + evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty); + Ok(()) + } + + fn fill_cfg_env_at( + &self, + cfg: &mut CfgEnvWithHandlerCfg, + at: BlockHashOrNumber, + evm_config: EvmConfig, + ) -> ProviderResult<()> + where + EvmConfig: ConfigureEvmEnv<Header = HeaderTy<N>>,
+ { + let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; + let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; + self.fill_cfg_env_with_header(cfg, &header, evm_config) + } + + fn fill_cfg_env_with_header( + &self, + cfg: &mut CfgEnvWithHandlerCfg, + header: &HeaderTy<N>, + evm_config: EvmConfig, + ) -> ProviderResult<()> + where + EvmConfig: ConfigureEvmEnv<Header = HeaderTy<N>
>, + { + let total_difficulty = self + .header_td_by_number(header.number())? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; + evm_config.fill_cfg_env(cfg, header, total_difficulty); + Ok(()) + } +} + +impl PruneCheckpointReader for ConsistentProvider { + fn get_prune_checkpoint( + &self, + segment: PruneSegment, + ) -> ProviderResult> { + self.storage_provider.get_prune_checkpoint(segment) + } + + fn get_prune_checkpoints(&self) -> ProviderResult> { + self.storage_provider.get_prune_checkpoints() + } +} + +impl ChainSpecProvider for ConsistentProvider { + type ChainSpec = N::ChainSpec; + + fn chain_spec(&self) -> Arc { + ChainSpecProvider::chain_spec(&self.storage_provider) + } +} + +impl BlockReaderIdExt for ConsistentProvider { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { + match id { + BlockId::Number(num) => self.block_by_number_or_tag(num), + BlockId::Hash(hash) => { + // TODO: should we only apply this for the RPCs that are listed in EIP-1898? + // so not at the provider level? + // if we decide to do this at a higher level, then we can make this an automatic + // trait impl + if Some(true) == hash.require_canonical { + // check the database, canonical blocks are only stored in the database + self.find_block_by_hash(hash.block_hash, BlockSource::Canonical) + } else { + self.block_by_hash(hash.block_hash) + } + } + } + } + + fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult>> { + Ok(match id { + BlockNumberOrTag::Latest => { + Some(self.canonical_in_memory_state.get_canonical_head().unseal()) + } + BlockNumberOrTag::Finalized => { + self.canonical_in_memory_state.get_finalized_header().map(|h| h.unseal()) + } + BlockNumberOrTag::Safe => { + self.canonical_in_memory_state.get_safe_header().map(|h| h.unseal()) + } + BlockNumberOrTag::Earliest => self.header_by_number(0)?, + BlockNumberOrTag::Pending => self.canonical_in_memory_state.pending_header(), + + BlockNumberOrTag::Number(num) => self.header_by_number(num)?, + }) + } + + fn sealed_header_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult>>> { + match id { + BlockNumberOrTag::Latest => { + Ok(Some(self.canonical_in_memory_state.get_canonical_head())) + } + BlockNumberOrTag::Finalized => { + Ok(self.canonical_in_memory_state.get_finalized_header()) + } + BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()), + BlockNumberOrTag::Earliest => self + .header_by_number(0)? + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), + BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()), + BlockNumberOrTag::Number(num) => self + .header_by_number(num)? 
+ .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), + } + } + + fn sealed_header_by_id( + &self, + id: BlockId, + ) -> ProviderResult>>> { + Ok(match id { + BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, + BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), + }) + } + + fn header_by_id(&self, id: BlockId) -> ProviderResult>> { + Ok(match id { + BlockId::Number(num) => self.header_by_number_or_tag(num)?, + BlockId::Hash(hash) => self.header(&hash.block_hash)?, + }) + } + + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>> { + match id { + BlockId::Number(num) => self.ommers_by_number_or_tag(num), + BlockId::Hash(hash) => { + // TODO: EIP-1898 question, see above + // here it is not handled + self.ommers(BlockHashOrNumber::Hash(hash.block_hash)) + } + } + } +} + +impl StorageChangeSetReader for ConsistentProvider { + fn storage_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + if let Some(state) = + self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into())) + { + let changesets = state + .block() + .execution_output + .bundle + .reverts + .clone() + .to_plain_state_reverts() + .storage + .into_iter() + .flatten() + .flat_map(|revert: PlainStorageRevert| { + revert.storage_revert.into_iter().map(move |(key, value)| { + ( + BlockNumberAddress((block_number, revert.address)), + StorageEntry { key: key.into(), value: value.to_previous_value() }, + ) + }) + }) + .collect(); + Ok(changesets) + } else { + // Perform checks on whether or not changesets exist for the block. + + // No prune checkpoint means history should exist and we should `unwrap_or(true)` + let storage_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::StorageHistory)? + .and_then(|checkpoint| { + // return true if the block number is ahead of the prune checkpoint. + // + // The checkpoint stores the highest pruned block number, so we should make + // sure the block_number is strictly greater. + checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !storage_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + self.storage_provider.storage_changeset(block_number) + } + } +} + +impl ChangeSetReader for ConsistentProvider { + fn account_block_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + if let Some(state) = + self.head_block.as_ref().and_then(|b| b.block_on_chain(block_number.into())) + { + let changesets = state + .block_ref() + .execution_output + .bundle + .reverts + .clone() + .to_plain_state_reverts() + .accounts + .into_iter() + .flatten() + .map(|(address, info)| AccountBeforeTx { address, info: info.map(Into::into) }) + .collect(); + Ok(changesets) + } else { + // Perform checks on whether or not changesets exist for the block. + + // No prune checkpoint means history should exist and we should `unwrap_or(true)` + let account_history_exists = self + .storage_provider + .get_prune_checkpoint(PruneSegment::AccountHistory)? + .and_then(|checkpoint| { + // return true if the block number is ahead of the prune checkpoint. + // + // The checkpoint stores the highest pruned block number, so we should make + // sure the block_number is strictly greater. 
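// Standalone sketch of the prune-checkpoint guard used by both changeset
// readers here; `history_exists` is a hypothetical helper mirroring the
// `map(..).unwrap_or(true)` logic below.
fn history_exists(checkpoint_block: Option<u64>, block_number: u64) -> bool {
    // A missing checkpoint means nothing was pruned; otherwise the block must be
    // strictly above the highest pruned block number.
    checkpoint_block.map(|highest_pruned| block_number > highest_pruned).unwrap_or(true)
}
// e.g. `history_exists(None, 5)` and `history_exists(Some(4), 5)` are true,
// while `history_exists(Some(5), 5)` is false.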
+ checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !account_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + self.storage_provider.account_block_changeset(block_number) + } + } +} + +impl AccountReader for ConsistentProvider { + /// Get basic account information. + fn basic_account(&self, address: Address) -> ProviderResult> { + // use latest state provider + let state_provider = self.latest_ref()?; + state_provider.basic_account(address) + } +} + +impl StateReader for ConsistentProvider { + type Receipt = ReceiptTy; + + /// Re-constructs the [`ExecutionOutcome`] from in-memory and database state, if necessary. + /// + /// If data for the block does not exist, this will return [`None`]. + /// + /// NOTE: This cannot be called safely in a loop outside of the blockchain tree thread. This is + /// because the [`CanonicalInMemoryState`] could change during a reorg, causing results to be + /// inconsistent. Currently this can safely be called within the blockchain tree thread, + /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the + /// first place. + fn get_state( + &self, + block: BlockNumber, + ) -> ProviderResult>> { + if let Some(state) = self.head_block.as_ref().and_then(|b| b.block_on_chain(block.into())) { + let state = state.block_ref().execution_outcome().clone(); + Ok(Some(state)) + } else { + Self::get_state(self, block..=block) + } + } +} + +#[cfg(test)] +mod tests { + use crate::{ + providers::blockchain_provider::BlockchainProvider2, + test_utils::create_test_provider_factory, BlockWriter, + }; + use alloy_eips::BlockHashOrNumber; + use alloy_primitives::B256; + use itertools::Itertools; + use rand::Rng; + use reth_chain_state::{ExecutedBlock, NewCanonicalChain}; + use reth_db::models::AccountBeforeTx; + use reth_execution_types::ExecutionOutcome; + use reth_primitives::SealedBlock; + use reth_storage_api::{BlockReader, BlockSource, ChangeSetReader}; + use reth_testing_utils::generators::{ + self, random_block_range, random_changeset_range, random_eoa_accounts, BlockRangeParams, + }; + use revm::db::BundleState; + use std::{ + ops::{Bound, Range, RangeBounds}, + sync::Arc, + }; + + const TEST_BLOCKS_COUNT: usize = 5; + + fn random_blocks( + rng: &mut impl Rng, + database_blocks: usize, + in_memory_blocks: usize, + requests_count: Option>, + withdrawals_count: Option>, + tx_count: impl RangeBounds, + ) -> (Vec, Vec) { + let block_range = (database_blocks + in_memory_blocks - 1) as u64; + + let tx_start = match tx_count.start_bound() { + Bound::Included(&n) | Bound::Excluded(&n) => n, + Bound::Unbounded => u8::MIN, + }; + let tx_end = match tx_count.end_bound() { + Bound::Included(&n) | Bound::Excluded(&n) => n + 1, + Bound::Unbounded => u8::MAX, + }; + + let blocks = random_block_range( + rng, + 0..=block_range, + BlockRangeParams { + parent: Some(B256::ZERO), + tx_count: tx_start..tx_end, + requests_count, + withdrawals_count, + }, + ); + let (database_blocks, in_memory_blocks) = blocks.split_at(database_blocks); + (database_blocks.to_vec(), in_memory_blocks.to_vec()) + } + + #[test] + fn test_block_reader_find_block_by_hash() -> eyre::Result<()> { + // Initialize random number generator and provider factory + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks and split into database and in-memory blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: 
Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ); + let (database_blocks, in_memory_blocks) = blocks.split_at(5); + + // Insert first 5 blocks into the database + let provider_rw = factory.provider_rw()?; + for block in database_blocks { + provider_rw.insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + )?; + } + provider_rw.commit()?; + + // Create a new provider + let provider = BlockchainProvider2::new(factory)?; + let consistent_provider = provider.consistent_provider()?; + + // Useful blocks + let first_db_block = database_blocks.first().unwrap(); + let first_in_mem_block = in_memory_blocks.first().unwrap(); + let last_in_mem_block = in_memory_blocks.last().unwrap(); + + // No block in memory before setting in memory state + assert_eq!( + consistent_provider.find_block_by_hash(first_in_mem_block.hash(), BlockSource::Any)?, + None + ); + assert_eq!( + consistent_provider + .find_block_by_hash(first_in_mem_block.hash(), BlockSource::Canonical)?, + None + ); + // No pending block in memory + assert_eq!( + consistent_provider + .find_block_by_hash(first_in_mem_block.hash(), BlockSource::Pending)?, + None + ); + + // Insert first block into the in-memory state + let in_memory_block_senders = + first_in_mem_block.senders().expect("failed to recover senders"); + let chain = NewCanonicalChain::Commit { + new: vec![ExecutedBlock::new( + Arc::new(first_in_mem_block.clone()), + Arc::new(in_memory_block_senders), + Default::default(), + Default::default(), + Default::default(), + )], + }; + consistent_provider.canonical_in_memory_state.update_chain(chain); + let consistent_provider = provider.consistent_provider()?; + + // Now the block should be found in memory + assert_eq!( + consistent_provider.find_block_by_hash(first_in_mem_block.hash(), BlockSource::Any)?, + Some(first_in_mem_block.clone().into()) + ); + assert_eq!( + consistent_provider + .find_block_by_hash(first_in_mem_block.hash(), BlockSource::Canonical)?, + Some(first_in_mem_block.clone().into()) + ); + + // Find the first block in database by hash + assert_eq!( + consistent_provider.find_block_by_hash(first_db_block.hash(), BlockSource::Any)?, + Some(first_db_block.clone().into()) + ); + assert_eq!( + consistent_provider + .find_block_by_hash(first_db_block.hash(), BlockSource::Canonical)?, + Some(first_db_block.clone().into()) + ); + + // No pending block in database + assert_eq!( + consistent_provider.find_block_by_hash(first_db_block.hash(), BlockSource::Pending)?, + None + ); + + // Insert the last block into the pending state + provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + block: Arc::new(last_in_mem_block.clone()), + senders: Default::default(), + execution_output: Default::default(), + hashed_state: Default::default(), + trie: Default::default(), + }); + + // Now the last block should be found in memory + assert_eq!( + consistent_provider + .find_block_by_hash(last_in_mem_block.hash(), BlockSource::Pending)?, + Some(last_in_mem_block.clone().into()) + ); + + Ok(()) + } + + #[test] + fn test_block_reader_block() -> eyre::Result<()> { + // Initialize random number generator and provider factory + let mut rng = generators::rng(); + let factory = create_test_provider_factory(); + + // Generate 10 random blocks and split into database and in-memory blocks + let blocks = random_block_range( + &mut rng, + 0..=10, + BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..1, ..Default::default() }, + ); + let (database_blocks, 
in_memory_blocks) = blocks.split_at(5); + + // Insert first 5 blocks into the database + let provider_rw = factory.provider_rw()?; + for block in database_blocks { + provider_rw.insert_historical_block( + block.clone().seal_with_senders().expect("failed to seal block with senders"), + )?; + } + provider_rw.commit()?; + + // Create a new provider + let provider = BlockchainProvider2::new(factory)?; + let consistent_provider = provider.consistent_provider()?; + + // First in memory block + let first_in_mem_block = in_memory_blocks.first().unwrap(); + // First database block + let first_db_block = database_blocks.first().unwrap(); + + // First in memory block should not be found yet as not integrated to the in-memory state + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Hash(first_in_mem_block.hash()))?, + None + ); + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Number(first_in_mem_block.number))?, + None + ); + + // Insert first block into the in-memory state + let in_memory_block_senders = + first_in_mem_block.senders().expect("failed to recover senders"); + let chain = NewCanonicalChain::Commit { + new: vec![ExecutedBlock::new( + Arc::new(first_in_mem_block.clone()), + Arc::new(in_memory_block_senders), + Default::default(), + Default::default(), + Default::default(), + )], + }; + consistent_provider.canonical_in_memory_state.update_chain(chain); + + let consistent_provider = provider.consistent_provider()?; + + // First in memory block should be found + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Hash(first_in_mem_block.hash()))?, + Some(first_in_mem_block.clone().into()) + ); + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Number(first_in_mem_block.number))?, + Some(first_in_mem_block.clone().into()) + ); + + // First database block should be found + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Hash(first_db_block.hash()))?, + Some(first_db_block.clone().into()) + ); + assert_eq!( + consistent_provider.block(BlockHashOrNumber::Number(first_db_block.number))?, + Some(first_db_block.clone().into()) + ); + + Ok(()) + } + + #[test] + fn test_changeset_reader() -> eyre::Result<()> { + let mut rng = generators::rng(); + + let (database_blocks, in_memory_blocks) = + random_blocks(&mut rng, TEST_BLOCKS_COUNT, 1, None, None, 0..1); + + let first_database_block = database_blocks.first().map(|block| block.number).unwrap(); + let last_database_block = database_blocks.last().map(|block| block.number).unwrap(); + let first_in_memory_block = in_memory_blocks.first().map(|block| block.number).unwrap(); + + let accounts = random_eoa_accounts(&mut rng, 2); + + let (database_changesets, database_state) = random_changeset_range( + &mut rng, + &database_blocks, + accounts.into_iter().map(|(address, account)| (address, (account, Vec::new()))), + 0..0, + 0..0, + ); + let (in_memory_changesets, in_memory_state) = random_changeset_range( + &mut rng, + &in_memory_blocks, + database_state + .iter() + .map(|(address, (account, storage))| (*address, (*account, storage.clone()))), + 0..0, + 0..0, + ); + + let factory = create_test_provider_factory(); + + let provider_rw = factory.provider_rw()?; + provider_rw.append_blocks_with_state( + database_blocks + .into_iter() + .map(|b| b.seal_with_senders().expect("failed to seal block with senders")) + .collect(), + ExecutionOutcome { + bundle: BundleState::new( + database_state.into_iter().map(|(address, (account, _))| { + (address, None, Some(account.into()), Default::default()) + }), + 
database_changesets + .iter() + .map(|block_changesets| { + block_changesets.iter().map(|(address, account, _)| { + (*address, Some(Some((*account).into())), []) + }) + }) + .collect::>(), + Vec::new(), + ), + first_block: first_database_block, + ..Default::default() + }, + Default::default(), + Default::default(), + )?; + provider_rw.commit()?; + + let provider = BlockchainProvider2::new(factory)?; + + let in_memory_changesets = in_memory_changesets.into_iter().next().unwrap(); + let chain = NewCanonicalChain::Commit { + new: vec![in_memory_blocks + .first() + .map(|block| { + let senders = block.senders().expect("failed to recover senders"); + ExecutedBlock::new( + Arc::new(block.clone()), + Arc::new(senders), + Arc::new(ExecutionOutcome { + bundle: BundleState::new( + in_memory_state.into_iter().map(|(address, (account, _))| { + (address, None, Some(account.into()), Default::default()) + }), + [in_memory_changesets.iter().map(|(address, account, _)| { + (*address, Some(Some((*account).into())), Vec::new()) + })], + [], + ), + first_block: first_in_memory_block, + ..Default::default() + }), + Default::default(), + Default::default(), + ) + }) + .unwrap()], + }; + provider.canonical_in_memory_state.update_chain(chain); + + let consistent_provider = provider.consistent_provider()?; + + assert_eq!( + consistent_provider.account_block_changeset(last_database_block).unwrap(), + database_changesets + .into_iter() + .last() + .unwrap() + .into_iter() + .sorted_by_key(|(address, _, _)| *address) + .map(|(address, account, _)| AccountBeforeTx { address, info: Some(account) }) + .collect::>() + ); + assert_eq!( + consistent_provider.account_block_changeset(first_in_memory_block).unwrap(), + in_memory_changesets + .into_iter() + .sorted_by_key(|(address, _, _)| *address) + .map(|(address, account, _)| AccountBeforeTx { address, info: Some(account) }) + .collect::>() + ); + + Ok(()) + } +} diff --git a/crates/storage/provider/src/providers/consistent_view.rs b/crates/storage/provider/src/providers/consistent_view.rs index 4640f460335..479537f120c 100644 --- a/crates/storage/provider/src/providers/consistent_view.rs +++ b/crates/storage/provider/src/providers/consistent_view.rs @@ -2,11 +2,11 @@ use crate::{BlockNumReader, DatabaseProviderFactory, HeaderProvider}; use alloy_primitives::B256; use reth_errors::ProviderError; use reth_primitives::GotExpected; -use reth_storage_api::{BlockReader, DBProvider}; +use reth_storage_api::{BlockReader, DBProvider, StateCommitmentProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::HashedPostState; -use reth_trie_db::DatabaseHashedPostState; +use reth_trie_db::{DatabaseHashedPostState, StateCommitment}; pub use reth_storage_errors::provider::ConsistentViewError; @@ -33,7 +33,7 @@ pub struct ConsistentDbView { impl ConsistentDbView where - Factory: DatabaseProviderFactory, + Factory: DatabaseProviderFactory + StateCommitmentProvider, { /// Creates new consistent database view. pub const fn new(factory: Factory, tip: Option) -> Self { @@ -59,7 +59,9 @@ where { Ok(HashedPostState::default()) } else { - Ok(HashedPostState::from_reverts(provider.tx_ref(), block_number + 1)?) + Ok(HashedPostState::from_reverts::< + ::KeyHasher, + >(provider.tx_ref(), block_number + 1)?) 
} } diff --git a/crates/storage/provider/src/providers/database/chain.rs b/crates/storage/provider/src/providers/database/chain.rs new file mode 100644 index 00000000000..57bc2e0b5ce --- /dev/null +++ b/crates/storage/provider/src/providers/database/chain.rs @@ -0,0 +1,42 @@ +use crate::{providers::NodeTypesForProvider, DatabaseProvider}; +use reth_db::transaction::{DbTx, DbTxMut}; +use reth_node_types::FullNodePrimitives; +use reth_primitives::EthPrimitives; +use reth_storage_api::{ChainStorageReader, ChainStorageWriter, EthStorage}; + +/// Trait that provides access to implementations of [`ChainStorage`] +pub trait ChainStorage: Send + Sync { + /// Provides access to the chain reader. + fn reader(&self) -> impl ChainStorageReader, Primitives> + where + TX: DbTx + 'static, + Types: NodeTypesForProvider; + + /// Provides access to the chain writer. + fn writer(&self) -> impl ChainStorageWriter, Primitives> + where + TX: DbTxMut + DbTx + 'static, + Types: NodeTypesForProvider; +} + +impl ChainStorage for EthStorage { + fn reader( + &self, + ) -> impl ChainStorageReader, EthPrimitives> + where + TX: DbTx + 'static, + Types: NodeTypesForProvider, + { + self + } + + fn writer( + &self, + ) -> impl ChainStorageWriter, EthPrimitives> + where + TX: DbTxMut + DbTx + 'static, + Types: NodeTypesForProvider, + { + self + } +} diff --git a/crates/storage/provider/src/providers/database/metrics.rs b/crates/storage/provider/src/providers/database/metrics.rs index ba43298c36b..4ee8f1ce5b1 100644 --- a/crates/storage/provider/src/providers/database/metrics.rs +++ b/crates/storage/provider/src/providers/database/metrics.rs @@ -22,14 +22,6 @@ impl Default for DurationsRecorder { } impl DurationsRecorder { - /// Saves the provided duration for future logging and instantly reports as a metric with - /// `action` label. - pub(crate) fn record_duration(&mut self, action: Action, duration: Duration) { - self.actions.push((action, duration)); - self.current_metrics.record_duration(action, duration); - self.latest = Some(self.start.elapsed()); - } - /// Records the duration since last record, saves it for future logging and instantly reports as /// a metric with `action` label. 
pub(crate) fn record_relative(&mut self, action: Action) { @@ -56,12 +48,6 @@ pub(crate) enum Action { InsertHeaders, InsertHeaderNumbers, InsertHeaderTerminalDifficulties, - InsertBlockOmmers, - InsertTransactionSenders, - InsertTransactions, - InsertTransactionHashNumbers, - InsertBlockWithdrawals, - InsertBlockRequests, InsertBlockBodyIndices, InsertTransactionBlocks, GetNextTxNum, @@ -96,18 +82,6 @@ struct DatabaseProviderMetrics { insert_header_numbers: Histogram, /// Duration of insert header TD insert_header_td: Histogram, - /// Duration of insert block ommers - insert_block_ommers: Histogram, - /// Duration of insert tx senders - insert_tx_senders: Histogram, - /// Duration of insert transactions - insert_transactions: Histogram, - /// Duration of insert transaction hash numbers - insert_tx_hash_numbers: Histogram, - /// Duration of insert block withdrawals - insert_block_withdrawals: Histogram, - /// Duration of insert block requests - insert_block_requests: Histogram, /// Duration of insert block body indices insert_block_body_indices: Histogram, /// Duration of insert transaction blocks @@ -134,12 +108,6 @@ impl DatabaseProviderMetrics { Action::InsertHeaders => self.insert_headers.record(duration), Action::InsertHeaderNumbers => self.insert_header_numbers.record(duration), Action::InsertHeaderTerminalDifficulties => self.insert_header_td.record(duration), - Action::InsertBlockOmmers => self.insert_block_ommers.record(duration), - Action::InsertTransactionSenders => self.insert_tx_senders.record(duration), - Action::InsertTransactions => self.insert_transactions.record(duration), - Action::InsertTransactionHashNumbers => self.insert_tx_hash_numbers.record(duration), - Action::InsertBlockWithdrawals => self.insert_block_withdrawals.record(duration), - Action::InsertBlockRequests => self.insert_block_requests.record(duration), Action::InsertBlockBodyIndices => self.insert_block_body_indices.record(duration), Action::InsertTransactionBlocks => self.insert_tx_blocks.record(duration), Action::GetNextTxNum => self.get_next_tx_num.record(duration), diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 520b514527b..85b734ef661 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -3,11 +3,14 @@ use crate::{ to_range, traits::{BlockSource, ReceiptProvider}, BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, - EvmEnvProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, ProviderError, - PruneCheckpointReader, RequestsProvider, StageCheckpointReader, StateProviderBox, + EvmEnvProvider, HashedPostStateProvider, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, + ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; -use alloy_eips::BlockHashOrNumber; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, +}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use core::fmt; use reth_chainspec::{ChainInfo, EthereumHardforks}; @@ -15,17 +18,23 @@ use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::NodeTypesWithDB; +use reth_node_types::{BlockTy, 
HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ - Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, - Withdrawals, + BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, + TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::TryIntoHistoricalStateProvider; +use reth_storage_api::{ + NodePrimitivesProvider, StateCommitmentProvider, TryIntoHistoricalStateProvider, +}; use reth_storage_errors::provider::ProviderResult; -use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; +use reth_trie::HashedPostState; +use reth_trie_db::StateCommitment; +use revm::{ + db::BundleState, + primitives::{BlockEnv, CfgEnvWithHandlerCfg}, +}; use std::{ ops::{RangeBounds, RangeInclusive}, path::Path, @@ -41,6 +50,9 @@ use super::ProviderNodeTypes; mod metrics; +mod chain; +pub use chain::*; + /// A common provider that fetches data from a database or static file. /// /// This provider implements most provider or provider factory traits. @@ -50,22 +62,25 @@ pub struct ProviderFactory { /// Chain spec chain_spec: Arc, /// Static File Provider - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, /// Optional pruning configuration prune_modes: PruneModes, + /// The node storage handler. + storage: Arc, } impl fmt::Debug for ProviderFactory where - N: NodeTypesWithDB, + N: NodeTypesWithDB, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let Self { db, chain_spec, static_file_provider, prune_modes } = self; + let Self { db, chain_spec, static_file_provider, prune_modes, storage } = self; f.debug_struct("ProviderFactory") .field("db", &db) .field("chain_spec", &chain_spec) .field("static_file_provider", &static_file_provider) .field("prune_modes", &prune_modes) + .field("storage", &storage) .finish() } } @@ -75,9 +90,15 @@ impl ProviderFactory { pub fn new( db: N::DB, chain_spec: Arc, - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, ) -> Self { - Self { db, chain_spec, static_file_provider, prune_modes: PruneModes::none() } + Self { + db, + chain_spec, + static_file_provider, + prune_modes: PruneModes::none(), + storage: Default::default(), + } } /// Enables metrics on the static file provider. @@ -111,13 +132,14 @@ impl>> ProviderFactory { path: P, chain_spec: Arc, args: DatabaseArguments, - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, ) -> RethResult { Ok(Self { db: Arc::new(init_db(path, args).map_err(RethError::msg)?), chain_spec, static_file_provider, prune_modes: PruneModes::none(), + storage: Default::default(), }) } } @@ -130,12 +152,13 @@ impl ProviderFactory { /// This sets the [`PruneModes`] to [`None`], because they should only be relevant for writing /// data. #[track_caller] - pub fn provider(&self) -> ProviderResult> { + pub fn provider(&self) -> ProviderResult> { Ok(DatabaseProvider::new( self.db.tx()?, self.chain_spec.clone(), self.static_file_provider.clone(), self.prune_modes.clone(), + self.storage.clone(), )) } @@ -144,12 +167,13 @@ impl ProviderFactory { /// [`BlockHashReader`]. This may fail if the inner read/write database transaction fails to /// open. 
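Editor's aside: the hunks above swap concrete `Header`/`Block` types for node-driven associated types (`type Header = HeaderTy<N>` here, `type Block = BlockTy<N>` just below). A minimal sketch of what that buys a caller, assuming only the trait shapes visible in this diff; the helper name is illustrative, not part of the PR:

```rust
use reth_storage_api::{BlockNumReader, BlockReader};
use reth_storage_errors::provider::ProviderResult;

/// Generic over whatever block type the node configuration uses: for
/// `ProviderFactory<N>` the associated `Block` is `BlockTy<N>`.
fn latest_block<P: BlockReader + BlockNumReader>(
    provider: &P,
) -> ProviderResult<Option<P::Block>> {
    let number = provider.last_block_number()?;
    // `u64` converts into `BlockHashOrNumber`, so this is a lookup by number.
    provider.block(number.into())
}
```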
#[track_caller] - pub fn provider_rw(&self) -> ProviderResult> { + pub fn provider_rw(&self) -> ProviderResult> { Ok(DatabaseProviderRW(DatabaseProvider::new_rw( self.db.tx_mut()?, self.chain_spec.clone(), self.static_file_provider.clone(), self.prune_modes.clone(), + self.storage.clone(), ))) } @@ -157,7 +181,7 @@ impl ProviderFactory { #[track_caller] pub fn latest(&self) -> ProviderResult { trace!(target: "providers::db", "Returning latest state provider"); - Ok(Box::new(LatestStateProvider::new(self.db.tx()?, self.static_file_provider()))) + Ok(Box::new(LatestStateProvider::new(self.database_provider_ro()?))) } /// Storage provider for state at that given block @@ -184,10 +208,14 @@ impl ProviderFactory { } } +impl NodePrimitivesProvider for ProviderFactory { + type Primitives = N::Primitives; +} + impl DatabaseProviderFactory for ProviderFactory { type DB = N::DB; - type Provider = DatabaseProvider<::TX, N::ChainSpec>; - type ProviderRW = DatabaseProvider<::TXMut, N::ChainSpec>; + type Provider = DatabaseProvider<::TX, N>; + type ProviderRW = DatabaseProvider<::TXMut, N>; fn database_provider_ro(&self) -> ProviderResult { self.provider() @@ -198,29 +226,36 @@ impl DatabaseProviderFactory for ProviderFactory { } } +impl StateCommitmentProvider for ProviderFactory { + type StateCommitment = N::StateCommitment; +} + impl StaticFileProviderFactory for ProviderFactory { /// Returns static file provider - fn static_file_provider(&self) -> StaticFileProvider { + fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() } } impl HeaderSyncGapProvider for ProviderFactory { + type Header = HeaderTy; fn sync_gap( &self, tip: watch::Receiver, highest_uninterrupted_block: BlockNumber, - ) -> ProviderResult { + ) -> ProviderResult> { self.provider()?.sync_gap(tip, highest_uninterrupted_block) } } impl HeaderProvider for ProviderFactory { - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + type Header = HeaderTy; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.provider()?.header(block_hash) } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, num, @@ -248,7 +283,10 @@ impl HeaderProvider for ProviderFactory { ) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Headers, to_range(range), @@ -258,7 +296,10 @@ impl HeaderProvider for ProviderFactory { ) } - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, number, @@ -270,15 +311,15 @@ impl HeaderProvider for ProviderFactory { fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.sealed_headers_while(range, |_| true) } fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Headers, to_range(range), @@ -333,27 +374,37 @@ impl BlockNumReader for ProviderFactory { } 
impl BlockReader for ProviderFactory { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { self.provider()?.find_block_by_hash(hash, source) } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { self.provider()?.block(id) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { self.provider()?.pending_block() } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { self.provider()?.pending_block_with_senders() } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { self.provider()?.pending_block_and_receipts() } - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { self.provider()?.ommers(id) } @@ -368,7 +419,7 @@ impl BlockReader for ProviderFactory { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.block_with_senders(id, transaction_kind) } @@ -376,35 +427,37 @@ impl BlockReader for ProviderFactory { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.sealed_block_with_senders(id, transaction_kind) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.provider()?.block_range(range) } fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.provider()?.sealed_block_with_senders_range(range) } } impl TransactionsProvider for ProviderFactory { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.provider()?.transaction_id(tx_hash) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, @@ -413,26 +466,26 @@ impl TransactionsProvider for ProviderFactory { ) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, - |static_file| static_file.transaction_by_id_no_hash(id), - || self.provider()?.transaction_by_id_no_hash(id), + |static_file| static_file.transaction_by_id_unhashed(id), + || self.provider()?.transaction_by_id_unhashed(id), ) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.provider()?.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { self.provider()?.transaction_by_hash_with_meta(tx_hash) } @@ -443,21 +496,21 @@ impl TransactionsProvider for ProviderFactory { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> 
ProviderResult>> { self.provider()?.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.provider()?.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.provider()?.transactions_by_tx_range(range) } @@ -474,7 +527,8 @@ impl TransactionsProvider for ProviderFactory { } impl ReceiptProvider for ProviderFactory { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Receipts, id, @@ -483,18 +537,21 @@ impl ReceiptProvider for ProviderFactory { ) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { self.provider()?.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { self.provider()?.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Receipts, to_range(range), @@ -519,16 +576,6 @@ impl WithdrawalsProvider for ProviderFactory { } } -impl RequestsProvider for ProviderFactory { - fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - self.provider()?.requests_by_block(id, timestamp) - } -} - impl StageCheckpointReader for ProviderFactory { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.provider()?.get_stage_checkpoint(id) @@ -542,29 +589,16 @@ impl StageCheckpointReader for ProviderFactory { } } -impl EvmEnvProvider for ProviderFactory { - fn fill_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
<Header = Header>,
-    {
-        self.provider()?.fill_env_at(cfg, block_env, at, evm_config)
-    }
-
+impl<N: ProviderNodeTypes> EvmEnvProvider<HeaderTy<N>> for ProviderFactory<N> {
     fn fill_env_with_header<EvmConfig>(
         &self,
         cfg: &mut CfgEnvWithHandlerCfg,
         block_env: &mut BlockEnv,
-        header: &Header,
+        header: &HeaderTy<N>,
         evm_config: EvmConfig,
     ) -> ProviderResult<()>
     where
-        EvmConfig: ConfigureEvmEnv<Header = Header>,
+        EvmConfig: ConfigureEvmEnv<Header = HeaderTy<N>>,
     {
         self.provider()?.fill_env_with_header(cfg, block_env, header, evm_config)
     }
@@ -576,7 +610,7 @@ impl EvmEnvProvider for ProviderFactory {
         evm_config: EvmConfig,
     ) -> ProviderResult<()>
     where
-        EvmConfig: ConfigureEvmEnv<Header = Header>,
+        EvmConfig: ConfigureEvmEnv<Header = HeaderTy<N>>,
     {
         self.provider()?.fill_cfg_env_at(cfg, at, evm_config)
     }
@@ -584,11 +618,11 @@ impl EvmEnvProvider for ProviderFactory {
     fn fill_cfg_env_with_header<EvmConfig>(
         &self,
         cfg: &mut CfgEnvWithHandlerCfg,
-        header: &Header,
+        header: &HeaderTy<N>,
         evm_config: EvmConfig,
     ) -> ProviderResult<()>
     where
-        EvmConfig: ConfigureEvmEnv<Header = Header>,
+        EvmConfig: ConfigureEvmEnv<Header = HeaderTy<N>
>, { self.provider()?.fill_cfg_env_with_header(cfg, header, evm_config) } @@ -615,6 +649,14 @@ impl PruneCheckpointReader for ProviderFactory { } } +impl HashedPostStateProvider for ProviderFactory { + fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::<::KeyHasher>( + bundle_state.state(), + ) + } +} + impl Clone for ProviderFactory { fn clone(&self) -> Self { Self { @@ -622,6 +664,7 @@ impl Clone for ProviderFactory { chain_spec: self.chain_spec.clone(), static_file_provider: self.static_file_provider.clone(), prune_modes: self.prune_modes.clone(), + storage: self.storage.clone(), } } } @@ -632,7 +675,8 @@ mod tests { use crate::{ providers::{StaticFileProvider, StaticFileWriter}, test_utils::{blocks::TEST_BLOCK, create_test_provider_factory, MockNodeTypesWithDB}, - BlockHashReader, BlockNumReader, BlockWriter, HeaderSyncGapProvider, TransactionsProvider, + BlockHashReader, BlockNumReader, BlockWriter, DBProvider, HeaderSyncGapProvider, + StorageLocation, TransactionsProvider, }; use alloy_primitives::{TxNumber, B256, U256}; use assert_matches::assert_matches; @@ -644,6 +688,7 @@ mod tests { test_utils::{create_test_static_files_dir, ERROR_TEMPDIR}, }; use reth_primitives::StaticFileSegment; + use reth_primitives_traits::SignedTransaction; use reth_prune_types::{PruneMode, PruneModes}; use reth_storage_errors::provider::ProviderError; use reth_testing_utils::generators::{self, random_block, random_header, BlockParams}; @@ -703,14 +748,20 @@ mod tests { { let provider = factory.provider_rw().unwrap(); assert_matches!( - provider.insert_block(block.clone().try_seal_with_senders().unwrap()), + provider.insert_block( + block.clone().try_seal_with_senders().unwrap(), + StorageLocation::Database + ), Ok(_) ); assert_matches!( provider.transaction_sender(0), Ok(Some(sender)) if sender == block.body.transactions[0].recover_signer().unwrap() ); - assert_matches!(provider.transaction_id(block.body.transactions[0].hash), Ok(Some(0))); + assert_matches!( + provider.transaction_id(block.body.transactions[0].hash()), + Ok(Some(0)) + ); } { @@ -721,11 +772,14 @@ mod tests { }; let provider = factory.with_prune_modes(prune_modes).provider_rw().unwrap(); assert_matches!( - provider.insert_block(block.clone().try_seal_with_senders().unwrap(),), + provider.insert_block( + block.clone().try_seal_with_senders().unwrap(), + StorageLocation::Database + ), Ok(_) ); assert_matches!(provider.transaction_sender(0), Ok(None)); - assert_matches!(provider.transaction_id(block.body.transactions[0].hash), Ok(None)); + assert_matches!(provider.transaction_id(block.body.transactions[0].hash()), Ok(None)); } } @@ -742,7 +796,10 @@ mod tests { let provider = factory.provider_rw().unwrap(); assert_matches!( - provider.insert_block(block.clone().try_seal_with_senders().unwrap()), + provider.insert_block( + block.clone().try_seal_with_senders().unwrap(), + StorageLocation::Database + ), Ok(_) ); @@ -760,21 +817,6 @@ mod tests { let db_senders = provider.senders_by_tx_range(range); assert_eq!(db_senders, Ok(vec![])); - - let result = provider.take_block_transaction_range(0..=0); - assert_eq!( - result, - Ok(vec![( - 0, - block - .body - .transactions - .iter() - .cloned() - .map(|tx| tx.into_ecrecovered().unwrap()) - .collect() - )]) - ) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 8140700faba..05e4ed4c0c0 100644 --- 
a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1,53 +1,68 @@ use crate::{ bundle_state::StorageRevertsIter, - providers::{database::metrics, static_file::StaticFileWriter, StaticFileProvider}, + providers::{ + database::{chain::ChainStorage, metrics}, + static_file::StaticFileWriter, + NodeTypesForProvider, StaticFileProvider, + }, to_range, traits::{ AccountExtReader, BlockSource, ChangeSetReader, ReceiptProvider, StageCheckpointWriter, }, - writer::UnifiedStorageWriter, - AccountReader, BlockExecutionReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, + AccountReader, BlockBodyWriter, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, BundleStateInit, ChainStateBlockReader, ChainStateBlockWriter, DBProvider, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HistoricalStateProvider, HistoricalStateProviderRef, HistoryWriter, LatestStateProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, - PruneCheckpointReader, PruneCheckpointWriter, RequestsProvider, RevertsInit, - StageCheckpointReader, StateChangeWriter, StateProviderBox, StateReader, StateWriter, - StaticFileProviderFactory, StatsReader, StorageReader, StorageTrieWriter, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, TrieWriter, WithdrawalsProvider, + PruneCheckpointReader, PruneCheckpointWriter, RevertsInit, StageCheckpointReader, + StateCommitmentProvider, StateProviderBox, StateWriter, StaticFileProviderFactory, StatsReader, + StorageLocation, StorageReader, StorageTrieWriter, TransactionVariant, TransactionsProvider, + TransactionsProviderExt, TrieWriter, WithdrawalsProvider, +}; +use alloy_consensus::{BlockHeader, Header}; +use alloy_eips::{ + eip2718::Encodable2718, + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, }; -use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; -use itertools::{izip, Itertools}; +use alloy_primitives::{ + keccak256, + map::{hash_map, HashMap, HashSet}, + Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256, +}; +use itertools::Itertools; use rayon::slice::ParallelSliceMut; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_db::{ cursor::DbDupCursorRW, tables, BlockNumberList, PlainAccountState, PlainStorageState, }; use reth_db_api::{ - common::KeyValue, cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, - ShardedKey, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, + ShardedKey, StoredBlockBodyIndices, }, table::Table, transaction::{DbTx, DbTxMut}, - DatabaseError, DbTxUnwindExt, + DatabaseError, }; use reth_evm::ConfigureEvmEnv; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; +use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives::{ - Account, Block, BlockBody, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, Requests, - SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, - TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, - Withdrawal, Withdrawals, + Account, BlockExt, BlockWithSenders, Bytecode, GotExpected, NodePrimitives, SealedBlock, + 
SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, + TransactionMeta, }; +use reth_primitives_traits::{Block as _, BlockBody as _, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{StateProvider, StorageChangeSetReader, TryIntoHistoricalStateProvider}; +use reth_storage_api::{ + BlockBodyReader, NodePrimitivesProvider, StateProvider, StorageChangeSetReader, + TryIntoHistoricalStateProvider, +}; use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, @@ -61,50 +76,49 @@ use revm::{ }; use std::{ cmp::Ordering, - collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, + collections::{BTreeMap, BTreeSet}, fmt::Debug, - ops::{Bound, Deref, DerefMut, Range, RangeBounds, RangeInclusive}, + ops::{Deref, DerefMut, Range, RangeBounds, RangeInclusive}, sync::{mpsc, Arc}, - time::{Duration, Instant}, }; use tokio::sync::watch; -use tracing::{debug, error, trace, warn}; +use tracing::{debug, trace}; /// A [`DatabaseProvider`] that holds a read-only database transaction. -pub type DatabaseProviderRO = DatabaseProvider<::TX, Spec>; +pub type DatabaseProviderRO = DatabaseProvider<::TX, N>; /// A [`DatabaseProvider`] that holds a read-write database transaction. /// /// Ideally this would be an alias type. However, there's some weird compiler error (), that forces us to wrap this in a struct instead. /// Once that issue is solved, we can probably revert back to being an alias type. #[derive(Debug)] -pub struct DatabaseProviderRW( - pub DatabaseProvider<::TXMut, Spec>, +pub struct DatabaseProviderRW( + pub DatabaseProvider<::TXMut, N>, ); -impl Deref for DatabaseProviderRW { - type Target = DatabaseProvider<::TXMut, Spec>; +impl Deref for DatabaseProviderRW { + type Target = DatabaseProvider<::TXMut, N>; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for DatabaseProviderRW { +impl DerefMut for DatabaseProviderRW { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl AsRef::TXMut, Spec>> - for DatabaseProviderRW +impl AsRef::TXMut, N>> + for DatabaseProviderRW { - fn as_ref(&self) -> &DatabaseProvider<::TXMut, Spec> { + fn as_ref(&self) -> &DatabaseProvider<::TXMut, N> { &self.0 } } -impl DatabaseProviderRW { +impl DatabaseProviderRW { /// Commit database transaction and static file if it exists. pub fn commit(self) -> ProviderResult { self.0.commit() @@ -116,10 +130,10 @@ impl DatabaseProviderRW { } } -impl From> - for DatabaseProvider<::TXMut, Spec> +impl From> + for DatabaseProvider<::TXMut, N> { - fn from(provider: DatabaseProviderRW) -> Self { + fn from(provider: DatabaseProviderRW) -> Self { provider.0 } } @@ -127,29 +141,31 @@ impl From> /// A provider struct that fetches data from the database. /// Wrapper around [`DbTx`] and [`DbTxMut`]. Example: [`HeaderProvider`] [`BlockHashReader`] #[derive(Debug)] -pub struct DatabaseProvider { +pub struct DatabaseProvider { /// Database transaction. tx: TX, /// Chain spec - chain_spec: Arc, + chain_spec: Arc, /// Static File provider - static_file_provider: StaticFileProvider, + static_file_provider: StaticFileProvider, /// Pruning configuration prune_modes: PruneModes, + /// Node storage handler. + storage: Arc, } -impl DatabaseProvider { +impl DatabaseProvider { /// Returns reference to prune modes. 
pub const fn prune_modes_ref(&self) -> &PruneModes { &self.prune_modes } } -impl DatabaseProvider { - /// State provider for latest block - pub fn latest<'a>(&'a self) -> ProviderResult> { +impl DatabaseProvider { + /// State provider for latest state + pub fn latest<'a>(&'a self) -> Box { trace!(target: "providers::db", "Returning latest state provider"); - Ok(Box::new(LatestStateProviderRef::new(&self.tx, self.static_file_provider.clone()))) + Box::new(LatestStateProviderRef::new(self)) } /// Storage provider for state at that given block hash @@ -162,10 +178,7 @@ impl DatabaseProvider { if block_number == self.best_block_number().unwrap_or_default() && block_number == self.last_block_number().unwrap_or_default() { - return Ok(Box::new(LatestStateProviderRef::new( - &self.tx, - self.static_file_provider.clone(), - ))) + return Ok(Box::new(LatestStateProviderRef::new(self))) } // +1 as the changeset that we want is the one that was applied after this block. @@ -176,11 +189,7 @@ impl DatabaseProvider { let storage_history_prune_checkpoint = self.get_prune_checkpoint(PruneSegment::StorageHistory)?; - let mut state_provider = HistoricalStateProviderRef::new( - &self.tx, - block_number, - self.static_file_provider.clone(), - ); + let mut state_provider = HistoricalStateProviderRef::new(self, block_number); // If we pruned account or storage history, we can't return state on every historical block. // Instead, we should cap it at the latest prune checkpoint for corresponding prune segment. @@ -201,46 +210,172 @@ impl DatabaseProvider { Ok(Box::new(state_provider)) } + + #[cfg(feature = "test-utils")] + /// Sets the prune modes for provider. + pub fn set_prune_modes(&mut self, prune_modes: PruneModes) { + self.prune_modes = prune_modes; + } +} + +impl NodePrimitivesProvider for DatabaseProvider { + type Primitives = N::Primitives; } -impl StaticFileProviderFactory for DatabaseProvider { +impl StaticFileProviderFactory for DatabaseProvider { /// Returns a static file provider - fn static_file_provider(&self) -> StaticFileProvider { + fn static_file_provider(&self) -> StaticFileProvider { self.static_file_provider.clone() } } -impl ChainSpecProvider - for DatabaseProvider +impl> ChainSpecProvider + for DatabaseProvider { - type ChainSpec = Spec; + type ChainSpec = N::ChainSpec; fn chain_spec(&self) -> Arc { self.chain_spec.clone() } } -impl DatabaseProvider { +impl DatabaseProvider { /// Creates a provider with an inner read-write transaction. pub const fn new_rw( tx: TX, - chain_spec: Arc, - static_file_provider: StaticFileProvider, + chain_spec: Arc, + static_file_provider: StaticFileProvider, prune_modes: PruneModes, + storage: Arc, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes } + Self { tx, chain_spec, static_file_provider, prune_modes, storage } } } -impl AsRef for DatabaseProvider { +impl AsRef for DatabaseProvider { fn as_ref(&self) -> &Self { self } } -impl TryIntoHistoricalStateProvider - for DatabaseProvider -{ +impl DatabaseProvider { + /// Unwinds trie state for the given range. + /// + /// This includes calculating the resulted state root and comparing it with the parent block + /// state root. + pub fn unwind_trie_state_range( + &self, + range: RangeInclusive, + ) -> ProviderResult<()> { + let changed_accounts = self + .tx + .cursor_read::()? + .walk_range(range.clone())? + .collect::, _>>()?; + + // Unwind account hashes. Add changed accounts to account prefix set. 
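        // Editorial note: the prefix sets assembled in this function are what keep the
        // unwind incremental: the `StateRoot` computation below only re-walks subtries
        // whose keys appear in a prefix set, and fully destroyed accounts are tracked
        // separately so their storage tries can be dropped wholesale.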
+ let hashed_addresses = self.unwind_account_hashing(changed_accounts.iter())?; + let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); + let mut destroyed_accounts = HashSet::default(); + for (hashed_address, account) in hashed_addresses { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + if account.is_none() { + destroyed_accounts.insert(hashed_address); + } + } + + // Unwind account history indices. + self.unwind_account_history_indices(changed_accounts.iter())?; + let storage_range = BlockNumberAddress::range(range.clone()); + + let changed_storages = self + .tx + .cursor_read::()? + .walk_range(storage_range)? + .collect::, _>>()?; + + // Unwind storage hashes. Add changed account and storage keys to corresponding prefix + // sets. + let mut storage_prefix_sets = HashMap::::default(); + let storage_entries = self.unwind_storage_hashing(changed_storages.iter().copied())?; + for (hashed_address, hashed_slots) in storage_entries { + account_prefix_set.insert(Nibbles::unpack(hashed_address)); + let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); + for slot in hashed_slots { + storage_prefix_set.insert(Nibbles::unpack(slot)); + } + storage_prefix_sets.insert(hashed_address, storage_prefix_set.freeze()); + } + + // Unwind storage history indices. + self.unwind_storage_history_indices(changed_storages.iter().copied())?; + + // Calculate the reverted merkle root. + // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets + // are pre-loaded. + let prefix_sets = TriePrefixSets { + account_prefix_set: account_prefix_set.freeze(), + storage_prefix_sets, + destroyed_accounts, + }; + let (new_state_root, trie_updates) = StateRoot::from_tx(&self.tx) + .with_prefix_sets(prefix_sets) + .root_with_updates() + .map_err(Into::::into)?; + + let parent_number = range.start().saturating_sub(1); + let parent_state_root = self + .header_by_number(parent_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? + .state_root(); + + // state root should be always correct as we are reverting state. + // but for sake of double verification we will check it again. + if new_state_root != parent_state_root { + let parent_hash = self + .block_hash(parent_number)? + .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; + return Err(ProviderError::UnwindStateRootMismatch(Box::new(RootMismatch { + root: GotExpected { got: new_state_root, expected: parent_state_root }, + block_number: parent_number, + block_hash: parent_hash, + }))) + } + self.write_trie_updates(&trie_updates)?; + + Ok(()) + } + + /// Removes receipts from all transactions starting with provided number (inclusive). + fn remove_receipts_from( + &self, + from_tx: TxNumber, + last_block: BlockNumber, + remove_from: StorageLocation, + ) -> ProviderResult<()> { + if remove_from.database() { + // iterate over block body and remove receipts + self.remove::>>(from_tx..)?; + } + + if remove_from.static_files() && !self.prune_modes.has_receipts_pruning() { + let static_file_receipt_num = + self.static_file_provider.get_highest_static_file_tx(StaticFileSegment::Receipts); + + let to_delete = static_file_receipt_num + .map(|static_num| (static_num + 1).saturating_sub(from_tx)) + .unwrap_or_default(); + + self.static_file_provider + .latest_writer(StaticFileSegment::Receipts)? 
+ .prune_receipts(to_delete, last_block)?; + } + + Ok(()) + } +} + +impl TryIntoHistoricalStateProvider for DatabaseProvider { fn try_into_history_at_block( self, mut block_number: BlockNumber, @@ -248,7 +383,7 @@ impl TryIntoHistoricalStateProvider if block_number == self.best_block_number().unwrap_or_default() && block_number == self.last_block_number().unwrap_or_default() { - return Ok(Box::new(LatestStateProvider::new(self.tx, self.static_file_provider))) + return Ok(Box::new(LatestStateProvider::new(self))) } // +1 as the changeset that we want is the one that was applied after this block. @@ -259,8 +394,7 @@ impl TryIntoHistoricalStateProvider let storage_history_prune_checkpoint = self.get_prune_checkpoint(PruneSegment::StorageHistory)?; - let mut state_provider = - HistoricalStateProvider::new(self.tx, block_number, self.static_file_provider); + let mut state_provider = HistoricalStateProvider::new(self, block_number); // If we pruned account or storage history, we can't return state on every historical block. // Instead, we should cap it at the latest prune checkpoint for corresponding prune segment. @@ -283,15 +417,21 @@ impl TryIntoHistoricalStateProvider } } -impl - DatabaseProvider +impl StateCommitmentProvider for DatabaseProvider { + type StateCommitment = N::StateCommitment; +} + +impl< + Tx: DbTx + DbTxMut + 'static, + N: NodeTypesForProvider>, + > DatabaseProvider { // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. // #[cfg(any(test, feature = "test-utils"))] /// Inserts an historical block. **Used for setting up test environments** pub fn insert_historical_block( &self, - block: SealedBlockWithSenders, + block: SealedBlockWithSenders<::Block>, ) -> ProviderResult { let ttd = if block.number == 0 { block.difficulty @@ -315,7 +455,7 @@ impl DatabaseProvider { +impl DatabaseProvider { /// Creates a provider with an inner read-only transaction. pub const fn new( tx: TX, - chain_spec: Arc, - static_file_provider: StaticFileProvider, + chain_spec: Arc, + static_file_provider: StaticFileProvider, prune_modes: PruneModes, + storage: Arc, ) -> Self { - Self { tx, chain_spec, static_file_provider, prune_modes } + Self { tx, chain_spec, static_file_provider, prune_modes, storage } } /// Consume `DbTx` or `DbTxMut`. @@ -393,86 +534,19 @@ impl DatabaseProvider { } /// Returns a reference to the chain specification. - pub fn chain_spec(&self) -> &Spec { + pub fn chain_spec(&self) -> &N::ChainSpec { &self.chain_spec } +} - /// Disables long-lived read transaction safety guarantees for leaks prevention and - /// observability improvements. - /// - /// CAUTION: In most of the cases, you want the safety guarantees for long read transactions - /// enabled. Use this only if you're sure that no write transaction is open in parallel, meaning - /// that Reth as a node is offline and not progressing. - pub fn disable_long_read_transaction_safety(mut self) -> Self { - self.tx.disable_long_read_transaction_safety(); - self - } - - /// Return full table as Vec - pub fn table(&self) -> Result>, DatabaseError> - where - T::Key: Default + Ord, - { - self.tx - .cursor_read::()? - .walk(Some(T::Key::default()))? - .collect::, DatabaseError>>() - } - - /// Return a list of entries from the table, based on the given range. - #[inline] - pub fn get( - &self, - range: impl RangeBounds, - ) -> Result>, DatabaseError> { - self.tx.cursor_read::()?.walk_range(range)?.collect::, _>>() - } - - /// Iterates over read only values in the given table and collects them into a vector. 
- /// - /// Early-returns if the range is empty, without opening a cursor transaction. - fn cursor_read_collect>( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - let capacity = match range_size_hint(&range) { - Some(0) | None => return Ok(Vec::new()), - Some(capacity) => capacity, - }; - let mut cursor = self.tx.cursor_read::()?; - self.cursor_collect_with_capacity(&mut cursor, range, capacity) - } - - /// Iterates over read only values in the given table and collects them into a vector. - fn cursor_collect>( - &self, - cursor: &mut impl DbCursorRO, - range: impl RangeBounds, - ) -> ProviderResult> { - let capacity = range_size_hint(&range).unwrap_or(0); - self.cursor_collect_with_capacity(cursor, range, capacity) - } - - fn cursor_collect_with_capacity>( - &self, - cursor: &mut impl DbCursorRO, - range: impl RangeBounds, - capacity: usize, - ) -> ProviderResult> { - let mut items = Vec::with_capacity(capacity); - for entry in cursor.walk_range(range)? { - items.push(entry?.1); - } - Ok(items) - } - +impl DatabaseProvider { fn transactions_by_tx_range_with_cursor( &self, range: impl RangeBounds, cursor: &mut C, - ) -> ProviderResult> + ) -> ProviderResult>> where - C: DbCursorRO, + C: DbCursorRO>>, { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Transactions, @@ -486,31 +560,18 @@ impl DatabaseProvider { fn block_with_senders( &self, id: BlockHashOrNumber, - transaction_kind: TransactionVariant, + _transaction_kind: TransactionVariant, header_by_number: HF, construct_block: BF, ) -> ProviderResult> where - Spec: EthereumHardforks, - H: AsRef
<Header>,
+        H: AsRef<HeaderTy<N>>,
         HF: FnOnce(BlockNumber) -> ProviderResult<Option<H>>,
-        BF: FnOnce(
-            H,
-            Vec<TransactionSigned>,
-            Vec<Address>,
-            Vec<Header>,
-            Option<Withdrawals>,
-            Option<Requests>,
-        ) -> ProviderResult<Option<B>>,
+        BF: FnOnce(H, BodyTy<N>, Vec<Address>
) -> ProviderResult>, { let Some(block_number) = self.convert_hash_or_number(id)? else { return Ok(None) }; let Some(header) = header_by_number(block_number)? else { return Ok(None) }; - let ommers = self.ommers(block_number.into())?.unwrap_or_default(); - let withdrawals = - self.withdrawals_by_block(block_number.into(), header.as_ref().timestamp)?; - let requests = self.requests_by_block(block_number.into(), header.as_ref().timestamp)?; - // Get the block body // // If the body indices are not found, this means that the transactions either do not exist @@ -527,20 +588,14 @@ impl DatabaseProvider { (self.transactions_by_tx_range(tx_range.clone())?, self.senders_by_tx_range(tx_range)?) }; - let body = transactions - .into_iter() - .map(|tx| match transaction_kind { - TransactionVariant::NoHash => TransactionSigned { - // Caller explicitly asked for no hash, so we don't calculate it - hash: B256::ZERO, - signature: tx.signature, - transaction: tx.transaction, - }, - TransactionVariant::WithHash => tx.with_hash(), - }) - .collect(); + let body = self + .storage + .reader() + .read_block_bodies(self, vec![(header.as_ref(), transactions)])? + .pop() + .ok_or(ProviderError::InvalidStorageOutput)?; - construct_block(header, body, senders, ommers, withdrawals, requests) + construct_block(header, body, senders) } /// Returns a range of blocks from the database. @@ -551,7 +606,6 @@ impl DatabaseProvider { /// - Range of transaction numbers /// – Ommers /// – Withdrawals - /// – Requests /// – Senders fn block_range( &self, @@ -560,16 +614,9 @@ impl DatabaseProvider { mut assemble_block: F, ) -> ProviderResult> where - Spec: EthereumHardforks, - H: AsRef
<Header>,
+        H: AsRef<HeaderTy<N>>,
         HF: FnOnce(RangeInclusive<BlockNumber>) -> ProviderResult<Vec<H>>,
-        F: FnMut(
-            H,
-            Range<TxNumber>,
-            Vec<Header>
, - Option, - Option, - ) -> ProviderResult, + F: FnMut(H, BodyTy, Range) -> ProviderResult, { if range.is_empty() { return Ok(Vec::new()) @@ -579,58 +626,41 @@ impl DatabaseProvider { let mut blocks = Vec::with_capacity(len); let headers = headers_range(range)?; - let mut ommers_cursor = self.tx.cursor_read::()?; - let mut withdrawals_cursor = self.tx.cursor_read::()?; - let mut requests_cursor = self.tx.cursor_read::()?; + let mut tx_cursor = self.tx.cursor_read::>>()?; let mut block_body_cursor = self.tx.cursor_read::()?; + let mut present_headers = Vec::new(); for header in headers { - let header_ref = header.as_ref(); // If the body indices are not found, this means that the transactions either do // not exist in the database yet, or they do exit but are // not indexed. If they exist but are not indexed, we don't // have enough information to return the block anyways, so // we skip the block. if let Some((_, block_body_indices)) = - block_body_cursor.seek_exact(header_ref.number)? + block_body_cursor.seek_exact(header.as_ref().number())? { let tx_range = block_body_indices.tx_num_range(); - - // If we are past shanghai, then all blocks should have a withdrawal list, - // even if empty - let withdrawals = - if self.chain_spec.is_shanghai_active_at_timestamp(header_ref.timestamp) { - withdrawals_cursor - .seek_exact(header_ref.number)? - .map(|(_, w)| w.withdrawals) - .unwrap_or_default() - .into() - } else { - None - }; - let requests = - if self.chain_spec.is_prague_active_at_timestamp(header_ref.timestamp) { - (requests_cursor.seek_exact(header_ref.number)?.unwrap_or_default().1) - .into() - } else { - None - }; - let ommers = - if self.chain_spec.final_paris_total_difficulty(header_ref.number).is_some() { - Vec::new() - } else { - ommers_cursor - .seek_exact(header_ref.number)? - .map(|(_, o)| o.ommers) - .unwrap_or_default() - }; - - if let Ok(b) = assemble_block(header, tx_range, ommers, withdrawals, requests) { - blocks.push(b); - } + present_headers.push((header, tx_range)); } } + let mut inputs = Vec::new(); + for (header, tx_range) in &present_headers { + let transactions = if tx_range.is_empty() { + Vec::new() + } else { + self.transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? + }; + + inputs.push((header.as_ref(), transactions)); + } + + let bodies = self.storage.reader().read_block_bodies(self, inputs)?; + + for ((header, tx_range), body) in present_headers.into_iter().zip(bodies) { + blocks.push(assemble_block(header, body, tx_range)?); + } + Ok(blocks) } @@ -643,7 +673,6 @@ impl DatabaseProvider { /// - Transactions /// – Ommers /// – Withdrawals - /// – Requests /// – Senders fn block_with_senders_range( &self, @@ -652,38 +681,24 @@ impl DatabaseProvider { assemble_block: BF, ) -> ProviderResult> where - Spec: EthereumHardforks, - H: AsRef
<Header>,
+        H: AsRef<HeaderTy<N>>,
         HF: Fn(RangeInclusive<BlockNumber>) -> ProviderResult<Vec<H>>,
-        BF: Fn(
-            H,
-            Vec<TransactionSigned>,
-            Vec<Header>,
-            Option<Withdrawals>,
-            Option<Requests>,
-            Vec<Address>,
-        ) -> ProviderResult<B>,
+        BF: Fn(H, BodyTy<N>, Vec<Address>
) -> ProviderResult, { - let mut tx_cursor = self.tx.cursor_read::()?; let mut senders_cursor = self.tx.cursor_read::()?; - self.block_range(range, headers_range, |header, tx_range, ommers, withdrawals, requests| { - let (body, senders) = if tx_range.is_empty() { - (Vec::new(), Vec::new()) + self.block_range(range, headers_range, |header, body, tx_range| { + let senders = if tx_range.is_empty() { + Vec::new() } else { - let body = self - .transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? - .into_iter() - .map(Into::into) - .collect::>(); // fetch senders from the senders table let known_senders = senders_cursor .walk_range(tx_range.clone())? .collect::, _>>()?; - let mut senders = Vec::with_capacity(body.len()); - for (tx_num, tx) in tx_range.zip(body.iter()) { + let mut senders = Vec::with_capacity(body.transactions().len()); + for (tx_num, tx) in tx_range.zip(body.transactions()) { match known_senders.get(&tx_num) { None => { // recover the sender from the transaction if not found @@ -696,676 +711,101 @@ impl DatabaseProvider { } } - (body, senders) + senders }; - assemble_block(header, body, ommers, withdrawals, requests, senders) + assemble_block(header, body, senders) }) } - /// Get requested blocks transaction with senders - pub(crate) fn get_block_transaction_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult)>> { - // Raad range of block bodies to get all transactions id's of this range. - let block_bodies = self.get::(range)?; - - if block_bodies.is_empty() { - return Ok(Vec::new()) - } - - // Compute the first and last tx ID in the range - let first_transaction = block_bodies.first().expect("If we have headers").1.first_tx_num(); - let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_num(); - - // If this is the case then all of the blocks in the range are empty - if last_transaction < first_transaction { - return Ok(block_bodies.into_iter().map(|(n, _)| (n, Vec::new())).collect()) - } - - // Get transactions and senders - let transactions = self - .get::(first_transaction..=last_transaction)? - .into_iter() - .map(|(id, tx)| (id, tx.into())) - .collect::>(); - - let mut senders = - self.get::(first_transaction..=last_transaction)?; - - recover_block_senders(&mut senders, &transactions, first_transaction, last_transaction)?; - - // Merge transaction into blocks - let mut block_tx = Vec::with_capacity(block_bodies.len()); - let mut senders = senders.into_iter(); - let mut transactions = transactions.into_iter(); - for (block_number, block_body) in block_bodies { - let mut one_block_tx = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - let tx = transactions.next(); - let sender = senders.next(); - - let recovered = match (tx, sender) { - (Some((tx_id, tx)), Some((sender_tx_id, sender))) => { - if tx_id == sender_tx_id { - Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender)) - } else { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - } - (Some((tx_id, _)), _) | (_, Some((tx_id, _))) => { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - (None, None) => Err(ProviderError::BlockBodyTransactionCount), - }?; - one_block_tx.push(recovered) - } - block_tx.push((block_number, one_block_tx)); - } - - Ok(block_tx) - } - - /// Get the given range of blocks. 
- pub fn get_block_range( + /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the + /// [`PlainAccountState`] and [`PlainStorageState`] tables, based on the given storage and + /// account changesets. + fn populate_bundle_state( &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult> + account_changeset: Vec<(u64, AccountBeforeTx)>, + storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>, + plain_accounts_cursor: &mut A, + plain_storage_cursor: &mut S, + ) -> ProviderResult<(BundleStateInit, RevertsInit)> where - Spec: EthereumHardforks, + A: DbCursorRO, + S: DbDupCursorRO, { - // For blocks we need: - // - // - Headers - // - Bodies (transactions) - // - Uncles/ommers - // - Withdrawals - // - Requests - // - Signers - - let block_headers = self.get::(range.clone())?; - if block_headers.is_empty() { - return Ok(Vec::new()) - } - - let block_header_hashes = self.get::(range.clone())?; - let block_ommers = self.get::(range.clone())?; - let block_withdrawals = self.get::(range.clone())?; - let block_requests = self.get::(range.clone())?; - - let block_tx = self.get_block_transaction_range(range)?; - let mut blocks = Vec::with_capacity(block_headers.len()); - - // merge all into block - let block_header_iter = block_headers.into_iter(); - let block_header_hashes_iter = block_header_hashes.into_iter(); - let block_tx_iter = block_tx.into_iter(); + // iterate previous value and get plain state value to create changeset + // Double option around Account represent if Account state is know (first option) and + // account is removed (Second Option) + let mut state: BundleStateInit = HashMap::default(); - // Ommers can be empty for some blocks - let mut block_ommers_iter = block_ommers.into_iter(); - let mut block_withdrawals_iter = block_withdrawals.into_iter(); - let mut block_requests_iter = block_requests.into_iter(); - let mut block_ommers = block_ommers_iter.next(); - let mut block_withdrawals = block_withdrawals_iter.next(); - let mut block_requests = block_requests_iter.next(); + // This is not working for blocks that are not at tip. as plain state is not the last + // state of end range. We should rename the functions or add support to access + // History state. Accessing history state can be tricky but we are not gaining + // anything. - for ((main_block_number, header), (_, header_hash), (_, tx)) in - izip!(block_header_iter, block_header_hashes_iter, block_tx_iter) - { - let header = SealedHeader::new(header, header_hash); + let mut reverts: RevertsInit = HashMap::default(); - let (transactions, senders) = tx.into_iter().map(|tx| tx.to_components()).unzip(); + // add account changeset changes + for (block_number, account_before) in account_changeset.into_iter().rev() { + let AccountBeforeTx { info: old_info, address } = account_before; + match state.entry(address) { + hash_map::Entry::Vacant(entry) => { + let new_info = plain_accounts_cursor.seek_exact(address)?.map(|kv| kv.1); + entry.insert((old_info, new_info, HashMap::default())); + } + hash_map::Entry::Occupied(mut entry) => { + // overwrite old account state. + entry.get_mut().0 = old_info; + } + } + // insert old info into reverts. 
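            // Editorial note: because the changesets are walked in reverse (`.rev()`),
            // the `state` entry ends up holding the oldest pre-range value (older
            // iterations keep overwriting `.0`), while `reverts` retains one original
            // value per touched block, which is what block-level unwinding needs.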
+ reverts.entry(block_number).or_default().entry(address).or_default().0 = Some(old_info); + } - // Ommers can be missing - let mut ommers = Vec::new(); - if let Some((block_number, _)) = block_ommers.as_ref() { - if *block_number == main_block_number { - ommers = block_ommers.take().unwrap().1.ommers; - block_ommers = block_ommers_iter.next(); + // add storage changeset changes + for (block_and_address, old_storage) in storage_changeset.into_iter().rev() { + let BlockNumberAddress((block_number, address)) = block_and_address; + // get account state or insert from plain state. + let account_state = match state.entry(address) { + hash_map::Entry::Vacant(entry) => { + let present_info = plain_accounts_cursor.seek_exact(address)?.map(|kv| kv.1); + entry.insert((present_info, present_info, HashMap::default())) } + hash_map::Entry::Occupied(entry) => entry.into_mut(), }; - // withdrawal can be missing - let shanghai_is_active = - self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp); - let mut withdrawals = Some(Withdrawals::default()); - if shanghai_is_active { - if let Some((block_number, _)) = block_withdrawals.as_ref() { - if *block_number == main_block_number { - withdrawals = Some(block_withdrawals.take().unwrap().1.withdrawals); - block_withdrawals = block_withdrawals_iter.next(); - } + // match storage. + match account_state.2.entry(old_storage.key) { + hash_map::Entry::Vacant(entry) => { + let new_storage = plain_storage_cursor + .seek_by_key_subkey(address, old_storage.key)? + .filter(|storage| storage.key == old_storage.key) + .unwrap_or_default(); + entry.insert((old_storage.value, new_storage.value)); } - } else { - withdrawals = None - } - - // requests can be missing - let prague_is_active = self.chain_spec.is_prague_active_at_timestamp(header.timestamp); - let mut requests = Some(Requests::default()); - if prague_is_active { - if let Some((block_number, _)) = block_requests.as_ref() { - if *block_number == main_block_number { - requests = Some(block_requests.take().unwrap().1); - block_requests = block_requests_iter.next(); - } + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().0 = old_storage.value; } - } else { - requests = None; - } + }; - blocks.push(SealedBlockWithSenders { - block: SealedBlock { - header, - body: BlockBody { transactions, ommers, withdrawals, requests }, - }, - senders, - }) + reverts + .entry(block_number) + .or_default() + .entry(address) + .or_default() + .1 + .push(old_storage); } - Ok(blocks) + Ok((state, reverts)) } +} - /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. - /// - /// 1. Iterate over the [`BlockBodyIndices`][tables::BlockBodyIndices] table to get all the - /// transaction ids. - /// 2. Iterate over the [`StorageChangeSets`][tables::StorageChangeSets] table and the - /// [`AccountChangeSets`][tables::AccountChangeSets] tables in reverse order to reconstruct - /// the changesets. - /// - In order to have both the old and new values in the changesets, we also access the - /// plain state tables. - /// 3. While iterating over the changeset tables, if we encounter a new account or storage slot, - /// we: - /// 1. Take the old value from the changeset - /// 2. Take the new value from the plain state - /// 3. Save the old value to the local state - /// 4. While iterating over the changeset tables, if we encounter an account/storage slot we - /// have seen before we: - /// 1. Take the old value from the changeset - /// 2. Take the new value from the local state - /// 3. 
Set the local state to the value in the changeset - /// - /// If the range is empty, or there are no blocks for the given range, then this returns `None`. - pub fn get_state( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - if range.is_empty() { - return Ok(None) - } - let start_block_number = *range.start(); - - // We are not removing block meta as it is used to get block changesets. - let block_bodies = self.get::(range.clone())?; - - // get transaction receipts - let Some(from_transaction_num) = block_bodies.first().map(|bodies| bodies.1.first_tx_num()) - else { - return Ok(None) - }; - let Some(to_transaction_num) = block_bodies.last().map(|bodies| bodies.1.last_tx_num()) - else { - return Ok(None) - }; - - let storage_range = BlockNumberAddress::range(range.clone()); - - let storage_changeset = self.get::(storage_range)?; - let account_changeset = self.get::(range)?; - - // This is not working for blocks that are not at tip. as plain state is not the last - // state of end range. We should rename the functions or add support to access - // History state. Accessing history state can be tricky but we are not gaining - // anything. - let mut plain_accounts_cursor = self.tx.cursor_read::()?; - let mut plain_storage_cursor = self.tx.cursor_dup_read::()?; - - let (state, reverts) = self.populate_bundle_state( - account_changeset, - storage_changeset, - &mut plain_accounts_cursor, - &mut plain_storage_cursor, - )?; - - // iterate over block body and create ExecutionResult - let mut receipt_iter = - self.get::(from_transaction_num..=to_transaction_num)?.into_iter(); - - let mut receipts = Vec::with_capacity(block_bodies.len()); - // loop break if we are at the end of the blocks. - for (_, block_body) in block_bodies { - let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - if let Some((_, receipt)) = receipt_iter.next() { - block_receipts.push(Some(receipt)); - } - } - receipts.push(block_receipts); - } - - Ok(Some(ExecutionOutcome::new_init( - state, - reverts, - Vec::new(), - receipts.into(), - start_block_number, - Vec::new(), - ))) - } - - /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the - /// [`PlainAccountState`] and [`PlainStorageState`] tables, based on the given storage and - /// account changesets. - fn populate_bundle_state( - &self, - account_changeset: Vec<(u64, AccountBeforeTx)>, - storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>, - plain_accounts_cursor: &mut A, - plain_storage_cursor: &mut S, - ) -> ProviderResult<(BundleStateInit, RevertsInit)> - where - A: DbCursorRO, - S: DbDupCursorRO, - { - // iterate previous value and get plain state value to create changeset - // Double option around Account represent if Account state is know (first option) and - // account is removed (Second Option) - let mut state: BundleStateInit = HashMap::default(); - - // This is not working for blocks that are not at tip. as plain state is not the last - // state of end range. We should rename the functions or add support to access - // History state. Accessing history state can be tricky but we are not gaining - // anything. 
- - let mut reverts: RevertsInit = HashMap::default(); - - // add account changeset changes - for (block_number, account_before) in account_changeset.into_iter().rev() { - let AccountBeforeTx { info: old_info, address } = account_before; - match state.entry(address) { - hash_map::Entry::Vacant(entry) => { - let new_info = plain_accounts_cursor.seek_exact(address)?.map(|kv| kv.1); - entry.insert((old_info, new_info, HashMap::default())); - } - hash_map::Entry::Occupied(mut entry) => { - // overwrite old account state. - entry.get_mut().0 = old_info; - } - } - // insert old info into reverts. - reverts.entry(block_number).or_default().entry(address).or_default().0 = Some(old_info); - } - - // add storage changeset changes - for (block_and_address, old_storage) in storage_changeset.into_iter().rev() { - let BlockNumberAddress((block_number, address)) = block_and_address; - // get account state or insert from plain state. - let account_state = match state.entry(address) { - hash_map::Entry::Vacant(entry) => { - let present_info = plain_accounts_cursor.seek_exact(address)?.map(|kv| kv.1); - entry.insert((present_info, present_info, HashMap::default())) - } - hash_map::Entry::Occupied(entry) => entry.into_mut(), - }; - - // match storage. - match account_state.2.entry(old_storage.key) { - hash_map::Entry::Vacant(entry) => { - let new_storage = plain_storage_cursor - .seek_by_key_subkey(address, old_storage.key)? - .filter(|storage| storage.key == old_storage.key) - .unwrap_or_default(); - entry.insert((old_storage.value, new_storage.value)); - } - hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().0 = old_storage.value; - } - }; - - reverts - .entry(block_number) - .or_default() - .entry(address) - .or_default() - .1 - .push(old_storage); - } - - Ok((state, reverts)) - } -} - -impl DatabaseProvider { +impl DatabaseProvider { /// Commit database transaction. pub fn commit(self) -> ProviderResult { Ok(self.tx.commit()?) } - /// Remove list of entries from the table. Returns the number of entries removed. - #[inline] - pub fn remove( - &self, - range: impl RangeBounds, - ) -> Result { - let mut entries = 0; - let mut cursor_write = self.tx.cursor_write::()?; - let mut walker = cursor_write.walk_range(range)?; - while walker.next().transpose()?.is_some() { - walker.delete_current()?; - entries += 1; - } - Ok(entries) - } - - /// Return a list of entries from the table, and remove them, based on the given range. - #[inline] - pub fn take( - &self, - range: impl RangeBounds, - ) -> Result>, DatabaseError> { - let mut cursor_write = self.tx.cursor_write::()?; - let mut walker = cursor_write.walk_range(range)?; - let mut items = Vec::new(); - while let Some(i) = walker.next().transpose()? { - walker.delete_current()?; - items.push(i) - } - Ok(items) - } - - /// Remove requested block transactions, without returning them. - /// - /// This will remove block data for the given range from the following tables: - /// * [`BlockBodyIndices`](tables::BlockBodyIndices) - /// * [`Transactions`](tables::Transactions) - /// * [`TransactionSenders`](tables::TransactionSenders) - /// * [`TransactionHashNumbers`](tables::TransactionHashNumbers) - /// * [`TransactionBlocks`](tables::TransactionBlocks) - pub fn remove_block_transaction_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult<()> { - // Raad range of block bodies to get all transactions id's of this range. 
- let block_bodies = self.take::(range)?; - - if block_bodies.is_empty() { - return Ok(()) - } - - // Compute the first and last tx ID in the range - let first_transaction = block_bodies.first().expect("If we have headers").1.first_tx_num(); - let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_num(); - - // If this is the case then all of the blocks in the range are empty - if last_transaction < first_transaction { - return Ok(()) - } - - // Get transactions so we can then remove - let transactions = self - .take::(first_transaction..=last_transaction)? - .into_iter() - .map(|(id, tx)| (id, tx.into())) - .collect::>(); - - // remove senders - self.remove::(first_transaction..=last_transaction)?; - - // Remove TransactionHashNumbers - let mut tx_hash_cursor = self.tx.cursor_write::()?; - for (_, tx) in &transactions { - if tx_hash_cursor.seek_exact(tx.hash())?.is_some() { - tx_hash_cursor.delete_current()?; - } - } - - // Remove TransactionBlocks index if there are transaction present - if !transactions.is_empty() { - let tx_id_range = transactions.first().unwrap().0..=transactions.last().unwrap().0; - self.remove::(tx_id_range)?; - } - - Ok(()) - } - - /// Get requested blocks transaction with senders, also removing them from the database - /// - /// This will remove block data for the given range from the following tables: - /// * [`BlockBodyIndices`](tables::BlockBodyIndices) - /// * [`Transactions`](tables::Transactions) - /// * [`TransactionSenders`](tables::TransactionSenders) - /// * [`TransactionHashNumbers`](tables::TransactionHashNumbers) - /// * [`TransactionBlocks`](tables::TransactionBlocks) - pub fn take_block_transaction_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult)>> { - // Raad range of block bodies to get all transactions id's of this range. - let block_bodies = self.get::(range)?; - - if block_bodies.is_empty() { - return Ok(Vec::new()) - } - - // Compute the first and last tx ID in the range - let first_transaction = block_bodies.first().expect("If we have headers").1.first_tx_num(); - let last_transaction = block_bodies.last().expect("Not empty").1.last_tx_num(); - - // If this is the case then all of the blocks in the range are empty - if last_transaction < first_transaction { - return Ok(block_bodies.into_iter().map(|(n, _)| (n, Vec::new())).collect()) - } - - // Get transactions and senders - let transactions = self - .take::(first_transaction..=last_transaction)? 
- .into_iter() - .map(|(id, tx)| (id, tx.into())) - .collect::>(); - - let mut senders = - self.take::(first_transaction..=last_transaction)?; - - recover_block_senders(&mut senders, &transactions, first_transaction, last_transaction)?; - - // Remove TransactionHashNumbers - let mut tx_hash_cursor = self.tx.cursor_write::()?; - for (_, tx) in &transactions { - if tx_hash_cursor.seek_exact(tx.hash())?.is_some() { - tx_hash_cursor.delete_current()?; - } - } - - // Remove TransactionBlocks index if there are transaction present - if !transactions.is_empty() { - let tx_id_range = transactions.first().unwrap().0..=transactions.last().unwrap().0; - self.remove::(tx_id_range)?; - } - - // Merge transaction into blocks - let mut block_tx = Vec::with_capacity(block_bodies.len()); - let mut senders = senders.into_iter(); - let mut transactions = transactions.into_iter(); - for (block_number, block_body) in block_bodies { - let mut one_block_tx = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - let tx = transactions.next(); - let sender = senders.next(); - - let recovered = match (tx, sender) { - (Some((tx_id, tx)), Some((sender_tx_id, sender))) => { - if tx_id == sender_tx_id { - Ok(TransactionSignedEcRecovered::from_signed_transaction(tx, sender)) - } else { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - } - (Some((tx_id, _)), _) | (_, Some((tx_id, _))) => { - Err(ProviderError::MismatchOfTransactionAndSenderId { tx_id }) - } - (None, None) => Err(ProviderError::BlockBodyTransactionCount), - }?; - one_block_tx.push(recovered) - } - block_tx.push((block_number, one_block_tx)); - } - - Ok(block_tx) - } - - /// Remove the given range of blocks, without returning any of the blocks. - /// - /// This will remove block data for the given range from the following tables: - /// * [`HeaderNumbers`](tables::HeaderNumbers) - /// * [`CanonicalHeaders`](tables::CanonicalHeaders) - /// * [`BlockOmmers`](tables::BlockOmmers) - /// * [`BlockWithdrawals`](tables::BlockWithdrawals) - /// * [`BlockRequests`](tables::BlockRequests) - /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) - /// - /// This will also remove transaction data according to - /// [`remove_block_transaction_range`](Self::remove_block_transaction_range). - pub fn remove_block_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult<()> { - let block_headers = self.remove::(range.clone())?; - if block_headers == 0 { - return Ok(()) - } - - self.tx.unwind_table_by_walker::( - range.clone(), - )?; - self.remove::(range.clone())?; - self.remove::(range.clone())?; - self.remove::(range.clone())?; - self.remove::(range.clone())?; - self.remove_block_transaction_range(range.clone())?; - self.remove::(range)?; - - Ok(()) - } - - /// Remove the given range of blocks, and return them. - /// - /// This will remove block data for the given range from the following tables: - /// * [`HeaderNumbers`](tables::HeaderNumbers) - /// * [`CanonicalHeaders`](tables::CanonicalHeaders) - /// * [`BlockOmmers`](tables::BlockOmmers) - /// * [`BlockWithdrawals`](tables::BlockWithdrawals) - /// * [`BlockRequests`](tables::BlockRequests) - /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) - /// - /// This will also remove transaction data according to - /// [`take_block_transaction_range`](Self::take_block_transaction_range). 
- pub fn take_block_range( - &self, - range: impl RangeBounds + Clone, - ) -> ProviderResult> - where - Spec: EthereumHardforks, - { - // For blocks we need: - // - // - Headers - // - Bodies (transactions) - // - Uncles/ommers - // - Withdrawals - // - Requests - // - Signers - - let block_headers = self.take::(range.clone())?; - if block_headers.is_empty() { - return Ok(Vec::new()) - } - - self.tx.unwind_table_by_walker::( - range.clone(), - )?; - let block_header_hashes = self.take::(range.clone())?; - let block_ommers = self.take::(range.clone())?; - let block_withdrawals = self.take::(range.clone())?; - let block_requests = self.take::(range.clone())?; - let block_tx = self.take_block_transaction_range(range.clone())?; - - let mut blocks = Vec::with_capacity(block_headers.len()); - - // rm HeaderTerminalDifficulties - self.remove::(range)?; - - // merge all into block - let block_header_iter = block_headers.into_iter(); - let block_header_hashes_iter = block_header_hashes.into_iter(); - let block_tx_iter = block_tx.into_iter(); - - // Ommers can be empty for some blocks - let mut block_ommers_iter = block_ommers.into_iter(); - let mut block_withdrawals_iter = block_withdrawals.into_iter(); - let mut block_requests_iter = block_requests.into_iter(); - let mut block_ommers = block_ommers_iter.next(); - let mut block_withdrawals = block_withdrawals_iter.next(); - let mut block_requests = block_requests_iter.next(); - - for ((main_block_number, header), (_, header_hash), (_, tx)) in - izip!(block_header_iter, block_header_hashes_iter, block_tx_iter) - { - let header = SealedHeader::new(header, header_hash); - - let (transactions, senders) = tx.into_iter().map(|tx| tx.to_components()).unzip(); - - // Ommers can be missing - let mut ommers = Vec::new(); - if let Some((block_number, _)) = block_ommers.as_ref() { - if *block_number == main_block_number { - ommers = block_ommers.take().unwrap().1.ommers; - block_ommers = block_ommers_iter.next(); - } - }; - - // withdrawal can be missing - let shanghai_is_active = - self.chain_spec.is_shanghai_active_at_timestamp(header.timestamp); - let mut withdrawals = Some(Withdrawals::default()); - if shanghai_is_active { - if let Some((block_number, _)) = block_withdrawals.as_ref() { - if *block_number == main_block_number { - withdrawals = Some(block_withdrawals.take().unwrap().1.withdrawals); - block_withdrawals = block_withdrawals_iter.next(); - } - } - } else { - withdrawals = None - } - - // requests can be missing - let prague_is_active = self.chain_spec.is_prague_active_at_timestamp(header.timestamp); - let mut requests = Some(Requests::default()); - if prague_is_active { - if let Some((block_number, _)) = block_requests.as_ref() { - if *block_number == main_block_number { - requests = Some(block_requests.take().unwrap().1); - block_requests = block_requests_iter.next(); - } - } - } else { - requests = None; - } - - blocks.push(SealedBlockWithSenders { - block: SealedBlock { - header, - body: BlockBody { transactions, ommers, withdrawals, requests }, - }, - senders, - }) - } - - Ok(blocks) - } - /// Load shard and remove it. If list is empty, last shard was full or /// there are no shards at all. fn take_shard(&self, key: T::Key) -> ProviderResult> @@ -1423,13 +863,13 @@ impl DatabaseProvider { } } -impl AccountReader for DatabaseProvider { +impl AccountReader for DatabaseProvider { fn basic_account(&self, address: Address) -> ProviderResult> { Ok(self.tx.get::(address)?) 
} } -impl AccountExtReader for DatabaseProvider { +impl AccountExtReader for DatabaseProvider { fn changed_accounts_with_range( &self, range: impl RangeBounds, @@ -1473,7 +913,7 @@ impl AccountExtReader for DatabaseProvider StorageChangeSetReader for DatabaseProvider { +impl StorageChangeSetReader for DatabaseProvider { fn storage_changeset( &self, block_number: BlockNumber, @@ -1488,7 +928,7 @@ impl StorageChangeSetReader for DatabaseProvider ChangeSetReader for DatabaseProvider { +impl ChangeSetReader for DatabaseProvider { fn account_block_changeset( &self, block_number: BlockNumber, @@ -1505,12 +945,16 @@ impl ChangeSetReader for DatabaseProvider } } -impl HeaderSyncGapProvider for DatabaseProvider { +impl HeaderSyncGapProvider + for DatabaseProvider +{ + type Header = HeaderTy; + fn sync_gap( &self, tip: watch::Receiver, highest_uninterrupted_block: BlockNumber, - ) -> ProviderResult { + ) -> ProviderResult> { let static_file_provider = self.static_file_provider(); // Make sure Headers static file is at the same height. If it's further, this @@ -1549,10 +993,10 @@ impl HeaderSyncGapProvider for DatabaseProvider HeaderProvider - for DatabaseProvider -{ - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { +impl HeaderProvider for DatabaseProvider { + type Header = HeaderTy; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { if let Some(num) = self.block_number(*block_hash)? { Ok(self.header_by_number(num)?) } else { @@ -1560,12 +1004,12 @@ impl HeaderProvider } } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, num, |static_file| static_file.header_by_number(num), - || Ok(self.tx.get::(num)?), + || Ok(self.tx.get::>(num)?), ) } @@ -1592,17 +1036,25 @@ impl HeaderProvider ) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Headers, to_range(range), |static_file, range, _| static_file.headers_range(range), - |range, _| self.cursor_read_collect::(range).map_err(Into::into), + |range, _| { + self.cursor_read_collect::>(range).map_err(Into::into) + }, |_| true, ) } - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, number, @@ -1623,15 +1075,17 @@ impl HeaderProvider fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Headers, to_range(range), |static_file, range, predicate| static_file.sealed_headers_while(range, predicate), |range, mut predicate| { let mut headers = vec![]; - for entry in self.tx.cursor_read::()?.walk_range(range)? { + for entry in + self.tx.cursor_read::>()?.walk_range(range)? + { let (number, header) = entry?; let hash = self .block_hash(number)? 
@@ -1649,7 +1103,7 @@ impl HeaderProvider } } -impl BlockHashReader for DatabaseProvider { +impl BlockHashReader for DatabaseProvider { fn block_hash(&self, number: u64) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Headers, @@ -1676,7 +1130,7 @@ impl BlockHashReader for DatabaseProvider } } -impl BlockNumReader for DatabaseProvider { +impl BlockNumReader for DatabaseProvider { fn chain_info(&self) -> ProviderResult { let best_number = self.best_block_number()?; let best_hash = self.block_hash(best_number)?.unwrap_or_default(); @@ -1707,8 +1161,14 @@ impl BlockNumReader for DatabaseProvider } } -impl BlockReader for DatabaseProvider { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { +impl BlockReader for DatabaseProvider { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { if source.is_canonical() { self.block(hash.into()) } else { @@ -1721,40 +1181,44 @@ impl BlockReader for DatabasePr /// If the header for this block is not found, this returns `None`. /// If the header is found, but the transactions either do not exist, or are not indexed, this /// will return None. - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { if let Some(number) = self.convert_hash_or_number(id)? { if let Some(header) = self.header_by_number(number)? { - let withdrawals = self.withdrawals_by_block(number.into(), header.timestamp)?; - let ommers = self.ommers(number.into())?.unwrap_or_default(); - let requests = self.requests_by_block(number.into(), header.timestamp)?; // If the body indices are not found, this means that the transactions either do not // exist in the database yet, or they do exit but are not indexed. // If they exist but are not indexed, we don't have enough // information to return the block anyways, so we return `None`. - let transactions = match self.transactions_by_block(number.into())? { - Some(transactions) => transactions, - None => return Ok(None), + let Some(transactions) = self.transactions_by_block(number.into())? else { + return Ok(None) }; - return Ok(Some(Block { - header, - body: BlockBody { transactions, ommers, withdrawals, requests }, - })) - } + let body = self + .storage + .reader() + .read_block_bodies(self, vec![(&header, transactions)])? + .pop() + .ok_or(ProviderError::InvalidStorageOutput)?; + + return Ok(Some(Self::Block::new(header, body))) + } } Ok(None) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(None) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(None) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { Ok(None) } @@ -1762,7 +1226,7 @@ impl BlockReader for DatabasePr /// /// If the block is not found, this returns `None`. /// If the block exists, but doesn't contain ommers, this returns `None`. - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { if let Some(number) = self.convert_hash_or_number(id)? { // If the Paris (Merge) hardfork block is known and block is after it, return empty // ommers. 
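Many read paths in these impls route through `get_with_static_file_or_database`. The following is a deliberately simplified, self-contained sketch of that lookup shape; the real helper also consults segment block ranges before choosing a source, so treat the closures and fallback rule here as an approximation:

// Try the static-file copy of a segment first and fall back to the database
// table when the value is not found there.
fn get_with_fallback<T, E>(
    from_static_file: impl FnOnce() -> Result<Option<T>, E>,
    from_database: impl FnOnce() -> Result<Option<T>, E>,
) -> Result<Option<T>, E> {
    if let Some(value) = from_static_file()? {
        return Ok(Some(value));
    }
    from_database()
}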
@@ -1770,7 +1234,8 @@ impl BlockReader for DatabasePr return Ok(Some(Vec::new())) } - let ommers = self.tx.get::(number)?.map(|o| o.ommers); + let ommers = + self.tx.get::>(number)?.map(|o| o.ommers); return Ok(ommers) } @@ -1793,13 +1258,13 @@ impl BlockReader for DatabasePr &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders( id, transaction_kind, |block_number| self.header_by_number(block_number), - |header, transactions, senders, ommers, withdrawals, requests| { - Block { header, body: BlockBody { transactions, ommers, withdrawals, requests } } + |header, body, senders| { + Self::Block::new(header, body) // Note: we're using unchecked here because we know the block contains valid txs // wrt to its height and can ignore the s value check so pre // EIP-2 txs are allowed @@ -1814,57 +1279,40 @@ impl BlockReader for DatabasePr &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders( id, transaction_kind, |block_number| self.sealed_header(block_number), - |header, transactions, senders, ommers, withdrawals, requests| { - SealedBlock { - header, - body: BlockBody { transactions, ommers, withdrawals, requests }, - } - // Note: we're using unchecked here because we know the block contains valid txs - // wrt to its height and can ignore the s value check so pre - // EIP-2 txs are allowed - .try_with_senders_unchecked(senders) - .map(Some) - .map_err(|_| ProviderError::SenderRecoveryError) + |header, body, senders| { + SealedBlock { header, body } + // Note: we're using unchecked here because we know the block contains valid txs + // wrt to its height and can ignore the s value check so pre + // EIP-2 txs are allowed + .try_with_senders_unchecked(senders) + .map(Some) + .map_err(|_| ProviderError::SenderRecoveryError) }, ) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - let mut tx_cursor = self.tx.cursor_read::()?; + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.block_range( range, |range| self.headers_range(range), - |header, tx_range, ommers, withdrawals, requests| { - let transactions = if tx_range.is_empty() { - Vec::new() - } else { - self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)? 
- .into_iter() - .map(Into::into) - .collect() - }; - Ok(Block { - header, - body: BlockBody { transactions, ommers, withdrawals, requests }, - }) - }, + |header, body, _| Ok(Self::Block::new(header, body)), ) } fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders_range( range, |range| self.headers_range(range), - |header, transactions, ommers, withdrawals, requests, senders| { - Block { header, body: BlockBody { transactions, ommers, withdrawals, requests } } + |header, body, senders| { + Self::Block::new(header, body) .try_with_senders_unchecked(senders) .map_err(|_| ProviderError::SenderRecoveryError) }, @@ -1874,26 +1322,20 @@ impl BlockReader for DatabasePr fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.block_with_senders_range( range, |range| self.sealed_headers_range(range), - |header, transactions, ommers, withdrawals, requests, senders| { - SealedBlockWithSenders::new( - SealedBlock { - header, - body: BlockBody { transactions, ommers, withdrawals, requests }, - }, - senders, - ) - .ok_or(ProviderError::SenderRecoveryError) + |header, body, senders| { + SealedBlockWithSenders::new(SealedBlock { header, body }, senders) + .ok_or(ProviderError::SenderRecoveryError) }, ) } } -impl TransactionsProviderExt - for DatabaseProvider +impl TransactionsProviderExt + for DatabaseProvider { /// Recovers transaction hashes by walking through `Transactions` table and /// calculating them in a parallel manner. Returned unsorted. @@ -1906,7 +1348,7 @@ impl TransactionsProviderExt tx_range, |static_file, range, _| static_file.transaction_hashes_by_range(range), |tx_range, _| { - let mut tx_cursor = self.tx.cursor_read::()?; + let mut tx_cursor = self.tx.cursor_read::>>()?; let tx_range_size = tx_range.clone().count(); let tx_walker = tx_cursor.walk_range(tx_range)?; @@ -1915,12 +1357,15 @@ impl TransactionsProviderExt let mut transaction_count = 0; #[inline] - fn calculate_hash( - entry: Result<(TxNumber, TransactionSignedNoHash), DatabaseError>, + fn calculate_hash( + entry: Result<(TxNumber, T), DatabaseError>, rlp_buf: &mut Vec, - ) -> Result<(B256, TxNumber), Box> { + ) -> Result<(B256, TxNumber), Box> + where + T: Encodable2718, + { let (tx_id, tx) = entry.map_err(|e| Box::new(e.into()))?; - tx.transaction.encode_with_signature(&tx.signature, rlp_buf, false); + tx.encode_2718(rlp_buf); Ok((keccak256(rlp_buf), tx_id)) } @@ -1962,59 +1407,49 @@ impl TransactionsProviderExt } // Calculates the hash of the given transaction -impl TransactionsProvider - for DatabaseProvider -{ +impl TransactionsProvider for DatabaseProvider { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { Ok(self.tx.get::(tx_hash)?) 
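The `calculate_hash` rework above replaces the legacy signature-encoding call with EIP-2718 encoding. A minimal standalone sketch of the same computation, using the `Encodable2718` trait and `keccak256` that the diff itself relies on:

use alloy_eips::eip2718::Encodable2718;
use alloy_primitives::{keccak256, B256};

// Hash a signed transaction the way the reworked `calculate_hash` does:
// EIP-2718-encode it into a reusable buffer, then keccak256 the bytes.
fn tx_hash<T: Encodable2718>(tx: &T, rlp_buf: &mut Vec<u8>) -> B256 {
    rlp_buf.clear(); // the buffer is reused across transactions
    tx.encode_2718(rlp_buf);
    keccak256(rlp_buf.as_slice())
}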
} - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, |static_file| static_file.transaction_by_id(id), - || Ok(self.tx.get::(id)?.map(Into::into)), + || Ok(self.tx.get::>(id)?), ) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Transactions, id, - |static_file| static_file.transaction_by_id_no_hash(id), - || Ok(self.tx.get::(id)?), + |static_file| static_file.transaction_by_id_unhashed(id), + || Ok(self.tx.get::>(id)?), ) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(id) = self.transaction_id(hash)? { - Ok(self.transaction_by_id_no_hash(id)?.map(|tx| TransactionSigned { - hash, - signature: tx.signature, - transaction: tx.transaction, - })) + Ok(self.transaction_by_id_unhashed(id)?) } else { Ok(None) } - .map(|tx| tx.map(Into::into)) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { let mut transaction_cursor = self.tx.cursor_read::()?; if let Some(transaction_id) = self.transaction_id(tx_hash)? { - if let Some(tx) = self.transaction_by_id_no_hash(transaction_id)? { - let transaction = TransactionSigned { - hash: tx_hash, - signature: tx.signature, - transaction: tx.transaction, - }; + if let Some(transaction) = self.transaction_by_id_unhashed(transaction_id)? { if let Some(block_number) = transaction_cursor.seek(transaction_id).map(|b| b.map(|(_, bn)| bn))? { @@ -2032,9 +1467,9 @@ impl TransactionsProvider index, block_hash, block_number, - base_fee: header.base_fee_per_gas, - excess_blob_gas: header.excess_blob_gas, - timestamp: header.timestamp, + base_fee: header.base_fee_per_gas(), + excess_blob_gas: header.excess_blob_gas(), + timestamp: header.timestamp(), }; return Ok(Some((transaction, meta))) @@ -2055,8 +1490,8 @@ impl TransactionsProvider fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { - let mut tx_cursor = self.tx.cursor_read::()?; + ) -> ProviderResult>> { + let mut tx_cursor = self.tx.cursor_read::>()?; if let Some(block_number) = self.convert_hash_or_number(id)? { if let Some(body) = self.block_body_indices(block_number)? { @@ -2064,12 +1499,7 @@ impl TransactionsProvider return if tx_range.is_empty() { Ok(Some(Vec::new())) } else { - Ok(Some( - self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)? - .into_iter() - .map(Into::into) - .collect(), - )) + Ok(Some(self.transactions_by_tx_range_with_cursor(tx_range, &mut tx_cursor)?)) } } } @@ -2079,8 +1509,8 @@ impl TransactionsProvider fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { - let mut tx_cursor = self.tx.cursor_read::()?; + ) -> ProviderResult>> { + let mut tx_cursor = self.tx.cursor_read::>()?; let mut results = Vec::new(); let mut body_cursor = self.tx.cursor_read::()?; for entry in body_cursor.walk_range(range)? { @@ -2092,7 +1522,6 @@ impl TransactionsProvider results.push( self.transactions_by_tx_range_with_cursor(tx_num_range, &mut tx_cursor)? 
.into_iter() - .map(Into::into) .collect(), ); } @@ -2103,10 +1532,10 @@ impl TransactionsProvider fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.transactions_by_tx_range_with_cursor( range, - &mut self.tx.cursor_read::()?, + &mut self.tx.cursor_read::>()?, ) } @@ -2122,19 +1551,19 @@ impl TransactionsProvider } } -impl ReceiptProvider - for DatabaseProvider -{ - fn receipt(&self, id: TxNumber) -> ProviderResult> { +impl ReceiptProvider for DatabaseProvider { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( StaticFileSegment::Receipts, id, |static_file| static_file.receipt(id), - || Ok(self.tx.get::(id)?), + || Ok(self.tx.get::>(id)?), ) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(id) = self.transaction_id(hash)? { self.receipt(id) } else { @@ -2142,7 +1571,10 @@ impl ReceiptProvider } } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { if let Some(number) = self.convert_hash_or_number(block)? { if let Some(body) = self.block_body_indices(number)? { let tx_range = body.tx_num_range(); @@ -2159,19 +1591,22 @@ impl ReceiptProvider fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.static_file_provider.get_range_with_static_file_or_database( StaticFileSegment::Receipts, to_range(range), |static_file, range, _| static_file.receipts_by_tx_range(range), - |range, _| self.cursor_read_collect::(range).map_err(Into::into), + |range, _| { + self.cursor_read_collect::>(range) + .map_err(Into::into) + }, |_| true, ) } } -impl WithdrawalsProvider - for DatabaseProvider +impl> WithdrawalsProvider + for DatabaseProvider { fn withdrawals_by_block( &self, @@ -2200,55 +1635,22 @@ impl WithdrawalsProvider } } -impl RequestsProvider - for DatabaseProvider +impl EvmEnvProvider> + for DatabaseProvider { - fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - if self.chain_spec.is_prague_active_at_timestamp(timestamp) { - if let Some(number) = self.convert_hash_or_number(id)? { - let requests = self.tx.get::(number)?; - return Ok(requests) - } - } - Ok(None) - } -} - -impl EvmEnvProvider - for DatabaseProvider -{ - fn fill_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
<Header = Header>, - { - let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; - let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; - self.fill_env_with_header(cfg, block_env, &header, evm_config) - } - fn fill_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, - header: &Header, + header: &HeaderTy<N>, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv<Header = Header>, + EvmConfig: ConfigureEvmEnv<Header = HeaderTy<N>>, { let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; + .header_td_by_number(header.number())? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; evm_config.fill_cfg_and_block_env(cfg, block_env, header, total_difficulty); Ok(()) } @@ -2260,7 +1662,7 @@ impl EvmEnvProvider evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv<Header = Header>, + EvmConfig: ConfigureEvmEnv<Header = HeaderTy<N>>, { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; @@ -2270,25 +1672,30 @@ impl EvmEnvProvider fn fill_cfg_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, - header: &Header, + header: &HeaderTy<N>, evm_config: EvmConfig, ) -> ProviderResult<()> where - EvmConfig: ConfigureEvmEnv<Header = Header>, + EvmConfig: ConfigureEvmEnv<Header = HeaderTy<N>
>, { let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; + .header_td_by_number(header.number())? + .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?; evm_config.fill_cfg_env(cfg, header, total_difficulty); Ok(()) } } -impl StageCheckpointReader for DatabaseProvider { +impl StageCheckpointReader for DatabaseProvider { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { Ok(self.tx.get::(id.to_string())?) } + /// Get stage checkpoint progress. + fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { + Ok(self.tx.get::(id.to_string())?) + } + fn get_all_checkpoints(&self) -> ProviderResult> { self.tx .cursor_read::()? @@ -2296,14 +1703,9 @@ impl StageCheckpointReader for DatabaseProvider, _>>() .map_err(ProviderError::Database) } - - /// Get stage checkpoint progress. - fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { - Ok(self.tx.get::(id.to_string())?) - } } -impl StageCheckpointWriter for DatabaseProvider { +impl StageCheckpointWriter for DatabaseProvider { /// Save stage checkpoint. fn save_stage_checkpoint( &self, @@ -2344,7 +1746,7 @@ impl StageCheckpointWriter for DatabaseProvider< } } -impl StorageReader for DatabaseProvider { +impl StorageReader for DatabaseProvider { fn plain_state_storages( &self, addresses_with_keys: impl IntoIterator)>, @@ -2407,7 +1809,77 @@ impl StorageReader for DatabaseProvider { } } -impl StateChangeWriter for DatabaseProvider { +impl StateWriter + for DatabaseProvider +{ + type Receipt = ReceiptTy; + + fn write_state( + &self, + execution_outcome: ExecutionOutcome, + is_value_known: OriginalValuesKnown, + write_receipts_to: StorageLocation, + ) -> ProviderResult<()> { + let (plain_state, reverts) = + execution_outcome.bundle.to_plain_state_and_reverts(is_value_known); + + self.write_state_reverts(reverts, execution_outcome.first_block)?; + self.write_state_changes(plain_state)?; + + let mut bodies_cursor = self.tx.cursor_read::()?; + + let has_receipts_pruning = self.prune_modes.has_receipts_pruning() || + execution_outcome.receipts.iter().flatten().any(|receipt| receipt.is_none()); + + // Prepare receipts cursor if we are going to write receipts to the database + // + // We are writing to database if requested or if there's any kind of receipt pruning + // configured + let mut receipts_cursor = (write_receipts_to.database() || has_receipts_pruning) + .then(|| self.tx.cursor_write::>()) + .transpose()?; + + // Prepare receipts static writer if we are going to write receipts to static files + // + // We are writing to static files if requested and if there's no receipt pruning configured + let mut receipts_static_writer = (write_receipts_to.static_files() && + !has_receipts_pruning) + .then(|| { + self.static_file_provider + .get_writer(execution_outcome.first_block, StaticFileSegment::Receipts) + }) + .transpose()?; + + for (idx, receipts) in execution_outcome.receipts.into_iter().enumerate() { + let block_number = execution_outcome.first_block + idx as u64; + + // Increment block number for receipts static file writer + if let Some(writer) = receipts_static_writer.as_mut() { + writer.increment_block(block_number)?; + } + + let first_tx_index = bodies_cursor + .seek_exact(block_number)? 
+ .map(|(_, indices)| indices.first_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; + + for (idx, receipt) in receipts.into_iter().enumerate() { + let receipt_idx = first_tx_index + idx as u64; + if let Some(receipt) = receipt { + if let Some(writer) = &mut receipts_static_writer { + writer.append_receipt(receipt_idx, &receipt)?; + } + + if let Some(cursor) = &mut receipts_cursor { + cursor.append(receipt_idx, receipt)?; + } + } + } + } + + Ok(()) + } + fn write_state_reverts( &self, reverts: PlainStateReverts, @@ -2597,9 +2069,15 @@ impl StateChangeWriter for DatabaseProvid /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. Set the local state to the value in the changeset - fn remove_state(&self, range: RangeInclusive) -> ProviderResult<()> { + fn remove_state_above( + &self, + block: BlockNumber, + remove_receipts_from: StorageLocation, + ) -> ProviderResult<()> { + let range = block + 1..=self.last_block_number()?; + if range.is_empty() { - return Ok(()) + return Ok(()); } // We are not removing block meta as it is used to get block changesets. @@ -2608,8 +2086,6 @@ impl StateChangeWriter for DatabaseProvid // get transaction receipts let from_transaction_num = block_bodies.first().expect("already checked if there are blocks").1.first_tx_num(); - let to_transaction_num = - block_bodies.last().expect("already checked if there are blocks").1.last_tx_num(); let storage_range = BlockNumberAddress::range(range.clone()); @@ -2662,8 +2138,7 @@ impl StateChangeWriter for DatabaseProvid } } - // iterate over block body and remove receipts - self.remove::(from_transaction_num..=to_transaction_num)?; + self.remove_receipts_from(from_transaction_num, block, remove_receipts_from)?; Ok(()) } @@ -2689,7 +2164,13 @@ impl StateChangeWriter for DatabaseProvid /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. Set the local state to the value in the changeset - fn take_state(&self, range: RangeInclusive) -> ProviderResult { + fn take_state_above( + &self, + block: BlockNumber, + remove_receipts_from: StorageLocation, + ) -> ProviderResult> { + let range = block + 1..=self.last_block_number()?; + if range.is_empty() { return Ok(ExecutionOutcome::default()) } @@ -2757,22 +2238,45 @@ impl StateChangeWriter for DatabaseProvid } } - // iterate over block body and create ExecutionResult - let mut receipt_iter = - self.take::(from_transaction_num..=to_transaction_num)?.into_iter(); + // Collect receipts into tuples (tx_num, receipt) to correctly handle pruned receipts + let mut receipts_iter = self + .static_file_provider + .get_range_with_static_file_or_database( + StaticFileSegment::Receipts, + from_transaction_num..to_transaction_num + 1, + |static_file, range, _| { + static_file + .receipts_by_tx_range(range.clone()) + .map(|r| range.into_iter().zip(r).collect()) + }, + |range, _| { + self.tx + .cursor_read::>()? + .walk_range(range)? + .map(|r| r.map_err(Into::into)) + .collect() + }, + |_| true, + )? + .into_iter() + .peekable(); let mut receipts = Vec::with_capacity(block_bodies.len()); // loop break if we are at the end of the blocks. 
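The peekable receipt stream built above feeds the per-block loop that follows. As a self-contained restatement of that alignment step (function name hypothetical), a sparse, sorted `(tx_num, receipt)` stream is expanded into a dense per-block list with `None` holes for pruned receipts:

use std::{iter::Peekable, ops::Range};

// Walk the block's transaction numbers; consume a receipt only when its
// tx number matches, otherwise record a gap for the pruned receipt.
fn align_receipts<R, I: Iterator<Item = (u64, R)>>(
    tx_range: Range<u64>,
    receipts: &mut Peekable<I>,
) -> Vec<Option<R>> {
    tx_range
        .map(|num| {
            if receipts.peek().is_some_and(|(n, _)| *n == num) {
                receipts.next().map(|(_, r)| r)
            } else {
                None
            }
        })
        .collect()
}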
for (_, block_body) in block_bodies { let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); - for _ in block_body.tx_num_range() { - if let Some((_, receipt)) = receipt_iter.next() { - block_receipts.push(Some(receipt)); + for num in block_body.tx_num_range() { + if receipts_iter.peek().is_some_and(|(n, _)| *n == num) { + block_receipts.push(receipts_iter.next().map(|(_, r)| r)); + } else { + block_receipts.push(None); } } receipts.push(block_receipts); } + self.remove_receipts_from(from_transaction_num, block, remove_receipts_from)?; + Ok(ExecutionOutcome::new_init( state, reverts, @@ -2784,7 +2288,7 @@ impl StateChangeWriter for DatabaseProvid } } -impl TrieWriter for DatabaseProvider { +impl TrieWriter for DatabaseProvider { /// Writes trie updates. Returns the number of entries modified. fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult { if trie_updates.is_empty() { @@ -2834,7 +2338,7 @@ impl TrieWriter for DatabaseProvider StorageTrieWriter for DatabaseProvider { +impl StorageTrieWriter for DatabaseProvider { /// Writes storage trie updates from the given storage trie map. First sorts the storage trie /// updates by the hashed address, writing in sorted order. fn write_storage_trie_updates( @@ -2871,20 +2375,18 @@ impl StorageTrieWriter for DatabaseProvid } } -impl HashingWriter for DatabaseProvider { - fn unwind_account_hashing( +impl HashingWriter for DatabaseProvider { + fn unwind_account_hashing<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, ) -> ProviderResult>> { // Aggregate all block changesets and make a list of accounts that have been changed. // Note that collecting and then reversing the order is necessary to ensure that the // changes are applied in the correct order. - let hashed_accounts = self - .tx - .cursor_read::()? - .walk_range(range)? - .map(|entry| entry.map(|(_, e)| (keccak256(e.address), e.info))) - .collect::, _>>()? + let hashed_accounts = changesets + .into_iter() + .map(|(_, e)| (keccak256(e.address), e.info)) + .collect::>() .into_iter() .rev() .collect::>(); @@ -2902,13 +2404,25 @@ impl HashingWriter for DatabaseProvider, + ) -> ProviderResult>> { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + self.unwind_account_hashing(changesets.iter()) + } + fn insert_account_for_hashing( &self, - accounts: impl IntoIterator)>, + changesets: impl IntoIterator)>, ) -> ProviderResult>> { let mut hashed_accounts_cursor = self.tx.cursor_write::()?; let hashed_accounts = - accounts.into_iter().map(|(ad, ac)| (keccak256(ad), ac)).collect::>(); + changesets.into_iter().map(|(ad, ac)| (keccak256(ad), ac)).collect::>(); for (hashed_address, account) in &hashed_accounts { if let Some(account) = account { hashed_accounts_cursor.upsert(*hashed_address, *account)?; @@ -2921,23 +2435,20 @@ impl HashingWriter for DatabaseProvider, + changesets: impl Iterator, ) -> ProviderResult>> { // Aggregate all block changesets and make list of accounts that have been changed. - let mut changesets = self.tx.cursor_read::()?; let mut hashed_storages = changesets - .walk_range(range)? 
- .map(|entry| { - entry.map(|(BlockNumberAddress((_, address)), storage_entry)| { - (keccak256(address), keccak256(storage_entry.key), storage_entry.value) - }) + .into_iter() + .map(|(BlockNumberAddress((_, address)), storage_entry)| { + (keccak256(address), keccak256(storage_entry.key), storage_entry.value) }) - .collect::, _>>()?; + .collect::>(); hashed_storages.sort_by_key(|(ha, hk, _)| (*ha, *hk)); // Apply values to HashedState, and remove the account if it's None. let mut hashed_storage_keys: HashMap> = - HashMap::with_capacity(hashed_storages.len()); + HashMap::with_capacity_and_hasher(hashed_storages.len(), Default::default()); let mut hashed_storage = self.tx.cursor_dup_write::()?; for (hashed_address, key, value) in hashed_storages.into_iter().rev() { hashed_storage_keys.entry(hashed_address).or_default().insert(key); @@ -2957,6 +2468,18 @@ impl HashingWriter for DatabaseProvider, + ) -> ProviderResult>> { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + self.unwind_storage_hashing(changesets.into_iter()) + } + fn insert_storage_for_hashing( &self, storages: impl IntoIterator)>, @@ -3077,17 +2600,15 @@ impl HashingWriter for DatabaseProvider HistoryWriter for DatabaseProvider { - fn unwind_account_history_indices( +impl HistoryWriter for DatabaseProvider { + fn unwind_account_history_indices<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, ) -> ProviderResult { - let mut last_indices = self - .tx - .cursor_read::()? - .walk_range(range)? - .map(|entry| entry.map(|(index, account)| (account.address, index))) - .collect::, _>>()?; + let mut last_indices = changesets + .into_iter() + .map(|(index, account)| (account.address, *index)) + .collect::>(); last_indices.sort_by_key(|(a, _)| *a); // Unwind the account history index. @@ -3114,6 +2635,18 @@ impl HistoryWriter for DatabaseProvider, + ) -> ProviderResult { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? + .collect::, _>>()?; + self.unwind_account_history_indices(changesets.iter()) + } + fn insert_account_history_index( &self, account_transitions: impl IntoIterator)>, @@ -3126,16 +2659,12 @@ impl HistoryWriter for DatabaseProvider, + changesets: impl Iterator, ) -> ProviderResult { - let mut storage_changesets = self - .tx - .cursor_read::()? - .walk_range(range)? - .map(|entry| { - entry.map(|(BlockNumberAddress((bn, address)), storage)| (address, storage.key, bn)) - }) - .collect::, _>>()?; + let mut storage_changesets = changesets + .into_iter() + .map(|(BlockNumberAddress((bn, address)), storage)| (address, storage.key, bn)) + .collect::>(); storage_changesets.sort_by_key(|(address, key, _)| (*address, *key)); let mut cursor = self.tx.cursor_write::()?; @@ -3164,6 +2693,18 @@ impl HistoryWriter for DatabaseProvider, + ) -> ProviderResult { + let changesets = self + .tx + .cursor_read::()? + .walk_range(range)? 
+ .collect::, _>>()?; + self.unwind_storage_history_indices(changesets.into_iter()) + } + fn insert_storage_history_index( &self, storage_transitions: impl IntoIterator)>, @@ -3193,212 +2734,62 @@ impl HistoryWriter for DatabaseProvider BlockExecutionReader - for DatabaseProvider +impl BlockExecutionWriter + for DatabaseProvider { - fn get_block_and_execution_range( + fn take_block_and_execution_above( &self, - range: RangeInclusive, - ) -> ProviderResult { - // get blocks - let blocks = self.get_block_range(range.clone())?; - - // get execution res - let execution_state = self.get_state(range)?.unwrap_or_default(); - - Ok(Chain::new(blocks, execution_state, None)) - } -} - -impl StateReader for DatabaseProvider { - fn get_state(&self, block: BlockNumber) -> ProviderResult> { - self.get_state(block..=block) - } -} - -impl - BlockExecutionWriter for DatabaseProvider -{ - fn take_block_and_execution_range( - &self, - range: RangeInclusive, - ) -> ProviderResult { - let storage_range = BlockNumberAddress::range(range.clone()); - - // Unwind account hashes. Add changed accounts to account prefix set. - let hashed_addresses = self.unwind_account_hashing(range.clone())?; - let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); - let mut destroyed_accounts = HashSet::default(); - for (hashed_address, account) in hashed_addresses { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - if account.is_none() { - destroyed_accounts.insert(hashed_address); - } - } + block: BlockNumber, + remove_from: StorageLocation, + ) -> ProviderResult> { + let range = block + 1..=self.last_block_number()?; - // Unwind account history indices. - self.unwind_account_history_indices(range.clone())?; - - // Unwind storage hashes. Add changed account and storage keys to corresponding prefix - // sets. - let mut storage_prefix_sets = HashMap::::default(); - let storage_entries = self.unwind_storage_hashing(storage_range.clone())?; - for (hashed_address, hashed_slots) in storage_entries { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); - for slot in hashed_slots { - storage_prefix_set.insert(Nibbles::unpack(slot)); - } - storage_prefix_sets.insert(hashed_address, storage_prefix_set.freeze()); - } - - // Unwind storage history indices. - self.unwind_storage_history_indices(storage_range)?; - - // Calculate the reverted merkle root. - // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets - // are pre-loaded. - let prefix_sets = TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - storage_prefix_sets, - destroyed_accounts, - }; - let (new_state_root, trie_updates) = StateRoot::from_tx(&self.tx) - .with_prefix_sets(prefix_sets) - .root_with_updates() - .map_err(Into::::into)?; - - let parent_number = range.start().saturating_sub(1); - let parent_state_root = self - .header_by_number(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? - .state_root; - - // state root should be always correct as we are reverting state. - // but for sake of double verification we will check it again. - if new_state_root != parent_state_root { - let parent_hash = self - .block_hash(parent_number)? 
- .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; - return Err(ProviderError::UnwindStateRootMismatch(Box::new(RootMismatch { - root: GotExpected { got: new_state_root, expected: parent_state_root }, - block_number: parent_number, - block_hash: parent_hash, - }))) - } - self.write_trie_updates(&trie_updates)?; - - // get blocks - let blocks = self.take_block_range(range.clone())?; - let unwind_to = blocks.first().map(|b| b.number.saturating_sub(1)); + self.unwind_trie_state_range(range.clone())?; // get execution res - let execution_state = self.take_state(range.clone())?; + let execution_state = self.take_state_above(block, remove_from)?; + + let blocks = self.sealed_block_with_senders_range(range)?; // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. - self.remove::(range)?; + self.remove_blocks_above(block, remove_from)?; // Update pipeline progress - if let Some(fork_number) = unwind_to { - self.update_pipeline_stages(fork_number, true)?; - } + self.update_pipeline_stages(block, true)?; Ok(Chain::new(blocks, execution_state, None)) } - fn remove_block_and_execution_range( + fn remove_block_and_execution_above( &self, - range: RangeInclusive, + block: BlockNumber, + remove_from: StorageLocation, ) -> ProviderResult<()> { - let storage_range = BlockNumberAddress::range(range.clone()); + let range = block + 1..=self.last_block_number()?; - // Unwind account hashes. Add changed accounts to account prefix set. - let hashed_addresses = self.unwind_account_hashing(range.clone())?; - let mut account_prefix_set = PrefixSetMut::with_capacity(hashed_addresses.len()); - let mut destroyed_accounts = HashSet::default(); - for (hashed_address, account) in hashed_addresses { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - if account.is_none() { - destroyed_accounts.insert(hashed_address); - } - } - - // Unwind account history indices. - self.unwind_account_history_indices(range.clone())?; - - // Unwind storage hashes. Add changed account and storage keys to corresponding prefix - // sets. - let mut storage_prefix_sets = HashMap::::default(); - let storage_entries = self.unwind_storage_hashing(storage_range.clone())?; - for (hashed_address, hashed_slots) in storage_entries { - account_prefix_set.insert(Nibbles::unpack(hashed_address)); - let mut storage_prefix_set = PrefixSetMut::with_capacity(hashed_slots.len()); - for slot in hashed_slots { - storage_prefix_set.insert(Nibbles::unpack(slot)); - } - storage_prefix_sets.insert(hashed_address, storage_prefix_set.freeze()); - } - - // Unwind storage history indices. - self.unwind_storage_history_indices(storage_range)?; - - // Calculate the reverted merkle root. - // This is the same as `StateRoot::incremental_root_with_updates`, only the prefix sets - // are pre-loaded. - let prefix_sets = TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - storage_prefix_sets, - destroyed_accounts, - }; - let (new_state_root, trie_updates) = StateRoot::from_tx(&self.tx) - .with_prefix_sets(prefix_sets) - .root_with_updates() - .map_err(Into::::into)?; - - let parent_number = range.start().saturating_sub(1); - let parent_state_root = self - .header_by_number(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))? - .state_root; - - // state root should be always correct as we are reverting state. - // but for sake of double verification we will check it again. 
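All of the new `*_above` entry points derive their working range the same way and become no-ops when it is empty. A tiny runnable illustration, with plain `u64` standing in for `BlockNumber`:

use std::ops::RangeInclusive;

// Everything strictly above `block`, up to the current tip. When `block` is
// already the tip, the range is empty and the unwind does nothing.
fn unwind_range(block: u64, tip: u64) -> RangeInclusive<u64> {
    block + 1..=tip
}

fn main() {
    assert!(unwind_range(10, 10).is_empty()); // already at tip: nothing to do
    assert_eq!(unwind_range(8, 10).count(), 2); // unwinds blocks 9 and 10
}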
- if new_state_root != parent_state_root { - let parent_hash = self - .block_hash(parent_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_number.into()))?; - return Err(ProviderError::UnwindStateRootMismatch(Box::new(RootMismatch { - root: GotExpected { got: new_state_root, expected: parent_state_root }, - block_number: parent_number, - block_hash: parent_hash, - }))) - } - self.write_trie_updates(&trie_updates)?; - - // get blocks - let blocks = self.take_block_range(range.clone())?; - let unwind_to = blocks.first().map(|b| b.number.saturating_sub(1)); + self.unwind_trie_state_range(range)?; // remove execution res - self.remove_state(range.clone())?; + self.remove_state_above(block, remove_from)?; // remove block bodies it is needed for both get block range and get block execution results // that is why it is deleted afterwards. - self.remove::(range)?; + self.remove_blocks_above(block, remove_from)?; // Update pipeline progress - if let Some(block_number) = unwind_to { - self.update_pipeline_stages(block_number, true)?; - } + self.update_pipeline_stages(block, true)?; Ok(()) } } -impl BlockWriter - for DatabaseProvider +impl BlockWriter + for DatabaseProvider { + type Block = BlockTy; + type Receipt = ReceiptTy; + /// Inserts the block into the database, always modifying the following tables: /// * [`CanonicalHeaders`](tables::CanonicalHeaders) /// * [`Headers`](tables::Headers) @@ -3413,7 +2804,6 @@ impl, + write_to: StorageLocation, ) -> ProviderResult { - let block_number = block.number; + let block_number = block.number(); let mut durations_recorder = metrics::DurationsRecorder::default(); - self.tx.put::(block_number, block.hash())?; - durations_recorder.record_relative(metrics::Action::InsertCanonicalHeaders); - - // Put header with canonical hashes. - self.tx.put::(block_number, block.header.as_ref().clone())?; - durations_recorder.record_relative(metrics::Action::InsertHeaders); - - self.tx.put::(block.hash(), block_number)?; - durations_recorder.record_relative(metrics::Action::InsertHeaderNumbers); - // total difficulty let ttd = if block_number == 0 { - block.difficulty + block.difficulty() } else { let parent_block_number = block_number - 1; let parent_ttd = self.header_td_by_number(parent_block_number)?.unwrap_or_default(); durations_recorder.record_relative(metrics::Action::GetParentTD); - parent_ttd + block.difficulty + parent_ttd + block.difficulty() }; - self.tx.put::(block_number, ttd.into())?; - durations_recorder.record_relative(metrics::Action::InsertHeaderTerminalDifficulties); + if write_to.database() { + self.tx.put::(block_number, block.hash())?; + durations_recorder.record_relative(metrics::Action::InsertCanonicalHeaders); - // insert body ommers data - if !block.body.ommers.is_empty() { - self.tx.put::( - block_number, - StoredBlockOmmers { ommers: block.block.body.ommers }, - )?; - durations_recorder.record_relative(metrics::Action::InsertBlockOmmers); + // Put header with canonical hashes. 
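The total-difficulty bookkeeping in `insert_block` above is a running sum seeded at genesis. A minimal sketch, with `u128` standing in for reth's `U256`:

// Genesis carries its own difficulty as the TTD; every other block adds its
// difficulty onto the parent's accumulated total.
fn block_ttd(block_number: u64, difficulty: u128, parent_ttd: u128) -> u128 {
    if block_number == 0 {
        difficulty
    } else {
        parent_ttd + difficulty
    }
}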
+ self.tx
+ .put::>>(block_number, block.header.as_ref().clone())?;
+ durations_recorder.record_relative(metrics::Action::InsertHeaders);
+
+ self.tx.put::(block_number, ttd.into())?;
+ durations_recorder.record_relative(metrics::Action::InsertHeaderTerminalDifficulties);
+ }
+
+ if write_to.static_files() {
+ let mut writer =
+ self.static_file_provider.get_writer(block_number, StaticFileSegment::Headers)?;
+ writer.append_header(&block.header, ttd, &block.hash())?;
}

+ self.tx.put::(block.hash(), block_number)?;
+ durations_recorder.record_relative(metrics::Action::InsertHeaderNumbers);
+
let mut next_tx_num = self
.tx
.cursor_read::()?
@@ -3469,105 +2860,204 @@ impl(next_tx_num, *sender)?;
- tx_senders_elapsed += start.elapsed();
}

- let start = Instant::now();
- self.tx.put::(next_tx_num, transaction.into())?;
- let elapsed = start.elapsed();
- if elapsed > Duration::from_secs(1) {
- warn!(
- target: "providers::db",
- ?block_number,
- tx_num = %next_tx_num,
- hash = %hash,
- ?elapsed,
- "Transaction insertion took too long"
- );
- }
- transactions_elapsed += elapsed;
-
- if self
- .prune_modes
- .transaction_lookup
- .filter(|prune_mode| prune_mode.is_full())
- .is_none()
- {
- let start = Instant::now();
- self.tx.put::(hash, next_tx_num)?;
- tx_hash_numbers_elapsed += start.elapsed();
+ if self.prune_modes.transaction_lookup.is_none_or(|m| !m.is_full()) {
+ self.tx.put::(*hash, next_tx_num)?;
}
next_tx_num += 1;
}

- durations_recorder
- .record_duration(metrics::Action::InsertTransactionSenders, tx_senders_elapsed);
- durations_recorder
- .record_duration(metrics::Action::InsertTransactions, transactions_elapsed);
- durations_recorder.record_duration(
- metrics::Action::InsertTransactionHashNumbers,
- tx_hash_numbers_elapsed,
+
+ self.append_block_bodies(vec![(block_number, Some(block.block.body))], write_to)?;
+
+ debug!(
+ target: "providers::db",
+ ?block_number,
+ actions = ?durations_recorder.actions,
+ "Inserted block"
);

- if let Some(withdrawals) = block.block.body.withdrawals {
- if !withdrawals.is_empty() {
- self.tx.put::(
- block_number,
- StoredBlockWithdrawals { withdrawals },
- )?;
- durations_recorder.record_relative(metrics::Action::InsertBlockWithdrawals);
+ Ok(StoredBlockBodyIndices { first_tx_num, tx_count })
+ }
+
+ fn append_block_bodies(
+ &self,
+ bodies: Vec<(BlockNumber, Option>)>,
+ write_transactions_to: StorageLocation,
+ ) -> ProviderResult<()> {
+ let Some(from_block) = bodies.first().map(|(block, _)| *block) else { return Ok(()) };
+
+ // Initialize writer if we will be writing transactions to static files
+ let mut tx_static_writer = write_transactions_to
+ .static_files()
+ .then(|| {
+ self.static_file_provider.get_writer(from_block, StaticFileSegment::Transactions)
+ })
+ .transpose()?;
+
+ let mut block_indices_cursor = self.tx.cursor_write::()?;
+ let mut tx_block_cursor = self.tx.cursor_write::()?;
+
+ // Initialize cursor if we will be writing transactions to the database
+ let mut tx_cursor = write_transactions_to
+ .database()
+ .then(|| self.tx.cursor_write::>>())
+ .transpose()?;
+
+ // Get id for the next tx_num or zero if there are no transactions.
+ let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default();
+
+ for (block_number, body) in &bodies {
+ // Increment the block on the static file header.
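// --- Illustrative sketch (not part of the diff): the `bool::then(..).transpose()?`
// pattern used above to open writers only for the selected storage location.
// `open_writer` and the error type are made up for the example; only the
// control flow mirrors the diff.
fn open_writer(enabled: bool) -> Result<Option<String>, std::io::Error> {
    // `then` yields `Some(result)` only when `enabled` is true; `transpose`
    // turns `Option<Result<T, E>>` into `Result<Option<T>, E>` so that `?`
    // can propagate the error while keeping the writer optional.
    enabled.then(|| Ok(String::from("writer"))).transpose()
}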
+ if let Some(writer) = tx_static_writer.as_mut() { + writer.increment_block(*block_number)?; + } + + let tx_count = body.as_ref().map(|b| b.transactions().len() as u64).unwrap_or_default(); + let block_indices = StoredBlockBodyIndices { first_tx_num: next_tx_num, tx_count }; + + let mut durations_recorder = metrics::DurationsRecorder::default(); + + // insert block meta + block_indices_cursor.append(*block_number, block_indices)?; + + durations_recorder.record_relative(metrics::Action::InsertBlockBodyIndices); + + let Some(body) = body else { continue }; + + // write transaction block index + if !body.transactions().is_empty() { + tx_block_cursor.append(block_indices.last_tx_num(), *block_number)?; + durations_recorder.record_relative(metrics::Action::InsertTransactionBlocks); + } + + // write transactions + for transaction in body.transactions() { + if let Some(writer) = tx_static_writer.as_mut() { + writer.append_transaction(next_tx_num, transaction)?; + } + if let Some(cursor) = tx_cursor.as_mut() { + cursor.append(next_tx_num, transaction.clone())?; + } + + // Increment transaction id for each transaction. + next_tx_num += 1; + } + + debug!( + target: "providers::db", + ?block_number, + actions = ?durations_recorder.actions, + "Inserted block body" + ); + } + + self.storage.writer().write_block_bodies(self, bodies)?; + + Ok(()) + } + + fn remove_blocks_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult<()> { + let mut canonical_headers_cursor = self.tx.cursor_write::()?; + let mut rev_headers = canonical_headers_cursor.walk_back(None)?; + + while let Some(Ok((number, hash))) = rev_headers.next() { + if number <= block { + break } + self.tx.delete::(hash, None)?; + rev_headers.delete_current()?; } + self.remove::>>(block + 1..)?; + self.remove::(block + 1..)?; - if let Some(requests) = block.block.body.requests { - self.tx.put::(block_number, requests)?; - durations_recorder.record_relative(metrics::Action::InsertBlockRequests); + // First transaction to be removed + let unwind_tx_from = self + .tx + .get::(block)? + .map(|b| b.next_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; + + // Last transaction to be removed + let unwind_tx_to = self + .tx + .cursor_read::()? + .last()? + // shouldn't happen because this was OK above + .ok_or(ProviderError::BlockBodyIndicesNotFound(block))? + .1 + .last_tx_num(); + + if unwind_tx_from <= unwind_tx_to { + for (hash, _) in self.transaction_hashes_by_range(unwind_tx_from..(unwind_tx_to + 1))? { + self.tx.delete::(hash, None)?; + } } - let block_indices = StoredBlockBodyIndices { first_tx_num, tx_count }; - self.tx.put::(block_number, block_indices.clone())?; - durations_recorder.record_relative(metrics::Action::InsertBlockBodyIndices); + self.remove::(unwind_tx_from..)?; + + self.remove_bodies_above(block, remove_transactions_from)?; - if !block_indices.is_empty() { - self.tx.put::(block_indices.last_tx_num(), block_number)?; - durations_recorder.record_relative(metrics::Action::InsertTransactionBlocks); + Ok(()) + } + + fn remove_bodies_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult<()> { + self.storage.writer().remove_block_bodies_above(self, block)?; + + // First transaction to be removed + let unwind_tx_from = self + .tx + .get::(block)? 
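// --- Illustrative sketch (not part of the diff): the static-file prune count
// computed just below in `remove_bodies_above`. If the highest tx stored in
// static files is 99 and the first tx to unwind is 70, (99 + 1) - 70 = 30
// rows are pruned; `saturating_sub` yields 0 when static files are behind.
fn static_file_txs_to_delete(highest_static_tx: Option<u64>, unwind_tx_from: u64) -> u64 {
    highest_static_tx
        .map(|tx| (tx + 1).saturating_sub(unwind_tx_from))
        .unwrap_or_default()
}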
+ .map(|b| b.next_tx_num()) + .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; + + self.remove::(block + 1..)?; + self.remove::(unwind_tx_from..)?; + + if remove_transactions_from.database() { + self.remove::>>(unwind_tx_from..)?; } - debug!( - target: "providers::db", - ?block_number, - actions = ?durations_recorder.actions, - "Inserted block" - ); + if remove_transactions_from.static_files() { + let static_file_tx_num = self + .static_file_provider + .get_highest_static_file_tx(StaticFileSegment::Transactions); + + let to_delete = static_file_tx_num + .map(|static_tx| (static_tx + 1).saturating_sub(unwind_tx_from)) + .unwrap_or_default(); - Ok(block_indices) + self.static_file_provider + .latest_writer(StaticFileSegment::Transactions)? + .prune_transactions(to_delete, block)?; + } + + Ok(()) } /// TODO(joshie): this fn should be moved to `UnifiedStorageWriter` eventually fn append_blocks_with_state( &self, - blocks: Vec, - execution_outcome: ExecutionOutcome, + blocks: Vec>, + execution_outcome: ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, ) -> ProviderResult<()> { @@ -3576,25 +3066,20 @@ impl PruneCheckpointReader for DatabaseProvider { +impl PruneCheckpointReader for DatabaseProvider { fn get_prune_checkpoint( &self, segment: PruneSegment, @@ -3632,7 +3117,7 @@ impl PruneCheckpointReader for DatabaseProvider PruneCheckpointWriter for DatabaseProvider { +impl PruneCheckpointWriter for DatabaseProvider { fn save_prune_checkpoint( &self, segment: PruneSegment, @@ -3642,7 +3127,7 @@ impl PruneCheckpointWriter for DatabaseProvider< } } -impl StatsReader for DatabaseProvider { +impl StatsReader for DatabaseProvider { fn count_entries(&self) -> ProviderResult { let db_entries = self.tx.entries::()?; let static_file_entries = match self.static_file_provider.count_entries::() { @@ -3655,7 +3140,7 @@ impl StatsReader for DatabaseProvider { } } -impl ChainStateBlockReader for DatabaseProvider { +impl ChainStateBlockReader for DatabaseProvider { fn last_finalized_block_number(&self) -> ProviderResult> { let mut finalized_blocks = self .tx @@ -3681,7 +3166,7 @@ impl ChainStateBlockReader for DatabaseProvider ChainStateBlockWriter for DatabaseProvider { +impl ChainStateBlockWriter for DatabaseProvider { fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()> { Ok(self .tx @@ -3695,7 +3180,7 @@ impl ChainStateBlockWriter for DatabaseProvider< } } -impl DBProvider for DatabaseProvider { +impl DBProvider for DatabaseProvider { type Tx = TX; fn tx_ref(&self) -> &Self::Tx { @@ -3714,93 +3199,3 @@ impl DBProvider for DatabasePro self.prune_modes_ref() } } - -/// Helper method to recover senders for any blocks in the db which do not have senders. This -/// compares the length of the input senders [`Vec`], with the length of given transactions [`Vec`], -/// and will add to the input senders vec if there are more transactions. -/// -/// NOTE: This will modify the input senders list, which is why a mutable reference is required. -fn recover_block_senders( - senders: &mut Vec<(u64, Address)>, - transactions: &[(u64, TransactionSigned)], - first_transaction: u64, - last_transaction: u64, -) -> ProviderResult<()> { - // Recover senders manually if not found in db - // NOTE: Transactions are always guaranteed to be in the database whereas - // senders might be pruned. 
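// --- Illustrative sketch (not part of the diff): the merge strategy of the
// removed `recover_block_senders` below. Both lists are sorted by tx number;
// transactions are walked once, the senders iterator is advanced only on a
// match, and the positions with a missing sender are collected for recovery.
fn missing_sender_positions(senders: &[u64], txs: &[u64]) -> Vec<usize> {
    let mut senders = senders.iter().peekable();
    let mut missing = Vec::new();
    for (i, tx_num) in txs.iter().enumerate() {
        match senders.peek() {
            // Sender present for this tx number: consume it.
            Some(s) if *s == tx_num => { senders.next(); }
            // No sender stored for this tx: mark it for recovery.
            _ => missing.push(i),
        }
    }
    missing
}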
- if senders.len() != transactions.len() { - if senders.len() > transactions.len() { - error!(target: "providers::db", senders=%senders.len(), transactions=%transactions.len(), - first_tx=%first_transaction, last_tx=%last_transaction, - "unexpected senders and transactions mismatch"); - } - let missing = transactions.len().saturating_sub(senders.len()); - senders.reserve(missing); - // Find all missing senders, their corresponding tx numbers and indexes to the original - // `senders` vector at which the recovered senders will be inserted. - let mut missing_senders = Vec::with_capacity(missing); - { - let mut senders = senders.iter().peekable(); - - // `transactions` contain all entries. `senders` contain _some_ of the senders for - // these transactions. Both are sorted and indexed by `TxNumber`. - // - // The general idea is to iterate on both `transactions` and `senders`, and advance - // the `senders` iteration only if it matches the current `transactions` entry's - // `TxNumber`. Otherwise, add the transaction to the list of missing senders. - for (i, (tx_number, transaction)) in transactions.iter().enumerate() { - if let Some((sender_tx_number, _)) = senders.peek() { - if sender_tx_number == tx_number { - // If current sender's `TxNumber` matches current transaction's - // `TxNumber`, advance the senders iterator. - senders.next(); - } else { - // If current sender's `TxNumber` doesn't match current transaction's - // `TxNumber`, add it to missing senders. - missing_senders.push((i, tx_number, transaction)); - } - } else { - // If there's no more senders left, but we're still iterating over - // transactions, add them to missing senders - missing_senders.push((i, tx_number, transaction)); - } - } - } - - // Recover senders - let recovered_senders = TransactionSigned::recover_signers( - missing_senders.iter().map(|(_, _, tx)| *tx).collect::>(), - missing_senders.len(), - ) - .ok_or(ProviderError::SenderRecoveryError)?; - - // Insert recovered senders along with tx numbers at the corresponding indexes to the - // original `senders` vector - for ((i, tx_number, _), sender) in missing_senders.into_iter().zip(recovered_senders) { - // Insert will put recovered senders at necessary positions and shift the rest - senders.insert(i, (*tx_number, sender)); - } - - // Debug assertions which are triggered during the test to ensure that all senders are - // present and sorted - debug_assert_eq!(senders.len(), transactions.len(), "missing one or more senders"); - debug_assert!(senders.iter().tuple_windows().all(|(a, b)| a.0 < b.0), "senders not sorted"); - } - - Ok(()) -} - -fn range_size_hint(range: &impl RangeBounds) -> Option { - let start = match range.start_bound().cloned() { - Bound::Included(start) => start, - Bound::Excluded(start) => start.checked_add(1)?, - Bound::Unbounded => 0, - }; - let end = match range.end_bound().cloned() { - Bound::Included(end) => end.saturating_add(1), - Bound::Excluded(end) => end, - Bound::Unbounded => return None, - }; - end.checked_sub(start).map(|x| x as _) -} diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 561e1d97436..b4a99541a89 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -1,14 +1,20 @@ use crate::{ AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - BlockSource, BlockchainTreePendingStateProvider, CanonChainTracker, CanonStateNotifications, + BlockSource, 
BlockchainTreePendingStateProvider, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, FullExecutionDataProvider, HeaderProvider, - ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, RequestsProvider, - StageCheckpointReader, StateProviderBox, StateProviderFactory, StaticFileProviderFactory, - TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, + NodePrimitivesProvider, ProviderError, PruneCheckpointReader, ReceiptProvider, + ReceiptProviderIdExt, StageCheckpointReader, StateProviderBox, StateProviderFactory, + StaticFileProviderFactory, TransactionVariant, TransactionsProvider, TreeViewer, + WithdrawalsProvider, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag}; -use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable, TxHash, TxNumber, B256, U256}; +use alloy_consensus::Header; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, +}; +use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; +use alloy_rpc_types_engine::ForkchoiceState; use reth_blockchain_tree_api::{ error::{CanonicalError, InsertBlockError}, BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, @@ -16,16 +22,19 @@ use reth_blockchain_tree_api::{ }; use reth_chain_state::{ChainInfoTracker, ForkChoiceNotifications, ForkChoiceSubscriptions}; use reth_chainspec::{ChainInfo, EthereumHardforks}; +use reth_db::table::Value; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_node_types::NodeTypesWithDB; +use reth_node_types::{ + BlockTy, FullNodePrimitives, HeaderTy, NodeTypes, NodeTypesWithDB, ReceiptTy, TxTy, +}; use reth_primitives::{ - Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, - Withdrawals, + Account, BlockWithSenders, EthPrimitives, Receipt, SealedBlock, SealedBlockFor, + SealedBlockWithSenders, SealedHeader, TransactionMeta, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; +use reth_storage_api::CanonChainTracker; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ @@ -34,6 +43,7 @@ use std::{ sync::Arc, time::Instant, }; + use tracing::trace; mod database; @@ -55,16 +65,52 @@ mod bundle_state_provider; pub use bundle_state_provider::BundleStateProvider; mod consistent_view; -use alloy_rpc_types_engine::ForkchoiceState; pub use consistent_view::{ConsistentDbView, ConsistentViewError}; mod blockchain_provider; pub use blockchain_provider::BlockchainProvider2; +mod consistent; +pub use consistent::ConsistentProvider; + +/// Helper trait to bound [`NodeTypes`] so that combined with database they satisfy +/// [`ProviderNodeTypes`]. +pub trait NodeTypesForProvider +where + Self: NodeTypes< + ChainSpec: EthereumHardforks, + Storage: ChainStorage, + Primitives: FullNodePrimitives, + >, +{ +} + +impl NodeTypesForProvider for T where + T: NodeTypes< + ChainSpec: EthereumHardforks, + Storage: ChainStorage, + Primitives: FullNodePrimitives, + > +{ +} + /// Helper trait keeping common requirements of providers for [`NodeTypesWithDB`]. 
-pub trait ProviderNodeTypes: NodeTypesWithDB {} +pub trait ProviderNodeTypes +where + Self: NodeTypesForProvider + NodeTypesWithDB, +{ +} +impl ProviderNodeTypes for T where T: NodeTypesForProvider + NodeTypesWithDB {} -impl ProviderNodeTypes for T where T: NodeTypesWithDB {} +/// A helper trait with requirements for [`NodeTypesForProvider`] to be used within legacy +/// blockchain tree. +pub trait NodeTypesForTree: NodeTypesForProvider {} +impl NodeTypesForTree for T where T: NodeTypesForProvider {} + +/// Helper trait with requirements for [`ProviderNodeTypes`] to be used within legacy blockchain +/// tree. +pub trait TreeNodeTypes: ProviderNodeTypes + NodeTypesForTree {} +impl TreeNodeTypes for T where T: ProviderNodeTypes + NodeTypesForTree {} /// The main type for interacting with the blockchain. /// @@ -76,9 +122,9 @@ pub struct BlockchainProvider { /// Provider type used to access the database. database: ProviderFactory, /// The blockchain tree instance. - tree: Arc, + tree: Arc>, /// Tracks the chain info wrt forkchoice updates - chain_info: ChainInfoTracker, + chain_info: ChainInfoTracker, } impl Clone for BlockchainProvider { @@ -94,19 +140,19 @@ impl Clone for BlockchainProvider { impl BlockchainProvider { /// Sets the treeviewer for the provider. #[doc(hidden)] - pub fn with_tree(mut self, tree: Arc) -> Self { + pub fn with_tree(mut self, tree: Arc>) -> Self { self.tree = tree; self } } -impl BlockchainProvider { +impl BlockchainProvider { /// Create new provider instance that wraps the database and the blockchain tree, using the /// provided latest header to initialize the chain info tracker, alongside the finalized header /// if it exists. pub fn with_blocks( database: ProviderFactory, - tree: Arc, + tree: Arc>, latest: SealedHeader, finalized: Option, safe: Option, @@ -116,9 +162,12 @@ impl BlockchainProvider { /// Create a new provider using only the database and the tree, fetching the latest header from /// the database to initialize the provider. - pub fn new(database: ProviderFactory, tree: Arc) -> ProviderResult { + pub fn new( + database: ProviderFactory, + tree: Arc>, + ) -> ProviderResult { let provider = database.provider()?; - let best: ChainInfo = provider.chain_info()?; + let best = provider.chain_info()?; let latest_header = provider .header_by_number(best.best_number)? 
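// --- Illustrative sketch (not part of the diff): the trait-alias pattern used
// for `NodeTypesForProvider`/`ProviderNodeTypes`/`TreeNodeTypes` above: an
// empty trait collects requirements in its `where Self: ...` clause, and a
// blanket impl makes every type satisfying the bounds implement it for free.
trait ReadWrite where Self: std::io::Read + std::io::Write {}
impl<T> ReadWrite for T where T: std::io::Read + std::io::Write {}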
.ok_or_else(|| ProviderError::HeaderNotFound(best.best_number.into()))?; @@ -183,6 +232,10 @@ where } } +impl NodePrimitivesProvider for BlockchainProvider { + type Primitives = N::Primitives; +} + impl DatabaseProviderFactory for BlockchainProvider { type DB = N::DB; type Provider = as DatabaseProviderFactory>::Provider; @@ -198,12 +251,14 @@ impl DatabaseProviderFactory for BlockchainProvider { } impl StaticFileProviderFactory for BlockchainProvider { - fn static_file_provider(&self) -> StaticFileProvider { + fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } } -impl HeaderProvider for BlockchainProvider { +impl HeaderProvider for BlockchainProvider { + type Header = Header; + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.database.header(block_hash) } @@ -290,8 +345,14 @@ impl BlockIdReader for BlockchainProvider { } } -impl BlockReader for BlockchainProvider { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { +impl BlockReader for BlockchainProvider { + type Block = BlockTy; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { let block = match source { BlockSource::Any => { // check database first @@ -310,22 +371,26 @@ impl BlockReader for BlockchainProvider { Ok(block) } - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { match id { BlockHashOrNumber::Hash(hash) => self.find_block_by_hash(hash, BlockSource::Any), BlockHashOrNumber::Number(num) => self.database.block_by_number(num), } } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.tree.pending_block()) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(self.tree.pending_block_with_senders()) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { Ok(self.tree.pending_block_and_receipts()) } @@ -350,7 +415,7 @@ impl BlockReader for BlockchainProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.block_with_senders(id, transaction_kind) } @@ -358,53 +423,55 @@ impl BlockReader for BlockchainProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.sealed_block_with_senders(id, transaction_kind) } - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.database.block_range(range) } fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.database.sealed_block_with_senders_range(range) } } impl TransactionsProvider for BlockchainProvider { + type Transaction = TxTy; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.database.transaction_id(tx_hash) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.database.transaction_by_id(id) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> 
ProviderResult> { - self.database.transaction_by_id_no_hash(id) + ) -> ProviderResult> { + self.database.transaction_by_id_unhashed(id) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.database.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { self.database.transaction_by_hash_with_meta(tx_hash) } @@ -415,21 +482,21 @@ impl TransactionsProvider for BlockchainProvider { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.database.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.database.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.database.transactions_by_tx_range(range) } @@ -446,27 +513,32 @@ impl TransactionsProvider for BlockchainProvider { } impl ReceiptProvider for BlockchainProvider { - fn receipt(&self, id: TxNumber) -> ProviderResult> { + type Receipt = ReceiptTy; + + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.database.receipt(id) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { self.database.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>> { self.database.receipts_by_block(block) } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.database.receipts_by_tx_range(range) } } -impl ReceiptProviderIdExt for BlockchainProvider { +impl ReceiptProviderIdExt for BlockchainProvider { fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { match block { BlockId::Hash(rpc_block_hash) => { @@ -504,16 +576,6 @@ impl WithdrawalsProvider for BlockchainProvider { } } -impl RequestsProvider for BlockchainProvider { - fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - self.database.requests_by_block(id, timestamp) - } -} - impl StageCheckpointReader for BlockchainProvider { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.database.provider()?.get_stage_checkpoint(id) @@ -528,20 +590,7 @@ impl StageCheckpointReader for BlockchainProvider { } } -impl EvmEnvProvider for BlockchainProvider { - fn fill_env_at( - &self, - cfg: &mut CfgEnvWithHandlerCfg, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
, - {
- self.database.provider()?.fill_env_at(cfg, block_env, at, evm_config)
- }
-
+impl EvmEnvProvider for BlockchainProvider {
fn fill_env_with_header(
&self,
cfg: &mut CfgEnvWithHandlerCfg,
@@ -601,42 +650,13 @@ impl ChainSpecProvider for BlockchainProvider { } }

-impl StateProviderFactory for BlockchainProvider {
+impl StateProviderFactory for BlockchainProvider {
/// Storage provider for latest block
fn latest(&self) -> ProviderResult {
trace!(target: "providers::blockchain", "Getting latest block state provider");
self.database.latest()
}

- fn history_by_block_number(
- &self,
- block_number: BlockNumber,
- ) -> ProviderResult {
- trace!(target: "providers::blockchain", ?block_number, "Getting history by block number");
- self.ensure_canonical_block(block_number)?;
- self.database.history_by_block_number(block_number)
- }
-
- fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult {
- trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash");
- self.database.history_by_block_hash(block_hash)
- }
-
- fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult {
- trace!(target: "providers::blockchain", ?block, "Getting state by block hash");
- let mut state = self.history_by_block_hash(block);
-
- // we failed to get the state by hash, from disk, hash block be the pending block
- if state.is_err() {
- if let Ok(Some(pending)) = self.pending_state_by_hash(block) {
- // we found pending block by hash
- state = Ok(pending)
- }
- }
-
- state
- }
-
/// Returns a [`StateProviderBox`] indexed by the given block number or tag.
///
/// Note: if a number is provided this will only look at historical(canonical) state.
@@ -669,6 +689,35 @@ impl StateProviderFactory for BlockchainProvider { } }

+ fn history_by_block_number(
+ &self,
+ block_number: BlockNumber,
+ ) -> ProviderResult {
+ trace!(target: "providers::blockchain", ?block_number, "Getting history by block number");
+ self.ensure_canonical_block(block_number)?;
+ self.database.history_by_block_number(block_number)
+ }
+
+ fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult {
+ trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash");
+ self.database.history_by_block_hash(block_hash)
+ }
+
+ fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult {
+ trace!(target: "providers::blockchain", ?block, "Getting state by block hash");
+ let mut state = self.history_by_block_hash(block);
+
+ // we failed to get the state by hash from disk; the block may be the pending block
+ if state.is_err() {
+ if let Ok(Some(pending)) = self.pending_state_by_hash(block) {
+ // we found the pending block by hash
+ state = Ok(pending)
+ }
+ }
+
+ state
+ }
+
/// Returns the state provider for pending state.
/// /// If there's no pending block available then the latest state provider is returned: @@ -775,10 +824,9 @@ impl BlockchainTreeViewer for BlockchainProvider { } } -impl CanonChainTracker for BlockchainProvider -where - Self: BlockReader, -{ +impl CanonChainTracker for BlockchainProvider { + type Header = HeaderTy; + fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { // update timestamp self.chain_info.on_forkchoice_update_received(); @@ -809,11 +857,8 @@ where } } -impl BlockReaderIdExt for BlockchainProvider -where - Self: BlockReader + ReceiptProviderIdExt, -{ - fn block_by_id(&self, id: BlockId) -> ProviderResult> { +impl BlockReaderIdExt for BlockchainProvider { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { BlockId::Number(num) => self.block_by_number_or_tag(num), BlockId::Hash(hash) => { @@ -831,7 +876,10 @@ where } } - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + fn header_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult> { Ok(match id { BlockNumberOrTag::Latest => Some(self.chain_info.get_canonical_head().unseal()), BlockNumberOrTag::Finalized => { @@ -847,50 +895,39 @@ where fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, - ) -> ProviderResult> { + ) -> ProviderResult>> { match id { BlockNumberOrTag::Latest => Ok(Some(self.chain_info.get_canonical_head())), BlockNumberOrTag::Finalized => Ok(self.chain_info.get_finalized_header()), BlockNumberOrTag::Safe => Ok(self.chain_info.get_safe_header()), - BlockNumberOrTag::Earliest => self.header_by_number(0)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), + BlockNumberOrTag::Earliest => self + .header_by_number(0)? + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), BlockNumberOrTag::Pending => Ok(self.tree.pending_header()), - BlockNumberOrTag::Number(num) => self.header_by_number(num)?.map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ), + BlockNumberOrTag::Number(num) => self + .header_by_number(num)? 
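// --- Illustrative sketch (not part of the diff): the refactor applied
// throughout this hunk. The old three-step sealing
//     let sealed = h.seal_slow();
//     let (header, seal) = sealed.into_parts();
//     SealedHeader::new(header, seal)
// collapses into the single call the hunk substitutes inside `map_or_else`:
//     SealedHeader::seal(h)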
+ .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), } } - fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { + fn sealed_header_by_id( + &self, + id: BlockId, + ) -> ProviderResult>> { Ok(match id { BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }), + BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), }) } - fn header_by_id(&self, id: BlockId) -> ProviderResult> { + fn header_by_id(&self, id: BlockId) -> ProviderResult> { Ok(match id { BlockId::Number(num) => self.header_by_number_or_tag(num)?, BlockId::Hash(hash) => self.header(&hash.block_hash)?, }) } - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { match id { BlockId::Number(num) => self.ommers_by_number_or_tag(num), BlockId::Hash(hash) => { @@ -912,12 +949,14 @@ impl BlockchainTreePendingStateProvider for BlockchainProv } impl CanonStateSubscriptions for BlockchainProvider { - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.tree.subscribe_to_canonical_state() } } -impl ForkChoiceSubscriptions for BlockchainProvider { +impl ForkChoiceSubscriptions for BlockchainProvider { + type Header = HeaderTy; + fn subscribe_safe_block(&self) -> ForkChoiceNotifications { let receiver = self.chain_info.subscribe_safe_block(); ForkChoiceNotifications(receiver) diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 640041e0801..93752c1e278 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -1,7 +1,8 @@ use crate::{ - providers::{state::macros::delegate_provider_impls, StaticFileProvider}, - AccountReader, BlockHashReader, ProviderError, StateProvider, StateRootProvider, + providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, + HashedPostStateProvider, ProviderError, StateProvider, StateRootProvider, }; +use alloy_eips::merge::EPOCH_SLOTS; use alloy_primitives::{ map::{HashMap, HashSet}, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, @@ -13,18 +14,21 @@ use reth_db_api::{ table::Table, transaction::DbTx, }; -use reth_primitives::{constants::EPOCH_SLOTS, Account, Bytecode, StaticFileSegment}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_primitives::{Account, Bytecode}; +use reth_storage_api::{ + BlockNumReader, DBProvider, StateCommitmentProvider, StateProofProvider, StorageRootProvider, +}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ proof::{Proof, StorageProof}, updates::TrieUpdates, witness::TrieWitness, - AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, + AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageMultiProof, + StorageRoot, TrieInput, }; use reth_trie_db::{ DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot, - DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, + DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, StateCommitment, }; use std::fmt::Debug; @@ -40,15 +44,13 @@ use std::fmt::Debug; /// - [`tables::AccountChangeSets`] /// - 
[`tables::StorageChangeSets`] #[derive(Debug)] -pub struct HistoricalStateProviderRef<'b, TX: DbTx> { - /// Transaction - tx: &'b TX, +pub struct HistoricalStateProviderRef<'b, Provider> { + /// Database provider + provider: &'b Provider, /// Block number is main index for the history state of accounts and storages. block_number: BlockNumber, /// Lowest blocks at which different parts of the state are available. lowest_available_blocks: LowestAvailableBlocks, - /// Static File provider - static_file_provider: StaticFileProvider, } #[derive(Debug, Eq, PartialEq)] @@ -59,25 +61,22 @@ pub enum HistoryInfo { MaybeInPlainState, } -impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { +impl<'b, Provider: DBProvider + BlockNumReader + StateCommitmentProvider> + HistoricalStateProviderRef<'b, Provider> +{ /// Create new `StateProvider` for historical block number - pub fn new( - tx: &'b TX, - block_number: BlockNumber, - static_file_provider: StaticFileProvider, - ) -> Self { - Self { tx, block_number, lowest_available_blocks: Default::default(), static_file_provider } + pub fn new(provider: &'b Provider, block_number: BlockNumber) -> Self { + Self { provider, block_number, lowest_available_blocks: Default::default() } } /// Create new `StateProvider` for historical block number and lowest block numbers at which /// account & storage histories are available. pub const fn new_with_lowest_available_blocks( - tx: &'b TX, + provider: &'b Provider, block_number: BlockNumber, lowest_available_blocks: LowestAvailableBlocks, - static_file_provider: StaticFileProvider, ) -> Self { - Self { tx, block_number, lowest_available_blocks, static_file_provider } + Self { provider, block_number, lowest_available_blocks } } /// Lookup an account in the `AccountsHistory` table @@ -116,15 +115,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { /// Checks and returns `true` if distance to historical block exceeds the provided limit. fn check_distance_against_limit(&self, limit: u64) -> ProviderResult { - let tip = self - .tx - .cursor_read::()? - .last()? - .map(|(tip, _)| tip) - .or_else(|| { - self.static_file_provider.get_highest_static_file_block(StaticFileSegment::Headers) - }) - .ok_or(ProviderError::BestBlockNotFound)?; + let tip = self.provider.last_block_number()?; Ok(tip.saturating_sub(self.block_number) > limit) } @@ -145,7 +136,9 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { ); } - Ok(HashedPostState::from_reverts(self.tx, self.block_number)?) + Ok(HashedPostState::from_reverts::< + ::KeyHasher, + >(self.tx(), self.block_number)?) } /// Retrieve revert hashed storage for this history provider and target address. @@ -162,7 +155,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { ); } - Ok(HashedStorage::from_reverts(self.tx, address, self.block_number)?) + Ok(HashedStorage::from_reverts(self.tx(), address, self.block_number)?) } fn history_info( @@ -174,7 +167,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { where T: Table, { - let mut cursor = self.tx.cursor_read::()?; + let mut cursor = self.tx().cursor_read::()?; // Lookup the history chunk in the history index. 
If the key does not appear in the
// index, the first chunk for the next key will be returned so we filter out chunks that
@@ -247,13 +240,21 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { } }

-impl AccountReader for HistoricalStateProviderRef<'_, TX> {
+impl HistoricalStateProviderRef<'_, Provider> {
+ fn tx(&self) -> &Provider::Tx {
+ self.provider.tx_ref()
+ }
+}
+
+impl AccountReader
+ for HistoricalStateProviderRef<'_, Provider>
+{
/// Get basic account information.
fn basic_account(&self, address: Address) -> ProviderResult> {
match self.account_history_lookup(address)? {
HistoryInfo::NotYetWritten => Ok(None),
HistoryInfo::InChangeset(changeset_block_number) => Ok(self
- .tx
+ .tx()
.cursor_dup_read::()?
.seek_by_key_subkey(changeset_block_number, address)?
.filter(|acc| acc.address == address)
@@ -263,21 +264,18 @@ impl AccountReader for HistoricalStateProviderRef<'_, TX> { })?
.info),
HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => {
- Ok(self.tx.get::(address)?)
+ Ok(self.tx().get::(address)?)
}
}
}
}

-impl BlockHashReader for HistoricalStateProviderRef<'_, TX> {
+impl BlockHashReader
+ for HistoricalStateProviderRef<'_, Provider>
+{
/// Get block hash by number.
fn block_hash(&self, number: u64) -> ProviderResult> {
- self.static_file_provider.get_with_static_file_or_database(
- StaticFileSegment::Headers,
- number,
- |static_file| static_file.block_hash(number),
- || Ok(self.tx.get::(number)?),
- )
+ self.provider.block_hash(number)
}

fn canonical_hashes_range(
&self,
start: BlockNumber,
end: BlockNumber,
) -> ProviderResult> {
- self.static_file_provider.get_range_with_static_file_or_database(
- StaticFileSegment::Headers,
- start..end,
- |static_file, range, _| static_file.canonical_hashes_range(range.start, range.end),
- |range, _| {
- self.tx
- .cursor_read::()
- .map(|mut cursor| {
- cursor
- .walk_range(range)?
- .map(|result| result.map(|(_, hash)| hash).map_err(Into::into))
- .collect::>>()
- })?
- .map_err(Into::into) - }, - |_| true, - ) + self.provider.canonical_hashes_range(start, end) } } -impl StateRootProvider for HistoricalStateProviderRef<'_, TX> { +impl StateRootProvider + for HistoricalStateProviderRef<'_, Provider> +{ fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { let mut revert_state = self.revert_state()?; revert_state.extend(hashed_state); - StateRoot::overlay_root(self.tx, revert_state) + StateRoot::overlay_root(self.tx(), revert_state) .map_err(|err| ProviderError::Database(err.into())) } fn state_root_from_nodes(&self, mut input: TrieInput) -> ProviderResult { input.prepend(self.revert_state()?); - StateRoot::overlay_root_from_nodes(self.tx, input) + StateRoot::overlay_root_from_nodes(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } @@ -325,7 +309,7 @@ impl StateRootProvider for HistoricalStateProviderRef<'_, TX> { ) -> ProviderResult<(B256, TrieUpdates)> { let mut revert_state = self.revert_state()?; revert_state.extend(hashed_state); - StateRoot::overlay_root_with_updates(self.tx, revert_state) + StateRoot::overlay_root_with_updates(self.tx(), revert_state) .map_err(|err| ProviderError::Database(err.into())) } @@ -334,12 +318,14 @@ impl StateRootProvider for HistoricalStateProviderRef<'_, TX> { mut input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { input.prepend(self.revert_state()?); - StateRoot::overlay_root_from_nodes_with_updates(self.tx, input) + StateRoot::overlay_root_from_nodes_with_updates(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } } -impl StorageRootProvider for HistoricalStateProviderRef<'_, TX> { +impl StorageRootProvider + for HistoricalStateProviderRef<'_, Provider> +{ fn storage_root( &self, address: Address, @@ -347,7 +333,7 @@ impl StorageRootProvider for HistoricalStateProviderRef<'_, TX> { ) -> ProviderResult { let mut revert_storage = self.revert_storage(address)?; revert_storage.extend(&hashed_storage); - StorageRoot::overlay_root(self.tx, address, revert_storage) + StorageRoot::overlay_root(self.tx(), address, revert_storage) .map_err(|err| ProviderError::Database(err.into())) } @@ -359,12 +345,26 @@ impl StorageRootProvider for HistoricalStateProviderRef<'_, TX> { ) -> ProviderResult { let mut revert_storage = self.revert_storage(address)?; revert_storage.extend(&hashed_storage); - StorageProof::overlay_storage_proof(self.tx, address, slot, revert_storage) + StorageProof::overlay_storage_proof(self.tx(), address, slot, revert_storage) + .map_err(Into::::into) + } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + let mut revert_storage = self.revert_storage(address)?; + revert_storage.extend(&hashed_storage); + StorageProof::overlay_storage_multiproof(self.tx(), address, slots, revert_storage) .map_err(Into::::into) } } -impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { +impl StateProofProvider + for HistoricalStateProviderRef<'_, Provider> +{ /// Get account and storage proofs. 
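// --- Illustrative sketch (not part of the diff): the refactor pattern running
// through these impls. Instead of holding a raw `TX` plus a static-file
// provider, the ref type now borrows a `Provider` and reaches the transaction
// through a `tx()` accessor, so block hashes and tip lookups can delegate to
// the provider. `Db`/`StateView` are stand-ins, not reth types.
trait Db {
    type Tx;
    fn tx_ref(&self) -> &Self::Tx;
}

struct StateView<'a, P>(&'a P);

impl<'a, P: Db> StateView<'a, P> {
    fn tx(&self) -> &P::Tx {
        self.0.tx_ref()
    }
}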
fn proof( &self, @@ -373,7 +373,7 @@ impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { slots: &[B256], ) -> ProviderResult { input.prepend(self.revert_state()?); - Proof::overlay_account_proof(self.tx, input, address, slots) + Proof::overlay_account_proof(self.tx(), input, address, slots) .map_err(Into::::into) } @@ -383,7 +383,7 @@ impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { targets: HashMap>, ) -> ProviderResult { input.prepend(self.revert_state()?); - Proof::overlay_multiproof(self.tx, input, targets).map_err(Into::::into) + Proof::overlay_multiproof(self.tx(), input, targets).map_err(Into::::into) } fn witness( @@ -392,11 +392,23 @@ impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { target: HashedPostState, ) -> ProviderResult> { input.prepend(self.revert_state()?); - TrieWitness::overlay_witness(self.tx, input, target).map_err(Into::::into) + TrieWitness::overlay_witness(self.tx(), input, target).map_err(Into::::into) } } -impl StateProvider for HistoricalStateProviderRef<'_, TX> { +impl HashedPostStateProvider + for HistoricalStateProviderRef<'_, Provider> +{ + fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::< + ::KeyHasher, + >(bundle_state.state()) + } +} + +impl + StateProvider for HistoricalStateProviderRef<'_, Provider> +{ /// Get storage. fn storage( &self, @@ -406,7 +418,7 @@ impl StateProvider for HistoricalStateProviderRef<'_, TX> { match self.storage_history_lookup(address, storage_key)? { HistoryInfo::NotYetWritten => Ok(None), HistoryInfo::InChangeset(changeset_block_number) => Ok(Some( - self.tx + self.tx() .cursor_dup_read::()? .seek_by_key_subkey((changeset_block_number, address).into(), storage_key)? .filter(|entry| entry.key == storage_key) @@ -418,7 +430,7 @@ impl StateProvider for HistoricalStateProviderRef<'_, TX> { .value, )), HistoryInfo::InPlainState | HistoryInfo::MaybeInPlainState => Ok(self - .tx + .tx() .cursor_dup_read::()? .seek_by_key_subkey(address, storage_key)? .filter(|entry| entry.key == storage_key) @@ -429,32 +441,34 @@ impl StateProvider for HistoricalStateProviderRef<'_, TX> { /// Get account code by its hash fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { - self.tx.get::(code_hash).map_err(Into::into) + self.tx().get::(code_hash).map_err(Into::into) } } +impl StateCommitmentProvider + for HistoricalStateProviderRef<'_, Provider> +{ + type StateCommitment = Provider::StateCommitment; +} + /// State provider for a given block number. /// For more detailed description, see [`HistoricalStateProviderRef`]. #[derive(Debug)] -pub struct HistoricalStateProvider { - /// Database transaction - tx: TX, +pub struct HistoricalStateProvider { + /// Database provider. + provider: Provider, /// State at the block number is the main indexer of the state. block_number: BlockNumber, /// Lowest blocks at which different parts of the state are available. 
lowest_available_blocks: LowestAvailableBlocks, - /// Static File provider - static_file_provider: StaticFileProvider, } -impl HistoricalStateProvider { +impl + HistoricalStateProvider +{ /// Create new `StateProvider` for historical block number - pub fn new( - tx: TX, - block_number: BlockNumber, - static_file_provider: StaticFileProvider, - ) -> Self { - Self { tx, block_number, lowest_available_blocks: Default::default(), static_file_provider } + pub fn new(provider: Provider, block_number: BlockNumber) -> Self { + Self { provider, block_number, lowest_available_blocks: Default::default() } } /// Set the lowest block number at which the account history is available. @@ -477,18 +491,23 @@ impl HistoricalStateProvider { /// Returns a new provider that takes the `TX` as reference #[inline(always)] - fn as_ref(&self) -> HistoricalStateProviderRef<'_, TX> { + const fn as_ref(&self) -> HistoricalStateProviderRef<'_, Provider> { HistoricalStateProviderRef::new_with_lowest_available_blocks( - &self.tx, + &self.provider, self.block_number, self.lowest_available_blocks, - self.static_file_provider.clone(), ) } } +impl StateCommitmentProvider + for HistoricalStateProvider +{ + type StateCommitment = Provider::StateCommitment; +} + // Delegates all provider impls to [HistoricalStateProviderRef] -delegate_provider_impls!(HistoricalStateProvider where [TX: DbTx]); +delegate_provider_impls!(HistoricalStateProvider where [Provider: DBProvider + BlockNumReader + BlockHashReader + StateCommitmentProvider]); /// Lowest blocks at which different parts of the state are available. /// They may be [Some] if pruning is enabled. @@ -524,7 +543,6 @@ mod tests { providers::state::historical::{HistoryInfo, LowestAvailableBlocks}, test_utils::create_test_provider_factory, AccountReader, HistoricalStateProvider, HistoricalStateProviderRef, StateProvider, - StaticFileProviderFactory, }; use alloy_primitives::{address, b256, Address, B256, U256}; use reth_db::{tables, BlockNumberList}; @@ -533,6 +551,10 @@ mod tests { transaction::{DbTx, DbTxMut}, }; use reth_primitives::{Account, StorageEntry}; + use reth_storage_api::{ + BlockHashReader, BlockNumReader, DBProvider, DatabaseProviderFactory, + StateCommitmentProvider, + }; use reth_storage_errors::provider::ProviderError; const ADDRESS: Address = address!("0000000000000000000000000000000000000001"); @@ -541,7 +563,9 @@ mod tests { const fn assert_state_provider() {} #[allow(dead_code)] - const fn assert_historical_state_provider() { + const fn assert_historical_state_provider< + T: DBProvider + BlockNumReader + BlockHashReader + StateCommitmentProvider, + >() { assert_state_provider::>(); } @@ -549,7 +573,6 @@ mod tests { fn history_provider_get_account() { let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap().into_tx(); - let static_file_provider = factory.static_file_provider(); tx.put::( ShardedKey { key: ADDRESS, highest_block_number: 7 }, @@ -609,63 +632,46 @@ mod tests { tx.put::(HIGHER_ADDRESS, higher_acc_plain).unwrap(); tx.commit().unwrap(); - let tx = factory.provider().unwrap().into_tx(); + let db = factory.provider().unwrap(); // run + assert_eq!(HistoricalStateProviderRef::new(&db, 1).basic_account(ADDRESS), Ok(None)); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1, static_file_provider.clone()) - .basic_account(ADDRESS), - Ok(None) - ); - assert_eq!( - HistoricalStateProviderRef::new(&tx, 2, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 2).basic_account(ADDRESS), 
Ok(Some(acc_at3)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 3, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 3).basic_account(ADDRESS), Ok(Some(acc_at3)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 4, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 4).basic_account(ADDRESS), Ok(Some(acc_at7)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 7, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 7).basic_account(ADDRESS), Ok(Some(acc_at7)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 9, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 9).basic_account(ADDRESS), Ok(Some(acc_at10)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 10, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 10).basic_account(ADDRESS), Ok(Some(acc_at10)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 11, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 11).basic_account(ADDRESS), Ok(Some(acc_at15)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 16, static_file_provider.clone()) - .basic_account(ADDRESS), + HistoricalStateProviderRef::new(&db, 16).basic_account(ADDRESS), Ok(Some(acc_plain)) ); + assert_eq!(HistoricalStateProviderRef::new(&db, 1).basic_account(HIGHER_ADDRESS), Ok(None)); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1, static_file_provider.clone()) - .basic_account(HIGHER_ADDRESS), - Ok(None) - ); - assert_eq!( - HistoricalStateProviderRef::new(&tx, 1000, static_file_provider) - .basic_account(HIGHER_ADDRESS), + HistoricalStateProviderRef::new(&db, 1000).basic_account(HIGHER_ADDRESS), Ok(Some(higher_acc_plain)) ); } @@ -674,7 +680,6 @@ mod tests { fn history_provider_get_storage() { let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap().into_tx(); - let static_file_provider = factory.static_file_provider(); tx.put::( StorageShardedKey { @@ -721,57 +726,44 @@ mod tests { tx.put::(HIGHER_ADDRESS, higher_entry_plain).unwrap(); tx.commit().unwrap(); - let tx = factory.provider().unwrap().into_tx(); + let db = factory.provider().unwrap(); // run + assert_eq!(HistoricalStateProviderRef::new(&db, 0).storage(ADDRESS, STORAGE), Ok(None)); assert_eq!( - HistoricalStateProviderRef::new(&tx, 0, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), - Ok(None) - ); - assert_eq!( - HistoricalStateProviderRef::new(&tx, 3, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 3).storage(ADDRESS, STORAGE), Ok(Some(U256::ZERO)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 4, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 4).storage(ADDRESS, STORAGE), Ok(Some(entry_at7.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 7, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 7).storage(ADDRESS, STORAGE), Ok(Some(entry_at7.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 9, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 9).storage(ADDRESS, STORAGE), Ok(Some(entry_at10.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 10, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 
10).storage(ADDRESS, STORAGE), Ok(Some(entry_at10.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 11, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 11).storage(ADDRESS, STORAGE), Ok(Some(entry_at15.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 16, static_file_provider.clone()) - .storage(ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 16).storage(ADDRESS, STORAGE), Ok(Some(entry_plain.value)) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1, static_file_provider.clone()) - .storage(HIGHER_ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 1).storage(HIGHER_ADDRESS, STORAGE), Ok(None) ); assert_eq!( - HistoricalStateProviderRef::new(&tx, 1000, static_file_provider) - .storage(HIGHER_ADDRESS, STORAGE), + HistoricalStateProviderRef::new(&db, 1000).storage(HIGHER_ADDRESS, STORAGE), Ok(Some(higher_entry_plain.value)) ); } @@ -779,19 +771,17 @@ mod tests { #[test] fn history_provider_unavailable() { let factory = create_test_provider_factory(); - let tx = factory.provider_rw().unwrap().into_tx(); - let static_file_provider = factory.static_file_provider(); + let db = factory.database_provider_rw().unwrap(); // provider block_number < lowest available block number, // i.e. state at provider block is pruned let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( - &tx, + &db, 2, LowestAvailableBlocks { account_history_block_number: Some(3), storage_history_block_number: Some(3), }, - static_file_provider.clone(), ); assert_eq!( provider.account_history_lookup(ADDRESS), @@ -805,13 +795,12 @@ mod tests { // provider block_number == lowest available block number, // i.e. state at provider block is available let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( - &tx, + &db, 2, LowestAvailableBlocks { account_history_block_number: Some(2), storage_history_block_number: Some(2), }, - static_file_provider.clone(), ); assert_eq!(provider.account_history_lookup(ADDRESS), Ok(HistoryInfo::MaybeInPlainState)); assert_eq!( @@ -822,13 +811,12 @@ mod tests { // provider block_number == lowest available block number, // i.e. 
state at provider block is available let provider = HistoricalStateProviderRef::new_with_lowest_available_blocks( - &tx, + &db, 2, LowestAvailableBlocks { account_history_block_number: Some(1), storage_history_block_number: Some(1), }, - static_file_provider, ); assert_eq!(provider.account_history_lookup(ADDRESS), Ok(HistoryInfo::MaybeInPlainState)); assert_eq!( diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index fdcbfc4937f..bdb6de1e569 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -1,62 +1,58 @@ use crate::{ - providers::{state::macros::delegate_provider_impls, StaticFileProvider}, - AccountReader, BlockHashReader, StateProvider, StateRootProvider, + providers::state::macros::delegate_provider_impls, AccountReader, BlockHashReader, + HashedPostStateProvider, StateProvider, StateRootProvider, }; use alloy_primitives::{ map::{HashMap, HashSet}, Address, BlockNumber, Bytes, StorageKey, StorageValue, B256, }; use reth_db::tables; -use reth_db_api::{ - cursor::{DbCursorRO, DbDupCursorRO}, - transaction::DbTx, +use reth_db_api::{cursor::DbDupCursorRO, transaction::DbTx}; +use reth_primitives::{Account, Bytecode}; +use reth_storage_api::{ + DBProvider, StateCommitmentProvider, StateProofProvider, StorageRootProvider, }; -use reth_primitives::{Account, Bytecode, StaticFileSegment}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{ proof::{Proof, StorageProof}, updates::TrieUpdates, witness::TrieWitness, - AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, + AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageMultiProof, + StorageRoot, TrieInput, }; use reth_trie_db::{ DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, - DatabaseTrieWitness, + DatabaseTrieWitness, StateCommitment, }; /// State provider over latest state that takes tx reference. +/// +/// Wraps a [`DBProvider`] to get access to database. #[derive(Debug)] -pub struct LatestStateProviderRef<'b, TX: DbTx> { - /// database transaction - tx: &'b TX, - /// Static File provider - static_file_provider: StaticFileProvider, -} +pub struct LatestStateProviderRef<'b, Provider>(&'b Provider); -impl<'b, TX: DbTx> LatestStateProviderRef<'b, TX> { +impl<'b, Provider: DBProvider> LatestStateProviderRef<'b, Provider> { /// Create new state provider - pub const fn new(tx: &'b TX, static_file_provider: StaticFileProvider) -> Self { - Self { tx, static_file_provider } + pub const fn new(provider: &'b Provider) -> Self { + Self(provider) + } + + fn tx(&self) -> &Provider::Tx { + self.0.tx_ref() } } -impl AccountReader for LatestStateProviderRef<'_, TX> { +impl AccountReader for LatestStateProviderRef<'_, Provider> { /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { - self.tx.get::(address).map_err(Into::into) + self.tx().get::(address).map_err(Into::into) } } -impl BlockHashReader for LatestStateProviderRef<'_, TX> { +impl BlockHashReader for LatestStateProviderRef<'_, Provider> { /// Get block hash by number. 
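// --- Illustrative sketch (not part of the diff): the newtype shape adopted by
// `LatestStateProviderRef` above; a one-field tuple struct borrowing the
// provider and delegating all reads to it. Stand-in names, not reth's API.
struct Latest<'a, P>(&'a P);

impl<'a, P> Latest<'a, P> {
    // `const fn` mirrors the constructor in the diff.
    const fn new(provider: &'a P) -> Self {
        Self(provider)
    }
}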
fn block_hash(&self, number: u64) -> ProviderResult> { - self.static_file_provider.get_with_static_file_or_database( - StaticFileSegment::Headers, - number, - |static_file| static_file.block_hash(number), - || Ok(self.tx.get::(number)?), - ) + self.0.block_hash(number) } fn canonical_hashes_range( @@ -64,34 +60,20 @@ impl BlockHashReader for LatestStateProviderRef<'_, TX> { start: BlockNumber, end: BlockNumber, ) -> ProviderResult> { - self.static_file_provider.get_range_with_static_file_or_database( - StaticFileSegment::Headers, - start..end, - |static_file, range, _| static_file.canonical_hashes_range(range.start, range.end), - |range, _| { - self.tx - .cursor_read::() - .map(|mut cursor| { - cursor - .walk_range(range)? - .map(|result| result.map(|(_, hash)| hash).map_err(Into::into)) - .collect::>>() - })? - .map_err(Into::into) - }, - |_| true, - ) + self.0.canonical_hashes_range(start, end) } } -impl StateRootProvider for LatestStateProviderRef<'_, TX> { +impl StateRootProvider + for LatestStateProviderRef<'_, Provider> +{ fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { - StateRoot::overlay_root(self.tx, hashed_state) + StateRoot::overlay_root(self.tx(), hashed_state) .map_err(|err| ProviderError::Database(err.into())) } fn state_root_from_nodes(&self, input: TrieInput) -> ProviderResult { - StateRoot::overlay_root_from_nodes(self.tx, input) + StateRoot::overlay_root_from_nodes(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } @@ -99,7 +81,7 @@ impl StateRootProvider for LatestStateProviderRef<'_, TX> { &self, hashed_state: HashedPostState, ) -> ProviderResult<(B256, TrieUpdates)> { - StateRoot::overlay_root_with_updates(self.tx, hashed_state) + StateRoot::overlay_root_with_updates(self.tx(), hashed_state) .map_err(|err| ProviderError::Database(err.into())) } @@ -107,18 +89,20 @@ impl StateRootProvider for LatestStateProviderRef<'_, TX> { &self, input: TrieInput, ) -> ProviderResult<(B256, TrieUpdates)> { - StateRoot::overlay_root_from_nodes_with_updates(self.tx, input) + StateRoot::overlay_root_from_nodes_with_updates(self.tx(), input) .map_err(|err| ProviderError::Database(err.into())) } } -impl StorageRootProvider for LatestStateProviderRef<'_, TX> { +impl StorageRootProvider + for LatestStateProviderRef<'_, Provider> +{ fn storage_root( &self, address: Address, hashed_storage: HashedStorage, ) -> ProviderResult { - StorageRoot::overlay_root(self.tx, address, hashed_storage) + StorageRoot::overlay_root(self.tx(), address, hashed_storage) .map_err(|err| ProviderError::Database(err.into())) } @@ -128,19 +112,31 @@ impl StorageRootProvider for LatestStateProviderRef<'_, TX> { slot: B256, hashed_storage: HashedStorage, ) -> ProviderResult { - StorageProof::overlay_storage_proof(self.tx, address, slot, hashed_storage) + StorageProof::overlay_storage_proof(self.tx(), address, slot, hashed_storage) + .map_err(Into::::into) + } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult { + StorageProof::overlay_storage_multiproof(self.tx(), address, slots, hashed_storage) .map_err(Into::::into) } } -impl StateProofProvider for LatestStateProviderRef<'_, TX> { +impl StateProofProvider + for LatestStateProviderRef<'_, Provider> +{ fn proof( &self, input: TrieInput, address: Address, slots: &[B256], ) -> ProviderResult { - Proof::overlay_account_proof(self.tx, input, address, slots) + Proof::overlay_account_proof(self.tx(), input, address, slots) .map_err(Into::::into) } @@ 
-149,7 +145,7 @@ impl StateProofProvider for LatestStateProviderRef<'_, TX> { input: TrieInput, targets: HashMap>, ) -> ProviderResult { - Proof::overlay_multiproof(self.tx, input, targets).map_err(Into::::into) + Proof::overlay_multiproof(self.tx(), input, targets).map_err(Into::::into) } fn witness( @@ -157,18 +153,30 @@ impl StateProofProvider for LatestStateProviderRef<'_, TX> { input: TrieInput, target: HashedPostState, ) -> ProviderResult> { - TrieWitness::overlay_witness(self.tx, input, target).map_err(Into::::into) + TrieWitness::overlay_witness(self.tx(), input, target).map_err(Into::::into) } } -impl StateProvider for LatestStateProviderRef<'_, TX> { +impl HashedPostStateProvider + for LatestStateProviderRef<'_, Provider> +{ + fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::from_bundle_state::< + ::KeyHasher, + >(bundle_state.state()) + } +} + +impl StateProvider + for LatestStateProviderRef<'_, Provider> +{ /// Get storage. fn storage( &self, account: Address, storage_key: StorageKey, ) -> ProviderResult> { - let mut cursor = self.tx.cursor_dup_read::()?; + let mut cursor = self.tx().cursor_dup_read::()?; if let Some(entry) = cursor.seek_by_key_subkey(account, storage_key)? { if entry.key == storage_key { return Ok(Some(entry.value)) @@ -179,34 +187,39 @@ impl StateProvider for LatestStateProviderRef<'_, TX> { /// Get account code by its hash fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { - self.tx.get::(code_hash).map_err(Into::into) + self.tx().get::(code_hash).map_err(Into::into) } } +impl StateCommitmentProvider + for LatestStateProviderRef<'_, Provider> +{ + type StateCommitment = Provider::StateCommitment; +} + /// State provider for the latest state. #[derive(Debug)] -pub struct LatestStateProvider { - /// database transaction - db: TX, - /// Static File provider - static_file_provider: StaticFileProvider, -} +pub struct LatestStateProvider(Provider); -impl LatestStateProvider { +impl LatestStateProvider { /// Create new state provider - pub const fn new(db: TX, static_file_provider: StaticFileProvider) -> Self { - Self { db, static_file_provider } + pub const fn new(db: Provider) -> Self { + Self(db) } /// Returns a new provider that takes the `TX` as reference #[inline(always)] - fn as_ref(&self) -> LatestStateProviderRef<'_, TX> { - LatestStateProviderRef::new(&self.db, self.static_file_provider.clone()) + const fn as_ref(&self) -> LatestStateProviderRef<'_, Provider> { + LatestStateProviderRef::new(&self.0) } } +impl StateCommitmentProvider for LatestStateProvider { + type StateCommitment = Provider::StateCommitment; +} + // Delegates all provider impls to [LatestStateProviderRef] -delegate_provider_impls!(LatestStateProvider where [TX: DbTx]); +delegate_provider_impls!(LatestStateProvider where [Provider: DBProvider + BlockHashReader + StateCommitmentProvider]); #[cfg(test)] mod tests { @@ -214,7 +227,9 @@ mod tests { const fn assert_state_provider() {} #[allow(dead_code)] - const fn assert_latest_state_provider() { + const fn assert_latest_state_provider< + T: DBProvider + BlockHashReader + StateCommitmentProvider, + >() { assert_state_provider::>(); } } diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index b90924354c4..1fa15214e9a 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -50,12 +50,16 @@ macro_rules! 
delegate_provider_impls { StorageRootProvider $(where [$($generics)*])? { fn storage_root(&self, address: alloy_primitives::Address, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; fn storage_proof(&self, address: alloy_primitives::Address, slot: alloy_primitives::B256, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; + fn storage_multiproof(&self, address: alloy_primitives::Address, slots: &[alloy_primitives::B256], storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; } StateProofProvider $(where [$($generics)*])? { fn proof(&self, input: reth_trie::TrieInput, address: alloy_primitives::Address, slots: &[alloy_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; fn multiproof(&self, input: reth_trie::TrieInput, targets: alloy_primitives::map::HashMap>) -> reth_storage_errors::provider::ProviderResult; fn witness(&self, input: reth_trie::TrieInput, target: reth_trie::HashedPostState) -> reth_storage_errors::provider::ProviderResult>; } + HashedPostStateProvider $(where [$($generics)*])? { + fn hashed_post_state(&self, bundle_state: &revm::db::BundleState) -> reth_trie::HashedPostState; + } ); } } diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 8d1dbd117cf..8f2d002ab89 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -6,44 +6,58 @@ use crate::{ to_range, BlockHashReader, BlockNumReader, HeaderProvider, ReceiptProvider, TransactionsProvider, }; -use alloy_eips::BlockHashOrNumber; +use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chainspec::ChainInfo; -use reth_db::static_file::{HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}; -use reth_db_api::models::CompactU256; -use reth_primitives::{ - Header, Receipt, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, +use reth_db::{ + static_file::{ + BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, + TDWithHashMask, TotalDifficultyMask, TransactionMask, + }, + table::{Decompress, Value}, }; +use reth_node_types::NodePrimitives; +use reth_primitives::{transaction::recover_signers, SealedHeader, TransactionMeta}; +use reth_primitives_traits::SignedTransaction; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ + fmt::Debug, ops::{Deref, RangeBounds}, sync::Arc, }; /// Provider over a specific `NippyJar` and range. #[derive(Debug)] -pub struct StaticFileJarProvider<'a> { +pub struct StaticFileJarProvider<'a, N> { /// Main static file segment jar: LoadedJarRef<'a>, /// Another kind of static file segment to help query data from the main one. auxiliary_jar: Option>, + /// Metrics for the static files. 
metrics: Option>, + /// Node primitives + _pd: std::marker::PhantomData, } -impl<'a> Deref for StaticFileJarProvider<'a> { +impl<'a, N: NodePrimitives> Deref for StaticFileJarProvider<'a, N> { type Target = LoadedJarRef<'a>; fn deref(&self) -> &Self::Target { &self.jar } } -impl<'a> From> for StaticFileJarProvider<'a> { +impl<'a, N: NodePrimitives> From> for StaticFileJarProvider<'a, N> { fn from(value: LoadedJarRef<'a>) -> Self { - StaticFileJarProvider { jar: value, auxiliary_jar: None, metrics: None } + StaticFileJarProvider { + jar: value, + auxiliary_jar: None, + metrics: None, + _pd: Default::default(), + } } } -impl<'a> StaticFileJarProvider<'a> { +impl<'a, N: NodePrimitives> StaticFileJarProvider<'a, N> { /// Provides a cursor for more granular data access. pub fn cursor<'b>(&'b self) -> ProviderResult> where @@ -75,39 +89,44 @@ impl<'a> StaticFileJarProvider<'a> { } } -impl HeaderProvider for StaticFileJarProvider<'_> { - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { +impl> HeaderProvider for StaticFileJarProvider<'_, N> { + type Header = N::BlockHeader; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { Ok(self .cursor()? - .get_two::>(block_hash.into())? + .get_two::>(block_hash.into())? .filter(|(_, hash)| hash == block_hash) .map(|(header, _)| header)) } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.cursor()?.get_one::>(num.into()) + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } fn header_td(&self, block_hash: &BlockHash) -> ProviderResult> { Ok(self .cursor()? - .get_two::>(block_hash.into())? + .get_two::(block_hash.into())? .filter(|(_, hash)| hash == block_hash) .map(|(td, _)| td.into())) } fn header_td_by_number(&self, num: BlockNumber) -> ProviderResult> { - Ok(self.cursor()?.get_one::>(num.into())?.map(Into::into)) + Ok(self.cursor()?.get_one::(num.into())?.map(Into::into)) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { let range = to_range(range); let mut cursor = self.cursor()?; let mut headers = Vec::with_capacity((range.end - range.start) as usize); for num in range { - if let Some(header) = cursor.get_one::>(num.into())? { + if let Some(header) = cursor.get_one::>(num.into())? { headers.push(header); } } @@ -115,18 +134,21 @@ impl HeaderProvider for StaticFileJarProvider<'_> { Ok(headers) } - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>> { Ok(self .cursor()? - .get_two::>(number.into())? + .get_two::>(number.into())? .map(|(header, hash)| SealedHeader::new(header, hash))) } fn sealed_headers_while( &self, range: impl RangeBounds, - mut predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + mut predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { let range = to_range(range); let mut cursor = self.cursor()?; @@ -134,7 +156,7 @@ impl HeaderProvider for StaticFileJarProvider<'_> { for number in range { if let Some((header, hash)) = - cursor.get_two::>(number.into())? + cursor.get_two::>(number.into())? 
{ let sealed = SealedHeader::new(header, hash); if !predicate(&sealed) { @@ -147,9 +169,9 @@ impl HeaderProvider for StaticFileJarProvider<'_> { } } -impl BlockHashReader for StaticFileJarProvider<'_> { +impl BlockHashReader for StaticFileJarProvider<'_, N> { fn block_hash(&self, number: u64) -> ProviderResult> { - self.cursor()?.get_one::>(number.into()) + self.cursor()?.get_one::(number.into()) } fn canonical_hashes_range( @@ -161,7 +183,7 @@ impl BlockHashReader for StaticFileJarProvider<'_> { let mut hashes = Vec::with_capacity((end - start) as usize); for number in start..end { - if let Some(hash) = cursor.get_one::>(number.into())? { + if let Some(hash) = cursor.get_one::(number.into())? { hashes.push(hash) } } @@ -169,7 +191,7 @@ impl BlockHashReader for StaticFileJarProvider<'_> { } } -impl BlockNumReader for StaticFileJarProvider<'_> { +impl BlockNumReader for StaticFileJarProvider<'_, N> { fn chain_info(&self) -> ProviderResult { // Information on live database Err(ProviderError::UnsupportedProvider) @@ -189,45 +211,43 @@ impl BlockNumReader for StaticFileJarProvider<'_> { let mut cursor = self.cursor()?; Ok(cursor - .get_one::>((&hash).into())? + .get_one::((&hash).into())? .and_then(|res| (res == hash).then(|| cursor.number()).flatten())) } } -impl TransactionsProvider for StaticFileJarProvider<'_> { +impl> TransactionsProvider + for StaticFileJarProvider<'_, N> +{ + type Transaction = N::SignedTx; + fn transaction_id(&self, hash: TxHash) -> ProviderResult> { let mut cursor = self.cursor()?; Ok(cursor - .get_one::>((&hash).into())? - .and_then(|res| (res.hash() == hash).then(|| cursor.number()).flatten())) + .get_one::>((&hash).into())? + .and_then(|res| (res.trie_hash() == hash).then(|| cursor.number()).flatten())) } - fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { - Ok(self - .cursor()? - .get_one::>(num.into())? - .map(|tx| tx.with_hash())) + fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, num: TxNumber, - ) -> ProviderResult> { - self.cursor()?.get_one::>(num.into()) + ) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { - Ok(self - .cursor()? - .get_one::>((&hash).into())? - .map(|tx| tx.with_hash())) + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + self.cursor()?.get_one::>((&hash).into()) } fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { // Information required on indexing table [`tables::TransactionBlocks`] Err(ProviderError::UnsupportedProvider) } @@ -240,7 +260,7 @@ impl TransactionsProvider for StaticFileJarProvider<'_> { fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Related to indexing tables. Live database should get the tx_range and call static file // provider with `transactions_by_tx_range` instead. Err(ProviderError::UnsupportedProvider) @@ -249,7 +269,7 @@ impl TransactionsProvider for StaticFileJarProvider<'_> { fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Related to indexing tables. Live database should get the tx_range and call static file // provider with `transactions_by_tx_range` instead. 
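// A sketch of the delegation the comments above describe (hypothetical helper, not
// part of this change set): the live database resolves a block's tx-number range from
// its body indices, then queries the transactions segment by range. Assumes
// `StoredBlockBodyIndices::tx_num_range`, which yields
// `first_tx_num..first_tx_num + tx_count`.
//
//     fn txs_for_block<P: TransactionsProvider>(
//         indices: StoredBlockBodyIndices, // read from the live database
//         static_files: &P,
//     ) -> ProviderResult<Vec<P::Transaction>> {
//         static_files.transactions_by_tx_range(indices.tx_num_range())
//     }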
Err(ProviderError::UnsupportedProvider) @@ -258,15 +278,13 @@ impl TransactionsProvider for StaticFileJarProvider<'_> { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { let range = to_range(range); let mut cursor = self.cursor()?; let mut txes = Vec::with_capacity((range.end - range.start) as usize); for num in range { - if let Some(tx) = - cursor.get_one::>(num.into())? - { + if let Some(tx) = cursor.get_one::>(num.into())? { txes.push(tx) } } @@ -278,24 +296,27 @@ impl TransactionsProvider for StaticFileJarProvider<'_> { range: impl RangeBounds, ) -> ProviderResult> { let txs = self.transactions_by_tx_range(range)?; - TransactionSignedNoHash::recover_signers(&txs, txs.len()) - .ok_or(ProviderError::SenderRecoveryError) + recover_signers(&txs, txs.len()).ok_or(ProviderError::SenderRecoveryError) } fn transaction_sender(&self, num: TxNumber) -> ProviderResult> { Ok(self .cursor()? - .get_one::>(num.into())? + .get_one::>(num.into())? .and_then(|tx| tx.recover_signer())) } } -impl ReceiptProvider for StaticFileJarProvider<'_> { - fn receipt(&self, num: TxNumber) -> ProviderResult> { - self.cursor()?.get_one::>(num.into()) +impl> + ReceiptProvider for StaticFileJarProvider<'_, N> +{ + type Receipt = N::Receipt; + + fn receipt(&self, num: TxNumber) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(tx_static_file) = &self.auxiliary_jar { if let Some(num) = tx_static_file.transaction_id(hash)? { return self.receipt(num) @@ -304,7 +325,10 @@ impl ReceiptProvider for StaticFileJarProvider<'_> { Ok(None) } - fn receipts_by_block(&self, _block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + _block: BlockHashOrNumber, + ) -> ProviderResult>> { // Related to indexing tables. StaticFile should get the tx_range and call static file // provider with `receipt()` instead for each Err(ProviderError::UnsupportedProvider) @@ -313,13 +337,13 @@ impl ReceiptProvider for StaticFileJarProvider<'_> { fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { let range = to_range(range); let mut cursor = self.cursor()?; let mut receipts = Vec::with_capacity((range.end - range.start) as usize); for num in range { - if let Some(tx) = cursor.get_one::>(num.into())? { + if let Some(tx) = cursor.get_one::>(num.into())? 
{ receipts.push(tx) } } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index e233332a0e9..7af071299cd 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -4,10 +4,15 @@ use super::{ }; use crate::{ to_range, BlockHashReader, BlockNumReader, BlockReader, BlockSource, HeaderProvider, - ReceiptProvider, RequestsProvider, StageCheckpointReader, StatsReader, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, + ReceiptProvider, StageCheckpointReader, StatsReader, TransactionVariant, TransactionsProvider, + TransactionsProviderExt, WithdrawalsProvider, +}; +use alloy_consensus::Header; +use alloy_eips::{ + eip2718::Encodable2718, + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, }; -use alloy_eips::BlockHashOrNumber; use alloy_primitives::{keccak256, Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use dashmap::DashMap; use notify::{RecommendedWatcher, RecursiveMode, Watcher}; @@ -15,30 +20,35 @@ use parking_lot::RwLock; use reth_chainspec::{ChainInfo, ChainSpecProvider}; use reth_db::{ lockfile::StorageLock, - static_file::{iter_static_files, HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}, + static_file::{ + iter_static_files, BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, + StaticFileCursor, TDWithHashMask, TransactionMask, + }, + table::{Decompress, Value}, tables, }; use reth_db_api::{ - cursor::DbCursorRO, - models::{CompactU256, StoredBlockBodyIndices}, - table::Table, - transaction::DbTx, + cursor::DbCursorRO, models::StoredBlockBodyIndices, table::Table, transaction::DbTx, }; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; +use reth_node_types::{FullNodePrimitives, NodePrimitives}; use reth_primitives::{ static_file::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, }, - Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, Withdrawal, - Withdrawals, + transaction::recover_signers, + BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, + StaticFileSegment, TransactionMeta, TransactionSigned, }; +use reth_primitives_traits::SignedTransaction; use reth_stages_types::{PipelineTarget, StageId}; use reth_storage_api::DBProvider; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, + fmt::Debug, + marker::PhantomData, ops::{Deref, Range, RangeBounds, RangeInclusive}, path::{Path, PathBuf}, sync::{mpsc, Arc}, @@ -74,10 +84,16 @@ impl StaticFileAccess { } /// [`StaticFileProvider`] manages all existing [`StaticFileJarProvider`]. -#[derive(Debug, Clone)] -pub struct StaticFileProvider(pub(crate) Arc); +#[derive(Debug)] +pub struct StaticFileProvider(pub(crate) Arc>); -impl StaticFileProvider { +impl Clone for StaticFileProvider { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +impl StaticFileProvider { /// Creates a new [`StaticFileProvider`]. 
fn new(path: impl AsRef, access: StaticFileAccess) -> ProviderResult { let provider = Self(Arc::new(StaticFileProviderInner::new(path, access)?)); @@ -143,6 +159,7 @@ impl StaticFileProvider { // appending/truncating rows for segment in event.paths { // Ensure it's a file with the .conf extension + #[allow(clippy::nonminimal_bool)] if !segment .extension() .is_some_and(|s| s.to_str() == Some(CONFIG_FILE_EXTENSION)) @@ -187,8 +204,8 @@ impl StaticFileProvider { } } -impl Deref for StaticFileProvider { - type Target = StaticFileProviderInner; +impl Deref for StaticFileProvider { + type Target = StaticFileProviderInner; fn deref(&self) -> &Self::Target { &self.0 @@ -197,7 +214,7 @@ impl Deref for StaticFileProvider { /// [`StaticFileProviderInner`] manages all existing [`StaticFileJarProvider`]. #[derive(Debug)] -pub struct StaticFileProviderInner { +pub struct StaticFileProviderInner { /// Maintains a map which allows for concurrent access to different `NippyJars`, over different /// segments and ranges. map: DashMap<(BlockNumber, StaticFileSegment), LoadedJar>, @@ -208,7 +225,8 @@ pub struct StaticFileProviderInner { /// Directory where `static_files` are located path: PathBuf, /// Maintains a writer set of [`StaticFileSegment`]. - writers: StaticFileWriters, + writers: StaticFileWriters, + /// Metrics for the static files. metrics: Option>, /// Access rights of the provider. access: StaticFileAccess, @@ -216,9 +234,11 @@ pub struct StaticFileProviderInner { blocks_per_file: u64, /// Write lock for when access is [`StaticFileAccess::RW`]. _lock_file: Option, + /// Node primitives + _pd: PhantomData, } -impl StaticFileProviderInner { +impl StaticFileProviderInner { /// Creates a new [`StaticFileProviderInner`]. fn new(path: impl AsRef, access: StaticFileAccess) -> ProviderResult { let _lock_file = if access.is_read_write() { @@ -237,6 +257,7 @@ impl StaticFileProviderInner { access, blocks_per_file: DEFAULT_BLOCKS_PER_STATIC_FILE, _lock_file, + _pd: Default::default(), }; Ok(provider) @@ -253,7 +274,7 @@ impl StaticFileProviderInner { } } -impl StaticFileProvider { +impl StaticFileProvider { /// Set a custom number of blocks per file. #[cfg(any(test, feature = "test-utils"))] pub fn with_custom_blocks_per_file(self, blocks_per_file: u64) -> Self { @@ -319,7 +340,7 @@ impl StaticFileProvider { segment: StaticFileSegment, block: BlockNumber, path: Option<&Path>, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_segment_provider( segment, || self.get_segment_ranges_from_block(segment, block), @@ -334,7 +355,7 @@ impl StaticFileProvider { segment: StaticFileSegment, tx: TxNumber, path: Option<&Path>, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_segment_provider( segment, || self.get_segment_ranges_from_transaction(segment, tx), @@ -351,7 +372,7 @@ impl StaticFileProvider { segment: StaticFileSegment, fn_range: impl Fn() -> Option, path: Option<&Path>, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // If we have a path, then get the block range from its name. // Otherwise, check `self.available_static_files` let block_range = match path { @@ -422,12 +443,12 @@ impl StaticFileProvider { &self, segment: StaticFileSegment, fixed_block_range: &SegmentRangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult> { let key = (fixed_block_range.end(), segment); // Avoid using `entry` directly to avoid a write lock in the common case. 
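// A sketch of the read-then-write pattern referenced above (illustrative helper, not
// part of this change set): `DashMap::entry` always takes a shard write lock, so the
// common case of an already-cached jar should be served with a plain read and only
// escalate on a miss.
//
//     fn cached_or_insert<K, V, F>(map: &dashmap::DashMap<K, V>, key: K, load: F) -> V
//     where
//         K: Eq + std::hash::Hash,
//         V: Clone,
//         F: FnOnce() -> V,
//     {
//         if let Some(hit) = map.get(&key) {
//             return hit.clone() // read lock only on the hot path
//         }
//         map.entry(key).or_insert_with(load).clone() // write lock only on a miss
//     }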
trace!(target: "provider::static_file", ?segment, ?fixed_block_range, "Getting provider"); - let mut provider: StaticFileJarProvider<'_> = if let Some(jar) = self.map.get(&key) { + let mut provider: StaticFileJarProvider<'_, N> = if let Some(jar) = self.map.get(&key) { trace!(target: "provider::static_file", ?segment, ?fixed_block_range, "Jar found in cache"); jar.into() } else { @@ -920,7 +941,7 @@ impl StaticFileProvider { pub fn find_static_file( &self, segment: StaticFileSegment, - func: impl Fn(StaticFileJarProvider<'_>) -> ProviderResult>, + func: impl Fn(StaticFileJarProvider<'_, N>) -> ProviderResult>, ) -> ProviderResult> { if let Some(highest_block) = self.get_highest_static_file_block(segment) { let mut range = self.find_fixed_range(highest_block); @@ -1090,7 +1111,7 @@ impl StaticFileProvider { }; if static_file_upper_bound - .map_or(false, |static_file_upper_bound| static_file_upper_bound >= number) + .is_some_and(|static_file_upper_bound| static_file_upper_bound >= number) { return fetch_from_static_file(self) } @@ -1163,30 +1184,35 @@ impl StaticFileProvider { /// Helper trait to manage different [`StaticFileProviderRW`] of an `Arc ProviderResult>; + ) -> ProviderResult>; /// Returns a mutable reference to a [`StaticFileProviderRW`] of the latest /// [`StaticFileSegment`]. fn latest_writer( &self, segment: StaticFileSegment, - ) -> ProviderResult>; + ) -> ProviderResult>; /// Commits all changes of all [`StaticFileProviderRW`] of all [`StaticFileSegment`]. fn commit(&self) -> ProviderResult<()>; } -impl StaticFileWriter for StaticFileProvider { +impl StaticFileWriter for StaticFileProvider { + type Primitives = N; + fn get_writer( &self, block: BlockNumber, segment: StaticFileSegment, - ) -> ProviderResult> { + ) -> ProviderResult> { if self.access.is_read_only() { return Err(ProviderError::ReadOnlyStaticFileAccess) } @@ -1200,7 +1226,7 @@ impl StaticFileWriter for StaticFileProvider { fn latest_writer( &self, segment: StaticFileSegment, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_writer(self.get_highest_static_file_block(segment).unwrap_or_default(), segment) } @@ -1209,12 +1235,14 @@ impl StaticFileWriter for StaticFileProvider { } } -impl HeaderProvider for StaticFileProvider { - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { +impl> HeaderProvider for StaticFileProvider { + type Header = N::BlockHeader; + + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.find_static_file(StaticFileSegment::Headers, |jar_provider| { Ok(jar_provider .cursor()? - .get_two::>(block_hash.into())? + .get_two::>(block_hash.into())? .and_then(|(header, hash)| { if &hash == block_hash { return Some(header) @@ -1224,7 +1252,7 @@ impl HeaderProvider for StaticFileProvider { }) } - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.header_by_number(num)) .or_else(|err| { @@ -1240,7 +1268,7 @@ impl HeaderProvider for StaticFileProvider { self.find_static_file(StaticFileSegment::Headers, |jar_provider| { Ok(jar_provider .cursor()? - .get_two::>(block_hash.into())? + .get_two::(block_hash.into())? 
.and_then(|(td, hash)| (&hash == block_hash).then_some(td.0))) }) } @@ -1257,16 +1285,22 @@ impl HeaderProvider for StaticFileProvider { }) } - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { self.fetch_range_with_predicate( StaticFileSegment::Headers, to_range(range), - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::>(number.into()), |_| true, ) } - fn sealed_header(&self, num: BlockNumber) -> ProviderResult> { + fn sealed_header( + &self, + num: BlockNumber, + ) -> ProviderResult>> { self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None) .and_then(|provider| provider.sealed_header(num)) .or_else(|err| { @@ -1281,14 +1315,14 @@ impl HeaderProvider for StaticFileProvider { fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { self.fetch_range_with_predicate( StaticFileSegment::Headers, to_range(range), |cursor, number| { Ok(cursor - .get_two::>(number.into())? + .get_two::>(number.into())? .map(|(header, hash)| SealedHeader::new(header, hash))) }, predicate, @@ -1296,7 +1330,7 @@ impl HeaderProvider for StaticFileProvider { } } -impl BlockHashReader for StaticFileProvider { +impl BlockHashReader for StaticFileProvider { fn block_hash(&self, num: u64) -> ProviderResult> { self.get_segment_provider_from_block(StaticFileSegment::Headers, num, None)?.block_hash(num) } @@ -1309,14 +1343,18 @@ impl BlockHashReader for StaticFileProvider { self.fetch_range_with_predicate( StaticFileSegment::Headers, start..end, - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::(number.into()), |_| true, ) } } -impl ReceiptProvider for StaticFileProvider { - fn receipt(&self, num: TxNumber) -> ProviderResult> { +impl> ReceiptProvider + for StaticFileProvider +{ + type Receipt = N::Receipt; + + fn receipt(&self, num: TxNumber) -> ProviderResult> { self.get_segment_provider_from_transaction(StaticFileSegment::Receipts, num, None) .and_then(|provider| provider.receipt(num)) .or_else(|err| { @@ -1328,31 +1366,36 @@ impl ReceiptProvider for StaticFileProvider { }) } - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(num) = self.transaction_id(hash)? { return self.receipt(num) } Ok(None) } - fn receipts_by_block(&self, _block: BlockHashOrNumber) -> ProviderResult>> { + fn receipts_by_block( + &self, + _block: BlockHashOrNumber, + ) -> ProviderResult>> { unreachable!() } fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.fetch_range_with_predicate( StaticFileSegment::Receipts, to_range(range), - |cursor, number| cursor.get_one::>(number.into()), + |cursor, number| cursor.get_one::>(number.into()), |_| true, ) } } -impl TransactionsProviderExt for StaticFileProvider { +impl> + TransactionsProviderExt for StaticFileProvider +{ fn transaction_hashes_by_range( &self, tx_range: Range, @@ -1363,13 +1406,13 @@ impl TransactionsProviderExt for StaticFileProvider { // chunks are too big, there will be idle threads waiting for work. Choosing an // arbitrary smaller value to make sure it doesn't happen. 
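// Sizing note for the `Vec::with_capacity` below: with one mpsc channel per chunk, a
// range of `n` transactions split into chunks of size `c` needs ceil(n / c) channels,
// which is what `tx_range_size.div_ceil(chunk_size)` computes. For illustration:
//
//     let (tx_range_size, chunk_size) = (250_usize, 100_usize);
//     // ceil(250 / 100) == 3: two full chunks plus one partial chunk
//     assert_eq!(tx_range_size.div_ceil(chunk_size), 3);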
let chunk_size = 100; - let mut channels = Vec::new(); // iterator over the chunks let chunks = tx_range .clone() .step_by(chunk_size) .map(|start| start..std::cmp::min(start + chunk_size as u64, tx_range.end)); + let mut channels = Vec::with_capacity(tx_range_size.div_ceil(chunk_size)); for chunk_range in chunks { let (channel_tx, channel_rx) = mpsc::channel(); @@ -1387,7 +1430,7 @@ impl TransactionsProviderExt for StaticFileProvider { chunk_range, |cursor, number| { Ok(cursor - .get_one::>(number.into())? + .get_one::>(number.into())? .map(|transaction| { rlp_buf.clear(); let _ = channel_tx @@ -1413,13 +1456,17 @@ impl TransactionsProviderExt for StaticFileProvider { } } -impl TransactionsProvider for StaticFileProvider { +impl> TransactionsProvider + for StaticFileProvider +{ + type Transaction = N::SignedTx; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.find_static_file(StaticFileSegment::Transactions, |jar_provider| { let mut cursor = jar_provider.cursor()?; if cursor - .get_one::>((&tx_hash).into())? - .and_then(|tx| (tx.hash() == tx_hash).then_some(tx)) + .get_one::>((&tx_hash).into())? + .and_then(|tx| (tx.trie_hash() == tx_hash).then_some(tx)) .is_some() { Ok(cursor.number()) @@ -1429,7 +1476,7 @@ impl TransactionsProvider for StaticFileProvider { }) } - fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, num, None) .and_then(|provider| provider.transaction_by_id(num)) .or_else(|err| { @@ -1441,12 +1488,12 @@ impl TransactionsProvider for StaticFileProvider { }) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, num: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { self.get_segment_provider_from_transaction(StaticFileSegment::Transactions, num, None) - .and_then(|provider| provider.transaction_by_id_no_hash(num)) + .and_then(|provider| provider.transaction_by_id_unhashed(num)) .or_else(|err| { if let ProviderError::MissingStaticFileTx(_, _) = err { Ok(None) @@ -1456,20 +1503,19 @@ impl TransactionsProvider for StaticFileProvider { }) } - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.find_static_file(StaticFileSegment::Transactions, |jar_provider| { Ok(jar_provider .cursor()? - .get_one::>((&hash).into())? - .map(|tx| tx.with_hash()) - .and_then(|tx| (tx.hash_ref() == &hash).then_some(tx))) + .get_one::>((&hash).into())? 
+ .and_then(|tx| (tx.trie_hash() == hash).then_some(tx))) }) } fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1482,7 +1528,7 @@ impl TransactionsProvider for StaticFileProvider { fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1490,7 +1536,7 @@ impl TransactionsProvider for StaticFileProvider { fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1498,13 +1544,11 @@ impl TransactionsProvider for StaticFileProvider { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { self.fetch_range_with_predicate( StaticFileSegment::Transactions, to_range(range), - |cursor, number| { - cursor.get_one::>(number.into()) - }, + |cursor, number| cursor.get_one::>(number.into()), |_| true, ) } @@ -1514,18 +1558,17 @@ impl TransactionsProvider for StaticFileProvider { range: impl RangeBounds, ) -> ProviderResult> { let txes = self.transactions_by_tx_range(range)?; - TransactionSignedNoHash::recover_signers(&txes, txes.len()) - .ok_or(ProviderError::SenderRecoveryError) + recover_signers(&txes, txes.len()).ok_or(ProviderError::SenderRecoveryError) } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - Ok(self.transaction_by_id_no_hash(id)?.and_then(|tx| tx.recover_signer())) + Ok(self.transaction_by_id_unhashed(id)?.and_then(|tx| tx.recover_signer())) } } /* Cannot be successfully implemented but must exist for trait requirements */ -impl BlockNumReader for StaticFileProvider { +impl BlockNumReader for StaticFileProvider { fn chain_info(&self) -> ProviderResult { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) @@ -1547,37 +1590,45 @@ impl BlockNumReader for StaticFileProvider { } } -impl BlockReader for StaticFileProvider { +impl> BlockReader + for StaticFileProvider +{ + type Block = N::Block; + fn find_block_by_hash( &self, _hash: B256, _source: BlockSource, - ) -> ProviderResult> { + ) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn block(&self, _id: BlockHashOrNumber) -> ProviderResult> { + fn block(&self, _id: BlockHashOrNumber) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn pending_block(&self) -> ProviderResult> { + fn pending_block(&self) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn pending_block_and_receipts(&self) -> ProviderResult)>> { + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { + fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1591,7 +1642,7 @@ impl 
BlockReader for StaticFileProvider { &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1600,12 +1651,12 @@ impl BlockReader for StaticFileProvider { &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { + fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1613,19 +1664,19 @@ impl BlockReader for StaticFileProvider { fn block_with_senders_range( &self, _range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { Err(ProviderError::UnsupportedProvider) } fn sealed_block_with_senders_range( &self, _range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { Err(ProviderError::UnsupportedProvider) } } -impl WithdrawalsProvider for StaticFileProvider { +impl WithdrawalsProvider for StaticFileProvider { fn withdrawals_by_block( &self, _id: BlockHashOrNumber, @@ -1641,35 +1692,25 @@ impl WithdrawalsProvider for StaticFileProvider { } } -impl RequestsProvider for StaticFileProvider { - fn requests_by_block( - &self, - _id: BlockHashOrNumber, - _timestamp: u64, - ) -> ProviderResult> { - // Required data not present in static_files - Err(ProviderError::UnsupportedProvider) - } -} - -impl StatsReader for StaticFileProvider { +impl StatsReader for StaticFileProvider { fn count_entries(&self) -> ProviderResult { match T::NAME { tables::CanonicalHeaders::NAME | - tables::Headers::NAME | + tables::Headers::
::NAME | tables::HeaderTerminalDifficulties::NAME => Ok(self .get_highest_static_file_block(StaticFileSegment::Headers) .map(|block| block + 1) .unwrap_or_default() as usize), - tables::Receipts::NAME => Ok(self + tables::Receipts::::NAME => Ok(self .get_highest_static_file_tx(StaticFileSegment::Receipts) .map(|receipts| receipts + 1) .unwrap_or_default() as usize), - tables::Transactions::NAME => Ok(self + tables::Transactions::::NAME => Ok(self .get_highest_static_file_tx(StaticFileSegment::Transactions) .map(|txs| txs + 1) - .unwrap_or_default() as usize), + .unwrap_or_default() + as usize), _ => Err(ProviderError::UnsupportedProvider), } } @@ -1677,11 +1718,14 @@ impl StatsReader for StaticFileProvider { /// Calculates the tx hash for the given transaction and its id. #[inline] -fn calculate_hash( - entry: (TxNumber, TransactionSignedNoHash), +fn calculate_hash( + entry: (TxNumber, T), rlp_buf: &mut Vec, -) -> Result<(B256, TxNumber), Box> { +) -> Result<(B256, TxNumber), Box> +where + T: Encodable2718, +{ let (tx_id, tx) = entry; - tx.transaction.encode_with_signature(&tx.signature, rlp_buf, false); + tx.encode_2718(rlp_buf); Ok((keccak256(rlp_buf), tx_id)) } diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 52eb6ed666e..71c6bf755e2 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -55,8 +55,10 @@ impl Deref for LoadedJar { #[cfg(test)] mod tests { use super::*; - use crate::{test_utils::create_test_provider_factory, HeaderProvider}; - use alloy_consensus::Transaction; + use crate::{ + test_utils::create_test_provider_factory, HeaderProvider, StaticFileProviderFactory, + }; + use alloy_consensus::{Header, Transaction}; use alloy_primitives::{BlockHash, TxNumber, B256, U256}; use rand::seq::SliceRandom; use reth_db::{ @@ -66,7 +68,7 @@ mod tests { use reth_db_api::transaction::DbTxMut; use reth_primitives::{ static_file::{find_fixed_range, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE}, - Header, Receipt, TransactionSignedNoHash, + EthPrimitives, Receipt, TransactionSigned, }; use reth_storage_api::{ReceiptProvider, TransactionsProvider}; use reth_testing_utils::generators::{self, random_header_range}; @@ -116,7 +118,7 @@ mod tests { // Create StaticFile { - let manager = StaticFileProvider::read_write(static_files_path.path()).unwrap(); + let manager = factory.static_file_provider(); let mut writer = manager.latest_writer(StaticFileSegment::Headers).unwrap(); let mut td = U256::ZERO; @@ -131,7 +133,7 @@ mod tests { // Use providers to query Header data and compare if it matches { let db_provider = factory.provider().unwrap(); - let manager = StaticFileProvider::read_write(static_files_path.path()).unwrap(); + let manager = db_provider.static_file_provider(); let jar_provider = manager .get_segment_provider_from_block(StaticFileSegment::Headers, 0, Some(&static_file)) .unwrap(); @@ -170,7 +172,7 @@ mod tests { // [ Headers Creation and Commit ] { - let sf_rw = StaticFileProvider::read_write(&static_dir) + let sf_rw = StaticFileProvider::::read_write(&static_dir) .expect("Failed to create static file provider") .with_custom_blocks_per_file(blocks_per_file); @@ -189,8 +191,8 @@ mod tests { // Helper function to prune headers and validate truncation results fn prune_and_validate( - writer: &mut StaticFileProviderRWRefMut<'_>, - sf_rw: &StaticFileProvider, + writer: &mut StaticFileProviderRWRefMut<'_, 
EthPrimitives>, + sf_rw: &StaticFileProvider, static_dir: impl AsRef, prune_count: u64, expected_tip: Option, @@ -302,20 +304,20 @@ mod tests { /// * `10..=19`: no txs/receipts /// * `20..=29`: only one tx/receipt fn setup_tx_based_scenario( - sf_rw: &StaticFileProvider, + sf_rw: &StaticFileProvider, segment: StaticFileSegment, blocks_per_file: u64, ) { fn setup_block_ranges( - writer: &mut StaticFileProviderRWRefMut<'_>, - sf_rw: &StaticFileProvider, + writer: &mut StaticFileProviderRWRefMut<'_, EthPrimitives>, + sf_rw: &StaticFileProvider, segment: StaticFileSegment, block_range: &Range, mut tx_count: u64, next_tx_num: &mut u64, ) { let mut receipt = Receipt::default(); - let mut tx = TransactionSignedNoHash::default(); + let mut tx = TransactionSigned::default(); for block in block_range.clone() { writer.increment_block(block).unwrap(); @@ -413,7 +415,7 @@ mod tests { #[allow(clippy::too_many_arguments)] fn prune_and_validate( - sf_rw: &StaticFileProvider, + sf_rw: &StaticFileProvider, static_dir: impl AsRef, segment: StaticFileSegment, prune_count: u64, diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 8c31c021f21..b7f60c16442 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -2,18 +2,21 @@ use super::{ manager::StaticFileProviderInner, metrics::StaticFileProviderMetrics, StaticFileProvider, }; use crate::providers::static_file::metrics::StaticFileProviderOperation; +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber, TxNumber, U256}; use parking_lot::{lock_api::RwLockWriteGuard, RawRwLock, RwLock}; use reth_codecs::Compact; use reth_db_api::models::CompactU256; use reth_nippy_jar::{NippyJar, NippyJarError, NippyJarWriter}; +use reth_node_types::NodePrimitives; use reth_primitives::{ static_file::{SegmentHeader, SegmentRangeInclusive}, - Header, Receipt, StaticFileSegment, TransactionSignedNoHash, + Receipt, StaticFileSegment, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ borrow::Borrow, + fmt::Debug, path::{Path, PathBuf}, sync::{Arc, Weak}, time::Instant, @@ -24,19 +27,29 @@ use tracing::debug; /// /// WARNING: Trying to use more than one writer for the same segment type **will result in a /// deadlock**. -#[derive(Debug, Default)] -pub(crate) struct StaticFileWriters { - headers: RwLock>, - transactions: RwLock>, - receipts: RwLock>, +#[derive(Debug)] +pub(crate) struct StaticFileWriters { + headers: RwLock>>, + transactions: RwLock>>, + receipts: RwLock>>, +} + +impl Default for StaticFileWriters { + fn default() -> Self { + Self { + headers: Default::default(), + transactions: Default::default(), + receipts: Default::default(), + } + } } -impl StaticFileWriters { +impl StaticFileWriters { pub(crate) fn get_or_create( &self, segment: StaticFileSegment, - create_fn: impl FnOnce() -> ProviderResult, - ) -> ProviderResult> { + create_fn: impl FnOnce() -> ProviderResult>, + ) -> ProviderResult> { let mut write_guard = match segment { StaticFileSegment::Headers => self.headers.write(), StaticFileSegment::Transactions => self.transactions.write(), @@ -63,19 +76,19 @@ impl StaticFileWriters { /// Mutable reference to a [`StaticFileProviderRW`] behind a [`RwLockWriteGuard`]. 
#[derive(Debug)] -pub struct StaticFileProviderRWRefMut<'a>( - pub(crate) RwLockWriteGuard<'a, RawRwLock, Option>, +pub struct StaticFileProviderRWRefMut<'a, N>( + pub(crate) RwLockWriteGuard<'a, RawRwLock, Option>>, ); -impl std::ops::DerefMut for StaticFileProviderRWRefMut<'_> { +impl std::ops::DerefMut for StaticFileProviderRWRefMut<'_, N> { fn deref_mut(&mut self) -> &mut Self::Target { // This is always created by [`StaticFileWriters::get_or_create`] self.0.as_mut().expect("static file writer provider should be init") } } -impl std::ops::Deref for StaticFileProviderRWRefMut<'_> { - type Target = StaticFileProviderRW; +impl std::ops::Deref for StaticFileProviderRWRefMut<'_, N> { + type Target = StaticFileProviderRW; fn deref(&self) -> &Self::Target { // This is always created by [`StaticFileWriters::get_or_create`] @@ -85,11 +98,11 @@ impl std::ops::Deref for StaticFileProviderRWRefMut<'_> { #[derive(Debug)] /// Extends `StaticFileProvider` with writing capabilities -pub struct StaticFileProviderRW { +pub struct StaticFileProviderRW { /// Reference back to the provider. We need [Weak] here because [`StaticFileProviderRW`] is /// stored in a [`dashmap::DashMap`] inside the parent [`StaticFileProvider`].which is an /// [Arc]. If we were to use an [Arc] here, we would create a reference cycle. - reader: Weak, + reader: Weak>, /// A [`NippyJarWriter`] instance. writer: NippyJarWriter, /// Path to opened file. @@ -103,7 +116,7 @@ pub struct StaticFileProviderRW { prune_on_commit: Option<(u64, Option)>, } -impl StaticFileProviderRW { +impl StaticFileProviderRW { /// Creates a new [`StaticFileProviderRW`] for a [`StaticFileSegment`]. /// /// Before use, transaction based segments should ensure the block end range is the expected @@ -111,7 +124,7 @@ impl StaticFileProviderRW { pub fn new( segment: StaticFileSegment, block: BlockNumber, - reader: Weak, + reader: Weak>, metrics: Option>, ) -> ProviderResult { let (writer, data_path) = Self::open(segment, block, reader.clone(), metrics.clone())?; @@ -132,7 +145,7 @@ impl StaticFileProviderRW { fn open( segment: StaticFileSegment, block: u64, - reader: Weak, + reader: Weak>, metrics: Option>, ) -> ProviderResult<(NippyJarWriter, PathBuf)> { let start = Instant::now(); @@ -307,10 +320,7 @@ impl StaticFileProviderRW { /// and create the next one if we are past the end range. /// /// Returns the current [`BlockNumber`] as seen in the static file. 
- pub fn increment_block( - &mut self, - expected_block_number: BlockNumber, - ) -> ProviderResult { + pub fn increment_block(&mut self, expected_block_number: BlockNumber) -> ProviderResult<()> { let segment = self.writer.user_header().segment(); self.check_next_block_number(expected_block_number)?; @@ -337,7 +347,7 @@ impl StaticFileProviderRW { } } - let block = self.writer.user_header_mut().increment_block(); + self.writer.user_header_mut().increment_block(); if let Some(metrics) = &self.metrics { metrics.record_segment_operation( segment, @@ -346,7 +356,7 @@ impl StaticFileProviderRW { ); } - Ok(block) + Ok(()) } /// Verifies if the incoming block number matches the next expected block number @@ -488,16 +498,24 @@ impl StaticFileProviderRW { &mut self, tx_num: TxNumber, value: V, - ) -> ProviderResult { - if self.writer.user_header().tx_range().is_none() { - self.writer.user_header_mut().set_tx_range(tx_num, tx_num); - } else { + ) -> ProviderResult<()> { + if let Some(range) = self.writer.user_header().tx_range() { + let next_tx = range.end() + 1; + if next_tx != tx_num { + return Err(ProviderError::UnexpectedStaticFileTxNumber( + self.writer.user_header().segment(), + tx_num, + next_tx, + )) + } self.writer.user_header_mut().increment_tx(); + } else { + self.writer.user_header_mut().set_tx_range(tx_num, tx_num); } self.append_column(value)?; - Ok(self.writer.user_header().tx_end().expect("qed")) + Ok(()) } /// Appends header to static file. @@ -508,16 +526,19 @@ impl StaticFileProviderRW { /// Returns the current [`BlockNumber`] as seen in the static file. pub fn append_header( &mut self, - header: &Header, + header: &N::BlockHeader, total_difficulty: U256, hash: &BlockHash, - ) -> ProviderResult { + ) -> ProviderResult<()> + where + N::BlockHeader: Compact, + { let start = Instant::now(); self.ensure_no_queued_prune()?; debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Headers); - let block_number = self.increment_block(header.number)?; + self.increment_block(header.number())?; self.append_column(header)?; self.append_column(CompactU256::from(total_difficulty))?; @@ -531,7 +552,7 @@ impl StaticFileProviderRW { ); } - Ok(block_number) + Ok(()) } /// Appends transaction to static file. @@ -540,16 +561,15 @@ impl StaticFileProviderRW { /// empty blocks and this function wouldn't be called. /// /// Returns the current [`TxNumber`] as seen in the static file. - pub fn append_transaction( - &mut self, - tx_num: TxNumber, - tx: &TransactionSignedNoHash, - ) -> ProviderResult { + pub fn append_transaction(&mut self, tx_num: TxNumber, tx: &N::SignedTx) -> ProviderResult<()> + where + N::SignedTx: Compact, + { let start = Instant::now(); self.ensure_no_queued_prune()?; debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Transactions); - let result = self.append_with_tx_number(tx_num, tx)?; + self.append_with_tx_number(tx_num, tx)?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( @@ -559,7 +579,7 @@ impl StaticFileProviderRW { ); } - Ok(result) + Ok(()) } /// Appends receipt to static file. @@ -568,16 +588,15 @@ impl StaticFileProviderRW { /// empty blocks and this function wouldn't be called. /// /// Returns the current [`TxNumber`] as seen in the static file. 
- pub fn append_receipt( - &mut self, - tx_num: TxNumber, - receipt: &Receipt, - ) -> ProviderResult { + pub fn append_receipt(&mut self, tx_num: TxNumber, receipt: &N::Receipt) -> ProviderResult<()> + where + N::Receipt: Compact, + { let start = Instant::now(); self.ensure_no_queued_prune()?; debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Receipts); - let result = self.append_with_tx_number(tx_num, receipt)?; + self.append_with_tx_number(tx_num, receipt)?; if let Some(metrics) = &self.metrics { metrics.record_segment_operation( @@ -587,7 +606,7 @@ impl StaticFileProviderRW { ); } - Ok(result) + Ok(()) } /// Appends multiple receipts to the static file. @@ -615,7 +634,8 @@ impl StaticFileProviderRW { for receipt_result in receipts_iter { let (tx_num, receipt) = receipt_result?; - tx_number = self.append_with_tx_number(tx_num, receipt.borrow())?; + self.append_with_tx_number(tx_num, receipt.borrow())?; + tx_number = tx_num; count += 1; } @@ -750,7 +770,7 @@ impl StaticFileProviderRW { Ok(()) } - fn reader(&self) -> StaticFileProvider { + fn reader(&self) -> StaticFileProvider { Self::upgrade_provider_to_strong_reference(&self.reader) } @@ -763,8 +783,8 @@ impl StaticFileProviderRW { /// active. In reality, it's impossible to detach the [`StaticFileProviderRW`] from the /// [`StaticFileProvider`]. fn upgrade_provider_to_strong_reference( - provider: &Weak, - ) -> StaticFileProvider { + provider: &Weak>, + ) -> StaticFileProvider { provider.upgrade().map(StaticFileProvider).expect("StaticFileProvider is dropped") } diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 57e111d674b..b5c0ba7a120 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -1,24 +1,27 @@ //! 
Dummy blocks and data for tests -use crate::{DatabaseProviderRW, ExecutionOutcome}; -use alloy_consensus::TxLegacy; +use crate::{DBProvider, DatabaseProviderRW, ExecutionOutcome}; +use alloy_consensus::{TxLegacy, EMPTY_OMMER_ROOT_HASH}; use alloy_primitives::{ - b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, Parity, Sealable, - TxKind, B256, U256, + b256, hex_literal::hex, map::HashMap, Address, BlockNumber, Bytes, Log, TxKind, B256, U256, }; +use alloy_consensus::Header; +use alloy_eips::eip4895::{Withdrawal, Withdrawals}; +use alloy_primitives::PrimitiveSignature as Signature; use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; +use reth_node_types::NodeTypes; use reth_primitives::{ - Account, BlockBody, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - Signature, Transaction, TransactionSigned, TxType, Withdrawal, Withdrawals, + Account, BlockBody, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, + TransactionSigned, TxType, }; use reth_trie::root::{state_root_unhashed, storage_root_unhashed}; use revm::{db::BundleState, primitives::AccountInfo}; use std::{str::FromStr, sync::LazyLock}; /// Assert genesis block -pub fn assert_genesis_block( - provider: &DatabaseProviderRW, +pub fn assert_genesis_block( + provider: &DatabaseProviderRW, g: SealedBlock, ) { let n = g.number; @@ -40,7 +43,6 @@ pub fn assert_genesis_block( ); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); - assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); assert_eq!(tx.table::().unwrap(), vec![]); @@ -66,8 +68,7 @@ pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| SealedBlo Header { parent_hash: hex!("c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94f") .into(), - ommers_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") - .into(), + ommers_hash: EMPTY_OMMER_ROOT_HASH, beneficiary: hex!("2adc25665018aa1fe0e6bc666dac8fc2697ff9ba").into(), state_root: hex!("50554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583d") .into(), @@ -87,9 +88,14 @@ pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| SealedBlo hex!("cf7b274520720b50e6a4c3e5c4d553101f44945396827705518ce17cb7219a42").into(), ), body: BlockBody { - transactions: vec![TransactionSigned { - hash: hex!("3541dd1d17e76adeb25dcf2b0a9b60a1669219502e58dcf26a2beafbfb550397").into(), - signature: Signature::new( + transactions: vec![TransactionSigned::new( + Transaction::Legacy(TxLegacy { + gas_price: 10, + gas_limit: 400_000, + to: TxKind::Call(hex!("095e7baea6a6c7c4c2dfeb977efac326af552d87").into()), + ..Default::default() + }), + Signature::new( U256::from_str( "51983300959770368863831494747186777928121405155922056726144551509338672451120", ) @@ -98,15 +104,10 @@ pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| SealedBlo "29056683545955299640297374067888344259176096769870751649153779895496107008675", ) .unwrap(), - Parity::NonEip155(false), + false, ), - transaction: Transaction::Legacy(TxLegacy { - gas_price: 10, - gas_limit: 400_000, - to: TxKind::Call(hex!("095e7baea6a6c7c4c2dfeb977efac326af552d87").into()), - ..Default::default() - }), - }], + b256!("3541dd1d17e76adeb25dcf2b0a9b60a1669219502e58dcf26a2beafbfb550397"), + )], ..Default::default() }, }); @@ -170,7 +171,7 @@ fn bundle_state_root(execution_outcome: &ExecutionOutcome) -> B256 { ( address, ( - 
Into::::into(info.clone()), + Into::::into(info), storage_root_unhashed( account .storage @@ -232,9 +233,7 @@ fn block1(number: BlockNumber) -> (SealedBlockWithSenders, ExecutionOutcome) { header.number = number; header.state_root = state_root; header.parent_hash = B256::ZERO; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x30; 20])] }, execution_outcome) } @@ -298,9 +297,7 @@ fn block2( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } @@ -364,9 +361,7 @@ fn block3( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } @@ -455,9 +450,7 @@ fn block4( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } @@ -543,9 +536,7 @@ fn block5( header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - block.header = SealedHeader::new(header, seal); + block.header = SealedHeader::seal(header); (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index c7c94b939ac..abe1096a1bc 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -1,39 +1,43 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, - AccountReader, BlockExecutionReader, BlockHashReader, BlockIdReader, BlockNumReader, - BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, DatabaseProvider, - EvmEnvProvider, HeaderProvider, ReceiptProviderIdExt, RequestsProvider, StateProvider, - StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, - TransactionsProvider, WithdrawalsProvider, + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + ChainSpecProvider, ChangeSetReader, DatabaseProvider, EthStorage, EvmEnvProvider, + HeaderProvider, ReceiptProviderIdExt, StateProvider, StateProviderBox, StateProviderFactory, + StateReader, StateRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, +}; +use alloy_consensus::{constants::EMPTY_ROOT_HASH, Header}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumberOrTag, }; -use alloy_consensus::constants::EMPTY_ROOT_HASH; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; 
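// The test-block edits above repeatedly replace the manual `seal_slow()` /
// `into_parts()` sequence with the one-step `SealedHeader::seal` constructor.
// A minimal sketch of the equivalence, assuming the `Header` and `SealedHeader`
// types already imported in these files:

use alloy_consensus::Header;
use reth_primitives::SealedHeader;

fn seal_header(header: Header) -> SealedHeader {
    // Before: hash the header, split the sealed value, then reassemble it:
    //   let sealed = header.seal_slow();
    //   let (header, seal) = sealed.into_parts();
    //   SealedHeader::new(header, seal)
    // After: compute the hash and wrap the header in one call.
    SealedHeader::seal(header)
}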
use alloy_primitives::{ keccak256, map::{HashMap, HashSet}, - Address, BlockHash, BlockNumber, Bytes, Sealable, StorageKey, StorageValue, TxHash, TxNumber, - B256, U256, + Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, }; use parking_lot::Mutex; use reth_chainspec::{ChainInfo, ChainSpec}; use reth_db::mock::{DatabaseMock, TxMock}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_execution_types::ExecutionOutcome; +use reth_node_types::NodeTypes; use reth_primitives::{ - Account, Block, BlockWithSenders, Bytecode, GotExpected, Header, Receipt, SealedBlock, + Account, Block, BlockWithSenders, Bytecode, EthPrimitives, GotExpected, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawal, Withdrawals, }; +use reth_primitives_traits::SignedTransaction; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ - DatabaseProviderFactory, StageCheckpointReader, StateProofProvider, StorageRootProvider, + DatabaseProviderFactory, HashedPostStateProvider, StageCheckpointReader, + StateCommitmentProvider, StateProofProvider, StorageRootProvider, }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, - TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + StorageMultiProof, StorageProof, TrieInput, }; +use reth_trie_db::MerklePatriciaTrie; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ collections::BTreeMap, @@ -150,10 +154,25 @@ impl MockEthProvider { } } +/// Mock node. +#[derive(Debug)] +pub struct MockNode; + +impl NodeTypes for MockNode { + type Primitives = EthPrimitives; + type ChainSpec = ChainSpec; + type StateCommitment = MerklePatriciaTrie; + type Storage = EthStorage; +} + +impl StateCommitmentProvider for MockEthProvider { + type StateCommitment = ::StateCommitment; +} + impl DatabaseProviderFactory for MockEthProvider { type DB = DatabaseMock; - type Provider = DatabaseProvider; - type ProviderRW = DatabaseProvider; + type Provider = DatabaseProvider; + type ProviderRW = DatabaseProvider; fn database_provider_ro(&self) -> ProviderResult { Err(ConsistentViewError::Syncing { best_block: GotExpected::new(0, 0) }.into()) @@ -165,6 +184,8 @@ impl DatabaseProviderFactory for MockEthProvider { } impl HeaderProvider for MockEthProvider { + type Header = Header; + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { let lock = self.headers.lock(); Ok(lock.get(block_hash).cloned()) @@ -204,11 +225,7 @@ impl HeaderProvider for MockEthProvider { } fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { - Ok(self.header_by_number(number)?.map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - })) + Ok(self.header_by_number(number)?.map(SealedHeader::seal)) } fn sealed_headers_while( @@ -219,11 +236,7 @@ impl HeaderProvider for MockEthProvider { Ok(self .headers_range(range)? 
.into_iter() - .map(|h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) - }) + .map(SealedHeader::seal) .take_while(|h| predicate(h)) .collect()) } @@ -238,6 +251,8 @@ impl ChainSpecProvider for MockEthProvider { } impl TransactionsProvider for MockEthProvider { + type Transaction = TransactionSigned; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { let lock = self.blocks.lock(); let tx_number = lock @@ -249,7 +264,7 @@ impl TransactionsProvider for MockEthProvider { Ok(tx_number) } - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { let lock = self.blocks.lock(); let transaction = lock.values().flat_map(|block| &block.body.transactions).nth(id as usize).cloned(); @@ -257,16 +272,13 @@ impl TransactionsProvider for MockEthProvider { Ok(transaction) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); - let transaction = lock - .values() - .flat_map(|block| &block.body.transactions) - .nth(id as usize) - .map(|tx| Into::::into(tx.clone())); + let transaction = + lock.values().flat_map(|block| &block.body.transactions).nth(id as usize).cloned(); Ok(transaction) } @@ -280,7 +292,7 @@ impl TransactionsProvider for MockEthProvider { fn transaction_by_hash_with_meta( &self, hash: TxHash, - ) -> ProviderResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); for (block_hash, block) in lock.iter() { for (index, tx) in block.body.transactions.iter().enumerate() { @@ -316,14 +328,14 @@ impl TransactionsProvider for MockEthProvider { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(self.block(id)?.map(|b| b.body.transactions)) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // init btreemap so we can return in order let mut map = BTreeMap::new(); for (_, block) in self.blocks.lock().iter() { @@ -338,14 +350,14 @@ impl TransactionsProvider for MockEthProvider { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); let transactions = lock .values() .flat_map(|block| &block.body.transactions) .enumerate() .filter(|&(tx_number, _)| range.contains(&(tx_number as TxNumber))) - .map(|(_, tx)| tx.clone().into()) + .map(|(_, tx)| tx.clone()) .collect(); Ok(transactions) @@ -374,6 +386,8 @@ impl TransactionsProvider for MockEthProvider { } impl ReceiptProvider for MockEthProvider { + type Receipt = Receipt; + fn receipt(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } @@ -452,20 +466,22 @@ impl BlockNumReader for MockEthProvider { } impl BlockIdReader for MockEthProvider { - fn pending_block_num_hash(&self) -> ProviderResult> { + fn pending_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn safe_block_num_hash(&self) -> ProviderResult> { + fn safe_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn finalized_block_num_hash(&self) -> ProviderResult> { + fn finalized_block_num_hash(&self) -> ProviderResult> { Ok(None) } } impl BlockReader for MockEthProvider { + type Block = Block; + fn find_block_by_hash( &self, hash: B256, @@ -552,14 +568,7 @@ impl BlockReaderIdExt for MockEthProvider { } fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { - self.header_by_id(id)?.map_or_else( - || 
Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ) + self.header_by_id(id)?.map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))) } fn header_by_id(&self, id: BlockId) -> ProviderResult> { @@ -640,6 +649,15 @@ impl StorageRootProvider for MockEthProvider { ) -> ProviderResult { Ok(StorageProof::new(slot)) } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(StorageMultiProof::empty()) + } } impl StateProofProvider for MockEthProvider { @@ -669,6 +687,12 @@ impl StateProofProvider for MockEthProvider { } } +impl HashedPostStateProvider for MockEthProvider { + fn hashed_post_state(&self, _state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::default() + } +} + impl StateProvider for MockEthProvider { fn storage( &self, @@ -693,19 +717,6 @@ impl StateProvider for MockEthProvider { } impl EvmEnvProvider for MockEthProvider { - fn fill_env_at( - &self, - _cfg: &mut CfgEnvWithHandlerCfg, - _block_env: &mut BlockEnv, - _at: BlockHashOrNumber, - _evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
, - { - Ok(()) - } - fn fill_env_with_header( &self, _cfg: &mut CfgEnvWithHandlerCfg, @@ -749,18 +760,6 @@ impl StateProviderFactory for MockEthProvider { Ok(Box::new(self.clone())) } - fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { - Ok(Box::new(self.clone())) - } - - fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(self.clone())) - } - - fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(self.clone())) - } - fn state_by_block_number_or_tag( &self, number_or_tag: BlockNumberOrTag, @@ -787,6 +786,18 @@ impl StateProviderFactory for MockEthProvider { } } + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(self.clone())) + } + fn pending(&self) -> ProviderResult { Ok(Box::new(self.clone())) } @@ -809,16 +820,6 @@ impl WithdrawalsProvider for MockEthProvider { } } -impl RequestsProvider for MockEthProvider { - fn requests_by_block( - &self, - _id: BlockHashOrNumber, - _timestamp: u64, - ) -> ProviderResult> { - Ok(None) - } -} - impl ChangeSetReader for MockEthProvider { fn account_block_changeset( &self, @@ -828,16 +829,9 @@ impl ChangeSetReader for MockEthProvider { } } -impl BlockExecutionReader for MockEthProvider { - fn get_block_and_execution_range( - &self, - _range: RangeInclusive, - ) -> ProviderResult { - Ok(Chain::default()) - } -} - impl StateReader for MockEthProvider { + type Receipt = Receipt; + fn get_state(&self, _block: BlockNumber) -> ProviderResult> { Ok(None) } diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs index 2200781096d..2c3795573c2 100644 --- a/crates/storage/provider/src/test_utils/mod.rs +++ b/crates/storage/provider/src/test_utils/mod.rs @@ -1,4 +1,7 @@ -use crate::{providers::StaticFileProvider, HashingWriter, ProviderFactory, TrieWriter}; +use crate::{ + providers::{ProviderNodeTypes, StaticFileProvider}, + HashingWriter, ProviderFactory, TrieWriter, +}; use alloy_primitives::B256; use reth_chainspec::{ChainSpec, MAINNET}; use reth_db::{ @@ -6,7 +9,7 @@ use reth_db::{ DatabaseEnv, }; use reth_errors::ProviderResult; -use reth_node_types::{NodeTypesWithDB, NodeTypesWithDBAdapter}; +use reth_node_types::NodeTypesWithDBAdapter; use reth_primitives::{Account, StorageEntry}; use reth_trie::StateRoot; use reth_trie_db::DatabaseStateRoot; @@ -22,9 +25,11 @@ pub use reth_chain_state::test_utils::TestCanonStateSubscriptions; /// Mock [`reth_node_types::NodeTypes`] for testing. pub type MockNodeTypes = reth_node_types::AnyNodeTypesWithEngine< - (), + reth_primitives::EthPrimitives, reth_ethereum_engine_primitives::EthEngineTypes, reth_chainspec::ChainSpec, + reth_trie_db::MerklePatriciaTrie, + crate::EthStorage, >; /// Mock [`reth_node_types::NodeTypesWithDB`] for testing. @@ -50,7 +55,7 @@ pub fn create_test_provider_factory_with_chain_spec( } /// Inserts the genesis alloc from the provided chain spec into the trie. 
-pub fn insert_genesis>( +pub fn insert_genesis>( provider_factory: &ProviderFactory, chain_spec: Arc, ) -> ProviderResult { diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 0a205389c9b..3846313b9f4 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -4,7 +4,11 @@ use std::{ sync::Arc, }; -use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; +use alloy_consensus::Header; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumberOrTag, +}; use alloy_primitives::{ map::{HashMap, HashSet}, Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, @@ -18,13 +22,14 @@ use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_errors::ProviderError; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - Account, Block, BlockWithSenders, Bytecode, Header, Receipt, SealedBlock, + Account, Block, BlockWithSenders, Bytecode, EthPrimitives, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawal, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{StateProofProvider, StorageRootProvider}; +use reth_storage_api::{ + HashedPostStateProvider, NodePrimitivesProvider, StateProofProvider, StorageRootProvider, +}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, @@ -37,7 +42,7 @@ use crate::{ traits::{BlockSource, ReceiptProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, PruneCheckpointReader, - ReceiptProviderIdExt, RequestsProvider, StageCheckpointReader, StateProvider, StateProviderBox, + ReceiptProviderIdExt, StageCheckpointReader, StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, StaticFileProviderFactory, TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; @@ -89,6 +94,8 @@ impl BlockNumReader for NoopProvider { } impl BlockReader for NoopProvider { + type Block = Block; + fn find_block_by_hash( &self, hash: B256, @@ -175,43 +182,45 @@ impl BlockReaderIdExt for NoopProvider { } impl BlockIdReader for NoopProvider { - fn pending_block_num_hash(&self) -> ProviderResult> { + fn pending_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn safe_block_num_hash(&self) -> ProviderResult> { + fn safe_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn finalized_block_num_hash(&self) -> ProviderResult> { + fn finalized_block_num_hash(&self) -> ProviderResult> { Ok(None) } } impl TransactionsProvider for NoopProvider { + type Transaction = TransactionSigned; + fn transaction_id(&self, _tx_hash: TxHash) -> ProviderResult> { Ok(None) } - fn transaction_by_id(&self, _id: TxNumber) -> ProviderResult> { + fn transaction_by_id(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } - fn transaction_by_id_no_hash( + fn transaction_by_id_unhashed( &self, _id: TxNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(None) } - fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult> { + fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult> { Ok(None) } fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> 
ProviderResult> { + ) -> ProviderResult> { Ok(None) } @@ -222,21 +231,21 @@ impl TransactionsProvider for NoopProvider { fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(None) } fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(Vec::default()) } fn transactions_by_tx_range( &self, _range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult> { Ok(Vec::default()) } @@ -253,6 +262,7 @@ impl TransactionsProvider for NoopProvider { } impl ReceiptProvider for NoopProvider { + type Receipt = Receipt; fn receipt(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } @@ -276,6 +286,8 @@ impl ReceiptProvider for NoopProvider { impl ReceiptProviderIdExt for NoopProvider {} impl HeaderProvider for NoopProvider { + type Header = Header; + fn header(&self, _block_hash: &BlockHash) -> ProviderResult> { Ok(None) } @@ -365,6 +377,15 @@ impl StorageRootProvider for NoopProvider { ) -> ProviderResult { Ok(reth_trie::StorageProof::new(slot)) } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(reth_trie::StorageMultiProof::empty()) + } } impl StateProofProvider for NoopProvider { @@ -394,6 +415,12 @@ impl StateProofProvider for NoopProvider { } } +impl HashedPostStateProvider for NoopProvider { + fn hashed_post_state(&self, _bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::default() + } +} + impl StateProvider for NoopProvider { fn storage( &self, @@ -409,19 +436,6 @@ impl StateProvider for NoopProvider { } impl EvmEnvProvider for NoopProvider { - fn fill_env_at( - &self, - _cfg: &mut CfgEnvWithHandlerCfg, - _block_env: &mut BlockEnv, - _at: BlockHashOrNumber, - _evm_config: EvmConfig, - ) -> ProviderResult<()> - where - EvmConfig: ConfigureEvmEnv
, - { - Ok(()) - } - fn fill_env_with_header( &self, _cfg: &mut CfgEnvWithHandlerCfg, @@ -465,22 +479,6 @@ impl StateProviderFactory for NoopProvider { Ok(Box::new(*self)) } - fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { - Ok(Box::new(*self)) - } - - fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(*self)) - } - - fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { - Ok(Box::new(*self)) - } - - fn pending(&self) -> ProviderResult { - Ok(Box::new(*self)) - } - fn state_by_block_number_or_tag( &self, number_or_tag: BlockNumberOrTag, @@ -507,6 +505,22 @@ impl StateProviderFactory for NoopProvider { } } + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { + Ok(Box::new(*self)) + } + + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(*self)) + } + + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(*self)) + } + + fn pending(&self) -> ProviderResult { + Ok(Box::new(*self)) + } + fn pending_state_by_hash(&self, _block_hash: B256) -> ProviderResult> { Ok(Some(Box::new(*self))) } @@ -539,16 +553,6 @@ impl WithdrawalsProvider for NoopProvider { } } -impl RequestsProvider for NoopProvider { - fn requests_by_block( - &self, - _id: BlockHashOrNumber, - _timestamp: u64, - ) -> ProviderResult> { - Ok(None) - } -} - impl PruneCheckpointReader for NoopProvider { fn get_prune_checkpoint( &self, @@ -562,8 +566,12 @@ impl PruneCheckpointReader for NoopProvider { } } +impl NodePrimitivesProvider for NoopProvider { + type Primitives = EthPrimitives; +} + impl StaticFileProviderFactory for NoopProvider { - fn static_file_provider(&self) -> StaticFileProvider { + fn static_file_provider(&self) -> StaticFileProvider { StaticFileProvider::read_only(PathBuf::default(), false).unwrap() } } @@ -575,6 +583,8 @@ impl CanonStateSubscriptions for NoopProvider { } impl ForkChoiceSubscriptions for NoopProvider { + type Header = Header; + fn subscribe_safe_block(&self) -> ForkChoiceNotifications { let (_, rx) = watch::channel(None); ForkChoiceNotifications(rx) diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 8e3a54d86b9..d12f240e616 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -1,55 +1,143 @@ use alloy_primitives::BlockNumber; use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_node_types::NodePrimitives; use reth_primitives::SealedBlockWithSenders; -use reth_storage_api::BlockReader; +use reth_storage_api::NodePrimitivesProvider; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; -use std::ops::RangeInclusive; -/// BlockExecution Writer -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait BlockExecutionWriter: BlockWriter + Send + Sync { - /// Take range of blocks and its execution result - fn take_block_and_execution_range( +/// An enum that represents the storage location for a piece of data. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum StorageLocation { + /// Write only to static files. + StaticFiles, + /// Write only to the database. + Database, + /// Write to both the database and static files. + Both, +} + +impl StorageLocation { + /// Returns true if the storage location includes static files. 
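// A small usage sketch of the new `StorageLocation` enum and the two predicate
// helpers defined just below; `route_receipts` is a hypothetical illustration,
// not part of this change:

fn route_receipts(location: StorageLocation) {
    if location.database() {
        // append receipts to the database `Receipts` table
    }
    if location.static_files() {
        // append receipts to the receipts static-file segment
    }
    // `StorageLocation::Both` satisfies both predicates, so both sinks are written.
}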
+ pub const fn static_files(&self) -> bool { + matches!(self, Self::StaticFiles | Self::Both) + } + + /// Returns true if the storage location includes the database. + pub const fn database(&self) -> bool { + matches!(self, Self::Database | Self::Both) + } +} + +/// `BlockExecution` Writer +pub trait BlockExecutionWriter: + NodePrimitivesProvider> + BlockWriter + Send + Sync +{ + /// Take all of the blocks above the provided number and their execution result + /// + /// The passed block number will stay in the database. + /// + /// Accepts [`StorageLocation`] specifying from where should transactions and receipts be + /// removed. + fn take_block_and_execution_above( &self, - range: RangeInclusive, - ) -> ProviderResult; + block: BlockNumber, + remove_from: StorageLocation, + ) -> ProviderResult>; - /// Remove range of blocks and its execution result - fn remove_block_and_execution_range( + /// Remove all of the blocks above the provided number and their execution result + /// + /// The passed block number will stay in the database. + /// + /// Accepts [`StorageLocation`] specifying from where should transactions and receipts be + /// removed. + fn remove_block_and_execution_above( &self, - range: RangeInclusive, + block: BlockNumber, + remove_from: StorageLocation, ) -> ProviderResult<()>; } -/// BlockExecution Reader -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait BlockExecutionReader: BlockReader + Send + Sync { - /// Get range of blocks and its execution result - fn get_block_and_execution_range( +impl BlockExecutionWriter for &T { + fn take_block_and_execution_above( &self, - range: RangeInclusive, - ) -> ProviderResult; + block: BlockNumber, + remove_from: StorageLocation, + ) -> ProviderResult> { + (*self).take_block_and_execution_above(block, remove_from) + } + + fn remove_block_and_execution_above( + &self, + block: BlockNumber, + remove_from: StorageLocation, + ) -> ProviderResult<()> { + (*self).remove_block_and_execution_above(block, remove_from) + } } /// This just receives state, or [`ExecutionOutcome`], from the provider #[auto_impl::auto_impl(&, Arc, Box)] pub trait StateReader: Send + Sync { + /// Receipt type in [`ExecutionOutcome`]. + type Receipt: Send + Sync; + /// Get the [`ExecutionOutcome`] for the given block - fn get_state(&self, block: BlockNumber) -> ProviderResult>; + fn get_state( + &self, + block: BlockNumber, + ) -> ProviderResult>>; } /// Block Writer #[auto_impl::auto_impl(&, Arc, Box)] pub trait BlockWriter: Send + Sync { + /// The body this writer can write. + type Block: reth_primitives_traits::Block; + /// The receipt type for [`ExecutionOutcome`]. + type Receipt: Send + Sync; + /// Insert full block and make it canonical. Parent tx num and transition id is taken from /// parent block in database. /// /// Return [StoredBlockBodyIndices] that contains indices of the first and last transactions and /// transition in the block. - fn insert_block(&self, block: SealedBlockWithSenders) - -> ProviderResult; + /// + /// Accepts [`StorageLocation`] value which specifies where transactions and headers should be + /// written. + fn insert_block( + &self, + block: SealedBlockWithSenders, + write_to: StorageLocation, + ) -> ProviderResult; + + /// Appends a batch of block bodies extending the canonical chain. This is invoked during + /// `Bodies` stage and does not write to `TransactionHashNumbers` and `TransactionSenders` + /// tables which are populated on later stages. 
+ /// + /// Bodies are passed as [`Option`]s, if body is `None` the corresponding block is empty. + fn append_block_bodies( + &self, + bodies: Vec<(BlockNumber, Option<::Body>)>, + write_transactions_to: StorageLocation, + ) -> ProviderResult<()>; + + /// Removes all blocks above the given block number from the database. + /// + /// Note: This does not remove state or execution data. + fn remove_blocks_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult<()>; + + /// Removes all block bodies above the given block number from the database. + fn remove_bodies_above( + &self, + block: BlockNumber, + remove_transactions_from: StorageLocation, + ) -> ProviderResult<()>; /// Appends a batch of sealed blocks to the blockchain, including sender information, and /// updates the post-state. @@ -67,8 +155,8 @@ pub trait BlockWriter: Send + Sync { /// Returns `Ok(())` on success, or an error if any operation fails. fn append_blocks_with_state( &self, - blocks: Vec, - execution_outcome: ExecutionOutcome, + blocks: Vec>, + execution_outcome: ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, ) -> ProviderResult<()>; diff --git a/crates/storage/provider/src/traits/finalized_block.rs b/crates/storage/provider/src/traits/finalized_block.rs deleted file mode 100644 index 98a6d9d0e34..00000000000 --- a/crates/storage/provider/src/traits/finalized_block.rs +++ /dev/null @@ -1,23 +0,0 @@ -use alloy_primitives::BlockNumber; -use reth_errors::ProviderResult; - -/// Functionality to read the last known chain blocks from the database. -pub trait ChainStateBlockReader: Send + Sync { - /// Returns the last finalized block number. - /// - /// If no finalized block has been written yet, this returns `None`. - fn last_finalized_block_number(&self) -> ProviderResult>; - /// Returns the last safe block number. - /// - /// If no safe block has been written yet, this returns `None`. - fn last_safe_block_number(&self) -> ProviderResult>; -} - -/// Functionality to write the last known chain blocks to the database. -pub trait ChainStateBlockWriter: Send + Sync { - /// Saves the given finalized block number in the DB. - fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; - - /// Saves the given safe block number in the DB. - fn save_safe_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; -} diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 4998e974165..be485839f00 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -7,20 +7,26 @@ use crate::{ }; use reth_chain_state::{CanonStateSubscriptions, ForkChoiceSubscriptions}; use reth_chainspec::EthereumHardforks; -use reth_node_types::NodeTypesWithDB; +use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; +use reth_storage_api::NodePrimitivesProvider; /// Helper trait to unify all provider traits for simplicity. pub trait FullProvider: DatabaseProviderFactory + + NodePrimitivesProvider + StaticFileProviderFactory - + BlockReaderIdExt - + AccountReader + + BlockReaderIdExt< + Transaction = TxTy, + Block = BlockTy, + Receipt = ReceiptTy, + Header = HeaderTy, + > + AccountReader + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions - + ForkChoiceSubscriptions + + ForkChoiceSubscriptions
> + StageCheckpointReader + Clone + Unpin @@ -30,15 +36,20 @@ pub trait FullProvider: impl FullProvider for T where T: DatabaseProviderFactory + + NodePrimitivesProvider + StaticFileProviderFactory - + BlockReaderIdExt - + AccountReader + + BlockReaderIdExt< + Transaction = TxTy, + Block = BlockTy, + Receipt = ReceiptTy, + Header = HeaderTy, + > + AccountReader + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + ChangeSetReader + CanonStateSubscriptions - + ForkChoiceSubscriptions + + ForkChoiceSubscriptions
> + StageCheckpointReader + Clone + Unpin diff --git a/crates/storage/provider/src/traits/header_sync_gap.rs b/crates/storage/provider/src/traits/header_sync_gap.rs index 5ce7e119730..b572750d4a2 100644 --- a/crates/storage/provider/src/traits/header_sync_gap.rs +++ b/crates/storage/provider/src/traits/header_sync_gap.rs @@ -1,3 +1,4 @@ +use alloy_consensus::{BlockHeader, Header}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use reth_network_p2p::headers::downloader::SyncTarget; @@ -7,21 +8,21 @@ use tokio::sync::watch; /// Represents a gap to sync: from `local_head` to `target` #[derive(Clone, Debug)] -pub struct HeaderSyncGap { +pub struct HeaderSyncGap { /// The local head block. Represents lower bound of sync range. - pub local_head: SealedHeader, + pub local_head: SealedHeader, /// The sync target. Represents upper bound of sync range. pub target: SyncTarget, } -impl HeaderSyncGap { +impl HeaderSyncGap { /// Returns `true` if the gap from the head to the target was closed #[inline] pub fn is_closed(&self) -> bool { match self.target.tip() { BlockHashOrNumber::Hash(hash) => self.local_head.hash() == hash, - BlockHashOrNumber::Number(num) => self.local_head.number == num, + BlockHashOrNumber::Number(num) => self.local_head.number() == num, } } } @@ -29,6 +30,9 @@ impl HeaderSyncGap { /// Client trait for determining the current headers sync gap. #[auto_impl::auto_impl(&, Arc)] pub trait HeaderSyncGapProvider: Send + Sync { + /// The header type. + type Header: Send + Sync; + /// Find a current sync gap for the headers depending on the last /// uninterrupted block number. Last uninterrupted block represents the block number before /// which there are no gaps. It's up to the caller to ensure that last uninterrupted block is @@ -37,5 +41,5 @@ pub trait HeaderSyncGapProvider: Send + Sync { &self, tip: watch::Receiver, highest_uninterrupted_block: BlockNumber, - ) -> ProviderResult; + ) -> ProviderResult>; } diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index c31c7c1e2f2..d82e97d1db7 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -9,37 +9,19 @@ pub use reth_evm::provider::EvmEnvProvider; mod block; pub use block::*; -mod chain_info; -pub use chain_info::CanonChainTracker; - mod header_sync_gap; pub use header_sync_gap::{HeaderSyncGap, HeaderSyncGapProvider}; mod state; -pub use state::{StateChangeWriter, StateWriter}; +pub use state::StateWriter; pub use reth_chainspec::ChainSpecProvider; -mod hashing; -pub use hashing::HashingWriter; - -mod trie; -pub use trie::{StorageTrieWriter, TrieWriter}; - -mod history; -pub use history::HistoryWriter; - mod static_file_provider; pub use static_file_provider::StaticFileProviderFactory; -mod stats; -pub use stats::StatsReader; - mod full; pub use full::{FullProvider, FullRpcProvider}; mod tree_viewer; pub use tree_viewer::TreeViewer; - -mod finalized_block; -pub use finalized_block::{ChainStateBlockReader, ChainStateBlockWriter}; diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index 3d62b1886e8..2c4ee2cfa8d 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -6,21 +6,23 @@ use revm::db::{ states::{PlainStateReverts, StateChangeset}, OriginalValuesKnown, }; -use std::ops::RangeInclusive; -/// A helper trait for [`ExecutionOutcome`] to write state and receipts to storage. 
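// A sketch of consuming the now header-generic sync gap from `header_sync_gap.rs`
// above. The provider method name (`sync_gap`) is not visible in this hunk, so
// treat it, and the concrete `alloy_consensus::Header` binding, as assumptions:

use alloy_primitives::{BlockNumber, B256};
use tokio::sync::watch;

fn gap_is_closed<P>(provider: &P, tip: watch::Receiver<B256>, head: BlockNumber) -> bool
where
    P: HeaderSyncGapProvider<Header = alloy_consensus::Header>,
{
    // `is_closed` compares the local head against the sync target tip, as defined above.
    provider.sync_gap(tip, head).map(|gap| gap.is_closed()).unwrap_or(false)
}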
+use super::StorageLocation; + +/// A trait specifically for writing state changes or reverts pub trait StateWriter { - /// Write the data and receipts to the database or static files if `static_file_producer` is + /// Receipt type included into [`ExecutionOutcome`]. + type Receipt; + + /// Write the state and receipts to the database or static files if `static_file_producer` is /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. - fn write_to_storage( - &mut self, - execution_outcome: ExecutionOutcome, + fn write_state( + &self, + execution_outcome: ExecutionOutcome, is_value_known: OriginalValuesKnown, + write_receipts_to: StorageLocation, ) -> ProviderResult<()>; -} -/// A trait specifically for writing state changes or reverts -pub trait StateChangeWriter { /// Write state reverts to the database. /// /// NOTE: Reverts will delete all wiped storage from plain state. @@ -36,9 +38,19 @@ pub trait StateChangeWriter { /// Writes the hashed state changes to the database fn write_hashed_state(&self, hashed_state: &HashedPostStateSorted) -> ProviderResult<()>; - /// Remove the block range of state. - fn remove_state(&self, range: RangeInclusive) -> ProviderResult<()>; + /// Remove the block range of state above the given block. The state of the passed block is not + /// removed. + fn remove_state_above( + &self, + block: BlockNumber, + remove_receipts_from: StorageLocation, + ) -> ProviderResult<()>; - /// Take the block range of state, recreating the [`ExecutionOutcome`]. - fn take_state(&self, range: RangeInclusive) -> ProviderResult; + /// Take the block range of state, recreating the [`ExecutionOutcome`]. The state of the passed + /// block is not removed. + fn take_state_above( + &self, + block: BlockNumber, + remove_receipts_from: StorageLocation, + ) -> ProviderResult>; } diff --git a/crates/storage/provider/src/traits/static_file_provider.rs b/crates/storage/provider/src/traits/static_file_provider.rs index 24d69569205..9daab7e5a8f 100644 --- a/crates/storage/provider/src/traits/static_file_provider.rs +++ b/crates/storage/provider/src/traits/static_file_provider.rs @@ -1,7 +1,9 @@ +use reth_storage_api::NodePrimitivesProvider; + use crate::providers::StaticFileProvider; /// Static file provider factory. -pub trait StaticFileProviderFactory { +pub trait StaticFileProviderFactory: NodePrimitivesProvider { /// Create new instance of static file provider. - fn static_file_provider(&self) -> StaticFileProvider; + fn static_file_provider(&self) -> StaticFileProvider; } diff --git a/crates/storage/provider/src/traits/trie.rs b/crates/storage/provider/src/traits/trie.rs deleted file mode 100644 index 2edb4e072dd..00000000000 --- a/crates/storage/provider/src/traits/trie.rs +++ /dev/null @@ -1,36 +0,0 @@ -use std::collections::HashMap; - -use alloy_primitives::B256; -use auto_impl::auto_impl; -use reth_storage_errors::provider::ProviderResult; -use reth_trie::updates::{StorageTrieUpdates, TrieUpdates}; - -/// Trie Writer -#[auto_impl(&, Arc, Box)] -pub trait TrieWriter: Send + Sync { - /// Writes trie updates to the database. - /// - /// Returns the number of entries modified. - fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult; -} - -/// Storage Trie Writer -#[auto_impl(&, Arc, Box)] -pub trait StorageTrieWriter: Send + Sync { - /// Writes storage trie updates from the given storage trie map. - /// - /// First sorts the storage trie updates by the hashed address key, writing in sorted order. 
- /// - /// Returns the number of entries modified. - fn write_storage_trie_updates( - &self, - storage_tries: &HashMap, - ) -> ProviderResult; - - /// Writes storage trie updates for the given hashed address. - fn write_individual_storage_trie_updates( - &self, - hashed_address: B256, - updates: &StorageTrieUpdates, - ) -> ProviderResult; -} diff --git a/crates/storage/provider/src/writer/database.rs b/crates/storage/provider/src/writer/database.rs deleted file mode 100644 index 1436fb8a6ab..00000000000 --- a/crates/storage/provider/src/writer/database.rs +++ /dev/null @@ -1,29 +0,0 @@ -use alloy_primitives::{BlockNumber, TxNumber}; -use reth_db::{ - cursor::{DbCursorRO, DbCursorRW}, - tables, -}; -use reth_errors::ProviderResult; -use reth_primitives::Receipt; -use reth_storage_api::ReceiptWriter; - -pub(crate) struct DatabaseWriter<'a, W>(pub(crate) &'a mut W); - -impl ReceiptWriter for DatabaseWriter<'_, W> -where - W: DbCursorRO + DbCursorRW, -{ - fn append_block_receipts( - &mut self, - first_tx_index: TxNumber, - _: BlockNumber, - receipts: Vec>, - ) -> ProviderResult<()> { - for (tx_idx, receipt) in receipts.into_iter().enumerate() { - if let Some(receipt) = receipt { - self.0.append(first_tx_index + tx_idx as u64, receipt)?; - } - } - Ok(()) - } -} diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 5b16b2da4e5..7ab6499cc3e 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -1,36 +1,19 @@ use crate::{ - providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter as SfWriter}, - writer::static_file::StaticFileWriter, - BlockExecutionWriter, BlockWriter, HistoryWriter, StateChangeWriter, StateWriter, TrieWriter, + providers::{StaticFileProvider, StaticFileWriter as SfWriter}, + BlockExecutionWriter, BlockWriter, HistoryWriter, StateWriter, StaticFileProviderFactory, + StorageLocation, TrieWriter, }; -use alloy_primitives::{BlockNumber, B256, U256}; +use alloy_consensus::BlockHeader; use reth_chain_state::ExecutedBlock; -use reth_db::{ - cursor::DbCursorRO, - models::CompactU256, - tables, - transaction::{DbTx, DbTxMut}, -}; -use reth_errors::{ProviderError, ProviderResult}; -use reth_execution_types::ExecutionOutcome; -use reth_primitives::{Header, SealedBlock, StaticFileSegment, TransactionSignedNoHash}; -use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{ - DBProvider, HeaderProvider, ReceiptWriter, StageCheckpointWriter, TransactionsProviderExt, -}; +use reth_db::transaction::{DbTx, DbTxMut}; +use reth_errors::ProviderResult; +use reth_primitives::{NodePrimitives, StaticFileSegment}; +use reth_primitives_traits::SignedTransaction; +use reth_storage_api::{DBProvider, StageCheckpointWriter, TransactionsProviderExt}; use reth_storage_errors::writer::UnifiedStorageWriterError; use revm::db::OriginalValuesKnown; -use std::{borrow::Borrow, sync::Arc}; -use tracing::{debug, instrument}; - -mod database; -mod static_file; -use database::DatabaseWriter; - -enum StorageType { - Database(C), - StaticFile(S), -} +use std::sync::Arc; +use tracing::debug; /// [`UnifiedStorageWriter`] is responsible for managing the writing to storage with both database /// and static file providers. @@ -83,14 +66,6 @@ impl<'a, ProviderDB, ProviderSF> UnifiedStorageWriter<'a, ProviderDB, ProviderSF self.static_file.as_ref().expect("should exist") } - /// Returns a mutable reference to the static file instance. 
- /// - /// # Panics - /// If the static file instance is not set. - fn static_file_mut(&mut self) -> &mut ProviderSF { - self.static_file.as_mut().expect("should exist") - } - /// Ensures that the static file instance is set. /// /// # Returns @@ -114,15 +89,13 @@ impl UnifiedStorageWriter<'_, (), ()> { /// start-up. /// /// NOTE: If unwinding data from storage, use `commit_unwind` instead! - pub fn commit
<P>( - database: impl Into<P> + AsRef<P>, - static_file: StaticFileProvider, - ) -> ProviderResult<()> + pub fn commit<P>
(provider: P) -> ProviderResult<()> where - P: DBProvider, + P: DBProvider + StaticFileProviderFactory, { + let static_file = provider.static_file_provider(); static_file.commit()?; - database.into().into_tx().commit()?; + provider.commit()?; Ok(()) } @@ -134,33 +107,36 @@ impl UnifiedStorageWriter<'_, (), ()> { /// checkpoints on the next start-up. /// /// NOTE: Should only be used after unwinding data from storage! - pub fn commit_unwind
<P>( - database: impl Into<P> + AsRef<P>, - static_file: StaticFileProvider, - ) -> ProviderResult<()> + pub fn commit_unwind<P>
(provider: P) -> ProviderResult<()> where - P: DBProvider, + P: DBProvider + StaticFileProviderFactory, { - database.into().into_tx().commit()?; + let static_file = provider.static_file_provider(); + provider.commit()?; static_file.commit()?; Ok(()) } } -impl UnifiedStorageWriter<'_, ProviderDB, &StaticFileProvider> +impl UnifiedStorageWriter<'_, ProviderDB, &StaticFileProvider> where ProviderDB: DBProvider + BlockWriter + TransactionsProviderExt - + StateChangeWriter + TrieWriter + + StateWriter + HistoryWriter + StageCheckpointWriter + BlockExecutionWriter - + AsRef, + + AsRef + + StaticFileProviderFactory, { /// Writes executed blocks and receipts to storage. - pub fn save_blocks(&self, blocks: &[ExecutedBlock]) -> ProviderResult<()> { + pub fn save_blocks(&self, blocks: Vec>) -> ProviderResult<()> + where + N: NodePrimitives, + ProviderDB: BlockWriter + StateWriter, + { if blocks.is_empty() { debug!(target: "provider::storage_writer", "Attempted to write empty block range"); return Ok(()) @@ -168,23 +144,14 @@ where // NOTE: checked non-empty above let first_block = blocks.first().unwrap().block(); - let last_block = blocks.last().unwrap().block().clone(); - let first_number = first_block.number; - let last_block_number = last_block.number; - debug!(target: "provider::storage_writer", block_count = %blocks.len(), "Writing blocks and execution data to storage"); + let last_block = blocks.last().unwrap().block(); + let first_number = first_block.number(); + let last_block_number = last_block.number(); - // Only write receipts to static files if there is no receipt pruning configured. - let mut state_writer = if self.database().prune_modes_ref().has_receipts_pruning() { - UnifiedStorageWriter::from_database(self.database()) - } else { - UnifiedStorageWriter::from( - self.database(), - self.static_file().get_writer(first_block.number, StaticFileSegment::Receipts)?, - ) - }; + debug!(target: "provider::storage_writer", block_count = %blocks.len(), "Writing blocks and execution data to storage"); - // TODO: remove all the clones and do performant / batched writes for each type of object + // TODO: Do performant / batched writes for each type of object // instead of a loop over all blocks, // meaning: // * blocks @@ -193,24 +160,24 @@ where // * trie updates (cannot naively extend, need helper) // * indices (already done basically) // Insert the blocks - for block in blocks { - let sealed_block = - block.block().clone().try_with_senders_unchecked(block.senders().clone()).unwrap(); - self.database().insert_block(sealed_block)?; - self.save_header_and_transactions(block.block.clone())?; + for ExecutedBlock { block, senders, execution_output, hashed_state, trie } in blocks { + let sealed_block = Arc::unwrap_or_clone(block) + .try_with_senders_unchecked(Arc::unwrap_or_clone(senders)) + .unwrap(); + self.database().insert_block(sealed_block, StorageLocation::Both)?; // Write state and changesets to the database. // Must be written after blocks because of the receipt lookup. 
- let execution_outcome = block.execution_outcome().clone(); - state_writer.write_to_storage(execution_outcome, OriginalValuesKnown::No)?; + self.database().write_state( + Arc::unwrap_or_clone(execution_output), + OriginalValuesKnown::No, + StorageLocation::StaticFiles, + )?; // insert hashes and intermediate merkle nodes - { - let trie_updates = block.trie_updates().clone(); - let hashed_state = block.hashed_state(); - self.database().write_hashed_state(&hashed_state.clone().into_sorted())?; - self.database().write_trie_updates(&trie_updates)?; - } + self.database() + .write_hashed_state(&Arc::unwrap_or_clone(hashed_state).into_sorted())?; + self.database().write_trie_updates(&trie)?; } // update history indices @@ -224,76 +191,20 @@ where Ok(()) } - /// Writes the header & transactions to static files, and updates their respective checkpoints - /// on database. - #[instrument(level = "trace", skip_all, fields(block = ?block.num_hash()) target = "storage")] - fn save_header_and_transactions(&self, block: Arc) -> ProviderResult<()> { - debug!(target: "provider::storage_writer", "Writing headers and transactions."); - - { - let header_writer = - self.static_file().get_writer(block.number, StaticFileSegment::Headers)?; - let mut storage_writer = UnifiedStorageWriter::from(self.database(), header_writer); - let td = storage_writer.append_headers_from_blocks( - block.header().number, - std::iter::once(&(block.header(), block.hash())), - )?; - - debug!(target: "provider::storage_writer", block_num=block.number, "Updating transaction metadata after writing"); - self.database() - .tx_ref() - .put::(block.number, CompactU256(td))?; - self.database() - .save_stage_checkpoint(StageId::Headers, StageCheckpoint::new(block.number))?; - } - - { - let transactions_writer = - self.static_file().get_writer(block.number, StaticFileSegment::Transactions)?; - let mut storage_writer = - UnifiedStorageWriter::from(self.database(), transactions_writer); - let no_hash_transactions = block - .body - .transactions - .clone() - .into_iter() - .map(TransactionSignedNoHash::from) - .collect(); - storage_writer.append_transactions_from_blocks( - block.header().number, - std::iter::once(&no_hash_transactions), - )?; - self.database() - .save_stage_checkpoint(StageId::Bodies, StageCheckpoint::new(block.number))?; - } - - Ok(()) - } - /// Removes all block, transaction and receipt data above the given block number from the /// database and static files. This is exclusive, i.e., it only removes blocks above /// `block_number`, and does not remove `block_number`. 
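// The save path above now persists state through the `StateWriter::write_state`
// entry point instead of the removed `UnifiedStorageWriter::write_to_storage`.
// A minimal sketch mirroring the call sites in the tests further down, assuming a
// provider whose receipt type is the ethereum `Receipt`:

use reth_execution_types::ExecutionOutcome;
use revm::db::OriginalValuesKnown;

fn persist_state<W>(provider: &W, outcome: ExecutionOutcome) -> ProviderResult<()>
where
    W: StateWriter<Receipt = reth_primitives::Receipt>,
{
    // Receipts land in the database here; `StorageLocation::StaticFiles` or
    // `StorageLocation::Both` would route them to the static-file segment instead.
    provider.write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database)
}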
pub fn remove_blocks_above(&self, block_number: u64) -> ProviderResult<()> { + // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block + debug!(target: "provider::storage_writer", ?block_number, "Removing blocks from database above block_number"); + self.database().remove_block_and_execution_above(block_number, StorageLocation::Both)?; + // Get highest static file block for the total block range let highest_static_file_block = self .static_file() .get_highest_static_file_block(StaticFileSegment::Headers) .expect("todo: error handling, headers should exist"); - // Get the total txs for the block range, so we have the correct number of columns for - // receipts and transactions - // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block - let tx_range = self - .database() - .transaction_range_by_block_range(block_number + 1..=highest_static_file_block)?; - let total_txs = tx_range.end().saturating_sub(*tx_range.start()); - - // IMPORTANT: we use `block_number+1` to make sure we remove only what is ABOVE the block - debug!(target: "provider::storage_writer", ?block_number, "Removing blocks from database above block_number"); - self.database().remove_block_and_execution_range( - block_number + 1..=self.database().last_block_number()?, - )?; - // IMPORTANT: we use `highest_static_file_block.saturating_sub(block_number)` to make sure // we remove only what is ABOVE the block. // @@ -304,236 +215,6 @@ where .get_writer(block_number, StaticFileSegment::Headers)? .prune_headers(highest_static_file_block.saturating_sub(block_number))?; - self.static_file() - .get_writer(block_number, StaticFileSegment::Transactions)? - .prune_transactions(total_txs, block_number)?; - - if !self.database().prune_modes_ref().has_receipts_pruning() { - self.static_file() - .get_writer(block_number, StaticFileSegment::Receipts)? - .prune_receipts(total_txs, block_number)?; - } - - Ok(()) - } -} - -impl UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_>> -where - ProviderDB: DBProvider + HeaderProvider, -{ - /// Ensures that the static file writer is set and of the right [`StaticFileSegment`] variant. - /// - /// # Returns - /// - `Ok(())` if the static file writer is set. - /// - `Err(StorageWriterError::MissingStaticFileWriter)` if the static file instance is not set. - fn ensure_static_file_segment( - &self, - segment: StaticFileSegment, - ) -> Result<(), UnifiedStorageWriterError> { - match &self.static_file { - Some(writer) => { - if writer.user_header().segment() == segment { - Ok(()) - } else { - Err(UnifiedStorageWriterError::IncorrectStaticFileWriter( - writer.user_header().segment(), - segment, - )) - } - } - None => Err(UnifiedStorageWriterError::MissingStaticFileWriter), - } - } - - /// Appends headers to static files, using the - /// [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) table to determine the - /// total difficulty of the parent block during header insertion. - /// - /// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a - /// writer for the Headers segment. - pub fn append_headers_from_blocks( - &mut self, - initial_block_number: BlockNumber, - headers: impl Iterator, - ) -> ProviderResult - where - I: Borrow<(H, B256)>, - H: Borrow
<Header>
, - { - self.ensure_static_file_segment(StaticFileSegment::Headers)?; - - let mut td = self - .database() - .header_td_by_number(initial_block_number)? - .ok_or(ProviderError::TotalDifficultyNotFound(initial_block_number))?; - - for pair in headers { - let (header, hash) = pair.borrow(); - let header = header.borrow(); - td += header.difficulty; - self.static_file_mut().append_header(header, td, hash)?; - } - - Ok(td) - } - - /// Appends transactions to static files, using the - /// [`BlockBodyIndices`](tables::BlockBodyIndices) table to determine the transaction number - /// when appending to static files. - /// - /// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a - /// writer for the Transactions segment. - pub fn append_transactions_from_blocks( - &mut self, - initial_block_number: BlockNumber, - transactions: impl Iterator, - ) -> ProviderResult<()> - where - T: Borrow>, - { - self.ensure_static_file_segment(StaticFileSegment::Transactions)?; - - let mut bodies_cursor = - self.database().tx_ref().cursor_read::()?; - - let mut last_tx_idx = None; - for (idx, transactions) in transactions.enumerate() { - let block_number = initial_block_number + idx as u64; - - let mut first_tx_index = - bodies_cursor.seek_exact(block_number)?.map(|(_, indices)| indices.first_tx_num()); - - // If there are no indices, that means there have been no transactions - // - // So instead of returning an error, use zero - if block_number == initial_block_number && first_tx_index.is_none() { - first_tx_index = Some(0); - } - - let mut tx_index = first_tx_index - .or(last_tx_idx) - .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; - - for tx in transactions.borrow() { - self.static_file_mut().append_transaction(tx_index, tx)?; - tx_index += 1; - } - - self.static_file_mut().increment_block(block_number)?; - - // update index - last_tx_idx = Some(tx_index); - } - Ok(()) - } -} - -impl UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_>> -where - ProviderDB: DBProvider + HeaderProvider, -{ - /// Appends receipts block by block. - /// - /// ATTENTION: If called from [`UnifiedStorageWriter`] without a static file producer, it will - /// always write them to database. Otherwise, it will look into the pruning configuration to - /// decide. - /// - /// NOTE: The static file writer used to construct this [`UnifiedStorageWriter`] MUST be a - /// writer for the Receipts segment. - /// - /// # Parameters - /// - `initial_block_number`: The starting block number. - /// - `blocks`: An iterator over blocks, each block having a vector of optional receipts. If - /// `receipt` is `None`, it has been pruned. - pub fn append_receipts_from_blocks( - &mut self, - initial_block_number: BlockNumber, - blocks: impl Iterator>>, - ) -> ProviderResult<()> { - let mut bodies_cursor = - self.database().tx_ref().cursor_read::()?; - - // We write receipts to database in two situations: - // * If we are in live sync. In this case, `UnifiedStorageWriter` is built without a static - // file writer. - // * If there is any kind of receipt pruning - let mut storage_type = if self.static_file.is_none() || - self.database().prune_modes_ref().has_receipts_pruning() - { - StorageType::Database(self.database().tx_ref().cursor_write::()?) 
- } else { - self.ensure_static_file_segment(StaticFileSegment::Receipts)?; - StorageType::StaticFile(self.static_file_mut()) - }; - - let mut last_tx_idx = None; - for (idx, receipts) in blocks.enumerate() { - let block_number = initial_block_number + idx as u64; - - let mut first_tx_index = - bodies_cursor.seek_exact(block_number)?.map(|(_, indices)| indices.first_tx_num()); - - // If there are no indices, that means there have been no transactions - // - // So instead of returning an error, use zero - if block_number == initial_block_number && first_tx_index.is_none() { - first_tx_index = Some(0); - } - - let first_tx_index = first_tx_index - .or(last_tx_idx) - .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; - - // update for empty blocks - last_tx_idx = Some(first_tx_index); - - match &mut storage_type { - StorageType::Database(cursor) => { - DatabaseWriter(cursor).append_block_receipts( - first_tx_index, - block_number, - receipts, - )?; - } - StorageType::StaticFile(sf) => { - StaticFileWriter(*sf).append_block_receipts( - first_tx_index, - block_number, - receipts, - )?; - } - }; - } - - Ok(()) - } -} - -impl StateWriter - for UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_>> -where - ProviderDB: DBProvider + StateChangeWriter + HeaderProvider, -{ - /// Write the data and receipts to the database or static files if `static_file_producer` is - /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. - fn write_to_storage( - &mut self, - execution_outcome: ExecutionOutcome, - is_value_known: OriginalValuesKnown, - ) -> ProviderResult<()> { - let (plain_state, reverts) = - execution_outcome.bundle.into_plain_state_and_reverts(is_value_known); - - self.database().write_state_reverts(reverts, execution_outcome.first_block)?; - - self.append_receipts_from_blocks( - execution_outcome.first_block, - execution_outcome.receipts.into_iter(), - )?; - - self.database().write_state_changes(plain_state)?; - Ok(()) } } @@ -551,8 +232,9 @@ mod tests { models::{AccountBeforeTx, BlockNumberAddress}, transaction::{DbTx, DbTxMut}, }; + use reth_execution_types::ExecutionOutcome; use reth_primitives::{Account, Receipt, Receipts, StorageEntry}; - use reth_storage_api::DatabaseProviderFactory; + use reth_storage_api::{DatabaseProviderFactory, HashedPostStateProvider}; use reth_trie::{ test_utils::{state_root, storage_root_prehashed}, HashedPostState, HashedStorage, StateRoot, StorageRoot, @@ -664,8 +346,8 @@ mod tests { let mut revm_bundle_state = state.take_bundle(); // Write plain state and reverts separately. - let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts(); - let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes); + let reverts = revm_bundle_state.take_all_reverts().to_plain_state_reverts(); + let plain_state = revm_bundle_state.to_plain_state(OriginalValuesKnown::Yes); assert!(plain_state.storage.is_empty()); assert!(plain_state.contracts.is_empty()); provider.write_state_changes(plain_state).expect("Could not write plain state to DB"); @@ -675,7 +357,7 @@ mod tests { let reth_account_a = account_a.into(); let reth_account_b = account_b.into(); - let reth_account_b_changed = account_b_changed.clone().into(); + let reth_account_b_changed = (&account_b_changed).into(); // Check plain state assert_eq!( @@ -722,8 +404,8 @@ mod tests { let mut revm_bundle_state = state.take_bundle(); // Write plain state and reverts separately. 
- let reverts = revm_bundle_state.take_all_reverts().into_plain_state_reverts(); - let plain_state = revm_bundle_state.into_plain_state(OriginalValuesKnown::Yes); + let reverts = revm_bundle_state.take_all_reverts().to_plain_state_reverts(); + let plain_state = revm_bundle_state.to_plain_state(OriginalValuesKnown::Yes); // Account B selfdestructed so flag for it should be present. assert_eq!( plain_state.storage, @@ -813,9 +495,8 @@ mod tests { let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); // Check plain storage state @@ -914,9 +595,8 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 2, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); assert_eq!( @@ -982,9 +662,8 @@ mod tests { let outcome = ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut state = State::builder().with_bundle_update().build(); @@ -1129,10 +808,10 @@ mod tests { let bundle = state.take_bundle(); - let outcome = ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + let outcome: ExecutionOutcome = + ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut storage_changeset_cursor = provider @@ -1296,9 +975,8 @@ mod tests { init_state.merge_transitions(BundleRetention::Reverts); let outcome = ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut state = State::builder().with_bundle_update().build(); @@ -1344,9 +1022,8 @@ mod tests { state.merge_transitions(BundleRetention::Reverts); let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()); - let mut writer = UnifiedStorageWriter::from_database(&provider); - writer - .write_to_storage(outcome, OriginalValuesKnown::Yes) + provider + .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut storage_changeset_cursor = provider @@ -1375,7 +1052,7 @@ mod tests { #[test] fn revert_to_indices() { - let base = ExecutionOutcome { + let base: ExecutionOutcome = ExecutionOutcome { bundle: BundleState::default(), receipts: vec![vec![Some(Receipt::default()); 2]; 
7].into(), first_block: 10, @@ -1441,13 +1118,7 @@ mod tests { assert_eq!( StateRoot::overlay_root( tx, - ExecutionOutcome::new( - state.bundle_state.clone(), - Receipts::default(), - 0, - Vec::new() - ) - .hash_state_slow(), + provider_factory.hashed_post_state(&state.bundle_state) ) .unwrap(), state_root(expected.clone().into_iter().map(|(address, (account, storage))| ( @@ -1592,7 +1263,7 @@ mod tests { .build(); assert_eq!(previous_state.reverts.len(), 1); - let mut test = ExecutionOutcome { + let mut test: ExecutionOutcome = ExecutionOutcome { bundle: present_state, receipts: vec![vec![Some(Receipt::default()); 2]; 1].into(), first_block: 2, diff --git a/crates/storage/provider/src/writer/static_file.rs b/crates/storage/provider/src/writer/static_file.rs deleted file mode 100644 index 5514e211e58..00000000000 --- a/crates/storage/provider/src/writer/static_file.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::providers::StaticFileProviderRWRefMut; -use alloy_primitives::{BlockNumber, TxNumber}; -use reth_errors::ProviderResult; -use reth_primitives::Receipt; -use reth_storage_api::ReceiptWriter; - -pub(crate) struct StaticFileWriter<'a, W>(pub(crate) &'a mut W); - -impl ReceiptWriter for StaticFileWriter<'_, StaticFileProviderRWRefMut<'_>> { - fn append_block_receipts( - &mut self, - first_tx_index: TxNumber, - block_number: BlockNumber, - receipts: Vec>, - ) -> ProviderResult<()> { - // Increment block on static file header. - self.0.increment_block(block_number)?; - let receipts = receipts.iter().enumerate().map(|(tx_idx, receipt)| { - Ok(( - first_tx_index + tx_idx as u64, - receipt - .as_ref() - .expect("receipt should not be filtered when saving to static files."), - )) - }); - self.0.append_receipts(receipts)?; - Ok(()) - } -} diff --git a/crates/storage/storage-api/Cargo.toml b/crates/storage/storage-api/Cargo.toml index 51d8eabfc40..7ebff976d13 100644 --- a/crates/storage/storage-api/Cargo.toml +++ b/crates/storage/storage-api/Cargo.toml @@ -18,13 +18,19 @@ reth-db-models.workspace = true reth-db-api.workspace = true reth-execution-types.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true reth-trie.workspace = true +reth-trie-db.workspace = true +reth-db.workspace = true +revm.workspace = true # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true +alloy-rpc-types-engine.workspace = true auto_impl.workspace = true diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index a3b0cc7438f..43a86aaf750 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -1,13 +1,11 @@ use crate::{ - BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, RequestsProvider, - TransactionVariant, TransactionsProvider, WithdrawalsProvider, + BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, TransactionVariant, + TransactionsProvider, WithdrawalsProvider, }; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; -use alloy_primitives::{BlockNumber, Sealable, B256}; +use alloy_primitives::{BlockNumber, B256}; use reth_db_models::StoredBlockBodyIndices; -use reth_primitives::{ - Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, -}; +use reth_primitives::{BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader}; use 
reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; @@ -42,64 +40,80 @@ impl BlockSource { } } +/// A helper type alias to access [`BlockReader::Block`]. +pub type ProviderBlock
<P> = <P as BlockReader>
::Block; + /// Api trait for fetching `Block` related data. /// /// If not requested otherwise, implementers of this trait should prioritize fetching blocks from /// the database. -#[auto_impl::auto_impl(&, Arc)] pub trait BlockReader: BlockNumReader + HeaderProvider + TransactionsProvider + ReceiptProvider - + RequestsProvider + WithdrawalsProvider + Send + Sync { + /// The block type this provider reads. + type Block: reth_primitives_traits::Block< + Body: reth_primitives_traits::BlockBody, + Header = Self::Header, + >; + /// Tries to find in the given block source. /// /// Note: this only operates on the hash because the number might be ambiguous. /// /// Returns `None` if block is not found. - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult>; + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult>; /// Returns the block with given id from the database. /// /// Returns `None` if block is not found. - fn block(&self, id: BlockHashOrNumber) -> ProviderResult>; + fn block(&self, id: BlockHashOrNumber) -> ProviderResult>; /// Returns the pending block if available /// - /// Note: This returns a [SealedBlock] because it's expected that this is sealed by the provider - /// and the caller does not know the hash. - fn pending_block(&self) -> ProviderResult>; + /// Note: This returns a [`SealedBlockFor`] because it's expected that this is sealed by the + /// provider and the caller does not know the hash. + fn pending_block(&self) -> ProviderResult>>; /// Returns the pending block if available /// - /// Note: This returns a [SealedBlockWithSenders] because it's expected that this is sealed by + /// Note: This returns a [`SealedBlockWithSenders`] because it's expected that this is sealed by /// the provider and the caller does not know the hash. - fn pending_block_with_senders(&self) -> ProviderResult>; + fn pending_block_with_senders( + &self, + ) -> ProviderResult>>; /// Returns the pending block and receipts if available. - fn pending_block_and_receipts(&self) -> ProviderResult)>>; + #[allow(clippy::type_complexity)] + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>>; /// Returns the ommers/uncle headers of the given block from the database. /// /// Returns `None` if block is not found. - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>; + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>; /// Returns the block with matching hash from the database. /// /// Returns `None` if block is not found. - fn block_by_hash(&self, hash: B256) -> ProviderResult> { + fn block_by_hash(&self, hash: B256) -> ProviderResult> { self.block(hash.into()) } /// Returns the block with matching number from database. /// /// Returns `None` if block is not found. - fn block_by_number(&self, num: u64) -> ProviderResult> { + fn block_by_number(&self, num: u64) -> ProviderResult> { self.block(num.into()) } @@ -117,7 +131,7 @@ pub trait BlockReader: &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>; + ) -> ProviderResult>>; /// Returns the sealed block with senders with matching number or hash from database. /// @@ -128,26 +142,164 @@ pub trait BlockReader: &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>; + ) -> ProviderResult>>; /// Returns all blocks in the given inclusive range. 
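// Illustrative sketch (hypothetical helper, not part of this patch): with
// `Block` as an associated type, callers stay generic over whichever block
// type the node is configured with. Uses the `block_range` method defined just
// below, and assumes the `Block::body()` / `BlockBody::transactions()`
// accessors from `reth_primitives_traits`.
fn count_transactions<P: BlockReader>(
    provider: &P,
    range: std::ops::RangeInclusive<alloy_primitives::BlockNumber>,
) -> reth_storage_errors::provider::ProviderResult<usize> {
    use reth_primitives_traits::{Block, BlockBody};
    // `block_range` yields `P::Block` values, not a hard-coded concrete block.
    Ok(provider
        .block_range(range)?
        .iter()
        .map(|block| block.body().transactions().len())
        .sum())
}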
/// /// Note: returns only available blocks - fn block_range(&self, range: RangeInclusive) -> ProviderResult>; + fn block_range(&self, range: RangeInclusive) -> ProviderResult>; /// Returns a range of blocks from the database, along with the senders of each /// transaction in the blocks. fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>; + ) -> ProviderResult>>; /// Returns a range of sealed blocks from the database, along with the senders of each /// transaction in the blocks. fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>; + ) -> ProviderResult>>; +} + +impl BlockReader for std::sync::Arc { + type Block = T::Block; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { + T::find_block_by_hash(self, hash, source) + } + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + T::block(self, id) + } + fn pending_block(&self) -> ProviderResult>> { + T::pending_block(self) + } + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { + T::pending_block_with_senders(self) + } + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { + T::pending_block_and_receipts(self) + } + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + T::ommers(self, id) + } + fn block_by_hash(&self, hash: B256) -> ProviderResult> { + T::block_by_hash(self, hash) + } + fn block_by_number(&self, num: u64) -> ProviderResult> { + T::block_by_number(self, num) + } + fn block_body_indices(&self, num: u64) -> ProviderResult> { + T::block_body_indices(self, num) + } + fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::block_with_senders(self, id, transaction_kind) + } + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::sealed_block_with_senders(self, id, transaction_kind) + } + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + T::block_range(self, range) + } + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::block_with_senders_range(self, range) + } + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::sealed_block_with_senders_range(self, range) + } +} + +impl BlockReader for &T { + type Block = T::Block; + + fn find_block_by_hash( + &self, + hash: B256, + source: BlockSource, + ) -> ProviderResult> { + T::find_block_by_hash(self, hash, source) + } + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { + T::block(self, id) + } + fn pending_block(&self) -> ProviderResult>> { + T::pending_block(self) + } + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { + T::pending_block_with_senders(self) + } + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { + T::pending_block_and_receipts(self) + } + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + T::ommers(self, id) + } + fn block_by_hash(&self, hash: B256) -> ProviderResult> { + T::block_by_hash(self, hash) + } + fn block_by_number(&self, num: u64) -> ProviderResult> { + T::block_by_number(self, num) + } + fn block_body_indices(&self, num: u64) -> ProviderResult> { + T::block_body_indices(self, num) + } + fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::block_with_senders(self, id, transaction_kind) + } + fn 
sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + T::sealed_block_with_senders(self, id, transaction_kind) + } + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { + T::block_range(self, range) + } + fn block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::block_with_senders_range(self, range) + } + fn sealed_block_with_senders_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>> { + T::sealed_block_with_senders_range(self, range) + } } /// Trait extension for `BlockReader`, for types that implement `BlockId` conversion. @@ -160,12 +312,11 @@ pub trait BlockReader: /// so this trait can only be implemented for types that implement `BlockIdReader`. The /// `BlockIdReader` methods should be used to resolve `BlockId`s to block numbers or hashes, and /// retrieving the block should be done using the type's `BlockReader` methods. -#[auto_impl::auto_impl(&, Arc)] pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns the block with matching tag from the database /// /// Returns `None` if block is not found. - fn block_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + fn block_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.block(num.into())) } @@ -173,7 +324,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn pending_header(&self) -> ProviderResult> { + fn pending_header(&self) -> ProviderResult>> { self.sealed_header_by_id(BlockNumberOrTag::Pending.into()) } @@ -181,7 +332,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn latest_header(&self) -> ProviderResult> { + fn latest_header(&self) -> ProviderResult>> { self.sealed_header_by_id(BlockNumberOrTag::Latest.into()) } @@ -189,7 +340,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn safe_header(&self) -> ProviderResult> { + fn safe_header(&self) -> ProviderResult>> { self.sealed_header_by_id(BlockNumberOrTag::Safe.into()) } @@ -197,14 +348,14 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// /// Note: This returns a [`SealedHeader`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn finalized_header(&self) -> ProviderResult> { + fn finalized_header(&self) -> ProviderResult>> { self.sealed_header_by_id(BlockNumberOrTag::Finalized.into()) } /// Returns the block with the matching [`BlockId`] from the database. /// /// Returns `None` if block is not found. - fn block_by_id(&self, id: BlockId) -> ProviderResult>; + fn block_by_id(&self, id: BlockId) -> ProviderResult>; /// Returns the block with senders with matching [`BlockId`]. 
/// @@ -215,7 +366,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { &self, id: BlockId, transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { match id { BlockId::Hash(hash) => { self.block_with_senders(hash.block_hash.into(), transaction_kind) @@ -230,7 +381,10 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { /// Returns the header with matching tag from the database /// /// Returns `None` if header is not found. - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + fn header_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult> { self.convert_block_number(id)? .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into())) } @@ -241,36 +395,56 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.convert_block_number(id)? .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into()))? - .map_or_else( - || Ok(None), - |h| { - let sealed = h.seal_slow(); - let (header, seal) = sealed.into_parts(); - Ok(Some(SealedHeader::new(header, seal))) - }, - ) + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))) } /// Returns the sealed header with the matching `BlockId` from the database. /// /// Returns `None` if header is not found. - fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult>; + fn sealed_header_by_id( + &self, + id: BlockId, + ) -> ProviderResult>>; /// Returns the header with the matching `BlockId` from the database. /// /// Returns `None` if header is not found. - fn header_by_id(&self, id: BlockId) -> ProviderResult>; + fn header_by_id(&self, id: BlockId) -> ProviderResult>; /// Returns the ommers with the matching tag from the database. - fn ommers_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult>> { + fn ommers_by_number_or_tag( + &self, + id: BlockNumberOrTag, + ) -> ProviderResult>> { self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.ommers(num.into())) } /// Returns the ommers with the matching `BlockId` from the database. /// /// Returns `None` if block is not found. - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>; + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>; +} + +/// Functionality to read the last known chain blocks from the database. +pub trait ChainStateBlockReader: Send + Sync { + /// Returns the last finalized block number. + /// + /// If no finalized block has been written yet, this returns `None`. + fn last_finalized_block_number(&self) -> ProviderResult>; + /// Returns the last safe block number. + /// + /// If no safe block has been written yet, this returns `None`. + fn last_safe_block_number(&self) -> ProviderResult>; +} + +/// Functionality to write the last known chain blocks to the database. +pub trait ChainStateBlockWriter: Send + Sync { + /// Saves the given finalized block number in the DB. + fn save_finalized_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; + + /// Saves the given safe block number in the DB. 
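// Illustrative sketch (hypothetical wiring, not part of this patch): the two
// traits together persist the forkchoice-derived block numbers across
// restarts. Before the first save, the reader methods return `Ok(None)`.
fn persist_forkchoice<P>(provider: &P, finalized: u64, safe: u64) -> ProviderResult<()>
where
    P: ChainStateBlockReader + ChainStateBlockWriter,
{
    provider.save_finalized_block_number(finalized)?;
    provider.save_safe_block_number(safe)?;
    // After a restart the saved values are read back like this:
    debug_assert_eq!(provider.last_finalized_block_number()?, Some(finalized));
    debug_assert_eq!(provider.last_safe_block_number()?, Some(safe));
    Ok(())
}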
+ fn save_safe_block_number(&self, block_number: BlockNumber) -> ProviderResult<()>; } diff --git a/crates/storage/storage-api/src/block_id.rs b/crates/storage/storage-api/src/block_id.rs index 3d9df2e329f..00856d348a5 100644 --- a/crates/storage/storage-api/src/block_id.rs +++ b/crates/storage/storage-api/src/block_id.rs @@ -82,11 +82,10 @@ pub trait BlockIdReader: BlockNumReader + Send + Sync { BlockNumberOrTag::Pending => self .pending_block_num_hash() .map(|res_opt| res_opt.map(|num_hash| num_hash.hash)), - _ => self - .convert_block_number(num)? - .map(|num| self.block_hash(num)) - .transpose() - .map(|maybe_hash| maybe_hash.flatten()), + BlockNumberOrTag::Finalized => self.finalized_block_hash(), + BlockNumberOrTag::Safe => self.safe_block_hash(), + BlockNumberOrTag::Earliest => self.block_hash(0), + BlockNumberOrTag::Number(num) => self.block_hash(num), }, } } @@ -100,13 +99,13 @@ pub trait BlockIdReader: BlockNumReader + Send + Sync { } /// Get the current pending block number and hash. - fn pending_block_num_hash(&self) -> ProviderResult>; + fn pending_block_num_hash(&self) -> ProviderResult>; /// Get the current safe block number and hash. - fn safe_block_num_hash(&self) -> ProviderResult>; + fn safe_block_num_hash(&self) -> ProviderResult>; /// Get the current finalized block number and hash. - fn finalized_block_num_hash(&self) -> ProviderResult>; + fn finalized_block_num_hash(&self) -> ProviderResult>; /// Get the safe block number. fn safe_block_number(&self) -> ProviderResult> { diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs new file mode 100644 index 00000000000..9b9c24c6863 --- /dev/null +++ b/crates/storage/storage-api/src/chain.rs @@ -0,0 +1,169 @@ +use crate::DBProvider; +use alloy_primitives::BlockNumber; +use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_db::{ + cursor::{DbCursorRO, DbCursorRW}, + models::{StoredBlockOmmers, StoredBlockWithdrawals}, + tables, + transaction::{DbTx, DbTxMut}, + DbTxUnwindExt, +}; +use reth_primitives_traits::{Block, BlockBody, FullNodePrimitives}; +use reth_storage_errors::provider::ProviderResult; + +/// Trait that implements how block bodies are written to the storage. +/// +/// Note: Within the current abstraction, this should only write to tables unrelated to +/// transactions. Writing of transactions is handled separately. +#[auto_impl::auto_impl(&, Arc)] +pub trait BlockBodyWriter { + /// Writes a set of block bodies to the storage. + fn write_block_bodies( + &self, + provider: &Provider, + bodies: Vec<(BlockNumber, Option)>, + ) -> ProviderResult<()>; + + /// Removes all block bodies above the given block number from the database. + fn remove_block_bodies_above( + &self, + provider: &Provider, + block: BlockNumber, + ) -> ProviderResult<()>; +} + +/// Trait that implements how chain-specific types are written to the storage. +pub trait ChainStorageWriter: + BlockBodyWriter::Body> +{ +} +impl ChainStorageWriter for T where + T: BlockBodyWriter::Body> +{ +} + +/// Input for reading a block body. Contains a header of block being read and a list of pre-fetched +/// transactions. +pub type ReadBodyInput<'a, B> = + (&'a ::Header, Vec<<::Body as BlockBody>::Transaction>); + +/// Trait that implements how block bodies are read from the storage. 
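// Illustrative sketch (hypothetical caller, not part of this patch): exercising
// the writer half of the split with the `EthStorage` implementation defined
// below. Only the non-transaction parts of a body (ommers, withdrawals) are
// persisted here; transactions are written by a separate code path.
fn write_one_body<Provider>(storage: &EthStorage, provider: &Provider) -> ProviderResult<()>
where
    Provider: DBProvider,
    Provider::Tx: reth_db::transaction::DbTxMut,
{
    // Block 100 gets an (empty) body; `None` would mean "nothing to write".
    storage.write_block_bodies(provider, vec![(100, Some(reth_primitives::BlockBody::default()))])?;
    // Unwinding removes every body-table entry above block 99 again.
    storage.remove_block_bodies_above(provider, 99)
}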
+/// +/// Note: Within the current abstraction, transactions persistence is handled separately, thus this +/// trait is provided with transactions read beforehand and is expected to construct the block body +/// from those transactions and additional data read from elsewhere. +#[auto_impl::auto_impl(&, Arc)] +pub trait BlockBodyReader { + /// The block type. + type Block: Block; + + /// Receives a list of block headers along with block transactions and returns the block bodies. + fn read_block_bodies( + &self, + provider: &Provider, + inputs: Vec>, + ) -> ProviderResult::Body>>; +} + +/// Trait that implements how chain-specific types are read from storage. +pub trait ChainStorageReader: + BlockBodyReader +{ +} +impl ChainStorageReader for T where + T: BlockBodyReader +{ +} + +/// Ethereum storage implementation. +#[derive(Debug, Default, Clone, Copy)] +pub struct EthStorage; + +impl BlockBodyWriter for EthStorage +where + Provider: DBProvider, +{ + fn write_block_bodies( + &self, + provider: &Provider, + bodies: Vec<(u64, Option)>, + ) -> ProviderResult<()> { + let mut ommers_cursor = provider.tx_ref().cursor_write::()?; + let mut withdrawals_cursor = + provider.tx_ref().cursor_write::()?; + + for (block_number, body) in bodies { + let Some(body) = body else { continue }; + + // Write ommers if any + if !body.ommers.is_empty() { + ommers_cursor.append(block_number, StoredBlockOmmers { ommers: body.ommers })?; + } + + // Write withdrawals if any + if let Some(withdrawals) = body.withdrawals { + if !withdrawals.is_empty() { + withdrawals_cursor + .append(block_number, StoredBlockWithdrawals { withdrawals })?; + } + } + } + + Ok(()) + } + + fn remove_block_bodies_above( + &self, + provider: &Provider, + block: BlockNumber, + ) -> ProviderResult<()> { + provider.tx_ref().unwind_table_by_num::(block)?; + provider.tx_ref().unwind_table_by_num::(block)?; + + Ok(()) + } +} + +impl BlockBodyReader for EthStorage +where + Provider: DBProvider + ChainSpecProvider, +{ + type Block = reth_primitives::Block; + + fn read_block_bodies( + &self, + provider: &Provider, + inputs: Vec>, + ) -> ProviderResult::Body>> { + // TODO: Ideally storage should hold its own copy of chain spec + let chain_spec = provider.chain_spec(); + + let mut ommers_cursor = provider.tx_ref().cursor_read::()?; + let mut withdrawals_cursor = provider.tx_ref().cursor_read::()?; + + let mut bodies = Vec::with_capacity(inputs.len()); + + for (header, transactions) in inputs { + // If we are past shanghai, then all blocks should have a withdrawal list, + // even if empty + let withdrawals = if chain_spec.is_shanghai_active_at_timestamp(header.timestamp) { + withdrawals_cursor + .seek_exact(header.number)? 
+ .map(|(_, w)| w.withdrawals) + .unwrap_or_default() + .into() + } else { + None + }; + let ommers = if chain_spec.final_paris_total_difficulty(header.number).is_some() { + Vec::new() + } else { + ommers_cursor.seek_exact(header.number)?.map(|(_, o)| o.ommers).unwrap_or_default() + }; + + bodies.push(reth_primitives::BlockBody { transactions, ommers, withdrawals }); + } + + Ok(bodies) + } +} diff --git a/crates/storage/provider/src/traits/chain_info.rs b/crates/storage/storage-api/src/chain_info.rs similarity index 80% rename from crates/storage/provider/src/traits/chain_info.rs rename to crates/storage/storage-api/src/chain_info.rs index 39f8639dd27..b6f58b7e73f 100644 --- a/crates/storage/provider/src/traits/chain_info.rs +++ b/crates/storage/storage-api/src/chain_info.rs @@ -4,6 +4,9 @@ use std::time::Instant; /// A type that can track updates related to fork choice updates. pub trait CanonChainTracker: Send + Sync { + /// The header type. + type Header: Send + Sync; + /// Notify the tracker about a received fork choice update. fn on_forkchoice_update_received(&self, update: &ForkchoiceState); @@ -19,11 +22,11 @@ pub trait CanonChainTracker: Send + Sync { fn last_exchanged_transition_configuration_timestamp(&self) -> Option; /// Sets the canonical head of the chain. - fn set_canonical_head(&self, header: SealedHeader); + fn set_canonical_head(&self, header: SealedHeader); /// Sets the safe block of the chain. - fn set_safe(&self, header: SealedHeader); + fn set_safe(&self, header: SealedHeader); /// Sets the finalized block of the chain. - fn set_finalized(&self, header: SealedHeader); + fn set_finalized(&self, header: SealedHeader); } diff --git a/crates/storage/storage-api/src/database_provider.rs b/crates/storage/storage-api/src/database_provider.rs index 6a463ed01e9..20aebce88fe 100644 --- a/crates/storage/storage-api/src/database_provider.rs +++ b/crates/storage/storage-api/src/database_provider.rs @@ -1,6 +1,14 @@ -use reth_db_api::{database::Database, transaction::DbTx}; +use reth_db_api::{ + common::KeyValue, + cursor::DbCursorRO, + database::Database, + table::Table, + transaction::{DbTx, DbTxMut}, + DatabaseError, +}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderResult; +use std::ops::{Bound, RangeBounds}; /// Database provider. pub trait DBProvider: Send + Sync + Sized + 'static { @@ -34,6 +42,101 @@ pub trait DBProvider: Send + Sync + Sized + 'static { /// Returns a reference to prune modes. fn prune_modes_ref(&self) -> &PruneModes; + + /// Return full table as Vec + fn table(&self) -> Result>, DatabaseError> + where + T::Key: Default + Ord, + { + self.tx_ref() + .cursor_read::()? + .walk(Some(T::Key::default()))? + .collect::, DatabaseError>>() + } + + /// Return a list of entries from the table, based on the given range. + #[inline] + fn get( + &self, + range: impl RangeBounds, + ) -> Result>, DatabaseError> { + self.tx_ref().cursor_read::()?.walk_range(range)?.collect::, _>>() + } + + /// Iterates over read only values in the given table and collects them into a vector. + /// + /// Early-returns if the range is empty, without opening a cursor transaction. 
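// Worked example (illustrative, not part of this patch; as a test it would sit
// at module scope, not inside the trait): the `range_size_hint` helper at the
// bottom of this file drives both the early return and the Vec preallocation.
#[cfg(test)]
mod range_hint_example {
    use super::range_size_hint;

    #[test]
    fn size_hints() {
        assert_eq!(range_size_hint(&(10u64..10)), Some(0)); // empty: no cursor is opened
        assert_eq!(range_size_hint(&(10u64..=12)), Some(3)); // preallocates 3 entries
        assert_eq!(range_size_hint(&(10u64..)), None); // unbounded end: no hint
    }
}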
+ fn cursor_read_collect>( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { + let capacity = match range_size_hint(&range) { + Some(0) | None => return Ok(Vec::new()), + Some(capacity) => capacity, + }; + let mut cursor = self.tx_ref().cursor_read::()?; + self.cursor_collect_with_capacity(&mut cursor, range, capacity) + } + + /// Iterates over read only values in the given table and collects them into a vector. + fn cursor_collect>( + &self, + cursor: &mut impl DbCursorRO, + range: impl RangeBounds, + ) -> ProviderResult> { + let capacity = range_size_hint(&range).unwrap_or(0); + self.cursor_collect_with_capacity(cursor, range, capacity) + } + + /// Iterates over read only values in the given table and collects them into a vector with + /// capacity. + fn cursor_collect_with_capacity>( + &self, + cursor: &mut impl DbCursorRO, + range: impl RangeBounds, + capacity: usize, + ) -> ProviderResult> { + let mut items = Vec::with_capacity(capacity); + for entry in cursor.walk_range(range)? { + items.push(entry?.1); + } + Ok(items) + } + + /// Remove list of entries from the table. Returns the number of entries removed. + #[inline] + fn remove(&self, range: impl RangeBounds) -> Result + where + Self::Tx: DbTxMut, + { + let mut entries = 0; + let mut cursor_write = self.tx_ref().cursor_write::()?; + let mut walker = cursor_write.walk_range(range)?; + while walker.next().transpose()?.is_some() { + walker.delete_current()?; + entries += 1; + } + Ok(entries) + } + + /// Return a list of entries from the table, and remove them, based on the given range. + #[inline] + fn take( + &self, + range: impl RangeBounds, + ) -> Result>, DatabaseError> + where + Self::Tx: DbTxMut, + { + let mut cursor_write = self.tx_ref().cursor_write::()?; + let mut walker = cursor_write.walk_range(range)?; + let mut items = Vec::new(); + while let Some(i) = walker.next().transpose()? { + walker.delete_current()?; + items.push(i) + } + Ok(items) + } } /// Database provider factory. @@ -54,3 +157,17 @@ pub trait DatabaseProviderFactory: Send + Sync { /// Create new read-write database provider. 
fn database_provider_rw(&self) -> ProviderResult; } + +fn range_size_hint(range: &impl RangeBounds) -> Option { + let start = match range.start_bound().cloned() { + Bound::Included(start) => start, + Bound::Excluded(start) => start.checked_add(1)?, + Bound::Unbounded => 0, + }; + let end = match range.end_bound().cloned() { + Bound::Included(end) => end.saturating_add(1), + Bound::Excluded(end) => end, + Bound::Unbounded => return None, + }; + end.checked_sub(start).map(|x| x as _) +} diff --git a/crates/storage/provider/src/traits/hashing.rs b/crates/storage/storage-api/src/hashing.rs similarity index 64% rename from crates/storage/provider/src/traits/hashing.rs rename to crates/storage/storage-api/src/hashing.rs index 2b759afa729..7cd30a82510 100644 --- a/crates/storage/provider/src/traits/hashing.rs +++ b/crates/storage/storage-api/src/hashing.rs @@ -1,11 +1,11 @@ -use alloy_primitives::{Address, BlockNumber, B256}; +use alloy_primitives::{map::HashMap, Address, BlockNumber, B256}; use auto_impl::auto_impl; -use reth_db_api::models::BlockNumberAddress; +use reth_db::models::{AccountBeforeTx, BlockNumberAddress}; use reth_primitives::{Account, StorageEntry}; use reth_storage_errors::provider::ProviderResult; use std::{ - collections::{BTreeMap, BTreeSet, HashMap}, - ops::{Range, RangeInclusive}, + collections::{BTreeMap, BTreeSet}, + ops::{RangeBounds, RangeInclusive}, }; /// Hashing Writer @@ -16,9 +16,19 @@ pub trait HashingWriter: Send + Sync { /// # Returns /// /// Set of hashed keys of updated accounts. - fn unwind_account_hashing( + fn unwind_account_hashing<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, + ) -> ProviderResult>>; + + /// Unwind and clear account hashing in a given block range. + /// + /// # Returns + /// + /// Set of hashed keys of updated accounts. + fn unwind_account_hashing_range( + &self, + range: impl RangeBounds, ) -> ProviderResult>>; /// Inserts all accounts into [reth_db::tables::AccountsHistory] table. @@ -38,7 +48,17 @@ pub trait HashingWriter: Send + Sync { /// Mapping of hashed keys of updated accounts to their respective updated hashed slots. fn unwind_storage_hashing( &self, - range: Range, + changesets: impl Iterator, + ) -> ProviderResult>>; + + /// Unwind and clear storage hashing in a given block range. + /// + /// # Returns + /// + /// Mapping of hashed keys of updated accounts to their respective updated hashed slots. + fn unwind_storage_hashing_range( + &self, + range: impl RangeBounds, ) -> ProviderResult>>; /// Iterates over storages and inserts them to hashing table. diff --git a/crates/storage/storage-api/src/header.rs b/crates/storage/storage-api/src/header.rs index 7202f51ddf1..b2d2c1663ed 100644 --- a/crates/storage/storage-api/src/header.rs +++ b/crates/storage/storage-api/src/header.rs @@ -1,33 +1,43 @@ use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockHash, BlockNumber, U256}; -use reth_primitives::{Header, SealedHeader}; +use reth_primitives::SealedHeader; +use reth_primitives_traits::BlockHeader; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; +/// A helper type alias to access [`HeaderProvider::Header`]. +pub type ProviderHeader
<P> = <P as HeaderProvider>
::Header; + /// Client trait for fetching `Header` related data. #[auto_impl::auto_impl(&, Arc)] pub trait HeaderProvider: Send + Sync { + /// The header type this provider supports. + type Header: BlockHeader; + /// Check if block is known fn is_known(&self, block_hash: &BlockHash) -> ProviderResult { self.header(block_hash).map(|header| header.is_some()) } /// Get header by block hash - fn header(&self, block_hash: &BlockHash) -> ProviderResult>; + fn header(&self, block_hash: &BlockHash) -> ProviderResult>; /// Retrieves the header sealed by the given block hash. - fn sealed_header_by_hash(&self, block_hash: BlockHash) -> ProviderResult> { + fn sealed_header_by_hash( + &self, + block_hash: BlockHash, + ) -> ProviderResult>> { Ok(self.header(&block_hash)?.map(|header| SealedHeader::new(header, block_hash))) } /// Get header by block number - fn header_by_number(&self, num: u64) -> ProviderResult>; + fn header_by_number(&self, num: u64) -> ProviderResult>; /// Get header by block number or hash fn header_by_hash_or_number( &self, hash_or_num: BlockHashOrNumber, - ) -> ProviderResult> { + ) -> ProviderResult> { match hash_or_num { BlockHashOrNumber::Hash(hash) => self.header(&hash), BlockHashOrNumber::Number(num) => self.header_by_number(num), @@ -41,16 +51,22 @@ pub trait HeaderProvider: Send + Sync { fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult>; /// Get headers in range of block numbers - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult>; + fn headers_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult>; /// Get a single sealed header by block number. - fn sealed_header(&self, number: BlockNumber) -> ProviderResult>; + fn sealed_header( + &self, + number: BlockNumber, + ) -> ProviderResult>>; /// Get headers in range of block numbers. fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> ProviderResult> { + ) -> ProviderResult>> { self.sealed_headers_while(range, |_| true) } @@ -58,6 +74,6 @@ pub trait HeaderProvider: Send + Sync { fn sealed_headers_while( &self, range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult>; + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>>; } diff --git a/crates/storage/provider/src/traits/history.rs b/crates/storage/storage-api/src/history.rs similarity index 59% rename from crates/storage/provider/src/traits/history.rs rename to crates/storage/storage-api/src/history.rs index cbf9bece4b9..4eadd6031c3 100644 --- a/crates/storage/provider/src/traits/history.rs +++ b/crates/storage/storage-api/src/history.rs @@ -1,8 +1,9 @@ use alloy_primitives::{Address, BlockNumber, B256}; use auto_impl::auto_impl; -use reth_db_api::models::BlockNumberAddress; +use reth_db::models::{AccountBeforeTx, BlockNumberAddress}; +use reth_primitives::StorageEntry; use reth_storage_errors::provider::ProviderResult; -use std::ops::{Range, RangeInclusive}; +use std::ops::{RangeBounds, RangeInclusive}; /// History Writer #[auto_impl(&, Arc, Box)] @@ -10,9 +11,17 @@ pub trait HistoryWriter: Send + Sync { /// Unwind and clear account history indices. /// /// Returns number of changesets walked. - fn unwind_account_history_indices( + fn unwind_account_history_indices<'a>( &self, - range: RangeInclusive, + changesets: impl Iterator, + ) -> ProviderResult; + + /// Unwind and clear account history indices in a given block range. + /// + /// Returns number of changesets walked. 
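// Illustrative sketch (hypothetical caller, not part of this patch): a caller
// that already collected the account changesets, e.g. while unwinding the
// changeset tables, hands them over directly and skips a second table walk.
// Assumes the iterator item is `&(BlockNumber, AccountBeforeTx)`, as suggested
// by the imports above.
fn unwind_account_indices<W: HistoryWriter>(
    writer: &W,
    changesets: &[(u64, reth_db::models::AccountBeforeTx)],
) -> ProviderResult<usize> {
    // The `_range` variant below is the fallback when no changesets are at
    // hand; it re-reads them from the table for the given block range.
    writer.unwind_account_history_indices(changesets.iter())
}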
+ fn unwind_account_history_indices_range( + &self, + range: impl RangeBounds, ) -> ProviderResult; /// Insert account change index to database. Used inside AccountHistoryIndex stage @@ -26,7 +35,15 @@ pub trait HistoryWriter: Send + Sync { /// Returns number of changesets walked. fn unwind_storage_history_indices( &self, - range: Range, + changesets: impl Iterator, + ) -> ProviderResult; + + /// Unwind and clear storage history indices in a given block range. + /// + /// Returns number of changesets walked. + fn unwind_storage_history_indices_range( + &self, + range: impl RangeBounds, ) -> ProviderResult; /// Insert storage change index to database. Used inside StorageHistoryIndex stage diff --git a/crates/storage/storage-api/src/legacy.rs b/crates/storage/storage-api/src/legacy.rs new file mode 100644 index 00000000000..e53a5d8bfa2 --- /dev/null +++ b/crates/storage/storage-api/src/legacy.rs @@ -0,0 +1,83 @@ +//! Traits used by the legacy execution engine. +//! +//! This module is scheduled for removal in the future. + +use alloy_eips::BlockNumHash; +use alloy_primitives::{BlockHash, BlockNumber}; +use auto_impl::auto_impl; +use reth_execution_types::ExecutionOutcome; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; + +/// Blockchain trait provider that gives access to the blockchain state that is not yet committed +/// (pending). +pub trait BlockchainTreePendingStateProvider: Send + Sync { + /// Returns a state provider that includes all state changes of the given (pending) block hash. + /// + /// In other words, the state provider will return the state after all transactions of the given + /// hash have been executed. + fn pending_state_provider( + &self, + block_hash: BlockHash, + ) -> ProviderResult> { + self.find_pending_state_provider(block_hash) + .ok_or(ProviderError::StateForHashNotFound(block_hash)) + } + + /// Returns state provider if a matching block exists. + fn find_pending_state_provider( + &self, + block_hash: BlockHash, + ) -> Option>; +} + +/// Provides data required for post-block execution. +/// +/// This trait offers methods to access essential post-execution data, including the state changes +/// in accounts and storage, as well as block hashes for both the pending and canonical chains. +/// +/// The trait includes: +/// * [`ExecutionOutcome`] - Captures all account and storage changes in the pending chain. +/// * Block hashes - Provides access to the block hashes of both the pending chain and canonical +/// blocks. +#[auto_impl(&, Box)] +pub trait ExecutionDataProvider: Send + Sync { + /// Return the execution outcome. + fn execution_outcome(&self) -> &ExecutionOutcome; + /// Return block hash by block number of pending or canonical chain. + fn block_hash(&self, block_number: BlockNumber) -> Option; +} + +impl ExecutionDataProvider for ExecutionOutcome { + fn execution_outcome(&self) -> &ExecutionOutcome { + self + } + + /// Always returns [None] because we don't have any information about the block header. + fn block_hash(&self, _block_number: BlockNumber) -> Option { + None + } +} + +/// Fork data needed for execution on it. +/// +/// It contains a canonical fork, the block on what pending chain was forked from. +#[auto_impl(&, Box)] +pub trait BlockExecutionForkProvider { + /// Return canonical fork, the block on what post state was forked from. + /// + /// Needed to create state provider. + fn canonical_fork(&self) -> BlockNumHash; +} + +/// Provides comprehensive post-execution state data required for further execution. 
+/// +/// This trait is used to create a state provider over the pending state and is a combination of +/// [`ExecutionDataProvider`] and [`BlockExecutionForkProvider`]. +/// +/// The pending state includes: +/// * `ExecutionOutcome`: Contains all changes to accounts and storage within the pending chain. +/// * Block hashes: Represents hashes of both the pending chain and canonical blocks. +/// * Canonical fork: Denotes the block from which the pending chain forked. +pub trait FullExecutionDataProvider: ExecutionDataProvider + BlockExecutionForkProvider {} + +impl FullExecutionDataProvider for T where T: ExecutionDataProvider + BlockExecutionForkProvider {} diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index 3f93bbbde2f..4c5d2ab02e7 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -22,6 +22,9 @@ pub use block_id::*; mod block_hash; pub use block_hash::*; +mod chain; +pub use chain::*; + mod header; pub use header::*; @@ -31,9 +34,6 @@ pub use prune_checkpoint::*; mod receipts; pub use receipts::*; -mod requests; -pub use requests::*; - mod stage_checkpoint; pub use stage_checkpoint::*; @@ -49,6 +49,9 @@ pub use transactions::*; mod trie; pub use trie::*; +mod chain_info; +pub use chain_info::*; + mod withdrawals; pub use withdrawals::*; @@ -56,3 +59,17 @@ mod database_provider; pub use database_provider::*; pub mod noop; + +mod history; +pub use history::*; + +mod hashing; +pub use hashing::*; +mod stats; +pub use stats::*; + +mod legacy; +pub use legacy::*; + +mod primitives; +pub use primitives::*; diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 7325e2b7436..858c8e4c832 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -1,28 +1,81 @@ //! Various noop implementations for traits. 
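// Illustrative sketch (hypothetical test, not part of this patch): a
// `NoopProvider` stands in wherever a component demands a full provider stack
// but the test never reads real data; every query reports "not found" or empty.
#[cfg(test)]
mod noop_example {
    use super::*;
    use alloy_primitives::B256;

    #[test]
    fn reads_are_empty() {
        let provider = NoopProvider::mainnet(); // mainnet chain spec, `EthPrimitives`
        assert!(provider.block_by_hash(B256::ZERO).unwrap().is_none());
        assert!(provider.header(&B256::ZERO).unwrap().is_none());
    }
}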
-use std::sync::Arc; - -use crate::{BlockHashReader, BlockNumReader}; -use alloy_primitives::{BlockNumber, B256}; -use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec}; -use reth_storage_errors::provider::ProviderResult; +use crate::{ + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + BlockSource, ChangeSetReader, HashedPostStateProvider, HeaderProvider, NodePrimitivesProvider, + PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, + StateProofProvider, StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, + StorageRootProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, +}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, BlockId, BlockNumberOrTag, +}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + Address, BlockHash, BlockNumber, Bytes, StorageKey, StorageValue, TxHash, TxNumber, B256, U256, +}; +use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, MAINNET}; +use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices}; +use reth_primitives::{ + BlockWithSenders, EthPrimitives, SealedBlockFor, SealedBlockWithSenders, TransactionMeta, +}; +use reth_primitives_traits::{Account, Bytecode, NodePrimitives, SealedHeader}; +use reth_prune_types::{PruneCheckpoint, PruneSegment}; +use reth_stages_types::{StageCheckpoint, StageId}; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use reth_trie::{ + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, +}; +use std::{ + marker::PhantomData, + ops::{RangeBounds, RangeInclusive}, + sync::Arc, +}; /// Supports various api interfaces for testing purposes. -#[derive(Debug, Clone)] +#[derive(Debug)] #[non_exhaustive] -pub struct NoopBlockReader { +pub struct NoopProvider { chain_spec: Arc, + _phantom: PhantomData, +} + +impl NoopProvider { + /// Create a new instance for specific primitive types. + pub fn new(chain_spec: Arc) -> Self { + Self { chain_spec, _phantom: Default::default() } + } } -impl NoopBlockReader { +impl NoopProvider { /// Create a new instance of the `NoopBlockReader`. - pub const fn new(chain_spec: Arc) -> Self { - Self { chain_spec } + pub fn eth(chain_spec: Arc) -> Self { + Self { chain_spec, _phantom: Default::default() } + } +} + +impl NoopProvider { + /// Create a new instance of the [`NoopProvider`] with the mainnet chain spec. 
+ pub fn mainnet() -> Self { + Self::eth(MAINNET.clone()) + } +} + +impl Default for NoopProvider { + fn default() -> Self { + Self::mainnet() + } +} + +impl Clone for NoopProvider { + fn clone(&self) -> Self { + Self { chain_spec: Arc::clone(&self.chain_spec), _phantom: Default::default() } } } /// Noop implementation for testing purposes -impl BlockHashReader for NoopBlockReader { +impl BlockHashReader for NoopProvider { fn block_hash(&self, _number: u64) -> ProviderResult> { Ok(None) } @@ -36,7 +89,7 @@ impl BlockHashReader for NoopBlockReader { } } -impl BlockNumReader for NoopBlockReader { +impl BlockNumReader for NoopProvider { fn chain_info(&self) -> ProviderResult { Ok(ChainInfo::default()) } @@ -54,10 +107,519 @@ impl BlockNumReader for NoopBlockReader { } } -impl ChainSpecProvider for NoopBlockReader { +impl ChainSpecProvider + for NoopProvider +{ type ChainSpec = ChainSpec; fn chain_spec(&self) -> Arc { self.chain_spec.clone() } } + +impl BlockIdReader for NoopProvider { + fn pending_block_num_hash(&self) -> ProviderResult> { + Ok(None) + } + + fn safe_block_num_hash(&self) -> ProviderResult> { + Ok(None) + } + + fn finalized_block_num_hash(&self) -> ProviderResult> { + Ok(None) + } +} + +impl BlockReaderIdExt for NoopProvider { + fn block_by_id(&self, _id: BlockId) -> ProviderResult> { + Ok(None) + } + + fn sealed_header_by_id( + &self, + _id: BlockId, + ) -> ProviderResult>> { + Ok(None) + } + + fn header_by_id(&self, _id: BlockId) -> ProviderResult> { + Ok(None) + } + + fn ommers_by_id(&self, _id: BlockId) -> ProviderResult>> { + Ok(None) + } +} + +impl BlockReader for NoopProvider { + type Block = N::Block; + + fn find_block_by_hash( + &self, + _hash: B256, + _source: BlockSource, + ) -> ProviderResult> { + Ok(None) + } + + fn block(&self, _id: BlockHashOrNumber) -> ProviderResult> { + Ok(None) + } + + fn pending_block(&self) -> ProviderResult>> { + Ok(None) + } + + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { + Ok(None) + } + + fn pending_block_and_receipts( + &self, + ) -> ProviderResult, Vec)>> { + Ok(None) + } + + fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { + Ok(None) + } + + fn block_body_indices(&self, _num: u64) -> ProviderResult> { + Ok(None) + } + + fn block_with_senders( + &self, + _id: BlockHashOrNumber, + _transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + Ok(None) + } + + fn sealed_block_with_senders( + &self, + _id: BlockHashOrNumber, + _transaction_kind: TransactionVariant, + ) -> ProviderResult>> { + Ok(None) + } + + fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { + Ok(vec![]) + } + + fn block_with_senders_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult>> { + Ok(vec![]) + } + + fn sealed_block_with_senders_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult>> { + Ok(vec![]) + } +} + +impl TransactionsProvider for NoopProvider { + type Transaction = N::SignedTx; + + fn transaction_id(&self, _tx_hash: TxHash) -> ProviderResult> { + Ok(None) + } + + fn transaction_by_id(&self, _id: TxNumber) -> ProviderResult> { + Ok(None) + } + + fn transaction_by_id_unhashed( + &self, + _id: TxNumber, + ) -> ProviderResult> { + Ok(None) + } + + fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult> { + Ok(None) + } + + fn transaction_by_hash_with_meta( + &self, + _hash: TxHash, + ) -> ProviderResult> { + Ok(None) + } + + fn transaction_block(&self, _id: TxNumber) -> ProviderResult> { + todo!() + } + + fn transactions_by_block( + &self, + _block_id: BlockHashOrNumber, 
+ ) -> ProviderResult>> { + Ok(None) + } + + fn transactions_by_block_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult>> { + Ok(Vec::default()) + } + + fn transactions_by_tx_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { + Ok(Vec::default()) + } + + fn senders_by_tx_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { + Ok(Vec::default()) + } + + fn transaction_sender(&self, _id: TxNumber) -> ProviderResult> { + Ok(None) + } +} + +impl ReceiptProvider for NoopProvider { + type Receipt = N::Receipt; + + fn receipt(&self, _id: TxNumber) -> ProviderResult> { + Ok(None) + } + + fn receipt_by_hash(&self, _hash: TxHash) -> ProviderResult> { + Ok(None) + } + + fn receipts_by_block( + &self, + _block: BlockHashOrNumber, + ) -> ProviderResult>> { + Ok(None) + } + + fn receipts_by_tx_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { + Ok(vec![]) + } +} + +impl ReceiptProviderIdExt for NoopProvider {} + +impl HeaderProvider for NoopProvider { + type Header = N::BlockHeader; + + fn header(&self, _block_hash: &BlockHash) -> ProviderResult> { + Ok(None) + } + + fn header_by_number(&self, _num: u64) -> ProviderResult> { + Ok(None) + } + + fn header_td(&self, _hash: &BlockHash) -> ProviderResult> { + Ok(None) + } + + fn header_td_by_number(&self, _number: BlockNumber) -> ProviderResult> { + Ok(None) + } + + fn headers_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { + Ok(vec![]) + } + + fn sealed_header( + &self, + _number: BlockNumber, + ) -> ProviderResult>> { + Ok(None) + } + + fn sealed_headers_while( + &self, + _range: impl RangeBounds, + _predicate: impl FnMut(&SealedHeader) -> bool, + ) -> ProviderResult>> { + Ok(vec![]) + } +} + +impl AccountReader for NoopProvider { + fn basic_account(&self, _address: Address) -> ProviderResult> { + Ok(None) + } +} + +impl ChangeSetReader for NoopProvider { + fn account_block_changeset( + &self, + _block_number: BlockNumber, + ) -> ProviderResult> { + Ok(Vec::default()) + } +} + +impl StateRootProvider for NoopProvider { + fn state_root(&self, _state: HashedPostState) -> ProviderResult { + Ok(B256::default()) + } + + fn state_root_from_nodes(&self, _input: TrieInput) -> ProviderResult { + Ok(B256::default()) + } + + fn state_root_with_updates( + &self, + _state: HashedPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + Ok((B256::default(), TrieUpdates::default())) + } + + fn state_root_from_nodes_with_updates( + &self, + _input: TrieInput, + ) -> ProviderResult<(B256, TrieUpdates)> { + Ok((B256::default(), TrieUpdates::default())) + } +} + +impl StorageRootProvider for NoopProvider { + fn storage_root( + &self, + _address: Address, + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(B256::default()) + } + + fn storage_proof( + &self, + _address: Address, + slot: B256, + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(reth_trie::StorageProof::new(slot)) + } + + fn storage_multiproof( + &self, + _address: Address, + _slots: &[B256], + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(reth_trie::StorageMultiProof::empty()) + } +} + +impl StateProofProvider for NoopProvider { + fn proof( + &self, + _input: TrieInput, + address: Address, + _slots: &[B256], + ) -> ProviderResult { + Ok(AccountProof::new(address)) + } + + fn multiproof( + &self, + _input: TrieInput, + _targets: HashMap>, + ) -> ProviderResult { + Ok(MultiProof::default()) + } + + fn witness( + &self, + _input: TrieInput, + _target: HashedPostState, + ) -> ProviderResult> 
{ + Ok(HashMap::default()) + } +} + +impl HashedPostStateProvider for NoopProvider { + fn hashed_post_state(&self, _bundle_state: &revm::db::BundleState) -> HashedPostState { + HashedPostState::default() + } +} + +impl StateProvider for NoopProvider { + fn storage( + &self, + _account: Address, + _storage_key: StorageKey, + ) -> ProviderResult> { + Ok(None) + } + + fn bytecode_by_hash(&self, _code_hash: B256) -> ProviderResult> { + Ok(None) + } +} + +impl StateProviderFactory for NoopProvider { + fn latest(&self) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn state_by_block_number_or_tag( + &self, + number_or_tag: BlockNumberOrTag, + ) -> ProviderResult { + match number_or_tag { + BlockNumberOrTag::Latest => self.latest(), + BlockNumberOrTag::Finalized => { + // we can only get the finalized state by hash, not by num + let hash = + self.finalized_block_hash()?.ok_or(ProviderError::FinalizedBlockNotFound)?; + + // only look at historical state + self.history_by_block_hash(hash) + } + BlockNumberOrTag::Safe => { + // we can only get the safe state by hash, not by num + let hash = self.safe_block_hash()?.ok_or(ProviderError::SafeBlockNotFound)?; + + self.history_by_block_hash(hash) + } + BlockNumberOrTag::Earliest => self.history_by_block_number(0), + BlockNumberOrTag::Pending => self.pending(), + BlockNumberOrTag::Number(num) => self.history_by_block_number(num), + } + } + + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn pending(&self) -> ProviderResult { + Ok(Box::new(self.clone())) + } + + fn pending_state_by_hash(&self, _block_hash: B256) -> ProviderResult> { + Ok(Some(Box::new(self.clone()))) + } +} + +// impl EvmEnvProvider for NoopProvider { +// fn fill_env_at( +// &self, +// _cfg: &mut CfgEnvWithHandlerCfg, +// _block_env: &mut BlockEnv, +// _at: BlockHashOrNumber, +// _evm_config: EvmConfig, +// ) -> ProviderResult<()> +// where +// EvmConfig: ConfigureEvmEnv
<Header = Header>
, +// { +// Ok(()) +// } +// +// fn fill_env_with_header( +// &self, +// _cfg: &mut CfgEnvWithHandlerCfg, +// _block_env: &mut BlockEnv, +// _header: &Header, +// _evm_config: EvmConfig, +// ) -> ProviderResult<()> +// where +// EvmConfig: ConfigureEvmEnv<Header = Header>
, +// { +// Ok(()) +// } +// +// fn fill_cfg_env_at( +// &self, +// _cfg: &mut CfgEnvWithHandlerCfg, +// _at: BlockHashOrNumber, +// _evm_config: EvmConfig, +// ) -> ProviderResult<()> +// where +// EvmConfig: ConfigureEvmEnv<Header = Header>
, +// { +// Ok(()) +// } +// +// fn fill_cfg_env_with_header( +// &self, +// _cfg: &mut CfgEnvWithHandlerCfg, +// _header: &Header, +// _evm_config: EvmConfig, +// ) -> ProviderResult<()> +// where +// EvmConfig: ConfigureEvmEnv<Header = Header>
, +// { +// Ok(()) +// } +// } + +impl StageCheckpointReader for NoopProvider { + fn get_stage_checkpoint(&self, _id: StageId) -> ProviderResult> { + Ok(None) + } + + fn get_stage_checkpoint_progress(&self, _id: StageId) -> ProviderResult>> { + Ok(None) + } + + fn get_all_checkpoints(&self) -> ProviderResult> { + Ok(Vec::new()) + } +} + +impl WithdrawalsProvider for NoopProvider { + fn withdrawals_by_block( + &self, + _id: BlockHashOrNumber, + _timestamp: u64, + ) -> ProviderResult> { + Ok(None) + } + fn latest_withdrawal(&self) -> ProviderResult> { + Ok(None) + } +} + +impl PruneCheckpointReader for NoopProvider { + fn get_prune_checkpoint( + &self, + _segment: PruneSegment, + ) -> ProviderResult> { + Ok(None) + } + + fn get_prune_checkpoints(&self) -> ProviderResult> { + Ok(Vec::new()) + } +} + +impl NodePrimitivesProvider for NoopProvider { + type Primitives = N; +} diff --git a/crates/storage/storage-api/src/primitives.rs b/crates/storage/storage-api/src/primitives.rs new file mode 100644 index 00000000000..ae2a72e6e53 --- /dev/null +++ b/crates/storage/storage-api/src/primitives.rs @@ -0,0 +1,8 @@ +use reth_primitives::NodePrimitives; + +/// Provider implementation that knows configured [`NodePrimitives`]. +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait NodePrimitivesProvider { + /// The node primitive types. + type Primitives: NodePrimitives; +} diff --git a/crates/storage/storage-api/src/receipts.rs b/crates/storage/storage-api/src/receipts.rs index 06c6103ee9b..fdb70323856 100644 --- a/crates/storage/storage-api/src/receipts.rs +++ b/crates/storage/storage-api/src/receipts.rs @@ -1,33 +1,42 @@ use crate::BlockIdReader; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; -use alloy_primitives::{BlockNumber, TxHash, TxNumber}; -use reth_primitives::Receipt; +use alloy_primitives::{TxHash, TxNumber}; +use reth_primitives_traits::Receipt; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; -/// Client trait for fetching [Receipt] data . +/// A helper type alias to access [`ReceiptProvider::Receipt`]. +pub type ProviderReceipt
<P> = <P as ReceiptProvider>
::Receipt; + +/// Client trait for fetching receipt data. #[auto_impl::auto_impl(&, Arc)] pub trait ReceiptProvider: Send + Sync { + /// The receipt type. + type Receipt: Receipt; + /// Get receipt by transaction number /// /// Returns `None` if the transaction is not found. - fn receipt(&self, id: TxNumber) -> ProviderResult>; + fn receipt(&self, id: TxNumber) -> ProviderResult>; /// Get receipt by transaction hash. /// /// Returns `None` if the transaction is not found. - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult>; + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult>; /// Get receipts by block num or hash. /// /// Returns `None` if the block is not found. - fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>>; + fn receipts_by_block( + &self, + block: BlockHashOrNumber, + ) -> ProviderResult>>; /// Get receipts by tx range. fn receipts_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult>; + ) -> ProviderResult>; } /// Trait extension for `ReceiptProvider`, for types that implement `BlockId` conversion. @@ -40,10 +49,9 @@ pub trait ReceiptProvider: Send + Sync { /// so this trait can only be implemented for types that implement `BlockIdReader`. The /// `BlockIdReader` methods should be used to resolve `BlockId`s to block numbers or hashes, and /// retrieving the receipts should be done using the type's `ReceiptProvider` methods. -#[auto_impl::auto_impl(&, Arc)] pub trait ReceiptProviderIdExt: ReceiptProvider + BlockIdReader { /// Get receipt by block id - fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { let id = match block { BlockId::Hash(hash) => BlockHashOrNumber::Hash(hash.block_hash), BlockId::Number(num_tag) => { @@ -64,24 +72,7 @@ pub trait ReceiptProviderIdExt: ReceiptProvider + BlockIdReader { fn receipts_by_number_or_tag( &self, number_or_tag: BlockNumberOrTag, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.receipts_by_block_id(number_or_tag.into()) } } - -/// Writer trait for writing [`Receipt`] data. -pub trait ReceiptWriter { - /// Appends receipts for a block. - /// - /// # Parameters - /// - `first_tx_index`: The transaction number of the first receipt in the block. - /// - `block_number`: The block number to which the receipts belong. - /// - `receipts`: A vector of optional receipts in the block. If `None`, it means they were - /// pruned. - fn append_block_receipts( - &mut self, - first_tx_index: TxNumber, - block_number: BlockNumber, - receipts: Vec>, - ) -> ProviderResult<()>; -} diff --git a/crates/storage/storage-api/src/requests.rs b/crates/storage/storage-api/src/requests.rs deleted file mode 100644 index 02818c429b6..00000000000 --- a/crates/storage/storage-api/src/requests.rs +++ /dev/null @@ -1,14 +0,0 @@ -use alloy_eips::BlockHashOrNumber; -use reth_primitives::Requests; -use reth_storage_errors::provider::ProviderResult; - -/// Client trait for fetching EIP-7685 [Requests] for blocks. -#[auto_impl::auto_impl(&, Arc)] -pub trait RequestsProvider: Send + Sync { - /// Get withdrawals by block id. 
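The receipts hunk above swaps the hard-coded `Receipt` struct for an associated type, and `ProviderReceipt<P>` is simply a projection alias over it (`ProviderTx<P>` later in this diff follows the same pattern). A minimal sketch of that pattern, using a hypothetical `InMemoryProvider` with `String` receipts rather than reth's real types:

```rust
// Minimal sketch of the associated-type provider pattern; the provider and
// receipt types here are hypothetical stand-ins, not reth's definitions.
trait ReceiptProvider: Send + Sync {
    /// The receipt type this provider reads.
    type Receipt;

    fn receipt(&self, id: u64) -> Option<Self::Receipt>;
}

/// Projection alias: names the receipt type of any `P` without repeating
/// the full qualified path at every call site.
type ProviderReceipt<P> = <P as ReceiptProvider>::Receipt;

struct InMemoryProvider {
    receipts: Vec<String>,
}

impl ReceiptProvider for InMemoryProvider {
    type Receipt = String;

    fn receipt(&self, id: u64) -> Option<Self::Receipt> {
        self.receipts.get(id as usize).cloned()
    }
}

// Generic code can return `ProviderReceipt<P>` without ever naming `String`.
fn first_receipt<P: ReceiptProvider>(p: &P) -> Option<ProviderReceipt<P>> {
    p.receipt(0)
}

fn main() {
    let p = InMemoryProvider { receipts: vec!["ok".to_string()] };
    assert_eq!(first_receipt(&p), Some("ok".to_string()));
}
```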
- fn requests_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult>; -} diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index 9a3b855ff14..dc53319f4c5 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -2,12 +2,15 @@ use super::{ AccountReader, BlockHashReader, BlockIdReader, StateProofProvider, StateRootProvider, StorageRootProvider, }; -use alloy_eips::{BlockId, BlockNumHash, BlockNumberOrTag}; +use alloy_consensus::constants::KECCAK_EMPTY; +use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, StorageValue, B256, U256}; use auto_impl::auto_impl; -use reth_execution_types::ExecutionOutcome; -use reth_primitives::{Bytecode, KECCAK_EMPTY}; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use reth_primitives::Bytecode; +use reth_storage_errors::provider::ProviderResult; +use reth_trie::HashedPostState; +use reth_trie_db::StateCommitment; +use revm::db::states::BundleState; /// Type alias of boxed [`StateProvider`]. pub type StateProviderBox = Box; @@ -20,6 +23,7 @@ pub trait StateProvider: + StateRootProvider + StorageRootProvider + StateProofProvider + + HashedPostStateProvider + Send + Sync { @@ -81,6 +85,19 @@ pub trait StateProvider: } } +/// Trait implemented for database providers that can provide the [`StateCommitment`] type. +pub trait StateCommitmentProvider: Send + Sync { + /// The [`StateCommitment`] type that can be used to perform state commitment operations. + type StateCommitment: StateCommitment; +} + +/// Trait that provides the hashed state from various sources. +#[auto_impl(&, Arc, Box)] +pub trait HashedPostStateProvider: Send + Sync { + /// Returns the `HashedPostState` of the provided [`BundleState`]. + fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState; +} + /// Trait implemented for database providers that can be converted into a historical state provider. pub trait TryIntoHistoricalStateProvider { /// Returns a historical [`StateProvider`] indexed by the given historic block number. @@ -166,77 +183,3 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { /// If the block couldn't be found, returns `None`. fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult>; } - -/// Blockchain trait provider that gives access to the blockchain state that is not yet committed -/// (pending). -pub trait BlockchainTreePendingStateProvider: Send + Sync { - /// Returns a state provider that includes all state changes of the given (pending) block hash. - /// - /// In other words, the state provider will return the state after all transactions of the given - /// hash have been executed. - fn pending_state_provider( - &self, - block_hash: BlockHash, - ) -> ProviderResult> { - self.find_pending_state_provider(block_hash) - .ok_or(ProviderError::StateForHashNotFound(block_hash)) - } - - /// Returns state provider if a matching block exists. - fn find_pending_state_provider( - &self, - block_hash: BlockHash, - ) -> Option>; -} - -/// Provides data required for post-block execution. -/// -/// This trait offers methods to access essential post-execution data, including the state changes -/// in accounts and storage, as well as block hashes for both the pending and canonical chains. -/// -/// The trait includes: -/// * [`ExecutionOutcome`] - Captures all account and storage changes in the pending chain. 
-/// * Block hashes - Provides access to the block hashes of both the pending chain and canonical -/// blocks. -#[auto_impl(&, Box)] -pub trait ExecutionDataProvider: Send + Sync { - /// Return the execution outcome. - fn execution_outcome(&self) -> &ExecutionOutcome; - /// Return block hash by block number of pending or canonical chain. - fn block_hash(&self, block_number: BlockNumber) -> Option; -} - -impl ExecutionDataProvider for ExecutionOutcome { - fn execution_outcome(&self) -> &ExecutionOutcome { - self - } - - /// Always returns [None] because we don't have any information about the block header. - fn block_hash(&self, _block_number: BlockNumber) -> Option { - None - } -} - -/// Fork data needed for execution on it. -/// -/// It contains a canonical fork, the block on what pending chain was forked from. -#[auto_impl(&, Box)] -pub trait BlockExecutionForkProvider { - /// Return canonical fork, the block on what post state was forked from. - /// - /// Needed to create state provider. - fn canonical_fork(&self) -> BlockNumHash; -} - -/// Provides comprehensive post-execution state data required for further execution. -/// -/// This trait is used to create a state provider over the pending state and is a combination of -/// [`ExecutionDataProvider`] and [`BlockExecutionForkProvider`]. -/// -/// The pending state includes: -/// * `ExecutionOutcome`: Contains all changes to accounts and storage within the pending chain. -/// * Block hashes: Represents hashes of both the pending chain and canonical blocks. -/// * Canonical fork: Denotes the block from which the pending chain forked. -pub trait FullExecutionDataProvider: ExecutionDataProvider + BlockExecutionForkProvider {} - -impl FullExecutionDataProvider for T where T: ExecutionDataProvider + BlockExecutionForkProvider {} diff --git a/crates/storage/provider/src/traits/stats.rs b/crates/storage/storage-api/src/stats.rs similarity index 100% rename from crates/storage/provider/src/traits/stats.rs rename to crates/storage/storage-api/src/stats.rs diff --git a/crates/storage/storage-api/src/transactions.rs b/crates/storage/storage-api/src/transactions.rs index f2c44e9e140..ca2bcaeb469 100644 --- a/crates/storage/storage-api/src/transactions.rs +++ b/crates/storage/storage-api/src/transactions.rs @@ -1,7 +1,8 @@ use crate::{BlockNumReader, BlockReader}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{Address, BlockNumber, TxHash, TxNumber}; -use reth_primitives::{TransactionMeta, TransactionSigned, TransactionSignedNoHash}; +use reth_primitives::TransactionMeta; +use reth_primitives_traits::SignedTransaction; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::ops::{Range, RangeBounds, RangeInclusive}; @@ -18,9 +19,12 @@ pub enum TransactionVariant { WithHash, } -/// Client trait for fetching [TransactionSigned] related data. +/// Client trait for fetching transactions related data. #[auto_impl::auto_impl(&, Arc)] pub trait TransactionsProvider: BlockNumReader + Send + Sync { + /// The transaction type this provider reads. + type Transaction: Send + Sync + SignedTransaction; + /// Get internal transaction identifier by transaction hash. /// /// This is the inverse of [TransactionsProvider::transaction_by_id]. @@ -28,23 +32,21 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult>; /// Get transaction by id, computes hash every time so more expensive. 
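Several traits in these hunks (`HashedPostStateProvider`, `TrieWriter`, `NodePrimitivesProvider`) carry `#[auto_impl::auto_impl(&, Arc, Box)]`, which generates forwarding impls so the trait stays usable behind references and smart pointers. A self-contained sketch of what the attribute buys, assuming the `auto_impl` crate as a dependency and a made-up one-method trait:

```rust
use std::sync::Arc;

// Hypothetical one-method trait; the attribute generates impls for `&T`,
// `Arc<T>`, and `Box<T>` that forward to the inner `T`.
#[auto_impl::auto_impl(&, Arc, Box)]
trait HashedStateSource: Send + Sync {
    fn state_len(&self) -> usize;
}

struct Noop;

impl HashedStateSource for Noop {
    fn state_len(&self) -> usize {
        0
    }
}

fn use_source(src: impl HashedStateSource) -> usize {
    src.state_len()
}

fn main() {
    let shared = Arc::new(Noop);
    // Both calls work through the impls generated by `auto_impl`.
    assert_eq!(use_source(shared), 0);
    assert_eq!(use_source(&Noop), 0);
}
```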
- fn transaction_by_id(&self, id: TxNumber) -> ProviderResult>; + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult>; /// Get transaction by id without computing the hash. - fn transaction_by_id_no_hash( - &self, - id: TxNumber, - ) -> ProviderResult>; + fn transaction_by_id_unhashed(&self, id: TxNumber) + -> ProviderResult>; /// Get transaction by transaction hash. - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult>; + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult>; /// Get transaction by transaction hash and additional metadata of the block the transaction was /// mined in fn transaction_by_hash_with_meta( &self, hash: TxHash, - ) -> ProviderResult>; + ) -> ProviderResult>; /// Get transaction block number fn transaction_block(&self, id: TxNumber) -> ProviderResult>; @@ -53,19 +55,19 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { fn transactions_by_block( &self, block: BlockHashOrNumber, - ) -> ProviderResult>>; + ) -> ProviderResult>>; /// Get transactions by block range. fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> ProviderResult>>; + ) -> ProviderResult>>; /// Get transactions by tx range. fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> ProviderResult>; + ) -> ProviderResult>; /// Get Senders from a tx range. fn senders_by_tx_range( @@ -79,7 +81,10 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { fn transaction_sender(&self, id: TxNumber) -> ProviderResult>; } -/// Client trait for fetching additional [TransactionSigned] related data. +/// A helper type alias to access [`TransactionsProvider::Transaction`]. +pub type ProviderTx
<P> = <P as TransactionsProvider>
::Transaction; + +/// Client trait for fetching additional transactions related data. #[auto_impl::auto_impl(&, Arc)] pub trait TransactionsProviderExt: BlockReader + Send + Sync { /// Get transactions range by block range. diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index f7d41066d06..ee1ca1de180 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -4,7 +4,8 @@ use alloy_primitives::{ }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, + updates::{StorageTrieUpdates, TrieUpdates}, + AccountProof, HashedPostState, HashedStorage, MultiProof, StorageMultiProof, StorageProof, TrieInput, }; @@ -56,6 +57,14 @@ pub trait StorageRootProvider: Send + Sync { slot: B256, hashed_storage: HashedStorage, ) -> ProviderResult; + + /// Returns the storage multiproof for target slots. + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult; } /// A type that can generate state proof on top of a given post state. @@ -85,3 +94,33 @@ pub trait StateProofProvider: Send + Sync { target: HashedPostState, ) -> ProviderResult>; } + +/// Trie Writer +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait TrieWriter: Send + Sync { + /// Writes trie updates to the database. + /// + /// Returns the number of entries modified. + fn write_trie_updates(&self, trie_updates: &TrieUpdates) -> ProviderResult; +} + +/// Storage Trie Writer +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait StorageTrieWriter: Send + Sync { + /// Writes storage trie updates from the given storage trie map. + /// + /// First sorts the storage trie updates by the hashed address key, writing in sorted order. + /// + /// Returns the number of entries modified. + fn write_storage_trie_updates( + &self, + storage_tries: &HashMap, + ) -> ProviderResult; + + /// Writes storage trie updates for the given hashed address. + fn write_individual_storage_trie_updates( + &self, + hashed_address: B256, + updates: &StorageTrieUpdates, + ) -> ProviderResult; +} diff --git a/crates/storage/storage-api/src/withdrawals.rs b/crates/storage/storage-api/src/withdrawals.rs index 2de69b34eb6..47aa4944410 100644 --- a/crates/storage/storage-api/src/withdrawals.rs +++ b/crates/storage/storage-api/src/withdrawals.rs @@ -1,5 +1,7 @@ -use alloy_eips::BlockHashOrNumber; -use reth_primitives::{Withdrawal, Withdrawals}; +use alloy_eips::{ + eip4895::{Withdrawal, Withdrawals}, + BlockHashOrNumber, +}; use reth_storage_errors::provider::ProviderResult; /// Client trait for fetching [Withdrawal] related data. diff --git a/crates/storage/zstd-compressors/Cargo.toml b/crates/storage/zstd-compressors/Cargo.toml new file mode 100644 index 00000000000..357684f32fc --- /dev/null +++ b/crates/storage/zstd-compressors/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "reth-zstd-compressors" +version.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true +description = "Commonly used zstd compressors." 
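The crate's `lib.rs`, moved in below, bundles pre-trained dictionaries with `zstd::bulk` compressors. A hedged round-trip sketch of that underlying API, with an inline byte string standing in for the `include_bytes!`-ed dictionary files (zstd accepts raw-content dictionaries, and level 0 selects zstd's default):

```rust
use zstd::bulk::{Compressor, Decompressor};

fn main() -> std::io::Result<()> {
    // Stand-in for the shipped dictionary files; a real dictionary is
    // produced by training zstd on sample data (e.g. encoded receipts).
    let dictionary: &[u8] = b"example-dictionary-bytes";

    let payload = b"some rlp-encoded receipt bytes, repeated bytes bytes bytes";

    // Level 0 means "use zstd's default level", matching the usage above.
    let mut compressor = Compressor::with_dictionary(0, dictionary)?;
    let compressed = compressor.compress(payload)?;

    // Decompression must load the same dictionary the data was compressed with.
    let mut decompressor = Decompressor::with_dictionary(dictionary)?;
    // The second argument is an upper bound on the decompressed size.
    let roundtrip = decompressor.decompress(&compressed, payload.len())?;

    assert_eq!(roundtrip.as_slice(), payload);
    Ok(())
}
```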
+ +[lints] +workspace = true + +[dependencies] +zstd = { workspace = true, features = ["experimental"] } + +[features] +default = ["std"] +std = [] \ No newline at end of file diff --git a/crates/primitives/src/compression/receipt_dictionary.bin b/crates/storage/zstd-compressors/receipt_dictionary.bin similarity index 100% rename from crates/primitives/src/compression/receipt_dictionary.bin rename to crates/storage/zstd-compressors/receipt_dictionary.bin diff --git a/crates/primitives/src/compression/mod.rs b/crates/storage/zstd-compressors/src/lib.rs similarity index 62% rename from crates/primitives/src/compression/mod.rs rename to crates/storage/zstd-compressors/src/lib.rs index ecceafc2068..d5167120bc7 100644 --- a/crates/primitives/src/compression/mod.rs +++ b/crates/storage/zstd-compressors/src/lib.rs @@ -1,41 +1,61 @@ +//! Commonly used zstd [`Compressor`] and [`Decompressor`] for reth types. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +use crate::alloc::string::ToString; use alloc::vec::Vec; -use core::cell::RefCell; use zstd::bulk::{Compressor, Decompressor}; /// Compression/Decompression dictionary for `Receipt`. -pub static RECEIPT_DICTIONARY: &[u8] = include_bytes!("./receipt_dictionary.bin"); +pub static RECEIPT_DICTIONARY: &[u8] = include_bytes!("../receipt_dictionary.bin"); /// Compression/Decompression dictionary for `Transaction`. -pub static TRANSACTION_DICTIONARY: &[u8] = include_bytes!("./transaction_dictionary.bin"); +pub static TRANSACTION_DICTIONARY: &[u8] = include_bytes!("../transaction_dictionary.bin"); -// We use `thread_local` compressors and decompressors because dictionaries can be quite big, and -// zstd-rs recommends to use one context/compressor per thread #[cfg(feature = "std")] -std::thread_local! { - /// Thread Transaction compressor. - pub static TRANSACTION_COMPRESSOR: RefCell> = RefCell::new( - Compressor::with_dictionary(0, TRANSACTION_DICTIONARY) - .expect("failed to initialize transaction compressor"), - ); - - /// Thread Transaction decompressor. - pub static TRANSACTION_DECOMPRESSOR: RefCell = - RefCell::new(ReusableDecompressor::new( - Decompressor::with_dictionary(TRANSACTION_DICTIONARY) - .expect("failed to initialize transaction decompressor"), - )); - - /// Thread receipt compressor. - pub static RECEIPT_COMPRESSOR: RefCell> = RefCell::new( - Compressor::with_dictionary(0, RECEIPT_DICTIONARY) - .expect("failed to initialize receipt compressor"), - ); - - /// Thread receipt decompressor. - pub static RECEIPT_DECOMPRESSOR: RefCell = - RefCell::new(ReusableDecompressor::new( - Decompressor::with_dictionary(RECEIPT_DICTIONARY) - .expect("failed to initialize receipt decompressor"), - )); +pub use locals::*; +#[cfg(feature = "std")] +mod locals { + use super::*; + use core::cell::RefCell; + + // We use `thread_local` compressors and decompressors because dictionaries can be quite big, + // and zstd-rs recommends to use one context/compressor per thread + std::thread_local! { + /// Thread Transaction compressor. 
+ pub static TRANSACTION_COMPRESSOR: RefCell> = RefCell::new( + Compressor::with_dictionary(0, TRANSACTION_DICTIONARY) + .expect("failed to initialize transaction compressor"), + ); + + /// Thread Transaction decompressor. + pub static TRANSACTION_DECOMPRESSOR: RefCell = + RefCell::new(ReusableDecompressor::new( + Decompressor::with_dictionary(TRANSACTION_DICTIONARY) + .expect("failed to initialize transaction decompressor"), + )); + + /// Thread receipt compressor. + pub static RECEIPT_COMPRESSOR: RefCell> = RefCell::new( + Compressor::with_dictionary(0, RECEIPT_DICTIONARY) + .expect("failed to initialize receipt compressor"), + ); + + /// Thread receipt decompressor. + pub static RECEIPT_DECOMPRESSOR: RefCell = + RefCell::new(ReusableDecompressor::new( + Decompressor::with_dictionary(RECEIPT_DICTIONARY) + .expect("failed to initialize receipt decompressor"), + )); + } } /// Fn creates tx [`Compressor`] diff --git a/crates/primitives/src/compression/transaction_dictionary.bin b/crates/storage/zstd-compressors/transaction_dictionary.bin similarity index 100% rename from crates/primitives/src/compression/transaction_dictionary.bin rename to crates/storage/zstd-compressors/transaction_dictionary.bin diff --git a/crates/tasks/Cargo.toml b/crates/tasks/Cargo.toml index 82c80c0932b..68d8e958979 100644 --- a/crates/tasks/Cargo.toml +++ b/crates/tasks/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # async tokio = { workspace = true, features = ["sync", "rt"] } -tracing-futures = "0.2" +tracing-futures.workspace = true futures-util.workspace = true # metrics diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index 28b5eaba9ff..340e925ec56 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -111,6 +111,13 @@ dyn_clone::clone_trait_object!(TaskSpawner); #[non_exhaustive] pub struct TokioTaskExecutor; +impl TokioTaskExecutor { + /// Converts the instance to a boxed [`TaskSpawner`]. + pub fn boxed(self) -> Box { + Box::new(self) + } +} + impl TaskSpawner for TokioTaskExecutor { fn spawn(&self, fut: BoxFuture<'static, ()>) -> JoinHandle<()> { tokio::task::spawn(fut) diff --git a/crates/tokio-util/src/event_sender.rs b/crates/tokio-util/src/event_sender.rs index a4e9815388c..16208ee19c0 100644 --- a/crates/tokio-util/src/event_sender.rs +++ b/crates/tokio-util/src/event_sender.rs @@ -40,3 +40,96 @@ impl EventSender { EventStream::new(self.sender.subscribe()) } } + +#[cfg(test)] +mod tests { + use super::*; + use tokio::{ + task, + time::{timeout, Duration}, + }; + use tokio_stream::StreamExt; + + #[tokio::test] + async fn test_event_broadcast_to_listener() { + let sender = EventSender::default(); + + // Create a listener for the events + let mut listener = sender.new_listener(); + + // Broadcast an event + sender.notify("event1"); + + // Check if the listener receives the event + let received_event = listener.next().await; + assert_eq!(received_event, Some("event1")); + } + + #[tokio::test] + async fn test_event_no_listener() { + let sender = EventSender::default(); + + // Broadcast an event with no listeners + sender.notify("event2"); + + // Ensure it doesn't panic or fail when no listeners are present + // (this test passes if it runs without errors). 
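The `test_event_no_listener` case above hinges on `notify` staying infallible with zero subscribers. A presumed-shape sketch of such an `EventSender` over `tokio::sync::broadcast` (the real type lives in `reth-tokio-util`; the names and internals here are assumptions), where the send error tokio returns for zero receivers is deliberately discarded:

```rust
use tokio::sync::broadcast;

// Assumed minimal shape of the event sender exercised by the tests above.
#[derive(Clone)]
struct EventSender<T> {
    sender: broadcast::Sender<T>,
}

impl<T: Clone + Send + 'static> EventSender<T> {
    fn new(capacity: usize) -> Self {
        let (sender, _receiver) = broadcast::channel(capacity);
        Self { sender }
    }

    /// Broadcasts an event; a failed send only means nobody is listening.
    fn notify(&self, event: T) {
        let _ = self.sender.send(event);
    }

    fn new_listener(&self) -> broadcast::Receiver<T> {
        self.sender.subscribe()
    }
}

#[tokio::main]
async fn main() {
    let sender = EventSender::new(8);

    // No listeners yet: notify must not panic or surface an error.
    sender.notify("ignored");

    let mut listener = sender.new_listener();
    sender.notify("event1");
    assert_eq!(listener.recv().await.unwrap(), "event1");
}
```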
+ } + + #[tokio::test] + async fn test_multiple_listeners_receive_event() { + let sender = EventSender::default(); + + // Create two listeners + let mut listener1 = sender.new_listener(); + let mut listener2 = sender.new_listener(); + + // Broadcast an event + sender.notify("event3"); + + // Both listeners should receive the same event + let event1 = listener1.next().await; + let event2 = listener2.next().await; + + assert_eq!(event1, Some("event3")); + assert_eq!(event2, Some("event3")); + } + + #[tokio::test] + async fn test_bounded_channel_size() { + // Create a channel with size 2 + let sender = EventSender::new(2); + + // Create a listener + let mut listener = sender.new_listener(); + + // Broadcast 3 events, which exceeds the channel size + sender.notify("event4"); + sender.notify("event5"); + sender.notify("event6"); + + // Only the last two should be received due to the size limit + let received_event1 = listener.next().await; + let received_event2 = listener.next().await; + + assert_eq!(received_event1, Some("event5")); + assert_eq!(received_event2, Some("event6")); + } + + #[tokio::test] + async fn test_event_listener_timeout() { + let sender = EventSender::default(); + let mut listener = sender.new_listener(); + + // Broadcast an event asynchronously + task::spawn(async move { + tokio::time::sleep(Duration::from_millis(50)).await; + sender.notify("delayed_event"); + }); + + // Use a timeout to ensure that the event is received within a certain time + let result = timeout(Duration::from_millis(100), listener.next()).await; + assert!(result.is_ok()); + assert_eq!(result.unwrap(), Some("delayed_event")); + } +} diff --git a/crates/tokio-util/src/ratelimit.rs b/crates/tokio-util/src/ratelimit.rs index 16e403f10aa..33a9c5273d8 100644 --- a/crates/tokio-util/src/ratelimit.rs +++ b/crates/tokio-util/src/ratelimit.rs @@ -8,7 +8,7 @@ use std::{ }; use tokio::time::Sleep; -/// Given a [Rate] this type enforces a rate limit. +/// Given a [`Rate`] this type enforces a rate limit. 
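To make the window semantics exercised by the new tests below concrete (allow `limit` acquisitions per `period`, then wait out the remainder), here is a stand-alone illustrative sketch; it is not reth's poll-based implementation:

```rust
use std::time::Duration;
use tokio::time::{sleep, Instant};

// Illustrative fixed-window limiter with the same observable behavior the
// tests below check: `limit` acquisitions succeed per `period`, then wait.
struct SimpleRateLimit {
    limit: u32,
    period: Duration,
    used: u32,
    window_start: Instant,
}

impl SimpleRateLimit {
    fn new(limit: u32, period: Duration) -> Self {
        Self { limit, period, used: 0, window_start: Instant::now() }
    }

    /// Waits until the current window has a free slot, then consumes it.
    async fn acquire(&mut self) {
        if self.window_start.elapsed() >= self.period {
            // A full period has passed: start a fresh window.
            self.window_start = Instant::now();
            self.used = 0;
        }
        if self.used >= self.limit {
            // Window exhausted: sleep out its remainder, then reset.
            sleep(self.period.saturating_sub(self.window_start.elapsed())).await;
            self.window_start = Instant::now();
            self.used = 0;
        }
        self.used += 1;
    }
}

#[tokio::main]
async fn main() {
    let mut limit = SimpleRateLimit::new(2, Duration::from_millis(50));
    for _ in 0..4 {
        // The third and fourth calls block until the next window opens.
        limit.acquire().await;
    }
}
```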
#[derive(Debug)] pub struct RateLimit { rate: Rate, @@ -122,6 +122,7 @@ impl Rate { #[cfg(test)] mod tests { use super::*; + use tokio::time; #[tokio::test] async fn test_rate_limit() { @@ -157,4 +158,118 @@ mod tests { }) .await; } + + #[tokio::test] + async fn test_rate_limit_initialization() { + let rate = Rate::new(5, Duration::from_secs(1)); + let limit = RateLimit::new(rate); + + // Verify the limit is correctly set + assert_eq!(limit.limit(), 5); + } + + #[tokio::test] + async fn test_rate_limit_allows_within_limit() { + let mut limit = RateLimit::new(Rate::new(3, Duration::from_millis(1))); + + // Check that the rate limiter is ready initially + for _ in 0..3 { + poll_fn(|cx| { + // Should be ready within the limit + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + // Signal that a request has been made + limit.tick(); + } + + // After 3 requests, it should be pending (rate limit hit) + poll_fn(|cx| { + // Exceeded limit, should now be limited + assert!(limit.poll_ready(cx).is_pending()); + Poll::Ready(()) + }) + .await; + } + + #[tokio::test] + async fn test_rate_limit_enforces_wait_after_limit() { + let mut limit = RateLimit::new(Rate::new(2, Duration::from_millis(500))); + + // Consume the limit + for _ in 0..2 { + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + limit.tick(); + } + + // Should now be limited (pending) + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_pending()); + Poll::Ready(()) + }) + .await; + + // Wait until the rate period elapses + time::sleep(limit.rate.duration()).await; + + // Now it should be ready again after the wait + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + } + + #[tokio::test] + async fn test_wait_method_awaits_readiness() { + let mut limit = RateLimit::new(Rate::new(1, Duration::from_millis(500))); + + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + + limit.tick(); + + // The limit should now be exceeded + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_pending()); + Poll::Ready(()) + }) + .await; + + // The `wait` method should block until the rate period elapses + limit.wait().await; + + // After `wait`, it should now be ready + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + } + + #[tokio::test] + #[should_panic(expected = "RateLimit limited; poll_ready must be called first")] + async fn test_tick_panics_when_limited() { + let mut limit = RateLimit::new(Rate::new(1, Duration::from_secs(1))); + + poll_fn(|cx| { + assert!(limit.poll_ready(cx).is_ready()); + Poll::Ready(()) + }) + .await; + + // Consume the limit + limit.tick(); + + // Attempting to tick again without poll_ready being ready should panic + limit.tick(); + } } diff --git a/crates/tracing/Cargo.toml b/crates/tracing/Cargo.toml index 59631365d60..d944b5eeeb6 100644 --- a/crates/tracing/Cargo.toml +++ b/crates/tracing/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] tracing.workspace = true -tracing-subscriber = { version = "0.3", default-features = false, features = ["env-filter", "fmt", "json"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["env-filter", "fmt", "ansi", "json"] } tracing-appender.workspace = true tracing-journald = "0.3" tracing-logfmt = "0.3.3" diff --git a/crates/tracing/src/formatter.rs b/crates/tracing/src/formatter.rs index 1322377f1c9..202a92136d2 100644 --- a/crates/tracing/src/formatter.rs +++ 
b/crates/tracing/src/formatter.rs @@ -54,7 +54,7 @@ impl LogFormat { .unwrap_or_else(|_| // If `RUST_LOG_TARGET` is not set, show target in logs only if the max enabled // level is higher than INFO (DEBUG, TRACE) - filter.max_level_hint().map_or(true, |max_level| max_level > tracing::Level::INFO)); + filter.max_level_hint().is_none_or(|max_level| max_level > tracing::Level::INFO)); match self { Self::Json => { diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 887543b521a..21463318816 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -17,6 +17,8 @@ reth-chain-state.workspace = true reth-chainspec.workspace = true reth-eth-wire-types.workspace = true reth-primitives = { workspace = true, features = ["c-kzg", "secp256k1"] } +reth-primitives-traits.workspace = true +reth-payload-util.workspace = true reth-execution-types.workspace = true reth-fs-util.workspace = true reth-storage-api.workspace = true @@ -72,9 +74,46 @@ serde_json.workspace = true [features] default = ["serde"] -serde = ["dep:serde"] -test-utils = ["rand", "paste", "serde"] -arbitrary = ["proptest", "reth-primitives/arbitrary", "proptest-arbitrary-interop"] +serde = [ + "dep:serde", + "reth-execution-types/serde", + "reth-eth-wire-types/serde", + "reth-provider/serde", + "alloy-consensus/serde", + "alloy-eips/serde", + "alloy-primitives/serde", + "bitflags/serde", + "parking_lot/serde", + "rand?/serde", + "revm/serde", + "smallvec/serde", + "reth-primitives-traits/serde", +] +test-utils = [ + "rand", + "paste", + "serde", + "reth-chain-state/test-utils", + "reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-provider/test-utils", + "revm/test-utils", + "reth-primitives-traits/test-utils", +] +arbitrary = [ + "proptest", + "reth-primitives/arbitrary", + "proptest-arbitrary-interop", + "reth-chainspec/arbitrary", + "reth-eth-wire-types/arbitrary", + "alloy-consensus/arbitrary", + "alloy-eips/arbitrary", + "alloy-primitives/arbitrary", + "bitflags/arbitrary", + "revm/arbitrary", + "reth-primitives-traits/arbitrary", + "smallvec/arbitrary", +] [[bench]] name = "truncate" diff --git a/crates/transaction-pool/benches/truncate.rs b/crates/transaction-pool/benches/truncate.rs index 22e45763054..1ca6f98499c 100644 --- a/crates/transaction-pool/benches/truncate.rs +++ b/crates/transaction-pool/benches/truncate.rs @@ -66,7 +66,7 @@ fn generate_many_transactions(senders: usize, max_depth: usize) -> Vec().new_tree(&mut runner).unwrap().current() % max_depth + 1; diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 96119a0f817..67c36a65998 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -1,11 +1,9 @@ //! 
A simple diskstore for blobs use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize}; -use alloy_eips::eip4844::BlobAndProofV1; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::{TxHash, B256}; -use alloy_rlp::{Decodable, Encodable}; use parking_lot::{Mutex, RwLock}; -use reth_primitives::BlobTransactionSidecar; use schnellru::{ByLength, LruMap}; use std::{collections::HashSet, fmt, fs, io, path::PathBuf, sync::Arc}; use tracing::{debug, trace}; @@ -77,10 +75,7 @@ impl BlobStore for DiskFileBlobStore { } fn cleanup(&self) -> BlobStoreCleanupStat { - let txs_to_delete = { - let mut txs_to_delete = self.inner.txs_to_delete.write(); - std::mem::take(&mut *txs_to_delete) - }; + let txs_to_delete = std::mem::take(&mut *self.inner.txs_to_delete.write()); let mut stat = BlobStoreCleanupStat::default(); let mut subsize = 0; debug!(target:"txpool::blob", num_blobs=%txs_to_delete.len(), "Removing blobs from disk"); @@ -104,7 +99,7 @@ impl BlobStore for DiskFileBlobStore { stat } - fn get(&self, tx: B256) -> Result, BlobStoreError> { + fn get(&self, tx: B256) -> Result>, BlobStoreError> { self.inner.get_one(tx) } @@ -115,14 +110,17 @@ impl BlobStore for DiskFileBlobStore { fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { if txs.is_empty() { return Ok(Vec::new()) } self.inner.get_all(txs) } - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { if txs.is_empty() { return Ok(Vec::new()) } @@ -165,7 +163,7 @@ impl BlobStore for DiskFileBlobStore { struct DiskFileBlobStoreInner { blob_dir: PathBuf, - blob_cache: Mutex>, + blob_cache: Mutex, ByLength>>, size_tracker: BlobStoreSize, file_lock: RwLock<()>, txs_to_delete: RwLock>, @@ -204,9 +202,9 @@ impl DiskFileBlobStoreInner { /// Ensures blob is in the blob cache and written to the disk. fn insert_one(&self, tx: B256, data: BlobTransactionSidecar) -> Result<(), BlobStoreError> { - let mut buf = Vec::with_capacity(data.fields_len()); - data.encode(&mut buf); - self.blob_cache.lock().insert(tx, data); + let mut buf = Vec::with_capacity(data.rlp_encoded_fields_length()); + data.rlp_encode_fields(&mut buf); + self.blob_cache.lock().insert(tx, Arc::new(data)); let size = self.write_one_encoded(tx, &buf)?; self.size_tracker.add_size(size); @@ -219,8 +217,8 @@ impl DiskFileBlobStoreInner { let raw = txs .iter() .map(|(tx, data)| { - let mut buf = Vec::with_capacity(data.fields_len()); - data.encode(&mut buf); + let mut buf = Vec::with_capacity(data.rlp_encoded_fields_length()); + data.rlp_encode_fields(&mut buf); (self.blob_disk_file(*tx), buf) }) .collect::>(); @@ -228,7 +226,7 @@ impl DiskFileBlobStoreInner { { let mut cache = self.blob_cache.lock(); for (tx, data) in txs { - cache.insert(tx, data); + cache.insert(tx, Arc::new(data)); } } let mut add = 0; @@ -279,15 +277,19 @@ impl DiskFileBlobStoreInner { } /// Retrieves the blob for the given transaction hash from the blob cache or disk. 
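The recurring change in this file stores `Arc<BlobTransactionSidecar>` in the cache and returns `Arc`s, so a hit clones a pointer instead of the sidecar payload (a single EIP-4844 blob is already 128 KiB). A reduced sketch of the pattern with a plain `HashMap` standing in for the `LruMap`:

```rust
use std::{collections::HashMap, sync::Arc};

// Reduced stand-in for the blob cache: values are shared via `Arc`.
struct ArcCache<V> {
    map: HashMap<u64, Arc<V>>,
}

impl<V> ArcCache<V> {
    fn new() -> Self {
        Self { map: HashMap::new() }
    }

    fn insert(&mut self, key: u64, value: V) {
        self.map.insert(key, Arc::new(value));
    }

    /// A hit clones the `Arc` (a refcount bump), not the cached bytes.
    fn get(&self, key: u64) -> Option<Arc<V>> {
        self.map.get(&key).cloned()
    }
}

fn main() {
    let mut cache: ArcCache<Vec<u8>> = ArcCache::new();
    // Stand-in for a blob sidecar: each EIP-4844 blob alone is 128 KiB.
    cache.insert(1, vec![0u8; 128 * 1024]);

    let a = cache.get(1).unwrap();
    let b = cache.get(1).unwrap();
    // Two handles, one allocation.
    assert!(Arc::ptr_eq(&a, &b));
}
```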
- fn get_one(&self, tx: B256) -> Result, BlobStoreError> { + fn get_one(&self, tx: B256) -> Result>, BlobStoreError> { if let Some(blob) = self.blob_cache.lock().get(&tx) { return Ok(Some(blob.clone())) } let blob = self.read_one(tx)?; + if let Some(blob) = &blob { - self.blob_cache.lock().insert(tx, blob.clone()); + let blob_arc = Arc::new(blob.clone()); + self.blob_cache.lock().insert(tx, blob_arc.clone()); + return Ok(Some(blob_arc)) } - Ok(blob) + + Ok(None) } /// Returns the path to the blob file for the given transaction hash. @@ -312,7 +314,7 @@ impl DiskFileBlobStoreInner { } } }; - BlobTransactionSidecar::decode(&mut data.as_slice()) + BlobTransactionSidecar::rlp_decode_fields(&mut data.as_slice()) .map(Some) .map_err(BlobStoreError::DecodeError) } @@ -322,7 +324,7 @@ impl DiskFileBlobStoreInner { self.read_many_raw(txs) .into_iter() .filter_map(|(tx, data)| { - BlobTransactionSidecar::decode(&mut data.as_slice()) + BlobTransactionSidecar::rlp_decode_fields(&mut data.as_slice()) .map(|sidecar| (tx, sidecar)) .ok() }) @@ -375,7 +377,7 @@ impl DiskFileBlobStoreInner { fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { let mut res = Vec::with_capacity(txs.len()); let mut cache_miss = Vec::new(); { @@ -397,8 +399,9 @@ impl DiskFileBlobStoreInner { } let mut cache = self.blob_cache.lock(); for (tx, data) in from_disk { - cache.insert(tx, data.clone()); - res.push((tx, data)); + let arc = Arc::new(data.clone()); + cache.insert(tx, arc.clone()); + res.push((tx, arc.clone())); } Ok(res) @@ -408,14 +411,13 @@ impl DiskFileBlobStoreInner { /// /// Returns an error if there are any missing blobs. #[inline] - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { - let mut res = Vec::with_capacity(txs.len()); - for tx in txs { - let blob = self.get_one(tx)?.ok_or_else(|| BlobStoreError::MissingSidecar(tx))?; - res.push(blob) - } - - Ok(res) + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { + txs.into_iter() + .map(|tx| self.get_one(tx)?.ok_or(BlobStoreError::MissingSidecar(tx))) + .collect() } } @@ -519,14 +521,17 @@ mod tests { let blobs = rng_blobs(10); let all_hashes = blobs.iter().map(|(tx, _)| *tx).collect::>(); store.insert_all(blobs.clone()).unwrap(); + // all cached for (tx, blob) in &blobs { assert!(store.is_cached(tx)); - assert_eq!(store.get(*tx).unwrap().unwrap(), *blob); + let b = store.get(*tx).unwrap().map(Arc::unwrap_or_clone).unwrap(); + assert_eq!(b, *blob); } + let all = store.get_all(all_hashes.clone()).unwrap(); for (tx, blob) in all { - assert!(blobs.contains(&(tx, blob)), "missing blob {tx:?}"); + assert!(blobs.contains(&(tx, Arc::unwrap_or_clone(blob))), "missing blob {tx:?}"); } assert!(store.contains(all_hashes[0]).unwrap()); @@ -546,4 +551,136 @@ mod tests { assert_eq!(store.data_size_hint(), Some(0)); assert_eq!(store.inner.size_tracker.num_blobs.load(Ordering::Relaxed), 0); } + + #[test] + fn disk_insert_and_retrieve() { + let (store, _dir) = tmp_store(); + + let (tx, blob) = rng_blobs(1).into_iter().next().unwrap(); + store.insert(tx, blob.clone()).unwrap(); + + assert!(store.is_cached(&tx)); + let retrieved_blob = store.get(tx).unwrap().map(Arc::unwrap_or_clone).unwrap(); + assert_eq!(retrieved_blob, blob); + } + + #[test] + fn disk_delete_blob() { + let (store, _dir) = tmp_store(); + + let (tx, blob) = rng_blobs(1).into_iter().next().unwrap(); + store.insert(tx, blob).unwrap(); + assert!(store.is_cached(&tx)); + + store.delete(tx).unwrap(); + 
assert!(store.inner.txs_to_delete.read().contains(&tx)); + store.cleanup(); + + let result = store.get(tx).unwrap(); + assert_eq!( + result, + Some(Arc::new(BlobTransactionSidecar { + blobs: vec![], + commitments: vec![], + proofs: vec![] + })) + ); + } + + #[test] + fn disk_insert_all_and_delete_all() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(5); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs.clone()).unwrap(); + + for (tx, _) in &blobs { + assert!(store.is_cached(tx)); + } + + store.delete_all(txs.clone()).unwrap(); + store.cleanup(); + + for tx in txs { + let result = store.get(tx).unwrap(); + assert_eq!( + result, + Some(Arc::new(BlobTransactionSidecar { + blobs: vec![], + commitments: vec![], + proofs: vec![] + })) + ); + } + } + + #[test] + fn disk_get_all_blobs() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(3); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs.clone()).unwrap(); + + let retrieved_blobs = store.get_all(txs.clone()).unwrap(); + for (tx, blob) in retrieved_blobs { + assert!(blobs.contains(&(tx, Arc::unwrap_or_clone(blob)))); + } + + store.delete_all(txs).unwrap(); + store.cleanup(); + } + + #[test] + fn disk_get_exact_blobs_success() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(3); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs.clone()).unwrap(); + + let retrieved_blobs = store.get_exact(txs).unwrap(); + for (retrieved_blob, (_, original_blob)) in retrieved_blobs.into_iter().zip(blobs) { + assert_eq!(Arc::unwrap_or_clone(retrieved_blob), original_blob); + } + } + + #[test] + fn disk_get_exact_blobs_failure() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(2); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs).unwrap(); + + // Try to get a blob that was never inserted + let missing_tx = TxHash::random(); + let result = store.get_exact(vec![txs[0], missing_tx]); + assert!(result.is_err()); + } + + #[test] + fn disk_data_size_hint() { + let (store, _dir) = tmp_store(); + assert_eq!(store.data_size_hint(), Some(0)); + + let blobs = rng_blobs(2); + store.insert_all(blobs).unwrap(); + assert!(store.data_size_hint().unwrap() > 0); + } + + #[test] + fn disk_cleanup_stat() { + let (store, _dir) = tmp_store(); + + let blobs = rng_blobs(3); + let txs = blobs.iter().map(|(tx, _)| *tx).collect::>(); + store.insert_all(blobs).unwrap(); + + store.delete_all(txs).unwrap(); + let stat = store.cleanup(); + assert_eq!(stat.delete_succeed, 3); + assert_eq!(stat.delete_failed, 0); + } } diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs index 15160c2c3fa..0ab9c0d7af0 100644 --- a/crates/transaction-pool/src/blobstore/mem.rs +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -1,7 +1,5 @@ -use crate::blobstore::{ - BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize, BlobTransactionSidecar, -}; -use alloy_eips::eip4844::BlobAndProofV1; +use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobStoreSize}; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::B256; use parking_lot::RwLock; use std::{collections::HashMap, sync::Arc}; @@ -15,7 +13,7 @@ pub struct InMemoryBlobStore { #[derive(Debug, Default)] struct InMemoryBlobStoreInner { /// Storage for all blob data. 
- store: RwLock>, + store: RwLock>>, size_tracker: BlobStoreSize, } @@ -75,43 +73,28 @@ impl BlobStore for InMemoryBlobStore { } // Retrieves the decoded blob data for the given transaction hash. - fn get(&self, tx: B256) -> Result, BlobStoreError> { - let store = self.inner.store.read(); - Ok(store.get(&tx).cloned()) + fn get(&self, tx: B256) -> Result>, BlobStoreError> { + Ok(self.inner.store.read().get(&tx).cloned()) } fn contains(&self, tx: B256) -> Result { - let store = self.inner.store.read(); - Ok(store.contains_key(&tx)) + Ok(self.inner.store.read().contains_key(&tx)) } fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError> { - let mut items = Vec::with_capacity(txs.len()); + ) -> Result)>, BlobStoreError> { let store = self.inner.store.read(); - for tx in txs { - if let Some(item) = store.get(&tx) { - items.push((tx, item.clone())); - } - } - - Ok(items) + Ok(txs.into_iter().filter_map(|tx| store.get(&tx).map(|item| (tx, item.clone()))).collect()) } - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { - let mut items = Vec::with_capacity(txs.len()); + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { let store = self.inner.store.read(); - for tx in txs { - if let Some(item) = store.get(&tx) { - items.push(item.clone()); - } else { - return Err(BlobStoreError::MissingSidecar(tx)) - } - } - - Ok(items) + Ok(txs.into_iter().filter_map(|tx| store.get(&tx).cloned()).collect()) } fn get_by_versioned_hashes( @@ -150,7 +133,7 @@ impl BlobStore for InMemoryBlobStore { /// Removes the given blob from the store and returns the size of the blob that was removed. #[inline] -fn remove_size(store: &mut HashMap, tx: &B256) -> usize { +fn remove_size(store: &mut HashMap>, tx: &B256) -> usize { store.remove(tx).map(|rem| rem.size()).unwrap_or_default() } @@ -159,11 +142,11 @@ fn remove_size(store: &mut HashMap, tx: &B256) -> /// We don't need to handle the size updates for replacements because transactions are unique. #[inline] fn insert_size( - store: &mut HashMap, + store: &mut HashMap>, tx: B256, blob: BlobTransactionSidecar, ) -> usize { let add = blob.size(); - store.insert(tx, blob); + store.insert(tx, Arc::new(blob)); add } diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index ee98e3eed85..a21cea6e06c 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -1,14 +1,16 @@ //! Storage for blob data of EIP4844 transactions. -use alloy_eips::eip4844::BlobAndProofV1; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::B256; pub use disk::{DiskFileBlobStore, DiskFileBlobStoreConfig, OpenDiskFileBlobStore}; pub use mem::InMemoryBlobStore; pub use noop::NoopBlobStore; -use reth_primitives::BlobTransactionSidecar; use std::{ fmt, - sync::atomic::{AtomicUsize, Ordering}, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, }; pub use tracker::{BlobStoreCanonTracker, BlobStoreUpdates}; @@ -44,7 +46,7 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { fn cleanup(&self) -> BlobStoreCleanupStat; /// Retrieves the decoded blob data for the given transaction hash. - fn get(&self, tx: B256) -> Result, BlobStoreError>; + fn get(&self, tx: B256) -> Result>, BlobStoreError>; /// Checks if the given transaction hash is in the blob store. 
fn contains(&self, tx: B256) -> Result; @@ -58,13 +60,14 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { fn get_all( &self, txs: Vec, - ) -> Result, BlobStoreError>; + ) -> Result)>, BlobStoreError>; /// Returns the exact [`BlobTransactionSidecar`] for the given transaction hashes in the exact /// order they were requested. /// /// Returns an error if any of the blobs are not found in the blob store. - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError>; + fn get_exact(&self, txs: Vec) + -> Result>, BlobStoreError>; /// Return the [`BlobTransactionSidecar`]s for a list of blob versioned hashes. fn get_by_versioned_hashes( @@ -149,7 +152,7 @@ impl PartialEq for BlobStoreSize { } /// Statistics for the cleanup operation. -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct BlobStoreCleanupStat { /// the number of successfully deleted blobs pub delete_succeed: usize, diff --git a/crates/transaction-pool/src/blobstore/noop.rs b/crates/transaction-pool/src/blobstore/noop.rs index 0e99858bd62..943a6eeda95 100644 --- a/crates/transaction-pool/src/blobstore/noop.rs +++ b/crates/transaction-pool/src/blobstore/noop.rs @@ -1,6 +1,7 @@ -use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError, BlobTransactionSidecar}; -use alloy_eips::eip4844::BlobAndProofV1; +use crate::blobstore::{BlobStore, BlobStoreCleanupStat, BlobStoreError}; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::B256; +use std::sync::Arc; /// A blobstore implementation that does nothing #[derive(Clone, Copy, Debug, PartialOrd, PartialEq, Eq, Default)] @@ -28,7 +29,7 @@ impl BlobStore for NoopBlobStore { BlobStoreCleanupStat::default() } - fn get(&self, _tx: B256) -> Result, BlobStoreError> { + fn get(&self, _tx: B256) -> Result>, BlobStoreError> { Ok(None) } @@ -39,11 +40,14 @@ impl BlobStore for NoopBlobStore { fn get_all( &self, _txs: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { Ok(vec![]) } - fn get_exact(&self, txs: Vec) -> Result, BlobStoreError> { + fn get_exact( + &self, + txs: Vec, + ) -> Result>, BlobStoreError> { if txs.is_empty() { return Ok(vec![]) } diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index e6041fa12e1..817114fcf25 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -1,7 +1,10 @@ //! Support for maintaining the blob pool. +use alloy_consensus::Typed2718; +use alloy_eips::eip2718::Encodable2718; use alloy_primitives::{BlockNumber, B256}; use reth_execution_types::ChainBlocks; +use reth_primitives_traits::{Block, BlockBody, SignedTransaction}; use std::collections::BTreeMap; /// The type that is used to track canonical blob transactions. @@ -37,13 +40,17 @@ impl BlobStoreCanonTracker { /// /// Note: In case this is a chain that's part of a reorg, this replaces previously tracked /// blocks. 
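The rewritten `add_new_chain_blocks` below feeds a structure that is conceptually a `BTreeMap<BlockNumber, Vec<TxHash>>`: re-inserting a height replaces the reorged entry, and finalization drains everything at or below the finalized height so those blobs can be deleted from the store. A sketch under those assumptions (simplified hash type, not the tracker's real API):

```rust
use std::collections::BTreeMap;

type TxHash = [u8; 32];

#[derive(Default)]
struct BlobTracker {
    blob_txs_in_blocks: BTreeMap<u64, Vec<TxHash>>,
}

impl BlobTracker {
    /// Re-inserting a height replaces the old entry, which is exactly what a
    /// reorged block needs.
    fn add_block(&mut self, number: u64, blob_tx_hashes: Vec<TxHash>) {
        self.blob_txs_in_blocks.insert(number, blob_tx_hashes);
    }

    /// Drains and returns all blob tx hashes at or below the finalized height.
    fn on_finalized(&mut self, finalized: u64) -> Vec<TxHash> {
        // `split_off` keeps everything above the finalized height...
        let keep = self.blob_txs_in_blocks.split_off(&(finalized + 1));
        // ...so the old map, swapped out here, holds the finalized entries.
        let drained = std::mem::replace(&mut self.blob_txs_in_blocks, keep);
        drained.into_values().flatten().collect()
    }
}

fn main() {
    let mut tracker = BlobTracker::default();
    tracker.add_block(10, vec![[1; 32]]);
    tracker.add_block(11, vec![[2; 32]]);
    assert_eq!(tracker.on_finalized(10), vec![[1; 32]]);
    assert_eq!(tracker.on_finalized(11), vec![[2; 32]]);
}
```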
- pub fn add_new_chain_blocks(&mut self, blocks: &ChainBlocks<'_>) { + pub fn add_new_chain_blocks(&mut self, blocks: &ChainBlocks<'_, B>) + where + B: Block>, + { let blob_txs = blocks.iter().map(|(num, block)| { let iter = block .body .transactions() - .filter(|tx| tx.transaction.is_eip4844()) - .map(|tx| tx.hash); + .iter() + .filter(|tx| tx.tx_type().is_eip4844()) + .map(|tx| tx.trie_hash()); (*num, iter) }); self.add_blocks(blob_txs); @@ -81,6 +88,14 @@ pub enum BlobStoreUpdates { #[cfg(test)] mod tests { + use alloy_consensus::Header; + use alloy_primitives::PrimitiveSignature as Signature; + use reth_execution_types::Chain; + use reth_primitives::{ + BlockBody, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, + TransactionSigned, + }; + use super::*; #[test] @@ -101,4 +116,85 @@ mod tests { BlobStoreUpdates::Finalized(block2.into_iter().chain(block3).collect::>()) ); } + + #[test] + fn test_add_new_chain_blocks() { + let mut tracker = BlobStoreCanonTracker::default(); + + // Create sample transactions + let tx1_hash = B256::random(); // EIP-4844 transaction + let tx2_hash = B256::random(); // EIP-4844 transaction + let tx3_hash = B256::random(); // Non-EIP-4844 transaction + + // Creating a first block with EIP-4844 transactions + let block1 = SealedBlockWithSenders { + block: SealedBlock { + header: SealedHeader::new( + Header { number: 10, ..Default::default() }, + B256::random(), + ), + body: BlockBody { + transactions: vec![ + TransactionSigned::new( + Transaction::Eip4844(Default::default()), + Signature::test_signature(), + tx1_hash, + ), + TransactionSigned::new( + Transaction::Eip4844(Default::default()), + Signature::test_signature(), + tx2_hash, + ), + // Another transaction that is not EIP-4844 + TransactionSigned::new( + Transaction::Eip7702(Default::default()), + Signature::test_signature(), + B256::random(), + ), + ], + ..Default::default() + }, + }, + ..Default::default() + }; + + // Creating a second block with EIP-1559 and EIP-2930 transactions + // Note: This block does not contain any EIP-4844 transactions + let block2 = SealedBlockWithSenders { + block: SealedBlock { + header: SealedHeader::new( + Header { number: 11, ..Default::default() }, + B256::random(), + ), + body: BlockBody { + transactions: vec![ + TransactionSigned::new( + Transaction::Eip1559(Default::default()), + Signature::test_signature(), + tx3_hash, + ), + TransactionSigned::new( + Transaction::Eip2930(Default::default()), + Signature::test_signature(), + tx2_hash, + ), + ], + ..Default::default() + }, + }, + ..Default::default() + }; + + // Extract blocks from the chain + let chain: Chain = Chain::new(vec![block1, block2], Default::default(), None); + let blocks = chain.into_inner().0; + + // Add new chain blocks to the tracker + tracker.add_new_chain_blocks(&blocks); + + // Tx1 and tx2 should be in the block containing EIP-4844 transactions + assert_eq!(tracker.blob_txs_in_blocks.get(&10).unwrap(), &vec![tx1_hash, tx2_hash]); + // No transactions should be in the block containing non-EIP-4844 transactions + assert!(tracker.blob_txs_in_blocks.get(&11).unwrap().is_empty()); + } } diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index 1b4b010a8e1..a9603215c83 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -2,11 +2,9 @@ use crate::{ pool::{NEW_TX_LISTENER_BUFFER_SIZE, PENDING_TX_LISTENER_BUFFER_SIZE}, PoolSize, TransactionOrigin, }; +use alloy_consensus::constants::EIP4844_TX_TYPE_ID; +use 
alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use alloy_primitives::Address; -use reth_primitives::{ - constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}, - EIP4844_TX_TYPE_ID, -}; use std::{collections::HashSet, ops::Mul}; /// Guarantees max transactions for one sender, compatible with geth/erigon @@ -29,6 +27,9 @@ pub const DEFAULT_PRICE_BUMP: u128 = 10; /// This enforces that a blob transaction requires a 100% price bump to be replaced pub const REPLACE_BLOB_PRICE_BUMP: u128 = 100; +/// Default maximum new transactions for broadcasting. +pub const MAX_NEW_PENDING_TXS_NOTIFICATIONS: usize = 200; + /// Configuration options for the Transaction pool. #[derive(Debug, Clone)] pub struct PoolConfig { @@ -55,6 +56,8 @@ pub struct PoolConfig { pub pending_tx_listener_buffer_size: usize, /// Bound on number of new transactions from `reth_network::TransactionsManager` to buffer. pub new_tx_listener_buffer_size: usize, + /// How many new pending transactions to buffer and send iterators in progress. + pub max_new_pending_txs_notifications: usize, } impl PoolConfig { @@ -82,6 +85,7 @@ impl Default for PoolConfig { local_transactions_config: Default::default(), pending_tx_listener_buffer_size: PENDING_TX_LISTENER_BUFFER_SIZE, new_tx_listener_buffer_size: NEW_TX_LISTENER_BUFFER_SIZE, + max_new_pending_txs_notifications: MAX_NEW_PENDING_TXS_NOTIFICATIONS, } } } @@ -192,15 +196,15 @@ impl LocalTransactionConfig { /// Returns whether the local addresses vector contains the given address. #[inline] - pub fn contains_local_address(&self, address: Address) -> bool { - self.local_addresses.contains(&address) + pub fn contains_local_address(&self, address: &Address) -> bool { + self.local_addresses.contains(address) } /// Returns whether the particular transaction should be considered local. /// /// This always returns false if the local exemptions are disabled. 
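Further down in this hunk, `is_local` and `contains_local_address` switch to taking `&Address`, avoiding a 20-byte copy per call; the check itself is just origin-or-allowlist. A simplified sketch with a plain array standing in for `Address` and a boolean for `TransactionOrigin::Local`:

```rust
use std::collections::HashSet;

type Address = [u8; 20];

// Simplified stand-in for the local transaction config changed below.
struct LocalConfig {
    no_exemptions: bool,
    local_addresses: HashSet<Address>,
}

impl LocalConfig {
    /// Passing `&Address` avoids copying the 20-byte address at each call.
    fn is_local(&self, origin_is_local: bool, sender: &Address) -> bool {
        if self.no_exemptions {
            // Exemptions disabled: nothing is treated as local.
            return false;
        }
        origin_is_local || self.local_addresses.contains(sender)
    }
}

fn main() {
    let mut local_addresses = HashSet::new();
    local_addresses.insert([1u8; 20]);
    let cfg = LocalConfig { no_exemptions: false, local_addresses };

    assert!(cfg.is_local(false, &[1u8; 20])); // sender is allowlisted
    assert!(!cfg.is_local(false, &[2u8; 20])); // external and unknown
    assert!(cfg.is_local(true, &[2u8; 20])); // local origin always counts
}
```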
#[inline] - pub fn is_local(&self, origin: TransactionOrigin, sender: Address) -> bool { + pub fn is_local(&self, origin: TransactionOrigin, sender: &Address) -> bool { if self.no_local_exemptions() { return false } @@ -282,10 +286,10 @@ mod tests { let config = LocalTransactionConfig { local_addresses, ..Default::default() }; // Should contain the inserted address - assert!(config.contains_local_address(address)); + assert!(config.contains_local_address(&address)); // Should not contain another random address - assert!(!config.contains_local_address(Address::new([2; 20]))); + assert!(!config.contains_local_address(&Address::new([2; 20]))); } #[test] @@ -298,7 +302,7 @@ mod tests { }; // Should return false as no exemptions is set to true - assert!(!config.is_local(TransactionOrigin::Local, address)); + assert!(!config.is_local(TransactionOrigin::Local, &address)); } #[test] @@ -311,13 +315,13 @@ mod tests { LocalTransactionConfig { no_exemptions: false, local_addresses, ..Default::default() }; // Should return true as the transaction origin is local - assert!(config.is_local(TransactionOrigin::Local, Address::new([2; 20]))); - assert!(config.is_local(TransactionOrigin::Local, address)); + assert!(config.is_local(TransactionOrigin::Local, &Address::new([2; 20]))); + assert!(config.is_local(TransactionOrigin::Local, &address)); // Should return true as the address is in the local_addresses set - assert!(config.is_local(TransactionOrigin::External, address)); + assert!(config.is_local(TransactionOrigin::External, &address)); // Should return false as the address is not in the local_addresses set - assert!(!config.is_local(TransactionOrigin::External, Address::new([2; 20]))); + assert!(!config.is_local(TransactionOrigin::External, &Address::new([2; 20]))); } #[test] diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index a4766a89d5c..f71bf018807 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -1,7 +1,8 @@ //! Transaction pool errors +use alloy_eips::eip4844::BlobTransactionValidationError; use alloy_primitives::{Address, TxHash, U256}; -use reth_primitives::{BlobTransactionValidationError, InvalidTransactionError}; +use reth_primitives::InvalidTransactionError; /// Transaction pool result type. 
pub type PoolResult = Result; diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 2cffcd33fa8..0e069291e73 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -151,12 +151,12 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] use crate::{identifier::TransactionId, pool::PoolInner}; -use alloy_eips::eip4844::BlobAndProofV1; +use alloy_eips::eip4844::{BlobAndProofV1, BlobTransactionSidecar}; use alloy_primitives::{Address, TxHash, B256, U256}; use aquamarine as _; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; -use reth_primitives::{BlobTransactionSidecar, PooledTransactionsElement}; +use reth_primitives::RecoveredTx; use reth_storage_api::StateProviderFactory; use std::{collections::HashSet, sync::Arc}; use tokio::sync::mpsc::Receiver; @@ -166,9 +166,9 @@ pub use crate::{ blobstore::{BlobStore, BlobStoreError}, config::{ LocalTransactionConfig, PoolConfig, PriceBumpConfig, SubPoolLimit, DEFAULT_PRICE_BUMP, - DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, REPLACE_BLOB_PRICE_BUMP, - TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, - TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, + DEFAULT_TXPOOL_ADDITIONAL_VALIDATION_TASKS, MAX_NEW_PENDING_TXS_NOTIFICATIONS, + REPLACE_BLOB_PRICE_BUMP, TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, + TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT, TXPOOL_SUBPOOL_MAX_TXS_DEFAULT, }, error::PoolResult, ordering::{CoinbaseTipOrdering, Priority, TransactionOrdering}, @@ -409,18 +409,22 @@ where &self, max: usize, ) -> Vec>> { - self.pooled_transactions().into_iter().take(max).collect() + self.pool.pooled_transactions_max(max) } fn get_pooled_transaction_elements( &self, tx_hashes: Vec, limit: GetPooledTransactionLimit, - ) -> Vec { + ) -> Vec<<::Transaction as PoolTransaction>::Pooled> { self.pool.get_pooled_transaction_elements(tx_hashes, limit) } - fn get_pooled_transaction_element(&self, tx_hash: TxHash) -> Option { + fn get_pooled_transaction_element( + &self, + tx_hash: TxHash, + ) -> Option::Transaction as PoolTransaction>::Pooled>> + { self.pool.get_pooled_transaction_element(tx_hash) } @@ -430,13 +434,6 @@ where Box::new(self.pool.best_transactions()) } - fn best_transactions_with_base_fee( - &self, - base_fee: u64, - ) -> Box>>> { - self.pool.best_transactions_with_attributes(BestTransactionsAttributes::base_fee(base_fee)) - } - fn best_transactions_with_attributes( &self, best_transactions_attributes: BestTransactionsAttributes, @@ -448,6 +445,13 @@ where self.pool.pending_transactions() } + fn pending_transactions_max( + &self, + max: usize, + ) -> Vec>> { + self.pool.pending_transactions_max(max) + } + fn queued_transactions(&self) -> Vec>> { self.pool.queued_transactions() } @@ -503,6 +507,27 @@ where self.pool.get_transactions_by_sender(sender) } + fn get_pending_transactions_with_predicate( + &self, + predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>> { + self.pool.pending_transactions_with_predicate(predicate) + } + + fn get_pending_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>> { + self.pool.get_pending_transactions_by_sender(sender) + } + + fn get_queued_transactions_by_sender( + &self, + sender: Address, + ) -> Vec>> { + self.pool.get_queued_transactions_by_sender(sender) + } + fn get_highest_transaction_by_sender( &self, sender: Address, @@ -547,21 +572,24 @@ where self.pool.unique_senders() } - fn get_blob(&self, tx_hash: TxHash) -> Result, BlobStoreError> { + fn get_blob( + &self, + tx_hash: TxHash, 
+ ) -> Result>, BlobStoreError> { self.pool.blob_store().get(tx_hash) } fn get_all_blobs( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { self.pool.blob_store().get_all(tx_hashes) } fn get_all_blobs_exact( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result>, BlobStoreError> { self.pool.blob_store().get_exact(tx_hashes) } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 23a8d0dc66a..96971b487f0 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -4,10 +4,13 @@ use crate::{ blobstore::{BlobStoreCanonTracker, BlobStoreUpdates}, error::PoolError, metrics::MaintainPoolMetrics, - traits::{CanonicalStateUpdate, TransactionPool, TransactionPoolExt}, - BlockInfo, PoolTransaction, + traits::{CanonicalStateUpdate, EthPoolTransaction, TransactionPool, TransactionPoolExt}, + BlockInfo, PoolTransaction, PoolUpdateKind, }; -use alloy_primitives::{Address, BlockHash, BlockNumber, Sealable}; +use alloy_consensus::BlockHeader; +use alloy_eips::BlockNumberOrTag; +use alloy_primitives::{Address, BlockHash, BlockNumber}; +use alloy_rlp::Encodable; use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, @@ -17,9 +20,9 @@ use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_execution_types::ChangedAccount; use reth_fs_util::FsPathError; use reth_primitives::{ - BlockNumberOrTag, PooledTransactionsElementEcRecovered, SealedHeader, TransactionSigned, - TransactionSignedEcRecovered, + transaction::SignedTransactionIntoRecoveredExt, SealedHeader, TransactionSigned, }; +use reth_primitives_traits::SignedTransaction; use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskSpawner; use std::{ @@ -27,6 +30,7 @@ use std::{ collections::HashSet, hash::{Hash, Hasher}, path::{Path, PathBuf}, + sync::Arc, }; use tokio::sync::oneshot; use tracing::{debug, error, info, trace, warn}; @@ -75,7 +79,7 @@ pub fn maintain_transaction_pool_future( ) -> BoxFuture<'static, ()> where Client: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + Send + 'static, - P: TransactionPoolExt + 'static, + P: TransactionPoolExt> + 'static, St: Stream + Send + Unpin + 'static, Tasks: TaskSpawner + 'static, { @@ -96,7 +100,7 @@ pub async fn maintain_transaction_pool( config: MaintainPoolConfig, ) where Client: StateProviderFactory + BlockReaderIdExt + ChainSpecProvider + Clone + Send + 'static, - P: TransactionPoolExt + 'static, + P: TransactionPoolExt> + 'static, St: Stream + Send + Unpin + 'static, Tasks: TaskSpawner + 'static, { @@ -104,16 +108,16 @@ pub async fn maintain_transaction_pool( let MaintainPoolConfig { max_update_depth, max_reload_accounts, .. 
} = config; // ensure the pool points to latest state if let Ok(Some(latest)) = client.header_by_number_or_tag(BlockNumberOrTag::Latest) { - let sealed = latest.seal_slow(); - let (header, seal) = sealed.into_parts(); - let latest = SealedHeader::new(header, seal); + let latest = SealedHeader::seal(latest); let chain_spec = client.chain_spec(); let info = BlockInfo { - block_gas_limit: latest.gas_limit, + block_gas_limit: latest.gas_limit(), last_seen_block_hash: latest.hash(), - last_seen_block_number: latest.number, + last_seen_block_number: latest.number(), pending_basefee: latest - .next_block_base_fee(chain_spec.base_fee_params_at_timestamp(latest.timestamp + 12)) + .next_block_base_fee( + chain_spec.base_fee_params_at_timestamp(latest.timestamp() + 12), + ) .unwrap_or_default(), pending_blob_fee: latest.next_block_blob_fee(), }; @@ -317,7 +321,7 @@ pub async fn maintain_transaction_pool( // find all transactions that were mined in the old chain but not in the new chain let pruned_old_transactions = old_blocks .transactions_ecrecovered() - .filter(|tx| !new_mined_transactions.contains(&tx.hash)) + .filter(|tx| !new_mined_transactions.contains(tx.tx_hash())) .filter_map(|tx| { if tx.is_eip4844() { // reorged blobs no longer include the blob, which is necessary for @@ -325,20 +329,17 @@ pub async fn maintain_transaction_pool( // been validated previously, we still need the blob in order to // accurately set the transaction's // encoded-length which is propagated over the network. - pool.get_blob(tx.hash) + pool.get_blob(TransactionSigned::hash(&tx)) .ok() .flatten() + .map(Arc::unwrap_or_clone) .and_then(|sidecar| { - PooledTransactionsElementEcRecovered::try_from_blob_transaction( +
<P as TransactionPool>::Transaction::try_from_eip4844(
                            tx, sidecar,
                        )
-                        .ok()
-                    })
-                    .map(|tx| {
-                        <P as TransactionPool>::Transaction::from_pooled(tx.into())
                    })
            } else {
-                <P as TransactionPool>::Transaction::try_from_consensus(tx.into()).ok()
+                <P as TransactionPool>
::Transaction::try_from_consensus(tx).ok() } }) .collect::>(); @@ -351,6 +352,7 @@ pub async fn maintain_transaction_pool( changed_accounts, // all transactions mined in the new chain need to be removed from the pool mined_transactions: new_blocks.transaction_hashes().collect(), + update_kind: PoolUpdateKind::Reorg, }; pool.on_canonical_state_change(update); @@ -433,6 +435,7 @@ pub async fn maintain_transaction_pool( pending_block_blob_fee, changed_accounts, mined_transactions, + update_kind: PoolUpdateKind::Commit, }; pool.on_canonical_state_change(update); @@ -457,7 +460,7 @@ impl FinalizedBlockTracker { let finalized = finalized_block?; self.last_finalized_block .replace(finalized) - .map_or(true, |last| last < finalized) + .is_none_or(|last| last < finalized) .then_some(finalized) } } @@ -553,7 +556,7 @@ async fn load_and_reinsert_transactions
<P>
( file_path: &Path, ) -> Result<(), TransactionsBackupError> where - P: TransactionPool, + P: TransactionPool>, { if !file_path.exists() { return Ok(()) @@ -566,16 +569,17 @@ where return Ok(()) } - let txs_signed: Vec = alloy_rlp::Decodable::decode(&mut data.as_slice())?; + let txs_signed: Vec<::Consensus> = + alloy_rlp::Decodable::decode(&mut data.as_slice())?; let pool_transactions = txs_signed .into_iter() .filter_map(|tx| tx.try_ecrecovered()) .filter_map(|tx| { // Filter out errors - ::try_from_consensus(tx.into()).ok() + ::try_from_consensus(tx).ok() }) - .collect::>(); + .collect(); let outcome = pool.add_transactions(crate::TransactionOrigin::Local, pool_transactions).await; @@ -586,7 +590,7 @@ where fn save_local_txs_backup
<P>
(pool: P, file_path: &Path) where - P: TransactionPool, + P: TransactionPool>, { let local_transactions = pool.get_local_transactions(); if local_transactions.is_empty() { @@ -596,11 +600,7 @@ where let local_transactions = local_transactions .into_iter() - .map(|tx| { - let recovered: TransactionSignedEcRecovered = - tx.transaction.clone().into_consensus().into(); - recovered.into_signed() - }) + .map(|tx| tx.transaction.clone_into_consensus().into_signed()) .collect::>(); let num_txs = local_transactions.len(); @@ -640,7 +640,7 @@ pub async fn backup_local_transactions_task
<P>
( pool: P, config: LocalTransactionBackupConfig, ) where - P: TransactionPool + Clone, + P: TransactionPool> + Clone, { let Some(transactions_path) = config.transactions_path else { // nothing to do diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 4464ae1fc8a..8d880994aa9 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -13,13 +13,16 @@ use crate::{ validate::ValidTransaction, AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPoolTransaction, EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, - PooledTransactionsElement, PropagatedTransactions, TransactionEvents, TransactionOrigin, - TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, + PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, + TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, +}; +use alloy_eips::{ + eip1559::ETHEREUM_BLOCK_GAS_LIMIT, + eip4844::{BlobAndProofV1, BlobTransactionSidecar}, }; -use alloy_eips::eip4844::BlobAndProofV1; use alloy_primitives::{Address, TxHash, B256, U256}; use reth_eth_wire_types::HandleMempoolData; -use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, BlobTransactionSidecar}; +use reth_primitives::RecoveredTx; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use tokio::sync::{mpsc, mpsc::Receiver}; @@ -133,14 +136,14 @@ impl TransactionPool for NoopTransactionPool { &self, _tx_hashes: Vec, _limit: GetPooledTransactionLimit, - ) -> Vec { + ) -> Vec<::Pooled> { vec![] } fn get_pooled_transaction_element( &self, _tx_hash: TxHash, - ) -> Option { + ) -> Option::Pooled>> { None } @@ -150,13 +153,6 @@ impl TransactionPool for NoopTransactionPool { Box::new(std::iter::empty()) } - fn best_transactions_with_base_fee( - &self, - _: u64, - ) -> Box>>> { - Box::new(std::iter::empty()) - } - fn best_transactions_with_attributes( &self, _: BestTransactionsAttributes, @@ -168,6 +164,13 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn pending_transactions_max( + &self, + _max: usize, + ) -> Vec>> { + vec![] + } + fn queued_transactions(&self) -> Vec>> { vec![] } @@ -220,6 +223,27 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn get_pending_transactions_with_predicate( + &self, + _predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>> { + vec![] + } + + fn get_pending_transactions_by_sender( + &self, + _sender: Address, + ) -> Vec>> { + vec![] + } + + fn get_queued_transactions_by_sender( + &self, + _sender: Address, + ) -> Vec>> { + vec![] + } + fn get_highest_transaction_by_sender( &self, _sender: Address, @@ -261,21 +285,24 @@ impl TransactionPool for NoopTransactionPool { Default::default() } - fn get_blob(&self, _tx_hash: TxHash) -> Result, BlobStoreError> { + fn get_blob( + &self, + _tx_hash: TxHash, + ) -> Result>, BlobStoreError> { Ok(None) } fn get_all_blobs( &self, _tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result)>, BlobStoreError> { Ok(vec![]) } fn get_all_blobs_exact( &self, tx_hashes: Vec, - ) -> Result, BlobStoreError> { + ) -> Result>, BlobStoreError> { if tx_hashes.is_empty() { return Ok(vec![]) } diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index 52f25a9db8d..a07df7cd509 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -1,14 +1,17 @@ use crate::{ - identifier::TransactionId, 
pool::pending::PendingTransaction, PoolTransaction,
-    TransactionOrdering, ValidPoolTransaction,
+    error::{Eip4844PoolTransactionError, InvalidPoolTransactionError},
+    identifier::{SenderId, TransactionId},
+    pool::pending::PendingTransaction,
+    PoolTransaction, TransactionOrdering, ValidPoolTransaction,
 };
-use alloy_primitives::B256 as TxHash;
+use alloy_primitives::Address;
 use core::fmt;
+use reth_payload_util::PayloadTransactions;
+use reth_primitives::{InvalidTransactionError, RecoveredTx};
 use std::{
-    collections::{BTreeMap, BTreeSet, HashSet},
+    collections::{BTreeMap, BTreeSet, HashSet, VecDeque},
     sync::Arc,
 };
-
 use tokio::sync::broadcast::{error::TryRecvError, Receiver};
 use tracing::debug;
@@ -25,8 +28,8 @@ pub(crate) struct BestTransactionsWithFees<T: TransactionOrdering> {
 }

 impl<T: TransactionOrdering> crate::traits::BestTransactions for BestTransactionsWithFees<T> {
-    fn mark_invalid(&mut self, tx: &Self::Item) {
-        BestTransactions::mark_invalid(&mut self.best, tx)
+    fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) {
+        BestTransactions::mark_invalid(&mut self.best, tx, kind)
     }

     fn no_updates(&mut self) {
@@ -48,17 +51,21 @@ impl<T: TransactionOrdering> Iterator for BestTransactionsWithFees<T> {
     fn next(&mut self) -> Option<Self::Item> {
         // find the next transaction that satisfies the base fee
         loop {
-            let best = self.best.next()?;
+            let best = Iterator::next(&mut self.best)?;
             // If both the base fee and blob fee (if applicable for EIP-4844) are satisfied, return
             // the transaction
             if best.transaction.max_fee_per_gas() >= self.base_fee as u128 &&
                 best.transaction
                     .max_fee_per_blob_gas()
-                    .map_or(true, |fee| fee >= self.base_fee_per_blob_gas as u128)
+                    .is_none_or(|fee| fee >= self.base_fee_per_blob_gas as u128)
             {
                 return Some(best);
             }
-            crate::traits::BestTransactions::mark_invalid(self, &best);
+            crate::traits::BestTransactions::mark_invalid(
+                self,
+                &best,
+                InvalidPoolTransactionError::Underpriced,
+            );
         }
     }
 }
@@ -70,7 +77,8 @@ impl<T: TransactionOrdering> Iterator for BestTransactionsWithFees<T> {
 /// be executed on the current state, but only yields transactions that are ready to be executed
 /// now. While it contains all gapless transactions of a sender, it _always_ only returns the
 /// transaction with the current on chain nonce.
-pub(crate) struct BestTransactions<T: TransactionOrdering> {
+#[derive(Debug)]
+pub struct BestTransactions<T: TransactionOrdering> {
     /// Contains a copy of _all_ transactions of the pending pool at the point in time this
     /// iterator was created.
     pub(crate) all: BTreeMap<TransactionId, PendingTransaction<T>>,
@@ -80,7 +88,7 @@ pub(crate) struct BestTransactions<T: TransactionOrdering> {
     /// then can be moved from the `all` set to the `independent` set.
     pub(crate) independent: BTreeSet<PendingTransaction<T>>,
     /// There might be the case where a yielded transaction is invalid, this will track it.
-    pub(crate) invalid: HashSet<TxHash>,
+    pub(crate) invalid: HashSet<SenderId>,
     /// Used to receive any new pending transactions that have been added to the pool after this
     /// iterator was created
     ///
@@ -93,8 +101,12 @@ pub(crate) struct BestTransactions<T: TransactionOrdering> {

 impl<T: TransactionOrdering> BestTransactions<T> {
     /// Mark the transaction and its descendants as invalid.
-    pub(crate) fn mark_invalid(&mut self, tx: &Arc<ValidPoolTransaction<T::Transaction>>) {
-        self.invalid.insert(*tx.hash());
+    pub(crate) fn mark_invalid(
+        &mut self,
+        tx: &Arc<ValidPoolTransaction<T::Transaction>>,
+        _kind: InvalidPoolTransactionError,
+    ) {
+        self.invalid.insert(tx.sender_id());
     }

     /// Returns the ancestor of the given transaction, the transaction with `nonce - 1`.
@@ -128,6 +140,15 @@ impl<T: TransactionOrdering> BestTransactions<T> {
         }
     }

+    /// Removes the currently best independent transaction from the independent set and the total
+    /// set.
+    fn pop_best(&mut self) -> Option<PendingTransaction<T>> {
+        self.independent.pop_last().inspect(|best| {
+            let removed = self.all.remove(best.transaction.id());
+            debug_assert!(removed.is_some(), "must be present in both sets");
+        })
+    }
+
     /// Checks for new transactions that have come into the `PendingPool` after this iterator was
     /// created and inserts them
     fn add_new_transactions(&mut self) {
@@ -143,8 +164,8 @@
 }

 impl<T: TransactionOrdering> crate::traits::BestTransactions for BestTransactions<T> {
-    fn mark_invalid(&mut self, tx: &Self::Item) {
-        Self::mark_invalid(self, tx)
+    fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) {
+        Self::mark_invalid(self, tx, kind)
     }

     fn no_updates(&mut self) {
@@ -167,15 +188,15 @@ impl<T: TransactionOrdering> Iterator for BestTransactions<T> {
         loop {
             self.add_new_transactions();
             // Remove the next independent tx with the highest priority
-            let best = self.independent.pop_last()?;
-            let hash = best.transaction.hash();
+            let best = self.pop_best()?;
+            let sender_id = best.transaction.sender_id();

-            // skip transactions that were marked as invalid
-            if self.invalid.contains(hash) {
+            // skip transactions whose sender was marked as invalid
+            if self.invalid.contains(&sender_id) {
                 debug!(
                     target: "txpool",
                     "[{:?}] skipping invalid transaction",
-                    hash
+                    best.transaction.hash()
                 );
                 continue
             }
@@ -186,9 +207,14 @@
             }

             if self.skip_blobs && best.transaction.transaction.is_eip4844() {
-                // blobs should be skipped, marking the as invalid will ensure that no dependent
+                // blobs should be skipped, marking them as invalid will ensure that no dependent
                 // transactions are returned
-                self.mark_invalid(&best.transaction)
+                self.mark_invalid(
+                    &best.transaction,
+                    InvalidPoolTransactionError::Eip4844(
+                        Eip4844PoolTransactionError::NoEip4844Blobs,
+                    ),
+                )
             } else {
                 return Some(best.transaction)
             }
@@ -196,7 +222,52 @@
         }
     }
 }

-/// A[`BestTransactions`](crate::traits::BestTransactions) implementation that filters the
+/// Wrapper struct that allows converting `BestTransactions` (used in the tx pool) to
+/// `PayloadTransactions` (used in block composition).
+#[derive(Debug)]
+pub struct BestPayloadTransactions<T, I>
+where
+    T: PoolTransaction,
+    I: Iterator<Item = Arc<ValidPoolTransaction<T>>>,
+{
+    invalid: HashSet<Address>,
+    best: I,
+}
+
+impl<T, I> BestPayloadTransactions<T, I>
+where
+    T: PoolTransaction,
+    I: Iterator<Item = Arc<ValidPoolTransaction<T>>>,
+{
+    /// Create a new `BestPayloadTransactions` with the given iterator.
+    pub fn new(best: I) -> Self {
+        Self { invalid: Default::default(), best }
+    }
+}
+
+impl<T, I> PayloadTransactions for BestPayloadTransactions<T, I>
+where
+    T: PoolTransaction,
+    I: Iterator<Item = Arc<ValidPoolTransaction<T>>>,
+{
+    type Transaction = T::Consensus;
+
+    fn next(&mut self, _ctx: ()) -> Option<RecoveredTx<Self::Transaction>> {
+        loop {
+            let tx = self.best.next()?;
+            if self.invalid.contains(&tx.sender()) {
+                continue
+            }
+            return Some(tx.to_consensus())
+        }
+    }
+
+    fn mark_invalid(&mut self, sender: Address, _nonce: u64) {
+        self.invalid.insert(sender);
+    }
+}
+
+/// A [`BestTransactions`](crate::traits::BestTransactions) implementation that filters the
 /// transactions of the inner iterator with a predicate.
 ///
 /// Filter out transactions that are marked as invalid:
@@ -208,7 +279,7 @@ pub struct BestTransactionFilter<I, P> {

 impl<I, P> BestTransactionFilter<I, P> {
     /// Create a new [`BestTransactionFilter`] with the given predicate.
-    pub(crate) const fn new(best: I, predicate: P) -> Self {
+    pub const fn new(best: I, predicate: P) -> Self {
         Self { best, predicate }
     }
 }
@@ -226,7 +297,10 @@ where
             if (self.predicate)(&best) {
                 return Some(best)
             }
-            self.best.mark_invalid(&best);
+            self.best.mark_invalid(
+                &best,
+                InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported),
+            );
         }
     }
 }
@@ -236,8 +310,8 @@ where
     I: crate::traits::BestTransactions,
     P: FnMut(&<I as Iterator>::Item) -> bool + Send,
 {
-    fn mark_invalid(&mut self, tx: &Self::Item) {
-        crate::traits::BestTransactions::mark_invalid(&mut self.best, tx)
+    fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) {
+        crate::traits::BestTransactions::mark_invalid(&mut self.best, tx, kind)
     }

     fn no_updates(&mut self) {
@@ -259,15 +333,98 @@ impl<I: fmt::Debug, P> fmt::Debug for BestTransactionFilter<I, P> {
     }
 }

+/// Wrapper over [`crate::traits::BestTransactions`] that prioritizes transactions of certain
+/// senders, capping the total gas used by such transactions.
+#[derive(Debug)]
+pub struct BestTransactionsWithPrioritizedSenders<I: Iterator> {
+    /// Inner iterator
+    inner: I,
+    /// A set of senders whose transactions should be prioritized
+    prioritized_senders: HashSet<Address>,
+    /// Maximum total gas limit of prioritized transactions
+    max_prioritized_gas: u64,
+    /// Buffer with transactions that are not being prioritized. Those will be the first to be
+    /// included after the prioritized transactions
+    buffer: VecDeque<I::Item>,
+    /// Tracker of total gas limit of prioritized transactions. Once it reaches
+    /// `max_prioritized_gas`, no more transactions will be prioritized
+    prioritized_gas: u64,
+}
+
+impl<I: Iterator> BestTransactionsWithPrioritizedSenders<I> {
+    /// Constructs a new [`BestTransactionsWithPrioritizedSenders`].
+    pub fn new(prioritized_senders: HashSet<Address>
, max_prioritized_gas: u64, inner: I) -> Self { + Self { + inner, + prioritized_senders, + max_prioritized_gas, + buffer: Default::default(), + prioritized_gas: Default::default(), + } + } +} + +impl Iterator for BestTransactionsWithPrioritizedSenders +where + I: crate::traits::BestTransactions>>, + T: PoolTransaction, +{ + type Item = ::Item; + + fn next(&mut self) -> Option { + // If we have space, try prioritizing transactions + if self.prioritized_gas < self.max_prioritized_gas { + for item in &mut self.inner { + if self.prioritized_senders.contains(&item.transaction.sender()) && + self.prioritized_gas + item.transaction.gas_limit() <= + self.max_prioritized_gas + { + self.prioritized_gas += item.transaction.gas_limit(); + return Some(item) + } + self.buffer.push_back(item); + } + } + + if let Some(item) = self.buffer.pop_front() { + Some(item) + } else { + self.inner.next() + } + } +} + +impl crate::traits::BestTransactions for BestTransactionsWithPrioritizedSenders +where + I: crate::traits::BestTransactions>>, + T: PoolTransaction, +{ + fn mark_invalid(&mut self, tx: &Self::Item, kind: InvalidPoolTransactionError) { + self.inner.mark_invalid(tx, kind) + } + + fn no_updates(&mut self) { + self.inner.no_updates() + } + + fn set_skip_blobs(&mut self, skip_blobs: bool) { + if skip_blobs { + self.buffer.retain(|tx| !tx.transaction.is_eip4844()) + } + self.inner.set_skip_blobs(skip_blobs) + } +} + #[cfg(test)] mod tests { use super::*; use crate::{ pool::pending::PendingPool, test_utils::{MockOrdering, MockTransaction, MockTransactionFactory}, - Priority, + BestTransactions, Priority, }; use alloy_primitives::U256; + use reth_payload_util::{PayloadTransactionsChain, PayloadTransactionsFixed}; #[test] fn test_best_iter() { @@ -313,12 +470,42 @@ mod tests { // mark the first tx as invalid let invalid = best.independent.iter().next().unwrap(); - best.mark_invalid(&invalid.transaction.clone()); + best.mark_invalid( + &invalid.transaction.clone(), + InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + ); // iterator is empty assert!(best.next().is_none()); } + #[test] + fn test_best_transactions_iter_invalid() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let num_tx = 10; + // insert 10 gapless tx + let tx = MockTransaction::eip1559(); + for nonce in 0..num_tx { + let tx = tx.clone().rng_hash().with_nonce(nonce); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best: Box< + dyn crate::traits::BestTransactions>>, + > = Box::new(pool.best()); + + let tx = Iterator::next(&mut best).unwrap(); + crate::traits::BestTransactions::mark_invalid( + &mut *best, + &tx, + InvalidPoolTransactionError::Consensus(InvalidTransactionError::TxTypeNotSupported), + ); + assert!(Iterator::next(&mut best).is_none()); + } + #[test] fn test_best_with_fees_iter_base_fee_satisfied() { let mut pool = PendingPool::new(MockOrdering::default()); @@ -623,4 +810,241 @@ mod tests { assert_eq!(tx.nonce() % 2, 0); } } + + #[test] + fn test_best_transactions_prioritized_senders() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add 5 plain transactions from different senders with increasing gas price + for gas_price in 0..5 { + let tx = MockTransaction::eip1559().with_gas_price(gas_price); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + // Add another transaction with 0 gas 
price that's going to be prioritized by sender + let prioritized_tx = MockTransaction::eip1559().with_gas_price(0); + let valid_prioritized_tx = f.validated(prioritized_tx.clone()); + pool.add_transaction(Arc::new(valid_prioritized_tx), 0); + + let prioritized_senders = HashSet::from([prioritized_tx.sender()]); + let best = + BestTransactionsWithPrioritizedSenders::new(prioritized_senders, 200, pool.best()); + + // Verify that the prioritized transaction is returned first + // and the rest are returned in the reverse order of gas price + let mut iter = best.into_iter(); + let top_of_block_tx = iter.next().unwrap(); + assert_eq!(top_of_block_tx.max_fee_per_gas(), 0); + assert_eq!(top_of_block_tx.sender(), prioritized_tx.sender()); + for gas_price in (0..5).rev() { + assert_eq!(iter.next().unwrap().max_fee_per_gas(), gas_price); + } + + // TODO: Test that gas limits for prioritized transactions are respected + } + + #[test] + fn test_best_transactions_chained_iterators() { + let mut priority_pool = PendingPool::new(MockOrdering::default()); + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Block composition + // === + // (1) up to 100 gas: custom top-of-block transaction + // (2) up to 100 gas: transactions from the priority pool + // (3) up to 200 gas: only transactions from address A + // (4) up to 200 gas: only transactions from address B + // (5) until block gas limit: all transactions from the main pool + + // Notes: + // - If prioritized addresses overlap, a single transaction will be prioritized twice and + // therefore use the per-segment gas limit twice. + // - Priority pool and main pool must synchronize between each other to make sure there are + // no conflicts for the same nonce. For example, in this scenario, pools can't reject + // transactions with seemingly incorrect nonces, because previous transactions might be in + // the other pool. 
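For intuition, the per-segment gas budgeting described in the notes above can be modeled in isolation. The following standalone sketch is illustrative only (plain tuples and hypothetical names, not this PR's code or reth's pool types); the real `BestTransactionsWithPrioritizedSenders` applies the same rule lazily inside `next()`:

    // Standalone model of gas-capped sender prioritization (hypothetical helper).
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Tx {
        sender: u8, // stand-in for an `Address`
        gas_limit: u64,
    }

    fn select_with_priority(
        best_first: Vec<Tx>,      // transactions already ordered best-first
        prioritized: &[u8],       // senders to front-load
        max_prioritized_gas: u64, // shared gas budget for the prioritized segment
    ) -> Vec<Tx> {
        let mut used = 0u64;
        let (mut front, mut rest) = (Vec::new(), Vec::new());
        for tx in best_first {
            // Promote a transaction only while the segment budget still fits it;
            // everything else keeps its relative order behind the promoted set.
            if prioritized.contains(&tx.sender) && used + tx.gas_limit <= max_prioritized_gas {
                used += tx.gas_limit;
                front.push(tx);
            } else {
                rest.push(tx);
            }
        }
        front.extend(rest);
        front
    }

Nesting two such wrappers, as the test below does for `address_a` and `address_b`, gives each prioritized sender set its own independent gas budget.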
+ + let address_top_of_block = Address::random(); + let address_in_priority_pool = Address::random(); + let address_a = Address::random(); + let address_b = Address::random(); + let address_regular = Address::random(); + + // Add transactions to the main pool + { + let prioritized_tx_a = + MockTransaction::eip1559().with_gas_price(5).with_sender(address_a); + // without our custom logic, B would be prioritized over A due to gas price: + let prioritized_tx_b = + MockTransaction::eip1559().with_gas_price(10).with_sender(address_b); + let regular_tx = + MockTransaction::eip1559().with_gas_price(15).with_sender(address_regular); + pool.add_transaction(Arc::new(f.validated(prioritized_tx_a)), 0); + pool.add_transaction(Arc::new(f.validated(prioritized_tx_b)), 0); + pool.add_transaction(Arc::new(f.validated(regular_tx)), 0); + } + + // Add transactions to the priority pool + { + let prioritized_tx = + MockTransaction::eip1559().with_gas_price(0).with_sender(address_in_priority_pool); + let valid_prioritized_tx = f.validated(prioritized_tx); + priority_pool.add_transaction(Arc::new(valid_prioritized_tx), 0); + } + + let mut block = PayloadTransactionsChain::new( + PayloadTransactionsFixed::single( + MockTransaction::eip1559().with_sender(address_top_of_block).into(), + ), + Some(100), + PayloadTransactionsChain::new( + BestPayloadTransactions::new(priority_pool.best()), + Some(100), + BestPayloadTransactions::new(BestTransactionsWithPrioritizedSenders::new( + HashSet::from([address_a]), + 200, + BestTransactionsWithPrioritizedSenders::new( + HashSet::from([address_b]), + 200, + pool.best(), + ), + )), + None, + ), + None, + ); + + assert_eq!(block.next(()).unwrap().signer(), address_top_of_block); + assert_eq!(block.next(()).unwrap().signer(), address_in_priority_pool); + assert_eq!(block.next(()).unwrap().signer(), address_a); + assert_eq!(block.next(()).unwrap().signer(), address_b); + assert_eq!(block.next(()).unwrap().signer(), address_regular); + } + + #[test] + fn test_best_with_fees_iter_no_blob_fee_required() { + // Tests transactions without blob fees where base fees are checked. + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 0; // No blob fee requirement + + // Insert transactions with max_fee_per_gas above the base fee + for nonce in 0..5 { + let tx = MockTransaction::eip1559() + .rng_hash() + .with_nonce(nonce) + .with_max_fee(base_fee as u128 + 5); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + // All transactions should be returned as no blob fee requirement is imposed + for nonce in 0..5 { + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.nonce(), nonce); + } + + // Ensure no more transactions are left + assert!(best.next().is_none()); + } + + #[test] + fn test_best_with_fees_iter_mix_of_blob_and_non_blob_transactions() { + // Tests mixed scenarios with both blob and non-blob transactions. 
+ let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 15; + + // Add a non-blob transaction that satisfies the base fee + let tx_non_blob = + MockTransaction::eip1559().rng_hash().with_nonce(0).with_max_fee(base_fee as u128 + 5); + pool.add_transaction(Arc::new(f.validated(tx_non_blob.clone())), 0); + + // Add a blob transaction that satisfies both base fee and blob fee + let tx_blob = MockTransaction::eip4844() + .rng_hash() + .with_nonce(1) + .with_max_fee(base_fee as u128 + 5) + .with_blob_fee(base_fee_per_blob_gas as u128 + 5); + pool.add_transaction(Arc::new(f.validated(tx_blob.clone())), 0); + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + // Verify both transactions are returned + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.transaction, tx_non_blob); + + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.transaction, tx_blob); + + // Ensure no more transactions are left + assert!(best.next().is_none()); + } + + #[test] + fn test_best_transactions_with_skipping_blobs() { + // Tests the skip_blobs functionality to ensure blob transactions are skipped. + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add a blob transaction + let tx_blob = MockTransaction::eip4844().rng_hash().with_nonce(0).with_blob_fee(100); + let valid_blob_tx = f.validated(tx_blob); + pool.add_transaction(Arc::new(valid_blob_tx), 0); + + // Add a non-blob transaction + let tx_non_blob = MockTransaction::eip1559().rng_hash().with_nonce(1).with_max_fee(200); + let valid_non_blob_tx = f.validated(tx_non_blob.clone()); + pool.add_transaction(Arc::new(valid_non_blob_tx), 0); + + let mut best = pool.best(); + best.skip_blobs(); + + // Only the non-blob transaction should be returned + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.transaction, tx_non_blob); + + // Ensure no more transactions are left + assert!(best.next().is_none()); + } + + #[test] + fn test_best_transactions_no_updates() { + // Tests the no_updates functionality to ensure it properly clears the + // new_transaction_receiver. + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add a transaction + let tx = MockTransaction::eip1559().rng_hash().with_nonce(0).with_max_fee(100); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + + let mut best = pool.best(); + + // Use a broadcast channel for transaction updates + let (_tx_sender, tx_receiver) = + tokio::sync::broadcast::channel::>(1000); + best.new_transaction_receiver = Some(tx_receiver); + + // Ensure receiver is set + assert!(best.new_transaction_receiver.is_some()); + + // Call no_updates to clear the receiver + best.no_updates(); + + // Ensure receiver is cleared + assert!(best.new_transaction_receiver.is_none()); + } + + // TODO: Same nonce test } diff --git a/crates/transaction-pool/src/pool/blob.rs b/crates/transaction-pool/src/pool/blob.rs index cb09e823409..e6c0cb245c3 100644 --- a/crates/transaction-pool/src/pool/blob.rs +++ b/crates/transaction-pool/src/pool/blob.rs @@ -11,7 +11,7 @@ use std::{ /// A set of validated blob transactions in the pool that are __not pending__. 
/// -/// The purpose of this pool is keep track of blob transactions that are queued and to evict the +/// The purpose of this pool is to keep track of blob transactions that are queued and to evict the /// worst blob transactions once the sub-pool is full. /// /// This expects that certain constraints are met: @@ -198,14 +198,13 @@ impl BlobTransactions { &mut self, pending_fees: &PendingFees, ) -> Vec>> { - let to_remove = self.satisfy_pending_fee_ids(pending_fees); - - let mut removed = Vec::with_capacity(to_remove.len()); - for id in to_remove { - removed.push(self.remove_transaction(&id).expect("transaction exists")); - } + let removed = self + .satisfy_pending_fee_ids(pending_fees) + .into_iter() + .map(|id| self.remove_transaction(&id).expect("transaction exists")) + .collect(); - // set pending fees and reprioritize / resort + // Update pending fees and reprioritize self.pending_fees = pending_fees.clone(); self.reprioritize(); @@ -694,4 +693,102 @@ mod tests { ); } } + + #[test] + fn test_empty_pool_operations() { + let mut pool: BlobTransactions = BlobTransactions::default(); + + // Ensure pool is empty + assert!(pool.is_empty()); + assert_eq!(pool.len(), 0); + assert_eq!(pool.size(), 0); + + // Attempt to remove a non-existent transaction + let non_existent_id = TransactionId::new(0.into(), 0); + assert!(pool.remove_transaction(&non_existent_id).is_none()); + + // Check contains method on empty pool + assert!(!pool.contains(&non_existent_id)); + } + + #[test] + fn test_transaction_removal() { + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + + // Add a transaction + let tx = factory.validated_arc(MockTransaction::eip4844()); + let tx_id = *tx.id(); + pool.add_transaction(tx); + + // Remove the transaction + let removed = pool.remove_transaction(&tx_id); + assert!(removed.is_some()); + assert_eq!(*removed.unwrap().id(), tx_id); + assert!(pool.is_empty()); + } + + #[test] + fn test_satisfy_attributes_empty_pool() { + let pool: BlobTransactions = BlobTransactions::default(); + let attributes = BestTransactionsAttributes { blob_fee: Some(100), basefee: 100 }; + // Satisfy attributes on an empty pool should return an empty vector + let satisfied = pool.satisfy_attributes(attributes); + assert!(satisfied.is_empty()); + } + + #[test] + #[should_panic(expected = "transaction is not a blob tx")] + fn test_add_non_blob_transaction() { + // Ensure that adding a non-blob transaction causes a panic + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + let tx = factory.validated_arc(MockTransaction::eip1559()); // Not a blob transaction + pool.add_transaction(tx); + } + + #[test] + #[should_panic(expected = "transaction already included")] + fn test_add_duplicate_blob_transaction() { + // Ensure that adding a duplicate blob transaction causes a panic + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + let tx = factory.validated_arc(MockTransaction::eip4844()); + pool.add_transaction(tx.clone()); // First addition + pool.add_transaction(tx); // Attempt to add the same transaction again + } + + #[test] + fn test_remove_transactions_until_limit() { + // Test truncating the pool until it satisfies the given size limit + let mut factory = MockTransactionFactory::default(); + let mut pool = BlobTransactions::default(); + let tx1 = factory.validated_arc(MockTransaction::eip4844().with_size(100)); + let tx2 = 
factory.validated_arc(MockTransaction::eip4844().with_size(200)); + let tx3 = factory.validated_arc(MockTransaction::eip4844().with_size(300)); + + // Add transactions to the pool + pool.add_transaction(tx1); + pool.add_transaction(tx2); + pool.add_transaction(tx3); + + // Set a size limit that requires truncation + let limit = SubPoolLimit { max_txs: 2, max_size: 300 }; + let removed = pool.truncate_pool(limit); + + // Check that only one transaction was removed to satisfy the limit + assert_eq!(removed.len(), 1); + assert_eq!(pool.len(), 2); + assert!(pool.size() <= limit.max_size); + } + + #[test] + fn test_empty_pool_invariants() { + // Ensure that the invariants hold for an empty pool + let pool: BlobTransactions = BlobTransactions::default(); + pool.assert_invariants(); + assert!(pool.is_empty()); + assert_eq!(pool.size(), 0); + assert_eq!(pool.len(), 0); + } } diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 600a8da934e..89c4d6d3465 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -78,24 +78,19 @@ use crate::{ PoolTransaction, PropagatedTransactions, TransactionOrigin, }, validate::{TransactionValidationOutcome, ValidPoolTransaction}, - CanonicalStateUpdate, PoolConfig, TransactionOrdering, TransactionValidator, + CanonicalStateUpdate, EthPoolTransaction, PoolConfig, TransactionOrdering, + TransactionValidator, }; use alloy_primitives::{Address, TxHash, B256}; use best::BestTransactions; -use parking_lot::{Mutex, RwLock, RwLockReadGuard}; +use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; -use reth_primitives::{ - BlobTransaction, BlobTransactionSidecar, PooledTransactionsElement, TransactionSigned, - TransactionSignedEcRecovered, -}; -use std::{ - collections::{HashMap, HashSet}, - fmt, - sync::Arc, - time::Instant, -}; +use alloy_eips::eip4844::BlobTransactionSidecar; +use reth_primitives::RecoveredTx; +use rustc_hash::FxHashMap; +use std::{collections::HashSet, fmt, sync::Arc, time::Instant}; use tokio::sync::mpsc; use tracing::{debug, trace, warn}; mod events; @@ -106,7 +101,9 @@ use crate::{ traits::{GetPooledTransactionLimit, NewBlobSidecar, TransactionListenerKind}, validate::ValidTransaction, }; -pub use best::BestTransactionFilter; +pub use best::{ + BestPayloadTransactions, BestTransactionFilter, BestTransactionsWithPrioritizedSenders, +}; pub use blob::{blob_tx_priority, fee_delta}; pub use events::{FullTransactionEvent, TransactionEvent}; pub use listener::{AllTransactionsEvents, TransactionEvents}; @@ -166,7 +163,7 @@ where S: BlobStore, { /// Create a new transaction pool instance. - pub(crate) fn new(validator: V, ordering: T, blob_store: S, config: PoolConfig) -> Self { + pub fn new(validator: V, ordering: T, blob_store: S, config: PoolConfig) -> Self { Self { identifiers: Default::default(), validator, @@ -182,31 +179,31 @@ where } /// Returns the configured blob store. - pub(crate) const fn blob_store(&self) -> &S { + pub const fn blob_store(&self) -> &S { &self.blob_store } /// Returns stats about the size of the pool. 
- pub(crate) fn size(&self) -> PoolSize { + pub fn size(&self) -> PoolSize { self.get_pool_data().size() } /// Returns the currently tracked block - pub(crate) fn block_info(&self) -> BlockInfo { + pub fn block_info(&self) -> BlockInfo { self.get_pool_data().block_info() } /// Sets the currently tracked block - pub(crate) fn set_block_info(&self, info: BlockInfo) { + pub fn set_block_info(&self, info: BlockInfo) { self.pool.write().set_block_info(info) } /// Returns the internal [`SenderId`] for this address - pub(crate) fn get_sender_id(&self, addr: Address) -> SenderId { + pub fn get_sender_id(&self, addr: Address) -> SenderId { self.identifiers.write().sender_id_or_create(addr) } /// Returns all senders in the pool - pub(crate) fn unique_senders(&self) -> HashSet
<Address> { + pub fn unique_senders(&self) -> HashSet<Address>
{ self.get_pool_data().unique_senders() } @@ -215,7 +212,7 @@ where fn changed_senders( &self, accs: impl Iterator, - ) -> HashMap { + ) -> FxHashMap { let mut identifiers = self.identifiers.write(); accs.into_iter() .map(|acc| { @@ -266,29 +263,24 @@ where /// If the pool contains the transaction, this adds a new listener that gets notified about /// transaction events. - pub(crate) fn add_transaction_event_listener( - &self, - tx_hash: TxHash, - ) -> Option { + pub fn add_transaction_event_listener(&self, tx_hash: TxHash) -> Option { self.get_pool_data() .contains(&tx_hash) .then(|| self.event_listener.write().subscribe(tx_hash)) } /// Adds a listener for all transaction events. - pub(crate) fn add_all_transactions_event_listener( - &self, - ) -> AllTransactionsEvents { + pub fn add_all_transactions_event_listener(&self) -> AllTransactionsEvents { self.event_listener.write().subscribe_all() } /// Returns a read lock to the pool's data. - pub(crate) fn get_pool_data(&self) -> RwLockReadGuard<'_, TxPool> { + pub fn get_pool_data(&self) -> RwLockReadGuard<'_, TxPool> { self.pool.read() } /// Returns hashes of _all_ transactions in the pool. - pub(crate) fn pooled_transactions_hashes(&self) -> Vec { + pub fn pooled_transactions_hashes(&self) -> Vec { self.get_pool_data() .all() .transactions_iter() @@ -298,62 +290,73 @@ where } /// Returns _all_ transactions in the pool. - pub(crate) fn pooled_transactions(&self) -> Vec>> { - self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).collect() + pub fn pooled_transactions(&self) -> Vec>> { + self.get_pool_data().all().transactions_iter().filter(|tx| tx.propagate).cloned().collect() + } + + /// Returns only the first `max` transactions in the pool. + pub fn pooled_transactions_max( + &self, + max: usize, + ) -> Vec>> { + self.get_pool_data() + .all() + .transactions_iter() + .filter(|tx| tx.propagate) + .take(max) + .cloned() + .collect() } - /// Returns the [`BlobTransaction`] for the given transaction if the sidecar exists. + /// Converts the internally tracked transaction to the pooled format. /// - /// Caution: this assumes the given transaction is eip-4844 - fn get_blob_transaction(&self, transaction: TransactionSigned) -> Option { - if let Ok(Some(sidecar)) = self.blob_store.get(transaction.hash()) { - if let Ok(blob) = BlobTransaction::try_from_signed(transaction, sidecar) { - return Some(blob) - } + /// If the transaction is an EIP-4844 transaction, the blob sidecar is fetched from the blob + /// store and attached to the transaction. + fn to_pooled_transaction( + &self, + transaction: Arc>, + ) -> Option::Transaction as PoolTransaction>::Pooled>> + where + ::Transaction: EthPoolTransaction, + { + if transaction.is_eip4844() { + let sidecar = self.blob_store.get(*transaction.hash()).ok()??; + transaction.transaction.clone().try_into_pooled_eip4844(sidecar) + } else { + transaction + .transaction + .clone() + .try_into_pooled() + .inspect_err(|err| { + debug!( + target: "txpool", %err, + "failed to convert transaction to pooled element; skipping", + ); + }) + .ok() } - None } - /// Returns converted [`PooledTransactionsElement`] for the given transaction hashes. - pub(crate) fn get_pooled_transaction_elements( + /// Returns pooled transactions for the given transaction hashes. 
+ pub fn get_pooled_transaction_elements( &self, tx_hashes: Vec, limit: GetPooledTransactionLimit, - ) -> Vec + ) -> Vec<<::Transaction as PoolTransaction>::Pooled> where - ::Transaction: - PoolTransaction>, + ::Transaction: EthPoolTransaction, { let transactions = self.get_all(tx_hashes); let mut elements = Vec::with_capacity(transactions.len()); let mut size = 0; for transaction in transactions { let encoded_len = transaction.encoded_length(); - let recovered: TransactionSignedEcRecovered = - transaction.transaction.clone().into_consensus().into(); - let tx = recovered.into_signed(); - let pooled = if tx.is_eip4844() { - // for EIP-4844 transactions, we need to fetch the blob sidecar from the blob store - if let Some(blob) = self.get_blob_transaction(tx) { - PooledTransactionsElement::BlobTransaction(blob) - } else { - continue - } - } else { - match PooledTransactionsElement::try_from(tx) { - Ok(element) => element, - Err(err) => { - debug!( - target: "txpool", %err, - "failed to convert transaction to pooled element; skipping", - ); - continue - } - } + let Some(pooled) = self.to_pooled_transaction(transaction) else { + continue; }; size += encoded_len; - elements.push(pooled); + elements.push(pooled.into_signed()); if limit.exceeds(size) { break @@ -363,33 +366,25 @@ where elements } - /// Returns converted [`PooledTransactionsElement`] for the given transaction hash. - pub(crate) fn get_pooled_transaction_element( + /// Returns converted pooled transaction for the given transaction hash. + pub fn get_pooled_transaction_element( &self, tx_hash: TxHash, - ) -> Option + ) -> Option::Transaction as PoolTransaction>::Pooled>> where - ::Transaction: - PoolTransaction>, + ::Transaction: EthPoolTransaction, { - self.get(&tx_hash).and_then(|transaction| { - let recovered: TransactionSignedEcRecovered = - transaction.transaction.clone().into_consensus().into(); - let tx = recovered.into_signed(); - if tx.is_eip4844() { - self.get_blob_transaction(tx).map(PooledTransactionsElement::BlobTransaction) - } else { - PooledTransactionsElement::try_from(tx).ok() - } - }) + self.get(&tx_hash).and_then(|tx| self.to_pooled_transaction(tx)) } /// Updates the entire pool after a new block was executed. - pub(crate) fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_>) { + pub fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_>) { trace!(target: "txpool", ?update, "updating pool on canonical state change"); let block_info = update.block_info(); - let CanonicalStateUpdate { new_tip, changed_accounts, mined_transactions, .. } = update; + let CanonicalStateUpdate { + new_tip, changed_accounts, mined_transactions, update_kind, .. + } = update; self.validator.on_new_head_block(new_tip); let changed_senders = self.changed_senders(changed_accounts.into_iter()); @@ -399,6 +394,7 @@ where block_info, mined_transactions, changed_senders, + update_kind, ); // This will discard outdated transactions based on the account's nonce @@ -411,7 +407,7 @@ where /// Performs account updates on the pool. /// /// This will either promote or discard transactions based on the new account state. - pub(crate) fn update_accounts(&self, accounts: Vec) { + pub fn update_accounts(&self, accounts: Vec) { let changed_senders = self.changed_senders(accounts.into_iter()); let UpdateOutcome { promoted, discarded } = self.pool.write().update_accounts(changed_senders); @@ -431,6 +427,7 @@ where /// come in through that function, either as a batch or `std::iter::once`. 
fn add_transaction( &self, + pool: &mut RwLockWriteGuard<'_, TxPool>, origin: TransactionOrigin, tx: TransactionValidationOutcome, ) -> PoolResult { @@ -464,7 +461,7 @@ where origin, }; - let added = self.pool.write().add_transaction(tx, balance, state_nonce)?; + let added = pool.add_transaction(tx, balance, state_nonce)?; let hash = *added.hash(); // transaction was successfully inserted into the pool @@ -511,7 +508,8 @@ where } } - pub(crate) fn add_transaction_and_subscribe( + /// Adds a transaction and returns the event stream. + pub fn add_transaction_and_subscribe( &self, origin: TransactionOrigin, tx: TransactionValidationOutcome, @@ -526,33 +524,52 @@ where } /// Adds all transactions in the iterator to the pool, returning a list of results. + /// + /// Note: A large batch may lock the pool for a long time that blocks important operations + /// like updating the pool on canonical state changes. The caller should consider having + /// a max batch size to balance transaction insertions with other updates. pub fn add_transactions( &self, origin: TransactionOrigin, transactions: impl IntoIterator>, ) -> Vec> { - let mut added = - transactions.into_iter().map(|tx| self.add_transaction(origin, tx)).collect::>(); + // Add the transactions and enforce the pool size limits in one write lock + let (mut added, discarded) = { + let mut pool = self.pool.write(); + let added = transactions + .into_iter() + .map(|tx| self.add_transaction(&mut pool, origin, tx)) + .collect::>(); + + // Enforce the pool size limits if at least one transaction was added successfully + let discarded = if added.iter().any(Result::is_ok) { + pool.discard_worst() + } else { + Default::default() + }; - // If at least one transaction was added successfully, then we enforce the pool size limits. - let discarded = - if added.iter().any(Result::is_ok) { self.discard_worst() } else { Default::default() }; + (added, discarded) + }; - if discarded.is_empty() { - return added - } + if !discarded.is_empty() { + // Delete any blobs associated with discarded blob transactions + self.delete_discarded_blobs(discarded.iter()); - { - let mut listener = self.event_listener.write(); - discarded.iter().for_each(|tx| listener.discarded(tx)); - } + let discarded_hashes = + discarded.into_iter().map(|tx| *tx.hash()).collect::>(); - // It may happen that a newly added transaction is immediately discarded, so we need to - // adjust the result here - for res in &mut added { - if let Ok(hash) = res { - if discarded.contains(hash) { - *res = Err(PoolError::new(*hash, PoolErrorKind::DiscardedOnInsert)) + { + let mut listener = self.event_listener.write(); + discarded_hashes.iter().for_each(|hash| listener.discarded(hash)); + } + + // A newly added transaction may be immediately discarded, so we need to + // adjust the result here + for res in &mut added { + if let Ok(hash) = res { + if discarded_hashes.contains(hash) { + *res = Err(PoolError::new(*hash, PoolErrorKind::DiscardedOnInsert)) + } } } } @@ -665,13 +682,13 @@ where } /// Returns an iterator that yields transactions that are ready to be included in the block. - pub(crate) fn best_transactions(&self) -> BestTransactions { + pub fn best_transactions(&self) -> BestTransactions { self.get_pool_data().best_transactions() } /// Returns an iterator that yields transactions that are ready to be included in the block with /// the given base fee and optional blob fee attributes. 
- pub(crate) fn best_transactions_with_attributes( + pub fn best_transactions_with_attributes( &self, best_transactions_attributes: BestTransactionsAttributes, ) -> Box>>> @@ -679,18 +696,26 @@ where self.get_pool_data().best_transactions_with_attributes(best_transactions_attributes) } + /// Returns only the first `max` transactions in the pending pool. + pub fn pending_transactions_max( + &self, + max: usize, + ) -> Vec>> { + self.get_pool_data().pending_transactions_iter().take(max).collect() + } + /// Returns all transactions from the pending sub-pool - pub(crate) fn pending_transactions(&self) -> Vec>> { + pub fn pending_transactions(&self) -> Vec>> { self.get_pool_data().pending_transactions() } /// Returns all transactions from parked pools - pub(crate) fn queued_transactions(&self) -> Vec>> { + pub fn queued_transactions(&self) -> Vec>> { self.get_pool_data().queued_transactions() } /// Returns all transactions in the pool - pub(crate) fn all_transactions(&self) -> AllPoolTransactions { + pub fn all_transactions(&self) -> AllPoolTransactions { let pool = self.get_pool_data(); AllPoolTransactions { pending: pool.pending_transactions(), @@ -699,7 +724,7 @@ where } /// Removes and returns all matching transactions from the pool. - pub(crate) fn remove_transactions( + pub fn remove_transactions( &self, hashes: Vec, ) -> Vec>> { @@ -717,7 +742,7 @@ where /// Removes and returns all matching transactions and their dependent transactions from the /// pool. - pub(crate) fn remove_transactions_and_descendants( + pub fn remove_transactions_and_descendants( &self, hashes: Vec, ) -> Vec>> { @@ -733,7 +758,8 @@ where removed } - pub(crate) fn remove_transactions_by_sender( + /// Removes and returns all transactions by the specified sender from the pool. + pub fn remove_transactions_by_sender( &self, sender: Address, ) -> Vec>> { @@ -748,7 +774,7 @@ where } /// Removes and returns all transactions that are present in the pool. - pub(crate) fn retain_unknown(&self, announcement: &mut A) + pub fn retain_unknown(&self, announcement: &mut A) where A: HandleMempoolData, { @@ -760,15 +786,12 @@ where } /// Returns the transaction by hash. 
-    pub(crate) fn get(
-        &self,
-        tx_hash: &TxHash,
-    ) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
+    pub fn get(&self, tx_hash: &TxHash) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
         self.get_pool_data().get(tx_hash)
     }

     /// Returns all transactions of the address
-    pub(crate) fn get_transactions_by_sender(
+    pub fn get_transactions_by_sender(
         &self,
         sender: Address,
     ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
@@ -776,8 +799,34 @@
         self.get_pool_data().get_transactions_by_sender(sender_id)
     }

+    /// Returns all queued transactions of the address by sender
+    pub fn get_queued_transactions_by_sender(
+        &self,
+        sender: Address,
+    ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
+        let sender_id = self.get_sender_id(sender);
+        self.get_pool_data().queued_txs_by_sender(sender_id)
+    }
+
+    /// Returns all pending transactions filtered by predicate
+    pub fn pending_transactions_with_predicate(
+        &self,
+        predicate: impl FnMut(&ValidPoolTransaction<T::Transaction>) -> bool,
+    ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
+        self.get_pool_data().pending_transactions_with_predicate(predicate)
+    }
+
+    /// Returns all pending transactions of the address by sender
+    pub fn get_pending_transactions_by_sender(
+        &self,
+        sender: Address,
+    ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
+        let sender_id = self.get_sender_id(sender);
+        self.get_pool_data().pending_txs_by_sender(sender_id)
+    }
+
     /// Returns the highest transaction of the address
-    pub(crate) fn get_highest_transaction_by_sender(
+    pub fn get_highest_transaction_by_sender(
         &self,
         sender: Address,
     ) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
@@ -786,7 +835,7 @@
     }

     /// Returns the transaction with the highest nonce that is executable given the on chain nonce.
-    pub(crate) fn get_highest_consecutive_transaction_by_sender(
+    pub fn get_highest_consecutive_transaction_by_sender(
         &self,
         sender: Address,
         on_chain_nonce: u64,
@@ -797,16 +846,29 @@
         )
     }

+    /// Returns the transaction given a [`TransactionId`]
+    pub fn get_transaction_by_transaction_id(
+        &self,
+        transaction_id: &TransactionId,
+    ) -> Option<Arc<ValidPoolTransaction<T::Transaction>>> {
+        self.get_pool_data().all().get(transaction_id).map(|tx| tx.transaction.clone())
+    }
+
     /// Returns all transactions that were submitted with the given [`TransactionOrigin`]
-    pub(crate) fn get_transactions_by_origin(
+    pub fn get_transactions_by_origin(
         &self,
         origin: TransactionOrigin,
     ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
-        self.get_pool_data().all().transactions_iter().filter(|tx| tx.origin == origin).collect()
+        self.get_pool_data()
+            .all()
+            .transactions_iter()
+            .filter(|tx| tx.origin == origin)
+            .cloned()
+            .collect()
     }

     /// Returns all pending transactions filtered by [`TransactionOrigin`]
-    pub(crate) fn get_pending_transactions_by_origin(
+    pub fn get_pending_transactions_by_origin(
         &self,
         origin: TransactionOrigin,
     ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
@@ -816,10 +878,7 @@
     /// Returns all the transactions belonging to the hashes.
     ///
     /// If no transaction exists, it is skipped.
-    pub(crate) fn get_all(
-        &self,
-        txs: Vec<TxHash>,
-    ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
+    pub fn get_all(&self, txs: Vec<TxHash>) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
         if txs.is_empty() {
             return Vec::new()
         }
@@ -827,7 +886,7 @@
     }

     /// Notify about propagated transactions.
-    pub(crate) fn on_propagated(&self, txs: PropagatedTransactions) {
+    pub fn on_propagated(&self, txs: PropagatedTransactions) {
         if txs.0.is_empty() {
             return
         }
@@ -837,34 +896,20 @@
     }

     /// Number of transactions in the entire pool
-    pub(crate) fn len(&self) -> usize {
+    pub fn len(&self) -> usize {
         self.get_pool_data().len()
     }

     /// Whether the pool is empty
-    pub(crate) fn is_empty(&self) -> bool {
+    pub fn is_empty(&self) -> bool {
         self.get_pool_data().is_empty()
     }

     /// Returns whether or not the pool is over its configured size and transaction count limits.
- pub(crate) fn is_exceeded(&self) -> bool { + pub fn is_exceeded(&self) -> bool { self.pool.read().is_exceeded() } - /// Enforces the size limits of pool and returns the discarded transactions if violated. - /// - /// If some of the transactions are blob transactions, they are also removed from the blob - /// store. - pub(crate) fn discard_worst(&self) -> HashSet { - let discarded = self.pool.write().discard_worst(); - - // delete any blobs associated with discarded blob transactions - self.delete_discarded_blobs(discarded.iter()); - - // then collect into tx hashes - discarded.into_iter().map(|tx| *tx.hash()).collect() - } - /// Inserts a blob transaction into the blob store fn insert_blob(&self, hash: TxHash, blob: BlobTransactionSidecar) { debug!(target: "txpool", "[{:?}] storing blob sidecar", hash); @@ -876,17 +921,17 @@ where } /// Delete a blob from the blob store - pub(crate) fn delete_blob(&self, blob: TxHash) { + pub fn delete_blob(&self, blob: TxHash) { let _ = self.blob_store.delete(blob); } /// Delete all blobs from the blob store - pub(crate) fn delete_blobs(&self, txs: Vec) { + pub fn delete_blobs(&self, txs: Vec) { let _ = self.blob_store.delete_all(txs); } /// Cleans up the blob store - pub(crate) fn cleanup_blobs(&self) { + pub fn cleanup_blobs(&self) { let stat = self.blob_store.cleanup(); self.blob_store_metrics.blobstore_failed_deletes.increment(stat.delete_failed as u64); self.update_blob_store_metrics(); @@ -1218,7 +1263,8 @@ mod tests { validate::ValidTransaction, BlockInfo, PoolConfig, SubPoolLimit, TransactionOrigin, TransactionValidationOutcome, U256, }; - use reth_primitives::{kzg::Blob, transaction::generate_blob_sidecar}; + use alloy_eips::eip4844::BlobTransactionSidecar; + use reth_primitives::kzg::Blob; use std::{fs, path::PathBuf}; #[test] @@ -1253,7 +1299,7 @@ mod tests { .unwrap()]; // Generate a BlobTransactionSidecar from the blobs. - let sidecar = generate_blob_sidecar(blobs); + let sidecar = BlobTransactionSidecar::try_from_blobs(blobs).unwrap(); // Create an in-memory blob store. let blob_store = InMemoryBlobStore::default(); @@ -1268,27 +1314,22 @@ mod tests { // Insert the sidecar into the blob store if the current index is within the blob limit. if n < blob_limit.max_txs { - blob_store.insert(tx.get_hash(), sidecar.clone()).unwrap(); + blob_store.insert(*tx.get_hash(), sidecar.clone()).unwrap(); } // Add the transaction to the pool with external origin and valid outcome. - test_pool - .add_transaction( - TransactionOrigin::External, - TransactionValidationOutcome::Valid { - balance: U256::from(1_000), - state_nonce: 0, - transaction: ValidTransaction::ValidWithSidecar { - transaction: tx, - sidecar: sidecar.clone(), - }, - propagate: true, + test_pool.add_transactions( + TransactionOrigin::External, + [TransactionValidationOutcome::Valid { + balance: U256::from(1_000), + state_nonce: 0, + transaction: ValidTransaction::ValidWithSidecar { + transaction: tx, + sidecar: sidecar.clone(), }, - ) - .unwrap(); - - // Evict the worst transactions from the pool. - test_pool.discard_worst(); + propagate: true, + }], + ); } // Assert that the size of the pool's blob component is equal to the maximum blob limit. diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index b591fdb539a..29216af47d0 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -35,8 +35,8 @@ pub struct ParkedPool { best: BTreeSet>, /// Keeps track of last submission id for each sender. 
/// - /// This are sorted in Reverse order, so the last (highest) submission id is first, and the - /// lowest(oldest) is the last. + /// These are sorted in reverse order, so the last (highest) submission id is first, and the + /// lowest (oldest) is the last. last_sender_submission: BTreeSet, /// Keeps track of the number of transactions in the pool by the sender and the last submission /// id. @@ -856,4 +856,206 @@ mod tests { assert_eq!(submission_info2.sender_id, sender2); assert_eq!(submission_info2.submission_id, 2); } + + #[test] + fn test_remove_sender_count() { + // Initialize a mock transaction factory + let mut f = MockTransactionFactory::default(); + // Create an empty transaction pool + let mut pool = ParkedPool::>::default(); + // Generate two validated transactions and add them to the pool + let tx1 = f.validated_arc(MockTransaction::eip1559().inc_price()); + let tx2 = f.validated_arc(MockTransaction::eip1559().inc_price()); + pool.add_transaction(tx1); + pool.add_transaction(tx2); + + // Define two different sender IDs and their corresponding submission IDs + let sender1: SenderId = 11.into(); + let sender2: SenderId = 22.into(); + + // Add the sender counts to the pool + pool.add_sender_count(sender1, 1); + + // We add sender 2 multiple times to test the removal of sender counts + pool.add_sender_count(sender2, 2); + pool.add_sender_count(sender2, 3); + + // Before removing the sender count we should have 4 sender transaction counts + assert_eq!(pool.sender_transaction_count.len(), 4); + assert!(pool.sender_transaction_count.contains_key(&sender1)); + + // We should have 1 sender transaction count for sender 1 before removing the sender count + assert_eq!(pool.sender_transaction_count.get(&sender1).unwrap().count, 1); + + // Remove the sender count for sender 1 + pool.remove_sender_count(sender1); + + // After removing the sender count we should have 3 sender transaction counts remaining + assert_eq!(pool.sender_transaction_count.len(), 3); + assert!(!pool.sender_transaction_count.contains_key(&sender1)); + + // Check the sender transaction count for sender 2 before removing the sender count + assert_eq!( + *pool.sender_transaction_count.get(&sender2).unwrap(), + SenderTransactionCount { count: 2, last_submission_id: 3 } + ); + + // Remove the sender count for sender 2 + pool.remove_sender_count(sender2); + + // After removing the sender count for sender 2, we still have 3 sender transaction counts + // remaining. + // + // This is because we added sender 2 multiple times and we only removed the last submission.
+ assert_eq!(pool.sender_transaction_count.len(), 3); + assert!(pool.sender_transaction_count.contains_key(&sender2)); + + // Sender transaction count for sender 2 should be updated correctly + assert_eq!( + *pool.sender_transaction_count.get(&sender2).unwrap(), + SenderTransactionCount { count: 1, last_submission_id: 3 } + ); + } + + #[test] + fn test_pool_size() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Create a transaction with a specific size and add it to the pool + let tx = f.validated_arc(MockTransaction::eip1559().set_size(1024).clone()); + pool.add_transaction(tx); + + // Assert that the reported size of the pool is correct + assert_eq!(pool.size(), 1024); + } + + #[test] + fn test_pool_len() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Initially, the pool should have zero transactions + assert_eq!(pool.len(), 0); + + // Add a transaction to the pool and check the length + let tx = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx); + assert_eq!(pool.len(), 1); + } + + #[test] + fn test_pool_contains() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Create a transaction and get its ID + let tx = f.validated_arc(MockTransaction::eip1559()); + let tx_id = *tx.id(); + + // Before adding, the transaction should not be in the pool + assert!(!pool.contains(&tx_id)); + + // After adding, the transaction should be present in the pool + pool.add_transaction(tx); + assert!(pool.contains(&tx_id)); + } + + #[test] + fn test_get_transaction() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add a transaction to the pool and get its ID + let tx = f.validated_arc(MockTransaction::eip1559()); + let tx_id = *tx.id(); + pool.add_transaction(tx.clone()); + + // Retrieve the transaction using `get()` and assert it matches the added transaction + let retrieved = pool.get(&tx_id).expect("Transaction should exist in the pool"); + assert_eq!(retrieved.transaction.id(), tx.id()); + } + + #[test] + fn test_all_transactions() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add two transactions to the pool + let tx1 = f.validated_arc(MockTransaction::eip1559()); + let tx2 = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx1.clone()); + pool.add_transaction(tx2.clone()); + + // Collect all transaction IDs from the pool + let all_txs: Vec<_> = pool.all().map(|tx| *tx.id()).collect(); + assert_eq!(all_txs.len(), 2); + + // Check that the IDs of both transactions are present + assert!(all_txs.contains(tx1.id())); + assert!(all_txs.contains(tx2.id())); + } + + #[test] + fn test_truncate_pool_edge_case() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add two transactions to the pool + let tx1 = f.validated_arc(MockTransaction::eip1559()); + let tx2 = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx1); + pool.add_transaction(tx2); + + // Set a limit that matches the current number of transactions + let limit = SubPoolLimit { max_txs: 2, max_size: usize::MAX }; + let removed = pool.truncate_pool(limit); + + // No transactions should be removed + assert!(removed.is_empty()); + + // Set a stricter limit that requires truncating one transaction + let limit = SubPoolLimit { max_txs: 1, max_size: usize::MAX }; + let removed = 
pool.truncate_pool(limit); + + // One transaction should be removed, and the pool should have one left + assert_eq!(removed.len(), 1); + assert_eq!(pool.len(), 1); + } + + #[test] + fn test_satisfy_base_fee_transactions() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add two transactions with different max fees + let tx1 = f.validated_arc(MockTransaction::eip1559().set_max_fee(100).clone()); + let tx2 = f.validated_arc(MockTransaction::eip1559().set_max_fee(200).clone()); + pool.add_transaction(tx1); + pool.add_transaction(tx2.clone()); + + // Check that only the second transaction satisfies the base fee requirement + let satisfied = pool.satisfy_base_fee_transactions(150); + assert_eq!(satisfied.len(), 1); + assert_eq!(satisfied[0].id(), tx2.id()) + } + + #[test] + fn test_remove_transaction() { + let mut f = MockTransactionFactory::default(); + let mut pool = ParkedPool::>::default(); + + // Add a transaction to the pool and get its ID + let tx = f.validated_arc(MockTransaction::eip1559()); + let tx_id = *tx.id(); + pool.add_transaction(tx); + + // Ensure the transaction is in the pool before removal + assert!(pool.contains(&tx_id)); + + // Remove the transaction and check that it is no longer in the pool + let removed = pool.remove_transaction(&tx_id); + assert!(removed.is_some()); + assert!(!pool.contains(&tx_id)); + } } diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index ff3ecf65a49..27706bd1754 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -6,9 +6,10 @@ use crate::{ }, Priority, SubPoolLimit, TransactionOrdering, ValidPoolTransaction, }; +use rustc_hash::FxHashMap; use std::{ cmp::Ordering, - collections::{BTreeMap, BTreeSet}, + collections::{hash_map::Entry, BTreeMap}, ops::Bound::Unbounded, sync::Arc, }; @@ -34,18 +35,12 @@ pub struct PendingPool { submission_id: u64, /// _All_ Transactions that are currently inside the pool grouped by their identifier. by_id: BTreeMap>, - /// _All_ transactions sorted by priority - all: BTreeSet>, /// The highest nonce transactions for each sender - like the `independent` set, but the /// highest instead of lowest nonce. - /// - /// Sorted by their scoring value. - highest_nonces: BTreeSet>, + highest_nonces: FxHashMap>, /// Independent transactions that can be included directly and don't require other /// transactions. - /// - /// Sorted by their scoring value. - independent_transactions: BTreeSet>, + independent_transactions: FxHashMap>, /// Keeps track of the size of this pool. /// /// See also [`PoolTransaction::size`](crate::traits::PoolTransaction::size). 
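A rough sketch of the per-sender invariant the two `FxHashMap`s introduced above maintain (illustrative comments only, assuming the gapless nonce sequence the pending pool guarantees):

    // For a sender whose pending txs, sorted by nonce, are `txs`:
    //   independent_transactions[sender] == txs.first() // lowest nonce, immediately executable
    //   highest_nonces[sender]           == txs.last()  // highest nonce, preferred eviction candidate
    // Per-sender lookup and update become O(1); score ordering is recovered on demand,
    // e.g. `truncate_pool` collects and sorts `highest_nonces.values()` before evicting.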
@@ -65,7 +60,6 @@ impl PendingPool { ordering, submission_id: 0, by_id: Default::default(), - all: Default::default(), independent_transactions: Default::default(), highest_nonces: Default::default(), size_of: Default::default(), @@ -82,7 +76,6 @@ impl PendingPool { fn clear_transactions(&mut self) -> BTreeMap> { self.independent_transactions.clear(); self.highest_nonces.clear(); - self.all.clear(); self.size_of.reset(); std::mem::take(&mut self.by_id) } @@ -108,7 +101,7 @@ impl PendingPool { pub(crate) fn best(&self) -> BestTransactions { BestTransactions { all: self.by_id.clone(), - independent: self.independent_transactions.clone(), + independent: self.independent_transactions.values().cloned().collect(), invalid: Default::default(), new_transaction_receiver: Some(self.new_transaction_notifier.subscribe()), skip_blobs: false, @@ -197,8 +190,7 @@ impl PendingPool { } } else { self.size_of += tx.transaction.size(); - self.update_independents_and_highest_nonces(&tx, &id); - self.all.insert(tx.clone()); + self.update_independents_and_highest_nonces(&tx); self.by_id.insert(id, tx); } } @@ -243,8 +235,7 @@ impl PendingPool { tx.priority = self.ordering.priority(&tx.transaction.transaction, base_fee); self.size_of += tx.transaction.size(); - self.update_independents_and_highest_nonces(&tx, &id); - self.all.insert(tx.clone()); + self.update_independents_and_highest_nonces(&tx); self.by_id.insert(id, tx); } } @@ -254,22 +245,27 @@ impl PendingPool { /// Updates the independent transaction and highest nonces set, assuming the given transaction /// is being _added_ to the pool. - fn update_independents_and_highest_nonces( - &mut self, - tx: &PendingTransaction, - tx_id: &TransactionId, - ) { - let ancestor_id = tx_id.unchecked_ancestor(); - if let Some(ancestor) = ancestor_id.and_then(|id| self.by_id.get(&id)) { - // the transaction already has an ancestor, so we only need to ensure that the - // highest nonces set actually contains the highest nonce for that sender - self.highest_nonces.remove(ancestor); - } else { - // If there's __no__ ancestor in the pool, then this transaction is independent, this is - // guaranteed because this pool is gapless. - self.independent_transactions.insert(tx.clone()); + fn update_independents_and_highest_nonces(&mut self, tx: &PendingTransaction) { + match self.highest_nonces.entry(tx.transaction.sender_id()) { + Entry::Occupied(mut entry) => { + if entry.get().transaction.nonce() < tx.transaction.nonce() { + *entry.get_mut() = tx.clone(); + } + } + Entry::Vacant(entry) => { + entry.insert(tx.clone()); + } + } + match self.independent_transactions.entry(tx.transaction.sender_id()) { + Entry::Occupied(mut entry) => { + if entry.get().transaction.nonce() > tx.transaction.nonce() { + *entry.get_mut() = tx.clone(); + } + } + Entry::Vacant(entry) => { + entry.insert(tx.clone()); + } } - self.highest_nonces.insert(tx.clone()); } /// Returns the ancestor the given transaction, the transaction with `nonce - 1`. 
@@ -305,8 +301,7 @@ impl PendingPool { let priority = self.ordering.priority(&tx.transaction, base_fee); let tx = PendingTransaction { submission_id, transaction: tx, priority }; - self.update_independents_and_highest_nonces(&tx, &tx_id); - self.all.insert(tx.clone()); + self.update_independents_and_highest_nonces(&tx); // send the new transaction to any existing pendingpool static file iterators if self.new_transaction_notifier.receiver_count() > 0 { @@ -324,19 +319,25 @@ impl PendingPool { &mut self, id: &TransactionId, ) -> Option>> { - // mark the next as independent if it exists - if let Some(unlocked) = self.get(&id.descendant()) { - self.independent_transactions.insert(unlocked.clone()); + if let Some(lowest) = self.independent_transactions.get(&id.sender) { + if lowest.transaction.nonce() == id.nonce { + self.independent_transactions.remove(&id.sender); + // mark the next as independent if it exists + if let Some(unlocked) = self.get(&id.descendant()) { + self.independent_transactions.insert(id.sender, unlocked.clone()); + } + } } + let tx = self.by_id.remove(id)?; self.size_of -= tx.transaction.size(); - self.all.remove(&tx); - self.independent_transactions.remove(&tx); - // switch out for the next ancestor if there is one - if self.highest_nonces.remove(&tx) { + if let Some(highest) = self.highest_nonces.get(&id.sender) { + if highest.transaction.nonce() == id.nonce { + self.highest_nonces.remove(&id.sender); + } if let Some(ancestor) = self.ancestor(id) { - self.highest_nonces.insert(ancestor.clone()); + self.highest_nonces.insert(id.sender, ancestor.clone()); } } Some(tx.transaction) @@ -402,8 +403,12 @@ impl PendingPool { // we can reuse the temp array removed.clear(); + // we prefer removing transactions with lower ordering + let mut worst_transactions = self.highest_nonces.values().collect::>(); + worst_transactions.sort(); + // loop through the highest nonces set, removing transactions until we reach the limit - for tx in &self.highest_nonces { + for tx in worst_transactions { // return early if the pool is under limits if !limit.is_exceeded(original_length - total_removed, original_size - total_size) || non_local_senders == 0 @@ -517,16 +522,21 @@ impl PendingPool { self.by_id.get(id) } + /// Returns a reference to the independent transactions in the pool + #[cfg(test)] + pub(crate) const fn independent(&self) -> &FxHashMap> { + &self.independent_transactions + } + /// Asserts that the bijection between `by_id` and `all` is valid. #[cfg(any(test, feature = "test-utils"))] pub(crate) fn assert_invariants(&self) { - assert_eq!(self.by_id.len(), self.all.len(), "by_id.len() != all.len()"); assert!( - self.independent_transactions.len() <= self.all.len(), + self.independent_transactions.len() <= self.by_id.len(), "independent.len() > all.len()" ); assert!( - self.highest_nonces.len() <= self.all.len(), + self.highest_nonces.len() <= self.by_id.len(), "independent_descendants.len() > all.len()" ); assert_eq!( @@ -672,7 +682,7 @@ mod tests { // First transaction should be evicted. 
assert_eq!( - pool.highest_nonces.iter().next().map(|tx| *tx.transaction.hash()), + pool.highest_nonces.values().min().map(|tx| *tx.transaction.hash()), Some(*t.hash()) ); @@ -727,7 +737,7 @@ mod tests { .collect::>(); let actual_highest_nonces = pool .highest_nonces - .iter() + .values() .map(|tx| (tx.transaction.sender(), tx.transaction.nonce())) .collect::>(); assert_eq!(expected_highest_nonces, actual_highest_nonces); @@ -819,4 +829,147 @@ mod tests { pending.into_iter().map(|tx| (tx.sender(), tx.nonce())).collect::>(); assert_eq!(pending, expected_pending); } + + // + #[test] + fn test_eligible_updates_promoted() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let num_senders = 10; + + let first_txs: Vec<_> = (0..num_senders) // + .map(|_| MockTransaction::eip1559()) + .collect(); + let second_txs: Vec<_> = + first_txs.iter().map(|tx| tx.clone().rng_hash().inc_nonce()).collect(); + + for tx in first_txs { + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best(); + + for _ in 0..num_senders { + if let Some(tx) = best.next() { + assert_eq!(tx.nonce(), 0); + } else { + panic!("cannot read one of first_txs"); + } + } + + for tx in second_txs { + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + for _ in 0..num_senders { + if let Some(tx) = best.next() { + assert_eq!(tx.nonce(), 1); + } else { + panic!("cannot read one of second_txs"); + } + } + } + + #[test] + fn test_empty_pool_behavior() { + let mut pool = PendingPool::::new(MockOrdering::default()); + + // Ensure the pool is empty + assert!(pool.is_empty()); + assert_eq!(pool.len(), 0); + assert_eq!(pool.size(), 0); + + // Verify that attempting to truncate an empty pool does not panic and returns an empty vec + let removed = pool.truncate_pool(SubPoolLimit { max_txs: 10, max_size: 1000 }); + assert!(removed.is_empty()); + + // Verify that retrieving transactions from an empty pool yields nothing + let all_txs: Vec<_> = pool.all().collect(); + assert!(all_txs.is_empty()); + } + + #[test] + fn test_add_remove_transaction() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add a transaction and check if it's in the pool + let tx = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx.clone(), 0); + assert!(pool.contains(tx.id())); + assert_eq!(pool.len(), 1); + + // Remove the transaction and ensure it's no longer in the pool + let removed_tx = pool.remove_transaction(tx.id()).unwrap(); + assert_eq!(removed_tx.id(), tx.id()); + assert!(!pool.contains(tx.id())); + assert_eq!(pool.len(), 0); + } + + #[test] + fn test_reorder_on_basefee_update() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add two transactions with different fees + let tx1 = f.validated_arc(MockTransaction::eip1559().inc_price()); + let tx2 = f.validated_arc(MockTransaction::eip1559().inc_price_by(20)); + pool.add_transaction(tx1.clone(), 0); + pool.add_transaction(tx2.clone(), 0); + + // Ensure the transactions are in the correct order + let mut best = pool.best(); + assert_eq!(best.next().unwrap().hash(), tx2.hash()); + assert_eq!(best.next().unwrap().hash(), tx1.hash()); + + // Update the base fee to a value higher than tx1's fee, causing it to be removed + let removed = pool.update_base_fee((tx1.max_fee_per_gas() + 1) as u64); + assert_eq!(removed.len(), 
1); assert_eq!(removed[0].hash(), tx1.hash()); + + // Verify that only tx2 remains in the pool + assert_eq!(pool.len(), 1); + assert!(pool.contains(tx2.id())); + assert!(!pool.contains(tx1.id())); + } + + #[test] + #[should_panic(expected = "transaction already included")] + fn test_handle_duplicates() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add a transaction and verify it is tracked by the pool + let tx = f.validated_arc(MockTransaction::eip1559()); + pool.add_transaction(tx.clone(), 0); + assert!(pool.contains(tx.id())); + assert_eq!(pool.len(), 1); + + // Attempt to add the same transaction again, which should panic + pool.add_transaction(tx, 0); + } + + #[test] + fn test_update_blob_fee() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Add transactions with varying blob fees + let tx1 = f.validated_arc(MockTransaction::eip4844().set_blob_fee(50).clone()); + let tx2 = f.validated_arc(MockTransaction::eip4844().set_blob_fee(150).clone()); + pool.add_transaction(tx1.clone(), 0); + pool.add_transaction(tx2.clone(), 0); + + // Update the blob fee to a value that causes tx1 to be removed + let removed = pool.update_blob_fee(100); + assert_eq!(removed.len(), 1); + assert_eq!(removed[0].hash(), tx1.hash()); + + // Verify that only tx2 remains in the pool + assert!(pool.contains(tx2.id())); + assert!(!pool.contains(tx1.id())); + } } diff --git a/crates/transaction-pool/src/pool/state.rs b/crates/transaction-pool/src/pool/state.rs index d0a3b10f8cb..d65fc05b03f 100644 --- a/crates/transaction-pool/src/pool/state.rs +++ b/crates/transaction-pool/src/pool/state.rs @@ -46,8 +46,6 @@ bitflags::bitflags! { } } -// === impl TxState === - impl TxState { /// The state of a transaction is considered `pending`, if the transaction has: /// - _No_ parked ancestors @@ -89,8 +87,6 @@ pub enum SubPool { Pending, } -// === impl SubPool === - impl SubPool { /// Whether this transaction is to be moved to the pending sub-pool.
#[inline] @@ -126,16 +122,15 @@ impl SubPool { impl From for SubPool { fn from(value: TxState) -> Self { if value.is_pending() { - return Self::Pending - } - if value.is_blob() { + Self::Pending + } else if value.is_blob() { // all _non-pending_ blob transactions are in the blob sub-pool - return Self::Blob + Self::Blob + } else if value.bits() < TxState::BASE_FEE_POOL_BITS.bits() { + Self::Queued + } else { + Self::BaseFee } - if value.bits() < TxState::BASE_FEE_POOL_BITS.bits() { - return Self::Queued - } - Self::BaseFee } } @@ -204,4 +199,61 @@ mod tests { assert!(state.is_blob()); assert!(!state.is_pending()); } + + #[test] + fn test_tx_state_no_nonce_gap() { + let mut state = TxState::default(); + state |= TxState::NO_NONCE_GAPS; + assert!(!state.has_nonce_gap()); + } + + #[test] + fn test_tx_state_with_nonce_gap() { + let state = TxState::default(); + assert!(state.has_nonce_gap()); + } + + #[test] + fn test_tx_state_enough_balance() { + let mut state = TxState::default(); + state.insert(TxState::ENOUGH_BALANCE); + assert!(state.contains(TxState::ENOUGH_BALANCE)); + } + + #[test] + fn test_tx_state_not_too_much_gas() { + let mut state = TxState::default(); + state.insert(TxState::NOT_TOO_MUCH_GAS); + assert!(state.contains(TxState::NOT_TOO_MUCH_GAS)); + } + + #[test] + fn test_tx_state_enough_fee_cap_block() { + let mut state = TxState::default(); + state.insert(TxState::ENOUGH_FEE_CAP_BLOCK); + assert!(state.contains(TxState::ENOUGH_FEE_CAP_BLOCK)); + } + + #[test] + fn test_tx_base_fee() { + let state = TxState::BASE_FEE_POOL_BITS; + assert_eq!(SubPool::BaseFee, state.into()); + } + + #[test] + fn test_blob_transaction_only() { + let state = TxState::BLOB_TRANSACTION; + assert_eq!(SubPool::Blob, state.into()); + assert!(state.is_blob()); + assert!(!state.is_pending()); + } + + #[test] + fn test_blob_transaction_with_base_fee_bits() { + let mut state = TxState::BASE_FEE_POOL_BITS; + state.insert(TxState::BLOB_TRANSACTION); + assert_eq!(SubPool::Blob, state.into()); + assert!(state.is_blob()); + assert!(!state.is_pending()); + } } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 9d284392db5..dd6da1d0fef 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -15,17 +15,18 @@ use crate::{ AddedPendingTransaction, AddedTransaction, OnNewCanonicalStateOutcome, }, traits::{BestTransactionsAttributes, BlockInfo, PoolSize}, - PoolConfig, PoolResult, PoolTransaction, PriceBumpConfig, TransactionOrdering, + PoolConfig, PoolResult, PoolTransaction, PoolUpdateKind, PriceBumpConfig, TransactionOrdering, ValidPoolTransaction, U256, }; -use alloy_primitives::{Address, TxHash, B256}; -use reth_primitives::{ - constants::{ - eip4844::BLOB_TX_MIN_BLOB_GASPRICE, ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE, - }, +use alloy_consensus::constants::{ EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; +use alloy_eips::{ + eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}, + eip4844::BLOB_TX_MIN_BLOB_GASPRICE, +}; +use alloy_primitives::{Address, TxHash, B256}; use rustc_hash::FxHashMap; use smallvec::SmallVec; use std::{ @@ -75,6 +76,8 @@ pub struct TxPool { all_transactions: AllTransactions, /// Transaction pool metrics metrics: TxPoolMetrics, + /// The last update kind that was applied to the pool. 
+ latest_update_kind: Option, } // === impl TxPool === @@ -91,6 +94,7 @@ impl TxPool { all_transactions: AllTransactions::new(&config), config, metrics: Default::default(), + latest_update_kind: None, } } @@ -314,7 +318,7 @@ impl TxPool { // blob pool that are valid with the lower blob fee if best_transactions_attributes .blob_fee - .map_or(false, |fee| fee < self.all_transactions.pending_fees.blob_fee as u64) + .is_some_and(|fee| fee < self.all_transactions.pending_fees.blob_fee as u64) { let unlocked_by_blob_fee = self.blob_pool.satisfy_attributes(best_transactions_attributes); @@ -363,11 +367,34 @@ impl TxPool { self.pending_pool.all() } + /// Returns all pending transactions filtered by predicate + pub(crate) fn pending_transactions_with_predicate( + &self, + mut predicate: impl FnMut(&ValidPoolTransaction) -> bool, + ) -> Vec>> { + self.pending_transactions_iter().filter(|tx| predicate(tx)).collect() + } + + /// Returns all pending transactions for the specified sender + pub(crate) fn pending_txs_by_sender( + &self, + sender: SenderId, + ) -> Vec>> { + self.pending_transactions_iter().filter(|tx| tx.sender_id() == sender).collect() + } + /// Returns all transactions from parked pools pub(crate) fn queued_transactions(&self) -> Vec>> { self.basefee_pool.all().chain(self.queued_pool.all()).collect() } + /// Returns an iterator over all transactions from parked pools + pub(crate) fn queued_transactions_iter( + &self, + ) -> impl Iterator>> + '_ { + self.basefee_pool.all().chain(self.queued_pool.all()) + } + /// Returns queued and pending transactions for the specified sender pub fn queued_and_pending_txs_by_sender( &self, @@ -376,6 +403,14 @@ impl TxPool { (self.queued_pool.get_txs_by_sender(sender), self.pending_pool.get_txs_by_sender(sender)) } + /// Returns all queued transactions for the specified sender + pub(crate) fn queued_txs_by_sender( + &self, + sender: SenderId, + ) -> Vec>> { + self.queued_transactions_iter().filter(|tx| tx.sender_id() == sender).collect() + } + /// Returns `true` if the transaction with the given hash is already included in this pool. pub(crate) fn contains(&self, tx_hash: &TxHash) -> bool { self.all_transactions.contains(tx_hash) @@ -425,12 +460,14 @@ impl TxPool { /// Updates the transactions for the changed senders. pub(crate) fn update_accounts( &mut self, - changed_senders: HashMap, + changed_senders: FxHashMap, ) -> UpdateOutcome { - // track changed accounts - self.sender_info.extend(changed_senders.clone()); // Apply the state changes to the total set of transactions which triggers sub-pool updates. 
- let updates = self.all_transactions.update(changed_senders); + let updates = self.all_transactions.update(&changed_senders); + + // track changed accounts + self.sender_info.extend(changed_senders); + // Process the sub-pool updates let update = self.process_updates(updates); // update the metrics after the update @@ -446,25 +483,32 @@ impl TxPool { &mut self, block_info: BlockInfo, mined_transactions: Vec, - changed_senders: HashMap, + changed_senders: FxHashMap, + update_kind: PoolUpdateKind, ) -> OnNewCanonicalStateOutcome { // update block info let block_hash = block_info.last_seen_block_hash; self.all_transactions.set_block_info(block_info); // Remove all transaction that were included in the block + let mut removed_txs_count = 0; for tx_hash in &mined_transactions { if self.prune_transaction_by_hash(tx_hash).is_some() { - // Update removed transactions metric - self.metrics.removed_transactions.increment(1); + removed_txs_count += 1; } } + // Update removed transactions metric + self.metrics.removed_transactions.increment(removed_txs_count); + let UpdateOutcome { promoted, discarded } = self.update_accounts(changed_senders); self.update_transaction_type_metrics(); self.metrics.performed_state_updates.increment(1); + // Update the latest update kind + self.latest_update_kind = Some(update_kind); + OnNewCanonicalStateOutcome { block_hash, mined: mined_transactions, promoted, discarded } } @@ -604,8 +648,8 @@ impl TxPool { *transaction.hash(), PoolErrorKind::InvalidTransaction( InvalidPoolTransactionError::ExceedsGasLimit( - block_gas_limit, tx_gas_limit, + block_gas_limit, ), ), )), @@ -618,7 +662,7 @@ impl TxPool { InsertErr::Overdraft { transaction } => Err(PoolError::new( *transaction.hash(), PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::Overdraft { - cost: transaction.cost(), + cost: *transaction.cost(), balance: on_chain_balance, }), )), @@ -1056,11 +1100,11 @@ impl AllTransactions { self.by_hash.keys().copied() } - /// Returns an iterator over all _unique_ hashes in the pool + /// Returns an iterator over all transactions in the pool pub(crate) fn transactions_iter( &self, - ) -> impl Iterator>> + '_ { - self.by_hash.values().cloned() + ) -> impl Iterator>> + '_ { + self.by_hash.values() } /// Returns if the transaction for the given hash is already included in this pool @@ -1141,7 +1185,7 @@ impl AllTransactions { /// that got transaction included in the block. pub(crate) fn update( &mut self, - changed_accounts: HashMap, + changed_accounts: &FxHashMap, ) -> Vec { // pre-allocate a few updates let mut updates = Vec::with_capacity(64); @@ -1190,7 +1234,7 @@ impl AllTransactions { tx.state.insert(TxState::NO_NONCE_GAPS); tx.state.insert(TxState::NO_PARKED_ANCESTORS); tx.cumulative_cost = U256::ZERO; - if tx.transaction.cost() > info.balance { + if tx.transaction.cost() > &info.balance { // sender lacks sufficient funds to pay for this transaction tx.state.remove(TxState::ENOUGH_BALANCE); } else { @@ -1198,7 +1242,7 @@ impl AllTransactions { } } - changed_balance = Some(info.balance); + changed_balance = Some(&info.balance); } // If there's a nonce gap, we can shortcircuit, because there's nothing to update yet. @@ -1249,7 +1293,7 @@ impl AllTransactions { // If the account changed in the block, check the balance. 
if let Some(changed_balance) = changed_balance { - if cumulative_cost > changed_balance { + if &cumulative_cost > changed_balance { // sender lacks sufficient funds to pay for this transaction tx.state.remove(TxState::ENOUGH_BALANCE); } else { @@ -1289,7 +1333,7 @@ impl AllTransactions { id: *tx.transaction.id(), hash: *tx.transaction.hash(), current: current_pool, - destination: Destination::Pool(tx.subpool), + destination: tx.subpool.into(), }) } } @@ -1405,12 +1449,9 @@ impl AllTransactions { /// Caution: This assumes that mutually exclusive invariant is always true for the same sender. #[inline] fn contains_conflicting_transaction(&self, tx: &ValidPoolTransaction) -> bool { - let mut iter = self.txs_iter(tx.transaction_id.sender); - if let Some((_, existing)) = iter.next() { - return tx.tx_type_conflicts_with(&existing.transaction) - } - // no existing transaction for this sender - false + self.txs_iter(tx.transaction_id.sender) + .next() + .is_some_and(|(_, existing)| tx.tx_type_conflicts_with(&existing.transaction)) } /// Additional checks for a new transaction. @@ -1424,11 +1465,15 @@ impl AllTransactions { fn ensure_valid( &self, transaction: ValidPoolTransaction, + on_chain_nonce: u64, ) -> Result, InsertErr> { - if !self.local_transactions_config.is_local(transaction.origin, transaction.sender()) { + if !self.local_transactions_config.is_local(transaction.origin, transaction.sender_ref()) { let current_txs = self.tx_counter.get(&transaction.sender_id()).copied().unwrap_or_default(); - if current_txs >= self.max_account_slots { + + // Reject transactions if sender's capacity is exceeded. + // If transaction's nonce matches on-chain nonce always let it through + if current_txs >= self.max_account_slots && transaction.nonce() > on_chain_nonce { return Err(InsertErr::ExceededSenderTransactionsCapacity { transaction: Arc::new(transaction), }) @@ -1502,7 +1547,7 @@ impl AllTransactions { } } } - } else if new_blob_tx.cost() > on_chain_balance { + } else if new_blob_tx.cost() > &on_chain_balance { // the transaction would go into overdraft return Err(InsertErr::Overdraft { transaction: Arc::new(new_blob_tx) }) } @@ -1510,52 +1555,6 @@ impl AllTransactions { Ok(new_blob_tx) } - /// Returns true if the replacement candidate is underpriced and can't replace the existing - /// transaction. 
- #[inline] - fn is_underpriced( - existing_transaction: &ValidPoolTransaction, - maybe_replacement: &ValidPoolTransaction, - price_bumps: &PriceBumpConfig, - ) -> bool { - let price_bump = price_bumps.price_bump(existing_transaction.tx_type()); - - if maybe_replacement.max_fee_per_gas() <= - existing_transaction.max_fee_per_gas() * (100 + price_bump) / 100 - { - return true - } - - let existing_max_priority_fee_per_gas = - existing_transaction.transaction.max_priority_fee_per_gas().unwrap_or(0); - let replacement_max_priority_fee_per_gas = - maybe_replacement.transaction.max_priority_fee_per_gas().unwrap_or(0); - - if replacement_max_priority_fee_per_gas <= - existing_max_priority_fee_per_gas * (100 + price_bump) / 100 && - existing_max_priority_fee_per_gas != 0 && - replacement_max_priority_fee_per_gas != 0 - { - return true - } - - // check max blob fee per gas - if let Some(existing_max_blob_fee_per_gas) = - existing_transaction.transaction.max_fee_per_blob_gas() - { - // this enforces that blob txs can only be replaced by blob txs - let replacement_max_blob_fee_per_gas = - maybe_replacement.transaction.max_fee_per_blob_gas().unwrap_or(0); - if replacement_max_blob_fee_per_gas <= - existing_max_blob_fee_per_gas * (100 + price_bump) / 100 - { - return true - } - } - - false - } - /// Inserts a new _valid_ transaction into the pool. /// /// If the transaction already exists, it will be replaced if not underpriced. @@ -1595,7 +1594,7 @@ impl AllTransactions { ) -> InsertResult { assert!(on_chain_nonce <= transaction.nonce(), "Invalid transaction"); - let mut transaction = self.ensure_valid(transaction)?; + let mut transaction = self.ensure_valid(transaction, on_chain_nonce)?; let inserted_tx_id = *transaction.id(); let mut state = TxState::default(); @@ -1670,8 +1669,7 @@ impl AllTransactions { let maybe_replacement = transaction.as_ref(); // Ensure the new transaction is not underpriced - if Self::is_underpriced(existing_transaction, maybe_replacement, &self.price_bumps) - { + if existing_transaction.is_underpriced(maybe_replacement, &self.price_bumps) { return Err(InsertErr::Underpriced { transaction: pool_tx.transaction, existing: *entry.get().transaction.hash(), @@ -1690,29 +1688,17 @@ impl AllTransactions { // The next transaction of this sender let on_chain_id = TransactionId::new(transaction.sender_id(), on_chain_nonce); { - // get all transactions of the sender's account - let mut descendants = self.descendant_txs_mut(&on_chain_id).peekable(); - // Tracks the next nonce we expect if the transactions are gapless let mut next_nonce = on_chain_id.nonce; // We need to find out if the next transaction of the sender is considered pending - let mut has_parked_ancestor = if ancestor.is_none() { - // the new transaction is the next one - false - } else { - // The transaction was added above so the _inclusive_ descendants iterator - // returns at least 1 tx. 
- let (id, tx) = descendants.peek().expect("includes >= 1"); - if id.nonce < inserted_tx_id.nonce { - !tx.state.is_pending() - } else { - true - } - }; + // The direct descendant has _no_ parked ancestors because the `on_chain_nonce` is + // pending, so we can set this to `false` + let mut has_parked_ancestor = false; - // Traverse all transactions of the sender and update existing transactions - for (id, tx) in descendants { + // Traverse all future transactions of the sender starting with the on chain nonce, and + // update existing transactions: `[on_chain_nonce,..]` + for (id, tx) in self.descendant_txs_mut(&on_chain_id) { let current_pool = tx.subpool; // If there's a nonce gap, we can shortcircuit @@ -1757,7 +1743,7 @@ impl AllTransactions { id: *id, hash: *tx.transaction.hash(), current: current_pool, - destination: Destination::Pool(tx.subpool), + destination: tx.subpool.into(), }) } } @@ -1950,15 +1936,14 @@ impl SenderInfo { #[cfg(test)] mod tests { - use alloy_primitives::address; - use reth_primitives::TxType; - use super::*; use crate::{ test_utils::{MockOrdering, MockTransaction, MockTransactionFactory, MockTransactionSet}, traits::TransactionOrigin, SubPoolLimit, }; + use alloy_primitives::address; + use reth_primitives::TxType; #[test] fn test_insert_blob() { @@ -2342,7 +2327,9 @@ mod tests { let on_chain_nonce = 0; let mut f = MockTransactionFactory::default(); let mut pool = AllTransactions::default(); - let tx = MockTransaction::eip1559().inc_price().inc_limit(); + let mut tx = MockTransaction::eip1559().inc_price().inc_limit(); + tx.set_priority_fee(100); + tx.set_max_fee(100); let valid_tx = f.validated(tx.clone()); let InsertOk { updates, replaced_tx, move_to, state, .. } = pool.insert_tx(valid_tx.clone(), on_chain_balance, on_chain_nonce).unwrap(); @@ -2468,20 +2455,20 @@ mod tests { let first = f.validated(tx.clone()); let _ = pool.insert_tx(first.clone(), on_chain_balance, on_chain_nonce).unwrap(); let mut replacement = f.validated(tx.rng_hash().inc_price()); + // a price bump of 9% is not enough for a default min price bump of 10% replacement.transaction.set_priority_fee(109); replacement.transaction.set_max_fee(109); let err = pool.insert_tx(replacement.clone(), on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::Underpriced { .. })); - // ensure first tx is not removed assert!(pool.contains(first.hash())); assert_eq!(pool.len(), 1); - // price bump of 10% is also not enough because the bump should be strictly greater than 10% + // should also fail if the bump in max fee is not enough replacement.transaction.set_priority_fee(110); - replacement.transaction.set_max_fee(110); + replacement.transaction.set_max_fee(109); let err = pool.insert_tx(replacement.clone(), on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::Underpriced { .. })); @@ -2489,7 +2476,7 @@ mod tests { assert_eq!(pool.len(), 1); // should also fail if the bump in priority fee is not enough - replacement.transaction.set_priority_fee(111); + replacement.transaction.set_priority_fee(109); replacement.transaction.set_max_fee(110); let err = pool.insert_tx(replacement, on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::Underpriced { .. 
})); @@ -2506,8 +2493,7 @@ mod tests { let tx = MockTransaction::eip1559().inc_price().inc_limit(); let first = f.validated(tx.clone()); pool.insert_tx(first, on_chain_balance, on_chain_nonce).unwrap(); - let tx = - MockTransaction::eip4844().set_sender(tx.get_sender()).inc_price_by(100).inc_limit(); + let tx = MockTransaction::eip4844().set_sender(tx.sender()).inc_price_by(100).inc_limit(); let blob = f.validated(tx); let err = pool.insert_tx(blob, on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::TxTypeConflict { .. }), "{err:?}"); @@ -2522,8 +2508,7 @@ mod tests { let tx = MockTransaction::eip4844().inc_price().inc_limit(); let first = f.validated(tx.clone()); pool.insert_tx(first, on_chain_balance, on_chain_nonce).unwrap(); - let tx = - MockTransaction::eip1559().set_sender(tx.get_sender()).inc_price_by(100).inc_limit(); + let tx = MockTransaction::eip1559().set_sender(tx.sender()).inc_price_by(100).inc_limit(); let tx = f.validated(tx); let err = pool.insert_tx(tx, on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::TxTypeConflict { .. }), "{err:?}"); @@ -2634,6 +2619,7 @@ mod tests { let mut pool = AllTransactions::default(); let mut tx = MockTransaction::eip1559(); + let unblocked_tx = tx.clone(); for _ in 0..pool.max_account_slots { tx = tx.next(); pool.insert_tx(f.validated(tx.clone()), on_chain_balance, on_chain_nonce).unwrap(); @@ -2641,12 +2627,16 @@ mod tests { assert_eq!( pool.max_account_slots, - pool.tx_count(f.ids.sender_id(&tx.get_sender()).unwrap()) + pool.tx_count(f.ids.sender_id(tx.get_sender()).unwrap()) ); let err = pool.insert_tx(f.validated(tx.next()), on_chain_balance, on_chain_nonce).unwrap_err(); assert!(matches!(err, InsertErr::ExceededSenderTransactionsCapacity { .. 
})); + + assert!(pool + .insert_tx(f.validated(unblocked_tx), on_chain_balance, on_chain_nonce) + .is_ok()); } #[test] @@ -2669,7 +2659,7 @@ mod tests { assert_eq!( pool.max_account_slots, - pool.tx_count(f.ids.sender_id(&tx.get_sender()).unwrap()) + pool.tx_count(f.ids.sender_id(tx.get_sender()).unwrap()) ); pool.insert_tx( @@ -2844,7 +2834,7 @@ mod tests { let mut changed_senders = HashMap::default(); changed_senders.insert( id.sender, - SenderInfo { state_nonce: next.get_nonce(), balance: U256::from(1_000) }, + SenderInfo { state_nonce: next.nonce(), balance: U256::from(1_000) }, ); let outcome = pool.update_accounts(changed_senders); assert_eq!(outcome.discarded.len(), 1); @@ -2912,7 +2902,7 @@ mod tests { pool.update_basefee(pool_base_fee); // 2 txs, that should put the pool over the size limit but not max txs - let a_txs = MockTransactionSet::dependent(a_sender, 0, 2, TxType::Eip1559) + let a_txs = MockTransactionSet::dependent(a_sender, 0, 3, TxType::Eip1559) .into_iter() .map(|mut tx| { tx.set_size(default_limits.max_size / 2 + 1); @@ -3267,4 +3257,75 @@ mod tests { vec![1, 2, 3] ); } + + #[test] + fn test_pending_ordering() { + let mut f = MockTransactionFactory::default(); + let mut pool = TxPool::new(MockOrdering::default(), Default::default()); + + let tx_0 = MockTransaction::eip1559().with_nonce(1).set_gas_price(100).inc_limit(); + let tx_1 = tx_0.next(); + + let v0 = f.validated(tx_0); + let v1 = f.validated(tx_1); + + // nonce gap, tx should be queued + pool.add_transaction(v0.clone(), U256::MAX, 0).unwrap(); + assert_eq!(1, pool.queued_transactions().len()); + + // nonce gap is closed on-chain, both transactions should be moved to pending + pool.add_transaction(v1, U256::MAX, 1).unwrap(); + + assert_eq!(2, pool.pending_transactions().len()); + assert_eq!(0, pool.queued_transactions().len()); + + assert_eq!( + pool.pending_pool.independent().get(&v0.sender_id()).unwrap().transaction.nonce(), + v0.nonce() + ); + } + + // + #[test] + fn one_sender_one_independent_transaction() { + let mut on_chain_balance = U256::from(4_999); // only enough for 4 txs + let mut on_chain_nonce = 40; + let mut f = MockTransactionFactory::default(); + let mut pool = TxPool::mock(); + let mut submitted_txs = Vec::new(); + + // We use a "template" because we want all txs to have the same sender. + let template = + MockTransaction::eip1559().inc_price().inc_limit().with_value(U256::from(1_001)); + + // Add 8 txs. Because the balance is only sufficient for 4, the last 4 will be + // queued. + for tx_nonce in 40..48 { + let tx = f.validated(template.clone().with_nonce(tx_nonce).rng_hash()); + submitted_txs.push(*tx.id()); + pool.add_transaction(tx, on_chain_balance, on_chain_nonce).unwrap(); + } + + // A block is mined with two txs (so nonce is changed from 40 to 42). + // Now the balance is increased so that it's enough to execute all txs. + on_chain_balance = U256::from(999_999); + on_chain_nonce = 42; + pool.remove_transaction(&submitted_txs[0]); + pool.remove_transaction(&submitted_txs[1]); + + // Add 4 txs.
+ for tx_nonce in 48..52 { + pool.add_transaction( + f.validated(template.clone().with_nonce(tx_nonce).rng_hash()), + on_chain_balance, + on_chain_nonce, + ) + .unwrap(); + } + + let best_txs: Vec<_> = pool.pending().best().map(|tx| *tx.id()).collect(); + assert_eq!(best_txs.len(), 10); // 8 - 2 + 4 = 10 + + assert_eq!(pool.pending_pool.independent().len(), 1); + } } diff --git a/crates/transaction-pool/src/pool/update.rs b/crates/transaction-pool/src/pool/update.rs index a5cce8291fa..d62b1792e7b 100644 --- a/crates/transaction-pool/src/pool/update.rs +++ b/crates/transaction-pool/src/pool/update.rs @@ -26,3 +26,9 @@ pub(crate) enum Destination { /// Move transaction to pool Pool(SubPool), } + +impl From for Destination { + fn from(sub_pool: SubPool) -> Self { + Self::Pool(sub_pool) + } +} diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index d51bf80270d..95a179aec81 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -1,12 +1,10 @@ use crate::EthPooledTransaction; use alloy_consensus::{TxEip1559, TxEip4844, TxLegacy}; -use alloy_eips::{eip2718::Encodable2718, eip2930::AccessList}; +use alloy_eips::{eip1559::MIN_PROTOCOL_BASE_FEE, eip2718::Encodable2718, eip2930::AccessList}; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use rand::Rng; use reth_chainspec::MAINNET; -use reth_primitives::{ - constants::MIN_PROTOCOL_BASE_FEE, sign_message, Transaction, TransactionSigned, -}; +use reth_primitives::{sign_message, Transaction, TransactionSigned}; /// A generator for transactions for testing purposes. #[derive(Debug)] @@ -201,7 +199,7 @@ impl TransactionBuilder { /// Signs the provided transaction using the specified signer and returns a signed transaction. fn signed(transaction: Transaction, signer: B256) -> TransactionSigned { let signature = sign_message(signer, transaction.signature_hash()).unwrap(); - TransactionSigned::from_transaction_and_signature(transaction, signature) + TransactionSigned::new_unhashed(transaction, signature) } /// Sets the signer for the transaction builder. 
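The small `From` impl added to `update.rs` above is what lets the txpool call sites shorten `Destination::Pool(tx.subpool)` to `tx.subpool.into()`; a minimal sketch of the equivalence (illustrative, not part of the diff):

    let dest: Destination = SubPool::Pending.into();
    assert!(matches!(dest, Destination::Pool(SubPool::Pending)));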
diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index e2b5f373e44..d174c7b1604 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -7,22 +7,29 @@ use crate::{ CoinbaseTipOrdering, EthBlobTransactionSidecar, EthPoolTransaction, PoolTransaction, ValidPoolTransaction, }; -use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}; -use alloy_eips::eip2930::AccessList; -use alloy_primitives::{Address, Bytes, ChainId, TxHash, TxKind, B256, U256}; +use alloy_consensus::{ + constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID}, + TxEip1559, TxEip2930, TxEip4844, TxLegacy, +}; +use alloy_eips::{ + eip1559::MIN_PROTOCOL_BASE_FEE, + eip2930::AccessList, + eip4844::{BlobTransactionSidecar, BlobTransactionValidationError, DATA_GAS_PER_BLOB}, +}; +use alloy_primitives::{ + Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256, +}; use paste::paste; use rand::{ distributions::{Uniform, WeightedIndex}, prelude::Distribution, }; use reth_primitives::{ - constants::{eip4844::DATA_GAS_PER_BLOB, MIN_PROTOCOL_BASE_FEE}, - transaction::TryFromRecoveredTransactionError, - BlobTransactionSidecar, BlobTransactionValidationError, PooledTransactionsElementEcRecovered, - Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, TxType, - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, + transaction::{SignedTransactionIntoRecoveredExt, TryFromRecoveredTransactionError}, + PooledTransactionsElement, PooledTransactionsElementEcRecovered, RecoveredTx, Transaction, + TransactionSigned, TxType, }; - +use reth_primitives_traits::InMemorySize; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; /// A transaction pool implementation using [`MockOrdering`] for transaction ordering. @@ -53,6 +60,8 @@ macro_rules! set_value { *$field = new_value; } } + // Ensure the tx cost is always correct after each mutation. + $this.update_cost(); }; } @@ -63,7 +72,7 @@ macro_rules! get_value { MockTransaction::Legacy { $field, .. } | MockTransaction::Eip1559 { $field, .. } | MockTransaction::Eip4844 { $field, .. } | - MockTransaction::Eip2930 { $field, .. } => $field.clone(), + MockTransaction::Eip2930 { $field, .. } => $field, } }; } @@ -85,7 +94,7 @@ macro_rules! make_setters_getters { } /// Gets the value of the specified field. - pub fn [](&self) -> $t { + pub const fn [](&self) -> &$t { get_value!(self => $name) } )*} @@ -117,6 +126,8 @@ pub enum MockTransaction { input: Bytes, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, /// EIP-2930 transaction type. Eip2930 { @@ -142,6 +153,8 @@ pub enum MockTransaction { access_list: AccessList, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, /// EIP-1559 transaction type. Eip1559 { @@ -169,6 +182,8 @@ pub enum MockTransaction { input: Bytes, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, /// EIP-4844 transaction type. 
Eip4844 { @@ -200,6 +215,8 @@ pub enum MockTransaction { sidecar: BlobTransactionSidecar, /// The size of the transaction, returned in the implementation of [`PoolTransaction`]. size: usize, + /// The cost of the transaction, returned in the implementation of [`PoolTransaction`]. + cost: U256, }, } @@ -229,6 +246,7 @@ impl MockTransaction { value: Default::default(), input: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -246,6 +264,7 @@ impl MockTransaction { gas_price: 0, access_list: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -264,6 +283,7 @@ impl MockTransaction { input: Bytes::new(), access_list: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -284,6 +304,7 @@ impl MockTransaction { access_list: Default::default(), sidecar: Default::default(), size: Default::default(), + cost: U256::ZERO, } } @@ -554,69 +575,82 @@ impl MockTransaction { pub const fn is_eip2930(&self) -> bool { matches!(self, Self::Eip2930 { .. }) } + + fn update_cost(&mut self) { + match self { + Self::Legacy { cost, gas_limit, gas_price, value, .. } | + Self::Eip2930 { cost, gas_limit, gas_price, value, .. } => { + *cost = U256::from(*gas_limit) * U256::from(*gas_price) + *value + } + Self::Eip1559 { cost, gas_limit, max_fee_per_gas, value, .. } | + Self::Eip4844 { cost, gas_limit, max_fee_per_gas, value, .. } => { + *cost = U256::from(*gas_limit) * U256::from(*max_fee_per_gas) + *value + } + }; + } } impl PoolTransaction for MockTransaction { type TryFromConsensusError = TryFromRecoveredTransactionError; - type Consensus = TransactionSignedEcRecovered; + type Consensus = TransactionSigned; - type Pooled = PooledTransactionsElementEcRecovered; + type Pooled = PooledTransactionsElement; - fn try_from_consensus(tx: Self::Consensus) -> Result { + fn try_from_consensus( + tx: RecoveredTx, + ) -> Result { tx.try_into() } - fn into_consensus(self) -> Self::Consensus { + fn into_consensus(self) -> RecoveredTx { self.into() } - fn from_pooled(pooled: Self::Pooled) -> Self { + fn from_pooled(pooled: RecoveredTx) -> Self { pooled.into() } + fn try_consensus_into_pooled( + tx: RecoveredTx, + ) -> Result, Self::TryFromConsensusError> { + let (tx, signer) = tx.to_components(); + Self::Pooled::try_from(tx) + .map(|tx| tx.with_signer(signer)) + .map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing) + } + fn hash(&self) -> &TxHash { - match self { - Self::Legacy { hash, .. } | - Self::Eip1559 { hash, .. } | - Self::Eip4844 { hash, .. } | - Self::Eip2930 { hash, .. } => hash, - } + self.get_hash() } fn sender(&self) -> Address { - match self { - Self::Legacy { sender, .. } | - Self::Eip1559 { sender, .. } | - Self::Eip4844 { sender, .. } | - Self::Eip2930 { sender, .. } => *sender, - } + *self.get_sender() + } + + fn sender_ref(&self) -> &Address { + self.get_sender() } fn nonce(&self) -> u64 { - match self { - Self::Legacy { nonce, .. } | - Self::Eip1559 { nonce, .. } | - Self::Eip4844 { nonce, .. } | - Self::Eip2930 { nonce, .. } => *nonce, - } + *self.get_nonce() } - fn cost(&self) -> U256 { + // Having `get_cost` from `make_setters_getters` would be cleaner but we didn't + // want to also generate the error-prone cost setters. For now cost should be + // correct at construction and auto-updated per field update via `update_cost`, + // not to be manually set. + fn cost(&self) -> &U256 { match self { - Self::Legacy { gas_price, value, gas_limit, .. } | - Self::Eip2930 { gas_limit, gas_price, value, .. 
} => { - U256::from(*gas_limit) * U256::from(*gas_price) + *value - } - Self::Eip1559 { max_fee_per_gas, value, gas_limit, .. } | - Self::Eip4844 { max_fee_per_gas, value, gas_limit, .. } => { - U256::from(*gas_limit) * U256::from(*max_fee_per_gas) + *value - } + Self::Legacy { cost, .. } | + Self::Eip2930 { cost, .. } | + Self::Eip1559 { cost, .. } | + Self::Eip4844 { cost, .. } => cost, } } fn gas_limit(&self) -> u64 { - self.get_gas_limit() + *self.get_gas_limit() } fn max_fee_per_gas(&self) -> u128 { @@ -695,24 +729,24 @@ impl PoolTransaction for MockTransaction { } } - /// Returns the input data associated with the transaction. - fn input(&self) -> &[u8] { + /// Returns true if the transaction is a contract creation. + fn is_create(&self) -> bool { match self { - Self::Legacy { .. } => &[], - Self::Eip1559 { input, .. } | - Self::Eip4844 { input, .. } | - Self::Eip2930 { input, .. } => input, + Self::Legacy { to, .. } | Self::Eip1559 { to, .. } | Self::Eip2930 { to, .. } => { + to.is_create() + } + Self::Eip4844 { .. } => false, } } + /// Returns the input data associated with the transaction. + fn input(&self) -> &[u8] { + self.get_input() + } + /// Returns the size of the transaction. fn size(&self) -> usize { - match self { - Self::Legacy { size, .. } | - Self::Eip1559 { size, .. } | - Self::Eip4844 { size, .. } | - Self::Eip2930 { size, .. } => *size, - } + *self.get_size() } /// Returns the transaction type as a byte identifier. @@ -756,11 +790,32 @@ impl EthPoolTransaction for MockTransaction { } } + fn try_into_pooled_eip4844( + self, + sidecar: Arc, + ) -> Option> { + let (tx, signer) = self.into_consensus().to_components(); + Self::Pooled::try_from_blob_transaction(tx, Arc::unwrap_or_clone(sidecar)) + .map(|tx| tx.with_signer(signer)) + .ok() + } + + fn try_from_eip4844( + tx: RecoveredTx, + sidecar: BlobTransactionSidecar, + ) -> Option { + let (tx, signer) = tx.to_components(); + Self::Pooled::try_from_blob_transaction(tx, sidecar) + .map(|tx| tx.with_signer(signer)) + .ok() + .map(Self::from_pooled) + } + fn validate_blob( &self, _blob: &BlobTransactionSidecar, _settings: &reth_primitives::kzg::KzgSettings, - ) -> Result<(), reth_primitives::BlobTransactionValidationError> { + ) -> Result<(), alloy_eips::eip4844::BlobTransactionValidationError> { match &self { Self::Eip4844 { .. 
} => Ok(()), _ => Err(BlobTransactionValidationError::NotBlobTransaction(self.tx_type())), @@ -772,10 +827,10 @@ impl EthPoolTransaction for MockTransaction { } } -impl TryFrom for MockTransaction { +impl TryFrom for MockTransaction { type Error = TryFromRecoveredTransactionError; - fn try_from(tx: TransactionSignedEcRecovered) -> Result { + fn try_from(tx: RecoveredTx) -> Result { let sender = tx.signer(); let transaction = tx.into_signed(); let hash = transaction.hash(); @@ -802,6 +857,7 @@ impl TryFrom for MockTransaction { value, input, size, + cost: U256::from(gas_limit) * U256::from(gas_price) + value, }), Transaction::Eip2930(TxEip2930 { chain_id, @@ -824,6 +880,7 @@ impl TryFrom for MockTransaction { input, access_list, size, + cost: U256::from(gas_limit) * U256::from(gas_price) + value, }), Transaction::Eip1559(TxEip1559 { chain_id, @@ -848,6 +905,7 @@ impl TryFrom for MockTransaction { input, access_list, size, + cost: U256::from(gas_limit) * U256::from(max_fee_per_gas) + value, }), Transaction::Eip4844(TxEip4844 { chain_id, @@ -876,6 +934,7 @@ impl TryFrom for MockTransaction { access_list, sidecar: BlobTransactionSidecar::default(), size, + cost: U256::from(gas_limit) * U256::from(max_fee_per_gas) + value, }), _ => unreachable!("Invalid transaction type"), } @@ -890,13 +949,10 @@ impl From for MockTransaction { } } -impl From for TransactionSignedEcRecovered { +impl From for RecoveredTx { fn from(tx: MockTransaction) -> Self { - let signed_tx = TransactionSigned { - hash: *tx.hash(), - signature: Signature::test_signature(), - transaction: tx.clone().into(), - }; + let signed_tx = + TransactionSigned::new(tx.clone().into(), Signature::test_signature(), *tx.hash()); Self::from_signed_transaction(signed_tx, tx.sender()) } @@ -907,28 +963,24 @@ impl From for Transaction { match mock { MockTransaction::Legacy { chain_id, - hash: _, - sender: _, nonce, gas_price, gas_limit, to, value, input, - size: _, + .. } => Self::Legacy(TxLegacy { chain_id, nonce, gas_price, gas_limit, to, value, input }), MockTransaction::Eip2930 { chain_id, - hash: _, - sender: _, nonce, - to, + gas_price, gas_limit, - input, + to, value, - gas_price, access_list, - size: _, + input, + .. } => Self::Eip2930(TxEip2930 { chain_id, nonce, @@ -941,17 +993,15 @@ impl From for Transaction { }), MockTransaction::Eip1559 { chain_id, - hash: _, - sender: _, nonce, + gas_limit, max_fee_per_gas, max_priority_fee_per_gas, - gas_limit, to, value, access_list, input, - size: _, + .. } => Self::Eip1559(TxEip1559 { chain_id, nonce, @@ -965,19 +1015,17 @@ impl From for Transaction { }), MockTransaction::Eip4844 { chain_id, - hash: _, - sender: _, nonce, + gas_limit, max_fee_per_gas, max_priority_fee_per_gas, - max_fee_per_blob_gas, - gas_limit, to, value, access_list, - input, sidecar, - size: _, + max_fee_per_blob_gas, + input, + .. 
} => Self::Eip4844(TxEip4844 { chain_id, nonce, @@ -1002,107 +1050,11 @@ impl proptest::arbitrary::Arbitrary for MockTransaction { use proptest::prelude::Strategy; use proptest_arbitrary_interop::arb; - arb::<(Transaction, Address, B256)>() - .prop_map(|(tx, sender, tx_hash)| match &tx { - Transaction::Legacy(TxLegacy { - chain_id, - nonce, - gas_price, - gas_limit, - to, - value, - input, - }) => Self::Legacy { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - gas_price: *gas_price, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - size: tx.size(), - }, - - Transaction::Eip2930(TxEip2930 { - chain_id, - nonce, - gas_price, - gas_limit, - to, - value, - access_list, - input, - }) => Self::Eip2930 { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - gas_price: *gas_price, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - access_list: access_list.clone(), - size: tx.size(), - }, - Transaction::Eip1559(TxEip1559 { - chain_id, - nonce, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - to, - value, - input, - access_list, - }) => Self::Eip1559 { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - max_fee_per_gas: *max_fee_per_gas, - max_priority_fee_per_gas: *max_priority_fee_per_gas, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - access_list: access_list.clone(), - size: tx.size(), - }, - Transaction::Eip4844(TxEip4844 { - chain_id, - nonce, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - to, - value, - input, - max_fee_per_blob_gas, - access_list, - blob_versioned_hashes: _, - }) => Self::Eip4844 { - chain_id: *chain_id, - sender, - hash: tx_hash, - nonce: *nonce, - max_fee_per_gas: *max_fee_per_gas, - max_priority_fee_per_gas: *max_priority_fee_per_gas, - max_fee_per_blob_gas: *max_fee_per_blob_gas, - gas_limit: { *gas_limit }, - to: *to, - value: *value, - input: input.clone(), - access_list: access_list.clone(), - // only generate a sidecar if it is a 4844 tx - also for the sake of - // performance just use a default sidecar - sidecar: BlobTransactionSidecar::default(), - size: tx.size(), - }, - #[allow(unreachable_patterns)] - _ => unimplemented!(), + arb::<(TransactionSigned, Address)>() + .prop_map(|(signed_transaction, signer)| { + RecoveredTx::from_signed_transaction(signed_transaction, signer) + .try_into() + .expect("Failed to create an Arbitrary MockTransaction via RecoveredTx") }) .boxed() } @@ -1121,8 +1073,8 @@ pub struct MockTransactionFactory { impl MockTransactionFactory { /// Generates a transaction ID for the given [`MockTransaction`]. pub fn tx_id(&mut self, tx: &MockTransaction) -> TransactionId { - let sender = self.ids.sender_id_or_create(tx.get_sender()); - TransactionId::new(sender, tx.get_nonce()) + let sender = self.ids.sender_id_or_create(tx.sender()); + TransactionId::new(sender, tx.nonce()) } /// Validates a [`MockTransaction`] and returns a [`MockValidTx`]. 
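Side note on the mock changes above: every `MockTransaction` variant now carries a precomputed `cost` field, and `cost()` hands out a reference instead of recomputing. A minimal sketch of the invariant this caches (illustrative only, not part of the diff; `cached_cost_matches_formula` is a hypothetical test and assumes the `PoolTransaction` accessors used in this PR):

use alloy_primitives::U256;
use reth_transaction_pool::{test_utils::MockTransaction, PoolTransaction};

fn cached_cost_matches_formula() {
    let tx = MockTransaction::eip1559();
    // Worst-case balance requirement for a dynamic-fee transaction:
    // max_fee_per_gas * gas_limit + value.
    let expected = U256::from(tx.max_fee_per_gas()) * U256::from(tx.gas_limit()) + tx.value();
    // `cost()` now returns a reference to the value cached at construction time.
    assert_eq!(*tx.cost(), expected);
}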
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index d19381935ec..a0d4d40983e 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -1,24 +1,30 @@ -#![allow(deprecated)] - use crate::{ blobstore::BlobStoreError, - error::PoolResult, + error::{InvalidPoolTransactionError, PoolResult}, pool::{state::SubPool, BestTransactionFilter, TransactionEvents}, validate::ValidPoolTransaction, AllTransactionsEvents, }; -use alloy_consensus::Transaction as _; -use alloy_eips::{eip2718::Encodable2718, eip2930::AccessList, eip4844::BlobAndProofV1}; +use alloy_consensus::{ + constants::{EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, + Transaction as _, +}; +use alloy_eips::{ + eip2718::Encodable2718, + eip2930::AccessList, + eip4844::{BlobAndProofV1, BlobTransactionSidecar, BlobTransactionValidationError}, +}; use alloy_primitives::{Address, TxHash, TxKind, B256, U256}; use futures_util::{ready, Stream}; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use reth_primitives::{ - kzg::KzgSettings, transaction::TryFromRecoveredTransactionError, BlobTransactionSidecar, - BlobTransactionValidationError, PooledTransactionsElement, - PooledTransactionsElementEcRecovered, SealedBlock, Transaction, TransactionSignedEcRecovered, - EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + kzg::KzgSettings, + transaction::{SignedTransactionIntoRecoveredExt, TryFromRecoveredTransactionError}, + PooledTransactionsElement, PooledTransactionsElementEcRecovered, RecoveredTx, SealedBlock, + Transaction, TransactionSigned, }; +use reth_primitives_traits::SignedTransaction; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::{ @@ -34,6 +40,12 @@ use tokio::sync::mpsc::Receiver; /// The `PeerId` type. pub type PeerId = alloy_primitives::B512; +/// Helper type alias to access [`PoolTransaction::Consensus`] for a given [`TransactionPool`]. +pub type PoolConsensusTx

<P> = <<P as TransactionPool>::Transaction as PoolTransaction>::Consensus; + +/// Helper type alias to access [`PoolTransaction::Pooled`] for a given [`TransactionPool`]. +pub type PoolPooledTx<P> = <<P as TransactionPool>
::Transaction as PoolTransaction>::Pooled; + /// General purpose abstraction of a transaction-pool. /// /// This is intended to be used by API-consumers such as RPC that need to inject new incoming, /// unverified transactions. @@ -70,7 +82,6 @@ pub trait TransactionPool: Send + Sync + Clone { /// Imports all _external_ transactions /// - /// /// Consumer: Utility fn add_external_transactions( &self, @@ -81,7 +92,7 @@ /// Adds an _unvalidated_ transaction into the pool and subscribes to state changes. /// - /// This is the same as [TransactionPool::add_transaction] but returns an event stream for the + /// This is the same as [`TransactionPool::add_transaction`] but returns an event stream for the /// given transaction. /// /// Consumer: Custom @@ -228,17 +239,24 @@ pub trait TransactionPool: Send + Sync + Clone { &self, tx_hashes: Vec<TxHash>, limit: GetPooledTransactionLimit, - ) -> Vec<PooledTransactionsElement>; + ) -> Vec<<Self::Transaction as PoolTransaction>::Pooled>; - /// Returns converted [PooledTransactionsElement] for the given transaction hash. + /// Returns the pooled transaction variant for the given transaction hash. /// /// This adheres to the expected behavior of /// [`GetPooledTransactions`](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#getpooledtransactions-0x09): /// /// If the transaction is a blob transaction, the sidecar will be included. /// + /// It is expected that this variant represents the valid p2p format for full transactions. + /// E.g. for EIP-4844 transactions this is the consensus transaction format with the blob + /// sidecar. + /// /// Consumer: P2P - fn get_pooled_transaction_element(&self, tx_hash: TxHash) -> Option<PooledTransactionsElement>; + fn get_pooled_transaction_element( + &self, + tx_hash: TxHash, + ) -> Option<RecoveredTx<<Self::Transaction as PoolTransaction>::Pooled>>; /// Returns an iterator that yields transactions that are ready for block production. /// @@ -247,16 +265,6 @@ pub trait TransactionPool: Send + Sync + Clone { &self, ) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>>; - /// Returns an iterator that yields transactions that are ready for block production with the - /// given base fee. - /// - /// Consumer: Block production - #[deprecated(note = "Use best_transactions_with_attributes instead.")] - fn best_transactions_with_base_fee( - &self, - base_fee: u64, - ) -> Box<dyn BestTransactions<Item = Arc<ValidPoolTransaction<Self::Transaction>>>>; - /// Returns an iterator that yields transactions that are ready for block production with the /// given base fee and optional blob fee attributes. /// @@ -277,6 +285,15 @@ pub trait TransactionPool: Send + Sync + Clone { /// Consumer: RPC fn pending_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>; + /// Returns the first `max` transactions that can be included in the next block. + /// See + /// + /// Consumer: Block production + fn pending_transactions_max( + &self, + max: usize, + ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>; + /// Returns all transactions that can be included in _future_ blocks. /// /// This and [Self::pending_transactions] are mutually exclusive.
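For readers of the trait changes above: the new `PoolPooledTx` alias collapses the otherwise verbose projection through `Self::Transaction`. A sketch of a caller-side helper (not part of the diff; it assumes the alias is re-exported at the crate root like the other trait items):

use alloy_primitives::TxHash;
use reth_primitives::RecoveredTx;
use reth_transaction_pool::{PoolPooledTx, TransactionPool};

// Hypothetical helper: fetch the p2p-ready form of one transaction, e.g. when
// answering a devp2p GetPooledTransactions request for a single hash. For
// EIP-4844 transactions the returned variant includes the blob sidecar.
fn pooled_for_p2p<P: TransactionPool>(
    pool: &P,
    hash: TxHash,
) -> Option<RecoveredTx<PoolPooledTx<P>>> {
    pool.get_pooled_transaction_element(hash)
}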
@@ -351,6 +368,24 @@ pub trait TransactionPool: Send + Sync + Clone { sender: Address, ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>; + /// Returns all pending transactions filtered by predicate + fn get_pending_transactions_with_predicate( + &self, + predicate: impl FnMut(&ValidPoolTransaction<Self::Transaction>) -> bool, + ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>; + + /// Returns all pending transactions sent by a given user + fn get_pending_transactions_by_sender( + &self, + sender: Address, + ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>; + + /// Returns all queued transactions sent by a given user + fn get_queued_transactions_by_sender( + &self, + sender: Address, + ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>; + /// Returns the highest transaction sent by a given user fn get_highest_transaction_by_sender( &self, @@ -430,7 +465,10 @@ pub trait TransactionPool: Send + Sync + Clone { /// Returns the [BlobTransactionSidecar] for the given transaction hash if it exists in the blob /// store. - fn get_blob(&self, tx_hash: TxHash) -> Result<Option<BlobTransactionSidecar>, BlobStoreError>; + fn get_blob( + &self, + tx_hash: TxHash, + ) -> Result<Option<Arc<BlobTransactionSidecar>>, BlobStoreError>; /// Returns all [BlobTransactionSidecar] for the given transaction hashes if they exist in the /// blob store. @@ -440,7 +478,7 @@ fn get_all_blobs( &self, tx_hashes: Vec<B256>, - ) -> Result<Vec<(TxHash, BlobTransactionSidecar)>, BlobStoreError>; + ) -> Result<Vec<(TxHash, Arc<BlobTransactionSidecar>)>, BlobStoreError>; /// Returns the exact [BlobTransactionSidecar] for the given transaction hashes in the order /// they were requested. @@ -449,7 +487,7 @@ fn get_all_blobs_exact( &self, tx_hashes: Vec<B256>, - ) -> Result<Vec<BlobTransactionSidecar>, BlobStoreError>; + ) -> Result<Vec<Arc<BlobTransactionSidecar>>, BlobStoreError>; /// Return the [`BlobTransactionSidecar`]s for a list of blob versioned hashes. fn get_blobs_for_versioned_hashes( @@ -470,7 +508,7 @@ pub trait TransactionPoolExt: TransactionPool { /// /// ## Fee changes /// - /// The [CanonicalStateUpdate] includes the base and blob fee of the pending block, which + /// The [`CanonicalStateUpdate`] includes the base and blob fee of the pending block, which /// affects the dynamic fee requirement of pending transactions in the pool. /// /// ## EIP-4844 Blob transactions @@ -529,15 +567,20 @@ pub struct AllPoolTransactions<T: PoolTransaction> { // === impl AllPoolTransactions === impl<T: PoolTransaction> AllPoolTransactions<T> { - /// Returns an iterator over all pending [`TransactionSignedEcRecovered`] transactions. - pub fn pending_recovered(&self) -> impl Iterator<Item = TransactionSignedEcRecovered> + '_ { + /// Returns an iterator over all pending [`RecoveredTx`] transactions. + pub fn pending_recovered(&self) -> impl Iterator<Item = RecoveredTx<T::Consensus>> + '_ { self.pending.iter().map(|tx| tx.transaction.clone().into()) } - /// Returns an iterator over all queued [`TransactionSignedEcRecovered`] transactions. - pub fn queued_recovered(&self) -> impl Iterator<Item = TransactionSignedEcRecovered> + '_ { + /// Returns an iterator over all queued [`RecoveredTx`] transactions. + pub fn queued_recovered(&self) -> impl Iterator<Item = RecoveredTx<T::Consensus>> + '_ { self.queued.iter().map(|tx| tx.transaction.clone().into()) } + + /// Returns an iterator over all transactions, both pending and queued. + pub fn all(&self) -> impl Iterator<Item = RecoveredTx<T::Consensus>> + '_ { + self.pending.iter().chain(self.queued.iter()).map(|tx| tx.transaction.clone().into()) + } } impl<T: PoolTransaction> Default for AllPoolTransactions<T> { @@ -656,6 +699,15 @@ impl TransactionOrigin { } } +/// Represents the kind of update to the canonical state. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PoolUpdateKind { + /// The update was due to a block commit. + Commit, + /// The update was due to a reorganization. + Reorg, +} + /// Represents changes after a new canonical block or range of canonical blocks was added to the /// chain.
/// @@ -680,6 +732,8 @@ pub struct CanonicalStateUpdate<'a> { pub changed_accounts: Vec<ChangedAccount>, /// All mined transactions in the block range. pub mined_transactions: Vec<B256>, + /// The kind of update to the canonical state. + pub update_kind: PoolUpdateKind, } impl CanonicalStateUpdate<'_> { @@ -723,6 +777,11 @@ impl fmt::Display for CanonicalStateUpdate<'_> { } } +/// Alias to restrict the [`BestTransactions`] items to the pool's transaction type. +pub type BestTransactionsFor<Pool> = Box< + dyn BestTransactions<Item = Arc<ValidPoolTransaction<<Pool as TransactionPool>::Transaction>>>, +>; + /// An `Iterator` that only returns transactions that are ready to be executed. /// /// This makes no assumptions about the order of the transactions, but expects that _all_ @@ -737,7 +796,7 @@ pub trait BestTransactions: Iterator + Send { /// Implementers must ensure all subsequent transactions _don't_ depend on this transaction. /// In other words, this must remove the given transaction _and_ drain all transactions that /// depend on it. - fn mark_invalid(&mut self, transaction: &Self::Item); + fn mark_invalid(&mut self, transaction: &Self::Item, kind: InvalidPoolTransactionError); /// An iterator may be able to receive additional pending transactions that weren't present in /// the pool when it was created. @@ -746,6 +805,15 @@ pub trait BestTransactions: Iterator + Send { /// listen to pool updates. fn no_updates(&mut self); + /// Convenience function for [`Self::no_updates`] that returns the iterator again. + fn without_updates(mut self) -> Self + where + Self: Sized, + { + self.no_updates(); + self + } + /// Skip all blob transactions. /// /// There's only limited blob space available in a block, once exhausted, EIP-4844 transactions /// can no longer be included. /// /// If called then the iterator will no longer yield blob transactions. /// /// Note: this will also exclude any transactions that depend on blob transactions. - fn skip_blobs(&mut self); + fn skip_blobs(&mut self) { + self.set_skip_blobs(true); + } /// Controls whether the iterator skips blob transactions or not. /// /// If set to true, no blob transactions will be returned. fn set_skip_blobs(&mut self, skip_blobs: bool); + + /// Convenience function for [`Self::skip_blobs`] that returns the iterator again. + fn without_blobs(mut self) -> Self + where + Self: Sized, + { + self.skip_blobs(); + self + } + + /// Creates an iterator which uses a closure to determine whether a transaction should be + /// returned by the iterator. + /// + /// All items the closure returns false for are marked as invalid via [`Self::mark_invalid`] and + /// descendant transactions will be skipped. + fn filter_transactions<P>

(self, predicate: P) -> BestTransactionFilter<Self, P> + where + P: FnMut(&Self::Item) -> bool, + Self: Sized, + { + BestTransactionFilter::new(self, predicate) + } } impl<T> BestTransactions for Box<T> where T: BestTransactions + ?Sized, { - fn mark_invalid(&mut self, transaction: &Self::Item) { - (**self).mark_invalid(transaction); + fn mark_invalid(&mut self, transaction: &Self::Item, kind: InvalidPoolTransactionError) { + (**self).mark_invalid(transaction, kind) } fn no_updates(&mut self) { @@ -783,28 +875,9 @@ where } } -/// A subtrait on the [`BestTransactions`] trait that allows to filter transactions. -pub trait BestTransactionsFilter: BestTransactions { - /// Creates an iterator which uses a closure to determine if a transaction should be yielded. - /// - /// Given an element the closure must return true or false. The returned iterator will yield - /// only the elements for which the closure returns true. - /// - /// Descendant transactions will be skipped. - fn filter<P>
(self, predicate: P) -> BestTransactionFilter<Self, P> - where - P: FnMut(&Self::Item) -> bool, - Self: Sized, - { - BestTransactionFilter::new(self, predicate) - } -} - -impl<T> BestTransactionsFilter for T where T: BestTransactions {} - /// A no-op implementation that yields no transactions. impl<T: Send> BestTransactions for std::iter::Empty<T> { - fn mark_invalid(&mut self, _tx: &T) {} + fn mark_invalid(&mut self, _tx: &T, _kind: InvalidPoolTransactionError) {} fn no_updates(&mut self) {} @@ -813,6 +886,36 @@ impl<T: Send> BestTransactions for std::iter::Empty<T> { fn set_skip_blobs(&mut self, _skip_blobs: bool) {} } +/// A filter that allows checking whether a transaction satisfies a set of conditions +pub trait TransactionFilter { + /// The type of the transaction to check. + type Transaction; + + /// Returns true if the transaction satisfies the conditions. + fn is_valid(&self, transaction: &Self::Transaction) -> bool; +} + +/// A no-op implementation of [`TransactionFilter`] which +/// marks all transactions as valid. +#[derive(Debug, Clone)] +pub struct NoopTransactionFilter<T>(std::marker::PhantomData<T>); + +// We can't derive Default because this forces T to be +// Default as well, which isn't necessary. +impl<T> Default for NoopTransactionFilter<T> { + fn default() -> Self { + Self(std::marker::PhantomData) + } +} + +impl<T> TransactionFilter for NoopTransactionFilter<T> { + type Transaction = T; + + fn is_valid(&self, _transaction: &Self::Transaction) -> bool { + true + } +} + /// A Helper type that bundles the best transactions attributes together. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct BestTransactionsAttributes { @@ -842,38 +945,81 @@ impl BestTransactionsAttributes { } } -/// Trait for transaction types used inside the pool -pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { +/// Trait for transaction types used inside the pool. +/// +/// This supports two transaction formats: +/// - Consensus format: the form the transaction takes when it is included in a block. +/// - Pooled format: the form the transaction takes when it is gossiped around the network. +/// +/// This distinction is necessary for the EIP-4844 blob transactions, which require an additional +/// sidecar when they are gossiped around the network. It is expected that the `Consensus` format is +/// a subset of the `Pooled` format. +pub trait PoolTransaction: + fmt::Debug + + Send + + Sync + + Clone + + TryFrom<RecoveredTx<Self::Consensus>, Error = Self::TryFromConsensusError> + + Into<RecoveredTx<Self::Consensus>> + + From<RecoveredTx<Self::Pooled>> +{ /// Associated error type for the `try_from_consensus` method. - type TryFromConsensusError; + type TryFromConsensusError: fmt::Display; /// Associated type representing the raw consensus variant of the transaction. - type Consensus: From + TryInto; + type Consensus: From<Self::Pooled>; /// Associated type representing the recovered pooled variant of the transaction. - type Pooled: Into; + type Pooled: SignedTransaction; /// Define a method to convert from the `Consensus` type to `Self` - fn try_from_consensus(tx: Self::Consensus) -> Result<Self, Self::TryFromConsensusError> { + fn try_from_consensus( + tx: RecoveredTx<Self::Consensus>, + ) -> Result<Self, Self::TryFromConsensusError> { tx.try_into() } + /// Clone the transaction into a consensus variant. + /// + /// This method is preferred when the [`PoolTransaction`] already wraps the consensus variant.
+ fn clone_into_consensus(&self) -> RecoveredTx { + self.clone().into_consensus() + } + /// Define a method to convert from the `Self` type to `Consensus` - fn into_consensus(self) -> Self::Consensus { + fn into_consensus(self) -> RecoveredTx { self.into() } /// Define a method to convert from the `Pooled` type to `Self` - fn from_pooled(pooled: Self::Pooled) -> Self { + fn from_pooled(pooled: RecoveredTx) -> Self { pooled.into() } + /// Tries to convert the `Consensus` type into the `Pooled` type. + fn try_into_pooled(self) -> Result, Self::TryFromConsensusError> { + Self::try_consensus_into_pooled(self.into_consensus()) + } + + /// Tries to convert the `Consensus` type into the `Pooled` type. + fn try_consensus_into_pooled( + tx: RecoveredTx, + ) -> Result, Self::TryFromConsensusError>; + + /// Converts the `Pooled` type into the `Consensus` type. + fn pooled_into_consensus(tx: Self::Pooled) -> Self::Consensus { + tx.into() + } + /// Hash of the transaction. fn hash(&self) -> &TxHash; /// The Sender of the transaction. fn sender(&self) -> Address; + /// Reference to the Sender of the transaction. + fn sender_ref(&self) -> &Address; + /// Returns the nonce for this transaction. fn nonce(&self) -> u64; @@ -883,7 +1029,7 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// For legacy transactions: `gas_price * gas_limit + tx_value`. /// For EIP-4844 blob transactions: `max_fee_per_gas * gas_limit + tx_value + /// max_blob_fee_per_gas * blob_gas_used`. - fn cost(&self) -> U256; + fn cost(&self) -> &U256; /// Amount of gas that should be used in executing this transaction. This is paid up-front. fn gas_limit(&self) -> u64; @@ -923,6 +1069,11 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// [`TxKind::Create`] if the transaction is a contract creation. fn kind(&self) -> TxKind; + /// Returns true if the transaction is a contract creation. + /// We don't provide a default implementation via `kind` as it copies the 21-byte + /// [`TxKind`] for this simple check. A proper implementation shouldn't allocate. + fn is_create(&self) -> bool; + /// Returns the recipient of the transaction if it is not a [`TxKind::Create`] /// transaction. fn to(&self) -> Option
<Address>

{ @@ -960,21 +1111,58 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { /// Returns `chain_id` fn chain_id(&self) -> Option; + + /// Ensures that the transaction's code size does not exceed the provided `max_init_code_size`. + /// + /// This is specifically relevant for contract creation transactions ([`TxKind::Create`]), + /// where the input data contains the initialization code. If the input code size exceeds + /// the configured limit, an [`InvalidPoolTransactionError::ExceedsMaxInitCodeSize`] error is + /// returned. + fn ensure_max_init_code_size( + &self, + max_init_code_size: usize, + ) -> Result<(), InvalidPoolTransactionError> { + if self.is_create() && self.input().len() > max_init_code_size { + Err(InvalidPoolTransactionError::ExceedsMaxInitCodeSize( + self.size(), + max_init_code_size, + )) + } else { + Ok(()) + } + } } -/// Super trait for transactions that can be converted to and from Eth transactions -pub trait EthPoolTransaction: - PoolTransaction< - Consensus: From + Into, - Pooled: From + Into, -> -{ +/// Super trait for transactions that can be converted to and from Eth transactions intended for the +/// ethereum style pool. +/// +/// This extends the [`PoolTransaction`] trait with additional methods that are specific to the +/// Ethereum pool. +pub trait EthPoolTransaction: PoolTransaction { /// Extracts the blob sidecar from the transaction. fn take_blob(&mut self) -> EthBlobTransactionSidecar; /// Returns the number of blobs this transaction has. fn blob_count(&self) -> usize; + /// A specialization for the EIP-4844 transaction type. + /// Tries to reattach the blob sidecar to the transaction. + /// + /// This returns an option, but callers should ensure that the transaction is an EIP-4844 + /// transaction: [`PoolTransaction::is_eip4844`]. + fn try_into_pooled_eip4844( + self, + sidecar: Arc, + ) -> Option>; + + /// Tries to convert the `Consensus` type with a blob sidecar into the `Pooled` type. + /// + /// Returns `None` if passed transaction is not a blob transaction. + fn try_from_eip4844( + tx: RecoveredTx, + sidecar: BlobTransactionSidecar, + ) -> Option; + /// Validates the blob sidecar of the transaction with the given settings. fn validate_blob( &self, @@ -988,12 +1176,12 @@ pub trait EthPoolTransaction: /// The default [`PoolTransaction`] for the [Pool](crate::Pool) for Ethereum. /// -/// This type is essentially a wrapper around [`TransactionSignedEcRecovered`] with additional +/// This type is essentially a wrapper around [`RecoveredTx`] with additional /// fields derived from the transaction that are frequently used by the pools for ordering. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct EthPooledTransaction { - /// `EcRecovered` transaction info - pub(crate) transaction: TransactionSignedEcRecovered, +pub struct EthPooledTransaction { + /// `EcRecovered` transaction, the consensus format. + pub(crate) transaction: T, /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. /// For legacy transactions: `gas_price * gas_limit + tx_value`. @@ -1009,73 +1197,35 @@ pub struct EthPooledTransaction { pub(crate) blob_sidecar: EthBlobTransactionSidecar, } -/// Represents the blob sidecar of the [`EthPooledTransaction`]. 
-#[derive(Debug, Clone, PartialEq, Eq)] -pub enum EthBlobTransactionSidecar { - /// This transaction does not have a blob sidecar - None, - /// This transaction has a blob sidecar (EIP-4844) but it is missing - /// - /// It was either extracted after being inserted into the pool or re-injected after reorg - /// without the blob sidecar - Missing, - /// The eip-4844 transaction was pulled from the network and still has its blob sidecar - Present(BlobTransactionSidecar), -} - -impl EthBlobTransactionSidecar { - /// Returns the blob sidecar if it is present - pub const fn maybe_sidecar(&self) -> Option<&BlobTransactionSidecar> { - match self { - Self::Present(sidecar) => Some(sidecar), - _ => None, - } - } -} - impl EthPooledTransaction { /// Create new instance of [Self]. /// /// Caution: In case of blob transactions, this marks the blob sidecar as /// [`EthBlobTransactionSidecar::Missing`] - pub fn new(transaction: TransactionSignedEcRecovered, encoded_length: usize) -> Self { + pub fn new(transaction: RecoveredTx, encoded_length: usize) -> Self { let mut blob_sidecar = EthBlobTransactionSidecar::None; - #[allow(unreachable_patterns)] - let gas_cost = match &transaction.transaction { - Transaction::Legacy(t) => { - U256::from(t.gas_price).saturating_mul(U256::from(t.gas_limit)) - } - Transaction::Eip2930(t) => { - U256::from(t.gas_price).saturating_mul(U256::from(t.gas_limit)) - } - Transaction::Eip1559(t) => { - U256::from(t.max_fee_per_gas).saturating_mul(U256::from(t.gas_limit)) - } - Transaction::Eip4844(t) => { - blob_sidecar = EthBlobTransactionSidecar::Missing; - U256::from(t.max_fee_per_gas).saturating_mul(U256::from(t.gas_limit)) - } - Transaction::Eip7702(t) => { - U256::from(t.max_fee_per_gas).saturating_mul(U256::from(t.gas_limit)) - } - _ => U256::ZERO, - }; - let mut cost = transaction.value(); - cost = cost.saturating_add(gas_cost); + let gas_cost = U256::from(transaction.transaction.max_fee_per_gas()) + .saturating_mul(U256::from(transaction.transaction.gas_limit())); + + let mut cost = gas_cost.saturating_add(transaction.value()); if let Some(blob_tx) = transaction.as_eip4844() { // Add max blob cost using saturating math to avoid overflow cost = cost.saturating_add(U256::from( blob_tx.max_fee_per_blob_gas.saturating_mul(blob_tx.blob_gas() as u128), )); + + // because the blob sidecar is not included in this transaction variant, mark it as + // missing + blob_sidecar = EthBlobTransactionSidecar::Missing; } Self { transaction, cost, encoded_length, blob_sidecar } } /// Return the reference to the underlying transaction.
- pub const fn transaction(&self) -> &TransactionSignedEcRecovered { + pub const fn transaction(&self) -> &RecoveredTx { &self.transaction } } @@ -1084,12 +1234,12 @@ impl EthPooledTransaction { impl From for EthPooledTransaction { fn from(tx: PooledTransactionsElementEcRecovered) -> Self { let encoded_length = tx.encode_2718_len(); - let (tx, signer) = tx.into_components(); + let (tx, signer) = tx.to_components(); match tx { PooledTransactionsElement::BlobTransaction(tx) => { // include the blob sidecar let (tx, blob) = tx.into_parts(); - let tx = TransactionSignedEcRecovered::from_signed_transaction(tx, signer); + let tx = RecoveredTx::from_signed_transaction(tx, signer); let mut pooled = Self::new(tx, encoded_length); pooled.blob_sidecar = EthBlobTransactionSidecar::Present(blob); pooled @@ -1105,13 +1255,27 @@ impl From for EthPooledTransaction { impl PoolTransaction for EthPooledTransaction { type TryFromConsensusError = TryFromRecoveredTransactionError; - type Consensus = TransactionSignedEcRecovered; + type Consensus = TransactionSigned; - type Pooled = PooledTransactionsElementEcRecovered; + type Pooled = PooledTransactionsElement; + + fn clone_into_consensus(&self) -> RecoveredTx { + self.transaction().clone() + } + + fn try_consensus_into_pooled( + tx: RecoveredTx, + ) -> Result, Self::TryFromConsensusError> { + let (tx, signer) = tx.to_components(); + let pooled = tx + .try_into_pooled() + .map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing)?; + Ok(RecoveredTx::from_signed_transaction(pooled, signer)) + } /// Returns hash of the transaction. fn hash(&self) -> &TxHash { - self.transaction.hash_ref() + self.transaction.tx_hash() } /// Returns the Sender of the transaction. @@ -1119,6 +1283,11 @@ impl PoolTransaction for EthPooledTransaction { self.transaction.signer() } + /// Returns a reference to the Sender of the transaction. + fn sender_ref(&self) -> &Address { + self.transaction.signer_ref() + } + /// Returns the nonce for this transaction. fn nonce(&self) -> u64 { self.transaction.nonce() @@ -1130,8 +1299,8 @@ impl PoolTransaction for EthPooledTransaction { /// For legacy transactions: `gas_price * gas_limit + tx_value`. /// For EIP-4844 blob transactions: `max_fee_per_gas * gas_limit + tx_value + /// max_blob_fee_per_gas * blob_gas_used`. - fn cost(&self) -> U256 { - self.cost + fn cost(&self) -> &U256 { + &self.cost } /// Amount of gas that should be used in executing this transaction. This is paid up-front. @@ -1145,15 +1314,7 @@ impl PoolTransaction for EthPooledTransaction { /// /// This is also commonly referred to as the "Gas Fee Cap" (`GasFeeCap`). 
fn max_fee_per_gas(&self) -> u128 { - #[allow(unreachable_patterns)] - match &self.transaction.transaction { - Transaction::Legacy(tx) => tx.gas_price, - Transaction::Eip2930(tx) => tx.gas_price, - Transaction::Eip1559(tx) => tx.max_fee_per_gas, - Transaction::Eip4844(tx) => tx.max_fee_per_gas, - Transaction::Eip7702(tx) => tx.max_fee_per_gas, - _ => 0, - } + self.transaction.transaction.max_fee_per_gas() } fn access_list(&self) -> Option<&AccessList> { @@ -1164,14 +1325,7 @@ impl PoolTransaction for EthPooledTransaction { /// /// This will return `None` for non-EIP1559 transactions fn max_priority_fee_per_gas(&self) -> Option { - #[allow(unreachable_patterns, clippy::match_same_arms)] - match &self.transaction.transaction { - Transaction::Legacy(_) | Transaction::Eip2930(_) => None, - Transaction::Eip1559(tx) => Some(tx.max_priority_fee_per_gas), - Transaction::Eip4844(tx) => Some(tx.max_priority_fee_per_gas), - Transaction::Eip7702(tx) => Some(tx.max_priority_fee_per_gas), - _ => None, - } + self.transaction.transaction.max_priority_fee_per_gas() } fn max_fee_per_blob_gas(&self) -> Option { @@ -1183,7 +1337,7 @@ impl PoolTransaction for EthPooledTransaction { /// For EIP-1559 transactions: `min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)`. /// For legacy transactions: `gas_price - base_fee`. fn effective_tip_per_gas(&self, base_fee: u64) -> Option { - self.transaction.effective_tip_per_gas(Some(base_fee)) + self.transaction.effective_tip_per_gas(base_fee) } /// Returns the max priority fee per gas if the transaction is an EIP-1559 transaction, and @@ -1198,8 +1352,13 @@ impl PoolTransaction for EthPooledTransaction { self.transaction.kind() } + /// Returns true if the transaction is a contract creation. + fn is_create(&self) -> bool { + self.transaction.is_create() + } + fn input(&self) -> &[u8] { - self.transaction.input().as_ref() + self.transaction.input() } /// Returns a measurement of the heap usage of this type and all its internals. 
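The `effective_tip_per_gas` call above now takes the base fee directly instead of an `Option`; the fee rule itself is unchanged. A self-contained worked sketch of that rule (illustrative, not the pool's actual implementation):

// For EIP-1559 style transactions:
//   effective tip = min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)
// and `None` when the fee cap cannot even cover the base fee.
fn effective_tip(
    max_fee_per_gas: u128,
    max_priority_fee_per_gas: u128,
    base_fee: u64,
) -> Option<u128> {
    let above_base = max_fee_per_gas.checked_sub(base_fee as u128)?;
    Some(above_base.min(max_priority_fee_per_gas))
}

// Example: max fee 30 gwei, tip cap 2 gwei, base fee 29 gwei
// yields min(30 - 29, 2) = 1 gwei of effective tip.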
@@ -1239,6 +1398,28 @@ impl EthPoolTransaction for EthPooledTransaction { } } + fn try_into_pooled_eip4844( + self, + sidecar: Arc, + ) -> Option> { + PooledTransactionsElementEcRecovered::try_from_blob_transaction( + self.into_consensus(), + Arc::unwrap_or_clone(sidecar), + ) + .ok() + } + + fn try_from_eip4844( + tx: RecoveredTx, + sidecar: BlobTransactionSidecar, + ) -> Option { + let (tx, signer) = tx.to_components(); + PooledTransactionsElement::try_from_blob_transaction(tx, sidecar) + .ok() + .map(|tx| tx.with_signer(signer)) + .map(Self::from_pooled) + } + fn validate_blob( &self, sidecar: &BlobTransactionSidecar, @@ -1258,10 +1439,10 @@ impl EthPoolTransaction for EthPooledTransaction { } } -impl TryFrom for EthPooledTransaction { +impl TryFrom for EthPooledTransaction { type Error = TryFromRecoveredTransactionError; - fn try_from(tx: TransactionSignedEcRecovered) -> Result { + fn try_from(tx: RecoveredTx) -> Result { // ensure we can handle the transaction type and its format match tx.tx_type() as u8 { 0..=EIP1559_TX_TYPE_ID | EIP7702_TX_TYPE_ID => { @@ -1269,7 +1450,7 @@ impl TryFrom for EthPooledTransaction { } EIP4844_TX_TYPE_ID => { // doesn't have a blob sidecar - return Err(TryFromRecoveredTransactionError::BlobSidecarMissing) + return Err(TryFromRecoveredTransactionError::BlobSidecarMissing); } unsupported => { // unsupported transaction type @@ -1285,12 +1466,36 @@ impl TryFrom for EthPooledTransaction { } } -impl From for TransactionSignedEcRecovered { +impl From for RecoveredTx { fn from(tx: EthPooledTransaction) -> Self { tx.transaction } } +/// Represents the blob sidecar of the [`EthPooledTransaction`]. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum EthBlobTransactionSidecar { + /// This transaction does not have a blob sidecar + None, + /// This transaction has a blob sidecar (EIP-4844) but it is missing + /// + /// It was either extracted after being inserted into the pool or re-injected after reorg + /// without the blob sidecar + Missing, + /// The eip-4844 transaction was pulled from the network and still has its blob sidecar + Present(BlobTransactionSidecar), +} + +impl EthBlobTransactionSidecar { + /// Returns the blob sidecar if it is present + pub const fn maybe_sidecar(&self) -> Option<&BlobTransactionSidecar> { + match self { + Self::Present(sidecar) => Some(sidecar), + _ => None, + } + } +} + /// Represents the current status of the pool. 
#[derive(Debug, Clone, Copy, Default)] pub struct PoolSize { @@ -1421,7 +1626,9 @@ impl Stream for NewSubpoolTransactionStream { mod tests { use super::*; use alloy_consensus::{TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}; - use reth_primitives::{constants::eip4844::DATA_GAS_PER_BLOB, Signature, TransactionSigned}; + use alloy_eips::eip4844::DATA_GAS_PER_BLOB; + use alloy_primitives::PrimitiveSignature as Signature; + use reth_primitives::TransactionSigned; #[test] fn test_pool_size_invariants() { @@ -1470,9 +1677,8 @@ mod tests { ..Default::default() }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); + let transaction = RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); // Check that the pooled transaction is created correctly @@ -1492,9 +1698,8 @@ mod tests { ..Default::default() }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); + let transaction = RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); // Check that the pooled transaction is created correctly @@ -1514,9 +1719,8 @@ mod tests { ..Default::default() }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); + let transaction = RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); // Check that the pooled transaction is created correctly @@ -1538,9 +1742,8 @@ mod tests { ..Default::default() }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); + let transaction = RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 300); // Check that the pooled transaction is created correctly @@ -1562,9 +1765,8 @@ mod tests { ..Default::default() }); let signature = Signature::test_signature(); - let signed_tx = TransactionSigned::from_transaction_and_signature(tx, signature); - let transaction = - TransactionSignedEcRecovered::from_signed_transaction(signed_tx, Default::default()); + let signed_tx = TransactionSigned::new_unhashed(tx, signature); + let transaction = RecoveredTx::from_signed_transaction(signed_tx, Default::default()); let pooled_tx = EthPooledTransaction::new(transaction.clone(), 200); // Check that the pooled transaction is created correctly @@ -1573,4 +1775,27 @@ mod tests { assert_eq!(pooled_tx.blob_sidecar, EthBlobTransactionSidecar::None); assert_eq!(pooled_tx.cost, U256::from(100) + 
U256::from(10 * 1000)); } + + #[test] + fn test_pooled_transaction_limit() { + // No limit should never exceed + let limit_none = GetPooledTransactionLimit::None; + // Any size should return false + assert!(!limit_none.exceeds(1000)); + + // Size limit of 2MB (2 * 1024 * 1024 bytes) + let size_limit_2mb = GetPooledTransactionLimit::ResponseSizeSoftLimit(2 * 1024 * 1024); + + // Test with size below the limit + // 1MB is below 2MB, should return false + assert!(!size_limit_2mb.exceeds(1024 * 1024)); + + // Test with size exactly at the limit + // 2MB equals the limit, should return false + assert!(!size_limit_2mb.exceeds(2 * 1024 * 1024)); + + // Test with size exceeding the limit + // 3MB is above the 2MB limit, should return true + assert!(size_limit_2mb.exceeds(3 * 1024 * 1024)); + } } diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 49165f189a0..998de5ffb51 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -8,15 +8,20 @@ use crate::{ }, traits::TransactionOrigin, validate::{ValidTransaction, ValidationTask, MAX_INIT_CODE_BYTE_SIZE}, - EthBlobTransactionSidecar, EthPoolTransaction, LocalTransactionConfig, PoolTransaction, + EthBlobTransactionSidecar, EthPoolTransaction, LocalTransactionConfig, TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator, }; -use reth_chainspec::{ChainSpec, EthereumHardforks}; -use reth_primitives::{ - constants::eip4844::MAX_BLOBS_PER_BLOCK, GotExpected, InvalidTransactionError, SealedBlock, - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, +use alloy_consensus::{ + constants::{ + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, + }, + BlockHeader, }; +use alloy_eips::eip4844::MAX_BLOBS_PER_BLOCK; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_primitives::{InvalidTransactionError, SealedBlock}; +use reth_primitives_traits::GotExpected; use reth_storage_api::{AccountReader, StateProviderFactory}; use reth_tasks::TaskSpawner; use revm::{ @@ -100,11 +105,24 @@ where } fn on_new_head_block(&self, new_tip_block: &SealedBlock) { - self.inner.on_new_head_block(new_tip_block) + self.inner.on_new_head_block(new_tip_block.header()) } } /// A [`TransactionValidator`] implementation that validates ethereum transaction. +/// +/// It supports all known ethereum transaction types: +/// - Legacy +/// - EIP-2718 +/// - EIP-1559 +/// - EIP-4844 +/// - EIP-7702 +/// +/// And enforces additional constraints such as: +/// - Maximum transaction size +/// - Maximum gas limit +/// +/// And adheres to the configured [`LocalTransactionConfig`]. #[derive(Debug)] pub(crate) struct EthTransactionValidatorInner { /// Spec of the chain @@ -221,7 +239,7 @@ where // Check whether the init code size has been exceeded. if self.fork_tracker.is_shanghai_activated() { - if let Err(err) = ensure_max_init_code_size(&transaction, MAX_INIT_CODE_BYTE_SIZE) { + if let Err(err) = transaction.ensure_max_init_code_size(MAX_INIT_CODE_BYTE_SIZE) { return TransactionValidationOutcome::Invalid(transaction, err) } } @@ -248,7 +266,7 @@ where // Drop non-local transactions with a fee lower than the configured fee for acceptance into // the pool. 
- if !self.local_transactions_config.is_local(origin, transaction.sender()) && + if !self.local_transactions_config.is_local(origin, transaction.sender_ref()) && transaction.is_eip1559() && transaction.max_priority_fee_per_gas() < self.minimum_priority_fee { @@ -382,11 +400,12 @@ where let cost = transaction.cost(); // Checks for max cost - if cost > account.balance { + if cost > &account.balance { + let expected = *cost; return TransactionValidationOutcome::Invalid( transaction, InvalidTransactionError::InsufficientFunds( - GotExpected { got: account.balance, expected: cost }.into(), + GotExpected { got: account.balance, expected }.into(), ) .into(), ) @@ -453,17 +472,17 @@ where } } - fn on_new_head_block(&self, new_tip_block: &SealedBlock) { + fn on_new_head_block(&self, new_tip_block: &T) { // update all forks - if self.chain_spec.is_cancun_active_at_timestamp(new_tip_block.timestamp) { + if self.chain_spec.is_cancun_active_at_timestamp(new_tip_block.timestamp()) { self.fork_tracker.cancun.store(true, std::sync::atomic::Ordering::Relaxed); } - if self.chain_spec.is_shanghai_active_at_timestamp(new_tip_block.timestamp) { + if self.chain_spec.is_shanghai_active_at_timestamp(new_tip_block.timestamp()) { self.fork_tracker.shanghai.store(true, std::sync::atomic::Ordering::Relaxed); } - if self.chain_spec.is_prague_active_at_timestamp(new_tip_block.timestamp) { + if self.chain_spec.is_prague_active_at_timestamp(new_tip_block.timestamp()) { self.fork_tracker.prague.store(true, std::sync::atomic::Ordering::Relaxed); } } @@ -639,6 +658,7 @@ impl EthTransactionValidatorBuilder { pub fn with_head_timestamp(mut self, timestamp: u64) -> Self { self.cancun = self.chain_spec.is_cancun_active_at_timestamp(timestamp); self.shanghai = self.chain_spec.is_shanghai_active_at_timestamp(timestamp); + self.prague = self.chain_spec.is_prague_active_at_timestamp(timestamp); self } @@ -708,7 +728,7 @@ impl EthTransactionValidatorBuilder { EthTransactionValidator { inner: Arc::new(inner) } } - /// Builds a the [`EthTransactionValidator`] and spawns validation tasks via the + /// Builds a [`EthTransactionValidator`] and spawns validation tasks via the /// [`TransactionValidationTaskExecutor`] /// /// The validator will spawn `additional_tasks` additional tasks for validation. @@ -780,22 +800,6 @@ impl ForkTracker { } } -/// Ensure that the code size is not greater than `max_init_code_size`. -/// `max_init_code_size` should be configurable so this will take it as an argument. -pub fn ensure_max_init_code_size( - transaction: &T, - max_init_code_size: usize, -) -> Result<(), InvalidPoolTransactionError> { - if transaction.kind().is_create() && transaction.input().len() > max_init_code_size { - Err(InvalidPoolTransactionError::ExceedsMaxInitCodeSize( - transaction.size(), - max_init_code_size, - )) - } else { - Ok(()) - } -} - /// Ensures that gas limit of the transaction exceeds the intrinsic gas of the transaction. /// /// Caution: This only checks past the Merge hardfork. 
@@ -814,7 +818,7 @@ pub fn ensure_intrinsic_gas( let gas_after_merge = validate_initial_tx_gas( spec_id, transaction.input(), - transaction.kind().is_create(), + transaction.is_create(), transaction.access_list().map(|list| list.0.as_slice()).unwrap_or(&[]), transaction.authorization_count() as u64, ); @@ -830,8 +834,8 @@ pub fn ensure_intrinsic_gas( mod tests { use super::*; use crate::{ - blobstore::InMemoryBlobStore, error::PoolErrorKind, CoinbaseTipOrdering, - EthPooledTransaction, Pool, TransactionPool, + blobstore::InMemoryBlobStore, error::PoolErrorKind, traits::PoolTransaction, + CoinbaseTipOrdering, EthPooledTransaction, Pool, TransactionPool, }; use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, U256}; diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 4395cc97908..40f2deeafe8 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -4,10 +4,12 @@ use crate::{ error::InvalidPoolTransactionError, identifier::{SenderId, TransactionId}, traits::{PoolTransaction, TransactionOrigin}, + PriceBumpConfig, }; +use alloy_eips::eip4844::BlobTransactionSidecar; use alloy_primitives::{Address, TxHash, B256, U256}; use futures_util::future::Either; -use reth_primitives::{BlobTransactionSidecar, SealedBlock, TransactionSignedEcRecovered}; +use reth_primitives::{RecoveredTx, SealedBlock}; use std::{fmt, future::Future, time::Instant}; mod constants; @@ -280,6 +282,11 @@ impl ValidPoolTransaction { self.transaction.sender() } + /// Returns a reference to the address of the sender + pub fn sender_ref(&self) -> &Address { + self.transaction.sender_ref() + } + /// Returns the recipient of the transaction if it is not a CREATE transaction. pub fn to(&self) -> Option
<Address>
{ self.transaction.to() @@ -310,7 +317,7 @@ impl ValidPoolTransaction { /// /// For EIP-1559 transactions: `max_fee_per_gas * gas_limit + tx_value`. /// For legacy transactions: `gas_price * gas_limit + tx_value`. - pub fn cost(&self) -> U256 { + pub fn cost(&self) -> &U256 { self.transaction.cost() } @@ -372,14 +379,63 @@ impl ValidPoolTransaction { pub(crate) fn tx_type_conflicts_with(&self, other: &Self) -> bool { self.is_eip4844() != other.is_eip4844() } -} -impl>> ValidPoolTransaction { - /// Converts to this type into a [`TransactionSignedEcRecovered`]. + /// Converts to this type into the consensus transaction of the pooled transaction. /// /// Note: this takes `&self` since indented usage is via `Arc`. - pub fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered { - self.transaction.clone().into_consensus().into() + pub fn to_consensus(&self) -> RecoveredTx { + self.transaction.clone_into_consensus() + } + + /// Determines whether a candidate transaction (`maybe_replacement`) is underpriced compared to + /// an existing transaction in the pool. + /// + /// A transaction is considered underpriced if it doesn't meet the required fee bump threshold. + /// This applies to both standard gas fees and, for blob-carrying transactions (EIP-4844), + /// the blob-specific fees. + #[inline] + pub(crate) fn is_underpriced( + &self, + maybe_replacement: &Self, + price_bumps: &PriceBumpConfig, + ) -> bool { + // Retrieve the required price bump percentage for this type of transaction. + // + // The bump is different for EIP-4844 and other transactions. See `PriceBumpConfig`. + let price_bump = price_bumps.price_bump(self.tx_type()); + + // Check if the max fee per gas is underpriced. + if maybe_replacement.max_fee_per_gas() < self.max_fee_per_gas() * (100 + price_bump) / 100 { + return true + } + + let existing_max_priority_fee_per_gas = + self.transaction.max_priority_fee_per_gas().unwrap_or_default(); + let replacement_max_priority_fee_per_gas = + maybe_replacement.transaction.max_priority_fee_per_gas().unwrap_or_default(); + + // Check max priority fee per gas (relevant for EIP-1559 transactions only) + if existing_max_priority_fee_per_gas != 0 && + replacement_max_priority_fee_per_gas != 0 && + replacement_max_priority_fee_per_gas < + existing_max_priority_fee_per_gas * (100 + price_bump) / 100 + { + return true + } + + // Check max blob fee per gas + if let Some(existing_max_blob_fee_per_gas) = self.transaction.max_fee_per_blob_gas() { + // This enforces that blob txs can only be replaced by blob txs + let replacement_max_blob_fee_per_gas = + maybe_replacement.transaction.max_fee_per_blob_gas().unwrap_or_default(); + if replacement_max_blob_fee_per_gas < + existing_max_blob_fee_per_gas * (100 + price_bump) / 100 + { + return true + } + } + + false } } @@ -399,9 +455,11 @@ impl Clone for ValidPoolTransaction { impl fmt::Debug for ValidPoolTransaction { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ValidPoolTransaction") + .field("id", &self.transaction_id) + .field("pragate", &self.propagate) + .field("origin", &self.origin) .field("hash", self.transaction.hash()) - .field("provides", &self.transaction_id) - .field("raw_tx", &self.transaction) + .field("tx", &self.transaction) .finish() } } diff --git a/crates/transaction-pool/tests/it/best.rs b/crates/transaction-pool/tests/it/best.rs new file mode 100644 index 00000000000..20e83367643 --- /dev/null +++ b/crates/transaction-pool/tests/it/best.rs @@ -0,0 +1,11 @@ +//! 
Best transaction and filter testing + +use reth_transaction_pool::{noop::NoopTransactionPool, BestTransactions, TransactionPool}; + +#[test] +fn test_best_transactions() { + let noop = NoopTransactionPool::default(); + let mut best = + noop.best_transactions().filter_transactions(|_| true).without_blobs().without_updates(); + assert!(best.next().is_none()); +} diff --git a/crates/transaction-pool/tests/it/blobs.rs b/crates/transaction-pool/tests/it/blobs.rs index 0cdc6d088c0..9417c62278b 100644 --- a/crates/transaction-pool/tests/it/blobs.rs +++ b/crates/transaction-pool/tests/it/blobs.rs @@ -3,7 +3,7 @@ use reth_transaction_pool::{ error::PoolErrorKind, test_utils::{MockTransaction, MockTransactionFactory, TestPoolBuilder}, - TransactionOrigin, TransactionPool, + PoolTransaction, TransactionOrigin, TransactionPool, }; #[tokio::test(flavor = "multi_thread")] @@ -16,23 +16,22 @@ async fn blobs_exclusive() { .add_transaction(TransactionOrigin::External, blob_tx.transaction.clone()) .await .unwrap(); - assert_eq!(hash, blob_tx.transaction.get_hash()); + assert_eq!(hash, *blob_tx.transaction.get_hash()); let mut best_txns = txpool.best_transactions(); assert_eq!(best_txns.next().unwrap().transaction.get_hash(), blob_tx.transaction.get_hash()); assert!(best_txns.next().is_none()); - let eip1559_tx = MockTransaction::eip1559() - .set_sender(blob_tx.transaction.get_sender()) - .inc_price_by(10_000); + let eip1559_tx = + MockTransaction::eip1559().set_sender(blob_tx.transaction.sender()).inc_price_by(10_000); let res = txpool.add_transaction(TransactionOrigin::External, eip1559_tx.clone()).await.unwrap_err(); - assert_eq!(res.hash, eip1559_tx.get_hash()); + assert_eq!(res.hash, *eip1559_tx.get_hash()); match res.kind { PoolErrorKind::ExistingConflictingTransactionType(addr, tx_type) => { - assert_eq!(addr, eip1559_tx.get_sender()); + assert_eq!(addr, eip1559_tx.sender()); assert_eq!(tx_type, eip1559_tx.tx_type()); } _ => unreachable!(), diff --git a/crates/transaction-pool/tests/it/evict.rs b/crates/transaction-pool/tests/it/evict.rs index c7438c9964e..3b74b8cb230 100644 --- a/crates/transaction-pool/tests/it/evict.rs +++ b/crates/transaction-pool/tests/it/evict.rs @@ -1,14 +1,15 @@ //! Transaction pool eviction tests. 
+use alloy_eips::eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use alloy_primitives::{Address, B256}; use rand::distributions::Uniform; -use reth_primitives::constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}; use reth_transaction_pool::{ error::PoolErrorKind, test_utils::{ MockFeeRange, MockTransactionDistribution, MockTransactionRatio, TestPool, TestPoolBuilder, }, - BlockInfo, PoolConfig, SubPoolLimit, TransactionOrigin, TransactionPool, TransactionPoolExt, + BlockInfo, PoolConfig, PoolTransaction, SubPoolLimit, TransactionOrigin, TransactionPool, + TransactionPoolExt, }; #[tokio::test(flavor = "multi_thread")] @@ -87,7 +88,7 @@ async fn only_blobs_eviction() { let set = set.into_vec(); // ensure that the first nonce is 0 - assert_eq!(set[0].get_nonce(), 0); + assert_eq!(set[0].nonce(), 0); // and finally insert it into the pool let results = pool.add_transactions(TransactionOrigin::External, set).await; @@ -194,7 +195,7 @@ async fn mixed_eviction() { ); let set = set.into_inner().into_vec(); - assert_eq!(set[0].get_nonce(), 0); + assert_eq!(set[0].nonce(), 0); let results = pool.add_transactions(TransactionOrigin::External, set).await; for (i, result) in results.iter().enumerate() { diff --git a/crates/transaction-pool/tests/it/listeners.rs b/crates/transaction-pool/tests/it/listeners.rs index ad13af22a6a..0f8a0b19e2b 100644 --- a/crates/transaction-pool/tests/it/listeners.rs +++ b/crates/transaction-pool/tests/it/listeners.rs @@ -33,11 +33,11 @@ async fn txpool_listener_all() { let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); assert_matches!( all_tx_events.next().await, - Some(FullTransactionEvent::Pending(hash)) if hash == transaction.transaction.get_hash() + Some(FullTransactionEvent::Pending(hash)) if hash == *transaction.transaction.get_hash() ); } diff --git a/crates/transaction-pool/tests/it/main.rs b/crates/transaction-pool/tests/it/main.rs index ead33a328dd..7db2b14c953 100644 --- a/crates/transaction-pool/tests/it/main.rs +++ b/crates/transaction-pool/tests/it/main.rs @@ -9,4 +9,6 @@ mod listeners; #[cfg(feature = "test-utils")] mod pending; +mod best; + const fn main() {} diff --git a/crates/transaction-pool/tests/it/pending.rs b/crates/transaction-pool/tests/it/pending.rs index 0b6349b24cc..be559c71eec 100644 --- a/crates/transaction-pool/tests/it/pending.rs +++ b/crates/transaction-pool/tests/it/pending.rs @@ -12,7 +12,7 @@ async fn txpool_new_pending_txs() { let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); let mut best_txns = txpool.best_transactions(); assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash()); @@ -20,6 +20,6 @@ async fn txpool_new_pending_txs() { let transaction = mock_tx_factory.create_eip1559(); let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); 
assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash()); } diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 0bd28140f44..eadbb3176b5 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -12,21 +12,28 @@ description = "Commonly used types for trie usage in reth." workspace = true [dependencies] -reth-primitives-traits.workspace = true -reth-codecs.workspace = true - +# alloy alloy-primitives.workspace = true alloy-rlp = { workspace = true, features = ["arrayvec"] } -alloy-trie = { workspace = true, features = ["serde"] } +alloy-trie.workspace = true alloy-consensus.workspace = true -alloy-genesis.workspace = true +reth-primitives-traits.workspace = true +reth-codecs = { workspace = true, optional = true } revm-primitives.workspace = true -bytes.workspace = true +alloy-genesis.workspace = true +alloy-rpc-types-eth = { workspace = true, optional = true } +alloy-serde = { workspace = true, optional = true } + +bytes = { workspace = true, optional = true } derive_more.workspace = true -serde.workspace = true itertools.workspace = true -nybbles = { workspace = true, features = ["serde", "rlp"] } +nybbles = { workspace = true, features = ["rlp"] } + +# `serde` feature +serde = { workspace = true, optional = true } + +serde_with = { workspace = true, optional = true } # `test-utils` feature hash-db = { version = "=0.15.2", optional = true } @@ -34,15 +41,71 @@ plain_hasher = { version = "0.2", optional = true } arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] +reth-primitives-traits = { workspace = true, features = ["serde"] } +reth-codecs.workspace = true + +alloy-primitives = { workspace = true, features = ["getrandom"] } +alloy-trie = { workspace = true, features = ["arbitrary", "serde"] } +bytes.workspace = true +hash-db = "=0.15.2" +plain_hasher = "0.2" arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true -hash-db = "=0.15.2" -plain_hasher = "0.2" +criterion.workspace = true +bincode.workspace = true +serde.workspace = true +serde_json.workspace = true +serde_with.workspace = true [features] -test-utils = ["dep:plain_hasher", "dep:hash-db", "arbitrary"] +eip1186 = [ + "alloy-rpc-types-eth/serde", + "dep:alloy-serde", +] +serde = [ + "dep:serde", + "bytes?/serde", + "nybbles/serde", + "alloy-primitives/serde", + "alloy-consensus/serde", + "alloy-trie/serde", + "alloy-rpc-types-eth?/serde", + "revm-primitives/serde", + "reth-primitives-traits/serde", + "reth-codecs?/serde" +] +reth-codec = [ + "dep:reth-codecs", + "dep:bytes", +] +serde-bincode-compat = [ + "serde", + "reth-primitives-traits/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat", + "dep:serde_with" +] +test-utils = [ + "dep:plain_hasher", + "dep:hash-db", + "arbitrary", + "reth-primitives-traits/test-utils", + "reth-codecs/test-utils", +] arbitrary = [ - "alloy-trie/arbitrary", - "dep:arbitrary", + "dep:reth-codecs", + "alloy-trie/arbitrary", + "dep:arbitrary", + "alloy-serde?/arbitrary", + "reth-primitives-traits/arbitrary", + "alloy-consensus/arbitrary", + "alloy-primitives/arbitrary", + "nybbles/arbitrary", + "revm-primitives/arbitrary", + "reth-codecs/arbitrary", + "alloy-rpc-types-eth?/arbitrary" ] + +[[bench]] +name = "prefix_set" +harness = false diff --git a/crates/trie/trie/benches/prefix_set.rs b/crates/trie/common/benches/prefix_set.rs similarity index 99% rename from 
crates/trie/trie/benches/prefix_set.rs
rename to crates/trie/common/benches/prefix_set.rs
index cae08d129f6..b61d58e0272 100644
--- a/crates/trie/trie/benches/prefix_set.rs
+++ b/crates/trie/common/benches/prefix_set.rs
@@ -7,7 +7,7 @@ use proptest::{
    strategy::ValueTree,
    test_runner::{basic_result_cache, TestRunner},
};
-use reth_trie::{
+use reth_trie_common::{
    prefix_set::{PrefixSet, PrefixSetMut},
    Nibbles,
};
diff --git a/crates/trie/common/src/constants.rs b/crates/trie/common/src/constants.rs
new file mode 100644
index 00000000000..471b8bd9dcc
--- /dev/null
+++ b/crates/trie/common/src/constants.rs
@@ -0,0 +1,24 @@
+/// The maximum size of RLP encoded trie account in bytes.
+/// 2 (header) + 4 * 1 (field lens) + 8 (nonce) + 32 * 3 (balance, storage root, code hash)
+pub const TRIE_ACCOUNT_RLP_MAX_SIZE: usize = 110;
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::TrieAccount;
+    use alloy_primitives::{B256, U256};
+    use alloy_rlp::Encodable;
+
+    #[test]
+    fn account_rlp_max_size() {
+        let account = TrieAccount {
+            nonce: u64::MAX,
+            balance: U256::MAX,
+            storage_root: B256::from_slice(&[u8::MAX; 32]),
+            code_hash: B256::from_slice(&[u8::MAX; 32]),
+        };
+        let mut encoded = Vec::new();
+        account.encode(&mut encoded);
+        assert_eq!(encoded.len(), TRIE_ACCOUNT_RLP_MAX_SIZE);
+    }
+}
diff --git a/crates/trie/common/src/hash_builder/state.rs b/crates/trie/common/src/hash_builder/state.rs
index c5cae21a1a3..4bf3bade398 100644
--- a/crates/trie/common/src/hash_builder/state.rs
+++ b/crates/trie/common/src/hash_builder/state.rs
@@ -1,13 +1,11 @@
use crate::TrieMask;
use alloy_trie::{hash_builder::HashBuilderValue, nodes::RlpNode, HashBuilder};
-use bytes::Buf;
use nybbles::Nibbles;
-use reth_codecs::Compact;
-use serde::{Deserialize, Serialize};

/// The hash builder state for storing in the database.
/// Check the `reth-trie` crate for more info on hash builder.
-#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)]
+#[derive(Debug, Clone, PartialEq, Eq, Default)]
+#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
    feature = "arbitrary",
    derive(arbitrary::Arbitrary),
@@ -63,7 +61,8 @@ impl From<HashBuilder> for HashBuilderState {
    }
}

-impl Compact for HashBuilderState {
+#[cfg(any(test, feature = "reth-codec"))]
+impl reth_codecs::Compact for HashBuilderState {
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
@@ -106,6 +105,8 @@ impl Compact for HashBuilderState {
    }

    fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) {
+        use bytes::Buf;
+
        let (key, mut buf) = Vec::from_compact(buf, 0);

        let stack_len = buf.get_u16() as usize;
@@ -150,6 +151,7 @@ impl Compact for HashBuilderState {
#[cfg(test)]
mod tests {
    use super::*;
+    use reth_codecs::Compact;

    #[test]
    fn hash_builder_state_regression() {
diff --git a/crates/trie/common/src/key.rs b/crates/trie/common/src/key.rs
new file mode 100644
index 00000000000..71f8019bff5
--- /dev/null
+++ b/crates/trie/common/src/key.rs
@@ -0,0 +1,17 @@
+use alloy_primitives::{keccak256, B256};
+
+/// Trait for hashing keys in state.
+pub trait KeyHasher: Default + Clone + Send + Sync + 'static {
+    /// Hashes the given bytes into a 256-bit hash.
+    fn hash_key<T: AsRef<[u8]>>(bytes: T) -> B256;
+}
+
+/// A key hasher that uses the Keccak-256 hash function.
+#[derive(Clone, Debug, Default)]
+pub struct KeccakKeyHasher;
+
+impl KeyHasher for KeccakKeyHasher {
+    fn hash_key<T: AsRef<[u8]>>(bytes: T) -> B256 {
+        keccak256(bytes)
+    }
+}
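The new `KeyHasher` trait decouples state-key hashing from a hard-coded `keccak256` call, so alternative commitment schemes can supply their own hashing. A minimal sketch of how downstream code can stay generic over the hasher (the `hashed_account_key` helper is hypothetical, not part of this patch):

```rust
use alloy_primitives::{keccak256, Address, B256};
use reth_trie_common::{KeccakKeyHasher, KeyHasher};

// Hypothetical helper: hash a state key through the trait instead of
// calling `keccak256` directly.
fn hashed_account_key<KH: KeyHasher>(address: Address) -> B256 {
    KH::hash_key(address)
}

fn main() {
    let address = Address::ZERO;
    // For the default Keccak implementation this is exactly `keccak256(address)`.
    assert_eq!(hashed_account_key::<KeccakKeyHasher>(address), keccak256(address));
}
```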
diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs
index bdec36028b9..6647de67811 100644
--- a/crates/trie/common/src/lib.rs
+++ b/crates/trie/common/src/lib.rs
@@ -11,9 +11,16 @@
/// The implementation of hash builder.
pub mod hash_builder;

+/// Constants related to the trie computation.
+mod constants;
+pub use constants::*;
+
mod account;
pub use account::TrieAccount;

+mod key;
+pub use key::{KeccakKeyHasher, KeyHasher};
+
mod nibbles;
pub use nibbles::{Nibbles, StoredNibbles, StoredNibblesSubKey};

@@ -23,6 +30,10 @@ pub use storage::StorageTrieEntry;
mod subnode;
pub use subnode::StoredSubNode;

+/// The implementation of a container for storing intermediate changes to a trie.
+/// The container indicates when the trie has been modified.
+pub mod prefix_set;
+
mod proofs;
#[cfg(any(test, feature = "test-utils"))]
pub use proofs::triehash;
@@ -30,4 +41,19 @@ pub use proofs::*;

pub mod root;

+/// Buffer for trie updates.
+pub mod updates;
+
+/// Bincode-compatible serde implementations for trie types.
+///
+/// `bincode` crate allows for more efficient serialization of trie types, because it allows
+/// non-string map keys.
+///
+/// Read more:
+#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))]
+pub mod serde_bincode_compat {
+    pub use super::updates::serde_bincode_compat as updates;
+}
+
+/// Re-export
pub use alloy_trie::{nodes::*, proof, BranchNodeCompact, HashBuilder, TrieMask, EMPTY_ROOT_HASH};
diff --git a/crates/trie/common/src/nibbles.rs b/crates/trie/common/src/nibbles.rs
index 991fb68f3c0..b1cc2f10c56 100644
--- a/crates/trie/common/src/nibbles.rs
+++ b/crates/trie/common/src/nibbles.rs
@@ -1,24 +1,10 @@
-use bytes::Buf;
use derive_more::Deref;
-use reth_codecs::Compact;
-use serde::{Deserialize, Serialize};
-
pub use nybbles::Nibbles;

/// The representation of nibbles of the merkle trie stored in the database.
-#[derive(
-    Clone,
-    Debug,
-    Default,
-    PartialEq,
-    Eq,
-    PartialOrd,
-    Ord,
-    Hash,
-    Serialize,
-    Deserialize,
-    derive_more::Index,
-)]
+#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, derive_more::Index)]
+#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(feature = "test-utils", derive(arbitrary::Arbitrary))]
pub struct StoredNibbles(pub Nibbles);

impl From<Nibbles> for StoredNibbles {
@@ -56,7 +42,8 @@ impl core::borrow::Borrow<[u8]> for StoredNibbles {
    }
}

-impl Compact for StoredNibbles {
+#[cfg(any(test, feature = "reth-codec"))]
+impl reth_codecs::Compact for StoredNibbles {
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
@@ -66,6 +53,8 @@ impl Compact for StoredNibbles {
    }

    fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) {
+        use bytes::Buf;
+
        let nibbles = &buf[..len];
        buf.advance(len);
        (Self(Nibbles::from_nibbles_unchecked(nibbles)), buf)
@@ -73,7 +62,9 @@
}

/// The representation of nibbles of the merkle trie stored in the database.
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord, Hash, Deref)]
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Deref)]
+#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(feature = "test-utils", derive(arbitrary::Arbitrary))]
pub struct StoredNibblesSubKey(pub Nibbles);

impl From<Nibbles> for StoredNibblesSubKey {
@@ -97,7 +88,8 @@ impl From<StoredNibblesSubKey> for Nibbles {
    }
}

-impl Compact for StoredNibblesSubKey {
+#[cfg(any(test, feature = "reth-codec"))]
+impl reth_codecs::Compact for StoredNibblesSubKey {
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
@@ -118,3 +110,98 @@ impl Compact for StoredNibblesSubKey {
        (Self(Nibbles::from_nibbles_unchecked(&buf[..len])), &buf[65..])
    }
}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use bytes::BytesMut;
+    use reth_codecs::Compact;
+
+    #[test]
+    fn test_stored_nibbles_from_nibbles() {
+        let nibbles = Nibbles::from_nibbles_unchecked(vec![0x12, 0x34, 0x56]);
+        let stored = StoredNibbles::from(nibbles.clone());
+        assert_eq!(stored.0, nibbles);
+    }
+
+    #[test]
+    fn test_stored_nibbles_from_vec() {
+        let bytes = vec![0x12, 0x34, 0x56];
+        let stored = StoredNibbles::from(bytes.clone());
+        assert_eq!(stored.0.as_slice(), bytes.as_slice());
+    }
+
+    #[test]
+    fn test_stored_nibbles_equality() {
+        let bytes = vec![0x12, 0x34];
+        let stored = StoredNibbles::from(bytes.clone());
+        assert_eq!(stored, *bytes.as_slice());
+    }
+
+    #[test]
+    fn test_stored_nibbles_partial_cmp() {
+        let stored = StoredNibbles::from(vec![0x12, 0x34]);
+        let other = vec![0x12, 0x35];
+        assert!(stored < *other.as_slice());
+    }
+
+    #[test]
+    fn test_stored_nibbles_to_compact() {
+        let stored = StoredNibbles::from(vec![0x12, 0x34]);
+        let mut buf = BytesMut::with_capacity(10);
+        let len = stored.to_compact(&mut buf);
+        assert_eq!(len, 2);
+        assert_eq!(buf, &vec![0x12, 0x34][..]);
+    }
+
+    #[test]
+    fn test_stored_nibbles_from_compact() {
+        let buf = vec![0x12, 0x34, 0x56];
+        let (stored, remaining) = StoredNibbles::from_compact(&buf, 2);
+        assert_eq!(stored.0.as_slice(), &[0x12, 0x34]);
+        assert_eq!(remaining, &[0x56]);
+    }
+
+    #[test]
+    fn test_stored_nibbles_subkey_from_nibbles() {
+        let nibbles = Nibbles::from_nibbles_unchecked(vec![0x12, 0x34]);
+        let subkey = StoredNibblesSubKey::from(nibbles.clone());
+        assert_eq!(subkey.0, nibbles);
+    }
+
+    #[test]
+    fn test_stored_nibbles_subkey_to_compact() {
+        let subkey = StoredNibblesSubKey::from(vec![0x12, 0x34]);
+        let mut buf = BytesMut::with_capacity(65);
+        let len = subkey.to_compact(&mut buf);
+        assert_eq!(len, 65);
+        assert_eq!(buf[..2], [0x12, 0x34]);
+        assert_eq!(buf[64], 2); // Length byte
+    }
+
+    #[test]
+    fn test_stored_nibbles_subkey_from_compact() {
+        let mut buf = vec![0x12, 0x34];
+        buf.resize(65, 0);
+        buf[64] = 2;
+        let (subkey, remaining) = StoredNibblesSubKey::from_compact(&buf, 65);
+        assert_eq!(subkey.0.as_slice(), &[0x12, 0x34]);
+        assert_eq!(remaining, &[] as &[u8]);
+    }
+
+    #[test]
+    fn test_serialization_stored_nibbles() {
+        let stored = StoredNibbles::from(vec![0x12, 0x34]);
+        let serialized = serde_json::to_string(&stored).unwrap();
+        let deserialized: StoredNibbles = serde_json::from_str(&serialized).unwrap();
+        assert_eq!(stored, deserialized);
+    }
+
+    #[test]
+    fn test_serialization_stored_nibbles_subkey() {
+        let subkey = StoredNibblesSubKey::from(vec![0x12, 0x34]);
+        let serialized = serde_json::to_string(&subkey).unwrap();
+        let deserialized: StoredNibblesSubKey = serde_json::from_str(&serialized).unwrap();
+        assert_eq!(subkey, deserialized);
+    }
+}
diff --git a/crates/trie/trie/src/prefix_set.rs b/crates/trie/common/src/prefix_set.rs
similarity index 96%
rename from crates/trie/trie/src/prefix_set.rs
rename to crates/trie/common/src/prefix_set.rs
index da912fbbdad..2536a41ff0c 100644
--- a/crates/trie/trie/src/prefix_set.rs
+++ b/crates/trie/common/src/prefix_set.rs
@@ -1,9 +1,9 @@
use crate::Nibbles;
-use alloy_primitives::B256;
-use std::{
-    collections::{HashMap, HashSet},
-    sync::Arc,
+use alloy_primitives::{
+    map::{HashMap, HashSet},
+    B256,
};
+use std::sync::Arc;

/// Collection of mutable prefix sets.
#[derive(Clone, Default, Debug)]
@@ -73,7 +73,7 @@ pub struct TriePrefixSets {
/// # Examples
///
/// ```
-/// use reth_trie::{prefix_set::PrefixSetMut, Nibbles};
+/// use reth_trie_common::{prefix_set::PrefixSetMut, Nibbles};
///
/// let mut prefix_set_mut = PrefixSetMut::default();
/// prefix_set_mut.insert(Nibbles::from_nibbles_unchecked(&[0xa, 0xb]));
@@ -168,8 +168,7 @@ pub struct PrefixSet {
}

impl PrefixSet {
-    /// Returns `true` if any of the keys in the set has the given prefix or
-    /// if the given prefix is a prefix of any key in the set.
+    /// Returns `true` if any of the keys in the set has the given prefix
    #[inline]
    pub fn contains(&mut self, prefix: &[u8]) -> bool {
        if self.all {
@@ -212,8 +211,8 @@ impl PrefixSet {
}

impl<'a> IntoIterator for &'a PrefixSet {
-    type IntoIter = std::slice::Iter<'a, reth_trie_common::Nibbles>;
-    type Item = &'a reth_trie_common::Nibbles;
+    type Item = &'a Nibbles;
+    type IntoIter = std::slice::Iter<'a, Nibbles>;
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs
index 8aca67f8d1a..99b315d2467 100644
--- a/crates/trie/common/src/proofs.rs
+++ b/crates/trie/common/src/proofs.rs
@@ -1,30 +1,60 @@
//! Merkle trie proofs.

use crate::{Nibbles, TrieAccount};
-use alloy_primitives::{keccak256, Address, Bytes, B256, U256};
+use alloy_consensus::constants::KECCAK_EMPTY;
+use alloy_primitives::{
+    keccak256,
+    map::{hash_map, HashMap},
+    Address, Bytes, B256, U256,
+};
use alloy_rlp::{encode_fixed_size, Decodable, EMPTY_STRING_CODE};
use alloy_trie::{
    nodes::TrieNode,
    proof::{verify_proof, ProofNodes, ProofVerificationError},
-    EMPTY_ROOT_HASH,
+    TrieMask, EMPTY_ROOT_HASH,
};
use itertools::Itertools;
-use reth_primitives_traits::{constants::KECCAK_EMPTY, Account};
-use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
+use reth_primitives_traits::Account;

/// The state multiproof of target accounts and multiproofs of their storage tries.
/// Multiproof is effectively a state subtrie that only contains the nodes
-/// in the paths of target accounts.
-#[derive(Clone, Default, Debug)]
+/// in the paths of target accounts.
+#[derive(Clone, Default, Debug, PartialEq, Eq)]
pub struct MultiProof {
    /// State trie multiproof for requested accounts.
    pub account_subtree: ProofNodes,
+    /// The hash masks of the branch nodes in the account proof.
+    pub branch_node_hash_masks: HashMap<Nibbles, TrieMask>,
    /// Storage trie multiproofs.
    pub storages: HashMap<B256, StorageMultiProof>,
}

impl MultiProof {
+    /// Return the account proof nodes for the given account path.
+    pub fn account_proof_nodes(&self, path: &Nibbles) -> Vec<(Nibbles, Bytes)> {
+        self.account_subtree.matching_nodes_sorted(path)
+    }
+
+    /// Return the storage proof nodes for the given storage slots of the account path.
+    pub fn storage_proof_nodes(
+        &self,
+        hashed_address: B256,
+        slots: impl IntoIterator<Item = B256>,
+    ) -> Vec<(B256, Vec<(Nibbles, Bytes)>)> {
+        self.storages
+            .get(&hashed_address)
+            .map(|storage_mp| {
+                slots
+                    .into_iter()
+                    .map(|slot| {
+                        let nibbles = Nibbles::unpack(slot);
+                        (slot, storage_mp.subtree.matching_nodes_sorted(&nibbles))
+                    })
+                    .collect()
+            })
+            .unwrap_or_default()
+    }
+
    /// Construct the account proof from the multiproof.
    pub fn account_proof(
        &self,
@@ -36,10 +66,9 @@ impl MultiProof {

        // Retrieve the account proof.
        let proof = self
-            .account_subtree
-            .matching_nodes_iter(&nibbles)
-            .sorted_by(|a, b| a.0.cmp(b.0))
-            .map(|(_, node)| node.clone())
+            .account_proof_nodes(&nibbles)
+            .into_iter()
+            .map(|(_, node)| node)
            .collect::<Vec<_>>();

        // Inspect the last node in the proof. If it's a leaf node with matching suffix,
@@ -75,15 +104,39 @@ impl MultiProof {
        }
        Ok(AccountProof { address, info, proof, storage_root, storage_proofs })
    }
+
+    /// Extends this multiproof with another one, merging both account and storage
+    /// proofs.
+    pub fn extend(&mut self, other: Self) {
+        self.account_subtree.extend_from(other.account_subtree);
+
+        self.branch_node_hash_masks.extend(other.branch_node_hash_masks);
+
+        for (hashed_address, storage) in other.storages {
+            match self.storages.entry(hashed_address) {
+                hash_map::Entry::Occupied(mut entry) => {
+                    debug_assert_eq!(entry.get().root, storage.root);
+                    let entry = entry.get_mut();
+                    entry.subtree.extend_from(storage.subtree);
+                    entry.branch_node_hash_masks.extend(storage.branch_node_hash_masks);
+                }
+                hash_map::Entry::Vacant(entry) => {
+                    entry.insert(storage);
+                }
+            }
+        }
+    }
}

/// The merkle multiproof of storage trie.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq)]
pub struct StorageMultiProof {
    /// Storage trie root.
    pub root: B256,
    /// Storage multiproof for requested slots.
    pub subtree: ProofNodes,
+    /// The hash masks of the branch nodes in the storage proof.
+    pub branch_node_hash_masks: HashMap<Nibbles, TrieMask>,
}

impl StorageMultiProof {
@@ -95,6 +148,7 @@
                Nibbles::default(),
                Bytes::from([EMPTY_STRING_CODE]),
            )]),
+            branch_node_hash_masks: HashMap::default(),
        }
    }

@@ -128,8 +182,9 @@
}

/// The merkle proof with the relevant account info.
-#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(any(test, feature = "serde"), serde(rename_all = "camelCase"))]
pub struct AccountProof {
    /// The address associated with the account.
    pub address: Address,
@@ -144,6 +199,33 @@ pub struct AccountProof {
    pub storage_proofs: Vec<StorageProof>,
}

+#[cfg(feature = "eip1186")]
+impl AccountProof {
+    /// Convert into an EIP-1186 account proof response
+    pub fn into_eip1186_response(
+        self,
+        slots: Vec<alloy_serde::JsonStorageKey>,
+    ) -> alloy_rpc_types_eth::EIP1186AccountProofResponse {
+        let info = self.info.unwrap_or_default();
+        alloy_rpc_types_eth::EIP1186AccountProofResponse {
+            address: self.address,
+            balance: info.balance,
+            code_hash: info.get_bytecode_hash(),
+            nonce: info.nonce,
+            storage_hash: self.storage_root,
+            account_proof: self.proof,
+            storage_proof: self
+                .storage_proofs
+                .into_iter()
+                .filter_map(|proof| {
+                    let input_slot = slots.iter().find(|s| s.as_b256() == proof.key)?;
+                    Some(proof.into_eip1186_proof(*input_slot))
+                })
+                .collect(),
+        }
+    }
+}
+
impl Default for AccountProof {
    fn default() -> Self {
        Self::new(Address::default())
@@ -184,7 +266,8 @@ impl AccountProof {
}

/// The merkle proof of the storage entry.
-#[derive(Clone, PartialEq, Eq, Default, Debug, Serialize, Deserialize)]
+#[derive(Clone, PartialEq, Eq, Default, Debug)]
+#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
pub struct StorageProof {
    /// The raw storage key.
    pub key: B256,
@@ -197,6 +280,17 @@ pub struct StorageProof {
    pub proof: Vec<Bytes>,
}

+impl StorageProof {
+    /// Convert into an EIP-1186 storage proof
+    #[cfg(feature = "eip1186")]
+    pub fn into_eip1186_proof(
+        self,
+        slot: alloy_serde::JsonStorageKey,
+    ) -> alloy_rpc_types_eth::EIP1186StorageProof {
+        alloy_rpc_types_eth::EIP1186StorageProof { key: slot, value: self.value, proof: self.proof }
+    }
+}
+
impl StorageProof {
    /// Create new storage proof from the storage slot.
    pub fn new(key: B256) -> Self {
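The new `extend` and lookup helpers let a `MultiProof` be assembled incrementally and then queried along one path. A minimal sketch under the assumption that both inputs come from earlier proof runs (the `merge_and_lookup` helper is hypothetical):

```rust
use alloy_primitives::Bytes;
use reth_trie_common::{MultiProof, Nibbles};

// Merge a chunked proof into an accumulator, then list the account nodes
// along one path in sorted order. Account subtrees, branch node hash
// masks, and per-account storage proofs are all merged by `extend`.
fn merge_and_lookup(
    mut accumulated: MultiProof,
    incoming: MultiProof,
    path: &Nibbles,
) -> Vec<(Nibbles, Bytes)> {
    accumulated.extend(incoming);
    accumulated.account_proof_nodes(path)
}
```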
@@ -233,11 +327,12 @@ impl StorageProof {
#[cfg(any(test, feature = "test-utils"))]
pub mod triehash {
    use alloy_primitives::{keccak256, B256};
+    use alloy_rlp::RlpEncodable;
    use hash_db::Hasher;
    use plain_hasher::PlainHasher;

    /// A [Hasher] that calculates a keccak256 hash of the given data.
-    #[derive(Default, Debug, Clone, PartialEq, Eq)]
+    #[derive(Default, Debug, Clone, PartialEq, Eq, RlpEncodable)]
    #[non_exhaustive]
    pub struct KeccakHasher;

@@ -253,3 +348,75 @@ pub mod triehash {
        }
    }
}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_multiproof_extend_account_proofs() {
+        let mut proof1 = MultiProof::default();
+        let mut proof2 = MultiProof::default();
+
+        let addr1 = B256::random();
+        let addr2 = B256::random();
+
+        proof1.account_subtree.insert(
+            Nibbles::unpack(addr1),
+            alloy_rlp::encode_fixed_size(&U256::from(42)).to_vec().into(),
+        );
+        proof2.account_subtree.insert(
+            Nibbles::unpack(addr2),
+            alloy_rlp::encode_fixed_size(&U256::from(43)).to_vec().into(),
+        );
+
+        proof1.extend(proof2);
+
+        assert!(proof1.account_subtree.contains_key(&Nibbles::unpack(addr1)));
+        assert!(proof1.account_subtree.contains_key(&Nibbles::unpack(addr2)));
+    }
+
+    #[test]
+    fn test_multiproof_extend_storage_proofs() {
+        let mut proof1 = MultiProof::default();
+        let mut proof2 = MultiProof::default();
+
+        let addr = B256::random();
+        let root = B256::random();
+
+        let mut subtree1 = ProofNodes::default();
+        subtree1.insert(
+            Nibbles::from_nibbles(vec![0]),
+            alloy_rlp::encode_fixed_size(&U256::from(42)).to_vec().into(),
+        );
+        proof1.storages.insert(
+            addr,
+            StorageMultiProof {
+                root,
+                subtree: subtree1,
+                branch_node_hash_masks: HashMap::default(),
+            },
+        );
+
+        let mut subtree2 = ProofNodes::default();
+        subtree2.insert(
+            Nibbles::from_nibbles(vec![1]),
+            alloy_rlp::encode_fixed_size(&U256::from(43)).to_vec().into(),
+        );
+        proof2.storages.insert(
+            addr,
+            StorageMultiProof {
+                root,
+                subtree: subtree2,
+                branch_node_hash_masks: HashMap::default(),
+            },
+        );
+
+        proof1.extend(proof2);
+
+        let storage = proof1.storages.get(&addr).unwrap();
+        assert_eq!(storage.root, root);
+        assert!(storage.subtree.contains_key(&Nibbles::from_nibbles(vec![0])));
+        assert!(storage.subtree.contains_key(&Nibbles::from_nibbles(vec![1])));
+    }
+}
diff --git a/crates/trie/common/src/root.rs b/crates/trie/common/src/root.rs
index 20f3ba1366d..982dec98837 100644
--- a/crates/trie/common/src/root.rs
+++ b/crates/trie/common/src/root.rs
@@ -7,49 +7,6 @@ use alloy_trie::HashBuilder;
use itertools::Itertools;
use nybbles::Nibbles;

-/// Adjust the index of an item for rlp encoding.
-pub const fn adjust_index_for_rlp(i: usize, len: usize) -> usize {
-    if i > 0x7f {
-        i
-    } else if i == 0x7f || i + 1 == len {
-        0
-    } else {
-        i + 1
-    }
-}
-
-/// Compute a trie root of the collection of rlp encodable items.
-pub fn ordered_trie_root<T: Encodable>(items: &[T]) -> B256 {
-    ordered_trie_root_with_encoder(items, |item, buf| item.encode(buf))
-}
-
-/// Compute a trie root of the collection of items with a custom encoder.
-pub fn ordered_trie_root_with_encoder<T, F>(items: &[T], mut encode: F) -> B256
-where
-    F: FnMut(&T, &mut Vec<u8>),
-{
-    if items.is_empty() {
-        return alloy_trie::EMPTY_ROOT_HASH;
-    }
-
-    let mut value_buffer = Vec::new();
-
-    let mut hb = HashBuilder::default();
-    let items_len = items.len();
-    for i in 0..items_len {
-        let index = adjust_index_for_rlp(i, items_len);
-
-        let index_buffer = alloy_rlp::encode_fixed_size(&index);
-
-        value_buffer.clear();
-        encode(&items[index], &mut value_buffer);
-
-        hb.add_leaf(Nibbles::unpack(&index_buffer), &value_buffer);
-    }
-
-    hb.root()
-}
-
/// Hashes and sorts account keys, then proceeds to calculating the root hash of the state
/// represented as MPT.
/// See [`state_root_unsorted`] for more info.
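For readers tracking the removal above: the index adjustment inside the deleted `ordered_trie_root_with_encoder` is the subtle part. RLP-encoded indices `1..=0x7f` are single bytes that sort lexicographically before the `0x80` encoding of index 0, so leaves must be fed to the hash builder in the reordered sequence. A self-contained illustration using the function body copied from the removed code:

```rust
// Copied from the removed helper: maps iteration position to the item
// index whose RLP-encoded key comes next in lexicographic order.
const fn adjust_index_for_rlp(i: usize, len: usize) -> usize {
    if i > 0x7f {
        i
    } else if i == 0x7f || i + 1 == len {
        0
    } else {
        i + 1
    }
}

fn main() {
    // For a four-item trie the hash builder sees indices 1, 2, 3, 0:
    // rlp(1) = 0x01 < rlp(2) = 0x02 < rlp(3) = 0x03 < rlp(0) = 0x80.
    let order: Vec<usize> = (0..4).map(|i| adjust_index_for_rlp(i, 4)).collect();
    assert_eq!(order, vec![1, 2, 3, 0]);
}
```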
diff --git a/crates/trie/common/src/storage.rs b/crates/trie/common/src/storage.rs
index b61abb11688..3ebcc4e810e 100644
--- a/crates/trie/common/src/storage.rs
+++ b/crates/trie/common/src/storage.rs
@@ -1,9 +1,8 @@
use super::{BranchNodeCompact, StoredNibblesSubKey};
-use reth_codecs::Compact;
-use serde::{Deserialize, Serialize};

/// Account storage trie node.
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)]
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
+#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
pub struct StorageTrieEntry {
    /// The nibbles of the intermediate node
    pub nibbles: StoredNibblesSubKey,
@@ -14,7 +13,8 @@ pub struct StorageTrieEntry {
// NOTE: Removing reth_codec and manually encode subkey
// and compress second part of the value. If we have compression
// over whole value (Even SubKey) that would mess up fetching of values with seek_by_key_subkey
-impl Compact for StorageTrieEntry {
+#[cfg(any(test, feature = "reth-codec"))]
+impl reth_codecs::Compact for StorageTrieEntry {
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
diff --git a/crates/trie/common/src/subnode.rs b/crates/trie/common/src/subnode.rs
index c64b2317cf3..de65a788780 100644
--- a/crates/trie/common/src/subnode.rs
+++ b/crates/trie/common/src/subnode.rs
@@ -1,6 +1,4 @@
use super::BranchNodeCompact;
-use bytes::Buf;
-use reth_codecs::Compact;

/// Walker sub node for storing intermediate state root calculation state in the database.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
@@ -13,7 +11,8 @@ pub struct StoredSubNode {
    pub node: Option<BranchNodeCompact>,
}

-impl Compact for StoredSubNode {
+#[cfg(any(test, feature = "reth-codec"))]
+impl reth_codecs::Compact for StoredSubNode {
    fn to_compact<B>(&self, buf: &mut B) -> usize
    where
        B: bytes::BufMut + AsMut<[u8]>,
@@ -46,6 +45,8 @@ impl Compact for StoredSubNode {
    }

    fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) {
+        use bytes::Buf;
+
        let key_len = buf.get_u16() as usize;
        let key = Vec::from(&buf[..key_len]);
        buf.advance(key_len);
@@ -69,6 +70,7 @@ mod tests {
    use super::*;
    use crate::TrieMask;
    use alloy_primitives::B256;
+    use reth_codecs::Compact;

    #[test]
    fn subnode_roundtrip() {
diff --git a/crates/trie/trie/src/updates.rs b/crates/trie/common/src/updates.rs
similarity index 87%
rename from crates/trie/trie/src/updates.rs
rename to crates/trie/common/src/updates.rs
index 6d1bcab63d8..6f80eb16553 100644
--- a/crates/trie/trie/src/updates.rs
+++ b/crates/trie/common/src/updates.rs
@@ -1,16 +1,21 @@
-use crate::{walker::TrieWalker, BranchNodeCompact, HashBuilder, Nibbles};
-use alloy_primitives::B256;
-use std::collections::{HashMap, HashSet};
+use crate::{BranchNodeCompact, HashBuilder, Nibbles};
+use alloy_primitives::{
+    map::{HashMap, HashSet},
+    B256,
+};

/// The aggregation of trie updates.
#[derive(PartialEq, Eq, Clone, Default, Debug)]
-#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
pub struct TrieUpdates {
-    #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_map"))]
-    pub(crate) account_nodes: HashMap<Nibbles, BranchNodeCompact>,
-    #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_set"))]
-    pub(crate) removed_nodes: HashSet<Nibbles>,
-    pub(crate) storage_tries: HashMap<B256, StorageTrieUpdates>,
+    /// Collection of updated intermediate account nodes indexed by full path.
+    #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_map"))]
+    pub account_nodes: HashMap<Nibbles, BranchNodeCompact>,
+    /// Collection of removed intermediate account nodes indexed by full path.
+    #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_set"))]
+    pub removed_nodes: HashSet<Nibbles>,
+    /// Collection of updated storage tries indexed by the hashed address.
+    pub storage_tries: HashMap<B256, StorageTrieUpdates>,
}

impl TrieUpdates {
@@ -75,20 +80,19 @@ impl TrieUpdates {
    }

    /// Finalize state trie updates.
-    pub fn finalize<C>(
+    pub fn finalize(
        &mut self,
-        walker: TrieWalker<C>,
        hash_builder: HashBuilder,
+        removed_keys: HashSet<Nibbles>,
        destroyed_accounts: HashSet<B256>,
    ) {
-        // Retrieve deleted keys from trie walker.
-        let (_, removed_node_keys) = walker.split();
-        self.removed_nodes.extend(exclude_empty(removed_node_keys));
-
        // Retrieve updated nodes from hash builder.
        let (_, updated_nodes) = hash_builder.split();
        self.account_nodes.extend(exclude_empty_from_pair(updated_nodes));

+        // Add deleted node paths.
+        self.removed_nodes.extend(exclude_empty(removed_keys));
+
        // Add deleted storage tries for destroyed accounts.
        for destroyed in destroyed_accounts {
            self.storage_tries.entry(destroyed).or_default().set_deleted(true);
@@ -110,16 +114,16 @@
/// Trie updates for storage trie of a single account.
#[derive(PartialEq, Eq, Clone, Default, Debug)]
-#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(any(test, feature = "serde"), derive(serde::Serialize, serde::Deserialize))]
pub struct StorageTrieUpdates {
    /// Flag indicating whether the trie was deleted.
-    pub(crate) is_deleted: bool,
+    pub is_deleted: bool,
    /// Collection of updated storage trie nodes.
-    #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_map"))]
-    pub(crate) storage_nodes: HashMap<Nibbles, BranchNodeCompact>,
+    #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_map"))]
+    pub storage_nodes: HashMap<Nibbles, BranchNodeCompact>,
    /// Collection of removed storage trie nodes.
-    #[cfg_attr(feature = "serde", serde(with = "serde_nibbles_set"))]
-    pub(crate) removed_nodes: HashSet<Nibbles>,
+    #[cfg_attr(any(test, feature = "serde"), serde(with = "serde_nibbles_set"))]
+    pub removed_nodes: HashSet<Nibbles>,
}

#[cfg(feature = "test-utils")]
@@ -198,14 +202,13 @@ impl StorageTrieUpdates {
    }

    /// Finalize storage trie updates by taking updates from walker and hash builder.
-    pub fn finalize<C>(&mut self, walker: TrieWalker<C>, hash_builder: HashBuilder) {
-        // Retrieve deleted keys from trie walker.
-        let (_, removed_keys) = walker.split();
-        self.removed_nodes.extend(exclude_empty(removed_keys));
-
+    pub fn finalize(&mut self, hash_builder: HashBuilder, removed_keys: HashSet<Nibbles>) {
        // Retrieve updated nodes from hash builder.
        let (_, updated_nodes) = hash_builder.split();
        self.storage_nodes.extend(exclude_empty_from_pair(updated_nodes));
+
+        // Add deleted node paths.
+        self.removed_nodes.extend(exclude_empty(removed_keys));
    }

    /// Convert storage trie updates into [`StorageTrieUpdatesSorted`].
@@ -224,11 +227,10 @@
/// hex-encoded packed representation.
///
/// This also sorts the set before serializing.
-#[cfg(feature = "serde")]
+#[cfg(any(test, feature = "serde"))]
mod serde_nibbles_set {
-    use std::collections::HashSet;
-
-    use reth_trie_common::Nibbles;
+    use crate::Nibbles;
+    use alloy_primitives::map::HashSet;
    use serde::{de::Error, Deserialize, Deserializer, Serialize, Serializer};

    pub(super) fn serialize<S>(map: &HashSet<Nibbles>, serializer: S) -> Result<S::Ok, S::Error>
@@ -261,17 +263,16 @@ mod serde_nibbles_set {
/// hex-encoded packed representation.
///
/// This also sorts the map's keys before encoding and serializing.
-#[cfg(feature = "serde")]
+#[cfg(any(test, feature = "serde"))]
mod serde_nibbles_map {
-    use std::{collections::HashMap, marker::PhantomData};
-
-    use alloy_primitives::hex;
-    use reth_trie_common::Nibbles;
+    use crate::Nibbles;
+    use alloy_primitives::{hex, map::HashMap};
    use serde::{
        de::{Error, MapAccess, Visitor},
        ser::SerializeMap,
        Deserialize, Deserializer, Serialize, Serializer,
    };
+    use std::marker::PhantomData;

    pub(super) fn serialize<S, T>(
        map: &HashMap<Nibbles, T>,
@@ -315,7 +316,10 @@
        where
            A: MapAccess<'de>,
        {
-            let mut result = HashMap::with_capacity(map.size_hint().unwrap_or(0));
+            let mut result = HashMap::with_capacity_and_hasher(
+                map.size_hint().unwrap_or(0),
+                Default::default(),
+            );

            while let Some((key, value)) = map.next_entry::<String, T>()? {
                let decoded_key =
@@ -337,9 +341,13 @@
/// Sorted trie updates used for lookups and insertions.
#[derive(PartialEq, Eq, Clone, Default, Debug)]
pub struct TrieUpdatesSorted {
-    pub(crate) account_nodes: Vec<(Nibbles, BranchNodeCompact)>,
-    pub(crate) removed_nodes: HashSet<Nibbles>,
-    pub(crate) storage_tries: HashMap<B256, StorageTrieUpdatesSorted>,
+    /// Sorted collection of updated state nodes with corresponding paths.
+    pub account_nodes: Vec<(Nibbles, BranchNodeCompact)>,
+    /// The set of removed state node keys.
+    pub removed_nodes: HashSet<Nibbles>,
+    /// Storage tries stored by hashed address of the account
+    /// the trie belongs to.
+    pub storage_tries: HashMap<B256, StorageTrieUpdatesSorted>,
}

impl TrieUpdatesSorted {
@@ -362,9 +370,12 @@
/// Sorted trie updates used for lookups and insertions.
#[derive(PartialEq, Eq, Clone, Default, Debug)]
pub struct StorageTrieUpdatesSorted {
-    pub(crate) is_deleted: bool,
-    pub(crate) storage_nodes: Vec<(Nibbles, BranchNodeCompact)>,
-    pub(crate) removed_nodes: HashSet<Nibbles>,
+    /// Flag indicating whether the trie has been deleted/wiped.
+    pub is_deleted: bool,
+    /// Sorted collection of updated storage nodes with corresponding paths.
+    pub storage_nodes: Vec<(Nibbles, BranchNodeCompact)>,
+    /// The set of removed storage node keys.
+    pub removed_nodes: HashSet<Nibbles>,
}

impl StorageTrieUpdatesSorted {
@@ -397,23 +408,22 @@ fn exclude_empty_from_pair(
}

/// Bincode-compatible trie updates type serde implementations.
-#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))]
+#[cfg(feature = "serde-bincode-compat")]
pub mod serde_bincode_compat {
-    use std::{
-        borrow::Cow,
-        collections::{HashMap, HashSet},
+    use crate::{BranchNodeCompact, Nibbles};
+    use alloy_primitives::{
+        map::{HashMap, HashSet},
+        B256,
    };
-
-    use alloy_primitives::B256;
-    use reth_trie_common::{BranchNodeCompact, Nibbles};
    use serde::{Deserialize, Deserializer, Serialize, Serializer};
    use serde_with::{DeserializeAs, SerializeAs};
+    use std::borrow::Cow;

    /// Bincode-compatible [`super::TrieUpdates`] serde implementation.
    ///
    /// Intended to use with the [`serde_with::serde_as`] macro in the following way:
    /// ```rust
-    /// use reth_trie::{serde_bincode_compat, updates::TrieUpdates};
+    /// use reth_trie_common::{serde_bincode_compat, updates::TrieUpdates};
    /// use serde::{Deserialize, Serialize};
    /// use serde_with::serde_as;
    ///
@@ -477,7 +487,7 @@
    ///
    /// Intended to use with the [`serde_with::serde_as`] macro in the following way:
    /// ```rust
-    /// use reth_trie::{serde_bincode_compat, updates::StorageTrieUpdates};
+    /// use reth_trie_common::{serde_bincode_compat, updates::StorageTrieUpdates};
    /// use serde::{Deserialize, Serialize};
    /// use serde_with::serde_as;
    ///
@@ -538,12 +548,12 @@ pub mod serde_bincode_compat {

    #[cfg(test)]
    mod tests {
-        use crate::updates::StorageTrieUpdates;
-
-        use super::super::{serde_bincode_compat, TrieUpdates};
-
+        use crate::{
+            serde_bincode_compat,
+            updates::{StorageTrieUpdates, TrieUpdates},
+            BranchNodeCompact, Nibbles,
+        };
        use alloy_primitives::B256;
-        use reth_trie_common::{BranchNodeCompact, Nibbles};
        use serde::{Deserialize, Serialize};
        use serde_with::serde_as;

@@ -552,7 +562,7 @@
        #[serde_as]
        #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
        struct Data {
-            #[serde_as(as = "serde_bincode_compat::TrieUpdates")]
+            #[serde_as(as = "serde_bincode_compat::updates::TrieUpdates")]
            trie_updates: TrieUpdates,
        }

@@ -585,7 +595,7 @@
        #[serde_as]
        #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
        struct Data {
-            #[serde_as(as = "serde_bincode_compat::StorageTrieUpdates")]
+            #[serde_as(as = "serde_bincode_compat::updates::StorageTrieUpdates")]
            trie_updates: StorageTrieUpdates,
        }
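With the walker dependency dropped from `reth-trie-common`, callers now split the walker themselves and pass the removed keys in explicitly. A sketch of the new call shape, assuming the inputs come from a finished state-root computation (the `finalize_updates` wrapper is hypothetical):

```rust
use alloy_primitives::{map::HashSet, B256};
use reth_trie_common::{updates::TrieUpdates, HashBuilder, Nibbles};

// Post-change call shape: `finalize` receives the deleted node paths
// directly instead of a `TrieWalker` to split internally.
fn finalize_updates(
    hash_builder: HashBuilder,
    removed_keys: HashSet<Nibbles>,
    destroyed_accounts: HashSet<B256>,
) -> TrieUpdates {
    let mut updates = TrieUpdates::default();
    updates.finalize(hash_builder, removed_keys, destroyed_accounts);
    updates
}
```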
"reth-chainspec/test-utils", + "reth-primitives/test-utils", + "reth-db/test-utils", + "reth-db-api/test-utils", + "reth-provider/test-utils", + "reth-trie/test-utils", +] diff --git a/crates/trie/db/src/commitment.rs b/crates/trie/db/src/commitment.rs new file mode 100644 index 00000000000..c608aefff8a --- /dev/null +++ b/crates/trie/db/src/commitment.rs @@ -0,0 +1,39 @@ +use crate::{ + DatabaseHashedCursorFactory, DatabaseProof, DatabaseStateRoot, DatabaseStorageRoot, + DatabaseTrieCursorFactory, DatabaseTrieWitness, +}; +use reth_db::transaction::DbTx; +use reth_trie::{ + proof::Proof, witness::TrieWitness, KeccakKeyHasher, KeyHasher, StateRoot, StorageRoot, +}; + +/// The `StateCommitment` trait provides associated types for state commitment operations. +pub trait StateCommitment: std::fmt::Debug + Send + Sync + Unpin + 'static { + /// The state root type. + type StateRoot<'a, TX: DbTx + 'a>: DatabaseStateRoot<'a, TX>; + /// The storage root type. + type StorageRoot<'a, TX: DbTx + 'a>: DatabaseStorageRoot<'a, TX>; + /// The state proof type. + type StateProof<'a, TX: DbTx + 'a>: DatabaseProof<'a, TX>; + /// The state witness type. + type StateWitness<'a, TX: DbTx + 'a>: DatabaseTrieWitness<'a, TX>; + /// The key hasher type. + type KeyHasher: KeyHasher; +} + +/// The state commitment type for Ethereum's Merkle Patricia Trie. +#[derive(Debug)] +#[non_exhaustive] +pub struct MerklePatriciaTrie; + +impl StateCommitment for MerklePatriciaTrie { + type StateRoot<'a, TX: DbTx + 'a> = + StateRoot, DatabaseHashedCursorFactory<'a, TX>>; + type StorageRoot<'a, TX: DbTx + 'a> = + StorageRoot, DatabaseHashedCursorFactory<'a, TX>>; + type StateProof<'a, TX: DbTx + 'a> = + Proof, DatabaseHashedCursorFactory<'a, TX>>; + type StateWitness<'a, TX: DbTx + 'a> = + TrieWitness, DatabaseHashedCursorFactory<'a, TX>>; + type KeyHasher = KeccakKeyHasher; +} diff --git a/crates/trie/db/src/lib.rs b/crates/trie/db/src/lib.rs index 3a9b1e32823..27c18af6cbf 100644 --- a/crates/trie/db/src/lib.rs +++ b/crates/trie/db/src/lib.rs @@ -1,5 +1,6 @@ //! An integration of [`reth-trie`] with [`reth-db`]. +mod commitment; mod hashed_cursor; mod prefix_set; mod proof; @@ -8,6 +9,7 @@ mod storage; mod trie_cursor; mod witness; +pub use commitment::{MerklePatriciaTrie, StateCommitment}; pub use hashed_cursor::{ DatabaseHashedAccountCursor, DatabaseHashedCursorFactory, DatabaseHashedStorageCursor, }; diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs index 079fe393764..95ff6d91f37 100644 --- a/crates/trie/db/src/prefix_set.rs +++ b/crates/trie/db/src/prefix_set.rs @@ -1,4 +1,4 @@ -use alloy_primitives::{keccak256, BlockNumber, B256}; +use alloy_primitives::{BlockNumber, B256}; use derive_more::Deref; use reth_db::tables; use reth_db_api::{ @@ -8,25 +8,36 @@ use reth_db_api::{ DatabaseError, }; use reth_primitives::StorageEntry; -use reth_trie::prefix_set::{PrefixSetMut, TriePrefixSets}; -use reth_trie_common::Nibbles; +use reth_trie::{ + prefix_set::{PrefixSetMut, TriePrefixSets}, + KeyHasher, Nibbles, +}; use std::{ collections::{HashMap, HashSet}, + marker::PhantomData, ops::RangeInclusive, }; /// A wrapper around a database transaction that loads prefix sets within a given block range. -#[derive(Deref, Debug)] -pub struct PrefixSetLoader<'a, TX>(&'a TX); +#[derive(Debug)] +pub struct PrefixSetLoader<'a, TX, KH>(&'a TX, PhantomData); -impl<'a, TX> PrefixSetLoader<'a, TX> { +impl<'a, TX, KH> PrefixSetLoader<'a, TX, KH> { /// Create a new loader. 
diff --git a/crates/trie/db/src/lib.rs b/crates/trie/db/src/lib.rs
index 3a9b1e32823..27c18af6cbf 100644
--- a/crates/trie/db/src/lib.rs
+++ b/crates/trie/db/src/lib.rs
@@ -1,5 +1,6 @@
//! An integration of [`reth-trie`] with [`reth-db`].

+mod commitment;
mod hashed_cursor;
mod prefix_set;
mod proof;
@@ -8,6 +9,7 @@ mod storage;
mod trie_cursor;
mod witness;

+pub use commitment::{MerklePatriciaTrie, StateCommitment};
pub use hashed_cursor::{
    DatabaseHashedAccountCursor, DatabaseHashedCursorFactory, DatabaseHashedStorageCursor,
};
diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs
index 079fe393764..95ff6d91f37 100644
--- a/crates/trie/db/src/prefix_set.rs
+++ b/crates/trie/db/src/prefix_set.rs
@@ -1,4 +1,4 @@
-use alloy_primitives::{keccak256, BlockNumber, B256};
+use alloy_primitives::{BlockNumber, B256};
use derive_more::Deref;
use reth_db::tables;
use reth_db_api::{
@@ -8,25 +8,36 @@ use reth_db_api::{
    DatabaseError,
};
use reth_primitives::StorageEntry;
-use reth_trie::prefix_set::{PrefixSetMut, TriePrefixSets};
-use reth_trie_common::Nibbles;
+use reth_trie::{
+    prefix_set::{PrefixSetMut, TriePrefixSets},
+    KeyHasher, Nibbles,
+};
use std::{
    collections::{HashMap, HashSet},
+    marker::PhantomData,
    ops::RangeInclusive,
};

/// A wrapper around a database transaction that loads prefix sets within a given block range.
-#[derive(Deref, Debug)]
-pub struct PrefixSetLoader<'a, TX>(&'a TX);
+#[derive(Debug)]
+pub struct PrefixSetLoader<'a, TX, KH>(&'a TX, PhantomData<KH>);

-impl<'a, TX> PrefixSetLoader<'a, TX> {
+impl<'a, TX, KH> PrefixSetLoader<'a, TX, KH> {
    /// Create a new loader.
    pub const fn new(tx: &'a TX) -> Self {
-        Self(tx)
+        Self(tx, PhantomData)
+    }
+}
+
+impl<TX, KH> Deref for PrefixSetLoader<'_, TX, KH> {
+    type Target = TX;
+
+    fn deref(&self) -> &Self::Target {
+        self.0
    }
}

-impl<TX: DbTx> PrefixSetLoader<'_, TX> {
+impl<TX: DbTx, KH: KeyHasher> PrefixSetLoader<'_, TX, KH> {
    /// Load all account and storage changes for the given block range.
    pub fn load(self, range: RangeInclusive<BlockNumber>) -> Result<TriePrefixSets, DatabaseError> {
        // Initialize prefix sets.
@@ -36,13 +47,13 @@ impl<TX: DbTx> PrefixSetLoader<'_, TX> {

        // Walk account changeset and insert account prefixes.
        let mut account_changeset_cursor = self.cursor_read::<tables::AccountChangeSets>()?;
-        let mut account_plain_state_cursor = self.cursor_read::<tables::PlainAccountState>()?;
+        let mut account_hashed_state_cursor = self.cursor_read::<tables::HashedAccounts>()?;
        for account_entry in account_changeset_cursor.walk_range(range.clone())? {
            let (_, AccountBeforeTx { address, .. }) = account_entry?;
-            let hashed_address = keccak256(address);
+            let hashed_address = KH::hash_key(address);
            account_prefix_set.insert(Nibbles::unpack(hashed_address));

-            if account_plain_state_cursor.seek_exact(address)?.is_none() {
+            if account_hashed_state_cursor.seek_exact(hashed_address)?.is_none() {
                destroyed_accounts.insert(hashed_address);
            }
        }
@@ -53,12 +64,12 @@ impl<TX: DbTx> PrefixSetLoader<'_, TX> {
        let storage_range = BlockNumberAddress::range(range);
        for storage_entry in storage_cursor.walk_range(storage_range)? {
            let (BlockNumberAddress((_, address)), StorageEntry { key, .. }) = storage_entry?;
-            let hashed_address = keccak256(address);
+            let hashed_address = KH::hash_key(address);
            account_prefix_set.insert(Nibbles::unpack(hashed_address));
            storage_prefix_sets
                .entry(hashed_address)
                .or_default()
-                .insert(Nibbles::unpack(keccak256(key)));
+                .insert(Nibbles::unpack(KH::hash_key(key)));
        }

        Ok(TriePrefixSets {
diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs
index 9bf08fe136f..99c87bf05eb 100644
--- a/crates/trie/db/src/proof.rs
+++ b/crates/trie/db/src/proof.rs
@@ -10,9 +10,8 @@ use reth_trie::{
    hashed_cursor::HashedPostStateCursorFactory,
    proof::{Proof, StorageProof},
    trie_cursor::InMemoryTrieCursorFactory,
-    HashedPostStateSorted, HashedStorage, MultiProof, TrieInput,
+    AccountProof, HashedPostStateSorted, HashedStorage, MultiProof, StorageMultiProof, TrieInput,
};
-use reth_trie_common::AccountProof;

/// Extends [`Proof`] with operations specific for working with a database transaction.
pub trait DatabaseProof<'a, TX> {
@@ -96,7 +95,15 @@ pub trait DatabaseStorageProof<'a, TX> {
        address: Address,
        slot: B256,
        storage: HashedStorage,
-    ) -> Result;
+    ) -> Result;
+
+    /// Generates the storage multiproof for target slots based on [`TrieInput`].
+    fn overlay_storage_multiproof(
+        tx: &'a TX,
+        address: Address,
+        slots: &[B256],
+        storage: HashedStorage,
+    ) -> Result;
}

impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX>
@@ -111,12 +118,12 @@ impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX>
        address: Address,
        slot: B256,
        storage: HashedStorage,
-    ) -> Result {
+    ) -> Result {
        let hashed_address = keccak256(address);
        let prefix_set = storage.construct_prefix_set();
        let state_sorted = HashedPostStateSorted::new(
            Default::default(),
-            HashMap::from([(hashed_address, storage.into_sorted())]),
+            HashMap::from_iter([(hashed_address, storage.into_sorted())]),
        );
        Self::from_tx(tx, address)
            .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(
                DatabaseHashedCursorFactory::new(tx),
                &state_sorted,
            ))
            .with_prefix_set_mut(prefix_set)
            .storage_proof(slot)
    }
+
+    fn overlay_storage_multiproof(
+        tx: &'a TX,
+        address: Address,
+        slots: &[B256],
+        storage: HashedStorage,
+    ) -> Result {
+        let hashed_address = keccak256(address);
+        let targets = slots.iter().map(keccak256).collect();
+        let prefix_set = storage.construct_prefix_set();
+        let state_sorted = HashedPostStateSorted::new(
+            Default::default(),
+            HashMap::from_iter([(hashed_address, storage.into_sorted())]),
+        );
+        Self::from_tx(tx, address)
+            .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(
+                DatabaseHashedCursorFactory::new(tx),
+                &state_sorted,
+            ))
+            .with_prefix_set_mut(prefix_set)
+            .storage_multiproof(targets)
+    }
}
diff --git a/crates/trie/db/src/state.rs b/crates/trie/db/src/state.rs
index 4d46183dfda..5aaf3ebe5b0 100644
--- a/crates/trie/db/src/state.rs
+++ b/crates/trie/db/src/state.rs
@@ -1,5 +1,5 @@
use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory, PrefixSetLoader};
-use alloy_primitives::{keccak256, Address, BlockNumber, B256, U256};
+use alloy_primitives::{Address, BlockNumber, B256, U256};
use reth_db::tables;
use reth_db_api::{
    cursor::DbCursorRO,
@@ -7,16 +7,13 @@ use reth_db_api::{
    transaction::DbTx,
};
use reth_execution_errors::StateRootError;
-use reth_primitives::Account;
use reth_storage_errors::db::DatabaseError;
use reth_trie::{
    hashed_cursor::HashedPostStateCursorFactory, trie_cursor::InMemoryTrieCursorFactory,
-    updates::TrieUpdates, HashedPostState, HashedStorage, StateRoot, StateRootProgress, TrieInput,
-};
-use std::{
-    collections::{hash_map, HashMap},
-    ops::RangeInclusive,
+    updates::TrieUpdates, HashedPostState, HashedStorage, KeccakKeyHasher, KeyHasher, StateRoot,
+    StateRootProgress, TrieInput,
};
+use std::{collections::HashMap, ops::RangeInclusive};
use tracing::debug;

/// Extends [`StateRoot`] with operations specific for working with a database transaction.
@@ -126,7 +123,7 @@ pub trait DatabaseStateRoot<'a, TX>: Sized {
pub trait DatabaseHashedPostState<TX>: Sized {
    /// Initializes [`HashedPostState`] from reverts. Iterates over state reverts from the specified
    /// block up to the current tip and aggregates them into hashed state in reverse.
-    fn from_reverts(tx: &TX, from: BlockNumber) -> Result<Self, DatabaseError>;
+    fn from_reverts<KH: KeyHasher>(tx: &TX, from: BlockNumber) -> Result<Self, DatabaseError>;
}

impl<'a, TX: DbTx> DatabaseStateRoot<'a, TX>
@@ -140,7 +137,7 @@
        tx: &'a TX,
        range: RangeInclusive<BlockNumber>,
    ) -> Result {
-        let loaded_prefix_sets = PrefixSetLoader::new(tx).load(range)?;
+        let loaded_prefix_sets = PrefixSetLoader::<_, KeccakKeyHasher>::new(tx).load(range)?;
        Ok(Self::from_tx(tx).with_prefix_sets(loaded_prefix_sets))
    }

@@ -220,15 +217,13 @@
}

impl<TX: DbTx> DatabaseHashedPostState<TX> for HashedPostState {
-    fn from_reverts(tx: &TX, from: BlockNumber) -> Result<Self, DatabaseError> {
+    fn from_reverts<KH: KeyHasher>(tx: &TX, from: BlockNumber) -> Result<Self, DatabaseError> {
        // Iterate over account changesets and record value before first occurring account change.
-        let mut accounts = HashMap::<Address, Option<Account>>::default();
+        let mut accounts = HashMap::new();
        let mut account_changesets_cursor = tx.cursor_read::<tables::AccountChangeSets>()?;
        for entry in account_changesets_cursor.walk_range(from..)? {
            let (_, AccountBeforeTx { address, info }) = entry?;
-            if let hash_map::Entry::Vacant(entry) = accounts.entry(address) {
-                entry.insert(info);
-            }
+            accounts.entry(address).or_insert(info);
        }

        // Iterate over storage changesets and record value before first occurring storage change.
@@ -239,25 +234,23 @@
        {
            let (BlockNumberAddress((_, address)), storage) = entry?;
            let account_storage = storages.entry(address).or_default();
-            if let hash_map::Entry::Vacant(entry) = account_storage.entry(storage.key) {
-                entry.insert(storage.value);
-            }
+            account_storage.entry(storage.key).or_insert(storage.value);
        }

        let hashed_accounts =
-            accounts.into_iter().map(|(address, info)| (keccak256(address), info)).collect();
+            accounts.into_iter().map(|(address, info)| (KH::hash_key(address), info)).collect();

        let hashed_storages = storages
            .into_iter()
            .map(|(address, storage)| {
                (
-                    keccak256(address),
+                    KH::hash_key(address),
                    HashedStorage::from_iter(
                        // The `wiped` flag indicates only whether previous storage entries
                        // should be looked up in db or not. For reverts it's a noop since all
                        // wiped changes had been written as storage reverts.
                        false,
-                        storage.into_iter().map(|(slot, value)| (keccak256(slot), value)),
+                        storage.into_iter().map(|(slot, value)| (KH::hash_key(slot), value)),
                    ),
                )
            })
            .collect();
@@ -273,8 +266,8 @@ mod tests {
    use alloy_primitives::{hex, map::HashMap, Address, U256};
    use reth_db::test_utils::create_test_rw_db;
    use reth_db_api::database::Database;
-    use reth_primitives::revm_primitives::AccountInfo;
-    use revm::db::BundleState;
+    use reth_trie::KeccakKeyHasher;
+    use revm::{db::BundleState, primitives::AccountInfo};

    #[test]
    fn from_bundle_state_with_rayon() {
@@ -294,7 +287,7 @@
            .build();
        assert_eq!(bundle_state.reverts.len(), 1);

-        let post_state = HashedPostState::from_bundle_state(&bundle_state.state);
+        let post_state = HashedPostState::from_bundle_state::<KeccakKeyHasher>(&bundle_state.state);
        assert_eq!(post_state.accounts.len(), 2);
        assert_eq!(post_state.storages.len(), 2);
diff --git a/crates/trie/db/src/storage.rs b/crates/trie/db/src/storage.rs
index 6a3bbe1b965..3e40b298fac 100644
--- a/crates/trie/db/src/storage.rs
+++ b/crates/trie/db/src/storage.rs
@@ -43,6 +43,7 @@ impl<'a, TX: DbTx> DatabaseStorageRoot<'a, TX>
            DatabaseTrieCursorFactory::new(tx),
            DatabaseHashedCursorFactory::new(tx),
            address,
+            Default::default(),
            #[cfg(feature = "metrics")]
            TrieRootMetrics::new(TrieType::Storage),
        )
@@ -53,6 +54,7 @@
            DatabaseTrieCursorFactory::new(tx),
            DatabaseHashedCursorFactory::new(tx),
            hashed_address,
+            Default::default(),
            #[cfg(feature = "metrics")]
            TrieRootMetrics::new(TrieType::Storage),
        )
@@ -70,10 +72,10 @@
            DatabaseTrieCursorFactory::new(tx),
            HashedPostStateCursorFactory::new(DatabaseHashedCursorFactory::new(tx), &state_sorted),
            address,
+            prefix_set,
            #[cfg(feature = "metrics")]
            TrieRootMetrics::new(TrieType::Storage),
        )
-        .with_prefix_set(prefix_set)
        .root()
    }
}
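Revert and bundle-state hashing are now generic over the key hasher, and existing callers pin the Keccak behavior with a turbofish, as the updated test above does. A sketch of the updated call, assuming the generic parameters reconstructed above (the `reverted_state` wrapper is hypothetical):

```rust
use reth_db_api::transaction::DbTx;
use reth_storage_errors::db::DatabaseError;
use reth_trie::{HashedPostState, KeccakKeyHasher};
use reth_trie_db::DatabaseHashedPostState;

// Aggregate state reverts from `from` up to the tip, hashing keys with
// Keccak-256 exactly as before the refactor.
fn reverted_state<TX: DbTx>(tx: &TX, from: u64) -> Result<HashedPostState, DatabaseError> {
    HashedPostState::from_reverts::<KeccakKeyHasher>(tx, from)
}
```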
diff --git a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs
index bfded342ba0..b364e9a86f1 100644
--- a/crates/trie/db/src/trie_cursor.rs
+++ b/crates/trie/db/src/trie_cursor.rs
@@ -11,9 +11,8 @@ use reth_storage_errors::db::DatabaseError;
use reth_trie::{
    trie_cursor::{TrieCursor, TrieCursorFactory},
    updates::StorageTrieUpdates,
-    BranchNodeCompact, Nibbles, StoredNibbles, StoredNibblesSubKey,
+    BranchNodeCompact, Nibbles, StorageTrieEntry, StoredNibbles, StoredNibblesSubKey,
};
-use reth_trie_common::StorageTrieEntry;

/// Wrapper struct for database transaction implementing trie cursor factory trait.
#[derive(Debug)]
diff --git a/crates/trie/db/tests/proof.rs b/crates/trie/db/tests/proof.rs
index 79a2ce96fce..eedeee276db 100644
--- a/crates/trie/db/tests/proof.rs
+++ b/crates/trie/db/tests/proof.rs
@@ -6,8 +6,7 @@ use alloy_rlp::EMPTY_STRING_CODE;
use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET};
use reth_primitives::Account;
use reth_provider::test_utils::{create_test_provider_factory, insert_genesis};
-use reth_trie::{proof::Proof, Nibbles};
-use reth_trie_common::{AccountProof, StorageProof};
+use reth_trie::{proof::Proof, AccountProof, Nibbles, StorageProof};
use reth_trie_db::DatabaseProof;
use std::{
    str::FromStr,
diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs
index f5823404c89..4c614d83be6 100644
--- a/crates/trie/db/tests/trie.rs
+++ b/crates/trie/db/tests/trie.rs
@@ -1,38 +1,30 @@
#![allow(missing_docs)]

use alloy_consensus::EMPTY_ROOT_HASH;
-use alloy_primitives::{hex_literal::hex, keccak256, Address, B256, U256};
+use alloy_primitives::{hex_literal::hex, keccak256, map::HashMap, Address, B256, U256};
+use alloy_rlp::Encodable;
use proptest::{prelude::ProptestConfig, proptest};
use proptest_arbitrary_interop::arb;
use reth_db::{tables, test_utils::TempDatabase, DatabaseEnv};
use reth_db_api::{
    cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO},
-    transaction::DbTxMut,
+    transaction::{DbTx, DbTxMut},
};
use reth_primitives::{Account, StorageEntry};
use reth_provider::{
-    test_utils::create_test_provider_factory, DatabaseProviderRW, StorageTrieWriter, TrieWriter,
+    providers::ProviderNodeTypes, test_utils::create_test_provider_factory, DatabaseProviderRW,
+    StorageTrieWriter, TrieWriter,
};
use reth_trie::{
-    prefix_set::PrefixSetMut,
+    prefix_set::{PrefixSetMut, TriePrefixSets},
    test_utils::{state_root, state_root_prehashed, storage_root, storage_root_prehashed},
-    BranchNodeCompact, StateRoot, StorageRoot, TrieMask,
+    triehash::KeccakHasher,
+    updates::StorageTrieUpdates,
+    BranchNodeCompact, HashBuilder, IntermediateStateRootState, Nibbles, StateRoot,
+    StateRootProgress, StorageRoot, TrieAccount, TrieMask,
};
-use reth_trie_common::triehash::KeccakHasher;
use reth_trie_db::{DatabaseStateRoot, DatabaseStorageRoot};
-use std::{
-    collections::{BTreeMap, HashMap},
-    ops::Mul,
-    str::FromStr,
-    sync::Arc,
-};
-
-use alloy_rlp::Encodable;
-use reth_db_api::transaction::DbTx;
-use reth_trie::{
-    prefix_set::TriePrefixSets, updates::StorageTrieUpdates, HashBuilder,
-    IntermediateStateRootState, Nibbles, StateRootProgress, TrieAccount,
-};
+use std::{collections::BTreeMap, ops::Mul, str::FromStr, sync::Arc};

fn insert_account(
    tx: &impl DbTxMut,
@@ -693,8 +685,8 @@ fn storage_trie_around_extension_node() {
    assert_trie_updates(updates.storage_nodes_ref());
}

-fn extension_node_storage_trie<Spec>(
-    tx: &DatabaseProviderRW<Arc<TempDatabase<DatabaseEnv>>, Spec>,
+fn extension_node_storage_trie<N: ProviderNodeTypes>(
+    tx: &DatabaseProviderRW<Arc<TempDatabase<DatabaseEnv>>, N>,
    hashed_address: B256,
) -> (B256, StorageTrieUpdates) {
    let value = U256::from(1);
@@ -721,8 +713,8 @@
    (root, trie_updates)
}

-fn extension_node_trie<Spec>(
-    tx: &DatabaseProviderRW<Arc<TempDatabase<DatabaseEnv>>, Spec>,
+fn extension_node_trie<N: ProviderNodeTypes>(
+    tx: &DatabaseProviderRW<Arc<TempDatabase<DatabaseEnv>>, N>,
) -> B256 {
    let a = Account { nonce: 0, balance: U256::from(1u64), bytecode_hash: Some(B256::random()) };
    let val = encode_account(a, None);
diff --git a/crates/trie/db/tests/walker.rs b/crates/trie/db/tests/walker.rs
index dd4bcd6da8f..0e0b094920b 100644
--- a/crates/trie/db/tests/walker.rs
+++ b/crates/trie/db/tests/walker.rs
@@ -5,9 +5,9 @@ use reth_db::tables;
use reth_db_api::{cursor::DbCursorRW, transaction::DbTxMut};
use reth_provider::test_utils::create_test_provider_factory;
use reth_trie::{
-    prefix_set::PrefixSetMut, trie_cursor::TrieCursor, walker::TrieWalker, StorageTrieEntry,
+    prefix_set::PrefixSetMut, trie_cursor::TrieCursor, walker::TrieWalker, BranchNodeCompact,
+    Nibbles, StorageTrieEntry,
};
-use reth_trie_common::{BranchNodeCompact, Nibbles};
use reth_trie_db::{DatabaseAccountTrieCursor, DatabaseStorageTrieCursor};

#[test]
@@ -63,13 +63,14 @@ where
    // We're traversing the path in lexicographical order.
    for expected in expected {
-        let got = walker.advance().unwrap();
+        walker.advance().unwrap();
+        let got = walker.key().cloned();
        assert_eq!(got.unwrap(), Nibbles::from_nibbles_unchecked(expected.clone()));
    }

    // There should be 8 paths traversed in total from 3 branches.
-    let got = walker.advance().unwrap();
-    assert!(got.is_none());
+    walker.advance().unwrap();
+    assert!(walker.key().is_none());
}

#[test]
diff --git a/crates/trie/db/tests/witness.rs b/crates/trie/db/tests/witness.rs
index 8e00472b473..385f6269f39 100644
--- a/crates/trie/db/tests/witness.rs
+++ b/crates/trie/db/tests/witness.rs
@@ -27,7 +27,7 @@ fn includes_empty_node_preimage() {
    assert_eq!(
        TrieWitness::from_tx(provider.tx_ref())
            .compute(HashedPostState {
-                accounts: HashMap::from([(hashed_address, Some(Account::default()))]),
+                accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]),
                storages: HashMap::default(),
            })
            .unwrap(),
@@ -44,8 +44,8 @@

    let witness = TrieWitness::from_tx(provider.tx_ref())
        .compute(HashedPostState {
-            accounts: HashMap::from([(hashed_address, Some(Account::default()))]),
-            storages: HashMap::from([(
+            accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]),
+            storages: HashMap::from_iter([(
                hashed_address,
                HashedStorage::from_iter(false, [(hashed_slot, U256::from(1))]),
            )]),
@@ -80,12 +80,16 @@ fn includes_nodes_for_destroyed_storage_nodes() {
        .multiproof(HashMap::from_iter([(hashed_address, HashSet::from_iter([hashed_slot]))]))
        .unwrap();

-    let witness = TrieWitness::from_tx(provider.tx_ref())
-        .compute(HashedPostState {
-            accounts: HashMap::from([(hashed_address, Some(Account::default()))]),
-            storages: HashMap::from([(hashed_address, HashedStorage::from_iter(true, []))]), // destroyed
-        })
-        .unwrap();
+    let witness =
+        TrieWitness::from_tx(provider.tx_ref())
+            .compute(HashedPostState {
+                accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]),
+                storages: HashMap::from_iter([(
+                    hashed_address,
+                    HashedStorage::from_iter(true, []),
+                )]), // destroyed
+            })
+            .unwrap();
    assert!(witness.contains_key(&state_root));
    for node in multiproof.account_subtree.values() {
        assert_eq!(witness.get(&keccak256(node)), Some(node));
@@ -126,8 +130,8 @@ fn correctly_decodes_branch_node_values() {

    let witness = TrieWitness::from_tx(provider.tx_ref())
        .compute(HashedPostState {
-            accounts: HashMap::from([(hashed_address, Some(Account::default()))]),
-            storages: HashMap::from([(
+            accounts: HashMap::from_iter([(hashed_address, Some(Account::default()))]),
+            storages: HashMap::from_iter([(
                hashed_address,
                HashedStorage::from_iter(
                    false,
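The walker tests above encode the API change: `advance` no longer yields the next key, it only moves the cursor, and the current path is read through `key()`. A sketch of the new iteration pattern, with signatures inferred from the test diff rather than confirmed against the crate:

```rust
use reth_storage_errors::db::DatabaseError;
use reth_trie::{trie_cursor::TrieCursor, walker::TrieWalker, Nibbles};

// Post-change iteration: advance first, then inspect `key()`; a `None`
// key signals that the walk is exhausted.
fn collect_paths<C: TrieCursor>(
    mut walker: TrieWalker<C>,
) -> Result<Vec<Nibbles>, DatabaseError> {
    let mut paths = Vec::new();
    loop {
        walker.advance()?;
        match walker.key() {
            Some(path) => paths.push(path.clone()),
            None => return Ok(paths),
        }
    }
}
```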
reth-trie.workspace = true +reth-trie-common.workspace = true reth-trie-db.workspace = true reth-execution-errors.workspace = true reth-provider.workspace = true @@ -46,7 +46,11 @@ reth-trie = { workspace = true, features = ["test-utils"] } # misc rand.workspace = true -tokio = { workspace = true, default-features = false, features = ["sync", "rt", "macros"] } +tokio = { workspace = true, default-features = false, features = [ + "sync", + "rt", + "macros", +] } rayon.workspace = true criterion = { workspace = true, features = ["async_tokio"] } proptest.workspace = true diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index d1ffe49dd0a..a9300efa9b0 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -5,15 +5,14 @@ use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use proptest_arbitrary_interop::arb; use reth_primitives::Account; use reth_provider::{ - providers::ConsistentDbView, test_utils::create_test_provider_factory, StateChangeWriter, - TrieWriter, + providers::ConsistentDbView, test_utils::create_test_provider_factory, StateWriter, TrieWriter, }; use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, HashedPostState, HashedStorage, StateRoot, TrieInput, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseStateRoot}; -use reth_trie_parallel::parallel_root::ParallelStateRoot; +use reth_trie_parallel::root::ParallelStateRoot; use std::collections::HashMap; pub fn calculate_state_root(c: &mut Criterion) { diff --git a/crates/trie/parallel/src/lib.rs b/crates/trie/parallel/src/lib.rs index 40a6af34758..5be2a658387 100644 --- a/crates/trie/parallel/src/lib.rs +++ b/crates/trie/parallel/src/lib.rs @@ -14,7 +14,10 @@ pub use storage_root_targets::StorageRootTargets; pub mod stats; /// Implementation of parallel state root computation. -pub mod parallel_root; +pub mod root; + +/// Implementation of parallel proof computation. +pub mod proof; /// Parallel state root metrics. #[cfg(feature = "metrics")] diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs new file mode 100644 index 00000000000..148f7cd5d4d --- /dev/null +++ b/crates/trie/parallel/src/proof.rs @@ -0,0 +1,333 @@ +use crate::{root::ParallelStateRootError, stats::ParallelTrieTracker, StorageRootTargets}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + B256, +}; +use alloy_rlp::{BufMut, Encodable}; +use itertools::Itertools; +use reth_db::DatabaseError; +use reth_execution_errors::StorageRootError; +use reth_provider::{ + providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, + StateCommitmentProvider, +}; +use reth_trie::{ + hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, + node_iter::{TrieElement, TrieNodeIter}, + prefix_set::{PrefixSetMut, TriePrefixSetsMut}, + proof::StorageProof, + trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, + walker::TrieWalker, + HashBuilder, MultiProof, Nibbles, TrieAccount, TrieInput, TRIE_ACCOUNT_RLP_MAX_SIZE, +}; +use reth_trie_common::proof::ProofRetainer; +use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; +use std::sync::Arc; +use tracing::{debug, error}; + +#[cfg(feature = "metrics")] +use crate::metrics::ParallelStateRootMetrics; + +/// TODO: +#[derive(Debug)] +pub struct ParallelProof { + /// Consistent view of the database. + view: ConsistentDbView, + /// Trie input. 
+ input: Arc, + /// Flag indicating whether to include branch node hash masks in the proof. + collect_branch_node_hash_masks: bool, + /// Parallel state root metrics. + #[cfg(feature = "metrics")] + metrics: ParallelStateRootMetrics, +} + +impl ParallelProof { + /// Create new state proof generator. + pub fn new(view: ConsistentDbView, input: Arc) -> Self { + Self { + view, + input, + collect_branch_node_hash_masks: false, + #[cfg(feature = "metrics")] + metrics: ParallelStateRootMetrics::default(), + } + } + + /// Set the flag indicating whether to include branch node hash masks in the proof. + pub const fn with_branch_node_hash_masks(mut self, branch_node_hash_masks: bool) -> Self { + self.collect_branch_node_hash_masks = branch_node_hash_masks; + self + } +} + +impl ParallelProof +where + Factory: DatabaseProviderFactory + + StateCommitmentProvider + + Clone + + Send + + Sync + + 'static, +{ + /// Generate a state multiproof according to specified targets. + pub fn multiproof( + self, + targets: HashMap>, + ) -> Result { + let mut tracker = ParallelTrieTracker::default(); + + let trie_nodes_sorted = self.input.nodes.clone().into_sorted(); + let hashed_state_sorted = self.input.state.clone().into_sorted(); + + // Extend prefix sets with targets + let mut prefix_sets = self.input.prefix_sets.clone(); + prefix_sets.extend(TriePrefixSetsMut { + account_prefix_set: PrefixSetMut::from(targets.keys().copied().map(Nibbles::unpack)), + storage_prefix_sets: targets + .iter() + .filter(|&(_hashed_address, slots)| (!slots.is_empty())) + .map(|(hashed_address, slots)| { + (*hashed_address, PrefixSetMut::from(slots.iter().map(Nibbles::unpack))) + }) + .collect(), + destroyed_accounts: Default::default(), + }); + let prefix_sets = prefix_sets.freeze(); + + let storage_root_targets = StorageRootTargets::new( + prefix_sets.account_prefix_set.iter().map(|nibbles| B256::from_slice(&nibbles.pack())), + prefix_sets.storage_prefix_sets.clone(), + ); + + // Pre-calculate storage roots for accounts which were changed. 
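+ // Each storage proof is computed on a rayon worker and sent back over a
+ // bounded channel; the receivers are kept per account and only awaited once
+ // the account walk below reaches the matching leaf, so account traversal and
+ // storage proof generation overlap.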
+ tracker.set_precomputed_storage_roots(storage_root_targets.len() as u64); + debug!(target: "trie::parallel_state_root", len = storage_root_targets.len(), "pre-generating storage proofs"); + let mut storage_proofs = HashMap::with_capacity(storage_root_targets.len()); + for (hashed_address, prefix_set) in + storage_root_targets.into_iter().sorted_unstable_by_key(|(address, _)| *address) + { + let view = self.view.clone(); + let target_slots: HashSet = + targets.get(&hashed_address).cloned().unwrap_or_default(); + + let trie_nodes_sorted = trie_nodes_sorted.clone(); + let hashed_state_sorted = hashed_state_sorted.clone(); + + let (tx, rx) = std::sync::mpsc::sync_channel(1); + + rayon::spawn_fifo(move || { + let result = (|| -> Result<_, ParallelStateRootError> { + let provider_ro = view.provider_ro()?; + let trie_cursor_factory = InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), + &trie_nodes_sorted, + ); + let hashed_cursor_factory = HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), + &hashed_state_sorted, + ); + + StorageProof::new_hashed( + trie_cursor_factory, + hashed_cursor_factory, + hashed_address, + ) + .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().cloned())) + .with_branch_node_hash_masks(self.collect_branch_node_hash_masks) + .storage_multiproof(target_slots) + .map_err(|e| { + ParallelStateRootError::StorageRoot(StorageRootError::Database( + DatabaseError::Other(e.to_string()), + )) + }) + })(); + if let Err(err) = tx.send(result) { + error!(target: "trie::parallel", ?hashed_address, err_content = ?err.0, "Failed to send proof result"); + } + }); + storage_proofs.insert(hashed_address, rx); + } + + let provider_ro = self.view.provider_ro()?; + let trie_cursor_factory = InMemoryTrieCursorFactory::new( + DatabaseTrieCursorFactory::new(provider_ro.tx_ref()), + &trie_nodes_sorted, + ); + let hashed_cursor_factory = HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(provider_ro.tx_ref()), + &hashed_state_sorted, + ); + + // Create the walker. + let walker = TrieWalker::new( + trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, + prefix_sets.account_prefix_set, + ) + .with_deletions_retained(true); + + // Create a hash builder to rebuild the root node since it is not available in the database. + let retainer: ProofRetainer = targets.keys().map(Nibbles::unpack).collect(); + let mut hash_builder = HashBuilder::default() + .with_proof_retainer(retainer) + .with_updates(self.collect_branch_node_hash_masks); + + let mut storages = HashMap::default(); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); + let mut account_node_iter = TrieNodeIter::new( + walker, + hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, + ); + while let Some(account_node) = + account_node_iter.try_next().map_err(ProviderError::Database)? 
+ { + match account_node { + TrieElement::Branch(node) => { + hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); + } + TrieElement::Leaf(hashed_address, account) => { + let storage_multiproof = match storage_proofs.remove(&hashed_address) { + Some(rx) => rx.recv().map_err(|_| { + ParallelStateRootError::StorageRoot(StorageRootError::Database( + DatabaseError::Other(format!( + "channel closed for {hashed_address}" + )), + )) + })??, + // Since we do not store all intermediate nodes in the database, there might + // be a possibility of re-adding a non-modified leaf to the hash builder. + None => { + tracker.inc_missed_leaves(); + StorageProof::new_hashed( + trie_cursor_factory.clone(), + hashed_cursor_factory.clone(), + hashed_address, + ) + .with_prefix_set_mut(Default::default()) + .storage_multiproof( + targets.get(&hashed_address).cloned().unwrap_or_default(), + ) + .map_err(|e| { + ParallelStateRootError::StorageRoot(StorageRootError::Database( + DatabaseError::Other(e.to_string()), + )) + })? + } + }; + + // Encode account + account_rlp.clear(); + let account = TrieAccount::from((account, storage_multiproof.root)); + account.encode(&mut account_rlp as &mut dyn BufMut); + + hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); + + // We might be adding leaves that are not necessarily our proof targets. + if targets.contains_key(&hashed_address) { + storages.insert(hashed_address, storage_multiproof); + } + } + } + } + let _ = hash_builder.root(); + + #[cfg(feature = "metrics")] + self.metrics.record_state_trie(tracker.finish()); + + let account_subtree = hash_builder.take_proof_nodes(); + let branch_node_hash_masks = if self.collect_branch_node_hash_masks { + hash_builder + .updated_branch_nodes + .unwrap_or_default() + .into_iter() + .map(|(path, node)| (path, node.hash_mask)) + .collect() + } else { + HashMap::default() + }; + + Ok(MultiProof { account_subtree, branch_node_hash_masks, storages }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_primitives::{keccak256, map::DefaultHashBuilder, Address, U256}; + use rand::Rng; + use reth_primitives::{Account, StorageEntry}; + use reth_provider::{test_utils::create_test_provider_factory, HashingWriter}; + use reth_trie::proof::Proof; + + #[test] + fn random_parallel_proof() { + let factory = create_test_provider_factory(); + let consistent_view = ConsistentDbView::new(factory.clone(), None); + + let mut rng = rand::thread_rng(); + let state = (0..100) + .map(|_| { + let address = Address::random(); + let account = + Account { balance: U256::from(rng.gen::()), ..Default::default() }; + let mut storage = HashMap::::default(); + let has_storage = rng.gen_bool(0.7); + if has_storage { + for _ in 0..100 { + storage.insert( + B256::from(U256::from(rng.gen::())), + U256::from(rng.gen::()), + ); + } + } + (address, (account, storage)) + }) + .collect::>(); + + { + let provider_rw = factory.provider_rw().unwrap(); + provider_rw + .insert_account_for_hashing( + state.iter().map(|(address, (account, _))| (*address, Some(*account))), + ) + .unwrap(); + provider_rw + .insert_storage_for_hashing(state.iter().map(|(address, (_, storage))| { + ( + *address, + storage + .iter() + .map(|(slot, value)| StorageEntry { key: *slot, value: *value }), + ) + })) + .unwrap(); + provider_rw.commit().unwrap(); + } + + let mut targets = + HashMap::, DefaultHashBuilder>::default(); + for (address, (_, storage)) in state.iter().take(10) { + let hashed_address = keccak256(*address); + let mut target_slots = 
HashSet::::default(); + + for (slot, _) in storage.iter().take(5) { + target_slots.insert(*slot); + } + + if !target_slots.is_empty() { + targets.insert(hashed_address, target_slots); + } + } + + let provider_rw = factory.provider_rw().unwrap(); + let trie_cursor_factory = DatabaseTrieCursorFactory::new(provider_rw.tx_ref()); + let hashed_cursor_factory = DatabaseHashedCursorFactory::new(provider_rw.tx_ref()); + + assert_eq!( + ParallelProof::new(consistent_view, Default::default()) + .multiproof(targets.clone()) + .unwrap(), + Proof::new(trie_cursor_factory, hashed_cursor_factory).multiproof(targets).unwrap() + ); + } +} diff --git a/crates/trie/parallel/src/parallel_root.rs b/crates/trie/parallel/src/root.rs similarity index 94% rename from crates/trie/parallel/src/parallel_root.rs rename to crates/trie/parallel/src/root.rs index e432b91062c..2aace02ed9b 100644 --- a/crates/trie/parallel/src/parallel_root.rs +++ b/crates/trie/parallel/src/root.rs @@ -4,9 +4,11 @@ use crate::{stats::ParallelTrieTracker, storage_root_targets::StorageRootTargets use alloy_primitives::B256; use alloy_rlp::{BufMut, Encodable}; use itertools::Itertools; +use reth_db::DatabaseError; use reth_execution_errors::StorageRootError; use reth_provider::{ providers::ConsistentDbView, BlockReader, DBProvider, DatabaseProviderFactory, ProviderError, + StateCommitmentProvider, }; use reth_trie::{ hashed_cursor::{HashedCursorFactory, HashedPostStateCursorFactory}, @@ -14,7 +16,7 @@ use reth_trie::{ trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory}, updates::TrieUpdates, walker::TrieWalker, - HashBuilder, Nibbles, StorageRoot, TrieAccount, TrieInput, + HashBuilder, Nibbles, StorageRoot, TrieAccount, TrieInput, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use std::{collections::HashMap, sync::Arc}; @@ -57,7 +59,12 @@ impl ParallelStateRoot { impl ParallelStateRoot where - Factory: DatabaseProviderFactory + Clone + Send + Sync + 'static, + Factory: DatabaseProviderFactory + + StateCommitmentProvider + + Clone + + Send + + Sync + + 'static, { /// Calculate incremental state root in parallel. pub fn incremental_root(self) -> Result { @@ -114,10 +121,10 @@ where trie_cursor_factory, hashed_state, hashed_address, + prefix_set, #[cfg(feature = "metrics")] metrics, ) - .with_prefix_set(prefix_set) .calculate(retain_updates)?) })(); let _ = tx.send(result); @@ -149,7 +156,7 @@ where ); let mut hash_builder = HashBuilder::default().with_updates(retain_updates); - let mut account_rlp = Vec::with_capacity(128); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); while let Some(node) = account_node_iter.try_next().map_err(ProviderError::Database)? { match node { TrieElement::Branch(node) => { @@ -172,6 +179,7 @@ where trie_cursor_factory.clone(), hashed_cursor_factory.clone(), hashed_address, + Default::default(), #[cfg(feature = "metrics")] self.metrics.storage_trie.clone(), ) @@ -193,11 +201,8 @@ where let root = hash_builder.root(); - trie_updates.finalize( - account_node_iter.walker, - hash_builder, - prefix_sets.destroyed_accounts, - ); + let removed_keys = account_node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys, prefix_sets.destroyed_accounts); let stats = tracker.finish(); @@ -228,6 +233,9 @@ pub enum ParallelStateRootError { /// Provider error. #[error(transparent)] Provider(#[from] ProviderError), + /// Other unspecified error. 
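+ /// Converted into [`DatabaseError::Other`] when mapped into a [`ProviderError`].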
+ #[error("{_0}")] + Other(String), } impl From for ProviderError { @@ -237,6 +245,7 @@ impl From for ProviderError { ParallelStateRootError::StorageRoot(StorageRootError::Database(error)) => { Self::Database(error) } + ParallelStateRootError::Other(other) => Self::Database(DatabaseError::Other(other)), } } } diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index c31bbe2df2f..09826e41084 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -14,31 +14,38 @@ workspace = true [dependencies] # reth -reth-primitives.workspace = true +reth-primitives-traits.workspace = true +reth-execution-errors.workspace = true reth-trie-common.workspace = true -reth-trie.workspace = true +reth-tracing.workspace = true # alloy alloy-primitives.workspace = true alloy-rlp.workspace = true -# tracing -tracing.workspace = true - # misc -thiserror.workspace = true -rayon.workspace = true smallvec = { workspace = true, features = ["const_new"] } +thiserror.workspace = true [dev-dependencies] -reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } -reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } +reth-primitives-traits = { workspace = true, features = ["arbitrary"] } reth-trie = { workspace = true, features = ["test-utils"] } +reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } +reth-testing-utils.workspace = true + +arbitrary.workspace = true assert_matches.workspace = true +criterion.workspace = true itertools.workspace = true +pretty_assertions = "1.4" +proptest-arbitrary-interop.workspace = true proptest.workspace = true -criterion.workspace = true +rand.workspace = true [[bench]] name = "root" harness = false + +[[bench]] +name = "rlp_node" +harness = false diff --git a/crates/trie/sparse/benches/rlp_node.rs b/crates/trie/sparse/benches/rlp_node.rs new file mode 100644 index 00000000000..57ab52978b6 --- /dev/null +++ b/crates/trie/sparse/benches/rlp_node.rs @@ -0,0 +1,78 @@ +#![allow(missing_docs, unreachable_pub)] + +use std::time::{Duration, Instant}; + +use alloy_primitives::{B256, U256}; +use criterion::{criterion_group, criterion_main, Criterion}; +use prop::strategy::ValueTree; +use proptest::{prelude::*, test_runner::TestRunner}; +use rand::seq::IteratorRandom; +use reth_testing_utils::generators; +use reth_trie::Nibbles; +use reth_trie_sparse::RevealedSparseTrie; + +pub fn update_rlp_node_level(c: &mut Criterion) { + let mut rng = generators::rng(); + + let mut group = c.benchmark_group("update rlp node level"); + group.sample_size(20); + + for size in [100_000] { + let mut runner = TestRunner::new(ProptestConfig::default()); + let state = proptest::collection::hash_map(any::(), any::(), size) + .new_tree(&mut runner) + .unwrap() + .current(); + + // Create a sparse trie with `size` leaves + let mut sparse = RevealedSparseTrie::default(); + for (key, value) in &state { + sparse + .update_leaf(Nibbles::unpack(key), alloy_rlp::encode_fixed_size(value).to_vec()) + .unwrap(); + } + sparse.root(); + + for updated_leaves in [0.1, 1.0] { + for key in state + .keys() + .choose_multiple(&mut rng, (size as f64 * (updated_leaves / 100.0)) as usize) + { + sparse + .update_leaf( + Nibbles::unpack(key), + alloy_rlp::encode_fixed_size(&rng.gen::()).to_vec(), + ) + .unwrap(); + } + + // Calculate the maximum depth of the trie for the given number of leaves + let max_depth = (size as f64).log(16.0).ceil() as usize; + + for depth in 0..=max_depth { + group.bench_function( + 
format!("size {size} | updated {updated_leaves}% | depth {depth}"), + |b| { + // Use `iter_custom` to avoid measuring clones and drops + b.iter_custom(|iters| { + let mut elapsed = Duration::ZERO; + + let mut cloned = sparse.clone(); + for _ in 0..iters { + let start = Instant::now(); + cloned.update_rlp_node_level(depth); + elapsed += start.elapsed(); + cloned = sparse.clone(); + } + + elapsed + }) + }, + ); + } + } + } +} + +criterion_group!(rlp_node, update_rlp_node_level); +criterion_main!(rlp_node); diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs index 4078eb7af31..d8d210c1b19 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -1,4 +1,5 @@ #![allow(missing_docs, unreachable_pub)] + use alloy_primitives::{map::HashMap, B256, U256}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use itertools::Itertools; @@ -90,12 +91,23 @@ pub fn calculate_root_from_leaves_repeated(c: &mut Criterion) { }, |(init_storage, storage_updates, mut trie_updates)| { let mut storage = init_storage; - for update in storage_updates { + let mut storage_updates = storage_updates.into_iter().peekable(); + while let Some(update) = storage_updates.next() { storage.extend(&update); let prefix_set = update.construct_prefix_set().freeze(); - let storage_sorted = storage.clone().into_sorted(); - let trie_updates_sorted = trie_updates.clone().into_sorted(); + let (storage_sorted, trie_updates_sorted) = + if storage_updates.peek().is_some() { + ( + storage.clone().into_sorted(), + trie_updates.clone().into_sorted(), + ) + } else { + ( + std::mem::take(&mut storage).into_sorted(), + std::mem::take(&mut trie_updates).into_sorted(), + ) + }; let walker = TrieWalker::new( InMemoryStorageTrieCursor::new( @@ -133,7 +145,9 @@ pub fn calculate_root_from_leaves_repeated(c: &mut Criterion) { } hb.root(); - trie_updates.finalize(node_iter.walker, hb); + if storage_updates.peek().is_some() { + trie_updates.finalize(hb, node_iter.walker.take_removed_keys()); + } } }, ) @@ -185,6 +199,8 @@ fn generate_test_data(size: usize) -> HashMap { .new_tree(&mut runner) .unwrap() .current() + .into_iter() + .collect() } criterion_group!(root, calculate_root_from_leaves, calculate_root_from_leaves_repeated); diff --git a/crates/trie/sparse/src/blinded.rs b/crates/trie/sparse/src/blinded.rs new file mode 100644 index 00000000000..22471cf99ff --- /dev/null +++ b/crates/trie/sparse/src/blinded.rs @@ -0,0 +1,65 @@ +//! Traits and default implementations related to retrieval of blinded trie nodes. + +use alloy_primitives::{Bytes, B256}; +use reth_execution_errors::SparseTrieError; +use reth_trie_common::Nibbles; + +/// Factory for instantiating blinded node providers. +pub trait BlindedProviderFactory { + /// Type capable of fetching blinded account nodes. + type AccountNodeProvider: BlindedProvider; + /// Type capable of fetching blinded storage nodes. + type StorageNodeProvider: BlindedProvider; + + /// Returns blinded account node provider. + fn account_node_provider(&self) -> Self::AccountNodeProvider; + + /// Returns blinded storage node provider. + fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider; +} + +/// Trie node provider for retrieving blinded nodes. +pub trait BlindedProvider { + /// The error type for the provider. + type Error: Into; + + /// Retrieve blinded node by path. 
+ fn blinded_node(&mut self, path: Nibbles) -> Result, Self::Error>; +} + +/// Default blinded node provider factory that creates [`DefaultBlindedProvider`]. +#[derive(PartialEq, Eq, Clone, Default, Debug)] +pub struct DefaultBlindedProviderFactory; + +impl BlindedProviderFactory for DefaultBlindedProviderFactory { + type AccountNodeProvider = DefaultBlindedProvider; + type StorageNodeProvider = DefaultBlindedProvider; + + fn account_node_provider(&self) -> Self::AccountNodeProvider { + DefaultBlindedProvider + } + + fn storage_node_provider(&self, _account: B256) -> Self::StorageNodeProvider { + DefaultBlindedProvider + } +} + +/// Default blinded node provider that always returns `Ok(None)`. +#[derive(PartialEq, Eq, Clone, Default, Debug)] +pub struct DefaultBlindedProvider; + +impl BlindedProvider for DefaultBlindedProvider { + type Error = SparseTrieError; + + fn blinded_node(&mut self, _path: Nibbles) -> Result, Self::Error> { + Ok(None) + } +} + +/// Right pad the path with 0s and return as [`B256`]. +#[inline] +pub fn pad_path_to_key(path: &Nibbles) -> B256 { + let mut padded = path.pack(); + padded.resize(32, 0); + B256::from_slice(&padded) +} diff --git a/crates/trie/sparse/src/errors.rs b/crates/trie/sparse/src/errors.rs deleted file mode 100644 index f60d1736c06..00000000000 --- a/crates/trie/sparse/src/errors.rs +++ /dev/null @@ -1,49 +0,0 @@ -//! Errors for sparse trie. - -use alloy_primitives::{Bytes, B256}; -use reth_trie::Nibbles; -use thiserror::Error; - -/// Result type with [`SparseStateTrieError`] as error. -pub type SparseStateTrieResult = Result; - -/// Error encountered in [`crate::SparseStateTrie`]. -#[derive(Error, Debug)] -pub enum SparseStateTrieError { - /// Encountered invalid root node. - #[error("invalid root node at {path:?}: {node:?}")] - InvalidRootNode { - /// Path to first proof node. - path: Nibbles, - /// Encoded first proof node. - node: Bytes, - }, - /// Sparse trie error. - #[error(transparent)] - Sparse(#[from] SparseTrieError), - /// RLP error. - #[error(transparent)] - Rlp(#[from] alloy_rlp::Error), -} - -/// Result type with [`SparseTrieError`] as error. -pub type SparseTrieResult = Result; - -/// Error encountered in [`crate::SparseTrie`]. -#[derive(Error, Debug)] -pub enum SparseTrieError { - /// Sparse trie is still blind. Thrown on attempt to update it. - #[error("sparse trie is blind")] - Blind, - /// Encountered blinded node on update. - #[error("attempted to update blind node at {path:?}: {hash}")] - BlindedNode { - /// Blind node path. - path: Nibbles, - /// Node hash - hash: B256, - }, - /// RLP error. - #[error(transparent)] - Rlp(#[from] alloy_rlp::Error), -} diff --git a/crates/trie/sparse/src/lib.rs b/crates/trie/sparse/src/lib.rs index b3cb2c5fdff..1a0f3f73648 100644 --- a/crates/trie/sparse/src/lib.rs +++ b/crates/trie/sparse/src/lib.rs @@ -6,5 +6,11 @@ pub use state::*; mod trie; pub use trie::*; -mod errors; -pub use errors::*; +pub mod blinded; + +/// Re-export sparse trie error types. 
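+/// The types themselves are defined in `reth-execution-errors` and are only re-exported here.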
+pub mod errors { + pub use reth_execution_errors::{ + SparseStateTrieError, SparseStateTrieResult, SparseTrieError, SparseTrieResult, + }; +} diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index cfb17ef36ff..6638632f0ad 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,22 +1,61 @@ -use crate::{SparseStateTrieError, SparseStateTrieResult, SparseTrie}; +use crate::{ + blinded::{BlindedProvider, BlindedProviderFactory, DefaultBlindedProviderFactory}, + RevealedSparseTrie, SparseTrie, +}; use alloy_primitives::{ + hex, map::{HashMap, HashSet}, Bytes, B256, }; -use alloy_rlp::Decodable; -use reth_trie::{Nibbles, TrieNode}; +use alloy_rlp::{Decodable, Encodable}; +use reth_execution_errors::{SparseStateTrieError, SparseStateTrieResult, SparseTrieError}; +use reth_primitives_traits::Account; +use reth_tracing::tracing::trace; +use reth_trie_common::{ + updates::{StorageTrieUpdates, TrieUpdates}, + MultiProof, Nibbles, TrieAccount, TrieNode, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE, +}; +use std::{fmt, iter::Peekable}; /// Sparse state trie representing lazy-loaded Ethereum state trie. -#[derive(Default, Debug)] -pub struct SparseStateTrie { +pub struct SparseStateTrie { + /// Blinded node provider factory. + provider_factory: F, /// Sparse account trie. - pub(crate) state: SparseTrie, + state: SparseTrie, /// Sparse storage tries. - #[allow(dead_code)] - pub(crate) storages: HashMap, + storages: HashMap>, /// Collection of revealed account and storage keys. - #[allow(dead_code)] - pub(crate) revealed: HashMap>, + revealed: HashMap>, + /// Flag indicating whether trie updates should be retained. + retain_updates: bool, + /// Reusable buffer for RLP encoding of trie accounts. + account_rlp_buf: Vec, +} + +impl Default for SparseStateTrie { + fn default() -> Self { + Self { + provider_factory: Default::default(), + state: Default::default(), + storages: Default::default(), + revealed: Default::default(), + retain_updates: false, + account_rlp_buf: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE), + } + } +} + +impl fmt::Debug for SparseStateTrie
<F>
{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SparseStateTrie") + .field("state", &self.state) + .field("storages", &self.storages) + .field("revealed", &self.revealed) + .field("retain_updates", &self.retain_updates) + .field("account_rlp_buf", &hex::encode(&self.account_rlp_buf)) + .finish_non_exhaustive() + } } impl SparseStateTrie { @@ -24,6 +63,26 @@ impl SparseStateTrie { pub fn from_state(state: SparseTrie) -> Self { Self { state, ..Default::default() } } +} + +impl SparseStateTrie { + /// Create new [`SparseStateTrie`] with blinded node provider factory. + pub fn new(provider_factory: F) -> Self { + Self { + provider_factory, + state: Default::default(), + storages: Default::default(), + revealed: Default::default(), + retain_updates: false, + account_rlp_buf: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE), + } + } + + /// Set the retention of branch node updates and deletions. + pub const fn with_updates(mut self, retain_updates: bool) -> Self { + self.retain_updates = retain_updates; + self + } /// Returns `true` if account was already revealed. pub fn is_account_revealed(&self, account: &B256) -> bool { @@ -32,68 +91,361 @@ impl SparseStateTrie { /// Returns `true` if storage slot for account was already revealed. pub fn is_storage_slot_revealed(&self, account: &B256, slot: &B256) -> bool { - self.revealed.get(account).map_or(false, |slots| slots.contains(slot)) + self.revealed.get(account).is_some_and(|slots| slots.contains(slot)) + } + + /// Returns mutable reference to storage sparse trie if it was revealed. + pub fn storage_trie_mut( + &mut self, + account: &B256, + ) -> Option<&mut RevealedSparseTrie> { + self.storages.get_mut(account).and_then(|e| e.as_revealed_mut()) } - /// Reveal unknown trie paths from provided leaf path and its proof. + /// Reveal unknown trie paths from provided leaf path and its proof for the account. + /// + /// Panics if trie updates retention is enabled. + /// /// NOTE: This method does not extensively validate the proof. pub fn reveal_account( &mut self, account: B256, proof: impl IntoIterator, ) -> SparseStateTrieResult<()> { + assert!(!self.retain_updates); + + if self.is_account_revealed(&account) { + return Ok(()); + } + let mut proof = proof.into_iter().peekable(); - // reveal root and initialize the trie if not already - let Some((path, node)) = proof.next() else { return Ok(()) }; - if !path.is_empty() { - return Err(SparseStateTrieError::InvalidRootNode { path, node }) + let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) }; + + // Reveal root node if it wasn't already. + let trie = self.state.reveal_root_with_provider( + self.provider_factory.account_node_provider(), + root_node, + None, + self.retain_updates, + )?; + + // Reveal the remaining proof nodes. + for (path, bytes) in proof { + let node = TrieNode::decode(&mut &bytes[..])?; + trie.reveal_node(path, node, None)?; } - // Decode root node and perform sanity check. - let root_node = TrieNode::decode(&mut &node[..])?; - if matches!(root_node, TrieNode::EmptyRoot) && proof.peek().is_some() { - return Err(SparseStateTrieError::InvalidRootNode { path, node }) + // Mark leaf path as revealed. + self.revealed.entry(account).or_default(); + + Ok(()) + } + + /// Reveal unknown trie paths from provided leaf path and its proof for the storage slot. + /// + /// Panics if trie updates retention is enabled. + /// + /// NOTE: This method does not extensively validate the proof. 
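+ /// Only the root node is sanity-checked; the remaining proof nodes are
+ /// decoded and revealed as-is.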
+ pub fn reveal_storage_slot( + &mut self, + account: B256, + slot: B256, + proof: impl IntoIterator, + ) -> SparseStateTrieResult<()> { + assert!(!self.retain_updates); + + if self.is_storage_slot_revealed(&account, &slot) { + return Ok(()); } + let mut proof = proof.into_iter().peekable(); + + let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) }; + // Reveal root node if it wasn't already. - let trie = self.state.reveal_root(root_node)?; + let trie = self.storages.entry(account).or_default().reveal_root_with_provider( + self.provider_factory.storage_node_provider(account), + root_node, + None, + self.retain_updates, + )?; - // add the remaining proof nodes + // Reveal the remaining proof nodes. for (path, bytes) in proof { let node = TrieNode::decode(&mut &bytes[..])?; - trie.reveal_node(path, node)?; + trie.reveal_node(path, node, None)?; } // Mark leaf path as revealed. - self.revealed.entry(account).or_default(); + self.revealed.entry(account).or_default().insert(slot); Ok(()) } - /// Update the leaf node. - pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseStateTrieResult<()> { - self.state.update_leaf(path, value)?; + /// Reveal unknown trie paths from multiproof and the list of included accounts and slots. + /// NOTE: This method does not extensively validate the proof. + pub fn reveal_multiproof( + &mut self, + targets: HashMap>, + multiproof: MultiProof, + ) -> SparseStateTrieResult<()> { + let account_subtree = multiproof.account_subtree.into_nodes_sorted(); + let mut account_nodes = account_subtree.into_iter().peekable(); + + if let Some(root_node) = self.validate_root_node(&mut account_nodes)? { + // Reveal root node if it wasn't already. + let trie = self.state.reveal_root_with_provider( + self.provider_factory.account_node_provider(), + root_node, + multiproof.branch_node_hash_masks.get(&Nibbles::default()).copied(), + self.retain_updates, + )?; + + // Reveal the remaining proof nodes. + for (path, bytes) in account_nodes { + let node = TrieNode::decode(&mut &bytes[..])?; + let hash_mask = if let TrieNode::Branch(_) = node { + multiproof.branch_node_hash_masks.get(&path).copied() + } else { + None + }; + + trace!(target: "trie::sparse", ?path, ?node, ?hash_mask, "Revealing account node"); + trie.reveal_node(path, node, hash_mask)?; + } + } + + for (account, storage_subtree) in multiproof.storages { + let subtree = storage_subtree.subtree.into_nodes_sorted(); + let mut nodes = subtree.into_iter().peekable(); + + if let Some(root_node) = self.validate_root_node(&mut nodes)? { + // Reveal root node if it wasn't already. + let trie = self.storages.entry(account).or_default().reveal_root_with_provider( + self.provider_factory.storage_node_provider(account), + root_node, + storage_subtree.branch_node_hash_masks.get(&Nibbles::default()).copied(), + self.retain_updates, + )?; + + // Reveal the remaining proof nodes. + for (path, bytes) in nodes { + let node = TrieNode::decode(&mut &bytes[..])?; + let hash_mask = if let TrieNode::Branch(_) = node { + storage_subtree.branch_node_hash_masks.get(&path).copied() + } else { + None + }; + + trace!(target: "trie::sparse", ?account, ?path, ?node, ?hash_mask, "Revealing storage node"); + trie.reveal_node(path, node, hash_mask)?; + } + } + } + + for (account, slots) in targets { + self.revealed.entry(account).or_default().extend(slots); + } + + Ok(()) + } + + /// Validates the root node of the proof and returns it if it exists and is valid. 
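+ ///
+ /// Returns `Ok(None)` if the proof is empty. Errors if the first node is not
+ /// at the empty path, or if an empty root is followed by more proof nodes.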
+ fn validate_root_node>( + &self, + proof: &mut Peekable, + ) -> SparseStateTrieResult> { + let mut proof = proof.into_iter().peekable(); + + // Validate root node. + let Some((path, node)) = proof.next() else { return Ok(None) }; + if !path.is_empty() { + return Err(SparseStateTrieError::InvalidRootNode { path, node }) + } + + // Decode root node and perform sanity check. + let root_node = TrieNode::decode(&mut &node[..])?; + if matches!(root_node, TrieNode::EmptyRoot) && proof.peek().is_some() { + return Err(SparseStateTrieError::InvalidRootNode { path, node }) + } + + Ok(Some(root_node)) + } + + /// Wipe the storage trie at the provided address. + pub fn wipe_storage(&mut self, address: B256) -> SparseStateTrieResult<()> { + if let Some(trie) = self.storages.get_mut(&address) { + trie.wipe()?; + } Ok(()) } + /// Calculates the hashes of the nodes below the provided level. + pub fn calculate_below_level(&mut self, level: usize) { + self.state.calculate_below_level(level); + } + + /// Returns storage sparse trie root if the trie has been revealed. + pub fn storage_root(&mut self, account: B256) -> Option { + self.storages.get_mut(&account).and_then(|trie| trie.root()) + } + /// Returns sparse trie root if the trie has been revealed. pub fn root(&mut self) -> Option { self.state.root() } + + /// Returns [`TrieUpdates`] by taking the updates from the revealed sparse tries. + /// + /// Returns `None` if the accounts trie is not revealed. + pub fn take_trie_updates(&mut self) -> Option { + self.state.as_revealed_mut().map(|state| { + let updates = state.take_updates(); + TrieUpdates { + account_nodes: updates.updated_nodes, + removed_nodes: updates.removed_nodes, + storage_tries: self + .storages + .iter_mut() + .map(|(address, trie)| { + let trie = trie.as_revealed_mut().unwrap(); + let updates = trie.take_updates(); + let updates = StorageTrieUpdates { + is_deleted: updates.wiped, + storage_nodes: updates.updated_nodes, + removed_nodes: updates.removed_nodes, + }; + (*address, updates) + }) + .filter(|(_, updates)| !updates.is_empty()) + .collect(), + } + }) + } +} +impl SparseStateTrie +where + F: BlindedProviderFactory, + SparseTrieError: From<::Error> + + From<::Error>, +{ + /// Update the account leaf node. + pub fn update_account_leaf( + &mut self, + path: Nibbles, + value: Vec, + ) -> SparseStateTrieResult<()> { + self.state.update_leaf(path, value)?; + Ok(()) + } + + /// Update the leaf node of a storage trie at the provided address. + pub fn update_storage_leaf( + &mut self, + address: B256, + slot: Nibbles, + value: Vec, + ) -> SparseStateTrieResult<()> { + if let Some(storage_trie) = self.storages.get_mut(&address) { + Ok(storage_trie.update_leaf(slot, value)?) + } else { + Err(SparseStateTrieError::Sparse(SparseTrieError::Blind)) + } + } + + /// Update or remove trie account based on new account info. This method will either recompute + /// the storage root based on update storage trie or look it up from existing leaf value. + /// + /// If the new account info and storage trie are empty, the account leaf will be removed. + pub fn update_account(&mut self, address: B256, account: Account) -> SparseStateTrieResult<()> { + let nibbles = Nibbles::unpack(address); + let storage_root = if let Some(storage_trie) = self.storages.get_mut(&address) { + trace!(target: "trie::sparse", ?address, "Calculating storage root to update account"); + storage_trie.root().ok_or(SparseTrieError::Blind)? 
+ } else if self.revealed.contains_key(&address) { + trace!(target: "trie::sparse", ?address, "Retrieving storage root from account leaf to update account"); + let state = self.state.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + // The account was revealed, either... + if let Some(value) = state.get_leaf_value(&nibbles) { + // ..it exists and we should take it's current storage root or... + TrieAccount::decode(&mut &value[..])?.storage_root + } else { + // ...the account is newly created and the storage trie is empty. + EMPTY_ROOT_HASH + } + } else { + return Err(SparseTrieError::Blind.into()) + }; + + if account.is_empty() && storage_root == EMPTY_ROOT_HASH { + trace!(target: "trie::sparse", ?address, "Removing account"); + self.remove_account_leaf(&nibbles) + } else { + trace!(target: "trie::sparse", ?address, "Updating account"); + self.account_rlp_buf.clear(); + TrieAccount::from((account, storage_root)).encode(&mut self.account_rlp_buf); + self.update_account_leaf(nibbles, self.account_rlp_buf.clone()) + } + } + + /// Remove the account leaf node. + pub fn remove_account_leaf(&mut self, path: &Nibbles) -> SparseStateTrieResult<()> { + self.state.remove_leaf(path)?; + Ok(()) + } + + /// Update the leaf node of a storage trie at the provided address. + pub fn remove_storage_leaf( + &mut self, + address: B256, + slot: &Nibbles, + ) -> SparseStateTrieResult<()> { + if let Some(storage_trie) = self.storages.get_mut(&address) { + Ok(storage_trie.remove_leaf(slot)?) + } else { + Err(SparseStateTrieError::Sparse(SparseTrieError::Blind)) + } + } } #[cfg(test)] mod tests { use super::*; - use alloy_primitives::Bytes; + use alloy_primitives::{b256, Bytes, U256}; use alloy_rlp::EMPTY_STRING_CODE; + use arbitrary::Arbitrary; use assert_matches::assert_matches; - use reth_trie::HashBuilder; - use reth_trie_common::proof::ProofRetainer; + use rand::{rngs::StdRng, Rng, SeedableRng}; + use reth_primitives_traits::Account; + use reth_trie::{updates::StorageTrieUpdates, HashBuilder, TrieAccount, EMPTY_ROOT_HASH}; + use reth_trie_common::{proof::ProofRetainer, StorageMultiProof, TrieMask}; + + #[test] + fn validate_root_node_first_node_not_root() { + let sparse = SparseStateTrie::default(); + let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; + assert_matches!( + sparse.validate_root_node(&mut proof.into_iter().peekable(),), + Err(SparseStateTrieError::InvalidRootNode { .. }) + ); + } #[test] - fn sparse_trie_reveal_empty() { + fn validate_root_node_invalid_proof_with_empty_root() { + let sparse = SparseStateTrie::default(); + let proof = [ + (Nibbles::default(), Bytes::from([EMPTY_STRING_CODE])), + (Nibbles::from_nibbles([0x1]), Bytes::new()), + ]; + assert_matches!( + sparse.validate_root_node(&mut proof.into_iter().peekable(),), + Err(SparseStateTrieError::InvalidRootNode { .. 
}) + ); + } + + #[test] + fn reveal_account_empty() { let retainer = ProofRetainer::from_iter([Nibbles::default()]); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); hash_builder.root(); @@ -102,30 +454,156 @@ mod tests { let mut sparse = SparseStateTrie::default(); assert_eq!(sparse.state, SparseTrie::Blind); + sparse.reveal_account(Default::default(), proofs.into_inner()).unwrap(); assert_eq!(sparse.state, SparseTrie::revealed_empty()); } #[test] - fn reveal_first_node_not_root() { + fn reveal_storage_slot_empty() { + let retainer = ProofRetainer::from_iter([Nibbles::default()]); + let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); + hash_builder.root(); + let proofs = hash_builder.take_proof_nodes(); + assert_eq!(proofs.len(), 1); + let mut sparse = SparseStateTrie::default(); - let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; - assert_matches!( - sparse.reveal_account(Default::default(), proof), - Err(SparseStateTrieError::InvalidRootNode { .. }) + assert!(sparse.storages.is_empty()); + + sparse + .reveal_storage_slot(Default::default(), Default::default(), proofs.into_inner()) + .unwrap(); + assert_eq!( + sparse.storages, + HashMap::from_iter([(Default::default(), SparseTrie::revealed_empty())]) ); } #[test] - fn reveal_invalid_proof_with_empty_root() { - let mut sparse = SparseStateTrie::default(); - let proof = [ - (Nibbles::default(), Bytes::from([EMPTY_STRING_CODE])), - (Nibbles::from_nibbles([0x1]), Bytes::new()), - ]; - assert_matches!( - sparse.reveal_account(Default::default(), proof), - Err(SparseStateTrieError::InvalidRootNode { .. }) + fn take_trie_updates() { + reth_tracing::init_test_tracing(); + + // let mut rng = generators::rng(); + let mut rng = StdRng::seed_from_u64(1); + + let mut bytes = [0u8; 1024]; + rng.fill(bytes.as_mut_slice()); + + let slot_1 = b256!("1000000000000000000000000000000000000000000000000000000000000000"); + let slot_path_1 = Nibbles::unpack(slot_1); + let value_1 = U256::from(rng.gen::()); + let slot_2 = b256!("1100000000000000000000000000000000000000000000000000000000000000"); + let slot_path_2 = Nibbles::unpack(slot_2); + let value_2 = U256::from(rng.gen::()); + let slot_3 = b256!("2000000000000000000000000000000000000000000000000000000000000000"); + let slot_path_3 = Nibbles::unpack(slot_3); + let value_3 = U256::from(rng.gen::()); + + let mut storage_hash_builder = + HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter([ + slot_path_1.clone(), + slot_path_2.clone(), + ])); + storage_hash_builder.add_leaf(slot_path_1, &alloy_rlp::encode_fixed_size(&value_1)); + storage_hash_builder.add_leaf(slot_path_2, &alloy_rlp::encode_fixed_size(&value_2)); + + let storage_root = storage_hash_builder.root(); + let storage_proof_nodes = storage_hash_builder.take_proof_nodes(); + let storage_branch_node_hash_masks = HashMap::from_iter([ + (Nibbles::default(), TrieMask::new(0b010)), + (Nibbles::from_nibbles([0x1]), TrieMask::new(0b11)), + ]); + + let address_1 = b256!("1000000000000000000000000000000000000000000000000000000000000000"); + let address_path_1 = Nibbles::unpack(address_1); + let account_1 = Account::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(); + let mut trie_account_1 = TrieAccount::from((account_1, storage_root)); + let address_2 = b256!("1100000000000000000000000000000000000000000000000000000000000000"); + let address_path_2 = Nibbles::unpack(address_2); + let account_2 = Account::arbitrary(&mut 
arbitrary::Unstructured::new(&bytes)).unwrap(); + let mut trie_account_2 = TrieAccount::from((account_2, EMPTY_ROOT_HASH)); + + let mut hash_builder = + HashBuilder::default().with_proof_retainer(ProofRetainer::from_iter([ + address_path_1.clone(), + address_path_2.clone(), + ])); + hash_builder.add_leaf(address_path_1.clone(), &alloy_rlp::encode(trie_account_1)); + hash_builder.add_leaf(address_path_2.clone(), &alloy_rlp::encode(trie_account_2)); + + let root = hash_builder.root(); + let proof_nodes = hash_builder.take_proof_nodes(); + + let mut sparse = SparseStateTrie::default().with_updates(true); + sparse + .reveal_multiproof( + HashMap::from_iter([ + (address_1, HashSet::from_iter([slot_1, slot_2])), + (address_2, HashSet::from_iter([slot_1, slot_2])), + ]), + MultiProof { + account_subtree: proof_nodes, + branch_node_hash_masks: HashMap::from_iter([( + Nibbles::from_nibbles([0x1]), + TrieMask::new(0b00), + )]), + storages: HashMap::from_iter([ + ( + address_1, + StorageMultiProof { + root, + subtree: storage_proof_nodes.clone(), + branch_node_hash_masks: storage_branch_node_hash_masks.clone(), + }, + ), + ( + address_2, + StorageMultiProof { + root, + subtree: storage_proof_nodes, + branch_node_hash_masks: storage_branch_node_hash_masks, + }, + ), + ]), + }, + ) + .unwrap(); + + assert_eq!(sparse.root(), Some(root)); + + let address_3 = b256!("2000000000000000000000000000000000000000000000000000000000000000"); + let address_path_3 = Nibbles::unpack(address_3); + let account_3 = Account { nonce: account_1.nonce + 1, ..account_1 }; + let trie_account_3 = TrieAccount::from((account_3, EMPTY_ROOT_HASH)); + + sparse.update_account_leaf(address_path_3, alloy_rlp::encode(trie_account_3)).unwrap(); + + sparse.update_storage_leaf(address_1, slot_path_3, alloy_rlp::encode(value_3)).unwrap(); + trie_account_1.storage_root = sparse.storage_root(address_1).unwrap(); + sparse.update_account_leaf(address_path_1, alloy_rlp::encode(trie_account_1)).unwrap(); + + sparse.wipe_storage(address_2).unwrap(); + trie_account_2.storage_root = sparse.storage_root(address_2).unwrap(); + sparse.update_account_leaf(address_path_2, alloy_rlp::encode(trie_account_2)).unwrap(); + + sparse.root(); + + let sparse_updates = sparse.take_trie_updates().unwrap(); + // TODO(alexey): assert against real state root calculation updates + pretty_assertions::assert_eq!( + sparse_updates, + TrieUpdates { + account_nodes: HashMap::default(), + storage_tries: HashMap::from_iter([( + b256!("1100000000000000000000000000000000000000000000000000000000000000"), + StorageTrieUpdates { + is_deleted: true, + storage_nodes: HashMap::default(), + removed_nodes: HashSet::default() + } + )]), + removed_nodes: HashSet::default() + } ); } } diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 2edaaf76b27..3cc0e8703c4 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,41 +1,79 @@ -use crate::{SparseTrieError, SparseTrieResult}; -use alloy_primitives::{hex, keccak256, map::HashMap, B256}; -use alloy_rlp::Decodable; -use reth_trie::{ - prefix_set::{PrefixSet, PrefixSetMut}, - RlpNode, +use crate::blinded::{BlindedProvider, DefaultBlindedProvider}; +use alloy_primitives::{ + hex, keccak256, + map::{Entry, HashMap, HashSet}, + B256, }; +use alloy_rlp::Decodable; +use reth_execution_errors::{SparseTrieError, SparseTrieResult}; +use reth_tracing::tracing::trace; use reth_trie_common::{ - BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, TrieMask, TrieNode, CHILD_INDEX_RANGE, - 
EMPTY_ROOT_HASH, + prefix_set::{PrefixSet, PrefixSetMut}, + BranchNodeCompact, BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, RlpNode, TrieMask, + TrieNode, CHILD_INDEX_RANGE, EMPTY_ROOT_HASH, }; use smallvec::SmallVec; -use std::{collections::HashSet, fmt}; +use std::{borrow::Cow, fmt}; /// Inner representation of the sparse trie. /// Sparse trie is blind by default until nodes are revealed. -#[derive(PartialEq, Eq, Default, Debug)] -pub enum SparseTrie { +#[derive(PartialEq, Eq)] +pub enum SparseTrie
<P = DefaultBlindedProvider>
{ /// None of the trie nodes are known. - #[default] Blind, /// The trie nodes have been revealed. - Revealed(RevealedSparseTrie), + Revealed(Box>), +} + +impl
<P>
fmt::Debug for SparseTrie
<P>
{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Blind => write!(f, "Blind"), + Self::Revealed(revealed) => write!(f, "Revealed({revealed:?})"), + } + } +} + +impl
<P>
Default for SparseTrie
<P>
{ + fn default() -> Self { + Self::Blind + } } impl SparseTrie { + /// Creates new blind trie. + pub const fn blind() -> Self { + Self::Blind + } + /// Creates new revealed empty trie. pub fn revealed_empty() -> Self { - Self::Revealed(RevealedSparseTrie::default()) + Self::Revealed(Box::default()) } + /// Reveals the root node if the trie is blinded. + /// + /// # Returns + /// + /// Mutable reference to [`RevealedSparseTrie`]. + pub fn reveal_root( + &mut self, + root: TrieNode, + hash_mask: Option, + retain_updates: bool, + ) -> SparseTrieResult<&mut RevealedSparseTrie> { + self.reveal_root_with_provider(Default::default(), root, hash_mask, retain_updates) + } +} + +impl
<P>
SparseTrie
<P>
{ /// Returns `true` if the sparse trie has no revealed nodes. pub const fn is_blind(&self) -> bool { matches!(self, Self::Blind) } /// Returns mutable reference to revealed sparse trie if the trie is not blind. - pub fn as_revealed_mut(&mut self) -> Option<&mut RevealedSparseTrie> { + pub fn as_revealed_mut(&mut self) -> Option<&mut RevealedSparseTrie
<P>
> { if let Self::Revealed(revealed) = self { Some(revealed) } else { @@ -48,17 +86,28 @@ impl SparseTrie { /// # Returns /// /// Mutable reference to [`RevealedSparseTrie`]. - pub fn reveal_root(&mut self, root: TrieNode) -> SparseTrieResult<&mut RevealedSparseTrie> { + pub fn reveal_root_with_provider( + &mut self, + provider: P, + root: TrieNode, + hash_mask: Option, + retain_updates: bool, + ) -> SparseTrieResult<&mut RevealedSparseTrie
<P>
> { if self.is_blind() { - *self = Self::Revealed(RevealedSparseTrie::from_root(root)?) + *self = Self::Revealed(Box::new(RevealedSparseTrie::from_provider_and_root( + provider, + root, + hash_mask, + retain_updates, + )?)) } Ok(self.as_revealed_mut().unwrap()) } - /// Update the leaf node. - pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { + /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. + pub fn wipe(&mut self) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; - revealed.update_leaf(path, value)?; + revealed.wipe(); Ok(()) } @@ -66,38 +115,81 @@ impl SparseTrie { pub fn root(&mut self) -> Option { Some(self.as_revealed_mut()?.root()) } + + /// Calculates the hashes of the nodes below the provided level. + pub fn calculate_below_level(&mut self, level: usize) { + self.as_revealed_mut().unwrap().update_rlp_node_level(level); + } +} + +impl
<P>
SparseTrie
<P>
+where + P: BlindedProvider, + SparseTrieError: From, +{ + /// Update the leaf node. + pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { + let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + revealed.update_leaf(path, value)?; + Ok(()) + } + + /// Remove the leaf node. + pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { + let revealed = self.as_revealed_mut().ok_or(SparseTrieError::Blind)?; + revealed.remove_leaf(path)?; + Ok(()) + } } /// The representation of revealed sparse trie. -#[derive(PartialEq, Eq)] -pub struct RevealedSparseTrie { +/// +/// ## Invariants +/// +/// - The root node is always present in `nodes` collection. +/// - Each leaf entry in `nodes` collection must have a corresponding entry in `values` collection. +/// The opposite is also true. +/// - All keys in `values` collection are full leaf paths. +#[derive(Clone, PartialEq, Eq)] +pub struct RevealedSparseTrie
<P = DefaultBlindedProvider>
{ + /// Blinded node provider. + provider: P, /// All trie nodes. nodes: HashMap, + /// All branch node hash masks. + branch_node_hash_masks: HashMap, /// All leaf values. values: HashMap>, /// Prefix set. prefix_set: PrefixSetMut, + /// Retained trie updates. + updates: Option, /// Reusable buffer for RLP encoding of nodes. rlp_buf: Vec, } -impl fmt::Debug for RevealedSparseTrie { +impl
<P>
fmt::Debug for RevealedSparseTrie
<P>
{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RevealedSparseTrie") .field("nodes", &self.nodes) + .field("branch_hash_masks", &self.branch_node_hash_masks) .field("values", &self.values) .field("prefix_set", &self.prefix_set) + .field("updates", &self.updates) .field("rlp_buf", &hex::encode(&self.rlp_buf)) - .finish() + .finish_non_exhaustive() } } impl Default for RevealedSparseTrie { fn default() -> Self { Self { + provider: Default::default(), nodes: HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]), + branch_node_hash_masks: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), + updates: None, rlp_buf: Vec::new(), } } @@ -105,20 +197,95 @@ impl Default for RevealedSparseTrie { impl RevealedSparseTrie { /// Create new revealed sparse trie from the given root node. - pub fn from_root(node: TrieNode) -> SparseTrieResult { + pub fn from_root( + node: TrieNode, + hash_mask: Option, + retain_updates: bool, + ) -> SparseTrieResult { let mut this = Self { + provider: Default::default(), nodes: HashMap::default(), + branch_node_hash_masks: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), rlp_buf: Vec::new(), - }; - this.reveal_node(Nibbles::default(), node)?; + updates: None, + } + .with_updates(retain_updates); + this.reveal_node(Nibbles::default(), node, hash_mask)?; Ok(this) } +} + +impl
<P>
RevealedSparseTrie
<P>
{ + /// Create new revealed sparse trie from the given root node. + pub fn from_provider_and_root( + provider: P, + node: TrieNode, + hash_mask: Option, + retain_updates: bool, + ) -> SparseTrieResult { + let mut this = Self { + provider, + nodes: HashMap::default(), + branch_node_hash_masks: HashMap::default(), + values: HashMap::default(), + prefix_set: PrefixSetMut::default(), + rlp_buf: Vec::new(), + updates: None, + } + .with_updates(retain_updates); + this.reveal_node(Nibbles::default(), node, hash_mask)?; + Ok(this) + } + + /// Set new blinded node provider on sparse trie. + pub fn with_provider(self, provider: BP) -> RevealedSparseTrie { + RevealedSparseTrie { + provider, + nodes: self.nodes, + branch_node_hash_masks: self.branch_node_hash_masks, + values: self.values, + prefix_set: self.prefix_set, + updates: self.updates, + rlp_buf: self.rlp_buf, + } + } + + /// Set the retention of branch node updates and deletions. + pub fn with_updates(mut self, retain_updates: bool) -> Self { + if retain_updates { + self.updates = Some(SparseTrieUpdates::default()); + } + self + } + + /// Returns a reference to the retained sparse node updates without taking them. + pub fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { + self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed) + } + + /// Returns a reference to the leaf value if present. + pub fn get_leaf_value(&self, path: &Nibbles) -> Option<&Vec> { + self.values.get(path) + } + + /// Takes and returns the retained sparse node updates + pub fn take_updates(&mut self) -> SparseTrieUpdates { + self.updates.take().unwrap_or_default() + } /// Reveal the trie node only if it was not known already. - pub fn reveal_node(&mut self, path: Nibbles, node: TrieNode) -> SparseTrieResult<()> { - // TODO: revise all inserts to not overwrite existing entries + pub fn reveal_node( + &mut self, + path: Nibbles, + node: TrieNode, + hash_mask: Option, + ) -> SparseTrieResult<()> { + if let Some(hash_mask) = hash_mask { + self.branch_node_hash_masks.insert(path.clone(), hash_mask); + } + match node { TrieNode::EmptyRoot => { debug_assert!(path.is_empty()); @@ -134,21 +301,82 @@ impl RevealedSparseTrie { stack_ptr += 1; } } - self.nodes - .insert(path, SparseNode::Branch { state_mask: branch.state_mask, hash: None }); - } - TrieNode::Extension(ext) => { - let mut child_path = path.clone(); - child_path.extend_from_slice_unchecked(&ext.key); - self.reveal_node_or_hash(child_path, &ext.child)?; - self.nodes.insert(path, SparseNode::Extension { key: ext.key, hash: None }); - } - TrieNode::Leaf(leaf) => { - let mut full = path.clone(); - full.extend_from_slice_unchecked(&leaf.key); - self.values.insert(full, leaf.value); - self.nodes.insert(path, SparseNode::new_leaf(leaf.key)); + + match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + // Blinded nodes can be replaced. + SparseNode::Hash(_) => { + entry.insert(SparseNode::new_branch(branch.state_mask)); + } + // Branch node already exists, or an extension node was placed where a + // branch node was before. + SparseNode::Branch { .. } | SparseNode::Extension { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | SparseNode::Leaf { .. 
}) => { + return Err(SparseTrieError::Reveal { + path: entry.key().clone(), + node: Box::new(node.clone()), + }) + } + }, + Entry::Vacant(entry) => { + entry.insert(SparseNode::new_branch(branch.state_mask)); + } + } } + TrieNode::Extension(ext) => match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + SparseNode::Hash(_) => { + let mut child_path = entry.key().clone(); + child_path.extend_from_slice_unchecked(&ext.key); + entry.insert(SparseNode::new_ext(ext.key)); + self.reveal_node_or_hash(child_path, &ext.child)?; + } + // Extension node already exists, or an extension node was placed where a branch + // node was before. + SparseNode::Extension { .. } | SparseNode::Branch { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | SparseNode::Leaf { .. }) => { + return Err(SparseTrieError::Reveal { + path: entry.key().clone(), + node: Box::new(node.clone()), + }) + } + }, + Entry::Vacant(entry) => { + let mut child_path = entry.key().clone(); + child_path.extend_from_slice_unchecked(&ext.key); + entry.insert(SparseNode::new_ext(ext.key)); + self.reveal_node_or_hash(child_path, &ext.child)?; + } + }, + TrieNode::Leaf(leaf) => match self.nodes.entry(path) { + Entry::Occupied(mut entry) => match entry.get() { + SparseNode::Hash(_) => { + let mut full = entry.key().clone(); + full.extend_from_slice_unchecked(&leaf.key); + entry.insert(SparseNode::new_leaf(leaf.key)); + self.values.insert(full, leaf.value); + } + // Left node already exists. + SparseNode::Leaf { .. } => {} + // All other node types can't be handled. + node @ (SparseNode::Empty | + SparseNode::Extension { .. } | + SparseNode::Branch { .. }) => { + return Err(SparseTrieError::Reveal { + path: entry.key().clone(), + node: Box::new(node.clone()), + }) + } + }, + Entry::Vacant(entry) => { + let mut full = entry.key().clone(); + full.extend_from_slice_unchecked(&leaf.key); + entry.insert(SparseNode::new_leaf(leaf.key)); + self.values.insert(full, leaf.value); + } + }, } Ok(()) @@ -156,14 +384,431 @@ impl RevealedSparseTrie { fn reveal_node_or_hash(&mut self, path: Nibbles, child: &[u8]) -> SparseTrieResult<()> { if child.len() == B256::len_bytes() + 1 { - // TODO: revise insert to not overwrite existing entries - self.nodes.insert(path, SparseNode::Hash(B256::from_slice(&child[1..]))); + let hash = B256::from_slice(&child[1..]); + match self.nodes.entry(path) { + Entry::Occupied(entry) => match entry.get() { + // Hash node with a different hash can't be handled. + SparseNode::Hash(previous_hash) if previous_hash != &hash => { + return Err(SparseTrieError::Reveal { + path: entry.key().clone(), + node: Box::new(SparseNode::Hash(hash)), + }) + } + _ => {} + }, + Entry::Vacant(entry) => { + entry.insert(SparseNode::Hash(hash)); + } + } return Ok(()) } - self.reveal_node(path, TrieNode::decode(&mut &child[..])?) + self.reveal_node(path, TrieNode::decode(&mut &child[..])?, None) + } + + /// Traverse trie nodes down to the leaf node and collect all nodes along the path. + fn take_nodes_for_path(&mut self, path: &Nibbles) -> SparseTrieResult> { + let mut current = Nibbles::default(); // Start traversal from the root + let mut nodes = Vec::new(); // Collect traversed nodes + + while let Some(node) = self.nodes.remove(¤t) { + match &node { + SparseNode::Empty => return Err(SparseTrieError::Blind), + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { path: current, hash: *hash }) + } + SparseNode::Leaf { key: _key, .. 
} => { + // Leaf node is always the one that we're deleting, and no other leaf nodes can + // be found during traversal. + + #[cfg(debug_assertions)] + { + let mut current = current.clone(); + current.extend_from_slice_unchecked(_key); + assert_eq!(¤t, path); + } + + nodes.push(RemovedSparseNode { + path: current.clone(), + node, + unset_branch_nibble: None, + }); + break + } + SparseNode::Extension { key, .. } => { + #[cfg(debug_assertions)] + { + let mut current = current.clone(); + current.extend_from_slice_unchecked(key); + assert!( + path.starts_with(¤t), + "path: {:?}, current: {:?}, key: {:?}", + path, + current, + key + ); + } + + let path = current.clone(); + current.extend_from_slice_unchecked(key); + nodes.push(RemovedSparseNode { path, node, unset_branch_nibble: None }); + } + SparseNode::Branch { state_mask, .. } => { + let nibble = path[current.len()]; + debug_assert!( + state_mask.is_bit_set(nibble), + "current: {:?}, path: {:?}, nibble: {:?}, state_mask: {:?}", + current, + path, + nibble, + state_mask + ); + + // If the branch node has a child that is a leaf node that we're removing, + // we need to unset this nibble. + // Any other branch nodes will not require unsetting the nibble, because + // deleting one leaf node can not remove the whole path + // where the branch node is located. + let mut child_path = + Nibbles::from_nibbles([current.as_slice(), &[nibble]].concat()); + let unset_branch_nibble = self + .nodes + .get(&child_path) + .is_some_and(move |node| match node { + SparseNode::Leaf { key, .. } => { + // Get full path of the leaf node + child_path.extend_from_slice_unchecked(key); + &child_path == path + } + _ => false, + }) + .then_some(nibble); + + nodes.push(RemovedSparseNode { + path: current.clone(), + node, + unset_branch_nibble, + }); + + current.push_unchecked(nibble); + } + } + } + + Ok(nodes) + } + + /// Wipe the trie, removing all values and nodes, and replacing the root with an empty node. + pub fn wipe(&mut self) { + self.nodes = HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]); + self.values = HashMap::default(); + self.prefix_set = PrefixSetMut::all(); + self.updates = self.updates.is_some().then(SparseTrieUpdates::wiped); } + /// Return the root of the sparse trie. + /// Updates all remaining dirty nodes before calculating the root. + pub fn root(&mut self) -> B256 { + // take the current prefix set. + let mut prefix_set = std::mem::take(&mut self.prefix_set).freeze(); + let rlp_node = self.rlp_node_allocate(Nibbles::default(), &mut prefix_set); + if let Some(root_hash) = rlp_node.as_hash() { + root_hash + } else { + keccak256(rlp_node) + } + } + + /// Update hashes of the nodes that are located at a level deeper than or equal to the provided + /// depth. Root node has a level of 0. + pub fn update_rlp_node_level(&mut self, depth: usize) { + let mut prefix_set = self.prefix_set.clone().freeze(); + let mut buffers = RlpNodeBuffers::default(); + + let targets = self.get_changed_nodes_at_depth(&mut prefix_set, depth); + for target in targets { + buffers.path_stack.push((target, Some(true))); + self.rlp_node(&mut prefix_set, &mut buffers); + } + } + + /// Returns a list of paths to the nodes that were changed according to the prefix set and are + /// located at the provided depth when counting from the root node. If there's a leaf at a + /// depth less than the provided depth, it will be included in the result. 
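The depth rule stated just above (collect targets at the given depth, plus any leaf that sits shallower) can be pictured with a self-contained sketch. The toy node type and `nodes_at_depth` helper below are hypothetical stand-ins; they skip the hash and prefix-set short-circuits that the real `get_changed_nodes_at_depth` applies.

```rust
use std::collections::HashMap;

/// Hypothetical, simplified node type; the real trie also tracks hashes,
/// masks, and a prefix set that can skip unchanged subtrees.
#[derive(Debug)]
enum ToyNode {
    Leaf,
    Branch(Vec<u8>), // child nibbles
}

/// Collect paths of nodes at `depth` (root = 0). Leaves encountered above
/// the target depth are included, mirroring the rule described above.
fn nodes_at_depth(nodes: &HashMap<Vec<u8>, ToyNode>, depth: usize) -> Vec<Vec<u8>> {
    let mut stack = vec![(Vec::new(), 0usize)];
    let mut targets = Vec::new();
    while let Some((path, level)) = stack.pop() {
        match &nodes[&path] {
            // A leaf terminates the walk no matter how shallow it is.
            ToyNode::Leaf => targets.push(path),
            ToyNode::Branch(children) => {
                if level >= depth {
                    targets.push(path);
                } else {
                    for &nibble in children {
                        let mut child = path.clone();
                        child.push(nibble);
                        stack.push((child, level + 1));
                    }
                }
            }
        }
    }
    targets
}

fn main() {
    let mut nodes = HashMap::new();
    nodes.insert(vec![], ToyNode::Branch(vec![0x0, 0x1]));
    nodes.insert(vec![0x0], ToyNode::Leaf); // leaf shallower than the target depth
    nodes.insert(vec![0x1], ToyNode::Branch(vec![0x5]));
    nodes.insert(vec![0x1, 0x5], ToyNode::Leaf);

    let mut targets = nodes_at_depth(&nodes, 2);
    targets.sort();
    assert_eq!(targets, vec![vec![0x0], vec![0x1, 0x5]]);
}
```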
+ fn get_changed_nodes_at_depth(&self, prefix_set: &mut PrefixSet, depth: usize) -> Vec { + let mut paths = Vec::from([(Nibbles::default(), 0)]); + let mut targets = Vec::new(); + + while let Some((mut path, level)) = paths.pop() { + match self.nodes.get(&path).unwrap() { + SparseNode::Empty | SparseNode::Hash(_) => {} + SparseNode::Leaf { hash, .. } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } + + targets.push(path); + } + SparseNode::Extension { key, hash } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } + + if level >= depth { + targets.push(path); + } else { + path.extend_from_slice_unchecked(key); + paths.push((path, level + 1)); + } + } + SparseNode::Branch { state_mask, hash, .. } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } + + if level >= depth { + targets.push(path); + } else { + for bit in CHILD_INDEX_RANGE.rev() { + if state_mask.is_bit_set(bit) { + let mut child_path = path.clone(); + child_path.push_unchecked(bit); + paths.push((child_path, level + 1)); + } + } + } + } + } + } + + targets + } + + fn rlp_node_allocate(&mut self, path: Nibbles, prefix_set: &mut PrefixSet) -> RlpNode { + let mut buffers = RlpNodeBuffers::new_with_path(path); + self.rlp_node(prefix_set, &mut buffers) + } + + fn rlp_node(&mut self, prefix_set: &mut PrefixSet, buffers: &mut RlpNodeBuffers) -> RlpNode { + 'main: while let Some((path, mut is_in_prefix_set)) = buffers.path_stack.pop() { + // Check if the path is in the prefix set. + // First, check the cached value. If it's `None`, then check the prefix set, and update + // the cached value. + let mut prefix_set_contains = + |path: &Nibbles| *is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path)); + + let (rlp_node, calculated, node_type) = match self.nodes.get_mut(&path).unwrap() { + SparseNode::Empty => { + (RlpNode::word_rlp(&EMPTY_ROOT_HASH), false, SparseNodeType::Empty) + } + SparseNode::Hash(hash) => (RlpNode::word_rlp(hash), false, SparseNodeType::Hash), + SparseNode::Leaf { key, hash } => { + self.rlp_buf.clear(); + let mut path = path.clone(); + path.extend_from_slice_unchecked(key); + if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { + (RlpNode::word_rlp(&hash), false, SparseNodeType::Leaf) + } else { + let value = self.values.get(&path).unwrap(); + let rlp_node = LeafNodeRef { key, value }.rlp(&mut self.rlp_buf); + *hash = rlp_node.as_hash(); + (rlp_node, true, SparseNodeType::Leaf) + } + } + SparseNode::Extension { key, hash } => { + let mut child_path = path.clone(); + child_path.extend_from_slice_unchecked(key); + if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { + ( + RlpNode::word_rlp(&hash), + false, + SparseNodeType::Extension { store_in_db_trie: true }, + ) + } else if buffers.rlp_node_stack.last().is_some_and(|e| e.0 == child_path) { + let (_, child, _, node_type) = buffers.rlp_node_stack.pop().unwrap(); + self.rlp_buf.clear(); + let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf); + *hash = rlp_node.as_hash(); + + ( + rlp_node, + true, + SparseNodeType::Extension { + // Inherit the `store_in_db_trie` flag from the child node, which is + // always the branch node + store_in_db_trie: node_type.store_in_db_trie(), + }, + ) + } else { + // need to get rlp node for child first + buffers.path_stack.extend([(path, is_in_prefix_set), (child_path, None)]); + continue + } + } + SparseNode::Branch { state_mask, hash, store_in_db_trie } => { + if let Some((hash, store_in_db_trie)) = + 
hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) + { + buffers.rlp_node_stack.push(( + path, + RlpNode::word_rlp(&hash), + false, + SparseNodeType::Branch { store_in_db_trie }, + )); + continue + } + let retain_updates = self.updates.is_some() && prefix_set_contains(&path); + + buffers.branch_child_buf.clear(); + // Walk children in a reverse order from `f` to `0`, so we pop the `0` first + // from the stack and keep walking in the sorted order. + for bit in CHILD_INDEX_RANGE.rev() { + if state_mask.is_bit_set(bit) { + let mut child = path.clone(); + child.push_unchecked(bit); + buffers.branch_child_buf.push(child); + } + } + + buffers + .branch_value_stack_buf + .resize(buffers.branch_child_buf.len(), Default::default()); + let mut added_children = false; + + // TODO(alexey): set the `TrieMask` bits directly + let mut tree_mask_values = Vec::new(); + let mut hash_mask_values = Vec::new(); + let mut hashes = Vec::new(); + for (i, child_path) in buffers.branch_child_buf.iter().enumerate() { + if buffers.rlp_node_stack.last().is_some_and(|e| &e.0 == child_path) { + let (_, child, calculated, node_type) = + buffers.rlp_node_stack.pop().unwrap(); + + // Update the masks only if we need to retain trie updates + if retain_updates { + // Set the trie mask + let tree_mask_value = if node_type.store_in_db_trie() { + // A branch or an extension node explicitly set the + // `store_in_db_trie` flag + true + } else { + // Set the flag according to whether a child node was + // pre-calculated (`calculated = false`), meaning that it wasn't + // in the database + !calculated + }; + tree_mask_values.push(tree_mask_value); + + // Set the hash mask. If a child node is a revealed branch node OR + // is a blinded node that has its hash mask bit set according to the + // database, set the hash mask bit and save the hash. + let hash = child.as_hash().filter(|_| { + node_type.is_branch() || + (node_type.is_hash() && + self.branch_node_hash_masks + .get(&path) + .is_some_and(|mask| { + mask.is_bit_set(child_path.last().unwrap()) + })) + }); + let hash_mask_value = hash.is_some(); + hash_mask_values.push(hash_mask_value); + if let Some(hash) = hash { + hashes.push(hash); + } + + trace!( + target: "trie::sparse", + ?path, + ?child_path, + ?tree_mask_value, + ?hash_mask_value, + "Updating branch node child masks" + ); + } + + // Insert children in the resulting buffer in a normal order, + // because initially we iterated in reverse. + buffers.branch_value_stack_buf + [buffers.branch_child_buf.len() - i - 1] = child; + added_children = true; + } else { + debug_assert!(!added_children); + buffers.path_stack.push((path, is_in_prefix_set)); + buffers + .path_stack + .extend(buffers.branch_child_buf.drain(..).map(|p| (p, None))); + continue 'main + } + } + + self.rlp_buf.clear(); + let branch_node_ref = + BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask); + let rlp_node = branch_node_ref.rlp(&mut self.rlp_buf); + *hash = rlp_node.as_hash(); + + // Save a branch node update only if it's not a root node, and we need to + // persist updates. 
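The tree-mask and hash-mask bookkeeping here is easy to get backwards, because children are walked from nibble `0xf` down to `0x0` and the collected per-child flags have to be consumed reversed. A minimal sketch of just the bit logic, with a toy mask type standing in for `TrieMask` and made-up flag values:

```rust
/// Stand-in for `TrieMask` (a 16-bit child bitmap); illustration only.
#[derive(Debug, Default, Clone, Copy)]
struct ToyMask(u16);

impl ToyMask {
    fn set_bit(&mut self, nibble: u8) {
        self.0 |= 1 << nibble;
    }
    fn is_empty(self) -> bool {
        self.0 == 0
    }
}

fn main() {
    // Children were visited from 0xf down to 0x0, so the per-child flags
    // arrive in descending order and must be reversed before the bits are
    // assigned to nibbles in ascending order.
    let children = [0x1u8, 0x5, 0xa]; // ascending, as `children()` yields them
    let flags_descending = [true, false, true]; // flags for 0xa, 0x5, 0x1
    let mut flags = flags_descending.into_iter().rev();

    let mut tree_mask = ToyMask::default();
    for nibble in children {
        if flags.next().expect("one flag per child") {
            tree_mask.set_bit(nibble);
        }
    }
    // Nibbles 0x1 and 0xa end up set; a non-empty mask is what makes the
    // branch worth storing in the database trie.
    assert_eq!(tree_mask.0, (1 << 0x1) | (1 << 0xa));
    assert!(!tree_mask.is_empty());
}
```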
+ let store_in_db_trie_value = if let Some(updates) = + self.updates.as_mut().filter(|_| retain_updates && !path.is_empty()) + { + let mut tree_mask_values = tree_mask_values.into_iter().rev(); + let mut hash_mask_values = hash_mask_values.into_iter().rev(); + let mut tree_mask = TrieMask::default(); + let mut hash_mask = TrieMask::default(); + for (i, child) in branch_node_ref.children() { + if child.is_some() { + if hash_mask_values.next().unwrap() { + hash_mask.set_bit(i); + } + if tree_mask_values.next().unwrap() { + tree_mask.set_bit(i); + } + } + } + + // Store in DB trie if there are either any children that are stored in the + // DB trie, or any children represent hashed values + let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty(); + if store_in_db_trie { + hashes.reverse(); + let branch_node = BranchNodeCompact::new( + *state_mask, + tree_mask, + hash_mask, + hashes, + hash.filter(|_| path.len() == 0), + ); + updates.updated_nodes.insert(path.clone(), branch_node); + } + + store_in_db_trie + } else { + false + }; + *store_in_db_trie = Some(store_in_db_trie_value); + + ( + rlp_node, + true, + SparseNodeType::Branch { store_in_db_trie: store_in_db_trie_value }, + ) + } + }; + buffers.rlp_node_stack.push((path, rlp_node, calculated, node_type)); + } + + debug_assert_eq!(buffers.rlp_node_stack.len(), 1); + buffers.rlp_node_stack.pop().unwrap().1 + } +} + +impl
<P> RevealedSparseTrie<P>
+where + P: BlindedProvider, + SparseTrieError: From<P::Error>, +{ /// Update the leaf node with provided value. pub fn update_leaf(&mut self, path: Nibbles, value: Vec<u8>) -> SparseTrieResult<()> { self.prefix_set.insert(path.clone()); @@ -216,12 +861,30 @@ impl RevealedSparseTrie { } SparseNode::Extension { key, .. } => { current.extend_from_slice(key); + if !path.starts_with(&current) { // find the common prefix let common = current.common_prefix_length(&path); - *key = current.slice(current.len() - key.len()..common); + // If branch node updates retention is enabled, we need to query the + // extension node child to later set the hash mask for a parent branch node + // correctly. + if self.updates.is_some() { + // Check if the extension node child is a hash that needs to be revealed + if self.nodes.get(&current).unwrap().is_hash() { + if let Some(node) = self.provider.blinded_node(current.clone())? { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!(target: "trie::sparse", ?current, ?decoded, "Revealing extension node child"); + // We'll never have to update the revealed child node, only + // remove or do nothing, so + // we can safely ignore the hash mask here and + // pass `None`. + self.reveal_node(current.clone(), decoded, None)?; + } + } + } + // create state mask for new branch node + // NOTE: this might overwrite the current extension node + let branch = SparseNode::new_split_branch(current[common], path[common]); @@ -257,157 +920,234 @@ impl RevealedSparseTrie { } /// Remove leaf node from the trie. - pub fn remove_leaf(&mut self, _path: Nibbles) { - unimplemented!() - } + pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { + if self.values.remove(path).is_none() { + if let Some(SparseNode::Hash(hash)) = self.nodes.get(path) { + // Leaf is present in the trie, but it's blinded. + return Err(SparseTrieError::BlindedNode { path: path.clone(), hash: *hash }) + } - /// Return the root of the sparse trie. - /// Updates all remaining dirty nodes before calculating the root. - pub fn root(&mut self) -> B256 { - // take the current prefix set. - let mut prefix_set = std::mem::take(&mut self.prefix_set).freeze(); - let root_rlp = self.rlp_node(Nibbles::default(), &mut prefix_set); - if let Some(root_hash) = root_rlp.as_hash() { - root_hash - } else { - keccak256(root_rlp) + // Leaf is not present in the trie. + return Ok(()) } - } - - /// Update node hashes only if their path exceeds the provided level. - pub fn update_rlp_node_level(&mut self, min_len: usize) { - let mut paths = Vec::from([Nibbles::default()]); - let mut targets = HashSet::<Nibbles>::default(); + self.prefix_set.insert(path.clone()); - while let Some(mut path) = paths.pop() { - match self.nodes.get(&path).unwrap() { - SparseNode::Empty | SparseNode::Hash(_) => {} - SparseNode::Leaf { .. } => { - targets.insert(path); - } - SparseNode::Extension { key, .. } => { - if path.len() >= min_len { - targets.insert(path); - } else { - path.extend_from_slice_unchecked(key); - paths.push(path); - } - } - SparseNode::Branch { state_mask, .. } => { - if path.len() >= min_len { - targets.insert(path); - } else { - for bit in CHILD_INDEX_RANGE { - if state_mask.is_bit_set(bit) { - let mut child_path = path.clone(); - child_path.push_unchecked(bit); - paths.push(child_path); - } - } - } - } - } + // If the path wasn't present in `values`, we still need to walk the trie and ensure that + // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry + // in `nodes`, but not in the `values`.
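For the extension split performed in `update_leaf` above, the arithmetic is: find the common prefix of the extension's full path and the new leaf path, then open a branch at the first diverging nibble. A minimal sketch with plain byte slices standing in for `Nibbles` (names are illustrative only):

```rust
/// Length of the shared prefix of two nibble paths.
fn common_prefix_length(a: &[u8], b: &[u8]) -> usize {
    a.iter().zip(b.iter()).take_while(|(x, y)| x == y).count()
}

fn main() {
    // Existing extension covers 0x5 0x0 0x2 0x3; the new leaf path starts
    // with 0x5 0x2. They share one nibble, so a branch opens at depth 1
    // with children at nibbles 0x0 (old subtree) and 0x2 (new leaf).
    let current: [u8; 4] = [0x5, 0x0, 0x2, 0x3];
    let path: [u8; 4] = [0x5, 0x2, 0x0, 0x1];

    let common = common_prefix_length(&current, &path);
    assert_eq!(common, 1);

    // Mirrors `new_split_branch(current[common], path[common])`.
    let state_mask: u16 = (1 << current[common]) | (1 << path[common]);
    assert_eq!(state_mask, 0b101); // bits 0x0 and 0x2 set
}
```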
+ + let mut removed_nodes = self.take_nodes_for_path(path)?; + trace!(target: "trie::sparse", ?path, ?removed_nodes, "Removed nodes for path"); + // Pop the first node from the stack which is the leaf node we want to remove. + let mut child = removed_nodes.pop().expect("leaf exists"); + #[cfg(debug_assertions)] + { + let mut child_path = child.path.clone(); + let SparseNode::Leaf { key, .. } = &child.node else { panic!("expected leaf node") }; + child_path.extend_from_slice_unchecked(key); + assert_eq!(&child_path, path); } - let mut prefix_set = self.prefix_set.clone().freeze(); - for target in targets { - self.rlp_node(target, &mut prefix_set); + // If we don't have any other removed nodes, insert an empty node at the root. + if removed_nodes.is_empty() { + debug_assert!(self.nodes.is_empty()); + self.nodes.insert(Nibbles::default(), SparseNode::Empty); + + return Ok(()) } - } - fn rlp_node(&mut self, path: Nibbles, prefix_set: &mut PrefixSet) -> RlpNode { - // stack of paths we need rlp nodes for - let mut path_stack = Vec::from([path]); - // stack of rlp nodes - let mut rlp_node_stack = Vec::<(Nibbles, RlpNode)>::new(); - // reusable branch child path - let mut branch_child_buf = SmallVec::<[Nibbles; 16]>::new_const(); - // reusable branch value stack - let mut branch_value_stack_buf = SmallVec::<[RlpNode; 16]>::new_const(); + // Walk the stack of removed nodes from the back and re-insert them back into the trie, + // adjusting the node type as needed. + while let Some(removed_node) = removed_nodes.pop() { + let removed_path = removed_node.path; - 'main: while let Some(path) = path_stack.pop() { - let rlp_node = match self.nodes.get_mut(&path).unwrap() { - SparseNode::Empty => RlpNode::word_rlp(&EMPTY_ROOT_HASH), - SparseNode::Hash(hash) => RlpNode::word_rlp(hash), - SparseNode::Leaf { key, hash } => { - self.rlp_buf.clear(); - let mut path = path.clone(); - path.extend_from_slice_unchecked(key); - if let Some(hash) = hash.filter(|_| !prefix_set.contains(&path)) { - RlpNode::word_rlp(&hash) - } else { - let value = self.values.get(&path).unwrap(); - let rlp_node = LeafNodeRef { key, value }.rlp(&mut self.rlp_buf); - if let Some(node_hash) = rlp_node.as_hash() { - *hash = Some(node_hash); - } - rlp_node - } + let new_node = match &removed_node.node { + SparseNode::Empty => return Err(SparseTrieError::Blind), + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { path: removed_path, hash: *hash }) } - SparseNode::Extension { key, hash } => { - let mut child_path = path.clone(); - child_path.extend_from_slice_unchecked(key); - if let Some(hash) = hash.filter(|_| !prefix_set.contains(&path)) { - RlpNode::word_rlp(&hash) - } else if rlp_node_stack.last().map_or(false, |e| e.0 == child_path) { - let (_, child) = rlp_node_stack.pop().unwrap(); - self.rlp_buf.clear(); - let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf); - if let Some(node_hash) = rlp_node.as_hash() { - *hash = Some(node_hash); + SparseNode::Leaf { .. } => { + unreachable!("we already popped the leaf node") + } + SparseNode::Extension { key, .. } => { + // If the node is an extension node, we need to look at its child to see if we + // need to merge them. 
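The merge rules applied just below reduce to three cases: an extension followed by a leaf collapses into a longer leaf, two stacked extensions collapse into one, and an extension over a branch stays as-is. A self-contained sketch with a toy node enum (the real nodes also carry hashes and masks):

```rust
/// Toy node enum for the merge rules; illustration only.
#[derive(Debug, PartialEq)]
enum Node {
    Leaf(Vec<u8>),      // remaining key nibbles
    Extension(Vec<u8>), // key nibbles
    Branch(u16),        // state mask
}

/// Collapse an extension with key `ext_key` into its single child.
fn collapse_extension(ext_key: &[u8], child: Node) -> Node {
    match child {
        // Extension + leaf => one longer leaf.
        Node::Leaf(key) => Node::Leaf([ext_key, key.as_slice()].concat()),
        // Extension + extension => one longer extension.
        Node::Extension(key) => Node::Extension([ext_key, key.as_slice()].concat()),
        // Extension + branch => leave the extension as-is.
        Node::Branch(_) => Node::Extension(ext_key.to_vec()),
    }
}

fn main() {
    assert_eq!(
        collapse_extension(&[0x3], Node::Leaf(vec![0x0, 0x2])),
        Node::Leaf(vec![0x3, 0x0, 0x2]),
    );
    assert_eq!(
        collapse_extension(&[0x3], Node::Extension(vec![0x3])),
        Node::Extension(vec![0x3, 0x3]),
    );
}
```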
+ match &child.node { + SparseNode::Empty => return Err(SparseTrieError::Blind), + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { + path: child.path, + hash: *hash, + }) } - rlp_node - } else { - path_stack.extend([path, child_path]); // need to get rlp node for child first - continue + // For a leaf node, we collapse the extension node into a leaf node, + // extending the key. While it's impossible to encounter an extension node + // followed by a leaf node in a complete trie, it's possible here because we + // could have downgraded the extension node's child into a leaf node from + // another node type. + SparseNode::Leaf { key: leaf_key, .. } => { + self.nodes.remove(&child.path); + + let mut new_key = key.clone(); + new_key.extend_from_slice_unchecked(leaf_key); + SparseNode::new_leaf(new_key) + } + // For an extension node, we collapse them into one extension node, + // extending the key + SparseNode::Extension { key: extension_key, .. } => { + self.nodes.remove(&child.path); + + let mut new_key = key.clone(); + new_key.extend_from_slice_unchecked(extension_key); + SparseNode::new_ext(new_key) + } + // For a branch node, we just leave the extension node as-is. + SparseNode::Branch { .. } => removed_node.node, } } - SparseNode::Branch { state_mask, hash } => { - if let Some(hash) = hash.filter(|_| !prefix_set.contains(&path)) { - rlp_node_stack.push((path, RlpNode::word_rlp(&hash))); - continue + SparseNode::Branch { mut state_mask, hash: _, store_in_db_trie: _ } => { + // If the node is a branch node, we need to check the number of children left + // after deleting the child at the given nibble. + + if let Some(removed_nibble) = removed_node.unset_branch_nibble { + state_mask.unset_bit(removed_nibble); } - branch_child_buf.clear(); - for bit in CHILD_INDEX_RANGE { - if state_mask.is_bit_set(bit) { - let mut child = path.clone(); - child.push_unchecked(bit); - branch_child_buf.push(child); + // If only one child is left set in the branch node, we need to collapse it. + if state_mask.count_bits() == 1 { + let child_nibble = + state_mask.first_set_bit_index().expect("state mask is not empty"); + + // Get full path of the only child node left. + let mut child_path = removed_path.clone(); + child_path.push_unchecked(child_nibble); + + trace!(target: "trie::sparse", ?removed_path, ?child_path, ?child, "Branch node has only one child"); + + if self.nodes.get(&child_path).unwrap().is_hash() { + trace!(target: "trie::sparse", ?child_path, "Retrieving remaining blinded branch child"); + if let Some(node) = self.provider.blinded_node(child_path.clone())? { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!(target: "trie::sparse", ?child_path, ?decoded, "Revealing remaining blinded branch child"); + // We'll never have to update the revealed branch node, only remove + // or do nothing, so we can safely ignore the hash mask here and + // pass `None`. + self.reveal_node(child_path.clone(), decoded, None)?; + } } - } - branch_value_stack_buf.clear(); - for child_path in &branch_child_buf { - if rlp_node_stack.last().map_or(false, |e| &e.0 == child_path) { - let (_, child) = rlp_node_stack.pop().unwrap(); - branch_value_stack_buf.push(child); - } else { - debug_assert!(branch_value_stack_buf.is_empty()); - path_stack.push(path); - path_stack.extend(branch_child_buf.drain(..)); - continue 'main + // Get the only child node. 
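The single-child collapse handled next can be pictured in isolation: unset the removed nibble, and if exactly one bit remains, the surviving child's type decides what the branch becomes. A small std-only sketch; `first_set_bit_index` mirrors the contract of the real helper:

```rust
/// Index of the lowest set bit, if any (toy version of the mask helper).
fn first_set_bit_index(mask: u16) -> Option<u8> {
    (mask != 0).then(|| mask.trailing_zeros() as u8)
}

fn main() {
    // Branch with children at nibbles 0x0 and 0x2; the leaf under 0x0 is
    // being removed.
    let mut state_mask: u16 = 0b0101;
    state_mask &= !(1u16 << 0x0); // unset the removed child's bit
    assert_eq!(state_mask.count_ones(), 1);

    // One child left: the branch collapses. A leaf or extension child
    // absorbs the nibble into its key; a branch child leaves behind a
    // one-nibble extension instead.
    assert_eq!(first_set_bit_index(state_mask), Some(0x2));
}
```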
+ let child = self.nodes.get(&child_path).unwrap(); + + let mut delete_child = false; + let new_node = match child { + SparseNode::Empty => return Err(SparseTrieError::Blind), + SparseNode::Hash(hash) => { + return Err(SparseTrieError::BlindedNode { + path: child_path, + hash: *hash, + }) + } + // If the only child is a leaf node, we downgrade the branch node into a + // leaf node, prepending the nibble to the key, and delete the old + // child. + SparseNode::Leaf { key, .. } => { + delete_child = true; + + let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); + new_key.extend_from_slice_unchecked(key); + SparseNode::new_leaf(new_key) + } + // If the only child node is an extension node, we downgrade the branch + // node into an even longer extension node, prepending the nibble to the + // key, and delete the old child. + SparseNode::Extension { key, .. } => { + delete_child = true; + + let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); + new_key.extend_from_slice_unchecked(key); + SparseNode::new_ext(new_key) + } + // If the only child is a branch node, we downgrade the current branch + // node into a one-nibble extension node. + SparseNode::Branch { .. } => { + SparseNode::new_ext(Nibbles::from_nibbles_unchecked([child_nibble])) + } + }; + + if delete_child { + self.nodes.remove(&child_path); } - } - self.rlp_buf.clear(); - let rlp_node = BranchNodeRef::new(&branch_value_stack_buf, *state_mask) - .rlp(&mut self.rlp_buf); - if let Some(node_hash) = rlp_node.as_hash() { - *hash = Some(node_hash); + if let Some(updates) = self.updates.as_mut() { + updates.removed_nodes.insert(removed_path.clone()); + } + + new_node + } + // If more than one child is left set in the branch, we just re-insert it + // as-is. + else { + SparseNode::new_branch(state_mask) } - rlp_node } }; - rlp_node_stack.push((path, rlp_node)); + + child = RemovedSparseNode { + path: removed_path.clone(), + node: new_node.clone(), + unset_branch_nibble: None, + }; + trace!(target: "trie::sparse", ?removed_path, ?new_node, "Re-inserting the node"); + self.nodes.insert(removed_path, new_node); } - rlp_node_stack.pop().unwrap().1 + Ok(()) + } +} + +/// Enum representing sparse trie node type. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum SparseNodeType { + /// Empty trie node. + Empty, + /// The hash of the node that was not revealed. + Hash, + /// Sparse leaf node. + Leaf, + /// Sparse extension node. + Extension { + /// A flag indicating whether the extension node should be stored in the database. + store_in_db_trie: bool, + }, + /// Sparse branch node. + Branch { + /// A flag indicating whether the branch node should be stored in the database. + store_in_db_trie: bool, + }, +} + +impl SparseNodeType { + const fn is_hash(&self) -> bool { + matches!(self, Self::Hash) + } + + const fn is_branch(&self) -> bool { + matches!(self, Self::Branch { .. }) + } + + const fn store_in_db_trie(&self) -> bool { + match *self { + Self::Extension { store_in_db_trie } | Self::Branch { store_in_db_trie } => { + store_in_db_trie + } + _ => false, + } } } /// Enum representing trie nodes in sparse trie. -#[derive(PartialEq, Eq, Clone, Debug)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum SparseNode { /// Empty trie node. Empty, @@ -436,6 +1176,9 @@ pub enum SparseNode { /// Pre-computed hash of the sparse node. /// Can be reused unless this trie path has been updated. hash: Option, + /// Pre-computed flag indicating whether the trie node should be stored in the database. 
+ /// Can be reused unless this trie path has been updated. + store_in_db_trie: Option, }, } @@ -452,7 +1195,7 @@ impl SparseNode { /// Create new [`SparseNode::Branch`] from state mask. pub const fn new_branch(state_mask: TrieMask) -> Self { - Self::Branch { state_mask, hash: None } + Self::Branch { state_mask, hash: None, store_in_db_trie: None } } /// Create new [`SparseNode::Branch`] with two bits set. @@ -461,7 +1204,7 @@ impl SparseNode { // set bits for both children (1u16 << bit_a) | (1u16 << bit_b), ); - Self::Branch { state_mask, hash: None } + Self::Branch { state_mask, hash: None, store_in_db_trie: None } } /// Create new [`SparseNode::Extension`] from the key slice. @@ -473,73 +1216,293 @@ impl SparseNode { pub const fn new_leaf(key: Nibbles) -> Self { Self::Leaf { key, hash: None } } + + /// Returns `true` if the node is a hash node. + pub const fn is_hash(&self) -> bool { + matches!(self, Self::Hash(_)) + } +} + +#[derive(Debug)] +struct RemovedSparseNode { + path: Nibbles, + node: SparseNode, + unset_branch_nibble: Option, +} + +/// Collection of reusable buffers for [`RevealedSparseTrie::rlp_node`]. +#[derive(Debug, Default)] +struct RlpNodeBuffers { + /// Stack of paths we need rlp nodes for and whether the path is in the prefix set. + path_stack: Vec<(Nibbles, Option)>, + /// Stack of rlp nodes + rlp_node_stack: Vec<(Nibbles, RlpNode, bool, SparseNodeType)>, + /// Reusable branch child path + branch_child_buf: SmallVec<[Nibbles; 16]>, + /// Reusable branch value stack + branch_value_stack_buf: SmallVec<[RlpNode; 16]>, +} + +impl RlpNodeBuffers { + /// Creates a new instance of buffers with the given path on the stack. + fn new_with_path(path: Nibbles) -> Self { + Self { + path_stack: vec![(path, None)], + rlp_node_stack: Vec::new(), + branch_child_buf: SmallVec::<[Nibbles; 16]>::new_const(), + branch_value_stack_buf: SmallVec::<[RlpNode; 16]>::new_const(), + } + } +} + +/// The aggregation of sparse trie updates. +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct SparseTrieUpdates { + pub(crate) updated_nodes: HashMap, + pub(crate) removed_nodes: HashSet, + pub(crate) wiped: bool, +} + +impl SparseTrieUpdates { + /// Create new wiped sparse trie updates. + pub fn wiped() -> Self { + Self { wiped: true, ..Default::default() } + } } #[cfg(test)] mod tests { use super::*; - use alloy_primitives::U256; + use alloy_primitives::{map::HashSet, U256}; + use alloy_rlp::Encodable; + use assert_matches::assert_matches; use itertools::Itertools; + use prop::sample::SizeRange; use proptest::prelude::*; - use reth_trie_common::HashBuilder; + use proptest_arbitrary_interop::arb; + use rand::seq::IteratorRandom; + use reth_primitives_traits::Account; + use reth_trie::{ + hashed_cursor::{noop::NoopHashedAccountCursor, HashedPostStateAccountCursor}, + node_iter::{TrieElement, TrieNodeIter}, + trie_cursor::noop::NoopAccountTrieCursor, + updates::TrieUpdates, + walker::TrieWalker, + BranchNode, ExtensionNode, HashedPostState, LeafNode, TrieAccount, + }; + use reth_trie_common::{ + proof::{ProofNodes, ProofRetainer}, + HashBuilder, + }; + use std::collections::BTreeMap; + + /// Pad nibbles to the length of a B256 hash with zeros on the left. + fn pad_nibbles_left(nibbles: Nibbles) -> Nibbles { + let mut base = + Nibbles::from_nibbles_unchecked(vec![0; B256::len_bytes() * 2 - nibbles.len()]); + base.extend_from_slice_unchecked(&nibbles); + base + } + + /// Pad nibbles to the length of a B256 hash with zeros on the right. 
+ fn pad_nibbles_right(mut nibbles: Nibbles) -> Nibbles { + nibbles.extend_from_slice_unchecked(&vec![0; B256::len_bytes() * 2 - nibbles.len()]); + nibbles + } + + /// Calculate the state root by feeding the provided state to the hash builder and retaining the + /// proofs for the provided targets. + /// + /// Returns the state root and the retained proof nodes. + fn run_hash_builder( + state: impl IntoIterator + Clone, + destroyed_accounts: HashSet, + proof_targets: impl IntoIterator, + ) -> (B256, TrieUpdates, ProofNodes, HashMap) { + let mut account_rlp = Vec::new(); + + let mut hash_builder = HashBuilder::default() + .with_updates(true) + .with_proof_retainer(ProofRetainer::from_iter(proof_targets)); + + let mut prefix_set = PrefixSetMut::default(); + prefix_set.extend_keys(state.clone().into_iter().map(|(nibbles, _)| nibbles)); + let walker = TrieWalker::new(NoopAccountTrieCursor::default(), prefix_set.freeze()) + .with_deletions_retained(true); + let hashed_post_state = HashedPostState::default() + .with_accounts(state.into_iter().map(|(nibbles, account)| { + (nibbles.pack().into_inner().unwrap().into(), Some(account)) + })) + .into_sorted(); + let mut node_iter = TrieNodeIter::new( + walker, + HashedPostStateAccountCursor::new( + NoopHashedAccountCursor::default(), + hashed_post_state.accounts(), + ), + ); + + while let Some(node) = node_iter.try_next().unwrap() { + match node { + TrieElement::Branch(branch) => { + hash_builder.add_branch(branch.key, branch.value, branch.children_are_in_trie); + } + TrieElement::Leaf(key, account) => { + let account = TrieAccount::from((account, EMPTY_ROOT_HASH)); + account.encode(&mut account_rlp); + + hash_builder.add_leaf(Nibbles::unpack(key), &account_rlp); + account_rlp.clear(); + } + } + } + let root = hash_builder.root(); + let proof_nodes = hash_builder.take_proof_nodes(); + let branch_node_hash_masks = hash_builder + .updated_branch_nodes + .clone() + .unwrap_or_default() + .iter() + .map(|(path, node)| (path.clone(), node.hash_mask)) + .collect(); + + let mut trie_updates = TrieUpdates::default(); + let removed_keys = node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys, destroyed_accounts); + + (root, trie_updates, proof_nodes, branch_node_hash_masks) + } + + /// Assert that the sparse trie nodes and the proof nodes from the hash builder are equal. + fn assert_eq_sparse_trie_proof_nodes( + sparse_trie: &RevealedSparseTrie, + proof_nodes: ProofNodes, + ) { + let proof_nodes = proof_nodes + .into_nodes_sorted() + .into_iter() + .map(|(path, node)| (path, TrieNode::decode(&mut node.as_ref()).unwrap())); + + let sparse_nodes = sparse_trie.nodes.iter().sorted_by_key(|(path, _)| *path); + + for ((proof_node_path, proof_node), (sparse_node_path, sparse_node)) in + proof_nodes.zip(sparse_nodes) + { + assert_eq!(&proof_node_path, sparse_node_path); + + let equals = match (&proof_node, &sparse_node) { + // Both nodes are empty + (TrieNode::EmptyRoot, SparseNode::Empty) => true, + // Both nodes are branches and have the same state mask + ( + TrieNode::Branch(BranchNode { state_mask: proof_state_mask, .. }), + SparseNode::Branch { state_mask: sparse_state_mask, .. }, + ) => proof_state_mask == sparse_state_mask, + // Both nodes are extensions and have the same key + ( + TrieNode::Extension(ExtensionNode { key: proof_key, .. }), + SparseNode::Extension { key: sparse_key, .. }, + ) | + // Both nodes are leaves and have the same key + ( + TrieNode::Leaf(LeafNode { key: proof_key, .. 
}), + SparseNode::Leaf { key: sparse_key, .. }, + ) => proof_key == sparse_key, + // Empty and hash nodes are specific to the sparse trie, skip them + (_, SparseNode::Empty | SparseNode::Hash(_)) => continue, + _ => false, + }; + assert!(equals, "proof node: {:?}, sparse node: {:?}", proof_node, sparse_node); + } + } #[test] fn sparse_trie_is_blind() { - assert!(SparseTrie::default().is_blind()); + assert!(SparseTrie::blind().is_blind()); assert!(!SparseTrie::revealed_empty().is_blind()); } #[test] fn sparse_trie_empty_update_one() { - let path = Nibbles::unpack(B256::with_last_byte(42)); - let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + let key = Nibbles::unpack(B256::with_last_byte(42)); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; - let mut hash_builder = HashBuilder::default(); - hash_builder.add_leaf(path.clone(), &value); - let expected = hash_builder.root(); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + run_hash_builder([(key.clone(), value())], Default::default(), [key.clone()]); - let mut sparse = RevealedSparseTrie::default(); - sparse.update_leaf(path, value.to_vec()).unwrap(); - let root = sparse.root(); - assert_eq!(root, expected); + let mut sparse = RevealedSparseTrie::default().with_updates(true); + sparse.update_leaf(key, value_encoded()).unwrap(); + let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); + + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] fn sparse_trie_empty_update_multiple_lower_nibbles() { + reth_tracing::init_test_tracing(); + let paths = (0..=16).map(|b| Nibbles::unpack(B256::with_last_byte(b))).collect::>(); - let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; - let mut hash_builder = HashBuilder::default(); - for path in &paths { - hash_builder.add_leaf(path.clone(), &value); - } - let expected = hash_builder.root(); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(value)), + Default::default(), + paths.clone(), + ); - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), value_encoded()).unwrap(); } - let root = sparse.root(); - assert_eq!(root, expected); + let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); + + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] fn sparse_trie_empty_update_multiple_upper_nibbles() { let paths = (239..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::>(); - let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut 
account_rlp); + account_rlp + }; - let mut hash_builder = HashBuilder::default(); - for path in &paths { - hash_builder.add_leaf(path.clone(), &value); - } - let expected = hash_builder.root(); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(value)), + Default::default(), + paths.clone(), + ); - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), value_encoded()).unwrap(); } - let root = sparse.root(); - assert_eq!(root, expected); + let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); + + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] @@ -553,75 +1516,869 @@ mod tests { }) }) .collect::>(); - let value = alloy_rlp::encode_fixed_size(&U256::from(1)); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; - let mut hash_builder = HashBuilder::default(); - for path in paths.iter().sorted_unstable_by_key(|key| *key) { - hash_builder.add_leaf(path.clone(), &value); - } - let expected = hash_builder.root(); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + run_hash_builder( + paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(value)), + Default::default(), + paths.clone(), + ); - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), value_encoded()).unwrap(); } - let root = sparse.root(); - assert_eq!(root, expected); + let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); + + assert_eq!(sparse_root, hash_builder_root); + pretty_assertions::assert_eq!( + BTreeMap::from_iter(sparse_updates.updated_nodes), + BTreeMap::from_iter(hash_builder_updates.account_nodes) + ); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] fn sparse_trie_empty_update_repeated() { let paths = (0..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::>(); - let old_value = alloy_rlp::encode_fixed_size(&U256::from(1)); - let new_value = alloy_rlp::encode_fixed_size(&U256::from(2)); + let old_value = Account { nonce: 1, ..Default::default() }; + let old_value_encoded = { + let mut account_rlp = Vec::new(); + TrieAccount::from((old_value, EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; + let new_value = Account { nonce: 2, ..Default::default() }; + let new_value_encoded = { + let mut account_rlp = Vec::new(); + TrieAccount::from((new_value, EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; - let mut hash_builder = HashBuilder::default(); - for path in paths.iter().sorted_unstable_by_key(|key| *key) { - hash_builder.add_leaf(path.clone(), &old_value); - } - let expected = hash_builder.root(); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(|| old_value)), + Default::default(), + paths.clone(), + ); - let mut sparse 
= RevealedSparseTrie::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(path.clone(), old_value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), old_value_encoded.clone()).unwrap(); } - let root = sparse.root(); - assert_eq!(root, expected); + let sparse_root = sparse.root(); + let sparse_updates = sparse.updates_ref(); - let mut hash_builder = HashBuilder::default(); - for path in paths.iter().sorted_unstable_by_key(|key| *key) { - hash_builder.add_leaf(path.clone(), &new_value); - } - let expected = hash_builder.root(); + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + run_hash_builder( + paths.iter().cloned().zip(std::iter::repeat_with(|| new_value)), + Default::default(), + paths.clone(), + ); for path in &paths { - sparse.update_leaf(path.clone(), new_value.to_vec()).unwrap(); + sparse.update_leaf(path.clone(), new_value_encoded.clone()).unwrap(); } - let root = sparse.root(); - assert_eq!(root, expected); + let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); + + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); } #[test] - fn sparse_trie_empty_update_fuzz() { - proptest!(ProptestConfig::with_cases(10), |(updates: Vec>)| { - let mut state = std::collections::BTreeMap::default(); - let mut sparse = RevealedSparseTrie::default(); - - for update in updates { - for (key, value) in &update { - sparse.update_leaf(Nibbles::unpack(key), alloy_rlp::encode_fixed_size(value).to_vec()).unwrap(); - } - let root = sparse.root(); + fn sparse_trie_remove_leaf() { + reth_tracing::init_test_tracing(); - state.extend(update); - let mut hash_builder = HashBuilder::default(); - for (key, value) in &state { - hash_builder.add_leaf(Nibbles::unpack(key), &alloy_rlp::encode_fixed_size(value)); - } - let expected = hash_builder.root(); + let mut sparse = RevealedSparseTrie::default(); + + let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); + + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1011) + // ├── 0 -> Extension (Key = 23) + // │ └── Branch (Mask = 0101) + // │ ├── 1 -> Leaf (Key = 1, Path = 50231) + // │ └── 3 -> Leaf (Key = 3, Path = 50233) + // ├── 2 -> Leaf (Key = 013, Path = 52013) + // └── 3 -> Branch (Mask = 0101) + // ├── 1 -> Leaf (Key = 3102, Path = 53102) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + sparse.nodes.clone().into_iter().collect::>(), + BTreeMap::from_iter([ + (Nibbles::default(), 
SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1101.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_ext(Nibbles::from_nibbles([0x2, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]), + SparseNode::new_branch(0b1010.into()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::default()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), + SparseNode::new_leaf(Nibbles::default()) + ), + ( + Nibbles::from_nibbles([0x5, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x1, 0x3])) + ), + (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); + + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3])).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Extension (Key = 23) + // │ └── Branch (Mask = 0101) + // │ ├── 1 -> Leaf (Key = 0231, Path = 50231) + // │ └── 3 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Branch (Mask = 0101) + // ├── 1 -> Leaf (Key = 3102, Path = 53102) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + sparse.nodes.clone().into_iter().collect::>(), + BTreeMap::from_iter([ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_ext(Nibbles::from_nibbles([0x2, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]), + SparseNode::new_branch(0b1010.into()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::default()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), + SparseNode::new_leaf(Nibbles::default()) + ), + (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); + + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1])).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Branch (Mask = 0101) + // ├── 1 -> Leaf (Key = 3102, Path = 53102) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + sparse.nodes.clone().into_iter().collect::>(), + BTreeMap::from_iter([ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 
0x3])) + ), + (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); + + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2])).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + sparse.nodes.clone().into_iter().collect::>(), + BTreeMap::from_iter([ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3]), + SparseNode::new_ext(Nibbles::from_nibbles([0x3])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); + + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0])).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Leaf (Key = 3302, Path = 53302) + pretty_assertions::assert_eq!( + sparse.nodes.clone().into_iter().collect::>(), + BTreeMap::from_iter([ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x3, 0x0, 0x2])) + ), + ]) + ); + + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3])).unwrap(); + + // Leaf (Key = 53302) + pretty_assertions::assert_eq!( + sparse.nodes.clone().into_iter().collect::>(), + BTreeMap::from_iter([( + Nibbles::default(), + SparseNode::new_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])) + ),]) + ); + + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])).unwrap(); + + // Empty + pretty_assertions::assert_eq!( + sparse.nodes.clone().into_iter().collect::>(), + BTreeMap::from_iter([(Nibbles::default(), SparseNode::Empty)]) + ); + } + + #[test] + fn sparse_trie_remove_leaf_blinded() { + let leaf = LeafNode::new( + Nibbles::default(), + alloy_rlp::encode_fixed_size(&U256::from(1)).to_vec(), + ); + let branch = TrieNode::Branch(BranchNode::new( + vec![ + RlpNode::word_rlp(&B256::repeat_byte(1)), + RlpNode::from_raw_rlp(&alloy_rlp::encode(leaf.clone())).unwrap(), + ], + TrieMask::new(0b11), + )); + + let mut sparse = + RevealedSparseTrie::from_root(branch.clone(), Some(TrieMask::new(0b01)), false) + .unwrap(); + + // Reveal a branch node and one of its children + // + // Branch (Mask = 11) + // ├── 0 -> Hash (Path = 0) + // └── 1 -> Leaf (Path = 1) + sparse.reveal_node(Nibbles::default(), 
branch, Some(TrieMask::new(0b01))).unwrap(); + sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf), None).unwrap(); + + // Removing a blinded leaf should result in an error + assert_matches!( + sparse.remove_leaf(&Nibbles::from_nibbles([0x0])), + Err(SparseTrieError::BlindedNode { path, hash }) if path == Nibbles::from_nibbles([0x0]) && hash == B256::repeat_byte(1) + ); + } + + #[test] + fn sparse_trie_remove_leaf_non_existent() { + let leaf = LeafNode::new( + Nibbles::default(), + alloy_rlp::encode_fixed_size(&U256::from(1)).to_vec(), + ); + let branch = TrieNode::Branch(BranchNode::new( + vec![ + RlpNode::word_rlp(&B256::repeat_byte(1)), + RlpNode::from_raw_rlp(&alloy_rlp::encode(leaf.clone())).unwrap(), + ], + TrieMask::new(0b11), + )); + + let mut sparse = + RevealedSparseTrie::from_root(branch.clone(), Some(TrieMask::new(0b01)), false) + .unwrap(); + + // Reveal a branch node and one of its children + // + // Branch (Mask = 11) + // ├── 0 -> Hash (Path = 0) + // └── 1 -> Leaf (Path = 1) + sparse.reveal_node(Nibbles::default(), branch, Some(TrieMask::new(0b01))).unwrap(); + sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf), None).unwrap(); + + // Removing a non-existent leaf should be a noop + let sparse_old = sparse.clone(); + assert_matches!(sparse.remove_leaf(&Nibbles::from_nibbles([0x2])), Ok(())); + assert_eq!(sparse, sparse_old); + } + + #[allow(clippy::type_complexity)] + #[test] + fn sparse_trie_fuzz() { + // Having only the first 3 nibbles set, we narrow down the range of keys + // to 4096 different hashes. It allows us to generate collisions more likely + // to test the sparse trie updates. + const KEY_NIBBLES_LEN: usize = 3; + + fn test(updates: Vec<(HashMap, HashSet)>) { + { + let mut state = BTreeMap::default(); + let mut sparse = RevealedSparseTrie::default().with_updates(true); + + for (update, keys_to_delete) in updates { + // Insert state updates into the sparse trie and calculate the root + for (key, account) in update.clone() { + let account = TrieAccount::from((account, EMPTY_ROOT_HASH)); + let mut account_rlp = Vec::new(); + account.encode(&mut account_rlp); + sparse.update_leaf(key, account_rlp).unwrap(); + } + // We need to clone the sparse trie, so that all updated branch nodes are + // preserved, and not only those that were changed after the last call to + // `root()`. 
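For intuition on the fuzz setup's key-space choice stated earlier in this test (16 possibilities per nibble, three free nibbles, the rest zero-padded), plain arithmetic rather than crate code:

```rust
fn main() {
    // With only the first KEY_NIBBLES_LEN nibbles varying, the key space is
    // 16^3 = 4096 hashes, so proptest updates collide often enough to
    // exercise structural changes (splits, merges, deletions).
    const KEY_NIBBLES_LEN: u32 = 3;
    assert_eq!(16u32.pow(KEY_NIBBLES_LEN), 4096);
}
```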
+ let mut updated_sparse = sparse.clone(); + let sparse_root = updated_sparse.root(); + let sparse_updates = updated_sparse.take_updates(); + + // Insert state updates into the hash builder and calculate the root + state.extend(update); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + run_hash_builder( + state.clone(), + Default::default(), + state.keys().cloned().collect::>(), + ); + + // Assert that the sparse trie root matches the hash builder root + assert_eq!(sparse_root, hash_builder_root); + // Assert that the sparse trie updates match the hash builder updates + pretty_assertions::assert_eq!( + sparse_updates.updated_nodes, + hash_builder_updates.account_nodes + ); + // Assert that the sparse trie nodes match the hash builder proof nodes + assert_eq_sparse_trie_proof_nodes(&updated_sparse, hash_builder_proof_nodes); + + // Delete some keys from both the hash builder and the sparse trie and check + // that the sparse trie root still matches the hash builder root + for key in keys_to_delete { + state.remove(&key).unwrap(); + sparse.remove_leaf(&key).unwrap(); + } - assert_eq!(root, expected); + // We need to clone the sparse trie, so that all updated branch nodes are + // preserved, and not only those that were changed after the last call to + // `root()`. + let mut updated_sparse = sparse.clone(); + let sparse_root = updated_sparse.root(); + let sparse_updates = updated_sparse.take_updates(); + + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + run_hash_builder( + state.clone(), + Default::default(), + state.keys().cloned().collect::>(), + ); + + // Assert that the sparse trie root matches the hash builder root + assert_eq!(sparse_root, hash_builder_root); + // Assert that the sparse trie updates match the hash builder updates + pretty_assertions::assert_eq!( + sparse_updates.updated_nodes, + hash_builder_updates.account_nodes + ); + // Assert that the sparse trie nodes match the hash builder proof nodes + assert_eq_sparse_trie_proof_nodes(&updated_sparse, hash_builder_proof_nodes); + } } + } + + fn transform_updates( + updates: Vec>, + mut rng: impl Rng, + ) -> Vec<(HashMap, HashSet)> { + let mut keys = HashSet::new(); + updates + .into_iter() + .map(|update| { + keys.extend(update.keys().cloned()); + + let keys_to_delete_len = update.len() / 2; + let keys_to_delete = (0..keys_to_delete_len) + .map(|_| { + let key = keys.iter().choose(&mut rng).unwrap().clone(); + keys.take(&key).unwrap() + }) + .collect(); + + (update, keys_to_delete) + }) + .collect::>() + } + + proptest!(ProptestConfig::with_cases(10), |( + updates in proptest::collection::vec( + proptest::collection::hash_map( + any_with::(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles_right), + arb::(), + 1..100, + ).prop_map(HashMap::from_iter), + 1..100, + ).prop_perturb(transform_updates) + )| { + test(updates) }); } + + /// We have three leaves that share the same prefix: 0x00, 0x01 and 0x02. Hash builder trie has + /// only nodes 0x00 and 0x01, and we have proofs for them. Node B is new and inserted in the + /// sparse trie first. + /// + /// 1. Reveal the hash builder proof to leaf 0x00 in the sparse trie. + /// 2. Insert leaf 0x01 into the sparse trie. + /// 3. Reveal the hash builder proof to leaf 0x02 in the sparse trie. + /// + /// The hash builder proof to the leaf 0x02 didn't have the leaf 0x01 at the corresponding + /// nibble of the branch node, so we need to adjust the branch node instead of fully + /// replacing it. 
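The adjustment described above, in numbers (plain bitmasks with hypothetical values, matching the assertions in the test below):

```rust
fn main() {
    // The proof's root branch only knows children at nibbles 0x0 and 0x2
    // (mask 0b101). The sparse trie then inserts a leaf at nibble 0x1,
    // widening the local mask to 0b111.
    let mut local_mask: u16 = 0b101;
    local_mask |= 1 << 0x1;
    assert_eq!(local_mask, 0b111);

    // Re-revealing the (stale) proof branch must not reset the mask back to
    // the proof's 0b101: the already-revealed branch is left untouched.
    let proof_mask: u16 = 0b101;
    assert_ne!(proof_mask, local_mask);
    assert_eq!(local_mask, 0b111); // unchanged after the reveal
}
```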
+ #[test] + fn sparse_trie_reveal_node_1() { + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x02])); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; + + // Generate the proof for the root node and initialize the sparse trie with it + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder( + [(key1(), value()), (key3(), value())], + Default::default(), + [Nibbles::default()], + ); + let mut sparse = RevealedSparseTrie::from_root( + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + branch_node_hash_masks.get(&Nibbles::default()).copied(), + false, + ) + .unwrap(); + + // Generate the proof for the first key and reveal it in the sparse trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = + run_hash_builder([(key1(), value()), (key3(), value())], Default::default(), [key1()]); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); + } + + // Check that the branch node exists with only two nibbles set + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b101.into())) + ); + + // Insert the leaf for the second key + sparse.update_leaf(key2(), value_encoded()).unwrap(); + + // Check that the branch node was updated and another nibble was set + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b111.into())) + ); + + // Generate the proof for the third key and reveal it in the sparse trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = + run_hash_builder([(key1(), value()), (key3(), value())], Default::default(), [key3()]); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); + } + + // Check that nothing changed in the branch node + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b111.into())) + ); + + // Generate the nodes for the full trie with all three key using the hash builder, and + // compare them to the sparse trie + let (_, _, hash_builder_proof_nodes, _) = run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), + [key1(), key2(), key3()], + ); + + assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + } + + /// We have three leaves: 0x0000, 0x0101, and 0x0102. Hash builder trie has all nodes, and we + /// have proofs for them. + /// + /// 1. Reveal the hash builder proof to leaf 0x00 in the sparse trie. + /// 2. Remove leaf 0x00 from the sparse trie (that will remove the branch node and create an + /// extension node with the key 0x0000). + /// 3. Reveal the hash builder proof to leaf 0x0101 in the sparse trie. + /// + /// The hash builder proof to the leaf 0x0101 had a branch node in the path, but we turned it + /// into an extension node, so it should ignore this node. 
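The invariant these reveal scenarios exercise is that revealing only ever replaces blinded `Hash` placeholders; nodes the sparse trie already knows, possibly in a newer shape than the proof, are left alone. A self-contained sketch of that rule for branch nodes (toy types, not the crate's API; the real method also handles extensions, leaves, and hash masks):

```rust
use std::collections::{hash_map::Entry, HashMap};

/// Hypothetical, simplified node type for illustration only.
#[derive(Debug, Clone, PartialEq)]
enum ToyNode {
    Hash([u8; 32]), // blinded placeholder
    Branch(u16),    // state mask
    Leaf(Vec<u8>),  // key nibbles
}

/// Reveal a branch at `path`, mirroring the entry-based rules:
/// a blinded placeholder may be overwritten, an existing branch is kept,
/// and revealing over an incompatible node type is an error.
fn reveal_branch(
    nodes: &mut HashMap<Vec<u8>, ToyNode>,
    path: Vec<u8>,
    state_mask: u16,
) -> Result<(), String> {
    match nodes.entry(path) {
        Entry::Occupied(mut entry) => match entry.get() {
            // A blinded placeholder is replaced with the real node.
            ToyNode::Hash(_) => {
                entry.insert(ToyNode::Branch(state_mask));
                Ok(())
            }
            // An already-revealed branch is left untouched: the local trie
            // is fresher than the proof being replayed.
            ToyNode::Branch(_) => Ok(()),
            other => Err(format!("cannot reveal branch over {other:?}")),
        },
        Entry::Vacant(entry) => {
            entry.insert(ToyNode::Branch(state_mask));
            Ok(())
        }
    }
}

fn main() {
    let mut nodes = HashMap::new();
    nodes.insert(vec![0x5], ToyNode::Hash([0; 32]));

    // First reveal un-blinds the placeholder.
    reveal_branch(&mut nodes, vec![0x5], 0b11).unwrap();
    assert_eq!(nodes[&vec![0x5u8]], ToyNode::Branch(0b11));

    // Replaying a stale proof is a no-op, not an overwrite.
    reveal_branch(&mut nodes, vec![0x5], 0b01).unwrap();
    assert_eq!(nodes[&vec![0x5u8]], ToyNode::Branch(0b11));

    // Revealing over a leaf fails loudly.
    nodes.insert(vec![0x6], ToyNode::Leaf(vec![]));
    assert!(reveal_branch(&mut nodes, vec![0x6], 0b01).is_err());
}
```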
+ #[test] + fn sparse_trie_reveal_node_2() { + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x00])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x01])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x02])); + let value = || Account::default(); + + // Generate the proof for the root node and initialize the sparse trie with it + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), + [Nibbles::default()], + ); + let mut sparse = RevealedSparseTrie::from_root( + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + branch_node_hash_masks.get(&Nibbles::default()).copied(), + false, + ) + .unwrap(); + + // Generate the proof for the children of the root branch node and reveal it in the sparse + // trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), + [key1(), Nibbles::from_nibbles_unchecked([0x01])], + ); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); + } + + // Check that the branch node exists + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b11.into())) + ); + + // Remove the leaf for the first key + sparse.remove_leaf(&key1()).unwrap(); + + // Check that the branch node was turned into an extension node + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x01]))) + ); + + // Generate the proof for the second key and reveal it in the sparse trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), + [key2()], + ); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); + } + + // Check that nothing changed in the extension node + assert_eq!( + sparse.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x01]))) + ); + } + + /// We have two leaves that share the same prefix: 0x0001 and 0x0002, and a leaf with a + /// different prefix: 0x0100. Hash builder trie has only the first two leaves, and we have + /// proofs for them. + /// + /// 1. Insert the leaf 0x0100 into the sparse trie, and check that the root extension node was + /// turned into a branch node. + /// 2. Reveal the leaf 0x0001 in the sparse trie, and check that the root branch node wasn't + /// overwritten with the extension node from the proof.
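The reveal loop above is repeated verbatim in all three `sparse_trie_reveal_node_*` tests; a sketch of it factored into a helper, with boxed errors to stay agnostic of the exact error enums (import paths are assumptions based on the types these tests use):

use alloy_primitives::map::HashMap;
use alloy_rlp::Decodable;
use alloy_trie::{nodes::TrieNode, proof::ProofNodes, Nibbles, TrieMask};
use reth_trie_sparse::RevealedSparseTrie;

// Decode each proof node in sorted order and reveal it in the sparse trie,
// forwarding the branch hash mask when the hash builder recorded one.
fn reveal_proof_nodes(
    sparse: &mut RevealedSparseTrie,
    proof_nodes: ProofNodes,
    hash_masks: &HashMap<Nibbles, TrieMask>,
) -> Result<(), Box<dyn std::error::Error>> {
    for (path, node) in proof_nodes.nodes_sorted() {
        let hash_mask = hash_masks.get(&path).copied();
        sparse.reveal_node(path, TrieNode::decode(&mut &node[..])?, hash_mask)?;
    }
    Ok(())
}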
+ #[test] + fn sparse_trie_reveal_node_3() { + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x01])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x02])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x00])); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; + + // Generate the proof for the root node and initialize the sparse trie with it + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder( + [(key1(), value()), (key2(), value())], + Default::default(), + [Nibbles::default()], + ); + let mut sparse = RevealedSparseTrie::from_root( + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + branch_node_hash_masks.get(&Nibbles::default()).copied(), + false, + ) + .unwrap(); + + // Check that the root extension node exists + assert_matches!( + sparse.nodes.get(&Nibbles::default()), + Some(SparseNode::Extension { key, hash: None }) if *key == Nibbles::from_nibbles([0x00]) + ); + + // Insert the leaf with a different prefix + sparse.update_leaf(key3(), value_encoded()).unwrap(); + + // Check that the extension node was turned into a branch node + assert_matches!( + sparse.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch { state_mask, hash: None, store_in_db_trie: None }) if *state_mask == TrieMask::new(0b11) + ); + + // Generate the proof for the first key and reveal it in the sparse trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = + run_hash_builder([(key1(), value()), (key2(), value())], Default::default(), [key1()]); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); + } + + // Check that the branch node wasn't overwritten by the extension node in the proof + assert_matches!( + sparse.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch { state_mask, hash: None, store_in_db_trie: None }) if *state_mask == TrieMask::new(0b11) + ); + } + + #[test] + fn sparse_trie_get_changed_nodes_at_depth() { + let mut sparse = RevealedSparseTrie::default(); + + let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); + + // Extension (Key = 5) – Level 0 + // └── Branch (Mask = 1011) – Level 1 + // ├── 0 -> Extension (Key = 23) – Level 2 + // │ └── Branch (Mask = 0101) – Level 3 + // │ ├── 1 -> Leaf (Key = 1, Path = 50231) – Level 4 + // │ └── 3 -> Leaf (Key = 3, Path = 50233) – Level 4 + // ├── 2 -> Leaf (Key = 013, Path = 52013) – Level 2 + // └── 3 -> Branch (Mask = 0101) – Level 2 + // ├── 1 -> Leaf (Key = 3102, Path = 53102) – Level 3 + // └── 3 -> Branch (Mask = 1010) – Level 3 + // ├── 0 -> Leaf (Key = 3302, Path = 53302) – Level 4 + // └── 2 -> Leaf (Key = 3320, Path = 53320) – Level 4 + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .unwrap(); + 
sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); + + assert_eq!( + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 0), + vec![Nibbles::default()] + ); + assert_eq!( + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 1), + vec![Nibbles::from_nibbles_unchecked([0x5])] + ); + assert_eq!( + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 2), + vec![ + Nibbles::from_nibbles_unchecked([0x5, 0x0]), + Nibbles::from_nibbles_unchecked([0x5, 0x2]), + Nibbles::from_nibbles_unchecked([0x5, 0x3]) + ] + ); + assert_eq!( + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 3), + vec![ + Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3]), + Nibbles::from_nibbles_unchecked([0x5, 0x2]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x1]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3]) + ] + ); + assert_eq!( + sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 4), + vec![ + Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3, 0x1]), + Nibbles::from_nibbles_unchecked([0x5, 0x0, 0x2, 0x3, 0x3]), + Nibbles::from_nibbles_unchecked([0x5, 0x2]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x1]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3, 0x0]), + Nibbles::from_nibbles_unchecked([0x5, 0x3, 0x3, 0x2]) + ] + ); + } + + #[test] + fn hash_builder_branch_hash_mask() { + let key1 = || pad_nibbles_left(Nibbles::from_nibbles_unchecked([0x00])); + let key2 = || pad_nibbles_left(Nibbles::from_nibbles_unchecked([0x01])); + let value = || Account { bytecode_hash: Some(B256::repeat_byte(1)), ..Default::default() }; + let value_encoded = || { + let mut account_rlp = Vec::new(); + TrieAccount::from((value(), EMPTY_ROOT_HASH)).encode(&mut account_rlp); + account_rlp + }; + + let (hash_builder_root, hash_builder_updates, _, _) = run_hash_builder( + [(key1(), value()), (key2(), value())], + Default::default(), + [Nibbles::default()], + ); + let mut sparse = RevealedSparseTrie::default(); + sparse.update_leaf(key1(), value_encoded()).unwrap(); + sparse.update_leaf(key2(), value_encoded()).unwrap(); + let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); + + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + } + + #[test] + fn sparse_trie_wipe() { + let mut sparse = RevealedSparseTrie::default().with_updates(true); + + let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); + + // Extension (Key = 5) – Level 0 + // └── Branch (Mask = 1011) – Level 1 + // ├── 0 -> Extension (Key = 23) – Level 2 + // │ └── Branch (Mask = 0101) – Level 3 + // │ ├── 1 -> Leaf (Key = 1, Path = 50231) – Level 4 + // │ └── 3 -> Leaf (Key = 3, Path = 50233) – Level 4 + // ├── 2 -> Leaf (Key = 013, Path = 52013) – Level 2 + // └── 3 -> Branch (Mask = 0101) – Level 2 + // ├── 1 -> Leaf (Key = 3102, Path = 53102) – Level 3 + // └── 3 -> Branch (Mask = 1010) – Level 3 + // ├── 0 -> Leaf (Key = 3302, Path = 53302) – Level 4 + // └── 2 -> Leaf (Key = 3320, Path = 53320) – Level 4 + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), 
value.clone()) + .unwrap(); + sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); + + sparse.wipe(); + + assert_eq!(sparse.root(), EMPTY_ROOT_HASH); + } } diff --git a/crates/trie/trie/Cargo.toml b/crates/trie/trie/Cargo.toml index 31b5ac3e25c..cfce88fa020 100644 --- a/crates/trie/trie/Cargo.toml +++ b/crates/trie/trie/Cargo.toml @@ -17,21 +17,23 @@ reth-execution-errors.workspace = true reth-primitives.workspace = true reth-stages-types.workspace = true reth-storage-errors.workspace = true +reth-trie-sparse.workspace = true reth-trie-common.workspace = true revm.workspace = true # alloy +alloy-eips.workspace = true alloy-rlp.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-trie.workspace = true # tracing tracing.workspace = true # misc rayon.workspace = true -derive_more.workspace = true auto_impl.workspace = true itertools.workspace = true @@ -42,15 +44,8 @@ metrics = { workspace = true, optional = true } # `test-utils` feature triehash = { version = "0.8", optional = true } -# `serde` feature -serde = { workspace = true, optional = true } - -# `serde-bincode-compat` feature -serde_with = { workspace = true, optional = true } - [dev-dependencies] # reth -reth-chainspec.workspace = true reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } @@ -60,24 +55,26 @@ triehash = "0.8" # misc proptest.workspace = true proptest-arbitrary-interop.workspace = true -tokio = { workspace = true, default-features = false, features = [ - "sync", - "rt", - "macros", -] } serde_json.workspace = true criterion.workspace = true -bincode.workspace = true [features] metrics = ["reth-metrics", "dep:metrics"] -serde = ["dep:serde"] -serde-bincode-compat = ["serde_with"] -test-utils = ["triehash", "reth-trie-common/test-utils"] - -[[bench]] -name = "prefix_set" -harness = false +serde = [ + "alloy-primitives/serde", + "alloy-consensus/serde", + "alloy-trie/serde", + "alloy-eips/serde", + "revm/serde", + "reth-trie-common/serde" +] +test-utils = [ + "triehash", + "revm/test-utils", + "reth-primitives/test-utils", + "reth-trie-common/test-utils", + "reth-stages-types/test-utils" +] [[bench]] name = "hash_post_state" diff --git a/crates/trie/trie/benches/hash_post_state.rs b/crates/trie/trie/benches/hash_post_state.rs index 6e913ef78a3..da47d01e15c 100644 --- a/crates/trie/trie/benches/hash_post_state.rs +++ b/crates/trie/trie/benches/hash_post_state.rs @@ -2,7 +2,7 @@ use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; -use reth_trie::{HashedPostState, HashedStorage}; +use reth_trie::{HashedPostState, HashedStorage, KeccakKeyHasher}; use revm::db::{states::BundleBuilder, BundleAccount}; pub fn hash_post_state(c: &mut Criterion) { @@ -19,7 +19,7 @@ pub fn hash_post_state(c: &mut Criterion) { // parallel group.bench_function(BenchmarkId::new("parallel hashing", size), |b| { - b.iter(|| HashedPostState::from_bundle_state(&state)) + b.iter(|| HashedPostState::from_bundle_state::<KeccakKeyHasher>(&state)) }); } } @@ -29,7 +29,7 @@ fn from_bundle_state_seq(state: &HashMap<Address, BundleAccount>) -> HashedPostS for (address, account) in state { let hashed_address = keccak256(address); - this.accounts.insert(hashed_address, account.info.clone().map(Into::into)); + this.accounts.insert(hashed_address,
account.info.as_ref().map(Into::into)); let hashed_storage = HashedStorage::from_iter( account.status.was_destroyed(), diff --git a/crates/trie/trie/benches/trie_root.rs b/crates/trie/trie/benches/trie_root.rs index ad169936463..be6e4954579 100644 --- a/crates/trie/trie/benches/trie_root.rs +++ b/crates/trie/trie/benches/trie_root.rs @@ -3,7 +3,7 @@ use alloy_primitives::B256; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use proptest::{prelude::*, strategy::ValueTree, test_runner::TestRunner}; use proptest_arbitrary_interop::arb; -use reth_primitives::ReceiptWithBloom; +use reth_primitives::{Receipt, ReceiptWithBloom}; use reth_trie::triehash::KeccakHasher; /// Benchmarks different implementations of the root calculation. @@ -27,8 +27,8 @@ pub fn trie_root_benchmark(c: &mut Criterion) { } } -fn generate_test_data(size: usize) -> Vec<ReceiptWithBloom> { - prop::collection::vec(arb::<ReceiptWithBloom>(), size) +fn generate_test_data(size: usize) -> Vec<ReceiptWithBloom<Receipt>> { + prop::collection::vec(arb::<ReceiptWithBloom<Receipt>>(), size) .new_tree(&mut TestRunner::new(ProptestConfig::default())) .unwrap() .current() @@ -43,18 +43,19 @@ criterion_main!(benches); mod implementations { use super::*; + use alloy_eips::eip2718::Encodable2718; use alloy_rlp::Encodable; - use reth_trie_common::{root::adjust_index_for_rlp, HashBuilder, Nibbles}; + use alloy_trie::root::adjust_index_for_rlp; + use reth_primitives::Receipt; + use reth_trie_common::{HashBuilder, Nibbles}; - pub fn trie_hash_ordered_trie_root(receipts: &[ReceiptWithBloom]) -> B256 { - triehash::ordered_trie_root::<KeccakHasher, _>(receipts.iter().map(|receipt| { - let mut receipt_rlp = Vec::new(); - receipt.encode_inner(&mut receipt_rlp, false); - receipt_rlp - })) + pub fn trie_hash_ordered_trie_root(receipts: &[ReceiptWithBloom<Receipt>]) -> B256 { + triehash::ordered_trie_root::<KeccakHasher, _>( + receipts.iter().map(|receipt_with_bloom| receipt_with_bloom.encoded_2718()), + ) } - pub fn hash_builder_root(receipts: &[ReceiptWithBloom]) -> B256 { + pub fn hash_builder_root(receipts: &[ReceiptWithBloom<Receipt>]) -> B256 { let mut index_buffer = Vec::new(); let mut value_buffer = Vec::new(); @@ -67,7 +68,7 @@ mod implementations { index.encode(&mut index_buffer); value_buffer.clear(); - receipts[index].encode_inner(&mut value_buffer, false); + receipts[index].encode_2718(&mut value_buffer); hb.add_leaf(Nibbles::unpack(&index_buffer), &value_buffer); } diff --git a/crates/trie/trie/src/forward_cursor.rs b/crates/trie/trie/src/forward_cursor.rs index 6db214bb51a..745fc351b90 100644 --- a/crates/trie/trie/src/forward_cursor.rs +++ b/crates/trie/trie/src/forward_cursor.rs @@ -30,7 +30,7 @@ where /// exhausted. Returns the first entry for which `comparator` returns `false` or `None`.
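Context for the `KeccakKeyHasher` turbofish in the benchmark above: `HashedPostState::from_bundle_state` is now generic over a key hasher, so the keccak step can be swapped out. A sketch of a custom hasher, assuming the `KeyHasher` trait shape implied by the `KH::hash_key(address)` call sites later in this diff; `IdentityKeyHasher` is hypothetical:

use alloy_primitives::B256;
use reth_trie_common::KeyHasher;

/// Hypothetical test-only hasher that right-pads the raw key bytes instead of
/// keccak-hashing them, keeping hashed keys trivially invertible.
#[derive(Clone, Copy, Debug, Default)]
struct IdentityKeyHasher;

impl KeyHasher for IdentityKeyHasher {
    fn hash_key<T: AsRef<[u8]>>(key: T) -> B256 {
        // Address keys are 20 bytes, so they fit in a right-padded B256.
        B256::right_padding_from(key.as_ref())
    }
}

With something like this in place, `HashedPostState::from_bundle_state::<IdentityKeyHasher>(&state)` would keep keys readable, which can make test failures easier to inspect.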
fn advance_while_false(&mut self, comparator: impl Fn(&K) -> bool) -> Option<(K, V)> { let mut entry = self.entries.get(self.index); - while entry.map_or(false, |entry| comparator(&entry.0)) { + while entry.is_some_and(|entry| comparator(&entry.0)) { self.index += 1; entry = self.entries.get(self.index); } diff --git a/crates/trie/trie/src/hashed_cursor/noop.rs b/crates/trie/trie/src/hashed_cursor/noop.rs index 4783d5afd9d..a21e1026b38 100644 --- a/crates/trie/trie/src/hashed_cursor/noop.rs +++ b/crates/trie/trie/src/hashed_cursor/noop.rs @@ -32,11 +32,11 @@ pub struct NoopHashedAccountCursor; impl HashedCursor for NoopHashedAccountCursor { type Value = Account; - fn next(&mut self) -> Result<Option<(B256, Self::Value)>, DatabaseError> { + fn seek(&mut self, _key: B256) -> Result<Option<(B256, Self::Value)>, DatabaseError> { Ok(None) } - fn seek(&mut self, _key: B256) -> Result<Option<(B256, Self::Value)>, DatabaseError> { + fn next(&mut self) -> Result<Option<(B256, Self::Value)>, DatabaseError> { Ok(None) } } @@ -49,11 +49,11 @@ pub struct NoopHashedStorageCursor; impl HashedCursor for NoopHashedStorageCursor { type Value = U256; - fn next(&mut self) -> Result<Option<(B256, Self::Value)>, DatabaseError> { + fn seek(&mut self, _key: B256) -> Result<Option<(B256, Self::Value)>, DatabaseError> { Ok(None) } - fn seek(&mut self, _key: B256) -> Result<Option<(B256, Self::Value)>, DatabaseError> { + fn next(&mut self) -> Result<Option<(B256, Self::Value)>, DatabaseError> { Ok(None) } } diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index 67891419152..e0689d45087 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -3,10 +3,9 @@ use crate::{ forward_cursor::ForwardInMemoryCursor, HashedAccountsSorted, HashedPostStateSorted, HashedStorageSorted, }; -use alloy_primitives::{B256, U256}; +use alloy_primitives::{map::HashSet, B256, U256}; use reth_primitives::Account; use reth_storage_errors::db::DatabaseError; -use std::collections::HashSet; /// The hashed cursor factory for the post state. #[derive(Clone, Debug)] @@ -82,14 +81,14 @@ where // It's an exact match, return the account from post state without looking up in the // database. - if post_state_entry.map_or(false, |entry| entry.0 == key) { + if post_state_entry.is_some_and(|entry| entry.0 == key) { return Ok(post_state_entry) } // It's not an exact match, reposition to the first greater or equal account that wasn't // cleared. let mut db_entry = self.cursor.seek(key)?; - while db_entry.as_ref().map_or(false, |(address, _)| self.is_account_cleared(address)) { + while db_entry.as_ref().is_some_and(|(address, _)| self.is_account_cleared(address)) { db_entry = self.cursor.next()?; } @@ -103,7 +102,7 @@ where // If post state was given precedence or account was cleared, move the cursor forward. let mut db_entry = self.cursor.seek(last_account)?; - while db_entry.as_ref().map_or(false, |(address, _)| { + while db_entry.as_ref().is_some_and(|(address, _)| { address <= &last_account || self.is_account_cleared(address) }) { db_entry = self.cursor.next()?; } @@ -200,14 +199,14 @@ where let post_state_cursor = post_state_storage.map(|s| ForwardInMemoryCursor::new(&s.non_zero_valued_slots)); let cleared_slots = post_state_storage.map(|s| &s.zero_valued_slots); - let storage_wiped = post_state_storage.map_or(false, |s| s.wiped); + let storage_wiped = post_state_storage.is_some_and(|s| s.wiped); Self { cursor, post_state_cursor, cleared_slots, storage_wiped, last_slot: None } } /// Check if the slot was zeroed out in the post state. /// The database is not checked since it already has no zero-valued slots.
fn is_slot_zero_valued(&self, slot: &B256) -> bool { - self.cleared_slots.map_or(false, |s| s.contains(slot)) + self.cleared_slots.is_some_and(|s| s.contains(slot)) } /// Find the storage entry in post state or database that's greater or equal to provided subkey. @@ -217,14 +216,14 @@ where // If database storage was wiped or it's an exact match, // return the storage slot from post state without looking up in the database. - if self.storage_wiped || post_state_entry.map_or(false, |entry| entry.0 == subkey) { + if self.storage_wiped || post_state_entry.is_some_and(|entry| entry.0 == subkey) { return Ok(post_state_entry) } // It's not an exact match and storage was not wiped, // reposition to the first greater or equal account. let mut db_entry = self.cursor.seek(subkey)?; - while db_entry.as_ref().map_or(false, |entry| self.is_slot_zero_valued(&entry.0)) { + while db_entry.as_ref().is_some_and(|entry| self.is_slot_zero_valued(&entry.0)) { db_entry = self.cursor.next()?; } @@ -248,7 +247,7 @@ where let mut db_entry = self.cursor.seek(last_slot)?; while db_entry .as_ref() - .map_or(false, |entry| entry.0 == last_slot || self.is_slot_zero_valued(&entry.0)) + .is_some_and(|entry| entry.0 == last_slot || self.is_slot_zero_valued(&entry.0)) { db_entry = self.cursor.next()?; } diff --git a/crates/trie/trie/src/input.rs b/crates/trie/trie/src/input.rs index 18f9ada2f4a..ea71558c2c1 100644 --- a/crates/trie/trie/src/input.rs +++ b/crates/trie/trie/src/input.rs @@ -1,7 +1,7 @@ use crate::{prefix_set::TriePrefixSetsMut, updates::TrieUpdates, HashedPostState}; /// Inputs for trie-related computations. -#[derive(Default, Debug)] +#[derive(Default, Debug, Clone)] pub struct TrieInput { /// The collection of cached in-memory intermediate trie nodes that /// can be reused for computation. diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index bb568ae8b8c..1e7eeb9b52b 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -13,10 +13,6 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -/// The implementation of a container for storing intermediate changes to a trie. -/// The container indicates when the trie has been modified. -pub mod prefix_set; - /// The implementation of forward-only in-memory cursor. pub mod forward_cursor; @@ -50,9 +46,6 @@ pub mod witness; mod trie; pub use trie::{StateRoot, StorageRoot}; -/// Buffer for trie updates. -pub mod updates; - /// Utilities for state root checkpoint progress. mod progress; pub use progress::{IntermediateStateRootState, StateRootProgress}; @@ -63,17 +56,6 @@ pub mod stats; // re-export for convenience pub use reth_trie_common::*; -/// Bincode-compatible serde implementations for trie types. -/// -/// `bincode` crate allows for more efficient serialization of trie types, because it allows -/// non-string map keys. -/// -/// Read more: -#[cfg(all(feature = "serde", feature = "serde-bincode-compat"))] -pub mod serde_bincode_compat { - pub use super::updates::serde_bincode_compat as updates; -} - /// Trie calculation metrics. #[cfg(feature = "metrics")] pub mod metrics; diff --git a/crates/trie/trie/src/metrics.rs b/crates/trie/trie/src/metrics.rs index 7582f37418d..006dc7e3655 100644 --- a/crates/trie/trie/src/metrics.rs +++ b/crates/trie/trie/src/metrics.rs @@ -1,5 +1,5 @@ use crate::stats::TrieStats; -use metrics::Histogram; +use metrics::{Counter, Histogram}; use reth_metrics::Metrics; /// Wrapper for state root metrics. 
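Many hunks in this diff are one mechanical rewrite: `Option::map_or(false, f)` becomes `is_some_and(f)` (stable since Rust 1.70) and `map_or(true, f)` becomes `is_none_or(f)` (stable since 1.82). A dependency-free illustration of the equivalences being applied:

fn main() {
    let entry: Option<(u32, &str)> = Some((1, "node"));
    // map_or(false, f) == is_some_and(f)
    assert_eq!(entry.map_or(false, |(k, _)| k == 1), entry.is_some_and(|(k, _)| k == 1));

    let missing: Option<u32> = None;
    // map_or(true, f) == is_none_or(f); both are vacuously true for `None`.
    assert_eq!(missing.map_or(true, |v| v > 0), missing.is_none_or(|v| v > 0));
}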
@@ -63,3 +63,23 @@ impl TrieType { } } + +/// Metrics for trie walker +#[derive(Clone, Metrics)] +#[metrics(scope = "trie.walker")] +pub struct WalkerMetrics { + /// The number of subnodes out of order due to wrong tree mask. + out_of_order_subnode: Counter, +} + +impl WalkerMetrics { + /// Create new metrics for the given trie type. + pub fn new(ty: TrieType) -> Self { + Self::new_with_labels(&[("type", ty.as_str())]) + } + + /// Increment `out_of_order_subnode`. + pub fn inc_out_of_order_subnode(&self, amount: u64) { + self.out_of_order_subnode.increment(amount); + } +} diff --git a/crates/trie/trie/src/node_iter.rs b/crates/trie/trie/src/node_iter.rs index feebe36e16e..60219eedd7c 100644 --- a/crates/trie/trie/src/node_iter.rs +++ b/crates/trie/trie/src/node_iter.rs @@ -106,7 +106,7 @@ where if let Some((hashed_key, value)) = self.current_hashed_entry.take() { // If the walker's key is less than the unpacked hashed key, // reset the checked status and continue - if self.walker.key().map_or(false, |key| key < &Nibbles::unpack(hashed_key)) { + if self.walker.key().is_some_and(|key| key < &Nibbles::unpack(hashed_key)) { self.current_walker_key_checked = false; continue } diff --git a/crates/trie/trie/src/proof/blinded.rs b/crates/trie/trie/src/proof/blinded.rs new file mode 100644 index 00000000000..a7b60bc6b27 --- /dev/null +++ b/crates/trie/trie/src/proof/blinded.rs @@ -0,0 +1,148 @@ +use super::{Proof, StorageProof}; +use crate::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; +use alloy_primitives::{ + map::{HashMap, HashSet}, + Bytes, B256, +}; +use reth_execution_errors::SparseTrieError; +use reth_trie_common::{prefix_set::TriePrefixSetsMut, Nibbles}; +use reth_trie_sparse::blinded::{pad_path_to_key, BlindedProvider, BlindedProviderFactory}; +use std::sync::Arc; + +/// Factory for instantiating providers capable of retrieving blinded trie nodes via proofs. +#[derive(Debug)] +pub struct ProofBlindedProviderFactory<T, H> { + /// The cursor factory for traversing trie nodes. + trie_cursor_factory: T, + /// The factory for hashed cursors. + hashed_cursor_factory: H, + /// A set of prefix sets that have changes. + prefix_sets: Arc<TriePrefixSetsMut>, +} + +impl<T, H> ProofBlindedProviderFactory<T, H> { + /// Create new proof-based blinded provider factory. + pub const fn new( + trie_cursor_factory: T, + hashed_cursor_factory: H, + prefix_sets: Arc<TriePrefixSetsMut>, + ) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets } + } +} + +impl<T, H> BlindedProviderFactory for ProofBlindedProviderFactory<T, H> +where + T: TrieCursorFactory + Clone, + H: HashedCursorFactory + Clone, +{ + type AccountNodeProvider = ProofBlindedAccountProvider<T, H>; + type StorageNodeProvider = ProofBlindedStorageProvider<T, H>; + + fn account_node_provider(&self) -> Self::AccountNodeProvider { + ProofBlindedAccountProvider { + trie_cursor_factory: self.trie_cursor_factory.clone(), + hashed_cursor_factory: self.hashed_cursor_factory.clone(), + prefix_sets: self.prefix_sets.clone(), + } + } + + fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider { + ProofBlindedStorageProvider { + trie_cursor_factory: self.trie_cursor_factory.clone(), + hashed_cursor_factory: self.hashed_cursor_factory.clone(), + prefix_sets: self.prefix_sets.clone(), + account, + } + } +} + +/// Blinded provider for retrieving account trie nodes by path. +#[derive(Debug)] +pub struct ProofBlindedAccountProvider<T, H> { + /// The cursor factory for traversing trie nodes. + trie_cursor_factory: T, + /// The factory for hashed cursors.
+ hashed_cursor_factory: H, + /// A set of prefix sets that have changes. + prefix_sets: Arc<TriePrefixSetsMut>, +} + +impl<T, H> ProofBlindedAccountProvider<T, H> { + /// Create new proof-based blinded account node provider. + pub const fn new( + trie_cursor_factory: T, + hashed_cursor_factory: H, + prefix_sets: Arc<TriePrefixSetsMut>, + ) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets } + } +} + +impl<T, H> BlindedProvider for ProofBlindedAccountProvider<T, H> +where + T: TrieCursorFactory + Clone, + H: HashedCursorFactory + Clone, +{ + type Error = SparseTrieError; + + fn blinded_node(&mut self, path: Nibbles) -> Result<Option<Bytes>, Self::Error> { + let targets = HashMap::from_iter([(pad_path_to_key(&path), HashSet::default())]); + let proof = + Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) + .with_prefix_sets_mut(self.prefix_sets.as_ref().clone()) + .multiproof(targets) + .map_err(|error| SparseTrieError::Other(Box::new(error)))?; + + Ok(proof.account_subtree.into_inner().remove(&path)) + } +} + +/// Blinded provider for retrieving storage trie nodes by path. +#[derive(Debug)] +pub struct ProofBlindedStorageProvider<T, H> { + /// The cursor factory for traversing trie nodes. + trie_cursor_factory: T, + /// The factory for hashed cursors. + hashed_cursor_factory: H, + /// A set of prefix sets that have changes. + prefix_sets: Arc<TriePrefixSetsMut>, + /// Target account. + account: B256, +} + +impl<T, H> ProofBlindedStorageProvider<T, H> { + /// Create new proof-based blinded storage node provider. + pub const fn new( + trie_cursor_factory: T, + hashed_cursor_factory: H, + prefix_sets: Arc<TriePrefixSetsMut>, + account: B256, + ) -> Self { + Self { trie_cursor_factory, hashed_cursor_factory, prefix_sets, account } + } +} + +impl<T, H> BlindedProvider for ProofBlindedStorageProvider<T, H> +where + T: TrieCursorFactory + Clone, + H: HashedCursorFactory + Clone, +{ + type Error = SparseTrieError; + + fn blinded_node(&mut self, path: Nibbles) -> Result<Option<Bytes>, Self::Error> { + let targets = HashSet::from_iter([pad_path_to_key(&path)]); + let storage_prefix_set = + self.prefix_sets.storage_prefix_sets.get(&self.account).cloned().unwrap_or_default(); + let proof = StorageProof::new_hashed( + self.trie_cursor_factory.clone(), + self.hashed_cursor_factory.clone(), + self.account, + ) + .with_prefix_set_mut(storage_prefix_set) + .storage_multiproof(targets) + .map_err(|error| SparseTrieError::Other(Box::new(error)))?; + + Ok(proof.subtree.into_inner().remove(&path)) + } +} diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof/mod.rs similarity index 71% rename from crates/trie/trie/src/proof.rs rename to crates/trie/trie/src/proof/mod.rs index e99d686aca7..8e3d0aec2ab 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof/mod.rs @@ -4,7 +4,7 @@ use crate::{ prefix_set::{PrefixSetMut, TriePrefixSetsMut}, trie_cursor::TrieCursorFactory, walker::TrieWalker, - HashBuilder, Nibbles, + HashBuilder, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use alloy_primitives::{ keccak256, @@ -17,6 +17,9 @@ use reth_trie_common::{ proof::ProofRetainer, AccountProof, MultiProof, StorageMultiProof, TrieAccount, }; +mod blinded; +pub use blinded::*; + /// A struct for generating merkle proofs. /// /// Proof generator adds the target address and slots to the prefix set, enables the proof retainer @@ -30,6 +33,8 @@ pub struct Proof<T, H> { hashed_cursor_factory: H, /// A set of prefix sets that have changes. prefix_sets: TriePrefixSetsMut, + /// Flag indicating whether to include branch node hash masks in the proof.
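The two providers above turn every `blinded_node` call into a full proof walk down to the requested path. A sketch of how a caller might use the factory for a one-off account-node lookup (module paths are assumptions; `fetch_account_node` is hypothetical):

use std::sync::Arc;
use alloy_primitives::Bytes;
use reth_trie::{
    hashed_cursor::HashedCursorFactory, proof::ProofBlindedProviderFactory,
    trie_cursor::TrieCursorFactory,
};
use reth_trie_common::{prefix_set::TriePrefixSetsMut, Nibbles};
use reth_trie_sparse::blinded::{BlindedProvider, BlindedProviderFactory};

// Reveal a single account node by path. Each call pays for a proof walk,
// which is why the witness code later in this diff batches its lookups
// into multiproofs instead.
fn fetch_account_node<T, H>(trie: T, hashed: H, path: Nibbles) -> Option<Bytes>
where
    T: TrieCursorFactory + Clone,
    H: HashedCursorFactory + Clone,
{
    ProofBlindedProviderFactory::new(trie, hashed, Arc::new(TriePrefixSetsMut::default()))
        .account_node_provider()
        .blinded_node(path)
        .ok()
        .flatten()
}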
+ collect_branch_node_hash_masks: bool, } impl<T, H> Proof<T, H> { @@ -39,6 +44,7 @@ trie_cursor_factory: t, hashed_cursor_factory: h, prefix_sets: TriePrefixSetsMut::default(), + collect_branch_node_hash_masks: false, } } @@ -48,6 +54,7 @@ trie_cursor_factory, hashed_cursor_factory: self.hashed_cursor_factory, prefix_sets: self.prefix_sets, + collect_branch_node_hash_masks: self.collect_branch_node_hash_masks, } } @@ -57,6 +64,7 @@ trie_cursor_factory: self.trie_cursor_factory, hashed_cursor_factory, prefix_sets: self.prefix_sets, + collect_branch_node_hash_masks: self.collect_branch_node_hash_masks, } } @@ -65,6 +73,12 @@ self.prefix_sets = prefix_sets; self } + + /// Set the flag indicating whether to include branch node hash masks in the proof. + pub const fn with_branch_node_hash_masks(mut self, branch_node_hash_masks: bool) -> Self { + self.collect_branch_node_hash_masks = branch_node_hash_masks; + self + } } impl<T, H> Proof<T, H> where @@ -101,10 +115,15 @@ // Create a hash builder to rebuild the root node since it is not available in the database. let retainer = targets.keys().map(Nibbles::unpack).collect(); - let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); + let mut hash_builder = HashBuilder::default() + .with_proof_retainer(retainer) + .with_updates(self.collect_branch_node_hash_masks); - let mut storages = HashMap::default(); - let mut account_rlp = Vec::with_capacity(128); + // Initialize all storage multiproofs as empty. + // Storage multiproofs for non-empty tries will be overwritten if necessary. + let mut storages: HashMap<_, _> = + targets.keys().map(|key| (*key, StorageMultiProof::empty())).collect(); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut account_node_iter = TrieNodeIter::new(walker, hashed_account_cursor); while let Some(account_node) = account_node_iter.try_next()? { match account_node { @@ -112,19 +131,21 @@ hash_builder.add_branch(node.key, node.value, node.children_are_in_trie); } TrieElement::Leaf(hashed_address, account) => { + let proof_targets = targets.remove(&hashed_address); + let leaf_is_proof_target = proof_targets.is_some(); let storage_prefix_set = self .prefix_sets .storage_prefix_sets .remove(&hashed_address) .unwrap_or_default(); - let proof_targets = targets.remove(&hashed_address).unwrap_or_default(); let storage_multiproof = StorageProof::new_hashed( self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone(), hashed_address, ) .with_prefix_set_mut(storage_prefix_set) - .storage_multiproof(proof_targets)?; + .with_branch_node_hash_masks(self.collect_branch_node_hash_masks) + .storage_multiproof(proof_targets.unwrap_or_default())?; // Encode account account_rlp.clear(); @@ -132,12 +153,29 @@ account.encode(&mut account_rlp as &mut dyn BufMut); hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); - storages.insert(hashed_address, storage_multiproof); + + // We might be adding leaves that are not necessarily our proof targets. + if leaf_is_proof_target { + // Overwrite storage multiproof.
+ storages.insert(hashed_address, storage_multiproof); + } } } } let _ = hash_builder.root(); - Ok(MultiProof { account_subtree: hash_builder.take_proof_nodes(), storages }) + let account_subtree = hash_builder.take_proof_nodes(); + let branch_node_hash_masks = if self.collect_branch_node_hash_masks { + hash_builder + .updated_branch_nodes + .unwrap_or_default() + .into_iter() + .map(|(path, node)| (path, node.hash_mask)) + .collect() + } else { + HashMap::default() + }; + + Ok(MultiProof { account_subtree, branch_node_hash_masks, storages }) } } @@ -152,6 +190,8 @@ pub struct StorageProof<T, H> { hashed_address: B256, /// The set of storage slot prefixes that have changed. prefix_set: PrefixSetMut, + /// Flag indicating whether to include branch node hash masks in the proof. + collect_branch_node_hash_masks: bool, } impl<T, H> StorageProof<T, H> { @@ -167,6 +207,7 @@ hashed_cursor_factory: h, hashed_address, prefix_set: PrefixSetMut::default(), + collect_branch_node_hash_masks: false, } } @@ -177,6 +218,7 @@ trie_cursor_factory, hashed_cursor_factory: self.hashed_cursor_factory, hashed_address: self.hashed_address, prefix_set: self.prefix_set, + collect_branch_node_hash_masks: self.collect_branch_node_hash_masks, } } @@ -187,6 +229,7 @@ trie_cursor_factory: self.trie_cursor_factory, hashed_cursor_factory, hashed_address: self.hashed_address, prefix_set: self.prefix_set, + collect_branch_node_hash_masks: self.collect_branch_node_hash_masks, } } @@ -195,6 +238,12 @@ self.prefix_set = prefix_set; self } + + /// Set the flag indicating whether to include branch node hash masks in the proof. + pub const fn with_branch_node_hash_masks(mut self, branch_node_hash_masks: bool) -> Self { + self.collect_branch_node_hash_masks = branch_node_hash_masks; + self + } } impl<T, H> StorageProof<T, H> where @@ -231,7 +280,9 @@ let walker = TrieWalker::new(trie_cursor, self.prefix_set.freeze()); let retainer = ProofRetainer::from_iter(target_nibbles); - let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); + let mut hash_builder = HashBuilder::default() + .with_proof_retainer(retainer) + .with_updates(self.collect_branch_node_hash_masks); let mut storage_node_iter = TrieNodeIter::new(walker, hashed_storage_cursor); while let Some(node) = storage_node_iter.try_next()?
{ match node { @@ -248,6 +299,18 @@ } let root = hash_builder.root(); - Ok(StorageMultiProof { root, subtree: hash_builder.take_proof_nodes() }) + let subtree = hash_builder.take_proof_nodes(); + let branch_node_hash_masks = if self.collect_branch_node_hash_masks { + hash_builder + .updated_branch_nodes + .unwrap_or_default() + .into_iter() + .map(|(path, node)| (path, node.hash_mask)) + .collect() + } else { + HashMap::default() + }; + + Ok(StorageMultiProof { root, subtree, branch_node_hash_masks }) } } diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index 2af48dfff79..cc5c9d15eac 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -2,15 +2,17 @@ use crate::{ prefix_set::{PrefixSetMut, TriePrefixSetsMut}, Nibbles, }; -use alloy_primitives::{keccak256, Address, B256, U256}; +use alloy_primitives::{ + keccak256, + map::{hash_map, HashMap, HashSet}, + Address, B256, U256, +}; use itertools::Itertools; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use reth_primitives::Account; +use reth_trie_common::KeyHasher; use revm::db::{states::CacheAccount, AccountStatus, BundleAccount}; -use std::{ - borrow::Cow, - collections::{hash_map, HashMap, HashSet}, -}; +use std::borrow::Cow; /// Representation of in-memory hashed state. #[derive(PartialEq, Eq, Clone, Default, Debug)] @@ -25,14 +27,14 @@ impl HashedPostState { /// Initialize [`HashedPostState`] from bundle state. /// Hashes all changed accounts and storage entries that are currently stored in the bundle /// state. - pub fn from_bundle_state<'a>( + pub fn from_bundle_state<'a, KH: KeyHasher>( state: impl IntoParallelIterator<Item = (&'a Address, &'a BundleAccount)>, ) -> Self { let hashed = state .into_par_iter() .map(|(address, account)| { - let hashed_address = keccak256(address); - let hashed_account = account.info.clone().map(Into::into); + let hashed_address = KH::hash_key(address); + let hashed_account = account.info.as_ref().map(Into::into); let hashed_storage = HashedStorage::from_plain_storage( account.status, account.storage.iter().map(|(slot, value)| (slot, &value.present_value)), ); + (hashed_address, (hashed_account, hashed_storage)) + }) .collect::<Vec<(B256, (Option<Account>, HashedStorage))>>(); - let mut accounts = HashMap::with_capacity(hashed.len()); - let mut storages = HashMap::with_capacity(hashed.len()); + let mut accounts = HashMap::with_capacity_and_hasher(hashed.len(), Default::default()); + let mut storages = HashMap::with_capacity_and_hasher(hashed.len(), Default::default()); for (address, (account, storage)) in hashed { accounts.insert(address, account); storages.insert(address, storage); } @@ -52,14 +54,14 @@ impl HashedPostState { /// Initialize [`HashedPostState`] from cached state. /// Hashes all changed accounts and storage entries that are currently stored in cache.
- pub fn from_cache_state<'a>( + pub fn from_cache_state<'a, KH: KeyHasher>( state: impl IntoParallelIterator<Item = (&'a Address, &'a CacheAccount)>, ) -> Self { let hashed = state .into_par_iter() .map(|(address, account)| { - let hashed_address = keccak256(address); - let hashed_account = account.account.as_ref().map(|a| a.info.clone().into()); + let hashed_address = KH::hash_key(address); + let hashed_account = account.account.as_ref().map(|a| (&a.info).into()); let hashed_storage = HashedStorage::from_plain_storage( account.status, account.account.as_ref().map(|a| a.storage.iter()).into_iter().flatten(), ); + (hashed_address, (hashed_account, hashed_storage)) + }) .collect::<Vec<(B256, (Option<Account>, HashedStorage))>>(); - let mut accounts = HashMap::with_capacity(hashed.len()); - let mut storages = HashMap::with_capacity(hashed.len()); + let mut accounts = HashMap::with_capacity_and_hasher(hashed.len(), Default::default()); + let mut storages = HashMap::with_capacity_and_hasher(hashed.len(), Default::default()); for (address, (account, storage)) in hashed { accounts.insert(address, account); storages.insert(address, storage); } @@ -79,7 +81,10 @@ impl HashedPostState { /// Construct [`HashedPostState`] from a single [`HashedStorage`]. pub fn from_hashed_storage(hashed_address: B256, storage: HashedStorage) -> Self { - Self { accounts: HashMap::default(), storages: HashMap::from([(hashed_address, storage)]) } + Self { + accounts: HashMap::default(), + storages: HashMap::from_iter([(hashed_address, storage)]), + } } /// Set account entries on hashed state. @@ -121,7 +126,8 @@ impl HashedPostState { } // Populate storage prefix sets. - let mut storage_prefix_sets = HashMap::with_capacity(self.storages.len()); + let mut storage_prefix_sets = + HashMap::with_capacity_and_hasher(self.storages.len(), Default::default()); for (hashed_address, hashed_storage) in &self.storages { account_prefix_set.insert(Nibbles::unpack(hashed_address)); storage_prefix_sets.insert(*hashed_address, hashed_storage.construct_prefix_set()); @@ -347,7 +353,9 @@ impl HashedStorageSorted { #[cfg(test)] mod tests { + use super::*; use alloy_primitives::Bytes; + use reth_trie_common::KeccakKeyHasher; use revm::{ db::{ states::{plain_account::PlainStorage, StorageSlot}, @@ -356,8 +364,6 @@ mod tests { primitives::{AccountInfo, Bytecode}, }; - use super::*; - #[test] fn hashed_state_wiped_extension() { let hashed_address = B256::default(); @@ -463,7 +469,7 @@ mod tests { let state = vec![(&address, &account)]; // Convert the bundle state into a hashed post state. - let hashed_state = HashedPostState::from_bundle_state(state); + let hashed_state = HashedPostState::from_bundle_state::<KeccakKeyHasher>(state); // Validate the hashed post state. assert_eq!(hashed_state.accounts.len(), 1); @@ -502,7 +508,7 @@ mod tests { let state = vec![(&address, &account)]; // Convert the cache state into a hashed post state. - let hashed_state = HashedPostState::from_cache_state(state); + let hashed_state = HashedPostState::from_cache_state::<KeccakKeyHasher>(state); // Validate the hashed post state.
assert_eq!(hashed_state.accounts.len(), 1); diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index 1bf8cf1ce79..953b8d31fc4 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -7,7 +7,7 @@ use crate::{ trie_cursor::TrieCursorFactory, updates::{StorageTrieUpdates, TrieUpdates}, walker::TrieWalker, - HashBuilder, Nibbles, TrieAccount, + HashBuilder, Nibbles, TrieAccount, TRIE_ACCOUNT_RLP_MAX_SIZE, }; use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{keccak256, Address, B256}; @@ -178,7 +178,7 @@ where } }; - let mut account_rlp = Vec::with_capacity(128); + let mut account_rlp = Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE); let mut hashed_entries_walked = 0; let mut updated_storage_nodes = 0; while let Some(node) = account_node_iter.try_next()? { @@ -202,15 +202,13 @@ where self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone(), hashed_address, - #[cfg(feature = "metrics")] - self.metrics.storage_trie.clone(), - ) - .with_prefix_set( self.prefix_sets .storage_prefix_sets .get(&hashed_address) .cloned() .unwrap_or_default(), + #[cfg(feature = "metrics")] + self.metrics.storage_trie.clone(), ); let storage_root = if retain_updates { @@ -258,11 +256,8 @@ where let root = hash_builder.root(); - trie_updates.finalize( - account_node_iter.walker, - hash_builder, - self.prefix_sets.destroyed_accounts, - ); + let removed_keys = account_node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys, self.prefix_sets.destroyed_accounts); let stats = tracker.finish(); @@ -304,29 +299,32 @@ impl<T, H> StorageRoot<T, H> { trie_cursor_factory: T, hashed_cursor_factory: H, address: Address, + prefix_set: PrefixSet, #[cfg(feature = "metrics")] metrics: TrieRootMetrics, ) -> Self { Self::new_hashed( trie_cursor_factory, hashed_cursor_factory, keccak256(address), + prefix_set, #[cfg(feature = "metrics")] metrics, ) } /// Creates a new storage root calculator given a hashed address. - pub fn new_hashed( + pub const fn new_hashed( trie_cursor_factory: T, hashed_cursor_factory: H, hashed_address: B256, + prefix_set: PrefixSet, #[cfg(feature = "metrics")] metrics: TrieRootMetrics, ) -> Self { Self { trie_cursor_factory, hashed_cursor_factory, hashed_address, - prefix_set: PrefixSet::default(), + prefix_set, #[cfg(feature = "metrics")] metrics, } } @@ -434,7 +432,8 @@ where let root = hash_builder.root(); let mut trie_updates = StorageTrieUpdates::default(); - trie_updates.finalize(storage_node_iter.walker, hash_builder); + let removed_keys = storage_node_iter.walker.take_removed_keys(); + trie_updates.finalize(hash_builder, removed_keys); let stats = tracker.finish(); diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index 851670f4267..fa59b70d1fd 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -3,10 +3,9 @@ use crate::{ forward_cursor::ForwardInMemoryCursor, updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, }; -use alloy_primitives::B256; +use alloy_primitives::{map::HashSet, B256}; use reth_storage_errors::db::DatabaseError; use reth_trie_common::{BranchNodeCompact, Nibbles}; -use std::collections::HashSet; /// The trie cursor factory for the trie updates.
#[derive(Debug, Clone)] @@ -79,13 +78,13 @@ impl<'a, C: TrieCursor> InMemoryAccountTrieCursor<'a, C> { exact: bool, ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> { let in_memory = self.in_memory_cursor.seek(&key); - if exact && in_memory.as_ref().map_or(false, |entry| entry.0 == key) { + if exact && in_memory.as_ref().is_some_and(|entry| entry.0 == key) { return Ok(in_memory) } // Reposition the cursor to the first greater or equal node that wasn't removed. let mut db_entry = self.cursor.seek(key.clone())?; - while db_entry.as_ref().map_or(false, |entry| self.removed_nodes.contains(&entry.0)) { + while db_entry.as_ref().is_some_and(|entry| self.removed_nodes.contains(&entry.0)) { db_entry = self.cursor.next()?; } @@ -105,7 +104,7 @@ impl<'a, C: TrieCursor> InMemoryAccountTrieCursor<'a, C> { let mut db_entry = self.cursor.seek(last.clone())?; while db_entry .as_ref() - .map_or(false, |entry| entry.0 < last || self.removed_nodes.contains(&entry.0)) + .is_some_and(|entry| entry.0 < last || self.removed_nodes.contains(&entry.0)) { db_entry = self.cursor.next()?; } @@ -184,7 +183,7 @@ impl<'a, C> InMemoryStorageTrieCursor<'a, C> { ) -> Self { let in_memory_cursor = updates.map(|u| ForwardInMemoryCursor::new(&u.storage_nodes)); let removed_nodes = updates.map(|u| &u.removed_nodes); - let storage_trie_cleared = updates.map_or(false, |u| u.is_deleted); + let storage_trie_cleared = updates.is_some_and(|u| u.is_deleted); Self { hashed_address, cursor, @@ -204,16 +203,17 @@ impl<C> InMemoryStorageTrieCursor<'_, C> { ) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> { let in_memory = self.in_memory_cursor.as_mut().and_then(|c| c.seek(&key)); if self.storage_trie_cleared || - (exact && in_memory.as_ref().map_or(false, |entry| entry.0 == key)) + (exact && in_memory.as_ref().is_some_and(|entry| entry.0 == key)) { return Ok(in_memory.filter(|(nibbles, _)| !exact || nibbles == &key)) } // Reposition the cursor to the first greater or equal node that wasn't removed. let mut db_entry = self.cursor.seek(key.clone())?; - while db_entry.as_ref().map_or(false, |entry| { - self.removed_nodes.as_ref().map_or(false, |r| r.contains(&entry.0)) - }) { + while db_entry + .as_ref() + .is_some_and(|entry| self.removed_nodes.as_ref().is_some_and(|r| r.contains(&entry.0))) + { db_entry = self.cursor.next()?; } @@ -234,8 +234,8 @@ impl<C> InMemoryStorageTrieCursor<'_, C> { // Reposition the cursor to the first greater or equal node that wasn't removed. let mut db_entry = self.cursor.seek(last.clone())?; - while db_entry.as_ref().map_or(false, |entry| { - entry.0 < last || self.removed_nodes.as_ref().map_or(false, |r| r.contains(&entry.0)) + while db_entry.as_ref().is_some_and(|entry| { + entry.0 < last || self.removed_nodes.as_ref().is_some_and(|r| r.contains(&entry.0)) }) { db_entry = self.cursor.next()?; } diff --git a/crates/trie/trie/src/trie_cursor/subnode.rs b/crates/trie/trie/src/trie_cursor/subnode.rs index 9d5a2770b26..457c1ba4685 100644 --- a/crates/trie/trie/src/trie_cursor/subnode.rs +++ b/crates/trie/trie/src/trie_cursor/subnode.rs @@ -76,7 +76,7 @@ impl CursorSubNode { pub fn state_flag(&self) -> bool { self.node .as_ref() - .map_or(true, |node| self.nibble < 0 || node.state_mask.is_bit_set(self.nibble as u8)) + .is_none_or(|node| self.nibble < 0 || node.state_mask.is_bit_set(self.nibble as u8)) } /// Returns `true` if the tree flag is set for the current nibble.
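The `state_flag`/`tree_flag`/`hash_flag` accessors above all reduce to one bit test against a 16-bit per-child mask, with `nibble == -1` meaning 'the node itself' rather than a child. A dependency-free sketch of that check:

/// Bit `n` of a branch node mask describes child nibble `n`; position -1
/// (before descending into any child) always passes, mirroring the
/// `self.nibble < 0 ||` clauses above.
fn mask_flag(mask: u16, nibble: i8) -> bool {
    nibble < 0 || (mask & (1u16 << nibble as u8)) != 0
}

fn main() {
    let state_mask = 0b0101u16; // children at nibbles 0 and 2
    assert!(mask_flag(state_mask, -1));
    assert!(mask_flag(state_mask, 0));
    assert!(!mask_flag(state_mask, 1));
    assert!(mask_flag(state_mask, 2));
}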
@@ -84,12 +84,12 @@ pub fn tree_flag(&self) -> bool { self.node .as_ref() - .map_or(true, |node| self.nibble < 0 || node.tree_mask.is_bit_set(self.nibble as u8)) + .is_none_or(|node| self.nibble < 0 || node.tree_mask.is_bit_set(self.nibble as u8)) } /// Returns `true` if the current nibble has a root hash. pub fn hash_flag(&self) -> bool { - self.node.as_ref().map_or(false, |node| match self.nibble { + self.node.as_ref().is_some_and(|node| match self.nibble { // This guy has it -1 => node.root_hash.is_some(), // Or get it from the children diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index e75a96d0f1f..647c1486ef0 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -3,9 +3,11 @@ use crate::{ trie_cursor::{CursorSubNode, TrieCursor}, BranchNodeCompact, Nibbles, }; -use alloy_primitives::B256; +use alloy_primitives::{map::HashSet, B256}; use reth_storage_errors::db::DatabaseError; -use std::collections::HashSet; + +#[cfg(feature = "metrics")] +use crate::metrics::WalkerMetrics; /// `TrieWalker` is a structure that enables traversal of a Merkle trie. /// It allows moving through the trie in a depth-first manner, skipping certain branches @@ -24,13 +26,23 @@ pub struct TrieWalker<C> { pub changes: PrefixSet, /// The retained trie node keys that need to be removed. removed_keys: Option<HashSet<Nibbles>>, + #[cfg(feature = "metrics")] + /// Walker metrics. + metrics: WalkerMetrics, } impl<C> TrieWalker<C> { /// Constructs a new `TrieWalker` from existing stack and a cursor. pub fn from_stack(cursor: C, stack: Vec<CursorSubNode>, changes: PrefixSet) -> Self { - let mut this = - Self { cursor, changes, stack, can_skip_current_node: false, removed_keys: None }; + let mut this = Self { + cursor, + changes, + stack, + can_skip_current_node: false, + removed_keys: None, + #[cfg(feature = "metrics")] + metrics: WalkerMetrics::default(), + }; this.update_skip_node(); this } @@ -45,8 +57,13 @@ impl<C> TrieWalker<C> { /// Split the walker into stack and trie updates. pub fn split(mut self) -> (Vec<CursorSubNode>, HashSet<Nibbles>) { - let keys = self.removed_keys.take(); - (self.stack, keys.unwrap_or_default()) + let keys = self.take_removed_keys(); + (self.stack, keys) + } + + /// Take removed keys from the walker. + pub fn take_removed_keys(&mut self) -> HashSet<Nibbles> { + self.removed_keys.take().unwrap_or_default() } /// Prints the current stack of trie nodes. @@ -75,7 +92,7 @@ /// Indicates whether the children of the current node are present in the trie. pub fn children_are_in_trie(&self) -> bool { - self.stack.last().map_or(false, |n| n.tree_flag()) + self.stack.last().is_some_and(|n| n.tree_flag()) } /// Returns the next unprocessed key in the trie. @@ -99,7 +116,7 @@ self.can_skip_current_node = self .stack .last() - .map_or(false, |node| !self.changes.contains(node.full_key()) && node.hash_flag()); + .is_some_and(|node| !self.changes.contains(node.full_key()) && node.hash_flag()); } } @@ -113,6 +130,8 @@ stack: vec![CursorSubNode::default()], can_skip_current_node: false, removed_keys: None, + #[cfg(feature = "metrics")] + metrics: WalkerMetrics::default(), }; // Set up the root node of the trie in the stack, if it exists. @@ -126,11 +145,12 @@ } /// Advances the walker to the next trie node and updates the skip node flag. + /// The new key can then be obtained via `key()`. /// /// # Returns /// - /// * `Result<Option<Nibbles>, Error>` - The next key in the trie or an error.
- pub fn advance(&mut self) -> Result<Option<Nibbles>, DatabaseError> { + /// * `Result<(), Error>` - Unit on success or an error. + pub fn advance(&mut self) -> Result<(), DatabaseError> { if let Some(last) = self.stack.last() { if !self.can_skip_current_node && self.children_are_in_trie() { // If we can't skip the current node and the children are in the trie, @@ -148,8 +168,7 @@ self.update_skip_node(); } - // Return the current key. - Ok(self.key().cloned()) + Ok(()) } /// Retrieves the current root node from the DB, seeking either the exact node or the next one. @@ -179,6 +198,19 @@ self.stack[0].set_nibble(key[0] as i8); } + // The current tree mask might have been set incorrectly. + // Sanity check that the newly retrieved trie node key is the child of the last item + // on the stack. If not, advance to the next sibling instead of adding the node to the + // stack. + if let Some(subnode) = self.stack.last() { + if !key.starts_with(subnode.full_key()) { + #[cfg(feature = "metrics")] + self.metrics.inc_out_of_order_subnode(1); + self.move_to_next_sibling(false)?; + return Ok(()) + } + } + // Create a new CursorSubNode and push it to the stack. + let subnode = CursorSubNode::new(key, Some(node)); + let nibble = subnode.nibble(); diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 39d82a7bda7..e8f5b8741a5 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -1,23 +1,25 @@ use crate::{ hashed_cursor::{HashedCursor, HashedCursorFactory}, prefix_set::TriePrefixSetsMut, - proof::{Proof, StorageProof}, + proof::{Proof, ProofBlindedProviderFactory}, trie_cursor::TrieCursorFactory, HashedPostState, }; -use alloy_consensus::EMPTY_ROOT_HASH; use alloy_primitives::{ keccak256, - map::{HashMap, HashSet}, + map::{Entry, HashMap, HashSet}, Bytes, B256, }; -use alloy_rlp::{BufMut, Decodable, Encodable}; -use itertools::{Either, Itertools}; -use reth_execution_errors::{StateProofError, TrieWitnessError}; -use reth_trie_common::{ - BranchNode, HashBuilder, Nibbles, StorageMultiProof, TrieAccount, TrieNode, CHILD_INDEX_RANGE, +use itertools::Itertools; +use reth_execution_errors::{ + SparseStateTrieError, SparseTrieError, StateProofError, TrieWitnessError, }; -use std::collections::BTreeMap; +use reth_trie_common::Nibbles; +use reth_trie_sparse::{ + blinded::{BlindedProvider, BlindedProviderFactory}, + SparseStateTrie, +}; +use std::sync::{mpsc, Arc}; /// State transition witness for the trie. #[derive(Debug)] @@ -90,168 +92,76 @@ where } let proof_targets = self.get_proof_targets(&state)?; - let mut account_multiproof = + let multiproof = Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) .with_prefix_sets_mut(self.prefix_sets.clone()) .multiproof(proof_targets.clone())?; - // Attempt to compute state root from proofs and gather additional - // information for the witness.
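Since `advance()` above no longer returns the key, iteration reads `key()` between advances. A sketch of the updated calling convention (module paths are assumptions; `collect_walker_keys` is hypothetical):

use reth_storage_errors::db::DatabaseError;
use reth_trie::{trie_cursor::TrieCursor, walker::TrieWalker, Nibbles};

// Walk the whole trie, collecting every unprocessed key the walker stops at.
fn collect_walker_keys<C: TrieCursor>(
    mut walker: TrieWalker<C>,
) -> Result<Vec<Nibbles>, DatabaseError> {
    let mut keys = Vec::new();
    while let Some(key) = walker.key().cloned() {
        keys.push(key);
        walker.advance()?;
    }
    Ok(keys)
}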
- let mut account_rlp = Vec::with_capacity(128); - let mut account_trie_nodes = BTreeMap::default(); - for (hashed_address, hashed_slots) in proof_targets { - let storage_multiproof = account_multiproof - .storages - .remove(&hashed_address) - .unwrap_or_else(StorageMultiProof::empty); + // Record all nodes from multiproof in the witness + for account_node in multiproof.account_subtree.values() { + if let Entry::Vacant(entry) = self.witness.entry(keccak256(account_node.as_ref())) { + entry.insert(account_node.clone()); + } + } + for storage_node in multiproof.storages.values().flat_map(|s| s.subtree.values()) { + if let Entry::Vacant(entry) = self.witness.entry(keccak256(storage_node.as_ref())) { + entry.insert(storage_node.clone()); + } + } - // Gather and record account trie nodes. - let account = state - .accounts - .get(&hashed_address) - .ok_or(TrieWitnessError::MissingAccount(hashed_address))?; - let value = - (account.is_some() || storage_multiproof.root != EMPTY_ROOT_HASH).then(|| { - account_rlp.clear(); - TrieAccount::from((account.unwrap_or_default(), storage_multiproof.root)) - .encode(&mut account_rlp as &mut dyn BufMut); - account_rlp.clone() - }); - let key = Nibbles::unpack(hashed_address); - account_trie_nodes.extend( - self.target_nodes( - key.clone(), - value, - account_multiproof - .account_subtree - .matching_nodes_iter(&key) - .sorted_by(|a, b| a.0.cmp(b.0)), - )?, - ); + let (tx, rx) = mpsc::channel(); + let proof_provider_factory = ProofBlindedProviderFactory::new( + self.trie_cursor_factory, + self.hashed_cursor_factory, + Arc::new(self.prefix_sets), + ); + let mut sparse_trie = + SparseStateTrie::new(WitnessBlindedProviderFactory::new(proof_provider_factory, tx)); + sparse_trie.reveal_multiproof(proof_targets.clone(), multiproof)?; - // Gather and record storage trie nodes for this account. - let mut storage_trie_nodes = BTreeMap::default(); + // Attempt to update state trie to gather additional information for the witness. + for (hashed_address, hashed_slots) in + proof_targets.into_iter().sorted_unstable_by_key(|(ha, _)| *ha) + { + // Update storage trie first. let storage = state.storages.get(&hashed_address); - for hashed_slot in hashed_slots { - let slot_nibbles = Nibbles::unpack(hashed_slot); - let slot_value = storage + let storage_trie = sparse_trie + .storage_trie_mut(&hashed_address) + .ok_or(SparseStateTrieError::Sparse(SparseTrieError::Blind))?; + for hashed_slot in hashed_slots.into_iter().sorted_unstable() { + let storage_nibbles = Nibbles::unpack(hashed_slot); + let maybe_leaf_value = storage .and_then(|s| s.storage.get(&hashed_slot)) .filter(|v| !v.is_zero()) .map(|v| alloy_rlp::encode_fixed_size(v).to_vec()); - storage_trie_nodes.extend( - self.target_nodes( - slot_nibbles.clone(), - slot_value, - storage_multiproof - .subtree - .matching_nodes_iter(&slot_nibbles) - .sorted_by(|a, b| a.0.cmp(b.0)), - )?, - ); - } - - Self::next_root_from_proofs(storage_trie_nodes, |key: Nibbles| { - // Right pad the target with 0s. 
- let mut padded_key = key.pack(); - padded_key.resize(32, 0); - let target_key = B256::from_slice(&padded_key); - let storage_prefix_set = self - .prefix_sets - .storage_prefix_sets - .get(&hashed_address) - .cloned() - .unwrap_or_default(); - let proof = StorageProof::new_hashed( - self.trie_cursor_factory.clone(), - self.hashed_cursor_factory.clone(), - hashed_address, - ) - .with_prefix_set_mut(storage_prefix_set) - .storage_multiproof(HashSet::from_iter([target_key]))?; - - // The subtree only contains the proof for a single target. - let node = - proof.subtree.get(&key).ok_or(TrieWitnessError::MissingTargetNode(key))?; - self.witness.insert(keccak256(node.as_ref()), node.clone()); // record in witness - Ok(node.clone()) - })?; - } - - Self::next_root_from_proofs(account_trie_nodes, |key: Nibbles| { - // Right pad the target with 0s. - let mut padded_key = key.pack(); - padded_key.resize(32, 0); - let targets = HashMap::from_iter([(B256::from_slice(&padded_key), HashSet::default())]); - let proof = - Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) - .with_prefix_sets_mut(self.prefix_sets.clone()) - .multiproof(targets)?; - - // The subtree only contains the proof for a single target. - let node = - proof.account_subtree.get(&key).ok_or(TrieWitnessError::MissingTargetNode(key))?; - self.witness.insert(keccak256(node.as_ref()), node.clone()); // record in witness - Ok(node.clone()) - })?; - Ok(self.witness) - } + if let Some(value) = maybe_leaf_value { + storage_trie + .update_leaf(storage_nibbles, value) + .map_err(SparseStateTrieError::Sparse)?; + } else { + storage_trie + .remove_leaf(&storage_nibbles) + .map_err(SparseStateTrieError::Sparse)?; + } + } - /// Decodes and unrolls all nodes from the proof. Returns only sibling nodes - /// in the path of the target and the final leaf node with updated value. - fn target_nodes<'b>( - &mut self, - key: Nibbles, - value: Option<Vec<u8>>, - proof: impl IntoIterator<Item = (&'b Nibbles, &'b Bytes)>, - ) -> Result<BTreeMap<Nibbles, Either<B256, Vec<u8>>>, TrieWitnessError> { - let mut trie_nodes = BTreeMap::default(); - let mut proof_iter = proof.into_iter().enumerate().peekable(); - while let Some((idx, (path, encoded))) = proof_iter.next() { - // Record the node in witness. - self.witness.insert(keccak256(encoded.as_ref()), encoded.clone()); + // Calculate storage root after updates. + storage_trie.root(); - let mut next_path = path.clone(); - match TrieNode::decode(&mut &encoded[..])? { - TrieNode::Branch(branch) => { - next_path.push(key[path.len()]); - let children = branch_node_children(path.clone(), &branch); - for (child_path, value) in children { - if !key.starts_with(&child_path) { - let value = if value.len() < B256::len_bytes() { - Either::Right(value.to_vec()) - } else { - Either::Left(B256::from_slice(&value[1..])) - }; - trie_nodes.insert(child_path, value); - } - } - } - TrieNode::Extension(extension) => { - next_path.extend_from_slice(&extension.key); - } - TrieNode::Leaf(leaf) => { - next_path.extend_from_slice(&leaf.key); - if next_path != key { - trie_nodes.insert( - next_path.clone(), - Either::Right(leaf.value.as_slice().to_vec()), - ); - } - } - TrieNode::EmptyRoot => { - if idx != 0 || proof_iter.peek().is_some() { - return Err(TrieWitnessError::UnexpectedEmptyRoot(next_path)) - } - } - }; - } + let account = state + .accounts + .get(&hashed_address) + .ok_or(TrieWitnessError::MissingAccount(hashed_address))? 
+ .unwrap_or_default(); + sparse_trie.update_account(hashed_address, account)?; - if let Some(value) = value { - trie_nodes.insert(key, Either::Right(value)); + while let Ok(node) = rx.try_recv() { + self.witness.insert(keccak256(&node), node); + } } - Ok(trie_nodes) + Ok(self.witness) } /// Retrieve proof targets for incoming hashed state. @@ -272,97 +182,77 @@ where let mut storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor(*hashed_address)?; // position cursor at the start - if let Some((hashed_slot, _)) = storage_cursor.seek(B256::ZERO)? { - storage_keys.insert(hashed_slot); - } - while let Some((hashed_slot, _)) = storage_cursor.next()? { + let mut current_entry = storage_cursor.seek(B256::ZERO)?; while let Some((hashed_slot, _)) = current_entry { storage_keys.insert(hashed_slot); + current_entry = storage_cursor.next()?; } } proof_targets.insert(*hashed_address, storage_keys); } Ok(proof_targets) } +} - fn next_root_from_proofs( - trie_nodes: BTreeMap<Nibbles, Either<B256, Vec<u8>>>, - mut trie_node_provider: impl FnMut(Nibbles) -> Result<Bytes, TrieWitnessError>, - ) -> Result<B256, TrieWitnessError> { - // Ignore branch child hashes in the path of leaves or lower child hashes. - let mut keys = trie_nodes.keys().peekable(); - let mut ignored = HashSet::<Nibbles>::default(); - while let Some(key) = keys.next() { - if keys.peek().map_or(false, |next| next.starts_with(key)) { - ignored.insert(key.clone()); - } - } +#[derive(Debug)] +struct WitnessBlindedProviderFactory<F> { + /// Blinded node provider factory. + provider_factory: F, + /// Sender for forwarding fetched blinded node. + tx: mpsc::Sender<Bytes>, +} - let mut hash_builder = HashBuilder::default(); - let mut trie_nodes = trie_nodes.into_iter().filter(|e| !ignored.contains(&e.0)).peekable(); - while let Some((path, value)) = trie_nodes.next() { - match value { - Either::Left(branch_hash) => { - let parent_branch_path = path.slice(..path.len() - 1); - if hash_builder.key.starts_with(&parent_branch_path) || - trie_nodes - .peek() - .map_or(false, |next| next.0.starts_with(&parent_branch_path)) - { - hash_builder.add_branch(path, branch_hash, false); - } else { - // Parent is a branch node that needs to be turned into an extension node. - let mut path = path.clone(); - loop { - let node = trie_node_provider(path.clone())?; - match TrieNode::decode(&mut &node[..])? 
{ - TrieNode::Branch(branch) => { - let children = branch_node_children(path, &branch); - for (child_path, value) in children { - if value.len() < B256::len_bytes() { - hash_builder.add_leaf(child_path, value); - } else { - let hash = B256::from_slice(&value[1..]); - hash_builder.add_branch(child_path, hash, false); - } - } - break - } - TrieNode::Leaf(leaf) => { - let mut child_path = path; - child_path.extend_from_slice(&leaf.key); - hash_builder.add_leaf(child_path, &leaf.value); - break - } - TrieNode::Extension(ext) => { - path.extend_from_slice(&ext.key); - } - TrieNode::EmptyRoot => { - return Err(TrieWitnessError::UnexpectedEmptyRoot(path)) - } - } - } - } - } - Either::Right(leaf_value) => { - hash_builder.add_leaf(path, &leaf_value); - } - } - } - Ok(hash_builder.root()) +impl<F> WitnessBlindedProviderFactory<F> { + const fn new(provider_factory: F, tx: mpsc::Sender<Bytes>) -> Self { + Self { provider_factory, tx } + } +} + +impl<F> BlindedProviderFactory for WitnessBlindedProviderFactory<F> +where + F: BlindedProviderFactory, + F::AccountNodeProvider: BlindedProvider<Error = SparseTrieError>, + F::StorageNodeProvider: BlindedProvider<Error = SparseTrieError>, +{ + type AccountNodeProvider = WitnessBlindedProvider<F::AccountNodeProvider>; + type StorageNodeProvider = WitnessBlindedProvider<F::StorageNodeProvider>; + + fn account_node_provider(&self) -> Self::AccountNodeProvider { + let provider = self.provider_factory.account_node_provider(); + WitnessBlindedProvider::new(provider, self.tx.clone()) + } + + fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider { + let provider = self.provider_factory.storage_node_provider(account); + WitnessBlindedProvider::new(provider, self.tx.clone()) + } +} + +#[derive(Debug)] +struct WitnessBlindedProvider<P> { + /// Proof-based blinded. + provider: P, + /// Sender for forwarding fetched blinded node. + tx: mpsc::Sender<Bytes>, +} + +impl<P> WitnessBlindedProvider<P> { + const fn new(provider: P, tx: mpsc::Sender<Bytes>) -> Self { + Self { provider, tx } } } -/// Returned branch node children with keys in order. -fn branch_node_children(prefix: Nibbles, node: &BranchNode) -> Vec<(Nibbles, &[u8])> { - let mut children = Vec::with_capacity(node.state_mask.count_ones() as usize); - let mut stack_ptr = node.as_ref().first_child_index(); - for index in CHILD_INDEX_RANGE { - if node.state_mask.is_bit_set(index) { - let mut child_path = prefix.clone(); - child_path.push(index); - children.push((child_path, &node.stack[stack_ptr][..])); - stack_ptr += 1; +impl<P> BlindedProvider for WitnessBlindedProvider<P> +where + P: BlindedProvider<Error = SparseTrieError>, +{ + type Error = P::Error; + + fn blinded_node(&mut self, path: Nibbles) -> Result<Option<Bytes>, Self::Error> { + let maybe_node = self.provider.blinded_node(path)?; + if let Some(node) = &maybe_node { + self.tx.send(node.clone()).map_err(|error| SparseTrieError::Other(Box::new(error)))?; } + Ok(maybe_node) } - children } diff --git a/deny.toml b/deny.toml index e5823460250..8d0807f9de5 100644 --- a/deny.toml +++ b/deny.toml @@ -4,8 +4,12 @@ [advisories] yanked = "warn" ignore = [ - # proc-macro-error 1.0.4 unmaintained https://rustsec.org/advisories/RUSTSEC-2024-0370 - "RUSTSEC-2024-0370" + # https://rustsec.org/advisories/RUSTSEC-2024-0379 used by boa (js-tracer) + "RUSTSEC-2024-0379", + # https://rustsec.org/advisories/RUSTSEC-2024-0384 used by sse example + "RUSTSEC-2024-0384", + # https://rustsec.org/advisories/RUSTSEC-2024-0388 used by ssz, will be removed https://github.com/sigp/ethereum_ssz/pull/34 + "RUSTSEC-2024-0388" ] # This section is considered when running `cargo deny check bans`. @@ -58,7 +62,7 @@ allow = [ # aren't accepted for every possible crate as with the normal allow list exceptions = [ # TODO: decide on MPL-2.0 handling - # These dependencies are grandfathered in in https://github.com/paradigmxyz/reth/pull/6980 + # These dependencies are grandfathered in https://github.com/paradigmxyz/reth/pull/6980 { allow = ["MPL-2.0"], name = "option-ext" }, { allow = ["MPL-2.0"], name = "webpki-roots" }, ] diff --git a/docs/crates/db.md b/docs/crates/db.md index 3ccfb72e344..688f7ea76cc 100644 --- a/docs/crates/db.md +++ b/docs/crates/db.md @@ -61,7 +61,6 @@ There are many tables within the node, all used to store different types of data - StageCheckpointProgresses - PruneCheckpoints - VersionHistory -- BlockRequests - ChainState
@@ -213,7 +212,7 @@ pub trait DbTxMut: Send + Sync { Let's take a look at the `DbTx` and `DbTxMut` traits in action. -Revisiting the `DatabaseProvider` struct as an exampl, the `DatabaseProvider::header_by_number()` function uses the `DbTx::get()` function to get a header from the `Headers` table. +Revisiting the `DatabaseProvider` struct as an example, the `DatabaseProvider::header_by_number()` function uses the `DbTx::get()` function to get a header from the `Headers` table. [File: crates/storage/provider/src/providers/database/provider.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/storage/provider/src/providers/database/provider.rs#L1319-L1336) @@ -268,7 +267,7 @@ let mut headers_cursor = provider.tx_ref().cursor_read::<tables::Headers>()?; let headers_walker = headers_cursor.walk_range(block_range.clone())?; ``` -Lets look at an examples of how cursors are used. The code snippet below contains the `unwind` method from the `BodyStage` defined in the `stages` crate. This function is responsible for unwinding any changes to the database if there is an error when executing the body stage within the Reth pipeline. +Let's look at an example of how cursors are used. The code snippet below contains the `unwind` method from the `BodyStage` defined in the `stages` crate. This function is responsible for unwinding any changes to the database if there is an error when executing the body stage within the Reth pipeline. [File: crates/stages/stages/src/stages/bodies.rs](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/crates/stages/stages/src/stages/bodies.rs#L267-L345) @@ -283,7 +282,6 @@ fn unwind(&mut self, provider: &DatabaseProviderRW, input: UnwindInput) { let mut body_cursor = tx.cursor_write::<tables::BlockBodyIndices>()?; let mut ommers_cursor = tx.cursor_write::<tables::BlockOmmers>()?; let mut withdrawals_cursor = tx.cursor_write::<tables::BlockWithdrawals>()?; - let mut requests_cursor = tx.cursor_write::<tables::BlockRequests>()?; // Cursors to unwind transitions let mut tx_block_cursor = tx.cursor_write::<tables::TransactionBlocks>()?; @@ -322,7 +320,7 @@ fn unwind(&mut self, provider: &DatabaseProviderRW, input: UnwindInput) { } ``` -This function first grabs a mutable cursor for the `BlockBodyIndices`, `BlockOmmers`, `BlockWithdrawals`, `BlockRequests`, `TransactionBlocks` tables. +This function first grabs mutable cursors for the `BlockBodyIndices`, `BlockOmmers`, `BlockWithdrawals`, `TransactionBlocks` tables. Then it gets a walker of the block body cursor, and then walks backwards through the cursor to delete the block body entries from the last block number to the block number specified in the `UnwindInput` struct. @@ -332,7 +330,7 @@ While this is a brief look at how cursors work in the context of database tables ## Summary -This chapter was packed with information, so lets do a quick review. The database is comprised of tables, with each table being a collection of key-value pairs representing various pieces of data in the blockchain. Any struct that implements the `Database` trait can view, update or delete entries in the various tables. The database design leverages nested traits and generic associated types to provide methods to interact with each table in the database. +This chapter was packed with information, so let's do a quick review. The database is comprised of tables, with each table being a collection of key-value pairs representing various pieces of data in the blockchain. Any struct that implements the `Database` trait can view, update or delete entries in the various tables. 
The database design leverages nested traits and generic associated types to provide methods to interact with each table in the database.
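
To make the summary concrete, here is a minimal sketch of the read path described in this chapter. It is illustrative only, not a snippet from the codebase, and assumes some `db` value whose type implements the `Database` trait:

```rust,ignore
// Open a read-only transaction, read a single entry, and release the transaction.
let tx = db.tx()?;
// `DbTx::get` is keyed by the table's `Key` type; for `Headers` that is a block number.
let header = tx.get::<tables::Headers>(100)?; // -> Option<Header>
tx.commit()?;
```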
diff --git a/docs/crates/network.md b/docs/crates/network.md index a6ac2430565..7e38ac5d601 100644 --- a/docs/crates/network.md +++ b/docs/crates/network.md @@ -787,8 +787,24 @@ The `TransactionsManager.network_events` stream is the first to have all of its The events received in this channel are of type `NetworkEvent`: [File: crates/net/network/src/manager.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/manager.rs) + +```rust,ignore +pub enum NetworkEvent { + /// Basic peer lifecycle event. + Peer(PeerEvent), + /// Session established with requests. + ActivePeerSession { + /// Session information + info: SessionInfo, + /// A request channel to the session task. + messages: PeerRequestSender, + }, +} +``` + +and with `PeerEvent` defined as: ```rust,ignore -pub enum NetworkEvent { +pub enum PeerEvent { /// Closed the peer session. SessionClosed { /// The identifier of the peer to which a session was closed. peer_id: PeerId, reason: Option<DisconnectReason>, }, /// Established a new session with the given peer. - SessionEstablished { - /// The identifier of the peer to which a session was established. - peer_id: PeerId, - /// Capabilities the peer announced - capabilities: Arc<Capabilities>, - /// A request channel to the session task. - messages: PeerRequestSender, - /// The status of the peer to which a session was established. - status: Status, - }, + SessionEstablished(SessionInfo), /// Event emitted when a new peer is added PeerAdded(PeerId), /// Event emitted when a new peer is removed PeerRemoved(PeerId), } ``` +[File: crates/net/network-api/src/events.rs](https://github.com/paradigmxyz/reth/blob/c46b5fc1157d12184d1dceb4dc45e26cf74b2bc6/crates/net/network-api/src/events.rs) -They're handled with the `on_network_event` method, which responds to the two variants of the `NetworkEvent` enum in the following ways: +They're handled with the `on_network_event` method, which processes session events through `NetworkEvent::Peer(PeerEvent::SessionClosed)`, `NetworkEvent::Peer(PeerEvent::SessionEstablished)`, and `NetworkEvent::ActivePeerSession` to initialize peer connections and set up transaction broadcasting. -**`NetworkEvent::SessionClosed`** +Variants of the `PeerEvent` enum are handled in the following ways: + +**`PeerEvent::PeerAdded`** +Emitted when a new peer is added to the network via the network handle. + +**`PeerEvent::PeerRemoved`** +Emitted when a peer is removed from the network. + +**`PeerEvent::SessionClosed`** +Closes the peer session after disconnection. Removes the peer given by `PeerEvent::SessionClosed.peer_id` from the `TransactionsManager.peers` map. -**`NetworkEvent::SessionEstablished`** +**`PeerEvent::SessionEstablished`** Begins by inserting a `Peer` into `TransactionsManager.peers` by `peer_id`, which is a struct of the following form: [File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore @@ -840,33 +856,30 @@ After the `Peer` is added to `TransactionsManager.peers`, the hashes of all of t [File: crates/net/network/src/transactions.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/network/src/transactions.rs) ```rust,ignore -fn on_network_event(&mut self, event: NetworkEvent) { - match event { - NetworkEvent::SessionClosed { peer_id, .. } => { +fn on_network_event(&mut self, event_result: NetworkEvent) { + match event_result { + NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, .. 
}) => { // remove the peer self.peers.remove(&peer_id); + self.transaction_fetcher.remove_peer(&peer_id); } - NetworkEvent::SessionEstablished { peer_id, messages, .. } => { - // insert a new peer - self.peers.insert( - peer_id, - Peer { - transactions: LruCache::new( - NonZeroUsize::new(PEER_TRANSACTION_CACHE_LIMIT).unwrap(), - ), - request_tx: messages, - }, - ); - - // Send a `NewPooledTransactionHashes` to the peer with _all_ transactions in the - // pool - let msg = NewPooledTransactionHashes(self.pool.pooled_transactions()); - self.network.send_message(NetworkHandleMessage::SendPooledTransactionHashes { - peer_id, - msg, - }) + NetworkEvent::ActivePeerSession { info, messages } => { + // process active peer session and broadcast available transactions from the pool + self.handle_peer_session(info, messages); + } + NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => { + let peer_id = info.peer_id; + // get messages from existing peer + let messages = match self.peers.get(&peer_id) { + Some(p) => p.request_tx.clone(), + None => { + debug!(target: "net::tx", ?peer_id, "No peer request sender found"); + return; + } + }; + self.handle_peer_session(info, messages); } - _ => {} + _ => {} } } ``` @@ -991,9 +1004,9 @@ fn import_transactions(&mut self, peer_id: PeerId, transactions: Vec { // transaction was already inserted entry.get_mut().push(peer_id); diff --git a/docs/crates/stages.md b/docs/crates/stages.md index c7815b453b4..14666c1f44f 100644 --- a/docs/crates/stages.md +++ b/docs/crates/stages.md @@ -43,7 +43,7 @@ pub trait Stage<DB: Database>: Send + Sync { } ``` -To get a better idea of what is happening at each part of the pipeline, lets walk through what is going on under the hood within the `execute()` function at each stage, starting with `HeaderStage`. +To get a better idea of what is happening at each part of the pipeline, let's walk through what is going on under the hood within the `execute()` function at each stage, starting with `HeaderStage`.
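
Tying the `docs/crates/network.md` changes above together, here is a minimal sketch of consuming the reworked `NetworkEvent` stream. It mirrors the `bsc-p2p` example further down in this diff and assumes an already-configured `net_handle`:

```rust,ignore
// `net_handle` is assumed to implement `NetworkEventListenerProvider`.
let mut events = net_handle.event_listener();
while let Some(evt) = events.next().await {
    match evt {
        // Session metadata now arrives bundled in `SessionInfo`, with the
        // request channel to the session task delivered alongside it.
        NetworkEvent::ActivePeerSession { info, messages } => {
            println!("session established with peer {}", info.peer_id);
            let _ = messages;
        }
        NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, reason }) => {
            println!("session to {peer_id} closed: {reason:?}");
        }
        _ => {}
    }
}
```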
diff --git a/docs/design/database.md b/docs/design/database.md index cf2a6c8fcc1..48fc8612cba 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -56,7 +56,7 @@ BlockWithdrawals { } Transactions { u64 TxNumber "PK" - TransactionSignedNoHash Data + TransactionSigned Data } TransactionHashNumbers { B256 TxHash "PK" diff --git a/docs/design/metrics.md b/docs/design/metrics.md index 0ac1f71c90d..cc386a11251 100644 --- a/docs/design/metrics.md +++ b/docs/design/metrics.md @@ -42,7 +42,7 @@ There will only ever exist one description per metric `KeyName`; it is not possi The `metrics` crate provides three macros per metric variant: `register_<variant>!`, `<variant>!`, and `describe_<variant>!`. Prefer to use these where possible, since they generate the code necessary to register and update metrics under various conditions. - The `register_<variant>!` macro simply creates the metric and returns a handle to it (e.g. a `Counter`). These metric structs are thread-safe and cheap to clone. -- The `<variant>!` macro registers the metric if it does not exist, and updates it's value. +- The `<variant>!` macro registers the metric if it does not exist, and updates its value. - The `describe_<variant>!` macro adds an end-user description for the metric. How the metrics are exposed to the end-user is determined by the CLI. diff --git a/docs/repo/ci.md b/docs/repo/ci.md index 5ed2cec0091..863a18f9c38 100644 --- a/docs/repo/ci.md +++ b/docs/repo/ci.md @@ -7,8 +7,7 @@ The CI runs a couple of workflows: - **[unit]**: Runs unit tests (tests in `src/`) and doc tests - **[integration]**: Runs integration tests (tests in `tests/` and sync tests) - **[bench]**: Runs benchmarks -- **[eth-sync]**: Runs Ethereum mainnet sync tests -- **[op-sync]**: Runs base mainnet sync tests for Optimism +- **[sync]**: Runs sync tests - **[stage]**: Runs all `stage run` commands ### Docs @@ -38,8 +37,7 @@ The CI runs a couple of workflows: [unit]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/unit.yml [integration]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/integration.yml [bench]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/bench.yml -[eth-sync]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/eth-sync.yml -[op-sync]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/op-sync.yml +[sync]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/sync.yml [stage]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/stage.yml [book]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/book.yml [deny]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/deny.yml diff --git a/docs/repo/labels.md b/docs/repo/labels.md index 6b3dba97ee6..6772b828ffc 100644 --- a/docs/repo/labels.md +++ b/docs/repo/labels.md @@ -30,7 +30,7 @@ For easier at-a-glance communication of the status of issues and PRs the followi - https://github.com/paradigmxyz/reth/labels/S-duplicate - https://github.com/paradigmxyz/reth/labels/S-wontfix -**Misc.** +**Miscellaneous** - https://github.com/paradigmxyz/reth/labels/S-needs-triage - https://github.com/paradigmxyz/reth/labels/S-controversial diff --git a/docs/repo/layout.md b/docs/repo/layout.md index 6ed91e79656..dcb475e020e 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -82,7 +82,6 @@ The networking component mainly lives in [`net/network`](../../crates/net/networ Different consensus mechanisms. - [`consensus/common`](../../crates/consensus/common): Common consensus functions and traits (e.g. 
fee calculation) -- [`consensus/auto-seal`](../../crates/consensus/auto-seal): A consensus mechanism that auto-seals blocks for local development (also commonly known as "auto-mine") - [`consensus/beacon`](../../crates/consensus/beacon): Consensus mechanism that handles messages from a beacon node ("eth2") ### Execution @@ -133,7 +132,7 @@ The IPC transport lives in [`rpc/ipc`](../../crates/rpc/ipc). - Supported transports: HTTP, WS, IPC - Supported namespaces: `eth_`, `engine_`, `debug_` - [`rpc/rpc-eth-api`](../../crates/rpc/rpc-eth-api/): Reth RPC 'eth' namespace API (including interface and implementation), this crate is re-exported by `rpc/rpc-api` -- [`rpc/rpc-eth-types`](../../crates/rpc/rpc-eth-types/): Types `supporting implementation` of 'eth' namespace RPC server API +- [`rpc/rpc-eth-types`](../../crates/rpc/rpc-eth-types/): Types `supporting the implementation` of 'eth' namespace RPC server API - [`rpc/rpc-server-types`](../../crates/rpc/rpc-server-types/): RPC server types and constants #### Utilities Crates @@ -160,7 +159,7 @@ These crates define primitive types or algorithms. ### Optimism -Crates related to the Optimism rollup are lives in [optimism](../../crates/optimism/). +Crates related to the Optimism rollup live in [optimism](../../crates/optimism/). ### Misc diff --git a/etc/README.md b/etc/README.md index f80b5b774b6..4f4ce7f20e4 100644 --- a/etc/README.md +++ b/etc/README.md @@ -2,7 +2,8 @@ This directory contains miscellaneous files, such as example Grafana dashboards and Prometheus configuration. -The files in this directory may undergo a lot of changes while reth is unstable, so do not expect them to necessarily be up to date. +The files in this directory may undergo a lot of changes while reth is unstable, so do not expect them to necessarily be +up to date. ### Overview @@ -11,8 +12,67 @@ The files in this directory may undergo a lot of changes while reth is unstable, ### Docker Compose -To run Reth, Grafana or Prometheus with Docker Compose, refer to the [docker docs](/book/installation/docker.md#using-docker-compose). +To run Reth, Grafana or Prometheus with Docker Compose, refer to +the [docker docs](/book/installation/docker.md#using-docker-compose). -### Import Grafana dashboards +### Grafana -Running Grafana in Docker makes it possible to import existing dashboards, refer to [docs on how to run only Grafana in Docker](/book/installation/docker.md#using-docker-compose#run-only-grafana-in-docker). \ No newline at end of file +#### Adding a new metric to Grafana + +To set up a new metric in Reth and its Grafana dashboard (this assumes running Reth and Grafana instances): + +1. Add the metric to the codebase following the [metrics section](../docs/design/metrics.md#creating-metrics) + documentation. + +1. Access Grafana: + + - Open `http://localhost:3000/` in a browser + - Log in with username and password `admin` + - Navigate to the `Dashboards` tab + +1. Create or modify a dashboard: + + - Select an existing dashboard or create a new one + - Click `Add` > `Visualization` to create a new panel + +1. Configure your metric panel: + + - Set a panel title and description + - Select metric(s) from the `Metrics browser` or use the `PromQL` terminal + - Document your metric(s) by setting units, legends, etc. + - When adding multiple metrics, use field overwrites if needed + +1. Save and arrange: + + - Click `Apply` to save the panel + - Drag the panel to desired position on the dashboard + +1. 
Export the dashboard: + + - Click `Share` > `Export` + - Toggle `Export for sharing externally` + - Click `Save to file` + +1. Update dashboard file: + - Replace the content of the corresponding file in the [dashboards folder](./grafana/dashboards) with the exported + JSON + +Your new metric is now integrated into the Reth Grafana dashboard. + +#### Import Grafana dashboards + +If you are running Reth and Grafana outside of docker, and wish to import new Grafana dashboards or update a dashboard: + +1. Go to `Home` > `Dashboards` + +1. Click `New` > `Import` + +1. Drag the JSON dashboard file to import it + +1. If updating an existing dashboard, you will need to change the name and UID of the imported dashboard in order to + avoid conflict + +1. Delete the old dashboard + +If you are running Reth and Grafana using docker, after having pulled the updated dashboards from `main`, restart the +Grafana service. This will update all dashboards. \ No newline at end of file diff --git a/etc/docker-compose.yml b/etc/docker-compose.yml index 618aa6f5ae6..cd7dd6dd263 100644 --- a/etc/docker-compose.yml +++ b/etc/docker-compose.yml @@ -65,7 +65,7 @@ services: sh -c "cp -r /etc/grafana/provisioning_temp/dashboards/. /etc/grafana/provisioning/dashboards && find /etc/grafana/provisioning/dashboards/ -name '*.json' -exec sed -i 's/$${DS_PROMETHEUS}/Prometheus/g' {} \+ && /run.sh" - + volumes: mainnet_data: driver: local diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 15786764f42..39ccdffe34f 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -1007,13 +1007,242 @@ "title": "Sync progress (stage progress as highest block number reached)", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Tracks the number of critical tasks currently run by the executor.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "semi-dark-red", + "value": 0 + } + ] + }, + "unit": "tasks" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 20 + }, + "id": 248, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_executor_spawn_critical_tasks_total{instance=\"$instance\"} - reth_executor_spawn_finished_critical_tasks_total{instance=\"$instance\"}", + "hide": false, + "instant": false, + "legendFormat": "Tasks running", + "range": true, + "refId": "C" + } + ], + "title": "Task Executor critical tasks", + "type": 
"timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Tracks the number of regular tasks currently ran by the executor.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "semi-dark-red", + "value": 80 + } + ] + }, + "unit": "tasks/s" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "C" + }, + "properties": [ + { + "id": "unit", + "value": "tasks" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 20 + }, + "id": 247, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "rate(reth_executor_spawn_regular_tasks_total{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Tasks started", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_executor_spawn_regular_tasks_total{instance=\"$instance\"}- reth_executor_spawn_finished_regular_tasks_total{instance=\"$instance\"}", + "hide": false, + "instant": false, + "legendFormat": "Tasks running", + "range": true, + "refId": "C" + } + ], + "title": "Task Executor regular tasks", + "type": "timeseries" + }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 20 + "y": 28 }, "id": 38, "panels": [], @@ -1085,7 +1314,7 @@ "h": 8, "w": 12, "x": 0, - "y": 21 + "y": 29 }, "id": 40, "options": { @@ -1145,7 +1374,7 @@ "h": 8, "w": 12, "x": 12, - "y": 21 + "y": 29 }, "id": 42, "maxDataPoints": 25, @@ -1273,7 +1502,7 @@ "h": 8, "w": 12, "x": 0, - "y": 29 + "y": 37 }, "id": 117, "options": { @@ -1370,7 +1599,7 @@ "h": 8, "w": 12, "x": 12, - "y": 29 + "y": 37 }, "id": 116, "options": { @@ -1471,7 +1700,7 @@ "h": 8, "w": 12, "x": 0, - "y": 37 + "y": 45 }, "id": 119, "options": { @@ -1572,7 +1801,7 @@ "h": 8, "w": 12, "x": 12, - "y": 37 + "y": 45 }, "id": 118, "options": { @@ -1634,7 +1863,7 @@ "h": 8, "w": 12, "x": 0, - "y": 45 + "y": 53 }, "id": 48, "options": { @@ -1724,6 +1953,7 @@ "mode": "off" } }, + "decimals": 4, "mappings": [], "thresholds": { "mode": "absolute", @@ -1746,7 +1976,7 @@ "h": 8, "w": 12, "x": 12, - "y": 45 + "y": 53 }, "id": 52, "options": { @@ -1804,7 +2034,7 @@ "h": 8, "w": 12, "x": 0, - "y": 53 + "y": 61 }, "id": 50, "options": { @@ -1972,7 +2202,7 @@ "h": 8, "w": 12, "x": 12, - "y": 53 + "y": 61 }, "id": 58, 
"options": { @@ -2073,7 +2303,7 @@ "h": 8, "w": 12, "x": 0, - "y": 61 + "y": 69 }, "id": 113, "options": { @@ -2110,7 +2340,7 @@ "h": 1, "w": 24, "x": 0, - "y": 69 + "y": 77 }, "id": 203, "panels": [], @@ -2144,7 +2374,7 @@ "h": 8, "w": 8, "x": 0, - "y": 70 + "y": 78 }, "id": 202, "options": { @@ -2305,7 +2535,7 @@ "h": 8, "w": 8, "x": 8, - "y": 70 + "y": 78 }, "id": 204, "options": { @@ -2455,7 +2685,7 @@ "h": 8, "w": 8, "x": 16, - "y": 70 + "y": 78 }, "id": 205, "options": { @@ -2556,7 +2786,7 @@ "h": 8, "w": 12, "x": 0, - "y": 78 + "y": 86 }, "id": 206, "options": { @@ -2653,7 +2883,7 @@ "h": 8, "w": 12, "x": 12, - "y": 78 + "y": 86 }, "id": 207, "options": { @@ -2690,7 +2920,7 @@ "h": 1, "w": 24, "x": 0, - "y": 86 + "y": 94 }, "id": 46, "panels": [], @@ -2761,7 +2991,7 @@ "h": 8, "w": 24, "x": 0, - "y": 87 + "y": 95 }, "id": 56, "options": { @@ -2787,6 +3017,102 @@ "legendFormat": "Gas/s", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[1m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (1m)", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[5m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (5m)", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[10m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (10m)", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[30m])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (30m)", + "range": true, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[1h])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (1h)", + "range": true, + "refId": "F", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "avg_over_time(reth_sync_execution_gas_per_second{instance=~\"$instance\"}[24h])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "legendFormat": "Avg Gas/s (24h)", + "range": true, + "refId": "G", + "useBackend": false } ], "title": "Execution throughput", @@ -2857,7 +3183,7 @@ "h": 11, "w": 24, "x": 0, - "y": 95 + "y": 103 }, "id": 240, "options": { @@ -2916,7 +3242,7 @@ "h": 1, "w": 24, "x": 0, - "y": 106 + "y": 114 }, "id": 24, "panels": [], 
@@ -3014,7 +3340,7 @@ "h": 8, "w": 12, "x": 0, - "y": 107 + "y": 115 }, "id": 26, "options": { @@ -3148,7 +3474,7 @@ "h": 8, "w": 12, "x": 12, - "y": 107 + "y": 115 }, "id": 33, "options": { @@ -3268,7 +3594,7 @@ "h": 8, "w": 12, "x": 0, - "y": 115 + "y": 123 }, "id": 36, "options": { @@ -3317,7 +3643,7 @@ "h": 1, "w": 24, "x": 0, - "y": 123 + "y": 131 }, "id": 32, "panels": [], @@ -3425,7 +3751,7 @@ "h": 8, "w": 12, "x": 0, - "y": 124 + "y": 132 }, "id": 30, "options": { @@ -3591,7 +3917,7 @@ "h": 8, "w": 12, "x": 12, - "y": 124 + "y": 132 }, "id": 28, "options": { @@ -3711,7 +4037,7 @@ "h": 8, "w": 12, "x": 0, - "y": 132 + "y": 140 }, "id": 35, "options": { @@ -3837,7 +4163,7 @@ "h": 8, "w": 12, "x": 12, - "y": 132 + "y": 140 }, "id": 73, "options": { @@ -3964,7 +4290,7 @@ "h": 8, "w": 12, "x": 0, - "y": 140 + "y": 148 }, "id": 102, "options": { @@ -4027,7 +4353,7 @@ "h": 1, "w": 24, "x": 0, - "y": 148 + "y": 156 }, "id": 79, "panels": [], @@ -4101,7 +4427,7 @@ "h": 8, "w": 12, "x": 0, - "y": 149 + "y": 157 }, "id": 74, "options": { @@ -4198,7 +4524,7 @@ "h": 8, "w": 12, "x": 12, - "y": 149 + "y": 157 }, "id": 80, "options": { @@ -4295,7 +4621,7 @@ "h": 8, "w": 12, "x": 0, - "y": 157 + "y": 165 }, "id": 81, "options": { @@ -4392,7 +4718,7 @@ "h": 8, "w": 12, "x": 12, - "y": 157 + "y": 165 }, "id": 114, "options": { @@ -4489,7 +4815,7 @@ "h": 8, "w": 12, "x": 12, - "y": 165 + "y": 173 }, "id": 190, "options": { @@ -4527,7 +4853,7 @@ "h": 1, "w": 24, "x": 0, - "y": 173 + "y": 181 }, "id": 87, "panels": [], @@ -4601,7 +4927,7 @@ "h": 8, "w": 12, "x": 0, - "y": 174 + "y": 182 }, "id": 83, "options": { @@ -4697,7 +5023,7 @@ "h": 8, "w": 12, "x": 12, - "y": 174 + "y": 182 }, "id": 84, "options": { @@ -4805,7 +5131,7 @@ "h": 8, "w": 12, "x": 0, - "y": 182 + "y": 190 }, "id": 85, "options": { @@ -4836,6 +5162,80 @@ "title": "Pipeline runs", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Latency histogram for the engine_newPayload to Forkchoice Update", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "mode": "none" + } + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 188 + }, + "id": 213, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "builder", + "expr": "reth_engine_rpc_new_payload_forkchoice_updated_time_diff{instance=~\"$instance\"}", + "legendFormat": "new_payload_forkchoice_updated", + "range": true, + "refId": "A" + } + ], + "title": "Engine API newPayload Forkchoice Update Latency", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", @@ -4902,7 +5302,7 @@ "h": 8, "w": 12, "x": 12, - "y": 182 + "y": 190 }, "id": 210, "options": { @@ -5227,7 +5627,7 @@ "h": 8, "w": 12, "x": 0, - "y": 190 + "y": 198 }, "id": 211, "options": { @@ -5552,7 +5952,7 @@ "h": 8, "w": 12, "x": 12, - "y": 
190 + "y": 198 }, "id": 212, "options": { @@ -5775,9 +6175,9 @@ "h": 8, "w": 24, "x": 0, - "y": 198 + "y": 206 }, - "id": 213, + "id": 213, "options": { "legend": { "calcs": [], @@ -5811,7 +6211,7 @@ "h": 1, "w": 24, "x": 0, - "y": 198 + "y": 214 }, "id": 214, "panels": [], @@ -5883,7 +6283,7 @@ "h": 8, "w": 12, "x": 0, - "y": 199 + "y": 215 }, "id": 215, "options": { @@ -5979,7 +6379,7 @@ "h": 8, "w": 12, "x": 12, - "y": 199 + "y": 215 }, "id": 216, "options": { @@ -6030,7 +6430,7 @@ "h": 1, "w": 24, "x": 0, - "y": 207 + "y": 223 }, "id": 68, "panels": [], @@ -6104,7 +6504,7 @@ "h": 8, "w": 12, "x": 0, - "y": 208 + "y": 224 }, "id": 60, "options": { @@ -6200,7 +6600,7 @@ "h": 8, "w": 12, "x": 12, - "y": 208 + "y": 224 }, "id": 62, "options": { @@ -6296,7 +6696,7 @@ "h": 8, "w": 12, "x": 0, - "y": 216 + "y": 232 }, "id": 64, "options": { @@ -6333,7 +6733,7 @@ "h": 1, "w": 24, "x": 0, - "y": 224 + "y": 240 }, "id": 97, "panels": [], @@ -6418,7 +6818,7 @@ "h": 8, "w": 12, "x": 0, - "y": 225 + "y": 241 }, "id": 98, "options": { @@ -6581,7 +6981,7 @@ "h": 8, "w": 12, "x": 12, - "y": 225 + "y": 241 }, "id": 101, "options": { @@ -6679,7 +7079,7 @@ "h": 8, "w": 12, "x": 0, - "y": 233 + "y": 249 }, "id": 99, "options": { @@ -6777,7 +7177,7 @@ "h": 8, "w": 12, "x": 12, - "y": 233 + "y": 249 }, "id": 100, "options": { @@ -6815,7 +7215,7 @@ "h": 1, "w": 24, "x": 0, - "y": 241 + "y": 257 }, "id": 105, "panels": [], @@ -6888,7 +7288,7 @@ "h": 8, "w": 12, "x": 0, - "y": 242 + "y": 258 }, "id": 106, "options": { @@ -6986,7 +7386,7 @@ "h": 8, "w": 12, "x": 12, - "y": 242 + "y": 258 }, "id": 107, "options": { @@ -7083,7 +7483,7 @@ "h": 8, "w": 12, "x": 0, - "y": 250 + "y": 266 }, "id": 217, "options": { @@ -7121,7 +7521,7 @@ "h": 1, "w": 24, "x": 0, - "y": 258 + "y": 274 }, "id": 108, "panels": [], @@ -7219,7 +7619,7 @@ "h": 8, "w": 12, "x": 0, - "y": 259 + "y": 275 }, "id": 109, "options": { @@ -7281,7 +7681,7 @@ "h": 8, "w": 12, "x": 12, - "y": 259 + "y": 275 }, "id": 111, "maxDataPoints": 25, @@ -7411,7 +7811,7 @@ "h": 8, "w": 12, "x": 0, - "y": 267 + "y": 283 }, "id": 120, "options": { @@ -7469,7 +7869,7 @@ "h": 8, "w": 12, "x": 12, - "y": 267 + "y": 283 }, "id": 112, "maxDataPoints": 25, @@ -7623,7 +8023,7 @@ "h": 8, "w": 12, "x": 0, - "y": 275 + "y": 291 }, "id": 198, "options": { @@ -7809,9 +8209,9 @@ "h": 8, "w": 12, "x": 12, - "y": 275 + "y": 291 }, - "id": 213, + "id": 246, "options": { "legend": { "calcs": [], @@ -7848,7 +8248,7 @@ "h": 1, "w": 24, "x": 0, - "y": 283 + "y": 299 }, "id": 236, "panels": [], @@ -7920,7 +8320,7 @@ "h": 8, "w": 12, "x": 0, - "y": 284 + "y": 300 }, "id": 237, "options": { @@ -8017,7 +8417,7 @@ "h": 8, "w": 12, "x": 12, - "y": 284 + "y": 300 }, "id": 238, "options": { @@ -8114,7 +8514,7 @@ "h": 8, "w": 12, "x": 0, - "y": 292 + "y": 308 }, "id": 239, "options": { @@ -8223,7 +8623,7 @@ "h": 8, "w": 12, "x": 12, - "y": 292 + "y": 308 }, "id": 219, "options": { @@ -8288,7 +8688,7 @@ "h": 8, "w": 12, "x": 0, - "y": 300 + "y": 316 }, "id": 220, "options": { @@ -8332,7 +8732,7 @@ "h": 1, "w": 24, "x": 0, - "y": 308 + "y": 324 }, "id": 241, "panels": [], @@ -8405,7 +8805,7 @@ "h": 8, "w": 12, "x": 0, - "y": 309 + "y": 325 }, "id": 243, "options": { @@ -8517,7 +8917,7 @@ "h": 8, "w": 12, "x": 12, - "y": 309 + "y": 325 }, "id": 244, "options": { @@ -8630,7 +9030,7 @@ "h": 8, "w": 12, "x": 0, - "y": 317 + "y": 333 }, "id": 245, "options": { @@ -8669,7 +9069,7 @@ "h": 1, "w": 24, "x": 0, - "y": 325 + "y": 341 }, "id": 226, "panels": [], @@ -8767,7 +9167,7 @@ "h": 8, "w": 12, "x": 
0, - "y": 326 + "y": 342 }, "id": 225, "options": { @@ -8896,7 +9296,7 @@ "h": 8, "w": 12, "x": 12, - "y": 326 + "y": 342 }, "id": 227, "options": { @@ -9025,7 +9425,7 @@ "h": 8, "w": 12, "x": 0, - "y": 334 + "y": 350 }, "id": 235, "options": { @@ -9154,7 +9554,7 @@ "h": 8, "w": 12, "x": 12, - "y": 334 + "y": 350 }, "id": 234, "options": { diff --git a/etc/grafana/dashboards/reth-mempool.json b/etc/grafana/dashboards/reth-mempool.json index ebb693184a5..bba5dbd0e22 100644 --- a/etc/grafana/dashboards/reth-mempool.json +++ b/etc/grafana/dashboards/reth-mempool.json @@ -1493,6 +1493,108 @@ "title": "Incoming Gossip and Requests", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Measures the message send rate (MPS) for queued outgoing messages. Outgoing messages are added to the queue before being sent to other peers, and this metric helps track the rate of message dispatch.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "mps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 29 + }, + "id": 219, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_queued_outgoing_messages{instance=\"$instance\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Queued Messages per Second", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Queued Outgoing Messages", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", @@ -2931,7 +3033,7 @@ "gridPos": { "h": 8, "w": 12, - "x": 12, + "x": 0, "y": 69 }, "id": 214, diff --git a/examples/README.md b/examples/README.md index b7847c904a8..0b42c0c488b 100644 --- a/examples/README.md +++ b/examples/README.md @@ -30,7 +30,7 @@ See examples in a [dedicated repository](https://github.com/paradigmxyz/reth-exe | Example | Description | | ----------------------- | --------------------------------------------------------------------------- | -| [DB over RPC](./rpc-db) | Illustrates how to run a standalone RPC server over a Rethdatabase instance | +| [DB over RPC](./rpc-db) | Illustrates how to run a standalone RPC server over a Reth database instance | ## Database diff --git a/examples/beacon-api-sidecar-fetcher/Cargo.toml b/examples/beacon-api-sidecar-fetcher/Cargo.toml index 47a2a181f7e..d9590f87e07 100644 --- 
a/examples/beacon-api-sidecar-fetcher/Cargo.toml +++ b/examples/beacon-api-sidecar-fetcher/Cargo.toml @@ -11,6 +11,7 @@ reth-node-ethereum.workspace = true alloy-rpc-types-beacon.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true clap.workspace = true eyre.workspace = true diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index cc761aa98a6..5ab85119184 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -1,4 +1,5 @@ use crate::BeaconSidecarConfig; +use alloy_consensus::Transaction as _; use alloy_primitives::B256; use alloy_rpc_types_beacon::sidecar::{BeaconBlobBundle, SidecarIterator}; use eyre::Result; @@ -13,6 +14,7 @@ use serde::{Deserialize, Serialize}; use std::{ collections::VecDeque, pin::Pin, + sync::Arc, task::{Context, Poll}, }; use thiserror::Error; @@ -96,6 +98,7 @@ where fn process_block(&mut self, block: &SealedBlockWithSenders) { let txs: Vec<_> = block .transactions() + .iter() .filter(|tx| tx.is_eip4844()) .map(|tx| (tx.clone(), tx.blob_versioned_hashes().unwrap().len())) .collect(); @@ -109,9 +112,11 @@ where match self.pool.get_all_blobs_exact(txs.iter().map(|(tx, _)| tx.hash()).collect()) { Ok(blobs) => { - for ((tx, _), sidecar) in txs.iter().zip(blobs.iter()) { - let transaction = BlobTransaction::try_from_signed(tx.clone(), sidecar.clone()) - .expect("should not fail to convert blob tx if it is already eip4844"); + actions_to_queue.reserve_exact(txs.len()); + for ((tx, _), sidecar) in txs.iter().zip(blobs.into_iter()) { + let transaction = + BlobTransaction::try_from_signed(tx.clone(), Arc::unwrap_or_clone(sidecar)) + .expect("should not fail to convert blob tx if it is already eip4844"); let block_metadata = BlockMetadata { block_hash: block.hash(), @@ -187,6 +192,7 @@ where for (_, block) in old.blocks().iter() { let txs: Vec<B256> = block .transactions() + .iter() .filter(|tx: &&reth::primitives::TransactionSigned| { tx.is_eip4844() }) diff --git a/examples/bsc-p2p/src/main.rs b/examples/bsc-p2p/src/main.rs index e46ea4bec35..cea87918322 100644 --- a/examples/bsc-p2p/src/main.rs +++ b/examples/bsc-p2p/src/main.rs @@ -14,8 +14,13 @@ use chainspec::{boot_nodes, bsc_chain_spec}; use reth_discv4::Discv4ConfigBuilder; -use reth_network::{NetworkConfig, NetworkEvent, NetworkEventListenerProvider, NetworkManager}; -use reth_network_api::PeersInfo; +use reth_network::{ + EthNetworkPrimitives, NetworkConfig, NetworkEvent, NetworkEventListenerProvider, NetworkManager, +}; +use reth_network_api::{ + events::{PeerEvent, SessionInfo}, + PeersInfo, +}; use reth_primitives::{ForkHash, ForkId}; use reth_tracing::{ tracing::info, tracing_subscriber::filter::LevelFilter, LayerInfo, LogFormat, RethTracer, @@ -62,7 +67,7 @@ async fn main() { // latest BSC forkId, we need to override this to allow connections from BSC nodes let fork_id = ForkId { hash: ForkHash([0x07, 0xb5, 0x43, 0x28]), next: 0 }; net_cfg.fork_filter.set_current_fork_id(fork_id); - let net_manager = NetworkManager::new(net_cfg).await.unwrap(); + let net_manager = NetworkManager::<EthNetworkPrimitives>::new(net_cfg).await.unwrap(); // The network handle is our entrypoint into the network. 
let net_handle = net_manager.handle().clone(); @@ -76,10 +81,11 @@ async fn main() { // For the sake of the example we only print the session established event // with the chain specific details match evt { - NetworkEvent::SessionEstablished { status, client_version, peer_id, .. } => { + NetworkEvent::ActivePeerSession { info, .. } => { + let SessionInfo { status, client_version, peer_id, .. } = info; info!(peers=%net_handle.num_connected_peers() , %peer_id, chain = %status.chain, ?client_version, "Session established with a new peer."); } - NetworkEvent::SessionClosed { peer_id, reason } => { + NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, reason }) => { info!(peers=%net_handle.num_connected_peers() , %peer_id, ?reason, "Session closed."); } diff --git a/examples/custom-beacon-withdrawals/Cargo.toml b/examples/custom-beacon-withdrawals/Cargo.toml new file mode 100644 index 00000000000..c396ca11df8 --- /dev/null +++ b/examples/custom-beacon-withdrawals/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "example-custom-beacon-withdrawals" +version = "0.0.0" +publish = false +edition.workspace = true +license.workspace = true + +[dependencies] +reth.workspace = true +reth-node-ethereum.workspace = true +reth-evm-ethereum.workspace = true +reth-chainspec.workspace = true +reth-evm.workspace = true +reth-primitives.workspace = true + +alloy-sol-macro = "0.8.9" +alloy-sol-types.workspace = true +alloy-eips.workspace = true +alloy-consensus.workspace = true + +eyre.workspace = true + +[features] +optimism = [ + "reth-primitives/optimism" +] \ No newline at end of file diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs new file mode 100644 index 00000000000..26109db1e03 --- /dev/null +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -0,0 +1,281 @@ +//! Example of how to modify a block post-execution step. It credits beacon withdrawals with a 
custom mechanism instead of minting native tokens + +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; +use alloy_sol_macro::sol; +use alloy_sol_types::SolCall; +#[cfg(feature = "optimism")] +use reth::revm::primitives::OptimismFields; +use reth::{ + api::{ConfigureEvm, ConfigureEvmEnv, NodeTypesWithEngine}, + builder::{components::ExecutorBuilder, BuilderContext, FullNodeTypes}, + cli::Cli, + providers::ProviderError, + revm::{ + interpreter::Host, + primitives::{address, Address, Bytes, Env, EnvWithHandlerCfg, TransactTo, TxEnv, U256}, + Database, DatabaseCommit, Evm, State, + }, +}; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_evm::execute::{ + BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, ExecuteOutput, + InternalBlockExecutionError, +}; +use reth_evm_ethereum::EthEvmConfig; +use reth_node_ethereum::{node::EthereumAddOns, BasicBlockExecutorProvider, EthereumNode}; +use reth_primitives::{BlockWithSenders, EthPrimitives, Receipt}; +use std::{fmt::Display, sync::Arc}; + +pub const SYSTEM_ADDRESS: Address = address!("fffffffffffffffffffffffffffffffffffffffe"); +pub const WITHDRAWALS_ADDRESS: Address = address!("4200000000000000000000000000000000000000"); + +fn main() { + Cli::parse_args() + .run(|builder, _| async move { + let handle = builder + // use the default ethereum node types + .with_types::<EthereumNode>() + // Configure the components of the node + // use default ethereum components but use our custom executor + .with_components( + EthereumNode::components().executor(CustomExecutorBuilder::default()), + ) + .with_add_ons(EthereumAddOns::default()) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) + .unwrap(); +} + +/// A custom executor builder +#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct CustomExecutorBuilder; + +impl<Types, Node> ExecutorBuilder<Node> for CustomExecutorBuilder +where + Types: NodeTypesWithEngine<ChainSpec = ChainSpec, Primitives = EthPrimitives>, + Node: FullNodeTypes<Types = Types>, +{ + type EVM = EthEvmConfig; + type Executor = BasicBlockExecutorProvider<CustomExecutorStrategyFactory>; + + async fn build_evm( + self, + ctx: &BuilderContext<Node>, + ) -> eyre::Result<(Self::EVM, Self::Executor)> { + let chain_spec = ctx.chain_spec(); + let evm_config = EthEvmConfig::new(ctx.chain_spec()); + let strategy_factory = + CustomExecutorStrategyFactory { chain_spec, evm_config: evm_config.clone() }; + let executor = BasicBlockExecutorProvider::new(strategy_factory); + + Ok((evm_config, executor)) + } +} + +#[derive(Clone)] +pub struct CustomExecutorStrategyFactory { + /// The chainspec + chain_spec: Arc<ChainSpec>, + /// How to create an EVM. + evm_config: EthEvmConfig, +} + +impl BlockExecutionStrategyFactory for CustomExecutorStrategyFactory { + type Primitives = EthPrimitives; + type Strategy<DB: Database<Error: Into<ProviderError> + Display>> = CustomExecutorStrategy<DB>; + + fn create_strategy<DB>(&self, db: DB) -> Self::Strategy<DB> + where + DB: Database<Error: Into<ProviderError> + Display>, + { + let state = + State::builder().with_database(db).with_bundle_update().without_state_clear().build(); + CustomExecutorStrategy { + state, + chain_spec: self.chain_spec.clone(), + evm_config: self.evm_config.clone(), + } + } +} + +pub struct CustomExecutorStrategy<DB> +where + DB: Database<Error: Into<ProviderError> + Display>, +{ + /// The chainspec + chain_spec: Arc<ChainSpec>, + /// How to create an EVM. + evm_config: EthEvmConfig, + /// Current state for block execution. + state: State<DB>, +} + +impl<DB> CustomExecutorStrategy<DB> +where + DB: Database<Error: Into<ProviderError> + Display>, +{ + /// Configures a new evm configuration and block environment for the given block. 
+ /// + /// # Caution + /// + /// This does not initialize the tx environment. + fn evm_env_for_block( + &self, + header: &alloy_consensus::Header, + total_difficulty: U256, + ) -> EnvWithHandlerCfg { + let (cfg, block_env) = self.evm_config.cfg_and_block_env(header, total_difficulty); + EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) + } +} + +impl<DB> BlockExecutionStrategy for CustomExecutorStrategy<DB> +where + DB: Database<Error: Into<ProviderError> + Display>, +{ + type DB = DB; + type Primitives = EthPrimitives; + type Error = BlockExecutionError; + + fn apply_pre_execution_changes( + &mut self, + block: &BlockWithSenders, + _total_difficulty: U256, + ) -> Result<(), Self::Error> { + // Set state clear flag if the block is after the Spurious Dragon hardfork. + let state_clear_flag = + (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + self.state.set_state_clear_flag(state_clear_flag); + + Ok(()) + } + + fn execute_transactions( + &mut self, + _block: &BlockWithSenders, + _total_difficulty: U256, + ) -> Result<ExecuteOutput<Receipt>, Self::Error> { + Ok(ExecuteOutput { receipts: vec![], gas_used: 0 }) + } + + fn apply_post_execution_changes( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + _receipts: &[Receipt], + ) -> Result<Requests, Self::Error> { + let env = self.evm_env_for_block(&block.header, total_difficulty); + let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + + if let Some(withdrawals) = block.body.withdrawals.as_ref() { + apply_withdrawals_contract_call(withdrawals, &mut evm)?; + } + + Ok(Requests::default()) + } + + fn state_ref(&self) -> &State<DB> { + &self.state + } + + fn state_mut(&mut self) -> &mut State<DB> { + &mut self.state + } +} + +sol!( + function withdrawals( + uint64[] calldata amounts, + address[] calldata addresses + ); +); + +/// Applies the post-block call to the withdrawal / deposit contract, using the given block, +/// [`ChainSpec`], and EVM. 
+/// Applies the post-block call to the withdrawal / deposit contract, using the given block,
+/// [`ChainSpec`], EVM.
+pub fn apply_withdrawals_contract_call<EXT, DB: Database + DatabaseCommit>(
+    withdrawals: &[Withdrawal],
+    evm: &mut Evm<'_, EXT, DB>,
+) -> Result<(), BlockExecutionError>
+where
+    DB::Error: std::fmt::Display,
+{
+    // get previous env
+    let previous_env = Box::new(evm.context.env().clone());
+
+    // modify env for pre block call
+    fill_tx_env_with_system_contract_call(
+        &mut evm.context.evm.env,
+        SYSTEM_ADDRESS,
+        WITHDRAWALS_ADDRESS,
+        withdrawalsCall {
+            amounts: withdrawals.iter().map(|w| w.amount).collect::<Vec<_>>(),
+            addresses: withdrawals.iter().map(|w| w.address).collect::<Vec<_>>(),
+        }
+        .abi_encode()
+        .into(),
+    );
+
+    let mut state = match evm.transact() {
+        Ok(res) => res.state,
+        Err(e) => {
+            evm.context.evm.env = previous_env;
+            return Err(BlockExecutionError::Internal(InternalBlockExecutionError::Other(
+                format!("withdrawal contract system call revert: {}", e).into(),
+            )))
+        }
+    };
+
+    // Clean-up post system tx context
+    state.remove(&SYSTEM_ADDRESS);
+    state.remove(&evm.block().coinbase);
+    evm.context.evm.db.commit(state);
+    // re-set the previous env
+    evm.context.evm.env = previous_env;
+
+    Ok(())
+}
+
+fn fill_tx_env_with_system_contract_call(
+    env: &mut Env,
+    caller: Address,
+    contract: Address,
+    data: Bytes,
+) {
+    env.tx = TxEnv {
+        caller,
+        transact_to: TransactTo::Call(contract),
+        // Explicitly set nonce to None so revm does not do any nonce checks
+        nonce: None,
+        gas_limit: 30_000_000,
+        value: U256::ZERO,
+        data,
+        // Setting the gas price to zero enforces that no value is transferred as part of the call,
+        // and that the call will not count against the block's gas limit
+        gas_price: U256::ZERO,
+        // The chain ID check is not relevant here and is disabled if set to None
+        chain_id: None,
+        // Setting the gas priority fee to None ensures the effective gas price is derived from the
+        // `gas_price` field, which we need to be zero
+        gas_priority_fee: None,
+        access_list: Vec::new(),
+        // blob fields can be None for this tx
+        blob_hashes: Vec::new(),
+        max_fee_per_blob_gas: None,
+        authorization_list: None,
+        #[cfg(feature = "optimism")]
+        optimism: OptimismFields::default(),
+    };
+
+    // ensure the block gas limit is >= the tx gas limit
+    env.block.gas_limit = U256::from(env.tx.gas_limit);
+
+    // disable the base fee check for this call by setting the base fee to zero
+    env.block.basefee = U256::ZERO;
+}
diff --git a/examples/custom-dev-node/src/main.rs b/examples/custom-dev-node/src/main.rs
index 7fa44418c52..42bb83782aa 100644
--- a/examples/custom-dev-node/src/main.rs
+++ b/examples/custom-dev-node/src/main.rs
@@ -50,7 +50,7 @@ async fn main() -> eyre::Result<()> {
     let head = notifications.next().await.unwrap();
 
-    let tx = head.tip().transactions().next().unwrap();
+    let tx = &head.tip().transactions()[0];
     assert_eq!(tx.hash(), hash);
     println!("mined transaction: {hash}");
     Ok(())
 }
diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml
index f826451d203..d6642a8edfe 100644
--- a/examples/custom-engine-types/Cargo.toml
+++ b/examples/custom-engine-types/Cargo.toml
@@ -10,15 +10,16 @@ reth.workspace = true
 reth-chainspec.workspace = true
 reth-node-api.workspace = true
 reth-node-core.workspace = true
-reth-primitives.workspace = true
 reth-payload-builder.workspace = true
 reth-basic-payload-builder.workspace = true
 reth-ethereum-payload-builder.workspace = true
 reth-node-ethereum = { workspace = true, features = ["test-utils"] }
 reth-tracing.workspace = true
+reth-trie-db.workspace = true
 alloy-genesis.workspace = true
 alloy-rpc-types = { workspace = true, features =
["engine"] } alloy-primitives.workspace = true +alloy-eips.workspace = true eyre.workspace = true tokio.workspace = true diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index f833da86236..f30956d8f5c 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -17,11 +17,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] -use std::{convert::Infallible, sync::Arc}; - -use serde::{Deserialize, Serialize}; -use thiserror::Error; - +use alloy_eips::eip4895::Withdrawals; use alloy_genesis::Genesis; use alloy_primitives::{Address, B256}; use alloy_rpc_types::{ @@ -32,16 +28,24 @@ use alloy_rpc_types::{ Withdrawal, }; use reth::{ - api::PayloadTypes, + api::{InvalidPayloadAttributesError, PayloadTypes}, builder::{ - components::{ComponentsBuilder, EngineValidatorBuilder, PayloadServiceBuilder}, + components::{ComponentsBuilder, PayloadServiceBuilder}, node::{NodeTypes, NodeTypesWithEngine}, + rpc::{EngineValidatorBuilder, RpcAddOns}, BuilderContext, FullNodeTypes, Node, NodeAdapter, NodeBuilder, NodeComponentsBuilder, PayloadBuilderConfig, }, - providers::{CanonStateSubscriptions, StateProviderFactory}, + network::NetworkHandle, + payload::ExecutionPayloadValidator, + primitives::{Block, EthPrimitives, SealedBlockFor, TransactionSigned}, + providers::{CanonStateSubscriptions, EthStorage, StateProviderFactory}, + rpc::{ + eth::EthApi, + types::engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}, + }, tasks::TaskManager, - transaction_pool::TransactionPool, + transaction_pool::{PoolTransaction, TransactionPool}, }; use reth_basic_payload_builder::{ BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig, BuildArguments, BuildOutcome, @@ -50,13 +54,13 @@ use reth_basic_payload_builder::{ use reth_chainspec::{Chain, ChainSpec, ChainSpecProvider}; use reth_node_api::{ payload::{EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes}, - validate_version_specific_fields, EngineTypes, EngineValidator, PayloadAttributes, - PayloadBuilderAttributes, + validate_version_specific_fields, AddOnsContext, EngineTypes, EngineValidator, + FullNodeComponents, PayloadAttributes, PayloadBuilderAttributes, PayloadValidator, }; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::{ node::{ - EthereumAddOns, EthereumConsensusBuilder, EthereumExecutorBuilder, EthereumNetworkBuilder, + EthereumConsensusBuilder, EthereumExecutorBuilder, EthereumNetworkBuilder, EthereumPoolBuilder, }, EthEvmConfig, @@ -65,8 +69,11 @@ use reth_payload_builder::{ EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderError, PayloadBuilderHandle, PayloadBuilderService, }; -use reth_primitives::Withdrawals; use reth_tracing::{RethTracer, Tracer}; +use reth_trie_db::MerklePatriciaTrie; +use serde::{Deserialize, Serialize}; +use std::{convert::Infallible, sync::Arc}; +use thiserror::Error; /// A custom payload attributes type. 
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -107,7 +114,11 @@ impl PayloadBuilderAttributes for CustomPayloadBuilderAttributes { type RpcPayloadAttributes = CustomPayloadAttributes; type Error = Infallible; - fn try_new(parent: B256, attributes: CustomPayloadAttributes) -> Result { + fn try_new( + parent: B256, + attributes: CustomPayloadAttributes, + _version: u8, + ) -> Result { Ok(Self(EthPayloadBuilderAttributes::new(parent, attributes.inner))) } @@ -153,16 +164,41 @@ impl PayloadTypes for CustomEngineTypes { } impl EngineTypes for CustomEngineTypes { - type ExecutionPayloadV1 = ExecutionPayloadV1; - type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; - type ExecutionPayloadV3 = ExecutionPayloadEnvelopeV3; - type ExecutionPayloadV4 = ExecutionPayloadEnvelopeV4; + type ExecutionPayloadEnvelopeV1 = ExecutionPayloadV1; + type ExecutionPayloadEnvelopeV2 = ExecutionPayloadEnvelopeV2; + type ExecutionPayloadEnvelopeV3 = ExecutionPayloadEnvelopeV3; + type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; } /// Custom engine validator #[derive(Debug, Clone)] pub struct CustomEngineValidator { - chain_spec: Arc, + inner: ExecutionPayloadValidator, +} + +impl CustomEngineValidator { + /// Instantiates a new validator. + pub const fn new(chain_spec: Arc) -> Self { + Self { inner: ExecutionPayloadValidator::new(chain_spec) } + } + + /// Returns the chain spec used by the validator. + #[inline] + fn chain_spec(&self) -> &ChainSpec { + self.inner.chain_spec() + } +} + +impl PayloadValidator for CustomEngineValidator { + type Block = Block; + + fn ensure_well_formed_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result, PayloadError> { + self.inner.ensure_well_formed_payload(payload, sidecar) + } } impl EngineValidator for CustomEngineValidator @@ -174,7 +210,7 @@ where version: EngineApiMessageVersion, payload_or_attrs: PayloadOrAttributes<'_, T::PayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, payload_or_attrs) + validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs) } fn ensure_well_formed_attributes( @@ -182,7 +218,7 @@ where version: EngineApiMessageVersion, attributes: &T::PayloadAttributes, ) -> Result<(), EngineObjectValidationError> { - validate_version_specific_fields(&self.chain_spec, version, attributes.into())?; + validate_version_specific_fields(self.chain_spec(), version, attributes.into())?; // custom validation logic - ensure that the custom field is not zero if attributes.custom == 0 { @@ -193,6 +229,15 @@ where Ok(()) } + + fn validate_payload_attributes_against_header( + &self, + _attr: &::PayloadAttributes, + _header: &::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + // skip default timestamp validation + Ok(()) + } } /// Custom engine validator builder @@ -202,12 +247,18 @@ pub struct CustomEngineValidatorBuilder; impl EngineValidatorBuilder for CustomEngineValidatorBuilder where - N: FullNodeTypes>, + N: FullNodeComponents< + Types: NodeTypesWithEngine< + Engine = CustomEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + >, + >, { type Validator = CustomEngineValidator; - async fn build_validator(self, ctx: &BuilderContext) -> eyre::Result { - Ok(CustomEngineValidator { chain_spec: ctx.chain_spec() }) + async fn build(self, ctx: &AddOnsContext<'_, N>) -> eyre::Result { + Ok(CustomEngineValidator::new(ctx.config.chain.clone())) } } @@ -217,8 +268,10 @@ struct MyCustomNode; 
/// Configure the node types impl NodeTypes for MyCustomNode { - type Primitives = (); + type Primitives = EthPrimitives; type ChainSpec = ChainSpec; + type StateCommitment = MerklePatriciaTrie; + type Storage = EthStorage; } /// Configure the node types with the custom engine types @@ -226,12 +279,31 @@ impl NodeTypesWithEngine for MyCustomNode { type Engine = CustomEngineTypes; } +/// Custom addons configuring RPC types +pub type MyNodeAddOns = RpcAddOns< + N, + EthApi< + ::Provider, + ::Pool, + NetworkHandle, + ::Evm, + >, + CustomEngineValidatorBuilder, +>; + /// Implement the Node trait for the custom node /// /// This provides a preset configuration for the node impl Node for MyCustomNode where - N: FullNodeTypes>, + N: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = CustomEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + Storage = EthStorage, + >, + >, { type ComponentsBuilder = ComponentsBuilder< N, @@ -240,9 +312,8 @@ where EthereumNetworkBuilder, EthereumExecutorBuilder, EthereumConsensusBuilder, - CustomEngineValidatorBuilder, >; - type AddOns = EthereumAddOns< + type AddOns = MyNodeAddOns< NodeAdapter>::Components>, >; @@ -254,11 +325,10 @@ where .network(EthereumNetworkBuilder::default()) .executor(EthereumExecutorBuilder::default()) .consensus(EthereumConsensusBuilder::default()) - .engine_validator(CustomEngineValidatorBuilder::default()) } fn add_ons(&self) -> Self::AddOns { - EthereumAddOns::default() + MyNodeAddOns::default() } } @@ -270,9 +340,15 @@ pub struct CustomPayloadServiceBuilder; impl PayloadServiceBuilder for CustomPayloadServiceBuilder where Node: FullNodeTypes< - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine< + Engine = CustomEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + >, >, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool> + + Unpin + + 'static, { async fn spawn_payload_service( self, @@ -312,7 +388,7 @@ pub struct CustomPayloadBuilder; impl PayloadBuilder for CustomPayloadBuilder where Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, + Pool: TransactionPool>, { type Attributes = CustomPayloadBuilderAttributes; type BuiltPayload = EthBuiltPayload; @@ -322,7 +398,7 @@ where args: BuildArguments, ) -> Result, PayloadBuilderError> { let BuildArguments { client, pool, cached_reads, config, cancel, best_payload } = args; - let PayloadConfig { parent_block, extra_data, attributes } = config; + let PayloadConfig { parent_header, extra_data, attributes } = config; let chain_spec = client.chain_spec(); @@ -335,7 +411,7 @@ where client, pool, cached_reads, - config: PayloadConfig { parent_block, extra_data, attributes: attributes.0 }, + config: PayloadConfig { parent_header, extra_data, attributes: attributes.0 }, cancel, best_payload, }) @@ -346,10 +422,10 @@ where client: &Client, config: PayloadConfig, ) -> Result { - let PayloadConfig { parent_block, extra_data, attributes } = config; + let PayloadConfig { parent_header, extra_data, attributes } = config; let chain_spec = client.chain_spec(); >::build_empty_payload(&reth_ethereum_payload_builder::EthereumPayloadBuilder::new(EthEvmConfig::new(chain_spec.clone())),client, - PayloadConfig { parent_block, extra_data, attributes: attributes.0}) + PayloadConfig { parent_header, extra_data, attributes: attributes.0}) } } diff --git a/examples/custom-evm/Cargo.toml b/examples/custom-evm/Cargo.toml index 53563ab9575..e763a932eab 100644 --- a/examples/custom-evm/Cargo.toml +++ b/examples/custom-evm/Cargo.toml @@ 
-16,6 +16,7 @@ reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true eyre.workspace = true tokio.workspace = true diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 9c421f9c6a5..8990ba2252e 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -2,6 +2,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_consensus::Header; use alloy_genesis::Genesis; use alloy_primitives::{address, Address, Bytes, U256}; use reth::{ @@ -10,17 +11,16 @@ use reth::{ BuilderContext, NodeBuilder, }, payload::{EthBuiltPayload, EthPayloadBuilderAttributes}, - primitives::revm_primitives::{Env, PrecompileResult}, revm::{ handler::register::EvmHandler, inspector_handle_register, precompile::{Precompile, PrecompileOutput, PrecompileSpecId}, - primitives::BlockEnv, + primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv}, ContextPrecompiles, Database, Evm, EvmBuilder, GetInspector, }, rpc::types::engine::PayloadAttributes, tasks::TaskManager, - transaction_pool::TransactionPool, + transaction_pool::{PoolTransaction, TransactionPool}, }; use reth_chainspec::{Chain, ChainSpec}; use reth_evm_ethereum::EthEvmConfig; @@ -31,14 +31,11 @@ use reth_node_api::{ use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::{ node::{EthereumAddOns, EthereumPayloadBuilder}, - EthExecutorProvider, EthereumNode, -}; -use reth_primitives::{ - revm_primitives::{CfgEnvWithHandlerCfg, TxEnv}, - Header, TransactionSigned, + BasicBlockExecutorProvider, EthExecutionStrategyFactory, EthereumNode, }; +use reth_primitives::{EthPrimitives, TransactionSigned}; use reth_tracing::{RethTracer, Tracer}; -use std::sync::Arc; +use std::{convert::Infallible, sync::Arc}; /// Custom EVM configuration #[derive(Debug, Clone)] @@ -87,6 +84,9 @@ impl MyEvmConfig { impl ConfigureEvmEnv for MyEvmConfig { type Header = Header; + type Transaction = TransactionSigned; + + type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { self.inner.fill_tx_env(tx_env, transaction, sender); @@ -115,7 +115,7 @@ impl ConfigureEvmEnv for MyEvmConfig { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> { self.inner.next_cfg_and_block_env(parent, attributes) } } @@ -155,10 +155,10 @@ pub struct MyExecutorBuilder; impl ExecutorBuilder for MyExecutorBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type EVM = MyEvmConfig; - type Executor = EthExecutorProvider; + type Executor = BasicBlockExecutorProvider>; async fn build_evm( self, @@ -166,7 +166,10 @@ where ) -> eyre::Result<(Self::EVM, Self::Executor)> { Ok(( MyEvmConfig::new(ctx.chain_spec()), - EthExecutorProvider::new(ctx.chain_spec(), MyEvmConfig::new(ctx.chain_spec())), + BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::new( + ctx.chain_spec(), + MyEvmConfig::new(ctx.chain_spec()), + )), )) } } @@ -180,9 +183,11 @@ pub struct MyPayloadBuilder { impl PayloadServiceBuilder for MyPayloadBuilder where - Types: NodeTypesWithEngine, + Types: NodeTypesWithEngine, Node: FullNodeTypes, - Pool: TransactionPool + Unpin + 'static, + Pool: TransactionPool> + + Unpin + + 'static, Types::Engine: PayloadTypes< BuiltPayload = EthBuiltPayload, PayloadAttributes 
= PayloadAttributes, diff --git a/examples/custom-inspector/Cargo.toml b/examples/custom-inspector/Cargo.toml index 18629556c42..ee6f887e64c 100644 --- a/examples/custom-inspector/Cargo.toml +++ b/examples/custom-inspector/Cargo.toml @@ -8,7 +8,8 @@ license.workspace = true [dependencies] reth.workspace = true reth-node-ethereum.workspace = true -alloy-rpc-types.workspace = true +alloy-rpc-types-eth.workspace = true clap = { workspace = true, features = ["derive"] } futures-util.workspace = true alloy-primitives.workspace = true +alloy-eips.workspace = true diff --git a/examples/custom-inspector/src/main.rs b/examples/custom-inspector/src/main.rs index 12b7620f4ad..6b25c46b76c 100644 --- a/examples/custom-inspector/src/main.rs +++ b/examples/custom-inspector/src/main.rs @@ -10,15 +10,15 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_eips::BlockNumberOrTag; use alloy_primitives::Address; -use alloy_rpc_types::state::EvmOverrides; +use alloy_rpc_types_eth::state::EvmOverrides; use clap::Parser; use futures_util::StreamExt; use reth::{ builder::NodeHandle, chainspec::EthereumChainSpecParser, cli::Cli, - primitives::BlockNumberOrTag, revm::{ inspector_handle_register, interpreter::{Interpreter, OpCode}, @@ -54,8 +54,7 @@ fn main() { if let Some(recipient) = tx.to() { if args.is_match(&recipient) { // convert the pool transaction - let call_request = - transaction_to_call_request(tx.to_recovered_transaction()); + let call_request = transaction_to_call_request(tx.to_consensus()); let result = eth_api .spawn_with_call_at( diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index d00b8a70224..7924aabd869 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -7,6 +7,7 @@ use reth::{ builder::{components::PoolBuilder, BuilderContext, FullNodeTypes}, chainspec::ChainSpec, cli::Cli, + primitives::EthPrimitives, providers::CanonStateSubscriptions, transaction_pool::{ blobstore::InMemoryBlobStore, EthTransactionPool, TransactionValidationTaskExecutor, @@ -47,7 +48,7 @@ pub struct CustomPoolBuilder { /// This will be used to build the transaction pool and its maintenance tasks during launch. 
impl PoolBuilder for CustomPoolBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type Pool = EthTransactionPool; diff --git a/examples/custom-payload-builder/Cargo.toml b/examples/custom-payload-builder/Cargo.toml index 1c160fe5ec8..b77a3f2945c 100644 --- a/examples/custom-payload-builder/Cargo.toml +++ b/examples/custom-payload-builder/Cargo.toml @@ -16,6 +16,7 @@ reth-node-ethereum.workspace = true reth-ethereum-payload-builder.workspace = true alloy-primitives.workspace = true +alloy-eips.workspace = true tracing.workspace = true futures-util.workspace = true diff --git a/examples/custom-payload-builder/src/generator.rs b/examples/custom-payload-builder/src/generator.rs index f5d64e41cd0..da48a0754f9 100644 --- a/examples/custom-payload-builder/src/generator.rs +++ b/examples/custom-payload-builder/src/generator.rs @@ -1,4 +1,5 @@ use crate::job::EmptyBlockPayloadJob; +use alloy_eips::BlockNumberOrTag; use alloy_primitives::Bytes; use reth::{ providers::{BlockReaderIdExt, BlockSource, StateProviderFactory}, @@ -8,7 +9,7 @@ use reth::{ use reth_basic_payload_builder::{BasicPayloadJobGeneratorConfig, PayloadBuilder, PayloadConfig}; use reth_node_api::PayloadBuilderAttributes; use reth_payload_builder::{PayloadBuilderError, PayloadJobGenerator}; -use reth_primitives::BlockNumberOrTag; +use reth_primitives::{BlockExt, SealedHeader}; use std::sync::Arc; /// The generator type that creates new jobs that builds empty blocks. @@ -47,7 +48,11 @@ impl EmptyBlockPayloadJobGenerator PayloadJobGenerator for EmptyBlockPayloadJobGenerator where - Client: StateProviderFactory + BlockReaderIdExt + Clone + Unpin + 'static, + Client: StateProviderFactory + + BlockReaderIdExt + + Clone + + Unpin + + 'static, Pool: TransactionPool + Unpin + 'static, Tasks: TaskSpawner + Clone + Unpin + 'static, Builder: PayloadBuilder + Unpin + 'static, @@ -77,7 +82,10 @@ where // we already know the hash, so we can seal it block.seal(attributes.parent()) }; - let config = PayloadConfig::new(Arc::new(parent_block), Bytes::default(), attributes); + let hash = parent_block.hash(); + let header = SealedHeader::new(parent_block.header().clone(), hash); + + let config = PayloadConfig::new(Arc::new(header), Bytes::default(), attributes); Ok(EmptyBlockPayloadJob { client: self.client.clone(), _pool: self.pool.clone(), diff --git a/examples/custom-payload-builder/src/job.rs b/examples/custom-payload-builder/src/job.rs index 26b594be94b..01419825959 100644 --- a/examples/custom-payload-builder/src/job.rs +++ b/examples/custom-payload-builder/src/job.rs @@ -3,6 +3,7 @@ use reth::{ providers::StateProviderFactory, tasks::TaskSpawner, transaction_pool::TransactionPool, }; use reth_basic_payload_builder::{PayloadBuilder, PayloadConfig}; +use reth_node_api::PayloadKind; use reth_payload_builder::{KeepPayloadJobAlive, PayloadBuilderError, PayloadJob}; use std::{ @@ -52,7 +53,10 @@ where Ok(self.config.attributes.clone()) } - fn resolve(&mut self) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { + fn resolve_kind( + &mut self, + _kind: PayloadKind, + ) -> (Self::ResolvePayloadFuture, KeepPayloadJobAlive) { let payload = self.best_payload(); (futures_util::future::ready(payload), KeepPayloadJobAlive::No) } diff --git a/examples/custom-payload-builder/src/main.rs b/examples/custom-payload-builder/src/main.rs index e46b969adaa..d7c42e341b5 100644 --- a/examples/custom-payload-builder/src/main.rs +++ b/examples/custom-payload-builder/src/main.rs @@ -17,13 +17,14 @@ use reth::{ cli::{config::PayloadBuilderConfig, Cli}, 
payload::PayloadBuilderHandle, providers::CanonStateSubscriptions, - transaction_pool::TransactionPool, + transaction_pool::{PoolTransaction, TransactionPool}, }; use reth_basic_payload_builder::BasicPayloadJobGeneratorConfig; use reth_chainspec::ChainSpec; use reth_node_api::NodeTypesWithEngine; use reth_node_ethereum::{node::EthereumAddOns, EthEngineTypes, EthEvmConfig, EthereumNode}; use reth_payload_builder::PayloadBuilderService; +use reth_primitives::{EthPrimitives, TransactionSigned}; pub mod generator; pub mod job; @@ -34,8 +35,16 @@ pub struct CustomPayloadBuilder; impl PayloadServiceBuilder for CustomPayloadBuilder where - Node: FullNodeTypes>, - Pool: TransactionPool + Unpin + 'static, + Node: FullNodeTypes< + Types: NodeTypesWithEngine< + Engine = EthEngineTypes, + ChainSpec = ChainSpec, + Primitives = EthPrimitives, + >, + >, + Pool: TransactionPool> + + Unpin + + 'static, { async fn spawn_payload_service( self, diff --git a/examples/custom-rlpx-subprotocol/Cargo.toml b/examples/custom-rlpx-subprotocol/Cargo.toml index d59d16f35cf..18c136671c0 100644 --- a/examples/custom-rlpx-subprotocol/Cargo.toml +++ b/examples/custom-rlpx-subprotocol/Cargo.toml @@ -13,8 +13,6 @@ reth-eth-wire.workspace = true reth-network.workspace = true reth-network-api.workspace = true reth-node-ethereum.workspace = true -reth-provider = { workspace = true, features = ["test-utils"] } -reth-primitives.workspace = true reth.workspace = true tokio-stream.workspace = true eyre.workspace = true diff --git a/examples/custom-rlpx-subprotocol/src/main.rs b/examples/custom-rlpx-subprotocol/src/main.rs index e16f71071c8..702d0e8cf5e 100644 --- a/examples/custom-rlpx-subprotocol/src/main.rs +++ b/examples/custom-rlpx-subprotocol/src/main.rs @@ -14,8 +14,8 @@ use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use reth::builder::NodeHandle; use reth_network::{ - config::SecretKey, protocol::IntoRlpxSubProtocol, NetworkConfig, NetworkManager, - NetworkProtocols, + config::SecretKey, protocol::IntoRlpxSubProtocol, EthNetworkPrimitives, NetworkConfig, + NetworkManager, NetworkProtocols, }; use reth_network_api::{test_utils::PeersHandleProvider, NetworkInfo}; use reth_node_ethereum::EthereumNode; @@ -53,7 +53,7 @@ fn main() -> eyre::Result<()> { .build_with_noop_provider(node.chain_spec()); // spawn the second network instance - let subnetwork = NetworkManager::new(net_cfg).await?; + let subnetwork = NetworkManager::::new(net_cfg).await?; let subnetwork_peer_id = *subnetwork.peer_id(); let subnetwork_peer_addr = subnetwork.local_addr(); let subnetwork_handle = subnetwork.peers_handle(); diff --git a/examples/db-access/Cargo.toml b/examples/db-access/Cargo.toml index 0a7ef9bb6b2..ec278ac1cc1 100644 --- a/examples/db-access/Cargo.toml +++ b/examples/db-access/Cargo.toml @@ -14,7 +14,8 @@ reth-provider.workspace = true reth-node-ethereum.workspace = true reth-node-types.workspace = true -alloy-rpc-types.workspace = true +alloy-consensus.workspace = true +alloy-rpc-types-eth.workspace = true alloy-primitives.workspace = true diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 5772461bd7a..727bd1bfff3 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -1,10 +1,11 @@ -use alloy_primitives::{Address, Sealable, B256}; -use alloy_rpc_types::{Filter, FilteredParams}; +use alloy_consensus::BlockHeader; +use alloy_primitives::{Address, B256}; +use alloy_rpc_types_eth::{Filter, FilteredParams}; use reth_chainspec::ChainSpecBuilder; use reth_db::{open_db_read_only, 
DatabaseEnv}; use reth_node_ethereum::EthereumNode; use reth_node_types::NodeTypesWithDBAdapter; -use reth_primitives::SealedHeader; +use reth_primitives::{BlockExt, SealedHeader, TransactionSigned}; use reth_provider::{ providers::StaticFileProvider, AccountReader, BlockReader, BlockSource, HeaderProvider, ProviderFactory, ReceiptProvider, StateProvider, TransactionsProvider, @@ -63,9 +64,7 @@ fn header_provider_example(provider: T, number: u64) -> eyre: // We can convert a header to a sealed header which contains the hash w/o needing to re-compute // it every time. - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - let sealed_header = SealedHeader::new(header, seal); + let sealed_header = SealedHeader::seal(header); // Can also query the header by hash! let header_by_hash = @@ -85,7 +84,9 @@ fn header_provider_example(provider: T, number: u64) -> eyre: } /// The `TransactionsProvider` allows querying transaction-related information -fn txs_provider_example(provider: T) -> eyre::Result<()> { +fn txs_provider_example>( + provider: T, +) -> eyre::Result<()> { // Try the 5th tx let txid = 5; @@ -94,16 +95,17 @@ fn txs_provider_example(provider: T) -> eyre::Result<() // Can query the tx by hash let tx_by_hash = - provider.transaction_by_hash(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?; + provider.transaction_by_hash(tx.hash())?.ok_or(eyre::eyre!("txhash not found"))?; assert_eq!(tx, tx_by_hash); // Can query the tx by hash with info about the block it was included in - let (tx, meta) = - provider.transaction_by_hash_with_meta(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?; - assert_eq!(tx.hash, meta.tx_hash); + let (tx, meta) = provider + .transaction_by_hash_with_meta(tx.hash())? + .ok_or(eyre::eyre!("txhash not found"))?; + assert_eq!(tx.hash(), meta.tx_hash); // Can reverse lookup the key too - let id = provider.transaction_id(tx.hash)?.ok_or(eyre::eyre!("txhash not found"))?; + let id = provider.transaction_id(tx.hash())?.ok_or(eyre::eyre!("txhash not found"))?; assert_eq!(id, txid); // Can find the block of a transaction given its key @@ -118,7 +120,10 @@ fn txs_provider_example(provider: T) -> eyre::Result<() } /// The `BlockReader` allows querying the headers-related tables. -fn block_provider_example(provider: T, number: u64) -> eyre::Result<()> { +fn block_provider_example>( + provider: T, + number: u64, +) -> eyre::Result<()> { // Can query a block by number let block = provider.block(number.into())?.ok_or(eyre::eyre!("block num not found"))?; assert_eq!(block.number, number); @@ -161,7 +166,11 @@ fn block_provider_example(provider: T, number: u64) -> eyre::Res } /// The `ReceiptProvider` allows querying the receipts tables. 
-fn receipts_provider_example( +fn receipts_provider_example< + T: ReceiptProvider + + TransactionsProvider + + HeaderProvider, +>( provider: T, ) -> eyre::Result<()> { let txid = 5; @@ -173,7 +182,7 @@ fn receipts_provider_example>; -type AuthedEthStream = EthStream>>; +type AuthedEthStream = EthStream>, EthNetworkPrimitives>; pub static MAINNET_BOOT_NODES: LazyLock> = LazyLock::new(mainnet_nodes); @@ -105,7 +106,8 @@ async fn handshake_eth(p2p_stream: AuthedP2PStream) -> eyre::Result<(AuthedEthSt .forkid(MAINNET.hardfork_fork_id(EthereumHardfork::Shanghai).unwrap()) .build(); - let status = Status { version: p2p_stream.shared_capabilities().eth()?.version(), ..status }; + let status = + Status { version: p2p_stream.shared_capabilities().eth()?.version().try_into()?, ..status }; let eth_unauthed = UnauthedEthStream::new(p2p_stream); Ok(eth_unauthed.handshake(status, fork_filter).await?) } diff --git a/examples/network-txpool/src/main.rs b/examples/network-txpool/src/main.rs index 6f8d69eab02..716e6cc57c9 100644 --- a/examples/network-txpool/src/main.rs +++ b/examples/network-txpool/src/main.rs @@ -7,7 +7,7 @@ //! cargo run --release -p network-txpool -- node //! ``` -use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; +use reth_network::{config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager}; use reth_provider::test_utils::NoopProvider; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, validate::ValidTransaction, CoinbaseTipOrdering, @@ -34,7 +34,9 @@ async fn main() -> eyre::Result<()> { let local_key = rng_secret_key(); // Configure the network - let config = NetworkConfig::builder(local_key).mainnet_boot_nodes().build(client); + let config = NetworkConfig::<_, EthNetworkPrimitives>::builder(local_key) + .mainnet_boot_nodes() + .build(client); let transactions_manager_config = config.transactions_manager_config.clone(); // create the network instance let (_handle, network, txpool, _) = NetworkManager::builder(config) @@ -82,7 +84,7 @@ impl TransactionValidator for OkValidator { ) -> TransactionValidationOutcome { // Always return valid TransactionValidationOutcome::Valid { - balance: transaction.cost(), + balance: *transaction.cost(), state_nonce: transaction.nonce(), transaction: ValidTransaction::Valid(transaction), propagate: false, diff --git a/examples/network/src/main.rs b/examples/network/src/main.rs index 1d8f436f318..bd4f232a754 100644 --- a/examples/network/src/main.rs +++ b/examples/network/src/main.rs @@ -8,7 +8,8 @@ use futures::StreamExt; use reth_network::{ - config::rng_secret_key, NetworkConfig, NetworkEventListenerProvider, NetworkManager, + config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkEventListenerProvider, + NetworkManager, }; use reth_provider::test_utils::NoopProvider; @@ -24,7 +25,7 @@ async fn main() -> eyre::Result<()> { let config = NetworkConfig::builder(local_key).mainnet_boot_nodes().build(client); // create the network instance - let network = NetworkManager::new(config).await?; + let network = NetworkManager::::new(config).await?; // get a handle to the network to interact with it let handle = network.handle().clone(); diff --git a/examples/polygon-p2p/Cargo.toml b/examples/polygon-p2p/Cargo.toml index bdf9a27ce56..34536ed52d7 100644 --- a/examples/polygon-p2p/Cargo.toml +++ b/examples/polygon-p2p/Cargo.toml @@ -16,10 +16,10 @@ secp256k1 = { workspace = true, features = [ tokio.workspace = true reth-network.workspace = true reth-chainspec.workspace = true 
+reth-network-api.workspace = true reth-primitives.workspace = true serde_json.workspace = true reth-tracing.workspace = true tokio-stream.workspace = true -reth-provider = { workspace = true, features = ["test-utils"] } reth-discv4 = { workspace = true, features = ["test-utils"] } alloy-primitives.workspace = true diff --git a/examples/polygon-p2p/src/main.rs b/examples/polygon-p2p/src/main.rs index 6078ae14cb8..bae5399d9cd 100644 --- a/examples/polygon-p2p/src/main.rs +++ b/examples/polygon-p2p/src/main.rs @@ -12,8 +12,10 @@ use chain_cfg::{boot_nodes, head, polygon_chain_spec}; use reth_discv4::Discv4ConfigBuilder; use reth_network::{ - config::NetworkMode, NetworkConfig, NetworkEvent, NetworkEventListenerProvider, NetworkManager, + config::NetworkMode, EthNetworkPrimitives, NetworkConfig, NetworkEvent, + NetworkEventListenerProvider, NetworkManager, }; +use reth_network_api::events::SessionInfo; use reth_tracing::{ tracing::info, tracing_subscriber::filter::LevelFilter, LayerInfo, LogFormat, RethTracer, Tracer, @@ -57,7 +59,7 @@ async fn main() { discv4_cfg.add_boot_nodes(boot_nodes()).lookup_interval(interval); let net_cfg = net_cfg.set_discovery_v4(discv4_cfg.build()); - let net_manager = NetworkManager::new(net_cfg).await.unwrap(); + let net_manager = NetworkManager::::new(net_cfg).await.unwrap(); // The network handle is our entrypoint into the network. let net_handle = net_manager.handle(); @@ -70,7 +72,8 @@ async fn main() { while let Some(evt) = events.next().await { // For the sake of the example we only print the session established event // with the chain specific details - if let NetworkEvent::SessionEstablished { status, client_version, .. } = evt { + if let NetworkEvent::ActivePeerSession { info, .. } = evt { + let SessionInfo { status, client_version, .. } = info; let chain = status.chain; info!(?chain, ?client_version, "Session established with a new peer."); } diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs index 1b2899a6485..cde891036e6 100644 --- a/examples/rpc-db/src/main.rs +++ b/examples/rpc-db/src/main.rs @@ -16,6 +16,7 @@ use std::{path::Path, sync::Arc}; use reth::{ api::NodeTypesWithDBAdapter, + beacon_consensus::EthBeaconConsensus, providers::{ providers::{BlockchainProvider, StaticFileProvider}, ProviderFactory, @@ -33,7 +34,9 @@ use reth::rpc::builder::{ // Configuring the network parts, ideally also wouldn't need to think about this. use myrpc_ext::{MyRpcExt, MyRpcExtApiServer}; use reth::{blockchain_tree::noop::NoopBlockchainTree, tasks::TokioTaskExecutor}; -use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider, EthereumNode}; +use reth_node_ethereum::{ + node::EthereumEngineValidator, EthEvmConfig, EthExecutorProvider, EthereumNode, +}; use reth_provider::{test_utils::TestCanonStateSubscriptions, ChainSpecProvider}; // Custom rpc extension @@ -66,13 +69,18 @@ async fn main() -> eyre::Result<()> { .with_noop_pool() .with_noop_network() .with_executor(TokioTaskExecutor::default()) - .with_evm_config(EthEvmConfig::new(spec)) + .with_evm_config(EthEvmConfig::new(spec.clone())) .with_events(TestCanonStateSubscriptions::default()) - .with_block_executor(EthExecutorProvider::ethereum(provider.chain_spec())); + .with_block_executor(EthExecutorProvider::ethereum(provider.chain_spec())) + .with_consensus(EthBeaconConsensus::new(spec.clone())); // Pick which namespaces to expose. 
let config = TransportRpcModuleConfig::default().with_http([RethRpcModule::Eth]); - let mut server = rpc_builder.build(config, Box::new(EthApi::with_spawner)); + let mut server = rpc_builder.build( + config, + Box::new(EthApi::with_spawner), + Arc::new(EthereumEngineValidator::new(spec)), + ); // Add a custom rpc namespace let custom_rpc = MyRpcExt { provider }; diff --git a/examples/rpc-db/src/myrpc_ext.rs b/examples/rpc-db/src/myrpc_ext.rs index e38b6fc24d3..6cc7a4142f5 100644 --- a/examples/rpc-db/src/myrpc_ext.rs +++ b/examples/rpc-db/src/myrpc_ext.rs @@ -22,7 +22,7 @@ pub struct MyRpcExt { impl MyRpcExtApiServer for MyRpcExt where - Provider: BlockReaderIdExt + 'static, + Provider: BlockReaderIdExt + 'static, { /// Showcasing how to implement a custom rpc method /// using the provider. diff --git a/examples/stateful-precompile/Cargo.toml b/examples/stateful-precompile/Cargo.toml index 47a784c36e1..478886d061f 100644 --- a/examples/stateful-precompile/Cargo.toml +++ b/examples/stateful-precompile/Cargo.toml @@ -15,6 +15,7 @@ reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true +alloy-consensus.workspace = true eyre.workspace = true parking_lot.workspace = true diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index 26ebdfe4124..03ed1fa6943 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -2,17 +2,21 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_consensus::Header; use alloy_genesis::Genesis; use alloy_primitives::{Address, Bytes, U256}; use parking_lot::RwLock; use reth::{ api::NextBlockEnvAttributes, builder::{components::ExecutorBuilder, BuilderContext, NodeBuilder}, - primitives::revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv}, revm::{ handler::register::EvmHandler, inspector_handle_register, precompile::{Precompile, PrecompileSpecId}, + primitives::{ + BlockEnv, CfgEnvWithHandlerCfg, Env, PrecompileResult, SpecId, StatefulPrecompileMut, + TxEnv, + }, ContextPrecompile, ContextPrecompiles, Database, Evm, EvmBuilder, GetInspector, }, tasks::TaskManager, @@ -20,14 +24,14 @@ use reth::{ use reth_chainspec::{Chain, ChainSpec}; use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes, NodeTypes}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::{node::EthereumAddOns, EthEvmConfig, EthExecutorProvider, EthereumNode}; -use reth_primitives::{ - revm_primitives::{SpecId, StatefulPrecompileMut}, - Header, TransactionSigned, +use reth_node_ethereum::{ + node::EthereumAddOns, BasicBlockExecutorProvider, EthEvmConfig, EthExecutionStrategyFactory, + EthereumNode, }; +use reth_primitives::{EthPrimitives, TransactionSigned}; use reth_tracing::{RethTracer, Tracer}; use schnellru::{ByLength, LruMap}; -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap, convert::Infallible, sync::Arc}; /// Type alias for the LRU cache used within the [`PrecompileCache`]. 
type PrecompileLRUCache = LruMap<(Bytes, u64), PrecompileResult>; @@ -144,6 +148,8 @@ impl StatefulPrecompileMut for WrappedPrecompile { impl ConfigureEvmEnv for MyEvmConfig { type Header = Header; + type Transaction = TransactionSigned; + type Error = Infallible; fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { self.inner.fill_tx_env(tx_env, transaction, sender) @@ -172,7 +178,7 @@ impl ConfigureEvmEnv for MyEvmConfig { &self, parent: &Self::Header, attributes: NextBlockEnvAttributes, - ) -> (CfgEnvWithHandlerCfg, BlockEnv) { + ) -> Result<(CfgEnvWithHandlerCfg, BlockEnv), Self::Error> { self.inner.next_cfg_and_block_env(parent, attributes) } } @@ -221,10 +227,10 @@ pub struct MyExecutorBuilder { impl ExecutorBuilder for MyExecutorBuilder where - Node: FullNodeTypes>, + Node: FullNodeTypes>, { type EVM = MyEvmConfig; - type Executor = EthExecutorProvider; + type Executor = BasicBlockExecutorProvider>; async fn build_evm( self, @@ -234,7 +240,13 @@ where inner: EthEvmConfig::new(ctx.chain_spec()), precompile_cache: self.precompile_cache.clone(), }; - Ok((evm_config.clone(), EthExecutorProvider::new(ctx.chain_spec(), evm_config))) + Ok(( + evm_config.clone(), + BasicBlockExecutorProvider::new(EthExecutionStrategyFactory::new( + ctx.chain_spec(), + evm_config, + )), + )) } } diff --git a/examples/txpool-tracing/src/main.rs b/examples/txpool-tracing/src/main.rs index 94f800987a9..76abd65f4af 100644 --- a/examples/txpool-tracing/src/main.rs +++ b/examples/txpool-tracing/src/main.rs @@ -44,8 +44,7 @@ fn main() { if let Some(recipient) = tx.to() { if args.is_match(&recipient) { // trace the transaction with `trace_call` - let callrequest = - transaction_to_call_request(tx.to_recovered_transaction()); + let callrequest = transaction_to_call_request(tx.to_consensus()); let tracerequest = TraceCallRequest::new(callrequest) .with_trace_type(TraceType::Trace); if let Ok(trace_result) = traceapi.trace_call(tracerequest).await { diff --git a/testing/ef-tests/Cargo.toml b/testing/ef-tests/Cargo.toml index df68f5154fc..2fc0c751244 100644 --- a/testing/ef-tests/Cargo.toml +++ b/testing/ef-tests/Cargo.toml @@ -13,12 +13,20 @@ workspace = true [features] ef-tests = [] -asm-keccak = ["reth-primitives/asm-keccak"] +asm-keccak = [ + "reth-primitives/asm-keccak", + "alloy-primitives/asm-keccak", + "revm/asm-keccak", +] [dependencies] reth-chainspec.workspace = true reth-primitives.workspace = true -reth-db = { workspace = true, features = ["mdbx", "test-utils", "disable-lock"] } +reth-db = { workspace = true, features = [ + "mdbx", + "test-utils", + "disable-lock", +] } reth-db-api.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-stages.workspace = true @@ -29,6 +37,8 @@ revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } alloy-rlp.workspace = true alloy-primitives.workspace = true +alloy-eips.workspace = true +alloy-consensus.workspace = true walkdir = "2.3.3" serde.workspace = true diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index d29aafa8212..7d80ec6c47f 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -6,6 +6,7 @@ use crate::{ }; use alloy_rlp::Decodable; use rayon::iter::{ParallelBridge, ParallelIterator}; +use reth_chainspec::ChainSpec; use reth_primitives::{BlockBody, SealedBlock, StaticFileSegment}; use reth_provider::{ providers::StaticFileWriter, 
test_utils::create_test_provider_factory_with_chain_spec, @@ -83,11 +84,10 @@ impl Case for BlockchainTestCase { .par_bridge() .try_for_each(|case| { // Create a new test database and initialize a provider for the test case. - let provider = create_test_provider_factory_with_chain_spec(Arc::new( - case.network.clone().into(), - )) - .database_provider_rw() - .unwrap(); + let chain_spec: Arc = Arc::new(case.network.into()); + let provider = create_test_provider_factory_with_chain_spec(chain_spec.clone()) + .database_provider_rw() + .unwrap(); // Insert initial test state into the provider. provider.insert_historical_block( @@ -127,9 +127,7 @@ impl Case for BlockchainTestCase { // Execute the execution stage using the EVM processor factory for the test case // network. let _ = ExecutionStage::new_with_executor( - reth_evm_ethereum::execute::EthExecutorProvider::ethereum(Arc::new( - case.network.clone().into(), - )), + reth_evm_ethereum::execute::EthExecutorProvider::ethereum(chain_spec), ) .execute( &provider, diff --git a/testing/ef-tests/src/models.rs b/testing/ef-tests/src/models.rs index 3f3df15363a..7f6c0cdae34 100644 --- a/testing/ef-tests/src/models.rs +++ b/testing/ef-tests/src/models.rs @@ -1,6 +1,8 @@ //! Shared models for use crate::{assert::assert_equal, Error}; +use alloy_consensus::Header as RethHeader; +use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{keccak256, Address, Bloom, Bytes, B256, B64, U256}; use reth_chainspec::{ChainSpec, ChainSpecBuilder}; use reth_db::tables; @@ -8,9 +10,7 @@ use reth_db_api::{ cursor::DbDupCursorRO, transaction::{DbTx, DbTxMut}, }; -use reth_primitives::{ - Account as RethAccount, Bytecode, Header as RethHeader, SealedHeader, StorageEntry, Withdrawals, -}; +use reth_primitives::{Account as RethAccount, Bytecode, SealedHeader, StorageEntry}; use serde::Deserialize; use std::{collections::BTreeMap, ops::Deref}; @@ -87,7 +87,9 @@ pub struct Header { /// Parent beacon block root. pub parent_beacon_block_root: Option, /// Requests root. - pub requests_root: Option, + pub requests_hash: Option, + /// Target blobs per block. + pub target_blobs_per_block: Option, } impl From
<Header>
for SealedHeader { @@ -113,7 +115,8 @@ impl From<Header>
for SealedHeader { blob_gas_used: value.blob_gas_used.map(|v| v.to::()), excess_blob_gas: value.excess_blob_gas.map(|v| v.to::()), parent_beacon_block_root: value.parent_beacon_block_root, - requests_root: value.requests_root, + requests_hash: value.requests_hash, + target_blobs_per_block: value.target_blobs_per_block.map(|v| v.to::()), }; Self::new(header, value.hash) } @@ -165,10 +168,15 @@ impl State { }; tx.put::(address, reth_account)?; tx.put::(hashed_address, reth_account)?; + if let Some(code_hash) = code_hash { tx.put::(code_hash, Bytecode::new_raw(account.code.clone()))?; } - account.storage.iter().filter(|(_, v)| !v.is_zero()).try_for_each(|(k, v)| { + + for (k, v) in &account.storage { + if v.is_zero() { + continue + } let storage_key = B256::from_slice(&k.to_be_bytes::<32>()); tx.put::( address, @@ -177,10 +185,9 @@ impl State { tx.put::( hashed_address, StorageEntry { key: keccak256(storage_key), value: *v }, - ) - })?; + )?; + } } - Ok(()) } } @@ -212,9 +219,12 @@ impl Account { /// /// In case of a mismatch, `Err(Error::Assertion)` is returned. pub fn assert_db(&self, address: Address, tx: &impl DbTx) -> Result<(), Error> { - let account = tx.get::(address)?.ok_or_else(|| { - Error::Assertion(format!("Expected account ({address}) is missing from DB: {self:?}")) - })?; + let account = + tx.get_by_encoded_key::(&address)?.ok_or_else(|| { + Error::Assertion(format!( + "Expected account ({address}) is missing from DB: {self:?}" + )) + })?; assert_equal(self.balance, account.balance, "Balance does not match")?; assert_equal(self.nonce.to(), account.nonce, "Nonce does not match")?; @@ -257,7 +267,7 @@ impl Account { } /// Fork specification. -#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Deserialize)] +#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Copy, Deserialize)] pub enum ForkSpec { /// Frontier Frontier, diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml index 49a59ecf6ae..a6197d7e0cf 100644 --- a/testing/testing-utils/Cargo.toml +++ b/testing/testing-utils/Cargo.toml @@ -12,12 +12,16 @@ repository.workspace = true workspace = true [dependencies] -reth-primitives = { workspace = true, features = ["secp256k1"] } +reth-primitives = { workspace = true, features = ["secp256k1", "arbitrary"] } -alloy-eips.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true +alloy-eips.workspace = true rand.workspace = true secp256k1 = { workspace = true, features = ["rand"] } + +[dev-dependencies] +alloy-eips.workspace = true +reth-primitives-traits .workspace = true diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index d07af00ce4c..28ba171bdb3 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -1,17 +1,19 @@ //! Generators for different data structures like block headers, block bodies and ranges of those. 
-use alloy_consensus::{Transaction as _, TxLegacy}; +use alloy_consensus::{Header, Transaction as _, TxLegacy}; use alloy_eips::{ - eip6110::DepositRequest, eip7002::WithdrawalRequest, eip7251::ConsolidationRequest, + eip1898::BlockWithParent, + eip4895::{Withdrawal, Withdrawals}, + NumHash, }; -use alloy_primitives::{Address, BlockNumber, Bytes, Parity, Sealable, TxKind, B256, U256}; +use alloy_primitives::{Address, BlockNumber, Bytes, TxKind, B256, U256}; pub use rand::Rng; use rand::{ distributions::uniform::SampleRange, rngs::StdRng, seq::SliceRandom, thread_rng, SeedableRng, }; use reth_primitives::{ - proofs, sign_message, Account, BlockBody, Header, Log, Receipt, Request, Requests, SealedBlock, - SealedHeader, StorageEntry, Transaction, TransactionSigned, Withdrawal, Withdrawals, + proofs, sign_message, Account, BlockBody, Log, Receipt, SealedBlock, SealedHeader, + StorageEntry, Transaction, TransactionSigned, }; use secp256k1::{Keypair, Secp256k1}; use std::{ @@ -97,20 +99,27 @@ pub fn random_header_range( headers } +/// Generate a random [`BlockWithParent`]. +pub fn random_block_with_parent( + rng: &mut R, + number: u64, + parent: Option, +) -> BlockWithParent { + BlockWithParent { parent: parent.unwrap_or_default(), block: NumHash::new(number, rng.gen()) } +} + /// Generate a random [`SealedHeader`]. /// /// The header is assumed to not be correct if validated. pub fn random_header(rng: &mut R, number: u64, parent: Option) -> SealedHeader { - let header = reth_primitives::Header { + let header = alloy_consensus::Header { number, nonce: rng.gen(), difficulty: U256::from(rng.gen::()), parent_hash: parent.unwrap_or_default(), ..Default::default() }; - let sealed = header.seal_slow(); - let (header, seal) = sealed.into_parts(); - SealedHeader::new(header, seal) + SealedHeader::seal(header) } /// Generates a random legacy [Transaction]. @@ -150,18 +159,10 @@ pub fn sign_tx_with_random_key_pair(rng: &mut R, tx: Transaction) -> Tra /// Signs the [Transaction] with the given key pair. pub fn sign_tx_with_key_pair(key_pair: Keypair, tx: Transaction) -> TransactionSigned { - let mut signature = + let signature = sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), tx.signature_hash()).unwrap(); - if matches!(tx, Transaction::Legacy(_)) { - signature = if let Some(chain_id) = tx.chain_id() { - signature.with_chain_id(chain_id) - } else { - signature.with_parity(Parity::NonEip155(signature.v().y_parity())) - } - } - - TransactionSigned::from_transaction_and_signature(tx, signature) + TransactionSigned::new_unhashed(tx, signature) } /// Generates a set of [Keypair]s based on the desired count. 
@@ -201,11 +202,6 @@ pub fn random_block(rng: &mut R, number: u64, block_params: BlockParams) let transactions_root = proofs::calculate_transaction_root(&transactions); let ommers_hash = proofs::calculate_ommers_root(&ommers); - let requests = block_params - .requests_count - .map(|count| (0..count).map(|_| random_request(rng)).collect::>()); - let requests_root = requests.as_ref().map(|requests| proofs::calculate_requests_root(requests)); - let withdrawals = block_params.withdrawals_count.map(|count| { (0..count) .map(|i| Withdrawal { @@ -218,7 +214,7 @@ pub fn random_block(rng: &mut R, number: u64, block_params: BlockParams) }); let withdrawals_root = withdrawals.as_ref().map(|w| proofs::calculate_withdrawals_root(w)); - let sealed = Header { + let header = Header { parent_hash: block_params.parent.unwrap_or_default(), number, gas_used: total_gas, @@ -226,22 +222,15 @@ pub fn random_block(rng: &mut R, number: u64, block_params: BlockParams) transactions_root, ommers_hash, base_fee_per_gas: Some(rng.gen()), - requests_root, + // TODO(onbjerg): Proper EIP-7685 request support + requests_hash: None, withdrawals_root, ..Default::default() - } - .seal_slow(); - - let (header, seal) = sealed.into_parts(); + }; SealedBlock { - header: SealedHeader::new(header, seal), - body: BlockBody { - transactions, - ommers, - withdrawals: withdrawals.map(Withdrawals::new), - requests: requests.map(Requests), - }, + header: SealedHeader::seal(header), + body: BlockBody { transactions, ommers, withdrawals: withdrawals.map(Withdrawals::new) }, } } @@ -470,38 +459,14 @@ pub fn random_log(rng: &mut R, address: Option
, topics_count: O ) } -/// Generate random request -pub fn random_request(rng: &mut R) -> Request { - let request_type = rng.gen_range(0..3); - match request_type { - 0 => Request::DepositRequest(DepositRequest { - pubkey: rng.gen(), - withdrawal_credentials: rng.gen(), - amount: rng.gen(), - signature: rng.gen(), - index: rng.gen(), - }), - 1 => Request::WithdrawalRequest(WithdrawalRequest { - source_address: rng.gen(), - validator_pubkey: rng.gen(), - amount: rng.gen(), - }), - 2 => Request::ConsolidationRequest(ConsolidationRequest { - source_address: rng.gen(), - source_pubkey: rng.gen(), - target_pubkey: rng.gen(), - }), - _ => panic!("invalid request type"), - } -} - #[cfg(test)] mod tests { use super::*; use alloy_consensus::TxEip1559; use alloy_eips::eip2930::AccessList; - use alloy_primitives::{hex, Parity}; - use reth_primitives::{public_key_to_address, Signature}; + use alloy_primitives::{hex, PrimitiveSignature as Signature}; + use reth_primitives::public_key_to_address; + use reth_primitives_traits::SignedTransaction; use std::str::FromStr; #[test] @@ -528,7 +493,7 @@ mod tests { sign_message(B256::from_slice(&key_pair.secret_bytes()[..]), signature_hash) .unwrap(); - let signed = TransactionSigned::from_transaction_and_signature(tx.clone(), signature); + let signed = TransactionSigned::new_unhashed(tx.clone(), signature); let recovered = signed.recover_signer().unwrap(); let expected = public_key_to_address(key_pair.public_key()); @@ -574,7 +539,7 @@ mod tests { "46948507304638947509940763649030358759909902576025900602547168820602576006531", ) .unwrap(), - Parity::Parity(false), + false, ); assert_eq!(expected, signature); }