diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index ee2646490..cfefdb13a 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,8 +1,5 @@ blank_issues_enabled: false contact_links: - name: GitHub Discussions - url: https://github.com/foundry-rs/reth/discussions + url: https://github.com/paradigmxyz/reth/discussions about: Please ask and answer questions here to keep the issue tracker clean. - - name: Security - url: mailto:georgios@paradigm.xyz - about: Please report security vulnerabilities here. diff --git a/.github/scripts/check_no_std.sh b/.github/assets/check_no_std.sh similarity index 100% rename from .github/scripts/check_no_std.sh rename to .github/assets/check_no_std.sh diff --git a/.github/assets/hive/Dockerfile b/.github/assets/hive/Dockerfile new file mode 100644 index 000000000..9f75ba6f1 --- /dev/null +++ b/.github/assets/hive/Dockerfile @@ -0,0 +1,8 @@ +FROM ubuntu + +COPY dist/reth /usr/local/bin + +COPY LICENSE-* ./ + +EXPOSE 30303 30303/udp 9001 8545 8546 +ENTRYPOINT ["/usr/local/bin/reth"] \ No newline at end of file diff --git a/.github/assets/hive/build_simulators.sh b/.github/assets/hive/build_simulators.sh new file mode 100755 index 000000000..45583d549 --- /dev/null +++ b/.github/assets/hive/build_simulators.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +set -eo pipefail + +# Create the hive_assets directory +mkdir hive_assets/ + +cd hivetests +go build . + +./hive -client reth # first builds and caches the client + +# Run each hive command in the background for each simulator and wait +echo "Building images" +./hive -client reth --sim "pyspec" -sim.timelimit 1s || true & +./hive -client reth --sim "ethereum/engine" -sim.timelimit 1s || true & +./hive -client reth --sim "devp2p" -sim.timelimit 1s || true & +./hive -client reth --sim "ethereum/rpc-compat" -sim.timelimit 1s || true & +./hive -client reth --sim "smoke/genesis" -sim.timelimit 1s || true & +./hive -client reth --sim "smoke/network" -sim.timelimit 1s || true & +./hive -client reth --sim "ethereum/sync" -sim.timelimit 1s || true & +wait + +# Run docker save in parallel and wait +echo "Saving images" +docker save hive/hiveproxy:latest -o ../hive_assets/hiveproxy.tar & +docker save hive/simulators/devp2p:latest -o ../hive_assets/devp2p.tar & +docker save hive/simulators/ethereum/engine:latest -o ../hive_assets/engine.tar & +docker save hive/simulators/ethereum/rpc-compat:latest -o ../hive_assets/rpc_compat.tar & +docker save hive/simulators/ethereum/pyspec:latest -o ../hive_assets/pyspec.tar & +docker save hive/simulators/smoke/genesis:latest -o ../hive_assets/smoke_genesis.tar & +docker save hive/simulators/smoke/network:latest -o ../hive_assets/smoke_network.tar & +docker save hive/simulators/ethereum/sync:latest -o ../hive_assets/ethereum_sync.tar & +wait + +# Make sure we don't rebuild images on the CI jobs +git apply ../.github/assets/hive/no_sim_build.diff +go build . 
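+# Stash the rebuilt hive binary with the rest of the assets for the test jobs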
+mv ./hive ../hive_assets/ \ No newline at end of file diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml new file mode 100644 index 000000000..ba29ca5e7 --- /dev/null +++ b/.github/assets/hive/expected_failures.yaml @@ -0,0 +1,120 @@ +# https://github.com/paradigmxyz/reth/issues/7015 +# https://github.com/paradigmxyz/reth/issues/6332 +rpc-compat: + - debug_getRawBlock/get-invalid-number (reth) + - debug_getRawHeader/get-invalid-number (reth) + - debug_getRawReceipts/get-invalid-number (reth) + - debug_getRawTransaction/get-invalid-hash (reth) + + - eth_call/call-callenv (reth) + - eth_createAccessList/create-al-contract-eip1559 (reth) + - eth_createAccessList/create-al-contract (reth) + - eth_feeHistory/fee-history (reth) + - eth_getStorageAt/get-storage-invalid-key-too-large (reth) + - eth_getStorageAt/get-storage-invalid-key (reth) + - eth_getTransactionReceipt/get-access-list (reth) + - eth_getTransactionReceipt/get-blob-tx (reth) + - eth_getTransactionReceipt/get-dynamic-fee (reth) + +# https://github.com/paradigmxyz/reth/issues/8732 +engine-withdrawals: + - Withdrawals Fork On Genesis (Paris) (reth) + - Withdrawals Fork on Block 1 (Paris) (reth) + - Withdrawals Fork on Block 2 (Paris) (reth) + - Withdrawals Fork on Block 3 (Paris) (reth) + - Withdraw to a single account (Paris) (reth) + - Withdraw to two accounts (Paris) (reth) + - Withdraw many accounts (Paris) (reth) + - Withdraw zero amount (Paris) (reth) + - Empty Withdrawals (Paris) (reth) + - Corrupted Block Hash Payload (INVALID) (Paris) (reth) + - Withdrawals Fork on Block 1 - 8 Block Re-Org NewPayload (Paris) (reth) + - Withdrawals Fork on Block 1 - 8 Block Re-Org, Sync (Paris) (reth) + - Withdrawals Fork on Block 8 - 10 Block Re-Org NewPayload (Paris) (reth) + - Withdrawals Fork on Block 8 - 10 Block Re-Org Sync (Paris) (reth) + - Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org (Paris) (reth) + - Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org Sync (Paris) (reth) + - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org (Paris) (reth) + - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org Sync (Paris) (reth) + +# https://github.com/paradigmxyz/reth/issues/8305 +# https://github.com/paradigmxyz/reth/issues/6217 +engine-api: + - Inconsistent Head in ForkchoiceState (Paris) (reth) + - Invalid NewPayload, StateRoot, Syncing=True, EmptyTxs=True, DynFeeTxs=False (Paris) (reth) + - Invalid NewPayload, StateRoot, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Paris) (reth) + - Invalid NewPayload, PrevRandao, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=True, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=True, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasLimit, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Timestamp, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Paris) (reth) + + # Hive issue + # https://github.com/ethereum/hive/issues/1135 + - Invalid Missing Ancestor Syncing ReOrg, Transaction Signature, 
EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Signature, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Nonce, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Nonce, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Gas, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Gas, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction GasPrice, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction GasPrice, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Value, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Value, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, ReceiptsRoot, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, ReceiptsRoot, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasLimit, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasUsed, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasUsed, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Timestamp, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Incomplete Transactions, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Paris) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Incomplete Transactions, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Paris) (reth) + +# https://github.com/paradigmxyz/reth/issues/8305 +# https://github.com/paradigmxyz/reth/issues/6217 +# https://github.com/paradigmxyz/reth/issues/8306 +# https://github.com/paradigmxyz/reth/issues/7144 +engine-cancun: + - Blob Transaction Ordering, Multiple Clients (Cancun) (reth) + - Inconsistent Head in ForkchoiceState (Cancun) (reth) + - Invalid NewPayload, StateRoot, Syncing=True, EmptyTxs=True, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, StateRoot, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, PrevRandao, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=True, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=True, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, StateRoot, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasLimit, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Timestamp, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Cancun) (reth) + - Invalid 
PayloadAttributes, Missing BeaconRoot, Syncing=True (Cancun) (reth) + - Invalid NewPayload, ParentBeaconBlockRoot, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, BlobGasUsed, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, Blob Count on BlobGasUsed, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, ExcessBlobGas, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + + # Hive issue + # https://github.com/ethereum/hive/issues/1135 + - Invalid Missing Ancestor Syncing ReOrg, ReceiptsRoot, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, ReceiptsRoot, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasLimit, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasUsed, EmptyTxs=False, CanonicalReOrg=False, Invalid P8 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, GasUsed, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Timestamp, EmptyTxs=False, CanonicalReOrg=True, Invalid P8 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Incomplete Transactions, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Incomplete Transactions, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Signature, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Signature, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Nonce, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Nonce, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Gas, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Gas, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction GasPrice, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction GasPrice, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Value, EmptyTxs=False, CanonicalReOrg=False, Invalid P9 (Cancun) (reth) + - Invalid Missing Ancestor Syncing ReOrg, Transaction Value, EmptyTxs=False, CanonicalReOrg=True, Invalid P9 (Cancun) (reth) + +# https://github.com/paradigmxyz/reth/issues/8579 +sync: + - sync reth -> reth \ No newline at end of file diff --git a/.github/assets/hive/load_images.sh b/.github/assets/hive/load_images.sh new file mode 100755 index 000000000..05e1cb990 --- /dev/null +++ b/.github/assets/hive/load_images.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -eo pipefail + +# List of tar files to load +IMAGES=( + "/tmp/hiveproxy.tar" + "/tmp/devp2p.tar" + "/tmp/engine.tar" + "/tmp/rpc_compat.tar" + "/tmp/pyspec.tar" + "/tmp/smoke_genesis.tar" + "/tmp/smoke_network.tar" + "/tmp/ethereum_sync.tar" + "/tmp/reth_image.tar" +) + +# Loop through the images and load them +for IMAGE_TAR in "${IMAGES[@]}"; do + echo "Loading image $IMAGE_TAR..." 
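+  # Load in the background; the wait below blocks until every image is imported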
+ docker load -i "$IMAGE_TAR" & +done + +wait + +docker image ls -a \ No newline at end of file diff --git a/.github/assets/hive/no_sim_build.diff b/.github/assets/hive/no_sim_build.diff new file mode 100644 index 000000000..0b109efe7 --- /dev/null +++ b/.github/assets/hive/no_sim_build.diff @@ -0,0 +1,53 @@ +diff --git a/internal/libdocker/builder.go b/internal/libdocker/builder.go +index 4731c9d..d717f52 100644 +--- a/internal/libdocker/builder.go ++++ b/internal/libdocker/builder.go +@@ -7,9 +7,7 @@ import ( + "fmt" + "io" + "io/fs" +- "os" + "path/filepath" +- "strings" + + "github.com/ethereum/hive/internal/libhive" + docker "github.com/fsouza/go-dockerclient" +@@ -53,24 +51,8 @@ func (b *Builder) BuildClientImage(ctx context.Context, client libhive.ClientDes + + // BuildSimulatorImage builds a docker image of a simulator. + func (b *Builder) BuildSimulatorImage(ctx context.Context, name string) (string, error) { +- dir := b.config.Inventory.SimulatorDirectory(name) +- buildContextPath := dir +- buildDockerfile := "Dockerfile" +- // build context dir of simulator can be overridden with "hive_context.txt" file containing the desired build path +- if contextPathBytes, err := os.ReadFile(filepath.Join(filepath.FromSlash(dir), "hive_context.txt")); err == nil { +- buildContextPath = filepath.Join(dir, strings.TrimSpace(string(contextPathBytes))) +- if strings.HasPrefix(buildContextPath, "../") { +- return "", fmt.Errorf("cannot access build directory outside of Hive root: %q", buildContextPath) +- } +- if p, err := filepath.Rel(buildContextPath, filepath.Join(filepath.FromSlash(dir), "Dockerfile")); err != nil { +- return "", fmt.Errorf("failed to derive relative simulator Dockerfile path: %v", err) +- } else { +- buildDockerfile = p +- } +- } + tag := fmt.Sprintf("hive/simulators/%s:latest", name) +- err := b.buildImage(ctx, buildContextPath, buildDockerfile, tag, nil) +- return tag, err ++ return tag, nil + } + + // BuildImage creates a container by archiving the given file system, +diff --git a/internal/libdocker/proxy.go b/internal/libdocker/proxy.go +index a53e5af..0bb2ea9 100644 +--- a/internal/libdocker/proxy.go ++++ b/internal/libdocker/proxy.go +@@ -16,7 +16,7 @@ const hiveproxyTag = "hive/hiveproxy" + + // Build builds the hiveproxy image. + func (cb *ContainerBackend) Build(ctx context.Context, b libhive.Builder) error { +- return b.BuildImage(ctx, hiveproxyTag, hiveproxy.Source) ++ return nil + } + + // ServeAPI starts the API server. 
diff --git a/.github/assets/hive/parse.py b/.github/assets/hive/parse.py new file mode 100644 index 000000000..ee75fdf55 --- /dev/null +++ b/.github/assets/hive/parse.py @@ -0,0 +1,43 @@ +import json +import yaml +import sys +import argparse + +# Argument parser setup +parser = argparse.ArgumentParser(description="Check for unexpected test results based on an exclusion list.") +parser.add_argument("report_json", help="Path to the hive report JSON file.") +parser.add_argument("--exclusion", required=True, help="Path to the exclusion YAML file.") +args = parser.parse_args() + +# Load hive JSON +with open(args.report_json, 'r') as file: + report = json.load(file) + +# Load exclusion YAML +with open(args.exclusion, 'r') as file: + exclusion_data = yaml.safe_load(file) + exclusions = exclusion_data.get(report['name'], []) + +# Collect unexpected failures and passes +unexpected_failures = [] +unexpected_passes = [] + +for test in report['testCases'].values(): + test_name = test['name'] + test_pass = test['summaryResult']['pass'] + if test_name in exclusions: + if test_pass: + unexpected_passes.append(test_name) + else: + if not test_pass: + unexpected_failures.append(test_name) + +# Check if there are any unexpected failures or passes and exit with error +if unexpected_failures or unexpected_passes: + if unexpected_failures: + print("Unexpected Failures:", unexpected_failures) + if unexpected_passes: + print("Unexpected Passes:", unexpected_passes) + sys.exit(1) + +print("Success.") \ No newline at end of file diff --git a/.github/assets/hive/run_simulator.sh b/.github/assets/hive/run_simulator.sh new file mode 100755 index 000000000..018077bdc --- /dev/null +++ b/.github/assets/hive/run_simulator.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# set -x + +cd hivetests/ + +sim="${1}" +limit="${2}" + +run_hive() { + hive --sim "${sim}" --sim.limit "${limit}" --sim.parallelism 4 --client reth 2>&1 | tee /tmp/log || true +} + +check_log() { + tail -n 1 /tmp/log | sed -r 's/\x1B\[[0-9;]*[mK]//g' +} + +attempt=0 +max_attempts=5 + +while [ $attempt -lt $max_attempts ]; do + run_hive + + # Check if no tests were run. sed removes ansi colors + if check_log | grep -q "suites=0"; then + echo "no tests were run, retrying in 5 seconds" + sleep 5 + attempt=$((attempt + 1)) + continue + fi + + # Check the last line of the log for "finished", "tests failed", or "test failed" + if check_log | grep -Eq "(finished|tests? 
failed)"; then + exit 0 + else + exit 1 + fi +done +exit 1 \ No newline at end of file diff --git a/.github/scripts/install_geth.sh b/.github/assets/install_geth.sh similarity index 100% rename from .github/scripts/install_geth.sh rename to .github/assets/install_geth.sh diff --git a/.github/scripts/label_pr.js b/.github/assets/label_pr.js similarity index 100% rename from .github/scripts/label_pr.js rename to .github/assets/label_pr.js diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 32add5fb3..273bda4e5 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -52,12 +52,12 @@ jobs: with: ref: ${{ github.base_ref || 'main' }} - name: Generate test vectors - run: cargo run --bin reth -- test-vectors tables + run: cargo run --bin reth --features dev -- test-vectors tables - name: Save baseline - run: cargo bench -p reth-db --bench iai --features test-utils -- --save-baseline=$BASELINE + run: cargo bench -p reth-db --bench iai --profile profiling --features test-utils -- --save-baseline=$BASELINE - name: Checkout PR uses: actions/checkout@v4 with: clean: false - name: Compare PR benchmarks - run: cargo bench -p reth-db --bench iai --features test-utils -- --baseline=$BASELINE + run: cargo bench -p reth-db --bench iai --profile profiling --features test-utils -- --baseline=$BASELINE diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 723640c4f..d1fed1685 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -41,20 +41,14 @@ jobs: docker run --privileged --rm tonistiigi/binfmt --install arm64,amd64 docker buildx create --use --name cross-builder # - name: Build and push reth image, tag as "latest" - # if: ${{ contains(github.event.ref, 'beta') }} # run: make PROFILE=maxperf docker-build-push-latest # - name: Build and push reth image - # if: ${{ ! contains(github.event.ref, 'beta') }} # run: make PROFILE=maxperf docker-build-push - name: Build and push op-reth image, tag as "latest" - if: ${{ contains(github.event.ref, 'beta') }} run: make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push-latest - name: Build and push op-reth image - if: ${{ ! contains(github.event.ref, 'beta') }} run: make IMAGE_NAME=$OP_IMAGE_NAME DOCKER_IMAGE_NAME=$OP_DOCKER_IMAGE_NAME PROFILE=maxperf op-docker-build-push - name: Build and push bsc-reth image, tag as "latest" - if: ${{ contains(github.event.ref, 'beta') }} run: make IMAGE_NAME=$BSC_IMAGE_NAME DOCKER_IMAGE_NAME=$BSC_DOCKER_IMAGE_NAME PROFILE=maxperf bsc-docker-build-push-latest - name: Build and push bsc-reth image - if: ${{ ! contains(github.event.ref, 'beta') }} run: make IMAGE_NAME=$BSC_IMAGE_NAME DOCKER_IMAGE_NAME=$BSC_DOCKER_IMAGE_NAME PROFILE=maxperf bsc-docker-build-push \ No newline at end of file diff --git a/.github/workflows/eth-sync.yml b/.github/workflows/eth-sync.yml new file mode 100644 index 000000000..54c0d9607 --- /dev/null +++ b/.github/workflows/eth-sync.yml @@ -0,0 +1,50 @@ +# Runs an ethereum mainnet sync test. 
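+# It syncs the first 100k blocks against a known tip hash and then unwinds 100 blocks.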
+ +name: eth-sync-test + +on: + pull_request: + merge_group: + push: + branches: [main] + +env: + CARGO_TERM_COLOR: always + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + sync: + name: sync / 100k blocks + # Only run sync tests in merge groups + if: github.event_name == 'merge_group' + runs-on: + group: Reth + env: + RUST_LOG: info,sync=error + RUST_BACKTRACE: 1 + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Build reth + run: | + cargo install --features asm-keccak,jemalloc --path bin/reth + - name: Run sync + run: | + reth node \ + --debug.tip 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 \ + --debug.max-block 100000 \ + --debug.terminate + - name: Verify the target block hash + run: | + reth db get static-file headers 100000 \ + | grep 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 + - name: Run stage unwind for 100 blocks + run: | + reth stage unwind num-blocks 100 diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index e34470e4d..65063dd01 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -16,28 +16,47 @@ concurrency: cancel-in-progress: true jobs: - prepare: + prepare-reth: if: github.repository == 'paradigmxyz/reth' timeout-minutes: 45 runs-on: group: Reth steps: - uses: actions/checkout@v4 - - run: mkdir artifacts + - run: mkdir artifacts + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Build reth + run: | + cargo build --features asm-keccak --profile hivetests --bin reth --locked + mkdir dist && cp ./target/hivetests/reth ./dist/reth - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Build and export reth image uses: docker/build-push-action@v6 with: context: . + file: .github/assets/hive/Dockerfile tags: ghcr.io/paradigmxyz/reth:latest - build-args: | - BUILD_PROFILE=hivetests - FEATURES=asm-keccak outputs: type=docker,dest=./artifacts/reth_image.tar cache-from: type=gha cache-to: type=gha,mode=max + - name: Upload reth image + uses: actions/upload-artifact@v4 + with: + name: artifacts + path: ./artifacts + + prepare-hive: + if: github.repository == 'paradigmxyz/reth' + timeout-minutes: 45 + runs-on: + group: Reth + steps: + - uses: actions/checkout@v4 - name: Checkout hive tests uses: actions/checkout@v4 with: @@ -49,29 +68,23 @@ jobs: with: go-version: "^1.13.1" - run: go version - - name: Build hive tool - run: | - cd hivetests - go build . 
- mv ./hive ../artifacts/ - - name: Upload artifacts + - name: Build hive assets + run: .github/assets/hive/build_simulators.sh + + - name: Upload hive assets uses: actions/upload-artifact@v4 with: - name: artifacts - path: ./artifacts - + name: hive_assets + path: ./hive_assets test: timeout-minutes: 60 strategy: fail-fast: false matrix: - # TODO: enable etherem/sync once resolved: - # https://github.com/paradigmxyz/reth/issues/8579 - # TODO: enable ethereum/rpc once resolved: + # ethereum/rpc to be deprecated: # https://github.com/ethereum/hive/pull/1117 - # sim: [ethereum/rpc, smoke/genesis, smoke/network, ethereum/sync] - sim: [smoke/genesis, smoke/network] + sim: [smoke/genesis, smoke/network, ethereum/sync] include: - sim: devp2p limit: discv4 @@ -98,25 +111,14 @@ jobs: - TestBlobViolations - sim: ethereum/engine limit: engine-exchange-capabilities - # TODO: enable engine-withdrawals once resolved: - # https://github.com/paradigmxyz/reth/issues/8732 - # - sim: ethereum/engine - # limit: engine-withdrawals + - sim: ethereum/engine + limit: engine-withdrawals - sim: ethereum/engine limit: engine-auth - sim: ethereum/engine - limit: engine-transition - # TODO: enable engine-api once resolved: - # https://github.com/paradigmxyz/reth/issues/6217 - # https://github.com/paradigmxyz/reth/issues/8305 - # - sim: ethereum/engine - # limit: engine-api - # TODO: enable cancun once resolved: - # https://github.com/paradigmxyz/reth/issues/6217 - # https://github.com/paradigmxyz/reth/issues/8306 - # https://github.com/paradigmxyz/reth/issues/7144 - # - sim: ethereum/engine - # limit: cancun + limit: engine-api + - sim: ethereum/engine + limit: cancun # eth_ rpc methods - sim: ethereum/rpc-compat include: @@ -137,12 +139,9 @@ jobs: - eth_getTransactionReceipt - eth_sendRawTransaction - eth_syncing - # TODO: enable debug_ rpc-compat once resolved: - # https://github.com/paradigmxyz/reth/issues/7015 - # https://github.com/paradigmxyz/reth/issues/6332 # debug_ rpc methods - # - sim: ethereum/rpc-compat - # include: [debug_] + - sim: ethereum/rpc-compat + include: [debug_] # Pyspec cancun jobs - sim: pyspec include: [cancun/eip4844] @@ -176,23 +175,34 @@ jobs: include: [homestead/] - sim: pyspec include: [frontier/] - needs: prepare + needs: + - prepare-reth + - prepare-hive name: run runs-on: group: Reth permissions: issues: write steps: - - name: Download artifacts + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download hive assets + uses: actions/download-artifact@v4 + with: + name: hive_assets + path: /tmp + + - name: Download reth image uses: actions/download-artifact@v4 with: name: artifacts path: /tmp - - name: Load Docker image - run: | - docker load --input /tmp/reth_image.tar - docker image ls -a + - name: Load Docker images + run: .github/assets/hive/load_images.sh + - name: Move hive binary run: | mv /tmp/hive /usr/local/bin @@ -206,32 +216,12 @@ jobs: path: hivetests - name: Run ${{ matrix.sim }} simulator - run: | - cd hivetests - hive --sim "${{ matrix.sim }}$" --sim.limit "${{matrix.limit}}/${{join(matrix.include, '|')}}" --client reth + run: .github/assets/hive/run_simulator.sh "${{ matrix.sim }}$" "${{matrix.limit}}/${{join(matrix.include, '|')}}" - - name: Create github issue if sim failed - env: - GH_TOKEN: ${{ github.token }} - if: ${{ failure() }} + - name: Parse hive output run: | - echo "Simulator failed, creating issue" - # Check if issue already exists - # get all issues with the label C-hivetest, loop over each page and check if the issue already exists - - 
existing_issues=$(gh api /repos/paradigmxyz/reth/issues -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" -F "labels=C-hivetest" --method GET | jq '.[].title') - if [[ $existing_issues == *"Hive Test Failure: ${{ matrix.sim }}"* ]]; then - echo "Issue already exists" - exit 0 - fi - gh api \ - --method POST \ - -H "Accept: application/vnd.github+json" \ - -H "X-GitHub-Api-Version: 2022-11-28" \ - /repos/${{ github.repository }}/issues \ - -f title='Hive Test Failure: ${{ matrix.sim }}' \ - -f body="!!!!!!! This is an automated issue created by the hive test failure !!!!!!!
-
- The hive test for ${{ matrix.sim }} failed. Please investigate and fix the issue.
-
[Link to the failed run](https://github.com/paradigmxyz/reth/actions/runs/${{ github.run_id }})" \ - -f "labels[]=C-hivetest" + find hivetests/workspace/logs -type f -name "*.json" ! -name "hive.json" | xargs -I {} python .github/assets/hive/parse.py {} --exclusion .github/assets/hive/expected_failures.yaml + - name: Print simulator output if: ${{ failure() }} run: | diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 858f10485..bb244bfb0 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -40,7 +40,7 @@ jobs: with: toolchain: ${{ env.TOOL_CHAIN }} - name: Install Geth - run: .github/scripts/install_geth.sh + run: .github/assets/install_geth.sh - uses: taiki-e/install-action@nextest - uses: Swatinem/rust-cache@v2 with: @@ -64,38 +64,6 @@ jobs: --locked -p reth-node-bsc --features "bsc ${{ matrix.extra-features }}" \ -E "kind(test)" - sync: - name: sync / 100k blocks - # Only run sync tests in merge groups - if: github.event_name == 'merge_group' - runs-on: [ bnb-chain-ap-qa-cicd-runners ] - env: - RUST_LOG: info,sync=error - RUST_BACKTRACE: 1 - timeout-minutes: 60 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - name: Run sync - run: | - cargo run --release --features asm-keccak,jemalloc,min-error-logs --bin reth \ - -- node \ - --debug.tip 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 \ - --debug.max-block 100000 \ - --debug.terminate - - name: Verify the target block hash - run: | - cargo run --release --bin reth \ - -- db get static-file headers 100000 \ - | grep 0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4 - - name: Run stage unwind for 100 blocks - run: | - cargo run --release --bin reth \ - -- stage unwind num-blocks 100 - integration-success: name: integration success runs-on: ubuntu-latest diff --git a/.github/workflows/label-pr.yml b/.github/workflows/label-pr.yml index 857d354a8..077271735 100644 --- a/.github/workflows/label-pr.yml +++ b/.github/workflows/label-pr.yml @@ -19,5 +19,5 @@ jobs: uses: actions/github-script@v7 with: script: | - const label_pr = require('./.github/scripts/label_pr.js') + const label_pr = require('./.github/assets/label_pr.js') await label_pr({github, context}) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 3264ba21d..a48d73414 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -68,7 +68,7 @@ jobs: with: cache-on-failure: true - name: Run no_std checks - run: .github/scripts/check_no_std.sh + run: .github/assets/check_no_std.sh crate-checks: runs-on: ubuntu-latest @@ -143,6 +143,15 @@ jobs: with: cmd: jq empty etc/grafana/dashboards/overview.json + no-test-deps: + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - name: Ensure no arbitrary or proptest dependency on default build + run: cargo tree --package reth -e=features,no-dev | grep -Eq "arbitrary|proptest" && exit 1 || exit 0 + lint-success: name: lint success runs-on: ubuntu-latest @@ -153,6 +162,7 @@ jobs: - fmt - codespell - grafana + - no-test-deps timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/.github/workflows/op-sync.yml b/.github/workflows/op-sync.yml new file mode 100644 index 000000000..73303b032 --- /dev/null +++ b/.github/workflows/op-sync.yml @@ -0,0 +1,52 @@ +# Runs a base mainnet sync test. 
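+# It syncs the first 10k blocks of Base and then unwinds 100 blocks.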
+ +name: op-sync-test + +on: + pull_request: + merge_group: + push: + branches: [main] + +env: + CARGO_TERM_COLOR: always + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + sync: + name: op sync / 10k blocks + # Only run sync tests in merge groups + if: github.event_name == 'merge_group' + runs-on: + group: Reth + env: + RUST_LOG: info,sync=error + RUST_BACKTRACE: 1 + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Build op-reth + run: | + cargo install --features asm-keccak,jemalloc,optimism --bin op-reth --path bin/reth + - name: Run sync + # https://basescan.org/block/10000 + run: | + op-reth node \ + --chain base \ + --debug.tip 0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7 \ + --debug.max-block 10000 \ + --debug.terminate + - name: Verify the target block hash + run: | + op-reth db --chain base get static-file headers 10000 \ + | grep 0xbb9b85352c7ebca6ba8efc63bd66cecd038c92ec8ebd02e153a3e0b197e672b7 + - name: Run stage unwind for 100 blocks + run: | + op-reth stage --chain base unwind num-blocks 100 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f08f122ae..b54c182e5 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -136,7 +136,7 @@ jobs: # https://github.com/openethereum/openethereum/blob/6c2d392d867b058ff867c4373e40850ca3f96969/.github/workflows/build.yml run: | body=$(cat <<- "ENDBODY" - ![image](https://github.com/paradigmxyz/reth/assets/17802178/d02595cf-7130-418f-81a3-ec91f614abf5) + ![image](https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-prod.png) ## Testing Checklist (DELETE ME) diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 6c72f5a42..55bddd542 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -51,7 +51,7 @@ jobs: run: | cargo nextest run \ --locked --features "asm-keccak ${{ matrix.network }} opbnb" \ - --workspace --exclude examples --exclude ef-tests --exclude reth-node-ethereum \ + --workspace --exclude ef-tests --exclude reth-node-ethereum \ --partition hash:${{ matrix.partition }}/3 \ -E "!kind(test)" - if: matrix.network == 'bsc' @@ -59,8 +59,8 @@ jobs: run: | cargo nextest run \ --locked --features "asm-keccak ${{ matrix.network }}" \ - --workspace --exclude examples --exclude exex-rollup --exclude ef-tests --exclude reth-beacon-consensus \ - --exclude reth-blockchain-tree --exclude reth-node-ethereum --exclude reth-rpc-engine-api --exclude example-exex-rollup \ + --workspace --exclude examples --exclude example-exex-rollup --exclude ef-tests --exclude reth-beacon-consensus \ + --exclude reth-blockchain-tree --exclude reth-node-ethereum --exclude reth-rpc-engine-api \ --partition hash:${{ matrix.partition }}/3 \ -E "!kind(test)" diff --git a/CHANGELOG.md b/CHANGELOG.md index 1ea901166..ce92f8eff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,15 +1,59 @@ # Changelog -## v0.1.0-beta.1 -### FEATURE -* [\#10](https://github.com/bnb-chain/reth/pull/10) feat: support opbnb network -## v0.1.0-beta.2 +## v1.0.0-rc.2 + +### FEATURES +* [\#49](https://github.com/bnb-chain/reth/pull/49) ci: specify rust toolchain version +* [\#50](https://github.com/bnb-chain/reth/pull/50) chore: add bsc and docker commands to readme +* [\#51](https://github.com/bnb-chain/reth/pull/51) ci: tag non-beta version as latest image +* 
[\#54](https://github.com/bnb-chain/reth/pull/54) feat: set finalized and safe hash (parlia) +* [\#58](https://github.com/bnb-chain/reth/pull/58) chore: fix system account issue and hertz storage patch issue on testnet +* [\#61](https://github.com/bnb-chain/reth/pull/61) chore: merge with upstream v1.0.1 +* [\#67](https://github.com/bnb-chain/reth/pull/67) doc: update README.md + +### BUGFIX +* [\#48](https://github.com/bnb-chain/reth/pull/48) fix: disable parlia task if running with debug.tip +* [\#69](https://github.com/bnb-chain/reth/pull/69) fix: add exception for slash system transaction +* [\#70](https://github.com/bnb-chain/reth/pull/70) fix: remove sidecars when calling block_with_senders +* [\#64](https://github.com/bnb-chain/reth/pull/64) fix: create empty sidecars file if no sidecars before +* [\#65](https://github.com/bnb-chain/reth/pull/65) fix: hertz storage patch issue on bsc mainnet +* [\#59](https://github.com/bnb-chain/reth/pull/59) fix: add check of plato hardfork in verify_vote_attestation +* [\#60](https://github.com/bnb-chain/reth/pull/60) fix: zero block reward issue +* [\#57](https://github.com/bnb-chain/reth/pull/57) fix: fix the hardforks' order +* [\#52](https://github.com/bnb-chain/reth/pull/52) fix: spec mapping issue and snapshot overwrite issue +* [\#53](https://github.com/bnb-chain/reth/pull/53) fix: system account status issue + +## v1.0.0-rc.1 + +### FEATURES + +* [\#38](https://github.com/bnb-chain/reth/pull/38) chore: merge v1.0.0 into develop +* [\#39](https://github.com/bnb-chain/reth/pull/39) feat: add bootnodes for opbnb +* [\#41](https://github.com/bnb-chain/reth/pull/41) feat: add bootnodes for opbnb +* [\#42](https://github.com/bnb-chain/reth/pull/42) chore: add HaberFix upgrade and other optimization +* [\#43](https://github.com/bnb-chain/reth/pull/43) feat: add docker file for optimism and opbnb +* [\#44](https://github.com/bnb-chain/reth/pull/44) chore: add docker image workflow for bsc +* [\#45](https://github.com/bnb-chain/reth/pull/45) feat: add support of block sidecars for bsc + +### BUGFIX +* [\#34](https://github.com/bnb-chain/reth/pull/34) fix: update executor type in commands for bsc +* [\#36](https://github.com/bnb-chain/reth/pull/36) fix: base fee configuration of bsc +* [\#40](https://github.com/bnb-chain/reth/pull/40) fix: unwrap failed on fcu_resp +* [\#46](https://github.com/bnb-chain/reth/pull/46) fix: check header timestamp in parlia task + +## v0.1.0-beta.3 ### FEATURE -* [\#27](https://github.com/bnb-chain/reth/pull/27) feat: introduce Haber fork into opBNB testnet +* [\#33](https://github.com/bnb-chain/reth/pull/33) feat: enable Shanghai, Canyon, Cancun, Ecotone, Haber on opBNB mainnet +## v0.1.0-beta.2 ### BUGFIX -* [\#17](https://github.com/bnb-chain/reth/pull/17) fix: p2p incompatible forks for opbnb testnet and mainnet -* [\#19](https://github.com/bnb-chain/reth/pull/19) chore: fix ci issues -* [\#24](https://github.com/bnb-chain/reth/pull/24) fix: opbnb synchronization failure issue * [\#25](https://github.com/bnb-chain/reth/pull/25) chore: add pr template +* [\#27](https://github.com/bnb-chain/reth/pull/27) feat: introduce Haber fork into opBNB testnet * [\#26](https://github.com/bnb-chain/reth/pull/26) fix: opbnb p2p forkid mismatch issue +* [\#24](https://github.com/bnb-chain/reth/pull/24) fix: opbnb synchronization failure issue +* [\#19](https://github.com/bnb-chain/reth/pull/19) chore: fix ci issues +* [\#17](https://github.com/bnb-chain/reth/pull/17) fix p2p incompatible forks for opbnb testnet and mainnet + +## 
v0.1.0-beta.1 +### FEATURE +* [\#10](https://github.com/bnb-chain/reth/pull/10) feat: support opbnb network diff --git a/CODEOWNERS b/CODEOWNERS index 1fd984ec1..225d0f08b 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -6,8 +6,9 @@ crates/chainspec/ @Rjected @joshieDo @mattsse crates/cli/ @onbjerg @mattsse crates/config/ @onbjerg crates/consensus/ @rkrasiuk @mattsse @Rjected +crates/engine @rkrasiuk @mattsse @Rjected crates/e2e-test-utils/ @mattsse @Rjected -crates/engine-primitives/ @rkrasiuk @mattsse @Rjected +crates/engine/ @rkrasiuk @mattsse @Rjected @fgimenez crates/errors/ @mattsse crates/ethereum/ @mattsse @Rjected crates/ethereum-forks/ @mattsse @Rjected @@ -26,7 +27,7 @@ crates/primitives/ @DaniPopes @Rjected crates/primitives-traits/ @DaniPopes @Rjected @joshieDo crates/prune/ @shekhirin @joshieDo crates/revm/ @mattsse @rakita -crates/rpc/ @mattsse @Rjected +crates/rpc/ @mattsse @Rjected @emhane crates/stages/ @onbjerg @rkrasiuk @shekhirin crates/static-file/ @joshieDo @shekhirin crates/storage/codecs/ @joshieDo diff --git a/Cargo.lock b/Cargo.lock index e818a21de..7e96896e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.22.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] @@ -122,9 +122,8 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a016bfa21193744d4c38b3f3ab845462284d129e5e23c7cc0fafca7e92d9db37" +version = "0.1.4" +source = "git+https://github.com/bnb-chain/alloy?rev=18f098dd78be661433bae682ad161a41f8a9c301#18f098dd78be661433bae682ad161a41f8a9c301" dependencies = [ "alloy-eips", "alloy-primitives", @@ -133,15 +132,15 @@ dependencies = [ "arbitrary", "c-kzg", "proptest", - "proptest-derive", + "proptest-derive 0.4.0", "serde", ] [[package]] name = "alloy-dyn-abi" -version = "0.7.6" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6e6436a9530f25010d13653e206fab4c9feddacf21a54de8d7311b275bc56b" +checksum = "8425a283510106b1a6ad25dd4bb648ecde7da3fd2baeb9400a85ad62f51ec90b" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -152,14 +151,13 @@ dependencies = [ "itoa", "serde", "serde_json", - "winnow 0.6.13", + "winnow 0.6.8", ] [[package]] name = "alloy-eips" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d6d8118b83b0489cfb7e6435106948add2b35217f4a5004ef895f613f60299" +version = "0.1.4" +source = "git+https://github.com/bnb-chain/alloy?rev=18f098dd78be661433bae682ad161a41f8a9c301#18f098dd78be661433bae682ad161a41f8a9c301" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -169,16 +167,16 @@ dependencies = [ "derive_more", "once_cell", "proptest", - "proptest-derive", + "proptest-derive 0.4.0", "serde", "sha2 0.10.8", ] [[package]] name = "alloy-genesis" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "894f33a7822abb018db56b10ab90398e63273ce1b5a33282afd186c132d764a6" +checksum = "bca15afde1b6d15e3fc1c97421262b1bbb37aee45752e3c8b6d6f13f776554ff" dependencies = [ "alloy-primitives", "alloy-serde", @@ -199,9 +197,21 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "0.1.2" +version = "0.1.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f0ae6e93b885cc70fe8dae449e7fd629751dbee8f59767eaaa7285333c5727" +checksum = "6d6f34930b7e3e2744bcc79056c217f00cb2abb33bc5d4ff88da7623c5bb078b" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "alloy-json-rpc" +version = "0.1.4" +source = "git+https://github.com/bnb-chain/alloy?rev=18f098dd78be661433bae682ad161a41f8a9c301#18f098dd78be661433bae682ad161a41f8a9c301" dependencies = [ "alloy-primitives", "serde", @@ -212,13 +222,12 @@ dependencies = [ [[package]] name = "alloy-network" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc122cbee2b8523854cc11d87bcd5773741602c553d2d2d106d82eeb9c16924a" +version = "0.1.4" +source = "git+https://github.com/bnb-chain/alloy?rev=18f098dd78be661433bae682ad161a41f8a9c301#18f098dd78be661433bae682ad161a41f8a9c301" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-json-rpc", + "alloy-json-rpc 0.1.4 (git+https://github.com/bnb-chain/alloy?rev=18f098dd78be661433bae682ad161a41f8a9c301)", "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", @@ -232,9 +241,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df0e005ecc1b41f0b3bf90f68df5a446971e7eb34e1ea051da401e7e8eeef8fd" +checksum = "494b2fb0276a78ec13791446a417c2517eee5c8e8a8c520ae0681975b8056e5c" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -266,7 +275,7 @@ dependencies = [ "k256 0.13.3", "keccak-asm", "proptest", - "proptest-derive", + "proptest-derive 0.4.0", "rand 0.8.5", "ruint", "serde", @@ -275,14 +284,14 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d5af289798fe8783acd0c5f10644d9d26f54a12bc52a083e4f3b31718e9bf92" +checksum = "9c538bfa893d07e27cb4f3c1ab5f451592b7c526d511d62b576a2ce59e146e4a" dependencies = [ "alloy-chains", "alloy-consensus", "alloy-eips", - "alloy-json-rpc", + "alloy-json-rpc 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-network", "alloy-primitives", "alloy-pubsub", @@ -301,7 +310,7 @@ dependencies = [ "futures-utils-wasm", "lru", "pin-project", - "reqwest 0.12.5", + "reqwest 0.12.4", "serde", "serde_json", "tokio", @@ -311,11 +320,11 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702f330b7da123a71465ab9d39616292f8344a2811c28f2cc8d8438a69d79e35" +checksum = "0a7341322d9bc0e49f6e9fd9f2eb8e30f73806f2dd12cbb3d6bab2694c921f87" dependencies = [ - "alloy-json-rpc", + "alloy-json-rpc 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "alloy-transport", "bimap", @@ -330,9 +339,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b155716bab55763c95ba212806cf43d05bcc70e5f35b02bad20cf5ec7fe11fed" +checksum = "a43b18702501396fa9bcdeecd533bc85fac75150d308fc0f6800a01e6234a003" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -341,22 +350,22 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8037e03c7f462a063f28daec9fda285a9a89da003c552f8637a80b9c8fd96241" +checksum = "d83524c1f6162fcb5b0decf775498a125066c86dda6066ed609531b0e912f85a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] name = "alloy-rpc-client" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b40fcb53b2a9d0a78a4968b2eca8805a4b7011b9ee3fdfa2acaf137c5128f36b" +checksum = "5ba31bae67773fd5a60020bea900231f8396202b7feca4d0c70c6b59308ab4a8" dependencies = [ - "alloy-json-rpc", + "alloy-json-rpc 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-primitives", "alloy-pubsub", "alloy-transport", @@ -364,7 +373,7 @@ dependencies = [ "alloy-transport-ws", "futures", "pin-project", - "reqwest 0.12.5", + "reqwest 0.12.4", "serde", "serde_json", "tokio", @@ -376,9 +385,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f2fbe956a3e0f0975c798f488dc6be96b669544df3737e18f4a325b42f4c86" +checksum = "184a7a42c7ba9141cc9e76368356168c282c3bc3d9e5d78f3556bdfe39343447" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -388,9 +397,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "334a8c00cde17a48e073031f1534e71a75b529dbf25552178c43c2337632e0ab" +checksum = "7e953064025c49dc9f6a3f3ac07a713487849065692228b33948f2714f2bb60d" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -400,9 +409,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87f724e6170f558b809a520e37bdb34d99123092b78118bff31fb5b21dc2a2e" +checksum = "8c7cf4356a9d00df76d6e90d002e2a7b5edc1c8476e90e6f17ab868d99db6435" dependencies = [ "alloy-primitives", "alloy-serde", @@ -411,9 +420,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb383cd3981cee0031aeacbd394c4e726e907f3a0180fe36d5fc76d37c41cd82" +checksum = "a5f2e67d3e2478902b71bbadcd564ee5bbcc71945a0010a1f0e87a2339c6f3f9" dependencies = [ "alloy-eips", "alloy-primitives", @@ -425,9 +434,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd473d98ec552f8229cd6d566bd2b0bbfc5bb4efcefbb5288c834aa8fd832020" +checksum = "6e765962e3b82fd6f276a0873b5bd897e5d75a25f78fa9a6a21bd350d8e98a4e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -444,9 +453,8 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "083f443a83b9313373817236a8f4bea09cca862618e9177d822aee579640a5d6" +version = "0.1.4" +source = "git+https://github.com/bnb-chain/alloy?rev=18f098dd78be661433bae682ad161a41f8a9c301#18f098dd78be661433bae682ad161a41f8a9c301" dependencies = [ "alloy-consensus", "alloy-eips", @@ -458,17 +466,30 @@ dependencies = [ "itertools 0.13.0", "jsonrpsee-types", "proptest", - "proptest-derive", + "proptest-derive 0.4.0", "serde", "serde_json", "thiserror", ] +[[package]] +name = "alloy-rpc-types-mev" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dd8624e01721deacad6bc9af75abdf2e99d248df0e1ad5f3f0bda0b3c1d50fd" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-serde", + "serde", + "serde_json", +] + [[package]] name = "alloy-rpc-types-trace" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c7a838f9a34aae7022c6cb53ecf21bc0a5a30c82f8d9eb0afed701ab5fd88de" +checksum = "567933b1d95fd42cb70b75126e32afec2e5e2c3c16e7100a3f83dc1c80f4dc0e" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -480,9 +501,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1572267dbc660843d87c02994029d1654c2c32867e186b266d1c03644b43af97" +checksum = "3115f4eb1bb9ae9aaa0b24ce875a1d86d6689b16438a12377832def2b09e373c" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -492,23 +513,21 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d94da1c0c4e27cc344b05626fe22a89dc6b8b531b9475f3b7691dbf6913e4109" +version = "0.1.4" +source = "git+https://github.com/bnb-chain/alloy?rev=18f098dd78be661433bae682ad161a41f8a9c301#18f098dd78be661433bae682ad161a41f8a9c301" dependencies = [ "alloy-primitives", "arbitrary", "proptest", - "proptest-derive", + "proptest-derive 0.4.0", "serde", "serde_json", ] [[package]] name = "alloy-signer" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58d876be3afd8b78979540084ff63995292a26aa527ad0d44276405780aa0ffd" +version = "0.1.4" +source = "git+https://github.com/bnb-chain/alloy?rev=18f098dd78be661433bae682ad161a41f8a9c301#18f098dd78be661433bae682ad161a41f8a9c301" dependencies = [ "alloy-primitives", "async-trait", @@ -520,9 +539,8 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40a37dc216c269b8a7244047cb1c18a9c69f7a0332ab2c4c2aa4cbb1a31468b" +version = "0.1.4" +source = "git+https://github.com/bnb-chain/alloy?rev=18f098dd78be661433bae682ad161a41f8a9c301#18f098dd78be661433bae682ad161a41f8a9c301" dependencies = [ "alloy-consensus", "alloy-network", @@ -547,7 +565,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -564,7 +582,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", "syn-solidity", "tiny-keccak", ] @@ -582,7 +600,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.68", + "syn 2.0.65", "syn-solidity", ] @@ -592,7 +610,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baa2fbd22d353d8685bd9fee11ba2d8b5c3b1d11e56adb3265fcf1f32bfdf404" dependencies = [ - "winnow 0.6.13", + "winnow 0.6.8", ] [[package]] @@ -610,11 +628,11 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245af9541f0a0dbd5258669c80dfe3af118164cacec978a520041fc130550deb" +checksum = "01b51a291f949f755e6165c3ed562883175c97423703703355f4faa4b7d0a57c" dependencies = [ - "alloy-json-rpc", + "alloy-json-rpc 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "base64 0.22.1", "futures-util", "futures-utils-wasm", @@ 
-623,18 +641,19 @@ dependencies = [ "thiserror", "tokio", "tower", + "tracing", "url", ] [[package]] name = "alloy-transport-http" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5619c017e1fdaa1db87f9182f4f0ed97c53d674957f4902fba655e972d359c6c" +checksum = "86d65871f9f1cafe1ed25cde2f1303be83e6473e995a2d56c275ae4fcce6119c" dependencies = [ - "alloy-json-rpc", + "alloy-json-rpc 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-transport", - "reqwest 0.12.5", + "reqwest 0.12.4", "serde_json", "tower", "tracing", @@ -643,11 +662,11 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "173cefa110afac7a53cf2e75519327761f2344d305eea2993f3af1b2c1fc1c44" +checksum = "cd7fbc8b6282ce41b01cbddef7bffb133fe6e1bf65dcd39770d45a905c051179" dependencies = [ - "alloy-json-rpc", + "alloy-json-rpc 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-pubsub", "alloy-transport", "bytes 1.6.0", @@ -662,9 +681,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c0aff8af5be5e58856c5cdd1e46db2c67c7ecd3a652d9100b4822c96c899947" +checksum = "aec83fd052684556c78c54df111433493267234d82321c2236560c752f595f20" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -692,7 +711,7 @@ dependencies = [ "hashbrown 0.14.5", "nybbles", "proptest", - "proptest-derive", + "proptest-derive 0.4.0", "serde", "smallvec", "tracing", @@ -760,9 +779,9 @@ dependencies = [ [[package]] name = "anstyle-query" -version = "1.1.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" +checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" dependencies = [ "windows-sys 0.52.0", ] @@ -794,7 +813,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -1021,9 +1040,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.11" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5" +checksum = "9c90a406b4495d129f00461241616194cb8a032c8d1c53c657f0961d5f8e0498" dependencies = [ "brotli", "flate2", @@ -1068,7 +1087,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -1079,7 +1098,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -1117,7 +1136,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -1140,9 +1159,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", @@ -1238,7 +1257,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - 
"syn 2.0.68", + "syn 2.0.65", "which", ] @@ -1281,9 +1300,9 @@ dependencies = [ [[package]] name = "bitm" -version = "0.4.2" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b06e8e5bec3490b9f6f3adbb78aa4f53e8396fd9994e8a62a346b44ea7c15f35" +checksum = "31b9ea263f0faf826a1c9de0e8bf8f32f5986c05f5e3abcf6bcde74616009586" dependencies = [ "dyn_size_of", ] @@ -1490,7 +1509,7 @@ checksum = "6be9c93793b60dac381af475b98634d4b451e28336e72218cad9a20176218dbc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", "synstructure 0.13.1", ] @@ -1541,9 +1560,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "4.0.1" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" +checksum = "e6221fe77a248b9117d431ad93761222e1cf8ff282d9d1d5d9f53d6299a1cf76" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1590,22 +1609,22 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytemuck" -version = "1.16.1" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b236fc92302c97ed75b38da1f4917b5cdda4984745740f153a5d3059e48d725e" +checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5" dependencies = [ "bytemuck_derive", ] [[package]] name = "bytemuck_derive" -version = "1.7.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ee891b04274a59bd38b412188e24b849617b2e45a0fd8d057deb63e7403761b" +checksum = "4da9a32f3fed317401fa3c862968128267c3106685286e15d5aaa3d7389c2f60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -1698,9 +1717,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.100" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c891175c3fb232128f48de6590095e59198bbeb8620c310be349bfc3afd12c7b" +checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f" dependencies = [ "jobserver", "libc", @@ -1791,9 +1810,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.8.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" dependencies = [ "glob", "libc", @@ -1802,9 +1821,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.7" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5db83dced34638ad474f39f250d7fea9598bdd239eaced1bdf45d597da0f433f" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -1812,9 +1831,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.7" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e204572485eb3fbf28f871612191521df159bc3e15a9f5064c66dba3a8c05f" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", "anstyle", @@ -1824,21 +1843,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.5" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c780290ccf4fb26629baa7a1081e68ced113f1d3ec302fa5948f1c381ebf06c6" +checksum = 
"528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] name = "clap_lex" -version = "0.7.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "coins-bip32" @@ -1944,7 +1963,7 @@ dependencies = [ "flex-error", "serde", "serde_json", - "toml 0.8.14", + "toml 0.8.13", "url", ] @@ -2085,7 +2104,7 @@ dependencies = [ "directories", "serde", "thiserror", - "toml 0.8.14", + "toml 0.8.13", ] [[package]] @@ -2126,10 +2145,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] -name = "const-str" -version = "0.5.7" +name = "const_format" +version = "0.2.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3a214c7af3d04997541b18d432afaff4c455e79e2029079647e72fc2bd27673" +dependencies = [ + "const_format_proc_macros", + "konst", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3618cccc083bb987a415d85c02ca6c9994ea5b44731ec28b9ecf09658655fba9" +checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] [[package]] name = "contracts" @@ -2446,15 +2480,16 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.3" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", + "platforms", "rustc_version 0.4.0", "subtle", "zeroize", @@ -2468,7 +2503,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -2505,7 +2540,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -2516,7 +2551,7 @@ checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ "darling_core", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -2631,20 +2666,20 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] name = "derive_more" -version = "0.99.18" +version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.68", + "syn 1.0.109", ] [[package]] @@ -2761,13 +2796,13 @@ dependencies = [ [[package]] name = "displaydoc" -version = "0.2.5" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -2879,7 +2914,7 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ - "curve25519-dalek 4.1.3", + "curve25519-dalek 4.1.2", "ed25519 2.2.3", "rand_core 0.6.4", "serde", @@ -2890,7 +2925,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-rlp", "rayon", @@ -3000,7 +3035,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -3011,7 +3046,7 @@ checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -3059,9 +3094,9 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.5.4" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d3627f83d8b87b432a5fad9934b4565260722a141a2c40f371f8080adec9425" +checksum = "e61ffea29f26e8249d35128a82ec8d3bd4fbc80179ea5f5e5e3daafef6a80fcb" dependencies = [ "ethereum-types", "itertools 0.10.5", @@ -3082,7 +3117,7 @@ dependencies = [ "clap", "eyre", "futures-util", - "reqwest 0.12.5", + "reqwest 0.12.4", "reth", "reth-node-ethereum", "serde", @@ -3112,6 +3147,7 @@ dependencies = [ "reth-discv4", "reth-network", "reth-network-api", + "reth-network-peers", "reth-primitives", "reth-tracing", "secp256k1", @@ -3165,6 +3201,7 @@ dependencies = [ "eyre", "reth", "reth-chainspec", + "reth-evm-ethereum", "reth-node-api", "reth-node-core", "reth-node-ethereum", @@ -3212,6 +3249,26 @@ dependencies = [ "tracing", ] +[[package]] +name = "example-custom-rlpx-subprotocol" +version = "0.0.0" +dependencies = [ + "eyre", + "futures", + "rand 0.8.5", + "reth", + "reth-eth-wire", + "reth-network", + "reth-network-api", + "reth-node-ethereum", + "reth-primitives", + "reth-provider", + "reth-rpc-types", + "tokio", + "tokio-stream", + "tracing", +] + [[package]] name = "example-db-access" version = "0.0.0" @@ -3224,6 +3281,32 @@ dependencies = [ "reth-rpc-types", ] +[[package]] +name = "example-exex-discv5" +version = "0.0.0" +dependencies = [ + "clap", + "discv5", + "enr", + "eyre", + "futures", + "futures-util", + "reth", + "reth-chainspec", + "reth-discv5", + "reth-exex", + "reth-exex-test-utils", + "reth-network-peers", + "reth-node-api", + "reth-node-ethereum", + "reth-testing-utils", + "reth-tracing", + "serde_json", + "tokio", + "tokio-stream", + "tracing", +] + [[package]] name = "example-exex-in-memory-state" version = "0.0.0" @@ -3395,6 +3478,7 @@ dependencies = [ "reth-db", "reth-db-api", "reth-node-ethereum", + "reth-provider", "tokio", ] @@ -3616,7 +3700,7 @@ dependencies = [ "alloy-rpc-types-eth", "alloy-serde", "chrono", - "reqwest 0.12.5", + "reqwest 0.12.4", "serde", ] @@ -3710,7 +3794,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -3815,9 +3899,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "glob" @@ -4160,12 +4244,12 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.2" +version = 
"0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" dependencies = [ "bytes 1.6.0", - "futures-util", + "futures-core", "http 1.1.0", "http-body 1.0.0", "pin-project-lite", @@ -4199,9 +4283,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -4233,9 +4317,9 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.29" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes 1.6.0", "futures-channel", @@ -4284,12 +4368,29 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.29", + "hyper 0.14.28", "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", ] +[[package]] +name = "hyper-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" +dependencies = [ + "futures-util", + "http 1.1.0", + "hyper 1.3.1", + "hyper-util", + "rustls 0.22.4", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.25.0", + "tower-service", +] + [[package]] name = "hyper-rustls" version = "0.27.2" @@ -4302,19 +4403,17 @@ dependencies = [ "hyper-util", "log", "rustls 0.23.10", - "rustls-native-certs 0.7.0", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", "tower-service", - "webpki-roots", ] [[package]] name = "hyper-util" -version = "0.1.5" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" dependencies = [ "bytes 1.6.0", "futures-channel", @@ -4353,7 +4452,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -4450,9 +4549,9 @@ checksum = "545c6c3e8bf9580e2dafee8de6f9ec14826aaf359787789c7724f1f85f47d3dc" [[package]] name = "icu_normalizer" -version = "1.4.3" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accb85c5b2e76f8dade22978b3795ae1e550198c6cfc7e915144e17cd6e2ab56" +checksum = "c183e31ed700f1ecd6b032d104c52fe8b15d028956b73727c97ec176b170e187" dependencies = [ "displaydoc", "icu_collections", @@ -4468,15 +4567,15 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "1.4.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3744fecc0df9ce19999cdaf1f9f3a48c253431ce1d67ef499128fe9d0b607ab" +checksum = "22026918a80e6a9a330cb01b60f950e2b4e5284c59528fd0c6150076ef4c8522" [[package]] name = "icu_properties" -version = "1.4.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8173ba888885d250016e957b8ebfd5a65cdb690123d8833a19f6833f9c2b579" +checksum = "976e296217453af983efa25f287a4c1da04b9a63bf1ed63719455068e4453eb5" dependencies = [ 
"displaydoc", "icu_collections", @@ -4489,9 +4588,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "1.4.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e70a8b51ee5dd4ff8f20ee9b1dd1bc07afc110886a3747b1fec04cc6e5a15815" +checksum = "f6a86c0e384532b06b6c104814f9c1b13bcd5b64409001c0d05713a1f3529d99" [[package]] name = "icu_provider" @@ -4518,7 +4617,7 @@ checksum = "d2abdd3a62551e8337af119c5899e600ca0c88ec8f23a46c60ba216c803dcf1a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -4940,7 +5039,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -5068,13 +5167,28 @@ dependencies = [ "sha3-asm", ] +[[package]] +name = "konst" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "330f0e13e6483b8c34885f7e6c9f19b1a7bd449c673fbb948a51c99d66ef74f4" +dependencies = [ + "konst_macro_rules", +] + +[[package]] +name = "konst_macro_rules" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4933f3f57a8e9d9da04db23fb153356ecaf00cbd14aee46279c33dc80925c37" + [[package]] name = "lazy_static" -version = "1.5.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" dependencies = [ - "spin", + "spin 0.5.2", ] [[package]] @@ -5091,9 +5205,9 @@ checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" -version = "0.8.4" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", "windows-targets 0.52.5", @@ -5182,9 +5296,9 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.2.9" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" +checksum = "999ec70441b2fb35355076726a6bc466c932e9bdc66f6a11c6c0aa17c7ab9be0" dependencies = [ "asn1_der", "bs58", @@ -5325,9 +5439,9 @@ checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "litemap" -version = "0.7.3" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" +checksum = "f9d642685b028806386b2b6e75685faadd3eb65a85fff7df711ce18446a422da" [[package]] name = "lock_api" @@ -5395,9 +5509,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "memmap2" @@ -5487,7 +5601,7 @@ dependencies = [ "futures-util", "http-types", "pin-project-lite", - "reqwest 0.12.5", + "reqwest 0.12.4", "serde", "serde_json", "thiserror", @@ -5519,9 +5633,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.4" +version = "0.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" dependencies = [ "adler", ] @@ -5562,7 +5676,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -5754,7 +5868,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -5836,7 +5950,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -5864,9 +5978,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.0" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "576dfe1fc8f9df304abb159d767a29d0476f7750fbf8aa7ad07816004a207434" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -5883,6 +5997,35 @@ version = "11.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +[[package]] +name = "op-alloy-consensus" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f491085509d77ebd05dbf75592093a9bebc8e7fc642b90fb4ac13b747d48b2fc" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "serde", +] + +[[package]] +name = "op-alloy-rpc-types" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f26a0cb2f7183c5e51d2806bf4ab9ec050e47c4595deff9bec7f2ba218db9d7" +dependencies = [ + "alloy-network", + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "op-alloy-consensus", + "serde", + "serde_json", +] + [[package]] name = "opaque-debug" version = "0.2.3" @@ -6026,7 +6169,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.2", + "redox_syscall 0.5.1", "smallvec", "windows-targets 0.52.5", ] @@ -6154,7 +6297,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -6183,7 +6326,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -6232,6 +6375,12 @@ dependencies = [ "crunchy", ] +[[package]] +name = "platforms" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" + [[package]] name = "plotters" version = "0.3.6" @@ -6351,7 +6500,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ "proc-macro2", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -6411,9 +6560,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "0b33eb56c327dec362a9e55b3ad14f9d2f0904fb5a5b03b513ab5465399e9f43" dependencies = [ "unicode-ident", ] @@ -6446,9 +6595,9 @@ dependencies = [ [[package]] name = "proptest" -version = "1.5.0" +version = "1.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", @@ -6458,7 +6607,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift", - "regex-syntax 0.8.4", + "regex-syntax 0.8.3", "rusty-fork", "tempfile", "unarray", @@ -6485,6 +6634,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "proptest-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + [[package]] name = "prost" version = "0.6.1" @@ -6552,7 +6712,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -6614,57 +6774,10 @@ dependencies = [ ] [[package]] -name = "quinn" -version = "0.11.2" +name = "quote" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ceeeeabace7857413798eb1ffa1e9c905a9946a57d81fb69b4b71c4d8eb3ad" -dependencies = [ - "bytes 1.6.0", - "pin-project-lite", - "quinn-proto", - "quinn-udp", - "rustc-hash 1.1.0", - "rustls 0.23.10", - "thiserror", - "tokio", - "tracing", -] - -[[package]] -name = "quinn-proto" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddf517c03a109db8100448a4be38d498df8a210a99fe0e1b9eaf39e78c640efe" -dependencies = [ - "bytes 1.6.0", - "rand 0.8.5", - "ring", - "rustc-hash 1.1.0", - "rustls 0.23.10", - "slab", - "thiserror", - "tinyvec", - "tracing", -] - -[[package]] -name = "quinn-udp" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9096629c45860fc7fb143e125eb826b5e721e10be3263160c7d60ca832cf8c46" -dependencies = [ - "libc", - "once_cell", - "socket2 0.5.7", - "tracing", - "windows-sys 0.52.0", -] - -[[package]] -name = "quote" -version = "1.0.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -6773,19 +6886,20 @@ dependencies = [ [[package]] name = "ratatui" -version = "0.26.3" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f44c9e68fd46eda15c646fbb85e1040b657a58cdc8c98db1d97a55930d991eef" +checksum = "d16546c5b5962abf8ce6e2881e722b4e0ae3b6f1a08a26ae3573c55853ca68d3" dependencies = [ "bitflags 2.5.0", "cassowary", "compact_str", "crossterm", - "itertools 0.12.1", + "itertools 0.13.0", "lru", "paste", "stability", "strum", + "strum_macros", "unicode-segmentation", "unicode-truncate", "unicode-width", @@ -6837,9 +6951,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ "bitflags 2.5.0", ] @@ -6857,14 +6971,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.5" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = 
"c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -6878,13 +6992,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.3", ] [[package]] @@ -6895,9 +7009,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "regress" @@ -6923,7 +7037,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.29", + "hyper 0.14.28", "hyper-rustls 0.24.2", "ipnet", "js-sys", @@ -6938,7 +7052,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 0.1.2", + "sync_wrapper", "system-configuration", "tokio", "tokio-rustls 0.24.1", @@ -6952,9 +7066,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.5" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" dependencies = [ "base64 0.22.1", "bytes 1.6.0", @@ -6964,7 +7078,7 @@ dependencies = [ "http-body 1.0.0", "http-body-util", "hyper 1.3.1", - "hyper-rustls 0.27.2", + "hyper-rustls 0.26.0", "hyper-util", "ipnet", "js-sys", @@ -6973,17 +7087,16 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "quinn", - "rustls 0.23.10", + "rustls 0.22.4", "rustls-native-certs 0.7.0", "rustls-pemfile 2.1.2", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper", "tokio", - "tokio-rustls 0.26.0", + "tokio-rustls 0.25.0", "tokio-util", "tower-service", "url", @@ -7007,38 +7120,27 @@ dependencies = [ [[package]] name = "reth" -version = "1.0.0" +version = "1.0.1" dependencies = [ - "ahash", "alloy-rlp", "aquamarine", - "arbitrary", - "assert_matches", "backon", - "boyer-moore-magiclen", "clap", - "comfy-table", "confy", - "crossterm", "discv5", "eyre", "fdlimit", "futures", - "human_bytes", "itertools 0.13.0", - "jsonrpsee", "libc", "metrics-process", - "proptest", - "proptest-arbitrary-interop", - "rand 0.8.5", - "ratatui", - "rayon", "reth-basic-payload-builder", "reth-beacon-consensus", "reth-blockchain-tree", "reth-chainspec", + "reth-cli-commands", "reth-cli-runner", + "reth-cli-util", "reth-config", "reth-consensus", "reth-consensus-common", @@ -7046,19 +7148,17 @@ dependencies = [ "reth-db-api", "reth-db-common", "reth-discv4", - "reth-discv5", "reth-downloaders", + "reth-engine-util", "reth-errors", "reth-ethereum-payload-builder", "reth-evm", "reth-execution-types", "reth-exex", "reth-fs-util", - "reth-net-banlist", "reth-network", "reth-network-api", "reth-network-p2p", - "reth-nippy-jar", "reth-node-api", "reth-node-bsc", "reth-node-builder", @@ -7066,20 +7166,24 @@ dependencies = [ 
"reth-node-ethereum", "reth-node-events", "reth-node-optimism", + "reth-optimism-cli", "reth-optimism-primitives", "reth-payload-builder", "reth-payload-primitives", "reth-payload-validator", "reth-primitives", "reth-provider", - "reth-prune-types", + "reth-prune", "reth-revm", "reth-rpc", "reth-rpc-api", "reth-rpc-builder", + "reth-rpc-eth-types", + "reth-rpc-server-types", "reth-rpc-types", "reth-rpc-types-compat", "reth-stages", + "reth-stages-api", "reth-static-file", "reth-static-file-types", "reth-tasks", @@ -7092,13 +7196,13 @@ dependencies = [ "tempfile", "tikv-jemallocator", "tokio", - "toml 0.8.14", + "toml 0.8.13", "tracing", ] [[package]] name = "reth-auto-seal-consensus" -version = "1.0.0" +version = "1.0.1" dependencies = [ "futures-util", "reth-beacon-consensus", @@ -7124,7 +7228,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-rlp", "futures-core", @@ -7146,7 +7250,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-genesis", "assert_matches", @@ -7155,6 +7259,7 @@ dependencies = [ "metrics", "reth-blockchain-tree", "reth-blockchain-tree-api", + "reth-bsc-consensus", "reth-chainspec", "reth-config", "reth-consensus", @@ -7197,11 +7302,11 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-consensus", "alloy-eips", - "alloy-json-rpc", + "alloy-json-rpc 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "alloy-provider", "alloy-pubsub", "alloy-rpc-client", @@ -7216,7 +7321,7 @@ dependencies = [ "eyre", "futures", "libc", - "reqwest 0.12.5", + "reqwest 0.12.4", "reth-cli-runner", "reth-db", "reth-node-api", @@ -7238,7 +7343,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-genesis", "aquamarine", @@ -7272,7 +7377,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "1.0.0" +version = "1.0.1" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -7281,9 +7386,52 @@ dependencies = [ "thiserror", ] +[[package]] +name = "reth-bsc-cli" +version = "1.0.1" + [[package]] name = "reth-bsc-consensus" -version = "1.0.0" +version = "1.0.1" +dependencies = [ + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-rlp", + "bitset", + "blst", + "bytes 1.6.0", + "futures-util", + "lazy_static", + "lru", + "mockall", + "parking_lot 0.12.3", + "rand 0.8.5", + "reth-chainspec", + "reth-codecs", + "reth-consensus", + "reth-consensus-common", + "reth-db-api", + "reth-engine-primitives", + "reth-network", + "reth-network-p2p", + "reth-network-peers", + "reth-primitives", + "reth-provider", + "reth-rpc-types", + "secp256k1", + "serde", + "serde_cbor", + "serde_json", + "sha3 0.10.8", + "thiserror", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "reth-bsc-engine" +version = "1.0.1" dependencies = [ "alloy-dyn-abi", "alloy-json-abi", @@ -7298,16 +7446,20 @@ dependencies = [ "parking_lot 0.12.3", "rand 0.8.5", "reth-beacon-consensus", + "reth-bsc-consensus", "reth-chainspec", "reth-codecs", "reth-consensus", "reth-consensus-common", "reth-db-api", "reth-engine-primitives", + "reth-evm", + "reth-evm-bsc", "reth-network", "reth-network-p2p", "reth-network-peers", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-rpc-types", "secp256k1", @@ -7323,7 +7475,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.0.0" 
+version = "1.0.1" dependencies = [ "alloy-chains", "alloy-eips", @@ -7334,27 +7486,101 @@ dependencies = [ "derive_more", "nybbles", "once_cell", + "op-alloy-rpc-types", "rand 0.8.5", "reth-ethereum-forks", "reth-network-peers", "reth-primitives-traits", "reth-rpc-types", "reth-trie-common", + "serde", "serde_json", ] +[[package]] +name = "reth-cli" +version = "1.0.1" +dependencies = [ + "clap", + "eyre", + "reth-chainspec", + "reth-cli-runner", +] + +[[package]] +name = "reth-cli-commands" +version = "1.0.1" +dependencies = [ + "ahash", + "arbitrary", + "backon", + "clap", + "comfy-table", + "confy", + "crossterm", + "eyre", + "fdlimit", + "human_bytes", + "itertools 0.13.0", + "metrics-process", + "proptest", + "proptest-arbitrary-interop", + "ratatui", + "reth-beacon-consensus", + "reth-chainspec", + "reth-cli-runner", + "reth-cli-util", + "reth-config", + "reth-consensus", + "reth-db", + "reth-db-api", + "reth-db-common", + "reth-downloaders", + "reth-evm", + "reth-exex", + "reth-fs-util", + "reth-network", + "reth-network-p2p", + "reth-node-core", + "reth-primitives", + "reth-provider", + "reth-prune", + "reth-stages", + "reth-static-file", + "reth-static-file-types", + "reth-trie", + "serde", + "serde_json", + "tokio", + "toml 0.8.13", + "tracing", +] + [[package]] name = "reth-cli-runner" -version = "1.0.0" +version = "1.0.1" dependencies = [ "reth-tasks", "tokio", "tracing", ] +[[package]] +name = "reth-cli-util" +version = "1.0.1" +dependencies = [ + "eyre", + "proptest", + "reth-fs-util", + "reth-network", + "reth-primitives", + "secp256k1", + "thiserror", +] + [[package]] name = "reth-codecs" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7365,7 +7591,7 @@ dependencies = [ "modular-bitfield", "proptest", "proptest-arbitrary-interop", - "proptest-derive", + "proptest-derive 0.5.0", "reth-codecs-derive", "serde", "serde_json", @@ -7374,32 +7600,33 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.0.0" +version = "1.0.1" dependencies = [ "convert_case 0.6.0", "proc-macro2", "quote", "similar-asserts", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] name = "reth-config" -version = "1.0.0" +version = "1.0.1" dependencies = [ "confy", "humantime-serde", - "reth-network", + "reth-network-types", "reth-primitives", "reth-prune-types", + "reth-stages-types", "serde", "tempfile", - "toml 0.8.14", + "toml 0.8.13", ] [[package]] name = "reth-consensus" -version = "1.0.0" +version = "1.0.1" dependencies = [ "auto_impl", "reth-primitives", @@ -7408,7 +7635,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.0.0" +version = "1.0.1" dependencies = [ "mockall", "rand 0.8.5", @@ -7420,7 +7647,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7428,7 +7655,7 @@ dependencies = [ "auto_impl", "eyre", "futures", - "reqwest 0.12.5", + "reqwest 0.12.4", "reth-node-api", "reth-node-core", "reth-rpc-api", @@ -7442,7 +7669,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.0.0" +version = "1.0.1" dependencies = [ "arbitrary", "assert_matches", @@ -7481,7 +7708,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.0.0" +version = "1.0.1" dependencies = [ "arbitrary", "assert_matches", @@ -7496,7 +7723,7 @@ dependencies = [ "pprof", "proptest", "proptest-arbitrary-interop", - "proptest-derive", + "proptest-derive 0.5.0", "rand 0.8.5", "reth-codecs", "reth-primitives", @@ 
-7513,9 +7740,10 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-genesis", + "boyer-moore-magiclen", "eyre", "reth-chainspec", "reth-codecs", @@ -7523,6 +7751,7 @@ dependencies = [ "reth-db", "reth-db-api", "reth-etl", + "reth-fs-util", "reth-primitives", "reth-primitives-traits", "reth-provider", @@ -7536,7 +7765,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7563,7 +7792,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7589,7 +7818,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7617,7 +7846,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-rlp", "assert_matches", @@ -7652,7 +7881,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-consensus", "alloy-network", @@ -7665,6 +7894,7 @@ dependencies = [ "reth", "reth-chainspec", "reth-db", + "reth-network-peers", "reth-node-builder", "reth-payload-builder", "reth-primitives", @@ -7682,7 +7912,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.0.0" +version = "1.0.1" dependencies = [ "aes 0.8.4", "alloy-primitives", @@ -7712,16 +7942,78 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.0.0" +version = "1.0.1" dependencies = [ "reth-chainspec", "reth-payload-primitives", "serde", ] +[[package]] +name = "reth-engine-tree" +version = "1.0.1" +dependencies = [ + "aquamarine", + "assert_matches", + "futures", + "metrics", + "parking_lot 0.12.3", + "reth-beacon-consensus", + "reth-blockchain-tree", + "reth-blockchain-tree-api", + "reth-chainspec", + "reth-consensus", + "reth-db", + "reth-db-api", + "reth-engine-primitives", + "reth-errors", + "reth-ethereum-consensus", + "reth-evm", + "reth-metrics", + "reth-network-p2p", + "reth-payload-builder", + "reth-payload-primitives", + "reth-payload-validator", + "reth-primitives", + "reth-provider", + "reth-prune", + "reth-prune-types", + "reth-revm", + "reth-rpc-types", + "reth-stages", + "reth-stages-api", + "reth-static-file", + "reth-tasks", + "reth-tokio-util", + "reth-tracing", + "reth-trie", + "revm", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "reth-engine-util" +version = "1.0.1" +dependencies = [ + "eyre", + "futures", + "pin-project", + "reth-beacon-consensus", + "reth-engine-primitives", + "reth-fs-util", + "reth-rpc", + "reth-rpc-types", + "serde", + "serde_json", + "tokio-util", + "tracing", +] + [[package]] name = "reth-errors" -version = "1.0.0" +version = "1.0.1" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", @@ -7733,7 +8025,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-rlp", "arbitrary", @@ -7744,11 +8036,10 @@ dependencies = [ "pin-project", "proptest", "proptest-arbitrary-interop", - "proptest-derive", + "proptest-derive 0.5.0", "rand 0.8.5", "reth-chainspec", "reth-codecs", - "reth-discv4", "reth-ecies", "reth-eth-wire-types", "reth-metrics", @@ -7768,8 +8059,9 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.0.0" +version = "1.0.1" dependencies = [ + "alloy-chains", "alloy-genesis", 
"alloy-rlp", "arbitrary", @@ -7777,7 +8069,7 @@ dependencies = [ "derive_more", "proptest", "proptest-arbitrary-interop", - "proptest-derive", + "proptest-derive 0.5.0", "rand 0.8.5", "reth-chainspec", "reth-codecs-derive", @@ -7786,9 +8078,13 @@ dependencies = [ "thiserror", ] +[[package]] +name = "reth-ethereum-cli" +version = "1.0.1" + [[package]] name = "reth-ethereum-consensus" -version = "1.0.0" +version = "1.0.1" dependencies = [ "reth-chainspec", "reth-consensus", @@ -7797,13 +8093,32 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-ethereum-engine" +version = "1.0.1" +dependencies = [ + "futures", + "pin-project", + "reth-beacon-consensus", + "reth-chainspec", + "reth-db-api", + "reth-engine-tree", + "reth-ethereum-engine-primitives", + "reth-network-p2p", + "reth-stages-api", + "reth-tasks", + "tokio", + "tokio-stream", +] + [[package]] name = "reth-ethereum-engine-primitives" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-rlp", "reth-chainspec", "reth-engine-primitives", + "reth-evm-ethereum", "reth-payload-primitives", "reth-primitives", "reth-rpc-types", @@ -7816,22 +8131,26 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-chains", "alloy-primitives", "alloy-rlp", "arbitrary", + "auto_impl", "crc", + "dyn-clone", + "once_cell", "proptest", - "proptest-derive", + "proptest-derive 0.5.0", + "rustc-hash 2.0.0", "serde", "thiserror-no-std", ] [[package]] name = "reth-ethereum-payload-builder" -version = "1.0.0" +version = "1.0.1" dependencies = [ "reth-basic-payload-builder", "reth-errors", @@ -7849,7 +8168,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", "rayon", @@ -7859,8 +8178,9 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.0.0" +version = "1.0.1" dependencies = [ + "alloy-eips", "auto_impl", "futures-util", "parking_lot 0.12.3", @@ -7876,7 +8196,7 @@ dependencies = [ [[package]] name = "reth-evm-bsc" -version = "1.0.0" +version = "1.0.1" dependencies = [ "bitset", "blst", @@ -7886,6 +8206,7 @@ dependencies = [ "reth-bsc-consensus", "reth-chainspec", "reth-errors", + "reth-ethereum-forks", "reth-evm", "reth-primitives", "reth-provider", @@ -7898,12 +8219,13 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-eips", "alloy-sol-types", "reth-chainspec", "reth-ethereum-consensus", + "reth-ethereum-forks", "reth-evm", "reth-execution-types", "reth-primitives", @@ -7917,10 +8239,11 @@ dependencies = [ [[package]] name = "reth-evm-optimism" -version = "1.0.0" +version = "1.0.1" dependencies = [ "reth-chainspec", "reth-consensus-common", + "reth-ethereum-forks", "reth-evm", "reth-execution-errors", "reth-execution-types", @@ -7936,7 +8259,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7949,7 +8272,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7963,11 +8286,17 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.0.0" +version = "1.0.1" dependencies = [ "eyre", "metrics", + "reth-blockchain-tree", + "reth-chainspec", "reth-config", + "reth-db-api", + "reth-db-common", + "reth-evm", + "reth-evm-ethereum", "reth-exex-types", "reth-metrics", "reth-network", @@ -7975,9 +8304,15 @@ dependencies = [ 
"reth-node-core", "reth-payload-builder", "reth-primitives", + "reth-primitives-traits", "reth-provider", + "reth-prune-types", + "reth-revm", + "reth-stages-api", "reth-tasks", + "reth-testing-utils", "reth-tracing", + "secp256k1", "serde", "tokio", "tokio-util", @@ -7985,7 +8320,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.0.0" +version = "1.0.1" dependencies = [ "eyre", "futures-util", @@ -8015,14 +8350,14 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-fs-util" -version = "1.0.0" +version = "1.0.1" dependencies = [ "serde_json", "thiserror", @@ -8030,7 +8365,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.0.0" +version = "1.0.1" dependencies = [ "async-trait", "bytes 1.6.0", @@ -8052,7 +8387,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.0.0" +version = "1.0.1" dependencies = [ "bitflags 2.5.0", "byteorder", @@ -8072,7 +8407,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.0.0" +version = "1.0.1" dependencies = [ "bindgen", "cc", @@ -8080,7 +8415,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.0.0" +version = "1.0.1" dependencies = [ "futures", "metrics", @@ -8091,7 +8426,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "1.0.0" +version = "1.0.1" dependencies = [ "metrics", "once_cell", @@ -8099,23 +8434,23 @@ dependencies = [ "quote", "regex", "serial_test", - "syn 2.0.68", + "syn 2.0.65", "trybuild", ] [[package]] name = "reth-net-banlist" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.0.0" +version = "1.0.1" dependencies = [ "futures-util", - "reqwest 0.12.5", + "reqwest 0.12.4", "reth-tracing", "serde_with", "thiserror", @@ -8124,7 +8459,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-node-bindings", "alloy-provider", @@ -8150,14 +8485,17 @@ dependencies = [ "reth-dns-discovery", "reth-ecies", "reth-eth-wire", + "reth-fs-util", "reth-metrics", "reth-net-banlist", "reth-network", "reth-network-api", "reth-network-p2p", "reth-network-peers", + "reth-network-types", "reth-primitives", "reth-provider", + "reth-storage-api", "reth-tasks", "reth-tokio-util", "reth-tracing", @@ -8179,7 +8517,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -8193,7 +8531,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.0.0" +version = "1.0.1" dependencies = [ "auto_impl", "futures", @@ -8211,7 +8549,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8225,9 +8563,22 @@ dependencies = [ "url", ] +[[package]] +name = "reth-network-types" +version = "1.0.1" +dependencies = [ + "humantime-serde", + "reth-net-banlist", + "reth-network-api", + "reth-network-peers", + "serde", + "serde_json", + "tracing", +] + [[package]] name = "reth-nippy-jar" -version = "1.0.0" +version = "1.0.1" dependencies = [ "anyhow", "bincode", @@ -8248,7 +8599,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.0.0" +version = "1.0.1" dependencies = [ "reth-db-api", "reth-engine-primitives", @@ -8263,7 +8614,7 @@ dependencies = [ [[package]] name = "reth-node-bsc" -version = "1.0.0" 
+version = "1.0.1" dependencies = [ "eyre", "futures", @@ -8293,12 +8644,11 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.0.0" +version = "1.0.1" dependencies = [ "aquamarine", "backon", "confy", - "discv5", "eyre", "fdlimit", "futures", @@ -8307,7 +8657,9 @@ dependencies = [ "reth-beacon-consensus", "reth-blockchain-tree", "reth-bsc-consensus", + "reth-bsc-engine", "reth-chainspec", + "reth-cli-util", "reth-config", "reth-consensus", "reth-consensus-debug-client", @@ -8315,6 +8667,7 @@ dependencies = [ "reth-db-api", "reth-db-common", "reth-downloaders", + "reth-engine-util", "reth-evm", "reth-exex", "reth-network", @@ -8340,16 +8693,17 @@ dependencies = [ "tempfile", "tokio", "tokio-stream", + "tracing", ] [[package]] name = "reth-node-core" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-genesis", "alloy-rpc-types-engine", "clap", - "const-str", + "const_format", "derive_more", "dirs-next", "eyre", @@ -8362,19 +8716,17 @@ dependencies = [ "metrics-process", "metrics-util", "once_cell", - "pin-project", "procfs", "proptest", "rand 0.8.5", - "reth-beacon-consensus", "reth-chainspec", + "reth-cli-util", "reth-config", "reth-consensus-common", "reth-db", "reth-db-api", "reth-discv4", "reth-discv5", - "reth-engine-primitives", "reth-fs-util", "reth-metrics", "reth-net-nat", @@ -8384,8 +8736,9 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-prune-types", - "reth-rpc", "reth-rpc-api", + "reth-rpc-eth-api", + "reth-rpc-eth-types", "reth-rpc-server-types", "reth-rpc-types", "reth-rpc-types-compat", @@ -8395,13 +8748,10 @@ dependencies = [ "reth-tracing", "reth-transaction-pool", "secp256k1", - "serde", "serde_json", "shellexpand", - "thiserror", "tikv-jemalloc-ctl", "tokio", - "tokio-util", "tower", "tracing", "vergen", @@ -8409,7 +8759,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -8442,7 +8792,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-rpc-types-engine", "futures", @@ -8453,6 +8803,7 @@ dependencies = [ "reth-network", "reth-network-api", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune", "reth-stages", @@ -8463,7 +8814,7 @@ dependencies = [ [[package]] name = "reth-node-optimism" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -8471,8 +8822,9 @@ dependencies = [ "clap", "eyre", "jsonrpsee", + "jsonrpsee-types", "parking_lot 0.12.3", - "reqwest 0.12.5", + "reqwest 0.12.4", "reth", "reth-auto-seal-consensus", "reth-basic-payload-builder", @@ -8492,23 +8844,57 @@ dependencies = [ "reth-payload-builder", "reth-primitives", "reth-provider", - "reth-revm", - "reth-rpc", - "reth-rpc-types", - "reth-rpc-types-compat", - "reth-tracing", - "reth-transaction-pool", - "revm-primitives", - "serde", - "serde_json", - "thiserror", + "reth-revm", + "reth-rpc", + "reth-rpc-eth-api", + "reth-rpc-eth-types", + "reth-rpc-types", + "reth-rpc-types-compat", + "reth-tracing", + "reth-transaction-pool", + "revm-primitives", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "reth-optimism-cli" +version = "1.0.1" +dependencies = [ + "alloy-primitives", + "clap", + "eyre", + "futures-util", + "reth-cli-commands", + "reth-config", + "reth-consensus", + "reth-db", + "reth-db-api", + "reth-downloaders", + "reth-errors", + "reth-evm-optimism", + "reth-execution-types", 
+ "reth-network-p2p", + "reth-node-core", + "reth-node-events", + "reth-optimism-primitives", + "reth-primitives", + "reth-provider", + "reth-prune", + "reth-stages", + "reth-stages-types", + "reth-static-file", + "reth-static-file-types", "tokio", "tracing", ] [[package]] name = "reth-optimism-consensus" -version = "1.0.0" +version = "1.0.1" dependencies = [ "reth-chainspec", "reth-consensus", @@ -8519,7 +8905,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-rlp", "reth-basic-payload-builder", @@ -8543,11 +8929,29 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.0.0" +version = "1.0.1" + +[[package]] +name = "reth-optimism-rpc" +version = "1.0.1" +dependencies = [ + "alloy-primitives", + "parking_lot 0.12.3", + "reth-chainspec", + "reth-errors", + "reth-evm", + "reth-provider", + "reth-rpc-eth-api", + "reth-rpc-eth-types", + "reth-rpc-types", + "reth-tasks", + "reth-transaction-pool", + "tokio", +] [[package]] name = "reth-payload-builder" -version = "1.0.0" +version = "1.0.1" dependencies = [ "futures-util", "metrics", @@ -8569,7 +8973,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.0.0" +version = "1.0.1" dependencies = [ "reth-chainspec", "reth-errors", @@ -8583,7 +8987,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.0.0" +version = "1.0.1" dependencies = [ "reth-chainspec", "reth-primitives", @@ -8593,7 +8997,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-chains", "alloy-eips", @@ -8616,7 +9020,7 @@ dependencies = [ "pprof", "proptest", "proptest-arbitrary-interop", - "proptest-derive", + "proptest-derive 0.5.0", "rand 0.8.5", "rayon", "reth-chainspec", @@ -8635,14 +9039,14 @@ dependencies = [ "test-fuzz", "thiserror", "thiserror-no-std", - "toml 0.8.14", + "toml 0.8.13", "triehash", "zstd", ] [[package]] name = "reth-primitives-traits" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8657,7 +9061,7 @@ dependencies = [ "modular-bitfield", "proptest", "proptest-arbitrary-interop", - "proptest-derive", + "proptest-derive 0.5.0", "rand 0.8.5", "reth-codecs", "revm-primitives", @@ -8670,7 +9074,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-rlp", "alloy-rpc-types-engine", @@ -8712,7 +9116,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", "assert_matches", @@ -8733,6 +9137,7 @@ dependencies = [ "reth-testing-utils", "reth-tokio-util", "reth-tracing", + "rustc-hash 2.0.0", "thiserror", "tokio", "tracing", @@ -8740,7 +9145,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", "arbitrary", @@ -8750,21 +9155,20 @@ dependencies = [ "modular-bitfield", "proptest", "proptest-arbitrary-interop", - "proptest-derive", + "proptest-derive 0.5.0", "reth-codecs", "serde", "serde_json", "test-fuzz", "thiserror", - "toml 0.8.14", + "toml 0.8.13", ] [[package]] name = "reth-revm" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-eips", - "alloy-rlp", "reth-chainspec", "reth-consensus-common", "reth-execution-errors", @@ -8779,24 +9183,22 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-dyn-abi", "alloy-genesis", 
"alloy-primitives", "alloy-rlp", - "alloy-sol-types", "assert_matches", "async-trait", "derive_more", - "dyn-clone", "futures", "http 1.1.0", "http-body 1.0.0", "hyper 1.3.1", "jsonrpsee", + "jsonrpsee-types", "jsonwebtoken", - "metrics", "parking_lot 0.12.3", "pin-project", "rand 0.8.5", @@ -8806,8 +9208,6 @@ dependencies = [ "reth-evm", "reth-evm-ethereum", "reth-evm-optimism", - "reth-execution-types", - "reth-metrics", "reth-network-api", "reth-network-peers", "reth-primitives", @@ -8815,6 +9215,8 @@ dependencies = [ "reth-revm", "reth-rpc-api", "reth-rpc-engine-api", + "reth-rpc-eth-api", + "reth-rpc-eth-types", "reth-rpc-server-types", "reth-rpc-types", "reth-rpc-types-compat", @@ -8824,7 +9226,6 @@ dependencies = [ "revm", "revm-inspectors", "revm-primitives", - "schnellru", "secp256k1", "serde", "serde_json", @@ -8839,13 +9240,13 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.0.0" +version = "1.0.1" dependencies = [ - "alloy-dyn-abi", "jsonrpsee", "reth-engine-primitives", "reth-network-peers", "reth-primitives", + "reth-rpc-eth-api", "reth-rpc-types", "serde", "serde_json", @@ -8853,12 +9254,13 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.0.0" +version = "1.0.1" dependencies = [ "futures", "jsonrpsee", "reth-primitives", "reth-rpc-api", + "reth-rpc-eth-api", "reth-rpc-types", "serde_json", "similar-asserts", @@ -8867,7 +9269,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.0.0" +version = "1.0.1" dependencies = [ "clap", "http 1.1.0", @@ -8883,6 +9285,7 @@ dependencies = [ "reth-ipc", "reth-metrics", "reth-network-api", + "reth-network-peers", "reth-node-core", "reth-payload-builder", "reth-primitives", @@ -8890,6 +9293,8 @@ dependencies = [ "reth-rpc", "reth-rpc-api", "reth-rpc-engine-api", + "reth-rpc-eth-api", + "reth-rpc-eth-types", "reth-rpc-layer", "reth-rpc-server-types", "reth-rpc-types", @@ -8909,7 +9314,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-rlp", "assert_matches", @@ -8940,9 +9345,77 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-rpc-eth-api" +version = "1.0.1" +dependencies = [ + "alloy-dyn-abi", + "async-trait", + "auto_impl", + "dyn-clone", + "futures", + "jsonrpsee", + "parking_lot 0.12.3", + "reth-chainspec", + "reth-errors", + "reth-evm", + "reth-execution-types", + "reth-primitives", + "reth-provider", + "reth-revm", + "reth-rpc-eth-types", + "reth-rpc-server-types", + "reth-rpc-types", + "reth-rpc-types-compat", + "reth-tasks", + "reth-transaction-pool", + "revm", + "revm-inspectors", + "revm-primitives", + "tokio", + "tracing", +] + +[[package]] +name = "reth-rpc-eth-types" +version = "1.0.1" +dependencies = [ + "alloy-sol-types", + "derive_more", + "futures", + "jsonrpsee-core", + "jsonrpsee-types", + "metrics", + "rand 0.8.5", + "reth-chainspec", + "reth-errors", + "reth-evm", + "reth-execution-types", + "reth-metrics", + "reth-primitives", + "reth-provider", + "reth-revm", + "reth-rpc-server-types", + "reth-rpc-types", + "reth-rpc-types-compat", + "reth-tasks", + "reth-transaction-pool", + "reth-trie", + "revm", + "revm-inspectors", + "revm-primitives", + "schnellru", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", + "tracing", +] + [[package]] name = "reth-rpc-layer" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-rpc-types-engine", "assert_matches", @@ -8950,7 +9423,7 @@ dependencies = [ "jsonrpsee", "jsonrpsee-http-client", "pin-project", - 
"reqwest 0.12.5", + "reqwest 0.12.4", "tempfile", "tokio", "tower", @@ -8959,16 +9432,22 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", + "jsonrpsee-core", + "jsonrpsee-types", + "reth-errors", + "reth-network-api", + "reth-primitives", + "reth-rpc-types", "serde", "strum", ] [[package]] name = "reth-rpc-types" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -8976,6 +9455,7 @@ dependencies = [ "alloy-rpc-types-anvil", "alloy-rpc-types-beacon", "alloy-rpc-types-engine", + "alloy-rpc-types-mev", "alloy-rpc-types-trace", "alloy-rpc-types-txpool", "alloy-serde", @@ -8983,7 +9463,7 @@ dependencies = [ "bytes 1.6.0", "jsonrpsee-types", "proptest", - "proptest-derive", + "proptest-derive 0.5.0", "rand 0.8.5", "serde", "serde_json", @@ -8992,7 +9472,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-rlp", "alloy-rpc-types", @@ -9004,7 +9484,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-rlp", "assert_matches", @@ -9033,6 +9513,7 @@ dependencies = [ "reth-network-p2p", "reth-network-peers", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-prune-types", "reth-revm", @@ -9050,7 +9531,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", "aquamarine", @@ -9079,7 +9560,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", "arbitrary", @@ -9087,7 +9568,7 @@ dependencies = [ "modular-bitfield", "proptest", "proptest-arbitrary-interop", - "proptest-derive", + "proptest-derive 0.5.0", "rand 0.8.5", "reth-codecs", "reth-trie-common", @@ -9097,7 +9578,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", "assert_matches", @@ -9109,6 +9590,7 @@ dependencies = [ "reth-provider", "reth-prune-types", "reth-stages", + "reth-stages-types", "reth-static-file-types", "reth-storage-errors", "reth-testing-utils", @@ -9119,7 +9601,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-primitives", "clap", @@ -9130,7 +9612,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.0.0" +version = "1.0.1" dependencies = [ "auto_impl", "reth-chainspec", @@ -9146,7 +9628,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.0.0" +version = "1.0.1" dependencies = [ "reth-fs-util", "reth-primitives", @@ -9155,8 +9637,9 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.0.0" +version = "1.0.1" dependencies = [ + "auto_impl", "dyn-clone", "futures-util", "metrics", @@ -9171,7 +9654,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-genesis", "rand 0.8.5", @@ -9181,7 +9664,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.0.0" +version = "1.0.1" dependencies = [ "tokio", "tokio-stream", @@ -9190,7 +9673,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.0.0" +version = "1.0.1" dependencies = [ "clap", "eyre", @@ -9204,7 +9687,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.0.0" +version = "1.0.1" dependencies = [ 
"alloy-rlp", "aquamarine", @@ -9244,7 +9727,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-rlp", "auto_impl", @@ -9277,7 +9760,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9294,19 +9777,19 @@ dependencies = [ "plain_hasher", "proptest", "proptest-arbitrary-interop", - "proptest-derive", + "proptest-derive 0.5.0", "reth-codecs", "reth-primitives-traits", "revm-primitives", "serde", "serde_json", "test-fuzz", - "toml 0.8.14", + "toml 0.8.13", ] [[package]] name = "reth-trie-parallel" -version = "1.0.0" +version = "1.0.1" dependencies = [ "alloy-rlp", "criterion", @@ -9332,8 +9815,8 @@ dependencies = [ [[package]] name = "revm" -version = "10.0.0" -source = "git+https://github.com/bnb-chain/revm?rev=e4bf75d62e36077fb3904c400332da0376c03383#e4bf75d62e36077fb3904c400332da0376c03383" +version = "11.0.0" +source = "git+https://github.com/bnb-chain/revm?rev=0fceb6332b50ece91aabe3f90df06ce5aa44111f#0fceb6332b50ece91aabe3f90df06ce5aa44111f" dependencies = [ "auto_impl", "cfg-if", @@ -9346,9 +9829,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.1.2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b0971cad2f8f1ecb10e270d80646e63bf19daef0dc0a17a45680d24bb346b7c" +checksum = "083fe9c20db39ab4d371e9c4d10367408fa3565ad277a4fa1770f7d9314e1b92" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -9364,8 +9847,8 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "6.0.0" -source = "git+https://github.com/bnb-chain/revm?rev=e4bf75d62e36077fb3904c400332da0376c03383#e4bf75d62e36077fb3904c400332da0376c03383" +version = "7.0.0" +source = "git+https://github.com/bnb-chain/revm?rev=0fceb6332b50ece91aabe3f90df06ce5aa44111f#0fceb6332b50ece91aabe3f90df06ce5aa44111f" dependencies = [ "revm-primitives", "serde", @@ -9373,15 +9856,15 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "8.0.0" -source = "git+https://github.com/bnb-chain/revm?rev=e4bf75d62e36077fb3904c400332da0376c03383#e4bf75d62e36077fb3904c400332da0376c03383" +version = "9.0.0" +source = "git+https://github.com/bnb-chain/revm?rev=0fceb6332b50ece91aabe3f90df06ce5aa44111f#0fceb6332b50ece91aabe3f90df06ce5aa44111f" dependencies = [ - "alloy-primitives", "alloy-rlp", "aurora-engine-modexp", "bls_on_arkworks", "blst", "c-kzg", + "cfg-if", "cometbft", "cometbft-light-client", "cometbft-light-client-verifier", @@ -9401,9 +9884,10 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "5.0.0" -source = "git+https://github.com/bnb-chain/revm?rev=e4bf75d62e36077fb3904c400332da0376c03383#e4bf75d62e36077fb3904c400332da0376c03383" +version = "6.0.0" +source = "git+https://github.com/bnb-chain/revm?rev=0fceb6332b50ece91aabe3f90df06ce5aa44111f#0fceb6332b50ece91aabe3f90df06ce5aa44111f" dependencies = [ + "alloy-eips", "alloy-primitives", "auto_impl", "bitflags 2.5.0", @@ -9448,7 +9932,7 @@ dependencies = [ "cfg-if", "getrandom 0.2.15", "libc", - "spin", + "spin 0.9.8", "untrusted", "windows-sys 0.52.0", ] @@ -9511,9 +9995,9 @@ dependencies = [ [[package]] name = "roaring" -version = "0.10.5" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7699249cc2c7d71939f30868f47e9d7add0bdc030d90ee10bfd16887ff8bb1c8" +checksum = "b26f4c25a604fcb3a1bcd96dd6ba37c93840de95de8198d94c0d571a74a804d1" dependencies = [ "bytemuck", "byteorder", 
@@ -9646,6 +10130,20 @@ dependencies = [ "sct", ] +[[package]] +name = "rustls" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +dependencies = [ + "log", + "ring", + "rustls-pki-types", + "rustls-webpki 0.102.4", + "subtle", + "zeroize", +] + [[package]] name = "rustls" version = "0.23.10" @@ -9993,7 +10491,7 @@ checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -10027,7 +10525,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -10078,7 +10576,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -10103,7 +10601,7 @@ checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -10365,6 +10863,12 @@ dependencies = [ "sha1", ] +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + [[package]] name = "spin" version = "0.9.8" @@ -10394,7 +10898,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ff9eaf853dec4c8802325d8b6d3dffa86cc707fd7a1a4cdbf416e13b061787a" dependencies = [ "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -10440,7 +10944,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -10458,9 +10962,9 @@ dependencies = [ [[package]] name = "subtle" -version = "2.6.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d0208408ba0c3df17ed26eb06992cb1a1268d41b2c0e12e65203fbe3972cee5" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "subtle-encoding" @@ -10489,9 +10993,9 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "12.9.2" +version = "12.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71297dc3e250f7dbdf8adb99e235da783d690f5819fdeb4cce39d9cfb0aca9f1" +checksum = "1cccfffbc6bb3bb2d3a26cd2077f4d055f6808d266f9d4d158797a4c60510dfe" dependencies = [ "debugid", "memmap2", @@ -10501,9 +11005,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.9.2" +version = "12.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "424fa2c9bf2c862891b9cfd354a752751a6730fd838a4691e7f6c2c7957b9daf" +checksum = "76a99812da4020a67e76c4eb41f08c87364c14170495ff780f30dd519c221a68" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -10523,9 +11027,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.68" +version = "2.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "901fa70d88b9d6c98022e23b4136f9f3e54e4662c3bc1bd1d84a42a9a0f0c1e9" +checksum = "d2863d96a84c6439701d7a38f9de935ec562c8832cc55d1dde0f513b52fad106" dependencies = [ "proc-macro2", "quote", @@ -10541,7 +11045,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -10550,12 +11054,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" -[[package]] -name = "sync_wrapper" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" - [[package]] name = "synstructure" version = "0.12.6" @@ -10576,7 +11074,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -10741,7 +11239,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -10780,7 +11278,7 @@ checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -10898,9 +11396,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.7.6" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "83c02bf3c538ab32ba913408224323915f4ef9a6d61c0e85d493f355921c0ece" dependencies = [ "displaydoc", "zerovec", @@ -10933,9 +11431,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes 1.6.0", @@ -10952,13 +11450,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.3.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -10971,6 +11469,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.4", + "rustls-pki-types", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.26.0" @@ -11036,14 +11545,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.14" +version = "0.8.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335" +checksum = "a4e43f8cc456c9704c851ae29c67e17ef65d2c30017c17a9765b89c382dc8bba" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.14", + "toml_edit 0.22.13", ] [[package]] @@ -11068,15 +11577,15 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.14" +version = "0.22.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f21c7aaf97f1bd9ca9d4f9e73b0a6c74bd5afef56f2bc931943a6e1c37e04e38" +checksum = "c127785850e8c20836d49732ae6abfa47616e60bf9d9f57c43c250361a9db96c" dependencies = [ "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.13", + "winnow 0.6.8", ] [[package]] @@ -11175,7 +11684,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] 
@@ -11336,7 +11845,7 @@ dependencies = [ "serde_derive", "serde_json", "termcolor", - "toml 0.8.14", + "toml 0.8.13", ] [[package]] @@ -11478,9 +11987,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", "idna 0.5.0", @@ -11508,15 +12017,15 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "utf8parse" -version = "0.2.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.9.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea73390fe27785838dcbf75b91b1d84799e28f1ce71e6f372a5dc2200c80de5" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ "getrandom 0.2.15", ] @@ -11626,7 +12135,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", "wasm-bindgen-shared", ] @@ -11660,7 +12169,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11696,9 +12205,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.3" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +checksum = "b3de34ae270483955a94f4b21bdaaeb83d508bb84a01435f393818edb0012009" dependencies = [ "rustls-pki-types", ] @@ -11801,7 +12310,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -11812,14 +12321,14 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] name = "windows-result" -version = "0.1.2" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +checksum = "749f0da9cc72d82e600d8d2e44cadd0b9eedb9038f71a1c58556ac1c5791813b" dependencies = [ "windows-targets 0.52.5", ] @@ -11974,9 +12483,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.13" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1" +checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" dependencies = [ "memchr", ] @@ -12009,9 +12518,9 @@ checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" [[package]] name = "writeable" -version = "0.5.5" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +checksum = "dad7bb64b8ef9c0aa27b6da38b452b0ee9fd82beaf276a87dd796fb55cbae14e" [[package]] name = "ws_stream_wasm" @@ -12058,9 +12567,9 @@ 
dependencies = [ [[package]] name = "yoke" -version = "0.7.4" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +checksum = "65e71b2e4f287f467794c671e2b8f8a5f3716b3c829079a1c44740148eff07e4" dependencies = [ "serde", "stable_deref_trait", @@ -12070,13 +12579,13 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.4" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +checksum = "9e6936f0cce458098a201c245a11bef556c6a0181129c7034d10d76d1ec3a2b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", "synstructure 0.13.1", ] @@ -12097,35 +12606,35 @@ checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] name = "zerofrom" -version = "0.1.4" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +checksum = "655b0814c5c0b19ade497851070c640773304939a6c0fd5f5fb43da0696d05b7" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.4" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +checksum = "e6a647510471d372f2e6c2e6b7219e44d8c574d24fdc11c610a61455782f18c3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", "synstructure 0.13.1", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] @@ -12138,14 +12647,14 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] name = "zerovec" -version = "0.10.2" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2cc8827d6c0994478a15c53f374f46fbd41bea663d809b14744bc42e6b109c" +checksum = "eff4439ae91fb5c72b8abc12f3f2dbf51bd27e6eadb9f8a5bc8898dddb0e27ea" dependencies = [ "yoke", "zerofrom", @@ -12154,13 +12663,13 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.2" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97cf56601ee5052b4417d90c8755c6683473c926039908196cf35d99f893ebe7" +checksum = "7b4e5997cbf58990550ef1f0e5124a05e47e1ebd33a84af25739be6031a62c20" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.65", ] [[package]] @@ -12183,9 +12692,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.11+zstd.1.5.6" +version = "2.0.10+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75652c55c0b6f3e6f12eb786fe1bc960396bf05a1eb3bf1f3691c3610ac2e6d4" +checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index a7c9ae4c0..3df7110ec 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.0.0" +version = "1.0.1" edition = "2021" 
rust-version = "1.79" license = "MIT OR Apache-2.0" @@ -13,11 +13,15 @@ members = [ "bin/reth/", "crates/blockchain-tree/", "crates/blockchain-tree-api/", + "crates/bsc/cli", "crates/bsc/consensus", "crates/bsc/node/", "crates/bsc/evm/", "crates/chainspec/", + "crates/cli/cli/", + "crates/cli/commands/", "crates/cli/runner/", + "crates/cli/util/", "crates/config/", "crates/consensus/auto-seal/", "crates/consensus/beacon/", @@ -26,10 +30,14 @@ members = [ "crates/consensus/debug-client/", "crates/ethereum-forks/", "crates/e2e-test-utils/", - "crates/engine-primitives/", + "crates/engine/primitives/", + "crates/engine/tree/", + "crates/engine/util/", "crates/errors/", "crates/ethereum-forks/", + "crates/ethereum/cli/", "crates/ethereum/consensus/", + "crates/ethereum/engine/", "crates/ethereum/engine-primitives/", "crates/ethereum/evm", "crates/ethereum/node", @@ -53,18 +61,21 @@ members = [ "crates/net/eth-wire/", "crates/net/nat/", "crates/net/network-api/", + "crates/net/network-types/", "crates/net/network/", "crates/net/p2p/", "crates/net/peers/", - "crates/node-core/", + "crates/node/core/", "crates/node/api/", "crates/node/builder/", "crates/node/events/", + "crates/optimism/cli", "crates/optimism/consensus", "crates/optimism/evm/", "crates/optimism/node/", "crates/optimism/payload/", "crates/optimism/primitives/", + "crates/optimism/rpc/", "crates/payload/basic/", "crates/payload/builder/", "crates/payload/primitives/", @@ -78,8 +89,11 @@ members = [ "crates/rpc/rpc-api/", "crates/rpc/rpc-builder/", "crates/rpc/rpc-engine-api/", + "crates/rpc/rpc-eth-api/", + "crates/rpc/rpc-eth-types/", "crates/rpc/rpc-layer", "crates/rpc/rpc-testing-util/", + "crates/rpc/rpc-server-types/", "crates/rpc/rpc-types-compat/", "crates/rpc/rpc-types/", "crates/rpc/rpc/", @@ -107,6 +121,7 @@ members = [ "crates/trie/parallel/", "crates/trie/trie", "crates/bsc/node/", + "crates/bsc/engine/", "examples/beacon-api-sidecar-fetcher/", "examples/beacon-api-sse/", "examples/bsc-p2p", @@ -127,8 +142,11 @@ members = [ "examples/polygon-p2p/", "examples/rpc-db/", "examples/txpool-tracing/", + "examples/custom-rlpx-subprotocol", + "examples/exex/minimal/", + "examples/exex/op-bridge/", "testing/ef-tests/", - "testing/testing-utils", + "testing/testing-utils", ] default-members = ["bin/reth"] @@ -198,6 +216,7 @@ zero_sized_map_values = "warn" single_char_pattern = "warn" needless_continue = "warn" enum_glob_use = "warn" +iter_without_into_iter = "warn" # These are nursery lints which have findings. Allow them for now. Some are not # quite mature enough for use in our codebase and some we don't really want. @@ -228,20 +247,28 @@ opt-level = 3 lto = "thin" [profile.release] +opt-level = 3 lto = "thin" -strip = "debuginfo" +debug = "line-tables-only" +strip = true +panic = "unwind" +codegen-units = 16 -# Like release, but with full debug symbols. Useful for e.g. `perf`. -[profile.debug-fast] +# Use the `--profile profiling` flag to show symbols in release mode. +# e.g. 
`cargo build --profile profiling` +[profile.profiling] inherits = "release" -strip = "none" -debug = true +debug = 2 +strip = false + +# Make sure debug symbols are in the bench profile +[profile.bench] +inherits = "profiling" [profile.maxperf] inherits = "release" lto = "fat" codegen-units = 1 -incremental = false [workspace.dependencies] # reth @@ -250,11 +277,15 @@ reth-bench = { path = "bin/reth-bench" } reth-auto-seal-consensus = { path = "crates/consensus/auto-seal" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-beacon-consensus = { path = "crates/consensus/beacon" } +reth-bsc-cli = { path = "crates/bsc/cli" } reth-bsc-consensus = { path = "crates/bsc/consensus" } reth-blockchain-tree = { path = "crates/blockchain-tree" } reth-blockchain-tree-api = { path = "crates/blockchain-tree-api" } reth-chainspec = { path = "crates/chainspec" } +reth-cli = { path = "crates/cli/cli" } +reth-cli-commands = { path = "crates/cli/commands" } reth-cli-runner = { path = "crates/cli/runner" } +reth-cli-util = { path = "crates/cli/util" } reth-codecs = { path = "crates/storage/codecs" } reth-codecs-derive = { path = "crates/storage/codecs/derive" } reth-config = { path = "crates/config" } @@ -270,10 +301,13 @@ reth-dns-discovery = { path = "crates/net/dns" } reth-downloaders = { path = "crates/net/downloaders" } reth-e2e-test-utils = { path = "crates/e2e-test-utils" } reth-ecies = { path = "crates/net/ecies" } -reth-engine-primitives = { path = "crates/engine-primitives" } +reth-engine-primitives = { path = "crates/engine/primitives" } +reth-engine-tree = { path = "crates/engine/tree" } +reth-engine-util = { path = "crates/engine/util" } reth-errors = { path = "crates/errors" } reth-eth-wire = { path = "crates/net/eth-wire" } reth-eth-wire-types = { path = "crates/net/eth-wire-types" } +reth-ethereum-cli = { path = "crates/ethereum/cli" } reth-ethereum-consensus = { path = "crates/ethereum/consensus" } reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-ethereum-forks = { path = "crates/ethereum-forks" } @@ -298,19 +332,22 @@ reth-net-banlist = { path = "crates/net/banlist" } reth-net-nat = { path = "crates/net/nat" } reth-network = { path = "crates/net/network" } reth-network-api = { path = "crates/net/network-api" } +reth-network-types = { path = "crates/net/network-types" } reth-network-peers = { path = "crates/net/peers", default-features = false } reth-network-p2p = { path = "crates/net/p2p" } reth-nippy-jar = { path = "crates/storage/nippy-jar" } reth-node-api = { path = "crates/node/api" } reth-node-builder = { path = "crates/node/builder" } reth-node-bsc= { path = "crates/bsc/node" } -reth-node-core = { path = "crates/node-core" } +reth-node-core = { path = "crates/node/core" } reth-node-ethereum = { path = "crates/ethereum/node" } reth-node-events = { path = "crates/node/events" } reth-node-optimism = { path = "crates/optimism/node" } +reth-optimism-cli = { path = "crates/optimism/cli" } reth-optimism-consensus = { path = "crates/optimism/consensus" } reth-optimism-payload-builder = { path = "crates/optimism/payload" } reth-optimism-primitives = { path = "crates/optimism/primitives" } +reth-optimism-rpc = { path = "crates/optimism/rpc" } reth-payload-builder = { path = "crates/payload/builder" } reth-payload-primitives = { path = "crates/payload/primitives" } reth-payload-validator = { path = "crates/payload/validator" } @@ -325,7 +362,9 @@ reth-rpc-api = { path = "crates/rpc/rpc-api" } reth-rpc-api-testing-util = { path = 
"crates/rpc/rpc-testing-util" } reth-rpc-builder = { path = "crates/rpc/rpc-builder" } reth-rpc-engine-api = { path = "crates/rpc/rpc-engine-api" } +reth-rpc-eth-api = { path = "crates/rpc/rpc-eth-api" } reth-rpc-layer = { path = "crates/rpc/rpc-layer" } +reth-rpc-eth-types = { path = "crates/rpc/rpc-eth-types" } reth-rpc-server-types = { path = "crates/rpc/rpc-server-types" } reth-rpc-types = { path = "crates/rpc/rpc-types" } reth-rpc-types-compat = { path = "crates/rpc/rpc-types-compat" } @@ -337,6 +376,7 @@ reth-static-file-types = { path = "crates/static-file/types" } reth-storage-api = { path = "crates/storage/storage-api" } reth-storage-errors = { path = "crates/storage/errors" } reth-tasks = { path = "crates/tasks" } +reth-bsc-engine = { path = "crates/bsc/engine" } reth-testing-utils = { path = "testing/testing-utils" } reth-tokio-util = { path = "crates/tokio-util" } reth-tracing = { path = "crates/tracing" } @@ -346,15 +386,15 @@ reth-trie-common = { path = "crates/trie/common" } reth-trie-parallel = { path = "crates/trie/parallel" } # revm -revm = { version = "10.0.0", features = [ +revm = { version = "11.0.0", features = [ "std", "secp256k1", "blst", ], default-features = false } -revm-primitives = { version = "5.0.0", features = [ +revm-primitives = { version = "6.0.0", features = [ "std", ], default-features = false } -revm-inspectors = "0.1" +revm-inspectors = "0.4" # eth alloy-chains = "0.1.15" @@ -364,36 +404,42 @@ alloy-json-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" alloy-trie = "0.4" -alloy-rpc-types = { version = "0.1", default-features = false, features = [ +alloy-rpc-types = { version = "0.1.4", default-features = false, features = [ "eth", ] } -alloy-rpc-types-anvil = { version = "0.1", default-features = false } -alloy-rpc-types-beacon = { version = "0.1", default-features = false } -alloy-rpc-types-admin = { version = "0.1", default-features = false } -alloy-rpc-types-txpool = { version = "0.1", default-features = false } -alloy-serde = { version = "0.1", default-features = false } -alloy-rpc-types-engine = { version = "0.1", default-features = false } -alloy-rpc-types-eth = { version = "0.1", default-features = false } -alloy-rpc-types-trace = { version = "0.1", default-features = false } -alloy-genesis = { version = "0.1", default-features = false } -alloy-node-bindings = { version = "0.1", default-features = false } -alloy-provider = { version = "0.1", default-features = false, features = [ +alloy-rpc-types-anvil = { version = "0.1.4", default-features = false } +alloy-rpc-types-beacon = { version = "0.1.4", default-features = false } +alloy-rpc-types-admin = { version = "0.1.4", default-features = false } +alloy-rpc-types-txpool = { version = "0.1.4", default-features = false } +alloy-serde = { version = "0.1.4", default-features = false } +alloy-rpc-types-engine = { version = "0.1.4", default-features = false } +alloy-rpc-types-eth = { version = "0.1.4", default-features = false, features = [ + "sidecar", +] } +alloy-rpc-types-mev = { version = "0.1.4", default-features = false } +alloy-rpc-types-trace = { version = "0.1.4", default-features = false } +alloy-genesis = { version = "0.1.4", default-features = false } +alloy-node-bindings = { version = "0.1.4", default-features = false } +alloy-provider = { version = "0.1.4", default-features = false, features = [ "reqwest", ] } -alloy-eips = { version = "0.1", default-features = false } -alloy-signer = { version = "0.1", default-features = false } -alloy-signer-local = { version = "0.1", 
default-features = false } -alloy-network = { version = "0.1", default-features = false } -alloy-consensus = { version = "0.1", default-features = false } -alloy-transport = { version = "0.1" } -alloy-transport-http = { version = "0.1", features = [ +alloy-eips = { version = "0.1.4", default-features = false } +alloy-signer = { version = "0.1.4", default-features = false } +alloy-signer-local = { version = "0.1.4", default-features = false } +alloy-network = { version = "0.1.4", default-features = false } +alloy-consensus = { version = "0.1.4", default-features = false } +alloy-transport = { version = "0.1.4" } +alloy-transport-http = { version = "0.1.4", features = [ "reqwest-rustls-tls", ], default-features = false } -alloy-transport-ws = { version = "0.1", default-features = false } -alloy-transport-ipc = { version = "0.1", default-features = false } -alloy-pubsub = { version = "0.1", default-features = false } -alloy-json-rpc = { version = "0.1", default-features = false } -alloy-rpc-client = { version = "0.1", default-features = false } +alloy-transport-ws = { version = "0.1.4", default-features = false } +alloy-transport-ipc = { version = "0.1.4", default-features = false } +alloy-pubsub = { version = "0.1.4", default-features = false } +alloy-json-rpc = { version = "0.1.4", default-features = false } +alloy-rpc-client = { version = "0.1.4", default-features = false } + +# op +op-alloy-rpc-types = "0.1" # misc auto_impl = "1" @@ -401,6 +447,7 @@ aquamarine = "0.5" bytes = "1.5" bitflags = "2.4" clap = "4" +const_format = { version = "0.2.32", features = ["rust_1_64"] } dashmap = "5.5" derive_more = "0.99.17" fdlimit = "0.3.0" @@ -419,7 +466,8 @@ humantime-serde = "1.1" rand = "0.8.5" rustc-hash = "2.0" schnellru = "0.2" -strum = "0.26" +strum = "=0.26.3" +strum_macros = "=0.26.4" rayon = "1.7" itertools = "0.13" parking_lot = "0.12" @@ -433,6 +481,7 @@ sha2 = { version = "0.10", default-features = false } paste = "1.0" url = "2.3" backon = "0.4" +boyer-moore-magiclen = "0.2.16" # metrics metrics = "0.23.0" @@ -464,7 +513,6 @@ tower-http = "0.5" # p2p discv5 = "0.6.0" -igd-next = "0.14.3" # rpc jsonrpsee = "0.23" @@ -499,18 +547,22 @@ tempfile = "3.8" criterion = "0.5" pprof = "0.13" proptest = "1.4" -proptest-derive = "0.4" +proptest-derive = "0.5" serial_test = "3" similar-asserts = "1.5.0" test-fuzz = "5" iai-callgrind = "0.11" [patch.crates-io] -revm = { git = "https://github.com/bnb-chain/revm", rev = "e4bf75d62e36077fb3904c400332da0376c03383" } -revm-interpreter = { git = "https://github.com/bnb-chain/revm", rev = "e4bf75d62e36077fb3904c400332da0376c03383" } -revm-precompile = { git = "https://github.com/bnb-chain/revm", rev = "e4bf75d62e36077fb3904c400332da0376c03383" } -revm-primitives = { git = "https://github.com/bnb-chain/revm", rev = "e4bf75d62e36077fb3904c400332da0376c03383" } +revm = { git = "https://github.com/bnb-chain/revm", rev = "0fceb6332b50ece91aabe3f90df06ce5aa44111f" } +revm-interpreter = { git = "https://github.com/bnb-chain/revm", rev = "0fceb6332b50ece91aabe3f90df06ce5aa44111f" } +revm-precompile = { git = "https://github.com/bnb-chain/revm", rev = "0fceb6332b50ece91aabe3f90df06ce5aa44111f" } +revm-primitives = { git = "https://github.com/bnb-chain/revm", rev = "0fceb6332b50ece91aabe3f90df06ce5aa44111f" } alloy-chains = { git = "https://github.com/bnb-chain/alloy-chains-rs.git", rev = "b7c5379cf47345181f8dce350acafb958f47152a" } - -[patch."https://github.com/bluealloy/revm"] -revm = { git = "https://github.com/bnb-chain/revm", rev = 
"e4bf75d62e36077fb3904c400332da0376c03383" } \ No newline at end of file +alloy-rpc-types-eth = { git = "https://github.com/bnb-chain/alloy", rev = "18f098dd78be661433bae682ad161a41f8a9c301" } +alloy-consensus = { git = "https://github.com/bnb-chain/alloy", rev = "18f098dd78be661433bae682ad161a41f8a9c301" } +alloy-eips = { git = "https://github.com/bnb-chain/alloy", rev = "18f098dd78be661433bae682ad161a41f8a9c301" } +alloy-network = { git = "https://github.com/bnb-chain/alloy", rev = "18f098dd78be661433bae682ad161a41f8a9c301" } +alloy-serde = { git = "https://github.com/bnb-chain/alloy", rev = "18f098dd78be661433bae682ad161a41f8a9c301" } +alloy-signer = { git = "https://github.com/bnb-chain/alloy", rev = "18f098dd78be661433bae682ad161a41f8a9c301" } +alloy-signer-local = { git = "https://github.com/bnb-chain/alloy", rev = "18f098dd78be661433bae682ad161a41f8a9c301" } \ No newline at end of file diff --git a/README.md b/README.md index 71510597f..d5f87d927 100644 --- a/README.md +++ b/README.md @@ -5,16 +5,20 @@ [![Discord Chat][discord-badge]][discord-url] [gh-ci]: https://github.com/bnb-chain/reth/actions/workflows/unit.yml + [gh-deny]: https://github.com/bnb-chain/reth/actions/workflows/deny.yml + [discord-badge]: https://img.shields.io/badge/discord-join%20chat-blue.svg + [discord-url]: https://discord.gg/z2VpC455eU -BNB Chain Reth is a cutting-edge rust client developed in collaboration with Paradigm, designed to provide seamless support -for [BNB Smart Chain(BSC)](https://github.com/bnb-chain/bsc) and [opBNB](https://github.com/bnb-chain/op-geth). +BNB Chain Reth is a cutting-edge rust client developed in collaboration with Paradigm, designed to provide seamless +support for [BNB Smart Chain(BSC)](https://github.com/bnb-chain/bsc) and [opBNB](https://github.com/bnb-chain/op-geth). ## Build from Source -For prerequisites and detailed build instructions please read the [Installation Instructions](https://paradigmxyz.github.io/reth/installation/source.html). +For prerequisites and detailed build instructions please read +the [Installation Instructions](https://paradigmxyz.github.io/reth/installation/source.html). With Rust and the dependencies installed, you're ready to build BNB Chain Reth. First, clone the repository: @@ -23,16 +27,16 @@ git clone https://github.com/bnb-chain/reth.git cd reth ``` -In the realm of BSC, you have the option to execute the following commands to compile reth: +In the realm of BSC, you have the option to execute the following commands to compile bsc-reth: ```shell -make build +make build-bsc ``` Alternatively, you can install reth using the following command: ```shell -make install +make install-bsc ``` When it comes to opBNB, you can run the following commands to compile op-reth: @@ -49,7 +53,60 @@ make install-op ## Run Reth for BSC -Coming soon...... +The command below is for an archive node. To run a full node, simply add the `--full` tag. + +```shell +# for testnet +export network=bsc-testnet + +# for mainnet +# export network=bsc + +./target/release/bsc-reth node \ + --datadir=./datadir \ + --chain=${network} \ + --http \ + --http.addr=0.0.0.0 \ + --http.port=8545 \ + --http.api="eth, net, txpool, web3, rpc" \ + --ws \ + --ws.addr=0.0.0.0 \ + --ws.port=8546 \ + --nat=any \ + --log.file.directory ./datadir/logs +``` + +You can run `bsc-reth --help` for command explanations. 
+ +For running bsc-reth with docker, please use the following command: + +```shell +# for testnet +export network=bsc-testnet + +# for mainnet +# export network=bsc + +# check this for version of the docker image, https://github.com/bnb-chain/reth/pkgs/container/bsc-reth +export version=latest + +# the directory where reth data will be stored +export data_dir=/xxx/xxx + +docker run -d -p 8545:8545 -p 8546:8546 -p 30303:30303 -p 30303:30303/udp -v ${data_dir}:/data \ + --name bsc-reth ghcr.io/bnb-chain/bsc-reth:${version} node \ + --datadir=/data \ + --chain=${network} \ + --http \ + --http.addr=0.0.0.0 \ + --http.port=8545 \ + --http.api="eth, net, txpool, web3, rpc" \ + --ws \ + --ws.addr=0.0.0.0 \ + --ws.port=8546 \ + --nat=any \ + --log.file.directory /data/logs +``` ## Run Reth for opBNB @@ -64,10 +121,13 @@ The op-reth can function as both a full node and an archive node. Due to its uni ### Steps to Run op-reth -The op-reth is an [execution client](https://ethereum.org/en/developers/docs/nodes-and-clients/#execution-clients) for opBNB. +The op-reth is an [execution client](https://ethereum.org/en/developers/docs/nodes-and-clients/#execution-clients) for +opBNB. You need to run op-node along with op-reth to synchronize with the opBNB network. -Here is the quick command for running the op-node. For more details, refer to the [opbnb repository](https://github.com/bnb-chain/opbnb). +Here is the quick command for running the op-node. For more details, refer to +the [opbnb repository](https://github.com/bnb-chain/opbnb). + ```shell git clone https://github.com/bnb-chain/opbnb cd opbnb @@ -110,18 +170,18 @@ export P2P_BOOTNODES="enr:-J24QGQBeMsXOaCCaLWtNFSfb2Gv50DjGOKToH2HUTAIn9yXImowlR --l2.jwt-secret=./jwt.txt ``` -Copy the JWT file generated when running the op-node to the current workspace. Here is a quick command for running op-reth. +Copy the JWT file generated when running the op-node to the current workspace. Here is a quick command for running +op-reth. The command below is for an archive node. To run a full node, simply add the `--full` tag. + ```shell # for testnet export network=testnet export L2_RPC=https://opbnb-testnet-rpc.bnbchain.org -export TRUST_NODES="enode://1a8f2d3160ad6efd6591981c026bd31807b79844422d99107f8ffa0bd966f35dd6b44d3169e05fcb15be492a58c3098c1d5ab04a3b2769f1aa87ab871b3ef49b@54.238.146.8:30303,enode://28a8309f958c58a0f6fd3cee83951033d20f2b7369e25c63f66caf0d2bac1df89df52b82d74d828f35c76152e4b2aa8dae816a2e3ea5a03c40d4ec08005d426c@35.74.91.224:30303" # for mainnet # export network=mainnet # export L2_RPC=https://opbnb-mainnet-rpc.bnbchain.org -# export TRUST_NODES="enode://db109c6cac5c8b6225edd3176fc3764c58e0720950fe94c122c80978e706a9c9e976629b718e48b6306ea0f9126e5394d3424c9716c5703549e2e7eba216353b@52.193.218.151:30303,enode://e74ecea4943c27d7d4d0c40f84fc3426a7e80f8a9035c0b383725b693ebf9a6376b8c9db12690b513a6ac83041d9b6418d51dc079dce1f13ef948b32f63a589d@54.150.37.120:30303" ./target/release/op-reth node \ --datadir=./datadir \ @@ -133,17 +193,57 @@ export TRUST_NODES="enode://1a8f2d3160ad6efd6591981c026bd31807b79844422d99107f8f --http \ --http.addr=0.0.0.0 \ --http.port=8545 \ - --http.api="admin, debug, eth, net, trace, txpool, web3, rpc, reth, ots, eth-call-bundle" \ + --http.api="eth, net, txpool, web3, rpc" \ --ws \ --ws.addr=0.0.0.0 \ --ws.port=8546 \ --builder.gaslimit=150000000 \ --nat=any \ - --trusted-peers=${TRUST_NODES} \ --log.file.directory ./datadir/logs ``` -You can run `op-reth --help` for command explanations. 
More details on running opbnb nodes can be found [here](https://docs.bnbchain.org/opbnb-docs/docs/tutorials/running-a-local-node/). +You can run `op-reth --help` for command explanations. More details on running opbnb nodes can be +found [here](https://docs.bnbchain.org/opbnb-docs/docs/tutorials/running-a-local-node/). + +For running op-reth with Docker, please use the following command: + +```shell +# for testnet +export network=testnet +export L2_RPC=https://opbnb-testnet-rpc.bnbchain.org + +# for mainnet +# export network=mainnet +# export L2_RPC=https://opbnb-mainnet-rpc.bnbchain.org + +# check this for version of the docker image, https://github.com/bnb-chain/reth/pkgs/container/op-reth +export version=latest + +# the directory where reth data will be stored +export data_dir=/xxx/xxx + +# the directory where the jwt.txt file is stored +export jwt_dir=/xxx/xxx + +docker run -d -p 8545:8545 -p 8546:8546 -p 30303:30303 -p 30303:30303/udp -v ${data_dir}:/data -v ${jwt_dir}:/jwt \ + --name op-reth ghcr.io/bnb-chain/op-reth:${version} node \ + --datadir=/data \ + --chain=opbnb-${network} \ + --rollup.sequencer-http=${L2_RPC} \ + --authrpc.addr="0.0.0.0" \ + --authrpc.port=8551 \ + --authrpc.jwtsecret=/jwt/jwt.txt \ + --http \ + --http.addr=0.0.0.0 \ + --http.port=8545 \ + --http.api="eth, net, txpool, web3, rpc" \ + --ws \ + --ws.addr=0.0.0.0 \ + --ws.port=8546 \ + --builder.gaslimit=150000000 \ + --nat=any \ + --log.file.directory /data/logs +``` ## Contribution @@ -152,7 +252,8 @@ from anyone on the internet, and are grateful for even the smallest of fixes! If you'd like to contribute to bnb chain reth, please fork, fix, commit and send a pull request for the maintainers to review and merge into the main code base. If you wish to submit -more complex changes though, please check up with the core devs first on [our discord channel](https://discord.gg/bnbchain) +more complex changes though, please check in with the core devs first +on [our discord channel](https://discord.gg/bnbchain) to ensure those changes are in line with the general philosophy of the project and/or get some early feedback which can make both your efforts much lighter as well as our review and merge procedures quick and simple. diff --git a/audit/sigma_prime_audit_v1.pdf b/audit/sigma_prime_audit_v2.pdf similarity index 51% rename from audit/sigma_prime_audit_v1.pdf rename to audit/sigma_prime_audit_v2.pdf index 4b31fb2b4..50da37d23 100644 Binary files a/audit/sigma_prime_audit_v1.pdf and b/audit/sigma_prime_audit_v2.pdf differ diff --git a/bin/reth-bench/README.md b/bin/reth-bench/README.md index 97c9572a1..fa58d467f 100644 --- a/bin/reth-bench/README.md +++ b/bin/reth-bench/README.md @@ -23,8 +23,7 @@ As long as the data is representative of real-world load, or closer to worst-cas ## Prerequisites -If you will be collecting CPU profiles, make sure `reth` is compiled with the `debug-fast` profile. -For collecting memory profiles, make sure `reth` is also compiled with the `--features profiling` flag. +If you will be collecting CPU profiles, make sure `reth` is compiled with the `profiling` profile. Otherwise, running `make maxperf` at the root of the repo should be sufficient for collecting accurate performance metrics.
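Because the `profiling` profile keeps debug symbols and line tables, the resulting release-speed binary can be profiled directly. A rough sketch (assuming Linux `perf` is available; the output path follows cargo's convention of placing custom-profile builds under `target/<profile>/`):

```shell
# Build reth with the profiling profile defined in the workspace Cargo.toml.
cargo build --profile profiling

# Record a CPU profile of the node, then inspect the hottest stacks.
# Substitute whatever workload is actually being measured.
perf record -g -- ./target/profiling/reth node --datadir ./datadir
perf report
```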
## Command Usage diff --git a/bin/reth-bench/src/authenticated_transport.rs b/bin/reth-bench/src/authenticated_transport.rs index e92b581bc..c946d244d 100644 --- a/bin/reth-bench/src/authenticated_transport.rs +++ b/bin/reth-bench/src/authenticated_transport.rs @@ -39,7 +39,7 @@ impl InnerTransport { jwt: JwtSecret, ) -> Result<(Self, Claims), AuthenticatedTransportError> { match url.scheme() { - "http" | "https" => Self::connect_http(url, jwt).await, + "http" | "https" => Self::connect_http(url, jwt), "ws" | "wss" => Self::connect_ws(url, jwt).await, "file" => Ok((Self::connect_ipc(url).await?, Claims::default())), _ => Err(AuthenticatedTransportError::BadScheme(url.scheme().to_string())), @@ -48,7 +48,7 @@ impl InnerTransport { /// Connects to an HTTP [`alloy_transport_http::Http`] transport. Returns an [`InnerTransport`] /// and the [Claims] generated from the jwt. - async fn connect_http( + fn connect_http( url: Url, jwt: JwtSecret, ) -> Result<(Self, Claims), AuthenticatedTransportError> { diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 5f152de95..300e74b4f 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -21,7 +21,7 @@ reth-fs-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-db-api.workspace = true reth-exex.workspace = true -reth-provider = { workspace = true } +reth-provider.workspace = true reth-evm.workspace = true reth-revm.workspace = true reth-stages.workspace = true @@ -30,6 +30,8 @@ reth-errors.workspace = true reth-transaction-pool.workspace = true reth-beacon-consensus.workspace = true reth-cli-runner.workspace = true +reth-cli-commands.workspace = true +reth-cli-util.workspace = true reth-consensus-common.workspace = true reth-blockchain-tree.workspace = true reth-rpc-builder.workspace = true @@ -37,37 +39,38 @@ reth-rpc.workspace = true reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-rpc-api = { workspace = true, features = ["client"] } +reth-rpc-eth-types.workspace = true +reth-rpc-server-types.workspace = true reth-network = { workspace = true, features = ["serde"] } reth-network-p2p.workspace = true -reth-net-banlist.workspace = true reth-network-api.workspace = true reth-downloaders.workspace = true reth-tracing.workspace = true reth-tasks.workspace = true -reth-ethereum-payload-builder.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true reth-payload-validator.workspace = true reth-basic-payload-builder.workspace = true -reth-discv4.workspace = true -reth-discv5.workspace = true reth-static-file.workspace = true reth-static-file-types = { workspace = true, features = ["clap"] } reth-trie = { workspace = true, features = ["metrics"] } -reth-nippy-jar.workspace = true reth-node-api.workspace = true -reth-node-ethereum.workspace = true reth-node-optimism = { workspace = true, optional = true, features = [ "optimism", "opbnb", ] } reth-node-core.workspace = true +reth-ethereum-payload-builder.workspace = true reth-db-common.workspace = true +reth-node-ethereum.workspace = true reth-node-builder.workspace = true reth-node-events.workspace = true reth-consensus.workspace = true reth-optimism-primitives.workspace = true -reth-prune-types.workspace = true +reth-engine-util.workspace = true +reth-prune.workspace = true +reth-stages-api.workspace = true +reth-optimism-cli = { workspace = true, optional = true } reth-node-bsc = { workspace = true, optional = true, features = [ "bsc", ] } @@ -88,20 +91,6 @@ toml = { workspace = true, features = 
["display"] } # metrics metrics-process.workspace = true -# test vectors generation -proptest.workspace = true -arbitrary.workspace = true -proptest-arbitrary-interop.workspace = true -rand.workspace = true - -# tui -comfy-table = "7.0" -crossterm = "0.27.0" -ratatui = { version = "0.26", default-features = false, features = [ - "crossterm", -] } -human_bytes = "0.4.1" - # async tokio = { workspace = true, features = [ "sync", @@ -119,9 +108,6 @@ tempfile.workspace = true backon.workspace = true similar-asserts.workspace = true itertools.workspace = true -rayon.workspace = true -boyer-moore-magiclen = "0.2.16" -ahash = "0.8" # p2p discv5.workspace = true @@ -131,12 +117,14 @@ tikv-jemallocator = { version = "0.5.0", optional = true } libc = "0.2" [dev-dependencies] -jsonrpsee.workspace = true -assert_matches = "1.5.0" +reth-discv4.workspace = true + [features] default = ["jemalloc"] +dev = ["reth-cli-commands/dev"] + asm-keccak = ["reth-primitives/asm-keccak"] jemalloc = ["dep:tikv-jemallocator", "reth-node-core/jemalloc"] @@ -149,6 +137,8 @@ min-debug-logs = ["tracing/release_max_level_debug"] min-trace-logs = ["tracing/release_max_level_trace"] optimism = [ + "dep:reth-optimism-cli", + "reth-optimism-cli?/optimism", "reth-primitives/optimism", "reth-rpc/optimism", "reth-provider/optimism", @@ -156,6 +146,7 @@ optimism = [ "reth-blockchain-tree/optimism", "dep:reth-node-optimism", "reth-node-core/optimism", + "reth-rpc-eth-types/optimism", ] opbnb = [ @@ -170,6 +161,7 @@ bsc = [ "reth-node-core/bsc", "reth-stages/bsc", "reth-node-builder/bsc", + "reth-beacon-consensus/bsc", ] # no-op feature flag for switching between the `optimism` and default functionality in CI matrices diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index ff5c4add5..e36930726 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -6,14 +6,17 @@ use crate::{ LogArgs, }, commands::{ - config_cmd, db, debug_cmd, dump_genesis, import, init_cmd, init_state, + debug_cmd, import, node::{self, NoArgs}, - p2p, recover, stage, test_vectors, }, + macros::block_executor, version::{LONG_VERSION, SHORT_VERSION}, }; use clap::{value_parser, Parser, Subcommand}; use reth_chainspec::ChainSpec; +use reth_cli_commands::{ + config_cmd, db, dump_genesis, init_cmd, init_state, p2p, prune, recover, stage, +}; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; use reth_node_builder::{NodeBuilder, WithLaunchContext}; @@ -158,12 +161,16 @@ impl Cli { } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Stage(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), + Commands::Stage(command) => runner.run_command_until_exit(|ctx| { + command.execute(ctx, |chain_spec| block_executor!(chain_spec)) + }), Commands::P2P(command) => runner.run_until_ctrl_c(command.execute()), + #[cfg(feature = "dev")] Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), Commands::Debug(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), Commands::Recover(command) => runner.run_command_until_exit(|ctx| command.execute(ctx)), + Commands::Prune(command) => runner.run_until_ctrl_c(command.execute()), } } @@ -195,11 +202,11 @@ pub enum Commands { /// This syncs RLP encoded OP blocks below Bedrock from a file, without executing. 
#[cfg(feature = "optimism")] #[command(name = "import-op")] - ImportOp(crate::commands::import_op::ImportOpCommand), + ImportOp(reth_optimism_cli::ImportOpCommand), /// This imports RLP encoded receipts from a file. #[cfg(feature = "optimism")] #[command(name = "import-receipts-op")] - ImportReceiptsOp(crate::commands::import_receipts_op::ImportReceiptsOpCommand), + ImportReceiptsOp(reth_optimism_cli::ImportReceiptsOpCommand), /// Dumps genesis block JSON configuration to stdout. DumpGenesis(dump_genesis::DumpGenesisCommand), /// Database debugging utilities @@ -212,8 +219,9 @@ pub enum Commands { #[command(name = "p2p")] P2P(p2p::Command), /// Generate Test Vectors + #[cfg(feature = "dev")] #[command(name = "test-vectors")] - TestVectors(test_vectors::Command), + TestVectors(reth_cli_commands::test_vectors::Command), /// Write config to stdout #[command(name = "config")] Config(config_cmd::Command), @@ -223,6 +231,9 @@ pub enum Commands { /// Scripts for node recovery #[command(name = "recover")] Recover(recover::Command), + /// Prune according to the configuration without any limits + #[command(name = "prune")] + Prune(prune::PruneCommand), } #[cfg(test)] diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 43f0d1f86..a6f757087 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -1,9 +1,5 @@ //! Command for debugging block building. - -use crate::{ - commands::common::{AccessRights, Environment, EnvironmentArgs}, - macros::block_executor, -}; +use crate::macros::block_executor; use alloy_rlp::Decodable; use clap::Parser; use eyre::Context; @@ -14,6 +10,7 @@ use reth_beacon_consensus::EthBeaconConsensus; use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_consensus::Consensus; use reth_db::DatabaseEnv; @@ -24,16 +21,16 @@ use reth_fs_util as fs; use reth_node_api::PayloadBuilderAttributes; use reth_payload_builder::database::CachedReads; use reth_primitives::{ - constants::eip4844::{LoadKzgSettingsError, MAINNET_KZG_TRUSTED_SETUP}, - revm_primitives::KzgSettings, - Address, BlobTransaction, BlobTransactionSidecar, Bytes, PooledTransactionsElement, - SealedBlock, SealedBlockWithSenders, Transaction, TransactionSigned, TxEip4844, B256, U256, + constants::eip4844::LoadKzgSettingsError, revm_primitives::KzgSettings, Address, + BlobTransaction, BlobTransactionSidecar, Bytes, PooledTransactionsElement, SealedBlock, + SealedBlockWithSenders, Transaction, TransactionSigned, TxEip4844, B256, U256, }; use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; -use reth_revm::database::StateProviderDatabase; +use reth_prune::PruneModes; +use reth_revm::{database::StateProviderDatabase, primitives::EnvKzgSettings}; use reth_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; use reth_stages::StageId; use reth_transaction_pool::{ @@ -103,14 +100,14 @@ impl Command { } /// Loads the trusted setup params from a given file path or falls back to - /// `MAINNET_KZG_TRUSTED_SETUP`. - fn kzg_settings(&self) -> eyre::Result> { + /// `EnvKzgSettings::Default`. 
+ fn kzg_settings(&self) -> eyre::Result { if let Some(ref trusted_setup_file) = self.trusted_setup_file { let trusted_setup = KzgSettings::load_trusted_setup_file(trusted_setup_file) .map_err(LoadKzgSettingsError::KzgError)?; - Ok(Arc::new(trusted_setup)) + Ok(EnvKzgSettings::Custom(Arc::new(trusted_setup))) } else { - Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) + Ok(EnvKzgSettings::Default) } } @@ -129,7 +126,11 @@ impl Command { // configure blockchain tree let tree_externals = TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); - let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; + let tree = BlockchainTree::new( + tree_externals, + BlockchainTreeConfig::default(), + PruneModes::none(), + )?; let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); // fetch the best block from the database @@ -310,7 +311,6 @@ impl Command { execution_outcome, hashed_post_state, trie_updates, - None, )?; info!(target: "reth::cli", "Successfully appended built block"); } diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index a398a136b..b7bf1449d 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -1,15 +1,12 @@ //! Command for debugging execution. -use crate::{ - args::{get_secret_key, NetworkArgs}, - commands::common::{AccessRights, Environment, EnvironmentArgs}, - macros::block_executor, - utils::get_single_header, -}; +use crate::{args::NetworkArgs, macros::block_executor, utils::get_single_header}; use clap::Parser; use futures::{stream::select as stream_select, StreamExt}; use reth_beacon_consensus::EthBeaconConsensus; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; +use reth_cli_util::get_secret_key; use reth_config::Config; use reth_consensus::Consensus; use reth_db::DatabaseEnv; @@ -26,15 +23,14 @@ use reth_primitives::{BlockHashOrNumber, BlockNumber, B256}; use reth_provider::{ BlockExecutionWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, }; -use reth_prune_types::PruneModes; +use reth_prune::PruneModes; use reth_stages::{ - sets::DefaultStages, - stages::{ExecutionStage, ExecutionStageThresholds}, - Pipeline, StageId, StageSet, + sets::DefaultStages, stages::ExecutionStage, ExecutionStageThresholds, Pipeline, StageId, + StageSet, }; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; use tracing::*; @@ -133,11 +129,6 @@ impl Command { .network .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) .with_task_executor(Box::new(task_executor)) - .listener_addr(SocketAddr::new(self.network.addr, self.network.port)) - .discovery_addr(SocketAddr::new( - self.network.discovery.addr, - self.network.discovery.port, - )) .build(provider_factory) .start_network() .await?; @@ -216,7 +207,7 @@ impl Command { ctx.task_executor.spawn_critical( "events task", reth_node_events::node::handle_events( - Some(network.clone()), + Some(Box::new(network)), latest_block_number, events, provider_factory.db_ref().clone(), diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index e4f4805f1..ed8b946fe 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs 
@@ -1,14 +1,15 @@ //! Command for debugging in-memory merkle trie calculation. use crate::{ - args::{get_secret_key, NetworkArgs}, - commands::common::{AccessRights, Environment, EnvironmentArgs}, + args::NetworkArgs, macros::block_executor, utils::{get_single_body, get_single_header}, }; use backon::{ConstantBuilder, Retryable}; use clap::Parser; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; +use reth_cli_util::get_secret_key; use reth_config::Config; use reth_db::DatabaseEnv; use reth_errors::BlockValidationError; @@ -25,8 +26,8 @@ use reth_provider::{ use reth_revm::database::StateProviderDatabase; use reth_stages::StageId; use reth_tasks::TaskExecutor; -use reth_trie::{updates::TrieKey, StateRoot}; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use reth_trie::StateRoot; +use std::{path::PathBuf, sync::Arc}; use tracing::*; /// `reth debug in-memory-merkle` command @@ -64,11 +65,6 @@ impl Command { .network .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) .with_task_executor(Box::new(task_executor)) - .listener_addr(SocketAddr::new(self.network.addr, self.network.port)) - .discovery_addr(SocketAddr::new( - self.network.discovery.addr, - self.network.discovery.port, - )) .build(provider_factory) .start_network() .await?; @@ -172,7 +168,6 @@ impl Command { .clone() .try_seal_with_senders() .map_err(|_| BlockValidationError::SenderRecoveryError)?, - None, )?; execution_outcome.write_to_storage(provider_rw.tx_ref(), None, OriginalValuesKnown::No)?; let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?; @@ -197,15 +192,16 @@ impl Command { // Compare updates let mut in_mem_mismatched = Vec::new(); let mut incremental_mismatched = Vec::new(); - let mut in_mem_updates_iter = in_memory_updates.into_iter().peekable(); - let mut incremental_updates_iter = incremental_trie_updates.into_iter().peekable(); + let mut in_mem_updates_iter = in_memory_updates.account_nodes_ref().iter().peekable(); + let mut incremental_updates_iter = + incremental_trie_updates.account_nodes_ref().iter().peekable(); while in_mem_updates_iter.peek().is_some() || incremental_updates_iter.peek().is_some() { match (in_mem_updates_iter.next(), incremental_updates_iter.next()) { (Some(in_mem), Some(incr)) => { similar_asserts::assert_eq!(in_mem.0, incr.0, "Nibbles don't match"); if in_mem.1 != incr.1 && - matches!(in_mem.0, TrieKey::AccountNode(ref nibbles) if nibbles.0.len() > self.skip_node_depth.unwrap_or_default()) + in_mem.0.len() > self.skip_node_depth.unwrap_or_default() { in_mem_mismatched.push(in_mem); incremental_mismatched.push(incr); diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 65be1a2ad..46b50b81e 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -1,15 +1,11 @@ //! Command for debugging merkle trie calculation. 
- -use crate::{ - args::{get_secret_key, NetworkArgs}, - commands::common::{AccessRights, Environment, EnvironmentArgs}, - macros::block_executor, - utils::get_single_header, -}; +use crate::{args::NetworkArgs, macros::block_executor, utils::get_single_header}; use backon::{ConstantBuilder, Retryable}; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; +use reth_cli_util::get_secret_key; use reth_config::Config; use reth_consensus::Consensus; use reth_db::{tables, DatabaseEnv}; @@ -23,14 +19,13 @@ use reth_provider::{ BlockNumReader, BlockWriter, ChainSpecProvider, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, ProviderError, ProviderFactory, StateWriter, }; -use reth_prune_types::PruneModes; use reth_revm::database::StateProviderDatabase; use reth_stages::{ stages::{AccountHashingStage, MerkleStage, StorageHashingStage}, ExecInput, Stage, StageCheckpoint, }; use reth_tasks::TaskExecutor; -use std::{net::SocketAddr, path::PathBuf, sync::Arc}; +use std::{path::PathBuf, sync::Arc}; use tracing::*; /// `reth debug merkle` command @@ -69,11 +64,6 @@ impl Command { .network .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) .with_task_executor(Box::new(task_executor)) - .listener_addr(SocketAddr::new(self.network.addr, self.network.port)) - .discovery_addr(SocketAddr::new( - self.network.discovery.addr, - self.network.discovery.port, - )) .build(provider_factory) .start_network() .await?; @@ -154,16 +144,15 @@ impl Command { .map_err(|block| eyre::eyre!("Error sealing block with senders: {block:?}"))?; trace!(target: "reth::cli", block_number, "Executing block"); - provider_rw.insert_block(sealed_block.clone(), None)?; + provider_rw.insert_block(sealed_block.clone())?; td += sealed_block.difficulty; - let mut executor = executor_provider.batch_executor( - StateProviderDatabase::new(LatestStateProviderRef::new( + let mut executor = executor_provider.batch_executor(StateProviderDatabase::new( + LatestStateProviderRef::new( provider_rw.tx_ref(), provider_rw.static_file_provider().clone(), - )), - PruneModes::none(), - ); + ), + )); executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?; executor.finalize().write_to_storage( provider_rw.tx_ref(), diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 5bc618172..414b3fc97 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -1,8 +1,4 @@ -use crate::{ - args::{get_secret_key, NetworkArgs}, - commands::common::{AccessRights, Environment, EnvironmentArgs}, - macros::block_executor, -}; +use crate::{args::NetworkArgs, macros::block_executor}; use clap::Parser; use eyre::Context; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; @@ -10,24 +6,26 @@ use reth_beacon_consensus::{hooks::EngineHooks, BeaconConsensusEngine, EthBeacon use reth_blockchain_tree::{ BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, }; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; +use reth_cli_util::get_secret_key; use reth_config::Config; use reth_consensus::Consensus; use reth_db::DatabaseEnv; +use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage}; use reth_fs_util as fs; use 
reth_network::NetworkHandle; use reth_network_api::NetworkInfo; -use reth_node_core::engine::engine_store::{EngineMessageStore, StoredEngineApiMessage}; use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_provider::{ providers::BlockchainProvider, CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, }; -use reth_prune_types::PruneModes; +use reth_prune::PruneModes; use reth_stages::Pipeline; use reth_static_file::StaticFileProducer; use reth_tasks::TaskExecutor; use reth_transaction_pool::noop::NoopTransactionPool; -use std::{net::SocketAddr, path::PathBuf, sync::Arc, time::Duration}; +use std::{path::PathBuf, sync::Arc, time::Duration}; use tokio::sync::oneshot; use tracing::*; @@ -65,11 +63,6 @@ impl Command { .network .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) .with_task_executor(Box::new(task_executor)) - .listener_addr(SocketAddr::new(self.network.addr, self.network.port)) - .discovery_addr(SocketAddr::new( - self.network.discovery.addr, - self.network.discovery.port, - )) .build(provider_factory) .start_network() .await?; @@ -93,7 +86,11 @@ impl Command { // Configure blockchain tree let tree_externals = TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); - let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; + let tree = BlockchainTree::new( + tree_externals, + BlockchainTreeConfig::default(), + PruneModes::none(), + )?; let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); // Set up the blockchain provider @@ -152,7 +149,7 @@ impl Command { network_client, Pipeline::builder().build( provider_factory.clone(), - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()), + StaticFileProducer::new(provider_factory.clone(), PruneModes::none()), ), blockchain_db.clone(), Box::new(ctx.task_executor.clone()), diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 35bfc5b69..697a1c874 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -1,13 +1,9 @@ //! Command that initializes the node by importing a chain from a file. - -use crate::{ - commands::common::{AccessRights, Environment, EnvironmentArgs}, - macros::block_executor, - version::SHORT_VERSION, -}; +use crate::{macros::block_executor, version::SHORT_VERSION}; use clap::Parser; use futures::{Stream, StreamExt}; use reth_beacon_consensus::EthBeaconConsensus; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_config::Config; use reth_consensus::Consensus; use reth_db::tables; @@ -27,7 +23,7 @@ use reth_provider::{ BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderError, ProviderFactory, StageCheckpointReader, }; -use reth_prune_types::PruneModes; +use reth_prune::PruneModes; use reth_stages::{prelude::*, Pipeline, StageId, StageSet}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; @@ -100,8 +96,7 @@ impl ImportCommand { Arc::new(file_client), StaticFileProducer::new(provider_factory.clone(), PruneModes::default()), self.no_state, - ) - .await?; + )?; // override the tip pipeline.set_tip(tip); @@ -157,7 +152,7 @@ impl ImportCommand { /// /// If configured to execute, all stages will run. Otherwise, only stages that don't require state /// will run. 
-pub async fn build_import_pipeline( +pub fn build_import_pipeline( config: &Config, provider_factory: ProviderFactory, consensus: &Arc, diff --git a/bin/reth/src/commands/mod.rs b/bin/reth/src/commands/mod.rs index cd5a7e7ba..cf1b79be5 100644 --- a/bin/reth/src/commands/mod.rs +++ b/bin/reth/src/commands/mod.rs @@ -1,20 +1,5 @@ //! This contains all of the `reth` commands -pub mod config_cmd; -pub mod db; pub mod debug_cmd; -pub mod dump_genesis; pub mod import; -pub mod import_op; -pub mod import_receipts_op; - -pub mod init_cmd; -pub mod init_state; - pub mod node; -pub mod p2p; -pub mod recover; -pub mod stage; -pub mod test_vectors; - -pub mod common; diff --git a/bin/reth/src/commands/node/mod.rs b/bin/reth/src/commands/node/mod.rs index f4c355b17..ee3c6da74 100644 --- a/bin/reth/src/commands/node/mod.rs +++ b/bin/reth/src/commands/node/mod.rs @@ -1,13 +1,14 @@ //! Main node command for launching a node use crate::args::{ - utils::{chain_help, chain_value_parser, parse_socket_address, SUPPORTED_CHAINS}, + utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS}, DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }; use clap::{value_parser, Args, Parser}; use reth_chainspec::ChainSpec; use reth_cli_runner::CliContext; +use reth_cli_util::parse_socket_address; use reth_db::{init_db, DatabaseEnv}; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{node_config::NodeConfig, version}; diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 7c024438a..c725b033b 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -32,7 +32,15 @@ pub mod cli; pub mod commands; mod macros; -pub mod utils; + +/// Re-exported utils. +pub mod utils { + pub use reth_db::open_db_read_only; + + /// Re-exported from `reth_node_core`, also to prevent a breaking change. See the comment + /// on the `reth_node_core::args` re-export for more details. + pub use reth_node_core::utils::*; +} /// Re-exported payload related types pub mod payload { @@ -140,6 +148,15 @@ pub mod rpc { pub use reth_rpc_types::*; } + /// Re-exported from `reth_rpc_server_types`. + pub mod server_types { + pub use reth_rpc_server_types::*; + /// Re-exported from `reth_rpc_eth_types`. + pub mod eth { + pub use reth_rpc_eth_types::*; + } + } + /// Re-exported from `reth_rpc_api`. pub mod api { pub use reth_rpc_api::*; @@ -151,10 +168,10 @@ pub mod rpc { /// Re-exported from `reth_rpc::rpc`. pub mod result { - pub use reth_rpc::result::*; + pub use reth_rpc_server_types::result::*; } - /// Re-exported from `reth_rpc::eth`. + /// Re-exported from `reth_rpc_types_compat`. 
pub mod compat { pub use reth_rpc_types_compat::*; } diff --git a/book/SUMMARY.md b/book/SUMMARY.md index 499b6dd97..af03aa32a 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -61,8 +61,6 @@ - [`reth p2p`](./cli/reth/p2p.md) - [`reth p2p header`](./cli/reth/p2p/header.md) - [`reth p2p body`](./cli/reth/p2p/body.md) - - [`reth test-vectors`](./cli/reth/test-vectors.md) - - [`reth test-vectors tables`](./cli/reth/test-vectors/tables.md) - [`reth config`](./cli/reth/config.md) - [`reth debug`](./cli/reth/debug.md) - [`reth debug execution`](./cli/reth/debug/execution.md) @@ -72,6 +70,7 @@ - [`reth debug replay-engine`](./cli/reth/debug/replay-engine.md) - [`reth recover`](./cli/reth/recover.md) - [`reth recover storage-tries`](./cli/reth/recover/storage-tries.md) + - [`reth prune`](./cli/reth/prune.md) - [Developers](./developers/developers.md) - [Execution Extensions](./developers/exex/exex.md) - [How do ExExes work?](./developers/exex/how-it-works.md) diff --git a/book/cli/SUMMARY.md b/book/cli/SUMMARY.md index 089de1b65..5f02f1e9e 100644 --- a/book/cli/SUMMARY.md +++ b/book/cli/SUMMARY.md @@ -32,8 +32,6 @@ - [`reth p2p`](./reth/p2p.md) - [`reth p2p header`](./reth/p2p/header.md) - [`reth p2p body`](./reth/p2p/body.md) - - [`reth test-vectors`](./reth/test-vectors.md) - - [`reth test-vectors tables`](./reth/test-vectors/tables.md) - [`reth config`](./reth/config.md) - [`reth debug`](./reth/debug.md) - [`reth debug execution`](./reth/debug/execution.md) @@ -43,4 +41,5 @@ - [`reth debug replay-engine`](./reth/debug/replay-engine.md) - [`reth recover`](./reth/recover.md) - [`reth recover storage-tries`](./reth/recover/storage-tries.md) + - [`reth prune`](./reth/prune.md) diff --git a/book/cli/reth.md b/book/cli/reth.md index a4ba8f3d3..cebeb44e2 100644 --- a/book/cli/reth.md +++ b/book/cli/reth.md @@ -15,10 +15,10 @@ Commands: db Database debugging utilities stage Manipulate individual stages p2p P2P Debugging utilities - test-vectors Generate Test Vectors config Write config to stdout debug Various debug routines recover Scripts for node recovery + prune Prune according to the configuration without any limits help Print this message or the help of the given subcommand(s) Options: @@ -27,7 +27,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/config.md b/book/cli/reth/config.md index 1b2a89c66..df0d261b0 100644 --- a/book/cli/reth/config.md +++ b/book/cli/reth/config.md @@ -18,7 +18,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/db.md b/book/cli/reth/db.md index b884b7d0f..b867134a9 100644 --- a/book/cli/reth/db.md +++ b/book/cli/reth/db.md @@ -56,7 +56,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/debug.md b/book/cli/reth/debug.md index 2779b8d77..d61094834 100644 --- a/book/cli/reth/debug.md +++ b/book/cli/reth/debug.md @@ -20,7 +20,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. 
Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/dump-genesis.md b/book/cli/reth/dump-genesis.md index 5add92402..7197be305 100644 --- a/book/cli/reth/dump-genesis.md +++ b/book/cli/reth/dump-genesis.md @@ -12,7 +12,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/import.md b/book/cli/reth/import.md index 5a139e348..29a67f181 100644 --- a/book/cli/reth/import.md +++ b/book/cli/reth/import.md @@ -44,7 +44,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/init-state.md b/book/cli/reth/init-state.md index b1802b253..d947baec3 100644 --- a/book/cli/reth/init-state.md +++ b/book/cli/reth/init-state.md @@ -44,7 +44,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/init.md b/book/cli/reth/init.md index 8fe3fe018..5eb9d4d03 100644 --- a/book/cli/reth/init.md +++ b/book/cli/reth/init.md @@ -44,7 +44,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 575fe18cc..61759a694 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -15,7 +15,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] @@ -313,6 +313,16 @@ RPC: [default: 50000000] + --rpc.eth-proof-window + The maximum proof window for historical proof generation. This value allows for generating historical proofs up to configured number of blocks from current tip (up to `tip - window`) + + [default: 0] + + --rpc.proof-permits + Maximum number of concurrent getproof requests + + [default: 25] + RPC State Cache: --rpc-cache.max-blocks Max number of blocks in cache diff --git a/book/cli/reth/p2p.md b/book/cli/reth/p2p.md index ada874d8b..0177244a3 100644 --- a/book/cli/reth/p2p.md +++ b/book/cli/reth/p2p.md @@ -20,7 +20,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/prune.md b/book/cli/reth/prune.md new file mode 100644 index 000000000..0b3e701f6 --- /dev/null +++ b/book/cli/reth/prune.md @@ -0,0 +1,146 @@ +# reth prune + +Prune according to the configuration without any limits + +```bash +$ reth prune --help +Usage: reth prune [OPTIONS] + +Options: + --instance + Add a new instance of a node. + + Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine. + + Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other. 
+ + Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2 + + [default: 1] + + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static_files + The absolute path to store static files in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, dev + + [default: mainnet] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... 
+ Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) + + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/book/cli/reth/recover.md b/book/cli/reth/recover.md index 9ffd8eb70..4fe28211d 100644 --- a/book/cli/reth/recover.md +++ b/book/cli/reth/recover.md @@ -16,7 +16,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/recover/storage-tries.md b/book/cli/reth/recover/storage-tries.md index 649580382..d5df358a7 100644 --- a/book/cli/reth/recover/storage-tries.md +++ b/book/cli/reth/recover/storage-tries.md @@ -44,7 +44,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/stage.md b/book/cli/reth/stage.md index 17a888b6e..c9ff302c1 100644 --- a/book/cli/reth/stage.md +++ b/book/cli/reth/stage.md @@ -19,7 +19,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/stage/drop.md b/book/cli/reth/stage/drop.md index dc2f1330b..b700519e1 100644 --- a/book/cli/reth/stage/drop.md +++ b/book/cli/reth/stage/drop.md @@ -44,7 +44,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/stage/dump.md b/book/cli/reth/stage/dump.md index f08b9ffd8..a5fd3052c 100644 --- a/book/cli/reth/stage/dump.md +++ b/book/cli/reth/stage/dump.md @@ -51,7 +51,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/stage/run.md b/book/cli/reth/stage/run.md index a98a2be6d..4fa8e0a38 100644 --- a/book/cli/reth/stage/run.md +++ b/book/cli/reth/stage/run.md @@ -44,7 +44,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] diff --git a/book/cli/reth/stage/unwind.md b/book/cli/reth/stage/unwind.md index a1a538f3b..b9765bd8d 100644 --- a/book/cli/reth/stage/unwind.md +++ b/book/cli/reth/stage/unwind.md @@ -49,7 +49,7 @@ Datadir: Possible values are either a built-in chain or the path to a chain specification file. Built-in chains: - mainnet, sepolia, goerli, holesky, dev + mainnet, sepolia, holesky, dev [default: mainnet] @@ -204,6 +204,9 @@ Networking: [default: 131072] + --offline + If this is enabled, then all stages except headers, bodies, and sender recovery will be unwound + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/book/cli/reth/test-vectors.md b/book/cli/reth/test-vectors.md index da1b3c933..844c5ed84 100644 --- a/book/cli/reth/test-vectors.md +++ b/book/cli/reth/test-vectors.md @@ -16,7 +16,7 @@ Options: Possible values are either a built-in chain or the path to a chain specification file. 
     Built-in chains:
-        mainnet, sepolia, goerli, holesky, dev
+        mainnet, sepolia, holesky, dev

     [default: mainnet]
diff --git a/book/cli/reth/test-vectors/tables.md b/book/cli/reth/test-vectors/tables.md
index 3b8f52f2c..2a3023817 100644
--- a/book/cli/reth/test-vectors/tables.md
+++ b/book/cli/reth/test-vectors/tables.md
@@ -16,7 +16,7 @@ Options:
     Possible values are either a built-in chain or the path to a chain specification file.

     Built-in chains:
-        mainnet, sepolia, goerli, holesky, dev
+        mainnet, sepolia, holesky, dev

     [default: mainnet]
diff --git a/book/developers/exex/assets/remote_exex.png b/book/developers/exex/assets/remote_exex.png
new file mode 100644
index 000000000..8606616e8
Binary files /dev/null and b/book/developers/exex/assets/remote_exex.png differ
diff --git a/book/developers/exex/remote.md b/book/developers/exex/remote.md
index a3ac9ff2e..e0caa72f6 100644
--- a/book/developers/exex/remote.md
+++ b/book/developers/exex/remote.md
@@ -1,3 +1,490 @@
 # Remote Execution Extensions

-WIP
+In this chapter, we will learn how to create an ExEx that emits all notifications to an external process.
+
+We will use [Tonic](https://github.com/hyperium/tonic) to create a gRPC server and a client.
+- The server binary will have the Reth client, our ExEx and the gRPC server.
+- The client binary will have the gRPC client that connects to the server.
+
+## Prerequisites
+
+See the [dependencies section](https://github.com/hyperium/tonic?tab=readme-ov-file#dependencies) of the Tonic documentation
+to install the required dependencies.
+
+## Create a new project
+
+Let's create a new project. Don't forget to provide the `--lib` flag to `cargo new`,
+because we will have two custom binaries in this project that we will create manually.
+
+```console
+$ cargo new --lib exex-remote
+$ cd exex-remote
+```
+
+We will also need a bunch of dependencies. Some of them you know from the [Hello World](./hello-world.md) chapter,
+but some are specific to what we need now.
+
+```toml
+[package]
+name = "remote-exex"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+# reth
+reth = { git = "https://github.com/paradigmxyz/reth.git" }
+reth-exex = { git = "https://github.com/paradigmxyz/reth.git", features = ["serde"] }
+reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" }
+reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" }
+
+# async
+tokio = { version = "1", features = ["full"] }
+tokio-stream = "0.1"
+futures-util = "0.3"
+
+# grpc
+tonic = "0.11"
+prost = "0.12"
+bincode = "1"
+
+# misc
+eyre = "0.6"
+
+[build-dependencies]
+tonic-build = "0.11"
+
+[[bin]]
+name = "exex"
+path = "src/exex.rs"
+
+[[bin]]
+name = "consumer"
+path = "src/consumer.rs"
+```
+
+We also added a build dependency for Tonic. We will use it to generate the Rust code for our
+Protobuf definitions at compile time. Read more about using Tonic in the
+[introductory tutorial](https://github.com/hyperium/tonic/blob/6a213e9485965db0628591e30577ed81cdaeaf2b/examples/helloworld-tutorial.md).
+
+Also, we now have two separate binaries:
+- `exex` is the server binary that will run the ExEx and the gRPC server.
+- `consumer` is the client binary that will connect to the server and receive notifications.
+
+### Create the Protobuf definitions
+
+In the root directory of your project (not `src`), create a new directory called `proto` and a file called `exex.proto`.
+
+We define a service called `RemoteExEx` that exposes a single method called `Subscribe`.
+This method streams notifications to the client.
+
+<div class="warning">
+
+A proper way to represent the notification would be to define all fields in the schema, but it goes beyond the scope
+of this chapter.
+
+For an example of a full schema, see the [Remote ExEx](https://github.com/paradigmxyz/reth-exex-grpc/blob/22b26f7beca1c74577d28be3b3838eb352747be0/proto/exex.proto) example.
+
+</div>
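+
+For the `tonic::include_proto!` call used further below to work, the schema has to be compiled by `tonic-build`
+(the build dependency we declared earlier) from a `build.rs` file at the project root. A minimal sketch of such a
+build script, assuming the `proto/exex.proto` path from this chapter:
+
+```rust,norun,noplayground,ignore
+// build.rs -- compiles proto/exex.proto into Rust code at build time.
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Generates the `exex` protobuf module that `tonic::include_proto!("exex")` picks up.
+    tonic_build::compile_protos("proto/exex.proto")?;
+    Ok(())
+}
+```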
+
+```protobuf
+syntax = "proto3";
+
+package exex;
+
+service RemoteExEx {
+  rpc Subscribe(SubscribeRequest) returns (stream ExExNotification) {}
+}
+
+message SubscribeRequest {}
+
+message ExExNotification {
+  bytes data = 1;
+}
+```
+
+To instruct Tonic to generate the Rust code using this `.proto`, add the following lines to your `lib.rs` file:
+```rust,norun,noplayground,ignore
+pub mod proto {
+    tonic::include_proto!("exex");
+}
+```
+
+## ExEx and gRPC server
+
+We will now create the ExEx and the gRPC server in our `src/exex.rs` file.
+
+### gRPC server
+
+Let's create a minimal gRPC server that listens on port `:10000`, and spawn it using
+the [NodeBuilder](https://reth.rs/docs/reth/builder/struct.NodeBuilder.html)'s [task executor](https://reth.rs/docs/reth/tasks/struct.TaskExecutor.html).
+
+```rust,norun,noplayground,ignore
+use remote_exex::proto::{
+    self,
+    remote_ex_ex_server::{RemoteExEx, RemoteExExServer},
+};
+use reth_exex::ExExNotification;
+use reth_node_ethereum::EthereumNode;
+use reth_tracing::tracing::info;
+use std::sync::Arc;
+use tokio::sync::{broadcast, mpsc};
+use tokio_stream::wrappers::ReceiverStream;
+use tonic::{transport::Server, Request, Response, Status};
+
+struct ExExService {}
+
+#[tonic::async_trait]
+impl RemoteExEx for ExExService {
+    type SubscribeStream = ReceiverStream<Result<proto::ExExNotification, Status>>;
+
+    async fn subscribe(
+        &self,
+        _request: Request<proto::SubscribeRequest>,
+    ) -> Result<Response<Self::SubscribeStream>, Status> {
+        let (_tx, rx) = mpsc::channel(1);
+
+        Ok(Response::new(ReceiverStream::new(rx)))
+    }
+}
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::parse_args().run(|builder, _| async move {
+        let server = Server::builder()
+            .add_service(RemoteExExServer::new(ExExService {}))
+            .serve("[::1]:10000".parse().unwrap());
+
+        let handle = builder.node(EthereumNode::default()).launch().await?;
+
+        handle
+            .node
+            .task_executor
+            .spawn_critical("gRPC server", async move {
+                server.await.expect("failed to start gRPC server")
+            });
+
+        handle.wait_for_node_exit().await
+    })
+}
+```
+
+Currently, it does not send anything on the stream.
+We need to create a communication channel between our future ExEx and this gRPC server
+to send new `ExExNotification`s over it.
+
+Let's create this channel in the `main` function, where both the gRPC server and the ExEx are initialized,
+and store the sender half of this channel (that way, we will be able to create new receivers) in our gRPC server.
+
+```rust,norun,noplayground,ignore
+// ...
+use reth_exex::ExExNotification;
+
+struct ExExService {
+    notifications: Arc<broadcast::Sender<ExExNotification>>,
+}
+
+...
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::parse_args().run(|builder, _| async move {
+        let notifications = Arc::new(broadcast::channel(1).0);
+
+        let server = Server::builder()
+            .add_service(RemoteExExServer::new(ExExService {
+                notifications: notifications.clone(),
+            }))
+            .serve("[::1]:10000".parse().unwrap());
+
+        let handle = builder
+            .node(EthereumNode::default())
+            .launch()
+            .await?;
+
+        handle
+            .node
+            .task_executor
+            .spawn_critical("gRPC server", async move {
+                server.await.expect("failed to start gRPC server")
+            });
+
+        handle.wait_for_node_exit().await
+    })
+}
+```
+
+And with that, we're ready to handle incoming notifications, serialize them with [bincode](https://docs.rs/bincode/)
+and send them back to the client.
+
+For each incoming request, we spawn a separate tokio task that will run in the background,
+and then return the stream receiver to the client.
+
+```rust,norun,noplayground,ignore
+// ...
+
+#[tonic::async_trait]
+impl RemoteExEx for ExExService {
+    type SubscribeStream = ReceiverStream<Result<proto::ExExNotification, Status>>;
+
+    async fn subscribe(
+        &self,
+        _request: Request<proto::SubscribeRequest>,
+    ) -> Result<Response<Self::SubscribeStream>, Status> {
+        let (tx, rx) = mpsc::channel(1);
+
+        let mut notifications = self.notifications.subscribe();
+        tokio::spawn(async move {
+            while let Ok(notification) = notifications.recv().await {
+                let proto_notification = proto::ExExNotification {
+                    data: bincode::serialize(&notification).expect("failed to serialize"),
+                };
+                tx.send(Ok(proto_notification))
+                    .await
+                    .expect("failed to send notification to client");
+
+                info!("Notification sent to the gRPC client");
+            }
+        });
+
+        Ok(Response::new(ReceiverStream::new(rx)))
+    }
+}
+
+// ...
+```
+
+That's it for the gRPC server part! It doesn't receive anything on the `notifications` channel yet,
+but we will fix that with our ExEx.
+
+### ExEx
+
+Now, let's define the ExEx part of our binary.
+
+Our ExEx accepts a `notifications` channel and redirects all incoming `ExExNotification`s to it.
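+
+Why a `broadcast` channel rather than `mpsc`? Each gRPC subscriber calls `subscribe()` on the shared sender and
+gets its own receiver, so every connected client sees every notification. A tiny standalone sketch of that fan-out
+behavior (plain tokio, no reth APIs involved):
+
+```rust
+use tokio::sync::broadcast;
+
+#[tokio::main]
+async fn main() {
+    // One sender, many receivers: every subscriber sees every message,
+    // which is exactly what we need to serve multiple gRPC clients.
+    let (tx, _) = broadcast::channel::<u32>(16);
+    let mut a = tx.subscribe();
+    let mut b = tx.subscribe();
+    tx.send(42).unwrap();
+    assert_eq!(a.recv().await.unwrap(), 42);
+    assert_eq!(b.recv().await.unwrap(), 42);
+}
+```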
+
+<div class="warning">
+
+Don't forget to emit `ExExEvent::FinishedHeight`
+
+</div>
+
+```rust,norun,noplayground,ignore
+// ...
+use reth_exex::{ExExContext, ExExEvent};
+
+async fn remote_exex<Node: FullNodeComponents>(
+    mut ctx: ExExContext<Node>,
+    notifications: Arc<broadcast::Sender<ExExNotification>>,
+) -> eyre::Result<()> {
+    while let Some(notification) = ctx.notifications.recv().await {
+        if let Some(committed_chain) = notification.committed_chain() {
+            ctx.events
+                .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?;
+        }
+
+        info!("Notification sent to the gRPC server");
+        let _ = notifications.send(notification);
+    }
+
+    Ok(())
+}
+
+// ...
+```
+
+All that's left is to connect the pieces: install our ExEx in the node and pass the sender half
+of the communication channel to it.
+
+```rust,norun,noplayground,ignore
+// ...
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::parse_args().run(|builder, _| async move {
+        let notifications = Arc::new(broadcast::channel(1).0);
+
+        let server = Server::builder()
+            .add_service(RemoteExExServer::new(ExExService {
+                notifications: notifications.clone(),
+            }))
+            .serve("[::1]:10000".parse().unwrap());
+
+        let handle = builder
+            .node(EthereumNode::default())
+            .install_exex("remote-exex", |ctx| async move {
+                Ok(remote_exex(ctx, notifications))
+            })
+            .launch()
+            .await?;
+
+        handle
+            .node
+            .task_executor
+            .spawn_critical("gRPC server", async move {
+                server.await.expect("failed to start gRPC server")
+            });
+
+        handle.wait_for_node_exit().await
+    })
+}
+```
+
+### Full `exex.rs` code
+
+<details>
+<summary>Click to expand</summary>
+
+```rust,norun,noplayground,ignore
+use remote_exex::proto::{
+    self,
+    remote_ex_ex_server::{RemoteExEx, RemoteExExServer},
+};
+use reth::api::FullNodeComponents;
+use reth_exex::{ExExContext, ExExEvent, ExExNotification};
+use reth_node_ethereum::EthereumNode;
+use reth_tracing::tracing::info;
+use std::sync::Arc;
+use tokio::sync::{broadcast, mpsc};
+use tokio_stream::wrappers::ReceiverStream;
+use tonic::{transport::Server, Request, Response, Status};
+
+struct ExExService {
+    notifications: Arc<broadcast::Sender<ExExNotification>>,
+}
+
+#[tonic::async_trait]
+impl RemoteExEx for ExExService {
+    type SubscribeStream = ReceiverStream<Result<proto::ExExNotification, Status>>;
+
+    async fn subscribe(
+        &self,
+        _request: Request<proto::SubscribeRequest>,
+    ) -> Result<Response<Self::SubscribeStream>, Status> {
+        let (tx, rx) = mpsc::channel(1);
+
+        let mut notifications = self.notifications.subscribe();
+        tokio::spawn(async move {
+            while let Ok(notification) = notifications.recv().await {
+                let proto_notification = proto::ExExNotification {
+                    data: bincode::serialize(&notification).expect("failed to serialize"),
+                };
+                tx.send(Ok(proto_notification))
+                    .await
+                    .expect("failed to send notification to client");
+
+                info!(?notification, "Notification sent to the gRPC client");
+            }
+        });
+
+        Ok(Response::new(ReceiverStream::new(rx)))
+    }
+}
+
+async fn remote_exex<Node: FullNodeComponents>(
+    mut ctx: ExExContext<Node>,
+    notifications: Arc<broadcast::Sender<ExExNotification>>,
+) -> eyre::Result<()> {
+    while let Some(notification) = ctx.notifications.recv().await {
+        if let Some(committed_chain) = notification.committed_chain() {
+            ctx.events
+                .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?;
+        }
+
+        info!(?notification, "Notification sent to the gRPC server");
+        let _ = notifications.send(notification);
+    }
+
+    Ok(())
+}
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::parse_args().run(|builder, _| async move {
+        let notifications = Arc::new(broadcast::channel(1).0);
+
+        let server = Server::builder()
+            .add_service(RemoteExExServer::new(ExExService {
+                notifications: notifications.clone(),
+            }))
+            .serve("[::1]:10000".parse().unwrap());
+
+        let handle = builder
+            .node(EthereumNode::default())
+            .install_exex("remote-exex", |ctx| async move {
+                Ok(remote_exex(ctx, notifications))
+            })
+            .launch()
+            .await?;
+
+        handle
+            .node
+            .task_executor
+            .spawn_critical("gRPC server", async move {
+                server.await.expect("failed to start gRPC server")
+            });
+
+        handle.wait_for_node_exit().await
+    })
+}
+```
+</details>
+
+## Consumer
+
+The consumer will be a much simpler binary that just connects to our gRPC server and prints out all the notifications
+it receives.
+
+<div class="warning">
+
+We need to increase the maximum message encoding and decoding sizes to `usize::MAX`,
+because notifications can get very heavy.
+
+</div>
+
+```rust,norun,noplayground,ignore
+use remote_exex::proto::{remote_ex_ex_client::RemoteExExClient, SubscribeRequest};
+use reth_exex::ExExNotification;
+use reth_tracing::{tracing::info, RethTracer, Tracer};
+
+#[tokio::main]
+async fn main() -> eyre::Result<()> {
+    let _ = RethTracer::new().init()?;
+
+    let mut client = RemoteExExClient::connect("http://[::1]:10000")
+        .await?
+        .max_encoding_message_size(usize::MAX)
+        .max_decoding_message_size(usize::MAX);
+
+    let mut stream = client.subscribe(SubscribeRequest {}).await?.into_inner();
+    while let Some(notification) = stream.message().await? {
+        let notification: ExExNotification = bincode::deserialize(&notification.data)?;
+
+        match notification {
+            ExExNotification::ChainCommitted { new } => {
+                info!(committed_chain = ?new.range(), "Received commit");
+            }
+            ExExNotification::ChainReorged { old, new } => {
+                info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg");
+            }
+            ExExNotification::ChainReverted { old } => {
+                info!(reverted_chain = ?old.range(), "Received revert");
+            }
+        };
+    }
+
+    Ok(())
+}
+```
+
+## Running
+
+In one terminal window, we will run our ExEx and gRPC server. It will start syncing Reth on the Holesky chain
+and use Etherscan in place of a real Consensus Client.
+
+```console
+cargo run --bin exex --release -- node --chain holesky --debug.etherscan
+```
+
+And in the other, we will run our consumer:
+
+```console
+cargo run --bin consumer --release
+```
diff --git a/book/developers/profiling.md b/book/developers/profiling.md
index 884032b2a..f1fdf520e 100644
--- a/book/developers/profiling.md
+++ b/book/developers/profiling.md
@@ -41,12 +41,12 @@ cargo build --features jemalloc-prof
 ```

 When performing a longer-running or performance-sensitive task with reth, such as a sync test or load benchmark, it's usually recommended to use the `maxperf` profile. However, the `maxperf`
-profile does not enable debug symbols, which are required for tools like `perf` and `jemalloc` to produce results that a human can interpret. Reth includes a performance profile with debug symbols called `debug-fast`. To compile reth with debug symbols, jemalloc, profiling, and a performance profile:
+profile does not enable debug symbols, which are required for tools like `perf` and `jemalloc` to produce results that a human can interpret. Reth includes a performance profile with debug symbols called `profiling`. To compile reth with debug symbols, jemalloc, profiling, and a performance profile:
 ```
-cargo build --features jemalloc-prof --profile debug-fast
+cargo build --features jemalloc-prof --profile profiling

 # May improve performance even more
-RUSTFLAGS="-C target-cpu=native" cargo build --features jemalloc-prof --profile debug-fast
+RUSTFLAGS="-C target-cpu=native" cargo build --features jemalloc-prof --profile profiling
 ```

 ### Monitoring memory usage
diff --git a/book/intro.md b/book/intro.md
index 9b57849eb..077cfed30 100644
--- a/book/intro.md
+++ b/book/intro.md
@@ -5,8 +5,7 @@ _Documentation for Reth users and developers._

 Reth (short for Rust Ethereum, [pronunciation](https://twitter.com/kelvinfichter/status/1597653609411268608)) is an **Ethereum full node implementation that is focused on being user-friendly, highly modular, as well as being fast and efficient.**

-Reth is production ready, and suitable for usage in mission-critical environments such as staking or high-uptime servi
-ces.
We also actively recommend professional node operators to switch to Reth in production for performance and cost reasons in use cases where high performance with great margins is required such as RPC, MEV, Indexing, Simulations, and P2P activities. +Reth is production ready, and suitable for usage in mission-critical environments such as staking or high-uptime services. We also actively recommend professional node operators to switch to Reth in production for performance and cost reasons in use cases where high performance with great margins is required such as RPC, MEV, Indexing, Simulations, and P2P activities. @@ -76,7 +75,7 @@ Reth implements the specification of Ethereum as defined in the [ethereum/execut 1. We operate multiple nodes at the tip of Ethereum mainnet and various testnets. 1. We extensively unit test, fuzz test and document all our code, while also restricting PRs with aggressive lint rules. -We have completed an audit of the [Reth v1.0.0-rc.2](https://github.com/paradigmxyz/reth/releases/tag/v1.0.0-rc.2) with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. Find it [here](https://github.com/paradigmxyz/reth/blob/bf9cac7571f018fec581fe3647862dab527aeafb/audit/sigma_prime_audit_v1.pdf). +We have completed an audit of the [Reth v1.0.0-rc.2](https://github.com/paradigmxyz/reth/releases/tag/v1.0.0-rc.2) with [Sigma Prime](https://sigmaprime.io/), the developers of [Lighthouse](https://github.com/sigp/lighthouse), the Rust Consensus Layer implementation. Find it [here](https://github.com/paradigmxyz/reth/blob/main/audit/sigma_prime_audit_v2.pdf). [Revm](https://github.com/bluealloy/revm) (the EVM used in Reth) underwent an audit with [Guido Vranken](https://twitter.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). We will publish the results soon. diff --git a/book/run/private-testnet.md b/book/run/private-testnet.md index 3b24e9444..3a987e52c 100644 --- a/book/run/private-testnet.md +++ b/book/run/private-testnet.md @@ -13,85 +13,76 @@ To see all possible configurations and flags you can use, including metrics and Genesis data will be generated using this [genesis-generator](https://github.com/ethpandaops/ethereum-genesis-generator) to be used to bootstrap the EL and CL clients for each node. The end result will be a private testnet with nodes deployed as Docker containers in an ephemeral, isolated environment on your machine called an [enclave](https://docs.kurtosis.com/advanced-concepts/enclaves/). Read more about how the `ethereum-package` works by going [here](https://github.com/ethpandaops/ethereum-package/). 
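 
 Before defining the network, it's worth confirming that the prerequisites are actually available on your `PATH`. A quick sanity check using the standard version commands of the Kurtosis CLI and Docker (any recent release of either should work):
 
 ```bash
 kurtosis version
 docker --version
 ```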
 ### Step 1: Define the parameters and shape of your private network
-First, in your home directory, create a file with the name `network_params.json` with the following contents:
-```json
-{
-  "participants": [
-    {
-      "el_type": "reth",
-      "el_image": "ghcr.io/paradigmxyz/reth",
-      "cl_type": "lighthouse",
-      "cl_image": "sigp/lighthouse:latest",
-      "count": 1
-    },
-    {
-      "el_type": "reth",
-      "el_image": "ghcr.io/paradigmxyz/reth",
-      "cl_type": "teku",
-      "cl_image": "consensys/teku:latest",
-      "count": 1
-    }
-  ],
-  "launch_additional_services": false
-}
+First, in your home directory, create a file with the name `network_params.yaml` with the following contents:
+```yaml
+participants:
+  - el_type: reth
+    el_image: ghcr.io/paradigmxyz/reth
+    cl_type: lighthouse
+    cl_image: sigp/lighthouse:latest
+  - el_type: reth
+    el_image: ghcr.io/paradigmxyz/reth
+    cl_type: teku
+    cl_image: consensys/teku:latest
 ```

 > [!TIP]
-> If you would like to use a modified reth node, you can build an image locally with a custom tag. The tag can then be used in the `el_image` field in the `network_params.json` file.
+> If you would like to use a modified reth node, you can build an image locally with a custom tag. The tag can then be used in the `el_image` field in the `network_params.yaml` file.

 ### Step 2: Spin up your network

 Next, run the following command from your command line:

 ```bash
-kurtosis run github.com/ethpandaops/ethereum-package --args-file ~/network_params.json
+kurtosis run github.com/ethpandaops/ethereum-package --args-file ~/network_params.yaml --image-download always
 ```

 Kurtosis will spin up an [enclave](https://docs.kurtosis.com/advanced-concepts/enclaves/) (i.e. an ephemeral, isolated environment) and begin to configure and instantiate the nodes in your network. In the end, Kurtosis will print the services running in your enclave that form your private testnet, alongside all the container ports and files that were generated and used to start up the private testnet.
Here is a sample output: ```console -INFO[2023-08-21T18:22:18-04:00] ==================================================== -INFO[2023-08-21T18:22:18-04:00] || Created enclave: silky-swamp || -INFO[2023-08-21T18:22:18-04:00] ==================================================== -Name: silky-swamp -UUID: 3df730c66123 +INFO[2024-07-09T12:01:35+02:00] ======================================================== +INFO[2024-07-09T12:01:35+02:00] || Created enclave: silent-mountain || +INFO[2024-07-09T12:01:35+02:00] ======================================================== +Name: silent-mountain +UUID: cb5d0a7d0e7c Status: RUNNING -Creation Time: Mon, 21 Aug 2023 18:21:32 EDT +Creation Time: Tue, 09 Jul 2024 12:00:03 CEST +Flags: ========================================= Files Artifacts ========================================= UUID Name -c168ec4468f6 1-lighthouse-reth-0-63 -61f821e2cfd5 2-teku-reth-64-127 -e6f94fdac1b8 cl-genesis-data -e6b57828d099 el-genesis-data -1fb632573a2e genesis-generation-config-cl -b8917e497980 genesis-generation-config-el -6fd8c5be336a geth-prefunded-keys -6ab83723b4bd prysm-password +414a075a37aa 1-lighthouse-reth-0-63-0 +34d0b9ff906b 2-teku-reth-64-127-0 +dffa1bcd1da1 el_cl_genesis_data +fdb202429b26 final-genesis-timestamp +da0d9d24b340 genesis-el-cl-env-file +55c46a6555ad genesis_validators_root +ba79dbd109dd jwt_file +04948fd8b1e3 keymanager_file +538211b6b7d7 prysm-password +ed75fe7d5293 validator-ranges ========================================== User Services ========================================== -UUID Name Ports Status -95386198d3f9 cl-1-lighthouse-reth http: 4000/tcp -> http://127.0.0.1:64947 RUNNING - metrics: 5054/tcp -> http://127.0.0.1:64948 - tcp-discovery: 9000/tcp -> 127.0.0.1:64949 - udp-discovery: 9000/udp -> 127.0.0.1:60303 -5f5cc4cf639a cl-1-lighthouse-reth-validator http: 5042/tcp -> 127.0.0.1:64950 RUNNING - metrics: 5064/tcp -> http://127.0.0.1:64951 -27e1cfaddc72 cl-2-teku-reth http: 4000/tcp -> 127.0.0.1:64954 RUNNING - metrics: 8008/tcp -> 127.0.0.1:64952 - tcp-discovery: 9000/tcp -> 127.0.0.1:64953 - udp-discovery: 9000/udp -> 127.0.0.1:53749 -b454497fbec8 el-1-reth-lighthouse engine-rpc: 8551/tcp -> 127.0.0.1:64941 RUNNING - metrics: 9001/tcp -> 127.0.0.1:64937 - rpc: 8545/tcp -> 127.0.0.1:64939 - tcp-discovery: 30303/tcp -> 127.0.0.1:64938 - udp-discovery: 30303/udp -> 127.0.0.1:55861 - ws: 8546/tcp -> 127.0.0.1:64940 -03a2ef13c99b el-2-reth-teku engine-rpc: 8551/tcp -> 127.0.0.1:64945 RUNNING - metrics: 9001/tcp -> 127.0.0.1:64946 - rpc: 8545/tcp -> 127.0.0.1:64943 - tcp-discovery: 30303/tcp -> 127.0.0.1:64942 - udp-discovery: 30303/udp -> 127.0.0.1:64186 - ws: 8546/tcp -> 127.0.0.1:64944 -5c199b334236 prelaunch-data-generator-cl-genesis-data RUNNING -46829c4bd8b0 prelaunch-data-generator-el-genesis-data RUNNING +UUID Name Ports Status +0853f809c300 cl-1-lighthouse-reth http: 4000/tcp -> http://127.0.0.1:32811 RUNNING + metrics: 5054/tcp -> http://127.0.0.1:32812 + tcp-discovery: 9000/tcp -> 127.0.0.1:32813 + udp-discovery: 9000/udp -> 127.0.0.1:32776 +f81cd467efe3 cl-2-teku-reth http: 4000/tcp -> http://127.0.0.1:32814 RUNNING + metrics: 8008/tcp -> http://127.0.0.1:32815 + tcp-discovery: 9000/tcp -> 127.0.0.1:32816 + udp-discovery: 9000/udp -> 127.0.0.1:32777 +f21d5ca3061f el-1-reth-lighthouse engine-rpc: 8551/tcp -> 127.0.0.1:32803 RUNNING + metrics: 9001/tcp -> http://127.0.0.1:32804 + rpc: 8545/tcp -> 127.0.0.1:32801 + tcp-discovery: 30303/tcp -> 127.0.0.1:32805 + udp-discovery: 30303/udp -> 127.0.0.1:32774 + ws: 8546/tcp -> 
127.0.0.1:32802 +e234b3b4a440 el-2-reth-teku engine-rpc: 8551/tcp -> 127.0.0.1:32808 RUNNING + metrics: 9001/tcp -> http://127.0.0.1:32809 + rpc: 8545/tcp -> 127.0.0.1:32806 + tcp-discovery: 30303/tcp -> 127.0.0.1:32810 + udp-discovery: 30303/udp -> 127.0.0.1:32775 + ws: 8546/tcp -> 127.0.0.1:32807 +92dd5a0599dc validator-key-generation-cl-validator-keystore RUNNING +f0a7d5343346 vc-1-reth-lighthouse metrics: 8080/tcp -> http://127.0.0.1:32817 RUNNING ``` Great! You now have a private network with 2 full Ethereum nodes on your local machine over Docker - one that is a Reth/Lighthouse pair and another that is Reth/Teku. Check out the [Kurtosis docs](https://docs.kurtosis.com/cli) to learn about the various ways you can interact with and inspect your network. diff --git a/book/run/troubleshooting.md b/book/run/troubleshooting.md index 68a7cc29e..7368b6631 100644 --- a/book/run/troubleshooting.md +++ b/book/run/troubleshooting.md @@ -109,3 +109,71 @@ pthread_mutex_lock.c:438: __pthread_mutex_lock_full: Assertion `e != ESRCH || !r If you are using Docker, a possible solution is to run all database-accessing containers with `--pid=host` flag. For more information, check out the `Containers` section in the [libmdbx README](https://github.com/erthink/libmdbx#containers). + +## Hardware Performance Testing + +If you're experiencing degraded performance, it may be related to hardware issues. Below are some tools and tests you can run to evaluate your hardware performance. + +If your hardware performance is significantly lower than these reference numbers, it may explain degraded node performance. Consider upgrading your hardware or investigating potential issues with your current setup. + +### Disk Speed Testing with [IOzone](https://linux.die.net/man/1/iozone) + +1. Test disk speed: + ```bash + iozone -e -t1 -i0 -i2 -r1k -s1g /tmp + ``` + Reference numbers (on Latitude c3.large.x86): + + ```console + Children see throughput for 1 initial writers = 907733.81 kB/sec + Parent sees throughput for 1 initial writers = 907239.68 kB/sec + Children see throughput for 1 rewriters = 1765222.62 kB/sec + Parent sees throughput for 1 rewriters = 1763433.35 kB/sec + Children see throughput for 1 random readers = 1557497.38 kB/sec + Parent sees throughput for 1 random readers = 1554846.58 kB/sec + Children see throughput for 1 random writers = 984428.69 kB/sec + Parent sees throughput for 1 random writers = 983476.67 kB/sec + ``` +2. Test disk speed with memory-mapped files: + ```bash + iozone -B -G -e -t1 -i0 -i2 -r1k -s1g /tmp + ``` + Reference numbers (on Latitude c3.large.x86): + + ```console + Children see throughput for 1 initial writers = 56471.06 kB/sec + Parent sees throughput for 1 initial writers = 56365.14 kB/sec + Children see throughput for 1 rewriters = 241650.69 kB/sec + Parent sees throughput for 1 rewriters = 239067.96 kB/sec + Children see throughput for 1 random readers = 6833161.00 kB/sec + Parent sees throughput for 1 random readers = 5597659.65 kB/sec + Children see throughput for 1 random writers = 220248.53 kB/sec + Parent sees throughput for 1 random writers = 219112.26 kB/sec + ``` + +### RAM Speed and Health Testing + +1. Check RAM speed with [lshw](https://linux.die.net/man/1/lshw): + ```bash + sudo lshw -short -C memory + ``` + Look for the frequency in the output. 
Reference output: + + ```console + H/W path Device Class Description + ================================================================ + /0/24/0 memory 64GiB DIMM DDR4 Synchronous Registered (Buffered) 3200 MHz (0.3 ns) + /0/24/1 memory 64GiB DIMM DDR4 Synchronous Registered (Buffered) 3200 MHz (0.3 ns) + ... + ``` + +2. Test RAM health with [memtester](https://linux.die.net/man/8/memtester): + ```bash + sudo memtester 10G + ``` + This will take a while. You can test with a smaller amount first: + + ```bash + sudo memtester 1G 1 + ``` + All checks should report "ok". diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 5d73a1a78..7701d8ce8 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -15,8 +15,8 @@ use reth_evm::execute::BlockExecutorProvider; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ - BlockHash, BlockNumHash, BlockNumber, ForkBlock, GotExpected, Hardfork, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, StaticFileSegment, B256, U256, + BlockHash, BlockNumHash, BlockNumber, EthereumHardfork, ForkBlock, GotExpected, Receipt, + SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, B256, U256, }; use reth_provider::{ BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, @@ -65,8 +65,6 @@ pub struct BlockchainTree { externals: TreeExternals, /// Tree configuration config: BlockchainTreeConfig, - /// Prune modes. - prune_modes: Option, /// Broadcast channel for canon state changes notifications. canon_state_notification_sender: CanonStateNotificationSender, /// Metrics for sync stages. @@ -115,9 +113,9 @@ where /// storage space efficiently. It's important to validate this configuration to ensure it does /// not lead to unintended data loss. 
pub fn new( - externals: TreeExternals, + mut externals: TreeExternals, config: BlockchainTreeConfig, - prune_modes: Option, + prune_modes: PruneModes, ) -> ProviderResult { let max_reorg_depth = config.max_reorg_depth() as usize; // The size of the broadcast is twice the maximum reorg depth, because at maximum reorg @@ -125,6 +123,9 @@ where let (canon_state_notification_sender, _receiver) = tokio::sync::broadcast::channel(max_reorg_depth * 2); + // Set the prune modes argument, on the provider + externals.provider_factory = externals.provider_factory.with_prune_modes(prune_modes); + let last_canonical_hashes = externals.fetch_latest_canonical_hashes(config.num_of_canonical_hashes() as usize)?; @@ -138,7 +139,6 @@ where config.max_unconnected_blocks(), ), config, - prune_modes, canon_state_notification_sender, sync_metrics_tx: None, metrics: Default::default(), @@ -402,8 +402,9 @@ where .externals .provider_factory .chain_spec() - .fork(Hardfork::Paris) - .active_at_ttd(parent_td, U256::ZERO) + .fork(EthereumHardfork::Paris) + .active_at_ttd(parent_td, U256::ZERO) && + !self.externals.provider_factory.chain_spec().is_bsc() { return Err(BlockExecutionError::Validation(BlockValidationError::BlockPreMerge { hash: block.hash(), @@ -1043,8 +1044,9 @@ where .externals .provider_factory .chain_spec() - .fork(Hardfork::Paris) - .active_at_ttd(td, U256::ZERO) + .fork(EthereumHardfork::Paris) + .active_at_ttd(td, U256::ZERO) && + !self.externals.provider_factory.chain_spec().is_bsc() { return Err(CanonicalError::from(BlockValidationError::BlockPreMerge { hash: block_hash, @@ -1258,7 +1260,6 @@ where state, hashed_state, trie_updates, - self.prune_modes.as_ref(), ) .map_err(|e| CanonicalError::CanonicalCommit(e.to_string()))?; @@ -1424,7 +1425,6 @@ mod tests { provider .insert_historical_block( genesis.try_seal_with_senders().expect("invalid tx signature in genesis"), - None, ) .unwrap(); @@ -1545,7 +1545,6 @@ mod tests { SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default()) .try_seal_with_senders() .unwrap(), - None, ) .unwrap(); let account = Account { balance: initial_signer_balance, ..Default::default() }; @@ -1625,6 +1624,7 @@ mod tests { body: body.clone().into_iter().map(|tx| tx.into_signed()).collect(), ommers: Vec::new(), withdrawals: Some(Withdrawals::default()), + sidecars: None, requests: None, }, body.iter().map(|tx| tx.signer()).collect(), @@ -1647,7 +1647,7 @@ mod tests { let mut tree = BlockchainTree::new( TreeExternals::new(provider_factory, consensus, executor_provider), BlockchainTreeConfig::default(), - None, + PruneModes::default(), ) .expect("failed to create tree"); @@ -1727,7 +1727,8 @@ mod tests { // make tree let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config, PruneModes::default()) + .expect("failed to create tree"); // genesis block 10 is already canonical tree.make_canonical(B256::ZERO).unwrap(); @@ -1803,7 +1804,8 @@ mod tests { // make tree let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config, PruneModes::default()) + .expect("failed to create tree"); // genesis block 10 is already canonical tree.make_canonical(B256::ZERO).unwrap(); @@ -1867,7 +1869,7 @@ mod tests { ); let provider = tree.externals.provider_factory.provider().unwrap(); - let prefix_sets = 
exec5.hash_state_slow().construct_prefix_sets(); + let prefix_sets = exec5.hash_state_slow().construct_prefix_sets().freeze(); let state_root = StateRoot::from_tx(provider.tx_ref()).with_prefix_sets(prefix_sets).root().unwrap(); assert_eq!(state_root, block5.state_root); @@ -1888,7 +1890,8 @@ mod tests { // make tree let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config, PruneModes::default()) + .expect("failed to create tree"); // genesis block 10 is already canonical tree.make_canonical(B256::ZERO).unwrap(); @@ -1986,7 +1989,8 @@ mod tests { // make tree let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config, PruneModes::default()) + .expect("failed to create tree"); let mut canon_notif = tree.subscribe_canon_state(); // genesis block 10 is already canonical @@ -2379,7 +2383,8 @@ mod tests { // make tree let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config, None).expect("failed to create tree"); + let mut tree = BlockchainTree::new(externals, config, PruneModes::default()) + .expect("failed to create tree"); assert_eq!( tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), @@ -2399,8 +2404,8 @@ mod tests { tree.make_canonical(block2.hash()).unwrap(); // restart - let mut tree = - BlockchainTree::new(cloned_externals_1, config, None).expect("failed to create tree"); + let mut tree = BlockchainTree::new(cloned_externals_1, config, PruneModes::default()) + .expect("failed to create tree"); assert_eq!(tree.block_indices().last_finalized_block(), 0); let mut block1a = block1; @@ -2416,8 +2421,8 @@ mod tests { tree.finalize_block(block1a.number).unwrap(); // restart - let tree = - BlockchainTree::new(cloned_externals_2, config, None).expect("failed to create tree"); + let tree = BlockchainTree::new(cloned_externals_2, config, PruneModes::default()) + .expect("failed to create tree"); assert_eq!(tree.block_indices().last_finalized_block(), block1a.number); } diff --git a/crates/bsc/cli/Cargo.toml b/crates/bsc/cli/Cargo.toml new file mode 100644 index 000000000..0d2c9917d --- /dev/null +++ b/crates/bsc/cli/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "reth-bsc-cli" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] + +[features] +bsc = [] \ No newline at end of file diff --git a/crates/bsc/cli/src/lib.rs b/crates/bsc/cli/src/lib.rs new file mode 100644 index 000000000..004a6fc25 --- /dev/null +++ b/crates/bsc/cli/src/lib.rs @@ -0,0 +1,11 @@ +//! Reth CLI implementation. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `bsc` feature must be enabled to use this crate. 
+#![cfg(feature = "bsc")] diff --git a/crates/bsc/consensus/Cargo.toml b/crates/bsc/consensus/Cargo.toml index 46d9669c2..213f95e50 100644 --- a/crates/bsc/consensus/Cargo.toml +++ b/crates/bsc/consensus/Cargo.toml @@ -24,7 +24,6 @@ reth-network.workspace = true reth-engine-primitives.workspace = true reth-network-p2p.workspace = true reth-network-peers.workspace = true -reth-beacon-consensus.workspace = true # eth alloy-rlp.workspace = true @@ -62,4 +61,4 @@ rand = "0.8.5" bsc = [ "reth-primitives/bsc", "reth-consensus-common/bsc" -] \ No newline at end of file +] diff --git a/crates/bsc/consensus/src/abi.rs b/crates/bsc/consensus/src/abi.rs index c061a7da9..424743599 100644 --- a/crates/bsc/consensus/src/abi.rs +++ b/crates/bsc/consensus/src/abi.rs @@ -1,6 +1,7 @@ use crate::{Parlia, VoteAddress}; use alloy_dyn_abi::{DynSolValue, FunctionExt, JsonAbiExt}; use lazy_static::lazy_static; +use reth_chainspec::BscHardforks; use reth_primitives::{ system_contracts::{STAKE_HUB_CONTRACT, VALIDATOR_CONTRACT}, Address, BlockNumber, Bytes, U256, diff --git a/crates/bsc/consensus/src/lib.rs b/crates/bsc/consensus/src/lib.rs index 75e81f0fd..f97595007 100644 --- a/crates/bsc/consensus/src/lib.rs +++ b/crates/bsc/consensus/src/lib.rs @@ -11,7 +11,7 @@ use alloy_rlp::Decodable; use lazy_static::lazy_static; use lru::LruCache; use parking_lot::RwLock; -use reth_chainspec::ChainSpec; +use reth_chainspec::{BscHardforks, ChainSpec, EthereumHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_primitives::{ constants::EMPTY_MIX_HASH, @@ -26,25 +26,14 @@ use secp256k1::{ use sha3::{Digest, Keccak256}; use std::{ clone::Clone, - collections::{HashMap, VecDeque}, + collections::HashMap, fmt::{Debug, Formatter}, num::NonZeroUsize, sync::Arc, time::SystemTime, }; - -use tokio::sync::{ - mpsc::{UnboundedReceiver, UnboundedSender}, - Mutex, RwLockReadGuard, RwLockWriteGuard, -}; use tracing::{log::debug, trace}; -use reth_beacon_consensus::BeaconEngineMessage; -use reth_engine_primitives::EngineTypes; -use reth_network::{fetch::FetchClient, message::EngineMessage}; -use reth_primitives::{BlockBody, BlockHash, BlockHashOrNumber, BlockNumber}; -use reth_provider::BlockReaderIdExt; - mod util; pub use util::*; mod constants; @@ -65,14 +54,9 @@ use reth_consensus_common::validation::{ mod validation; pub use validation::{validate_4844_header_of_bsc, validate_block_post_execution}; -mod client; mod system_tx; -use client::*; -mod task; -use task::*; const RECOVERED_PROPOSER_CACHE_NUM: usize = 4096; -const STORAGE_CACHE_NUM: usize = 1000; lazy_static! { // recovered proposer cache map by block_number: proposer_address @@ -440,7 +424,7 @@ impl Parlia { SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs() } - fn validate_header_with_predicted_timestamp( + pub fn validate_header_with_predicted_timestamp( &self, header: &SealedHeader, predicted_timestamp: u64, @@ -590,202 +574,6 @@ impl Debug for Parlia { } } -/// Builder type for configuring the setup -#[derive(Debug)] -pub struct ParliaEngineBuilder { - chain_spec: Arc, - cfg: ParliaConfig, - storage: Storage, - to_engine: UnboundedSender>, - network_block_event_rx: Arc>>, - fetch_client: FetchClient, - client: Client, -} - -// === impl ParliaEngineBuilder === - -impl ParliaEngineBuilder -where - Client: BlockReaderIdExt + Clone + 'static, - Engine: EngineTypes + 'static, -{ - /// Creates a new builder instance to configure all parts. 
- pub fn new( - chain_spec: Arc, - cfg: ParliaConfig, - client: Client, - to_engine: UnboundedSender>, - network_block_event_rx: Arc>>, - fetch_client: FetchClient, - ) -> Self { - let latest_header = client - .latest_header() - .ok() - .flatten() - .unwrap_or_else(|| chain_spec.sealed_genesis_header()); - - Self { - chain_spec, - cfg, - client, - storage: Storage::new(latest_header), - to_engine, - network_block_event_rx, - fetch_client, - } - } - - /// Consumes the type and returns all components - #[track_caller] - pub fn build(self, start_engine_task: bool) -> ParliaClient { - let Self { - chain_spec, - cfg, - storage, - to_engine, - network_block_event_rx, - fetch_client, - client, - } = self; - let parlia_client = ParliaClient::new(storage.clone(), fetch_client); - if start_engine_task { - ParliaEngineTask::start( - chain_spec.clone(), - Parlia::new(chain_spec, cfg.clone()), - client, - to_engine, - network_block_event_rx, - storage, - parlia_client.clone(), - cfg.period, - ); - } - parlia_client - } -} - -/// In memory storage -#[derive(Debug, Clone)] -pub(crate) struct Storage { - inner: Arc>, -} - -// == impl Storage === - -impl Storage { - /// Initializes the [Storage] with the given best block. This should be initialized with the - /// highest block in the chain, if there is a chain already stored on-disk. - fn new(best_block: SealedHeader) -> Self { - let mut storage = StorageInner { - best_hash: best_block.hash(), - best_block: best_block.number, - best_header: best_block.clone(), - headers: LimitedHashSet::new(STORAGE_CACHE_NUM), - hash_to_number: LimitedHashSet::new(STORAGE_CACHE_NUM), - bodies: LimitedHashSet::new(STORAGE_CACHE_NUM), - }; - storage.headers.put(best_block.number, best_block.clone()); - storage.hash_to_number.put(best_block.hash(), best_block.number); - Self { inner: Arc::new(tokio::sync::RwLock::new(storage)) } - } - - /// Returns the write lock of the storage - pub(crate) async fn write(&self) -> RwLockWriteGuard<'_, StorageInner> { - self.inner.write().await - } - - /// Returns the read lock of the storage - pub(crate) async fn read(&self) -> RwLockReadGuard<'_, StorageInner> { - self.inner.read().await - } -} - -/// In-memory storage for the chain the parlia engine task cache. -#[derive(Debug)] -pub(crate) struct StorageInner { - /// Headers buffered for download. - pub(crate) headers: LimitedHashSet, - /// A mapping between block hash and number. - pub(crate) hash_to_number: LimitedHashSet, - /// Bodies buffered for download. - pub(crate) bodies: LimitedHashSet, - /// Tracks best block - pub(crate) best_block: u64, - /// Tracks hash of best block - pub(crate) best_hash: B256, - /// The best header in the chain - pub(crate) best_header: SealedHeader, -} - -// === impl StorageInner === - -impl StorageInner { - /// Returns the matching header if it exists. 
- pub(crate) fn header_by_hash_or_number( - &self, - hash_or_num: BlockHashOrNumber, - ) -> Option { - let num = match hash_or_num { - BlockHashOrNumber::Hash(hash) => self.hash_to_number.get(&hash).copied()?, - BlockHashOrNumber::Number(num) => num, - }; - self.headers.get(&num).cloned() - } - - /// Inserts a new header+body pair - pub(crate) fn insert_new_block(&mut self, header: SealedHeader, body: BlockBody) { - self.best_hash = header.hash(); - self.best_block = header.number; - self.best_header = header.clone(); - - trace!(target: "parlia::client", num=self.best_block, hash=?self.best_hash, "inserting new block"); - self.headers.put(header.number, header); - self.bodies.put(self.best_hash, body); - self.hash_to_number.put(self.best_hash, self.best_block); - } - - /// Inserts a new header - pub(crate) fn insert_new_header(&mut self, header: SealedHeader) { - self.best_hash = header.hash(); - self.best_block = header.number; - self.best_header = header.clone(); - - trace!(target: "parlia::client", num=self.best_block, hash=?self.best_hash, "inserting new header"); - self.headers.put(header.number, header); - self.hash_to_number.put(self.best_hash, self.best_block); - } -} - -#[derive(Debug)] -struct LimitedHashSet { - map: HashMap, - queue: VecDeque, - capacity: usize, -} - -impl LimitedHashSet -where - K: std::hash::Hash + Eq + Clone, -{ - fn new(capacity: usize) -> Self { - Self { map: HashMap::new(), queue: VecDeque::new(), capacity } - } - - fn put(&mut self, key: K, value: V) { - if self.map.len() >= self.capacity { - if let Some(old_key) = self.queue.pop_front() { - self.map.remove(&old_key); - } - } - self.map.insert(key.clone(), value); - self.queue.push_back(key); - } - - fn get(&self, key: &K) -> Option<&V> { - self.map.get(key) - } -} - #[cfg(test)] mod tests { use super::*; @@ -797,69 +585,4 @@ mod tests { assert_eq!(parlia.epoch(), 200); assert_eq!(parlia.period(), 3); } - - #[test] - fn test_inner_storage() { - let default_block = Header::default().seal_slow(); - let mut storage = StorageInner { - best_hash: default_block.hash(), - best_block: default_block.number, - best_header: default_block.clone(), - headers: LimitedHashSet::new(10), - hash_to_number: LimitedHashSet::new(10), - bodies: LimitedHashSet::new(10), - }; - storage.headers.put(default_block.number, default_block.clone()); - storage.hash_to_number.put(default_block.hash(), default_block.number); - - let block = Header::default().seal_slow(); - storage.insert_new_block(block.clone(), BlockBody::default()); - assert_eq!(storage.best_block, block.number); - assert_eq!(storage.best_hash, block.hash()); - assert_eq!(storage.best_header, block); - assert_eq!(storage.headers.get(&block.number), Some(&block)); - assert_eq!(storage.hash_to_number.get(&block.hash()), Some(&block.number)); - assert_eq!(storage.bodies.get(&block.hash()), Some(&BlockBody::default())); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Hash(block.hash())), - Some(block.clone()) - ); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Number(block.number)), - Some(block.clone()) - ); - assert_eq!(storage.best_block, block.number); - assert_eq!(storage.best_hash, block.hash()); - assert_eq!(storage.best_header, block); - - let header = Header::default().seal_slow(); - storage.insert_new_header(header.clone()); - assert_eq!(storage.best_block, header.number); - assert_eq!(storage.best_hash, header.hash()); - assert_eq!(storage.best_header, header); - assert_eq!(storage.headers.get(&header.number), 
Some(&header)); - assert_eq!(storage.hash_to_number.get(&header.hash()), Some(&header.number)); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Hash(header.hash())), - Some(header.clone()) - ); - assert_eq!( - storage.header_by_hash_or_number(BlockHashOrNumber::Number(header.number)), - Some(header.clone()) - ); - assert_eq!(storage.best_block, header.number); - assert_eq!(storage.best_hash, header.hash()); - assert_eq!(storage.best_header, header); - } - - #[test] - fn test_limited_hash_set() { - let mut set = LimitedHashSet::new(2); - set.put(1, 1); - set.put(2, 2); - set.put(3, 3); - assert_eq!(set.get(&1), None); - assert_eq!(set.get(&2), Some(&2)); - assert_eq!(set.get(&3), Some(&3)); - } } diff --git a/crates/bsc/consensus/src/validation.rs b/crates/bsc/consensus/src/validation.rs index 1f9d9734c..5a7cab943 100644 --- a/crates/bsc/consensus/src/validation.rs +++ b/crates/bsc/consensus/src/validation.rs @@ -1,4 +1,4 @@ -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; use reth_primitives::{ constants::eip4844::{DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK}, diff --git a/crates/bsc/engine/Cargo.toml b/crates/bsc/engine/Cargo.toml new file mode 100644 index 000000000..574fb6394 --- /dev/null +++ b/crates/bsc/engine/Cargo.toml @@ -0,0 +1,72 @@ +[package] +name = "reth-bsc-engine" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-chainspec.workspace = true +reth-codecs.workspace = true +reth-consensus-common.workspace = true +reth-consensus.workspace = true +reth-db-api.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-rpc-types.workspace = true +reth-network.workspace = true +reth-engine-primitives.workspace = true +reth-network-p2p.workspace = true +reth-network-peers.workspace = true +reth-beacon-consensus.workspace = true +reth-bsc-consensus.workspace = true +reth-evm-bsc.workspace = true +reth-evm.workspace = true +reth-primitives-traits.workspace = true + +# eth +alloy-rlp.workspace = true +alloy-dyn-abi.workspace = true +alloy-json-abi.workspace = true + +# crypto +secp256k1.workspace = true +sha3 = "0.10.8" +blst = "0.3.11" + +# misc +serde_json.workspace = true +parking_lot.workspace = true +tracing.workspace = true +lazy_static = "1.4.0" +lru = "0.12" +bitset = "0.1.2" +thiserror = "1.0.59" +serde = { version = "1.0.203", features = ["derive"] } +bytes = "1.6.0" +serde_cbor = "0.11.2" + +# async +futures-util.workspace = true +tokio = { workspace = true, features = ["sync", "time"] } +tokio-stream.workspace = true + +[dev-dependencies] +mockall = "0.12" +reth-provider = { workspace = true, features = ["test-utils"] } +rand = "0.8.5" + +[features] +bsc = [ + "reth-primitives/bsc", + "reth-consensus-common/bsc", + "reth-bsc-consensus/bsc", + "reth-evm-bsc/bsc", + "reth-beacon-consensus/bsc", +] diff --git a/crates/bsc/consensus/src/client.rs b/crates/bsc/engine/src/client.rs similarity index 95% rename from crates/bsc/consensus/src/client.rs rename to crates/bsc/engine/src/client.rs index 2bc69b0bb..27643fb25 100644 --- a/crates/bsc/consensus/src/client.rs +++ b/crates/bsc/engine/src/client.rs @@ -1,14 +1,14 @@ -//! This includes download client implementations for auto sealing miners. +//! This includes download client implementations for parlia consensus. 
use crate::Storage; use reth_network::FetchClient; use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, download::DownloadClient, - headers::client::{HeadersClient, HeadersFut, HeadersRequest}, + headers::client::{HeadersClient, HeadersDirection, HeadersFut, HeadersRequest}, priority::Priority, }; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, Header, HeadersDirection, SealedHeader, B256}; +use reth_primitives::{BlockBody, Header, SealedHeader, B256}; use std::fmt::Debug; use tracing::trace; diff --git a/crates/bsc/engine/src/lib.rs b/crates/bsc/engine/src/lib.rs new file mode 100644 index 000000000..37dcfb75c --- /dev/null +++ b/crates/bsc/engine/src/lib.rs @@ -0,0 +1,325 @@ +//! Bsc Tasks implementation. + +#![allow(missing_docs)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `bsc` feature must be enabled to use this crate. +#![cfg(feature = "bsc")] + +use reth_bsc_consensus::Parlia; +use reth_chainspec::ChainSpec; +use reth_primitives::{parlia::ParliaConfig, SealedHeader, B256}; +use std::{ + clone::Clone, + collections::{HashMap, VecDeque}, + fmt::Debug, + sync::Arc, +}; + +use tokio::sync::{ + mpsc::{UnboundedReceiver, UnboundedSender}, + Mutex, RwLockReadGuard, RwLockWriteGuard, +}; +use tracing::trace; + +use reth_beacon_consensus::BeaconEngineMessage; +use reth_engine_primitives::EngineTypes; +use reth_evm_bsc::SnapshotReader; +use reth_network::{fetch::FetchClient, message::EngineMessage}; +use reth_primitives::{BlockBody, BlockHash, BlockHashOrNumber, BlockNumber}; +use reth_provider::{BlockReaderIdExt, CanonChainTracker, ParliaProvider}; + +mod client; +use client::*; + +mod task; +use task::*; + +const STORAGE_CACHE_NUM: usize = 1000; + +/// Builder type for configuring the setup +#[derive(Debug)] +pub struct ParliaEngineBuilder { + chain_spec: Arc, + cfg: ParliaConfig, + storage: Storage, + to_engine: UnboundedSender>, + network_block_event_rx: Arc>>, + fetch_client: FetchClient, + provider: Provider, + parlia_provider: P, +} + +// === impl ParliaEngineBuilder === + +impl ParliaEngineBuilder +where + Provider: BlockReaderIdExt + CanonChainTracker + Clone + 'static, + Engine: EngineTypes + 'static, + P: ParliaProvider + 'static, +{ + /// Creates a new builder instance to configure all parts. 
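// A minimal construction sketch (hedged; it assumes values named `provider`,
// `parlia_provider`, `to_engine`, `network_block_event_rx` and `fetch_client`
// are in scope and satisfy the bounds above):
//
//     let parlia_client = ParliaEngineBuilder::new(
//         chain_spec, cfg, provider, parlia_provider,
//         to_engine, network_block_event_rx, fetch_client,
//     )
//     .build(true); // `true` also spawns the background ParliaEngineTask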
+ pub fn new( + chain_spec: Arc, + cfg: ParliaConfig, + provider: Provider, + parlia_provider: P, + to_engine: UnboundedSender>, + network_block_event_rx: Arc>>, + fetch_client: FetchClient, + ) -> Self { + let latest_header = provider + .latest_header() + .ok() + .flatten() + .unwrap_or_else(|| chain_spec.sealed_genesis_header()); + + Self { + chain_spec, + cfg, + provider, + parlia_provider, + storage: Storage::new(latest_header), + to_engine, + network_block_event_rx, + fetch_client, + } + } + + /// Consumes the type and returns all components + #[track_caller] + pub fn build(self, start_engine_task: bool) -> ParliaClient { + let Self { + chain_spec, + cfg, + storage, + to_engine, + network_block_event_rx, + fetch_client, + provider, + parlia_provider, + } = self; + let parlia_client = ParliaClient::new(storage.clone(), fetch_client); + let parlia = Parlia::new(chain_spec.clone(), cfg.clone()); + if start_engine_task { + ParliaEngineTask::start( + chain_spec, + parlia.clone(), + provider, + SnapshotReader::new(Arc::new(parlia_provider), Arc::new(parlia)), + to_engine, + network_block_event_rx, + storage, + parlia_client.clone(), + cfg.period, + ); + } + parlia_client + } +} + +/// In memory storage +#[derive(Debug, Clone)] +pub(crate) struct Storage { + inner: Arc>, +} + +// == impl Storage === + +impl Storage { + /// Initializes the [Storage] with the given best block. This should be initialized with the + /// highest block in the chain, if there is a chain already stored on-disk. + fn new(best_block: SealedHeader) -> Self { + let mut storage = StorageInner { + best_hash: best_block.hash(), + best_block: best_block.number, + best_header: best_block.clone(), + headers: LimitedHashSet::new(STORAGE_CACHE_NUM), + hash_to_number: LimitedHashSet::new(STORAGE_CACHE_NUM), + bodies: LimitedHashSet::new(STORAGE_CACHE_NUM), + best_finalized_hash: B256::default(), + best_safe_hash: B256::default(), + }; + storage.headers.put(best_block.number, best_block.clone()); + storage.hash_to_number.put(best_block.hash(), best_block.number); + Self { inner: Arc::new(tokio::sync::RwLock::new(storage)) } + } + + /// Returns the write lock of the storage + pub(crate) async fn write(&self) -> RwLockWriteGuard<'_, StorageInner> { + self.inner.write().await + } + + /// Returns the read lock of the storage + pub(crate) async fn read(&self) -> RwLockReadGuard<'_, StorageInner> { + self.inner.read().await + } +} + +/// In-memory storage for the chain the parlia engine task cache. +#[derive(Debug)] +pub(crate) struct StorageInner { + /// Headers buffered for download. + pub(crate) headers: LimitedHashSet, + /// A mapping between block hash and number. + pub(crate) hash_to_number: LimitedHashSet, + /// Bodies buffered for download. + pub(crate) bodies: LimitedHashSet, + /// Tracks best block + pub(crate) best_block: u64, + /// Tracks hash of best block + pub(crate) best_hash: B256, + /// The best header in the chain + pub(crate) best_header: SealedHeader, + /// Tracks hash of best finalized block + pub(crate) best_finalized_hash: B256, + /// Tracks hash of best safe block + pub(crate) best_safe_hash: B256, +} + +// === impl StorageInner === + +impl StorageInner { + /// Returns the matching header if it exists. 
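// For example (hedged, crate-internal usage): a cached header can be looked up
// by number or hash through the storage read guard:
//     let header = storage.read().await.header_by_hash_or_number(BlockHashOrNumber::Number(n));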
+ pub(crate) fn header_by_hash_or_number( + &self, + hash_or_num: BlockHashOrNumber, + ) -> Option { + let num = match hash_or_num { + BlockHashOrNumber::Hash(hash) => self.hash_to_number.get(&hash).copied()?, + BlockHashOrNumber::Number(num) => num, + }; + self.headers.get(&num).cloned() + } + + /// Inserts a new header+body pair + pub(crate) fn insert_new_block(&mut self, header: SealedHeader, body: BlockBody) { + self.best_hash = header.hash(); + self.best_block = header.number; + self.best_header = header.clone(); + + trace!(target: "parlia::client", num=self.best_block, hash=?self.best_hash, "inserting new block"); + self.headers.put(header.number, header); + self.bodies.put(self.best_hash, body); + self.hash_to_number.put(self.best_hash, self.best_block); + } + + /// Inserts a new header + pub(crate) fn insert_new_header(&mut self, header: SealedHeader) { + self.best_hash = header.hash(); + self.best_block = header.number; + self.best_header = header.clone(); + + trace!(target: "parlia::client", num=self.best_block, hash=?self.best_hash, "inserting new header"); + self.headers.put(header.number, header); + self.hash_to_number.put(self.best_hash, self.best_block); + } + + /// Inserts new finalized and safe hash + pub(crate) fn insert_finalized_and_safe_hash(&mut self, finalized: B256, safe: B256) { + self.best_finalized_hash = finalized; + self.best_safe_hash = safe; + } +} + +#[derive(Debug)] +struct LimitedHashSet { + map: HashMap, + queue: VecDeque, + capacity: usize, +} + +impl LimitedHashSet +where + K: std::hash::Hash + Eq + Clone, +{ + fn new(capacity: usize) -> Self { + Self { map: HashMap::new(), queue: VecDeque::new(), capacity } + } + + fn put(&mut self, key: K, value: V) { + if self.map.len() >= self.capacity { + if let Some(old_key) = self.queue.pop_front() { + self.map.remove(&old_key); + } + } + self.map.insert(key.clone(), value); + self.queue.push_back(key); + } + + fn get(&self, key: &K) -> Option<&V> { + self.map.get(key) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_primitives::Header; + + #[test] + fn test_inner_storage() { + let default_block = Header::default().seal_slow(); + let mut storage = StorageInner { + best_hash: default_block.hash(), + best_block: default_block.number, + best_header: default_block.clone(), + headers: LimitedHashSet::new(10), + hash_to_number: LimitedHashSet::new(10), + bodies: LimitedHashSet::new(10), + best_finalized_hash: B256::default(), + best_safe_hash: B256::default(), + }; + storage.headers.put(default_block.number, default_block.clone()); + storage.hash_to_number.put(default_block.hash(), default_block.number); + + let block = Header::default().seal_slow(); + storage.insert_new_block(block.clone(), BlockBody::default()); + assert_eq!(storage.best_block, block.number); + assert_eq!(storage.best_hash, block.hash()); + assert_eq!(storage.best_header, block); + assert_eq!(storage.headers.get(&block.number), Some(&block)); + assert_eq!(storage.hash_to_number.get(&block.hash()), Some(&block.number)); + assert_eq!(storage.bodies.get(&block.hash()), Some(&BlockBody::default())); + assert_eq!( + storage.header_by_hash_or_number(BlockHashOrNumber::Hash(block.hash())), + Some(block.clone()) + ); + assert_eq!( + storage.header_by_hash_or_number(BlockHashOrNumber::Number(block.number)), + Some(block.clone()) + ); + assert_eq!(storage.best_block, block.number); + assert_eq!(storage.best_hash, block.hash()); + assert_eq!(storage.best_header, block); + + let header = Header::default().seal_slow(); + 
storage.insert_new_header(header.clone()); + assert_eq!(storage.best_block, header.number); + assert_eq!(storage.best_hash, header.hash()); + assert_eq!(storage.best_header, header); + assert_eq!(storage.headers.get(&header.number), Some(&header)); + assert_eq!(storage.hash_to_number.get(&header.hash()), Some(&header.number)); + assert_eq!( + storage.header_by_hash_or_number(BlockHashOrNumber::Hash(header.hash())), + Some(header.clone()) + ); + assert_eq!( + storage.header_by_hash_or_number(BlockHashOrNumber::Number(header.number)), + Some(header.clone()) + ); + assert_eq!(storage.best_block, header.number); + assert_eq!(storage.best_hash, header.hash()); + assert_eq!(storage.best_header, header); + } + + #[test] + fn test_limited_hash_set() { + let mut set = LimitedHashSet::new(2); + set.put(1, 1); + set.put(2, 2); + set.put(3, 3); + assert_eq!(set.get(&1), None); + assert_eq!(set.get(&2), Some(&2)); + assert_eq!(set.get(&3), Some(&3)); + } +} diff --git a/crates/bsc/consensus/src/task.rs b/crates/bsc/engine/src/task.rs similarity index 57% rename from crates/bsc/consensus/src/task.rs rename to crates/bsc/engine/src/task.rs index 6e4ad594f..e2bffe883 100644 --- a/crates/bsc/consensus/src/task.rs +++ b/crates/bsc/engine/src/task.rs @@ -1,11 +1,17 @@ -use crate::{client::ParliaClient, Parlia, Storage}; +use crate::{client::ParliaClient, Storage}; use reth_beacon_consensus::{BeaconEngineMessage, ForkchoiceStatus}; +use reth_bsc_consensus::Parlia; use reth_chainspec::ChainSpec; use reth_engine_primitives::EngineTypes; +use reth_evm_bsc::SnapshotReader; use reth_network::message::EngineMessage; -use reth_network_p2p::{headers::client::HeadersClient, priority::Priority}; -use reth_primitives::{Block, BlockBody, BlockHashOrNumber, B256}; -use reth_provider::BlockReaderIdExt; +use reth_network_p2p::{ + headers::client::{HeadersClient, HeadersDirection, HeadersRequest}, + priority::Priority, +}; +use reth_primitives::{Block, BlockBody, BlockHashOrNumber, SealedHeader, B256}; +use reth_primitives_traits::constants::EPOCH_SLOTS; +use reth_provider::{BlockReaderIdExt, CanonChainTracker, ParliaProvider}; use reth_rpc_types::engine::ForkchoiceState; use std::{ clone::Clone, @@ -29,13 +35,14 @@ use tracing::{debug, error, info, trace}; #[derive(Debug)] enum ForkChoiceMessage { /// Broadcast new hash. 
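// Note on the variant change below: instead of a bare block hash, the message now
// carries the full sealed header, the latest trusted header, and a `pipeline_sync`
// flag, so the fork-choice notifier can decide whether to populate the safe and
// finalized fields of the next `ForkchoiceState`.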
- NewBlock(HashEvent), + NewHeader(NewHeaderEvent), } -/// internal message to beacon engine +/// internal message to notify the engine of a new block #[derive(Debug, Clone)] -struct HashEvent { - /// Hash of the block - hash: B256, +struct NewHeaderEvent { + header: SealedHeader, + trusted_header: SealedHeader, + pipeline_sync: bool, } /// A struct that contains a block hash or number and a block @@ -47,13 +54,19 @@ struct BlockInfo { } /// A Future that listens for new headers and puts into storage -pub(crate) struct ParliaEngineTask { +pub(crate) struct ParliaEngineTask< + Engine: EngineTypes, + Provider: BlockReaderIdExt + CanonChainTracker, + P: ParliaProvider, +> { /// The configured chain spec chain_spec: Arc, /// The coneensus instance consensus: Parlia, - /// The client used to read the block and header from the inserted chain - client: Client, + /// The provider used to read the block and header from the inserted chain + provider: Provider, + /// The snapshot reader used to read the snapshot + snapshot_reader: Arc>, /// The client used to fetch headers block_fetcher: ParliaClient, /// The interval of the block producing @@ -68,18 +81,26 @@ pub(crate) struct ParliaEngineTask, /// The channel to receive fork choice messages fork_choice_rx: Arc>>, + /// The channel to send chain tracker messages + chain_tracker_tx: UnboundedSender, + /// The channel to receive chain tracker messages + chain_tracker_rx: Arc>>, } // === impl ParliaEngineTask === -impl - ParliaEngineTask +impl< + Engine: EngineTypes + 'static, + Provider: BlockReaderIdExt + CanonChainTracker + Clone + 'static, + P: ParliaProvider + 'static, + > ParliaEngineTask { /// Creates a new instance of the task #[allow(clippy::too_many_arguments)] pub(crate) fn start( chain_spec: Arc, consensus: Parlia, - client: Client, + provider: Provider, + snapshot_reader: SnapshotReader
<P>
, to_engine: UnboundedSender>, network_block_event_rx: Arc>>, storage: Storage, @@ -87,10 +108,12 @@ impl block_interval: u64, ) { let (fork_choice_tx, fork_choice_rx) = mpsc::unbounded_channel(); + let (chain_tracker_tx, chain_tracker_rx) = mpsc::unbounded_channel(); let this = Self { chain_spec, consensus, - client, + provider, + snapshot_reader: Arc::new(snapshot_reader), to_engine, network_block_event_rx, storage, @@ -98,10 +121,13 @@ impl block_interval, fork_choice_tx, fork_choice_rx: Arc::new(Mutex::new(fork_choice_rx)), + chain_tracker_tx, + chain_tracker_rx: Arc::new(Mutex::new(chain_tracker_rx)), }; this.start_block_event_listening(); this.start_fork_choice_update_notifier(); + this.start_chain_tracker_notifier(); } /// Start listening to the network block event @@ -111,10 +137,11 @@ impl let mut interval = interval(Duration::from_secs(block_interval)); let chain_spec = self.chain_spec.clone(); let storage = self.storage.clone(); - let client = self.client.clone(); + let client = self.provider.clone(); let block_fetcher = self.block_fetcher.clone(); let consensus = self.consensus.clone(); let fork_choice_tx = self.fork_choice_tx.clone(); + let chain_tracker_tx = self.chain_tracker_tx.clone(); let fetch_header_timeout_duration = Duration::from_secs(block_interval); tokio::spawn(async move { @@ -225,7 +252,7 @@ impl // header number the timestamp of latest header should be bigger // than the predicted timestamp and less than the current timestamp. let predicted_timestamp = trusted_header.timestamp + - block_interval * (latest_header.number - trusted_header.number); + block_interval * (latest_header.number - 1 - trusted_header.number); let sealed_header = latest_header.clone().seal_slow(); let is_valid_header = match consensus .validate_header_with_predicted_timestamp(&sealed_header, predicted_timestamp) @@ -241,21 +268,84 @@ impl continue }; + let mut disconnected_headers = Vec::new(); + disconnected_headers.push(sealed_header.clone()); + let pipeline_sync = (trusted_header.number + EPOCH_SLOTS) < sealed_header.number; + if !pipeline_sync && (sealed_header.number - 1) > trusted_header.number { + let fetch_headers_result = match timeout( + fetch_header_timeout_duration, + block_fetcher.get_headers(HeadersRequest { + start: BlockHashOrNumber::Hash(sealed_header.parent_hash), + limit: (sealed_header.number - 1) - trusted_header.number, + direction: HeadersDirection::Falling, + }), + ) + .await + { + Ok(result) => result, + Err(_) => { + trace!(target: "consensus::parlia", "Fetch header timeout"); + continue + } + }; + if fetch_headers_result.is_err() { + trace!(target: "consensus::parlia", "Failed to fetch header"); + continue + } + + let headers = fetch_headers_result.unwrap().into_data(); + for header in headers { + let sealed_header = header.clone().seal_slow(); + let predicted_timestamp = trusted_header.timestamp + + block_interval * (sealed_header.number - 1 - trusted_header.number); + if consensus + .validate_header_with_predicted_timestamp( + &sealed_header, + predicted_timestamp, + ) + .is_err() + { + trace!(target: "consensus::parlia", "Invalid header"); + continue + } + disconnected_headers.push(sealed_header.clone()); + } + }; + // cache header and block let mut storage = storage.write().await; - storage.insert_new_header(sealed_header.clone()); if info.block.is_some() { storage.insert_new_block( sealed_header.clone(), BlockBody::from(info.block.clone().unwrap()), ); } - drop(storage); - let result = fork_choice_tx - .send(ForkChoiceMessage::NewBlock(HashEvent { hash: 
sealed_header.hash() })); + + for header in disconnected_headers { + storage.insert_new_header(header.clone()); + let result = + fork_choice_tx.send(ForkChoiceMessage::NewHeader(NewHeaderEvent { + header: header.clone(), + // if the pipeline sync is true, the fork choice will not use the safe + // and finalized hash. + // this can make Block Sync Engine to use pipeline sync mode. + pipeline_sync, + trusted_header: trusted_header.clone(), + })); + if result.is_err() { + error!(target: "consensus::parlia", "Failed to send new block event to fork choice"); + } + } + + let result = chain_tracker_tx.send(ForkChoiceMessage::NewHeader(NewHeaderEvent { + header: sealed_header.clone(), + pipeline_sync, + trusted_header: trusted_header.clone(), + })); if result.is_err() { - error!(target: "consensus::parlia", "Failed to send new block event to fork choice"); + error!(target: "consensus::parlia", "Failed to send new block event to chain tracker"); } + drop(storage); } }); info!(target: "consensus::parlia", "started listening to network block event") @@ -264,6 +354,7 @@ impl fn start_fork_choice_update_notifier(&self) { let fork_choice_rx = self.fork_choice_rx.clone(); let to_engine = self.to_engine.clone(); + let storage = self.storage.clone(); tokio::spawn(async move { loop { let mut fork_choice_rx_guard = fork_choice_rx.lock().await; @@ -273,15 +364,24 @@ impl continue; } match msg.unwrap() { - ForkChoiceMessage::NewBlock(event) => { + ForkChoiceMessage::NewHeader(event) => { // notify parlia engine - let state = ForkchoiceState { - head_block_hash: event.hash, - // safe(justified) and finalized hash will be determined in the parlia consensus engine and stored in the snapshot after the block sync + let new_header = event.header; + let storage = storage.read().await; + let safe_hash = storage.best_safe_hash; + let finalized_hash = storage.best_finalized_hash; + drop(storage); + + // safe(justified) and finalized hash will be determined in the parlia consensus engine and stored in the snapshot after the block sync + let mut state = ForkchoiceState { + head_block_hash: new_header.hash(), safe_block_hash: B256::ZERO, finalized_block_hash: B256::ZERO, }; - + if !event.pipeline_sync { + state.safe_block_hash = safe_hash; + state.finalized_block_hash = finalized_hash; + } // send the new update to the engine, this will trigger the engine // to download and execute the block we just inserted @@ -297,9 +397,10 @@ impl Ok(result) => result, Err(err)=> { error!(target: "consensus::parlia", ?err, "Fork choice update response failed"); - continue + break } }; + match rx_result { Ok(fcu_response) => { match fcu_response.forkchoice_status() { @@ -308,14 +409,17 @@ impl } ForkchoiceStatus::Invalid => { error!(target: "consensus::parlia", ?fcu_response, "Forkchoice update returned invalid response"); + continue } ForkchoiceStatus::Syncing => { - debug!(target: "consensus::parlia", ?fcu_response, "Forkchoice update returned SYNCING, waiting for VALID"); + trace!(target: "consensus::parlia", ?fcu_response, "Forkchoice update returned SYNCING, waiting for VALID"); + continue } } } Err(err) => { error!(target: "consensus::parlia", %err, "Parlia fork choice update failed"); + continue } } } @@ -330,10 +434,79 @@ impl }); info!(target: "consensus::parlia", "started fork choice notifier") } + + fn start_chain_tracker_notifier(&self) { + let chain_tracker_rx = self.chain_tracker_rx.clone(); + let snapshot_reader = self.snapshot_reader.clone(); + let provider = self.provider.clone(); + let storage = self.storage.clone(); + 
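// The loop spawned below reads the parlia snapshot for each trusted header, caches
// `vote_data.source_hash`/`target_hash` as the finalized/safe hashes for the next
// forkchoice update, and mirrors both headers into the `CanonChainTracker` so the
// RPC layer can answer `finalized` and `safe` block tag queries.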
+ tokio::spawn(async move { + loop { + let mut chain_tracker_rx_guard = chain_tracker_rx.lock().await; + tokio::select! { + msg = chain_tracker_rx_guard.recv() => { + if msg.is_none() { + continue; + } + match msg.unwrap() { + ForkChoiceMessage::NewHeader(event) => { + let new_header = event.trusted_header; + + let snap = match snapshot_reader.snapshot(&new_header, None) { + Ok(snap) => snap, + Err(err) => { + error!(target: "consensus::parlia", %err, "Snapshot not found"); + continue + } + }; + // safe finalized and safe hash for next round fcu + let finalized_hash = snap.vote_data.source_hash; + let safe_hash = snap.vote_data.target_hash; + let mut storage = storage.write().await; + storage.insert_finalized_and_safe_hash(finalized_hash, safe_hash); + drop(storage); + + // notify chain tracker to help rpc module can know the finalized and safe hash + match provider.sealed_header(snap.vote_data.source_number) { + Ok(header) => { + if let Some(sealed_header) = header { + provider.set_finalized(sealed_header.clone()); + } + } + Err(err) => { + error!(target: "consensus::parlia", %err, "Failed to get source header"); + } + } + + match provider.sealed_header(snap.vote_data.target_number) { + Ok(header) => { + if let Some(sealed_header) = header { + provider.set_safe(sealed_header.clone()); + } + } + Err(err) => { + error!(target: "consensus::parlia", %err, "Failed to get target header"); + } + } + } + + } + } + _ = signal::ctrl_c() => { + info!(target: "consensus::parlia", "chain tracker notifier shutting down..."); + return + }, + } + } + }); + + info!(target: "consensus::parlia", "started chain tracker notifier") + } } -impl fmt::Debug - for ParliaEngineTask +impl + fmt::Debug for ParliaEngineTask { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("chain_spec") diff --git a/crates/bsc/evm/Cargo.toml b/crates/bsc/evm/Cargo.toml index 53c59ce1c..6446157a6 100644 --- a/crates/bsc/evm/Cargo.toml +++ b/crates/bsc/evm/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # Reth reth-chainspec.workspace = true +reth-ethereum-forks.workspace = true reth-errors.workspace = true reth-evm.workspace = true reth-primitives.workspace = true @@ -42,4 +43,4 @@ bsc = [ "reth-bsc-consensus/bsc", "reth-primitives/bsc", "revm-primitives/bsc", -] +] \ No newline at end of file diff --git a/crates/bsc/evm/src/config.rs b/crates/bsc/evm/src/config.rs new file mode 100644 index 000000000..bf2ecb377 --- /dev/null +++ b/crates/bsc/evm/src/config.rs @@ -0,0 +1,100 @@ +use reth_chainspec::{BscHardfork, ChainSpec}; +use reth_ethereum_forks::{EthereumHardfork, Head}; + +/// Returns the spec id at the given timestamp. +/// +/// Note: This is only intended to be used after the merge, when hardforks are activated by +/// timestamp. +pub fn revm_spec_by_timestamp_after_shanghai( + chain_spec: &ChainSpec, + timestamp: u64, +) -> revm_primitives::SpecId { + if chain_spec.fork(BscHardfork::HaberFix).active_at_timestamp(timestamp) { + revm_primitives::HABER_FIX + } else if chain_spec.fork(BscHardfork::Haber).active_at_timestamp(timestamp) { + revm_primitives::HABER + } else if chain_spec.fork(BscHardfork::FeynmanFix).active_at_timestamp(timestamp) { + revm_primitives::FEYNMAN_FIX + } else if chain_spec.fork(BscHardfork::Feynman).active_at_timestamp(timestamp) { + revm_primitives::FEYNMAN + } else if chain_spec.fork(BscHardfork::Kepler).active_at_timestamp(timestamp) { + revm_primitives::KEPLER + } else { + revm_primitives::SHANGHAI + } +} + +/// return `revm_spec` from spec configuration. 
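// Worked example (hedged): the branches in `revm_spec_by_timestamp_after_shanghai`
// above are checked newest-first, so for a timestamp at which Feynman is active but
// FeynmanFix and later forks are not, the function returns `revm_primitives::FEYNMAN`;
// a timestamp before Kepler falls through to `SHANGHAI`.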
+pub fn revm_spec(chain_spec: &ChainSpec, block: &Head) -> revm_primitives::SpecId { + if chain_spec.fork(BscHardfork::HaberFix).active_at_head(block) { + revm_primitives::HABER_FIX + } else if chain_spec.fork(BscHardfork::Haber).active_at_head(block) { + revm_primitives::HABER + } else if chain_spec.fork(EthereumHardfork::Cancun).active_at_head(block) { + revm_primitives::CANCUN + } else if chain_spec.fork(BscHardfork::FeynmanFix).active_at_head(block) { + revm_primitives::FEYNMAN_FIX + } else if chain_spec.fork(BscHardfork::Feynman).active_at_head(block) { + revm_primitives::FEYNMAN + } else if chain_spec.fork(BscHardfork::Kepler).active_at_head(block) { + revm_primitives::KEPLER + } else if chain_spec.fork(EthereumHardfork::Shanghai).active_at_head(block) { + revm_primitives::SHANGHAI + } else if chain_spec.fork(BscHardfork::HertzFix).active_at_head(block) { + revm_primitives::HERTZ_FIX + } else if chain_spec.fork(BscHardfork::Hertz).active_at_head(block) { + revm_primitives::HERTZ + } else if chain_spec.fork(EthereumHardfork::London).active_at_head(block) { + revm_primitives::LONDON + } else if chain_spec.fork(EthereumHardfork::Berlin).active_at_head(block) { + revm_primitives::BERLIN + } else if chain_spec.fork(BscHardfork::Plato).active_at_head(block) { + revm_primitives::PLATO + } else if chain_spec.fork(BscHardfork::Luban).active_at_head(block) { + revm_primitives::LUBAN + } else if chain_spec.fork(BscHardfork::Planck).active_at_head(block) { + revm_primitives::PLANCK + } else if chain_spec.fork(BscHardfork::Gibbs).active_at_head(block) { + // bsc mainnet and testnet have different order for Moran, Nano and Gibbs + if chain_spec.fork(BscHardfork::Moran).active_at_head(block) { + revm_primitives::MORAN + } else if chain_spec.fork(BscHardfork::Nano).active_at_head(block) { + revm_primitives::NANO + } else { + revm_primitives::EULER + } + } else if chain_spec.fork(BscHardfork::Moran).active_at_head(block) { + revm_primitives::MORAN + } else if chain_spec.fork(BscHardfork::Nano).active_at_head(block) { + revm_primitives::NANO + } else if chain_spec.fork(BscHardfork::Euler).active_at_head(block) { + revm_primitives::EULER + } else if chain_spec.fork(BscHardfork::Bruno).active_at_head(block) { + revm_primitives::BRUNO + } else if chain_spec.fork(BscHardfork::MirrorSync).active_at_head(block) { + revm_primitives::MIRROR_SYNC + } else if chain_spec.fork(BscHardfork::Niels).active_at_head(block) { + revm_primitives::NIELS + } else if chain_spec.fork(BscHardfork::Ramanujan).active_at_head(block) { + revm_primitives::RAMANUJAN + } else if chain_spec.fork(EthereumHardfork::MuirGlacier).active_at_head(block) { + revm_primitives::MUIR_GLACIER + } else if chain_spec.fork(EthereumHardfork::Istanbul).active_at_head(block) { + revm_primitives::ISTANBUL + } else if chain_spec.fork(EthereumHardfork::Petersburg).active_at_head(block) { + revm_primitives::PETERSBURG + } else if chain_spec.fork(EthereumHardfork::Constantinople).active_at_head(block) { + revm_primitives::CONSTANTINOPLE + } else if chain_spec.fork(EthereumHardfork::Byzantium).active_at_head(block) { + revm_primitives::BYZANTIUM + } else if chain_spec.fork(EthereumHardfork::Homestead).active_at_head(block) { + revm_primitives::HOMESTEAD + } else if chain_spec.fork(EthereumHardfork::Frontier).active_at_head(block) { + revm_primitives::FRONTIER + } else { + panic!( + "invalid hardfork chainspec: expected at least one hardfork, got {:?}", + chain_spec.hardforks + ) + } +} diff --git a/crates/bsc/evm/src/execute.rs 
b/crates/bsc/evm/src/execute.rs index 186edc5c0..85a32f417 100644 --- a/crates/bsc/evm/src/execute.rs +++ b/crates/bsc/evm/src/execute.rs @@ -8,7 +8,7 @@ use reth_bsc_consensus::{ is_breathe_block, is_system_transaction, validate_block_post_execution, Parlia, ValidatorElectionInfo, ValidatorsInfo, }; -use reth_chainspec::ChainSpec; +use reth_chainspec::{BscHardforks, ChainSpec, EthereumHardforks}; use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError}; use reth_evm::{ execute::{ @@ -18,7 +18,7 @@ use reth_evm::{ }; use reth_primitives::{ parlia::{ParliaConfig, Snapshot, VoteAddress, CHECKPOINT_INTERVAL}, - system_contracts::get_upgrade_system_contracts, + system_contracts::{get_upgrade_system_contracts, SLASH_CONTRACT}, Address, BlockNumber, BlockWithSenders, Bytes, Header, Receipt, Transaction, TransactionSigned, B256, BSC_MAINNET, U256, }; @@ -31,10 +31,10 @@ use reth_revm::{ }; use revm_primitives::{ db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, TransactTo, + BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, ResultAndState, TransactTo, }; use std::{collections::HashMap, num::NonZeroUsize, sync::Arc, time::Instant}; -use tracing::log::debug; +use tracing::{debug, warn}; const SNAP_CACHE_NUM: usize = 2048; @@ -83,7 +83,7 @@ where { fn bsc_executor(&self, db: DB) -> BscBlockExecutor where - DB: Database, + DB: Database + std::fmt::Display>, { BscBlockExecutor::new( self.chain_spec.clone(), @@ -100,25 +100,27 @@ where P: ParliaProvider + Clone + Unpin + 'static, EvmConfig: ConfigureEvm, { - type Executor> = BscBlockExecutor; + type Executor + std::fmt::Display>> = + BscBlockExecutor; - type BatchExecutor> = BscBatchExecutor; + type BatchExecutor + std::fmt::Display>> = + BscBatchExecutor; fn executor(&self, db: DB) -> Self::Executor where - DB: Database, + DB: Database + std::fmt::Display>, { self.bsc_executor(db) } - fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor + fn batch_executor(&self, db: DB) -> Self::BatchExecutor where - DB: Database, + DB: Database + std::fmt::Display>, { let executor = self.bsc_executor(db); BscBatchExecutor { executor, - batch_record: BlockBatchRecord::new(prune_modes), + batch_record: BlockBatchRecord::default(), stats: BlockExecutorStats::default(), snapshots: Vec::new(), } @@ -159,7 +161,7 @@ where mut evm: Evm<'_, Ext, &mut State>, ) -> Result<(Vec, Vec, u64), BlockExecutionError> where - DB: Database, + DB: Database + std::fmt::Display>, { // execute transactions let mut cumulative_gas_used = 0; @@ -188,39 +190,45 @@ where .into()); } - self.patch_mainnet(&block.header, transaction, evm.db_mut()); - self.patch_chapel(&block.header, transaction, evm.db_mut()); + self.patch_mainnet_before_tx(transaction, evm.db_mut()); + self.patch_chapel_before_tx(transaction, evm.db_mut()); - EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender); + self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); // Execute transaction. 
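// The `map_err` below rebuilds the `EVMError` variant by variant so that the
// database error can be converted with `e.into()`, matching the executor's
// tightened `Display`-able database error bound; all other variants pass through
// unchanged, and the transaction hash is recalculated only for the error log.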
let ResultAndState { result, state } = evm.transact().map_err(move |err| { + let new_err = match err { + EVMError::Transaction(e) => EVMError::Transaction(e), + EVMError::Header(e) => EVMError::Header(e), + EVMError::Database(e) => EVMError::Database(e.into()), + EVMError::Custom(e) => EVMError::Custom(e), + EVMError::Precompile(e) => EVMError::Precompile(e), + }; // Ensure hash is calculated for error log, if not already done BlockValidationError::EVM { hash: transaction.recalculate_hash(), - error: err.into(), + error: Box::new(new_err), } })?; evm.db_mut().commit(state); + self.patch_mainnet_after_tx(transaction, evm.db_mut()); + self.patch_chapel_after_tx(transaction, evm.db_mut()); + // append gas used cumulative_gas_used += result.gas_used(); // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push( - #[allow(clippy::needless_update)] // side-effect of optimism fields - Receipt { - tx_type: transaction.tx_type(), - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. - success: result.is_success(), - cumulative_gas_used, - // convert to reth log - logs: result.into_logs(), - ..Default::default() - }, - ); + receipts.push(Receipt { + tx_type: transaction.tx_type(), + // Success flag was added in `EIP-658: Embedding transaction status code in + // receipts`. + success: result.is_success(), + cumulative_gas_used, + // convert to reth log + logs: result.into_logs(), + }); } drop(evm); @@ -240,13 +248,13 @@ pub struct BscBlockExecutor { /// The state to use for execution pub(crate) state: State, /// Extra provider for bsc - provider: P, + pub(crate) provider: Arc
<P>
, /// Parlia consensus instance - parlia: Arc, + pub(crate) parlia: Arc, } impl BscBlockExecutor { - /// Creates a new Ethereum block executor. + /// Creates a new Parlia block executor. pub fn new( chain_spec: Arc, evm_config: EvmConfig, @@ -255,7 +263,13 @@ impl BscBlockExecutor { provider: P, ) -> Self { let parlia = Arc::new(Parlia::new(Arc::clone(&chain_spec), parlia_config)); - Self { executor: BscEvmExecutor { chain_spec, evm_config }, state, provider, parlia } + let shared_provider = Arc::new(provider); + Self { + executor: BscEvmExecutor { chain_spec, evm_config }, + state, + provider: shared_provider, + parlia, + } } #[inline] @@ -279,7 +293,7 @@ impl BscBlockExecutor { impl BscBlockExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + std::fmt::Display>, P: ParliaProvider, { /// Configures a new evm configuration and block environment for the given block. @@ -288,8 +302,7 @@ where fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); let mut block_env = BlockEnv::default(); - - EvmConfig::fill_cfg_and_block_env( + self.executor.evm_config.fill_cfg_and_block_env( &mut cfg, &mut block_env, self.chain_spec(), @@ -314,7 +327,8 @@ where ) -> Result { // 1. get parent header and snapshot let parent = &(self.get_header_by_hash(block.parent_hash)?); - let snap = &(self.snapshot(parent, None)?); + let snapshot_reader = SnapshotReader::new(self.provider.clone(), self.parlia.clone()); + let snap = &(snapshot_reader.snapshot(parent, None)?); // 2. prepare state on new block self.on_new_block(&block.header, parent, snap)?; @@ -326,7 +340,7 @@ where // 4. execute normal transactions let env = self.evm_env_for_block(&block.header, total_difficulty); - if !self.parlia.chain_spec().is_feynman_active_at_timestamp(block.timestamp) { + if !self.chain_spec().is_feynman_active_at_timestamp(block.timestamp) { // apply system contract upgrade self.upgrade_system_contracts(block.number, block.timestamp, parent.timestamp)?; } @@ -355,115 +369,6 @@ where } } - pub(crate) fn find_ancient_header( - &self, - header: &Header, - count: u64, - ) -> Result { - let mut result = header.clone(); - for _ in 0..count { - result = self.get_header_by_hash(result.parent_hash)?; - } - Ok(result) - } - - pub(crate) fn snapshot( - &self, - header: &Header, - parent: Option<&Header>, - ) -> Result { - let mut cache = RECENT_SNAPS.write(); - - let mut header = header.clone(); - let mut block_number = header.number; - let mut block_hash = header.hash_slow(); - let mut skip_headers = Vec::new(); - - let snap: Option; - loop { - // Read from cache - if let Some(cached) = cache.get(&block_hash) { - snap = Some(cached.clone()); - break; - } - - // Read from db - if block_number % CHECKPOINT_INTERVAL == 0 { - if let Some(cached) = - self.provider.get_parlia_snapshot(block_hash).map_err(|err| { - BscBlockExecutionError::ProviderInnerError { error: err.into() } - })? - { - snap = Some(cached); - break; - } - } - - // If we're at the genesis, snapshot the initial state. 
- if block_number == 0 { - let ValidatorsInfo { consensus_addrs, vote_addrs } = - self.parlia.parse_validators_from_header(&header).map_err(|err| { - BscBlockExecutionError::ParliaConsensusInnerError { error: err.into() } - })?; - snap = Some(Snapshot::new( - consensus_addrs, - block_number, - block_hash, - self.parlia.epoch(), - vote_addrs, - )); - break; - } - - // No snapshot for this header, gather the header and move backward - skip_headers.push(header.clone()); - if let Some(parent) = parent { - block_number = parent.number; - block_hash = header.parent_hash; - header = parent.clone(); - } else if let Ok(h) = self.get_header_by_hash(header.parent_hash) { - block_number = h.number; - block_hash = header.parent_hash; - header = h; - } - } - - let mut snap = snap.ok_or_else(|| BscBlockExecutionError::SnapshotNotFound)?; - - // apply skip headers - skip_headers.reverse(); - for header in &skip_headers { - let ValidatorsInfo { consensus_addrs, vote_addrs } = if header.number > 0 && - header.number % self.parlia.epoch() == (snap.validators.len() / 2) as u64 - { - // change validator set - let checkpoint_header = - self.find_ancient_header(header, (snap.validators.len() / 2) as u64)?; - - self.parlia.parse_validators_from_header(&checkpoint_header).map_err(|err| { - BscBlockExecutionError::ParliaConsensusInnerError { error: err.into() } - })? - } else { - ValidatorsInfo::default() - }; - - let validator = self.parlia.recover_proposer(header).map_err(|err| { - BscBlockExecutionError::ParliaConsensusInnerError { error: err.into() } - })?; - let attestation = - self.parlia.get_vote_attestation_from_header(header).map_err(|err| { - BscBlockExecutionError::ParliaConsensusInnerError { error: err.into() } - })?; - - snap = snap - .apply(validator, header, consensus_addrs, vote_addrs, attestation) - .ok_or_else(|| BscBlockExecutionError::ApplySnapshotFailed)?; - } - - cache.put(snap.block_hash, snap.clone()); - Ok(snap) - } - pub(crate) fn get_justified_header( &self, snap: &Snapshot, @@ -499,7 +404,7 @@ where parent_block_time: u64, ) -> Result { if let Ok(contracts) = get_upgrade_system_contracts( - self.parlia().chain_spec(), + self.chain_spec(), block_number, block_time, parent_block_time, @@ -508,7 +413,7 @@ where debug!("Upgrade system contract {:?} at height {:?}", k, block_number); let account = self.state.load_cache_account(k).map_err(|err| { - BscBlockExecutionError::ProviderInnerError { error: err.into() } + BscBlockExecutionError::ProviderInnerError { error: Box::new(err.into()) } })?; let mut new_info = account.account_info().unwrap_or_default(); @@ -555,9 +460,16 @@ where block_env.basefee = U256::ZERO; // Execute call. - let ResultAndState { result, .. } = evm.transact().map_err(move |e| { + let ResultAndState { result, .. 
} = evm.transact().map_err(move |err| { + let new_err = match err { + EVMError::Transaction(e) => EVMError::Transaction(e), + EVMError::Header(e) => EVMError::Header(e), + EVMError::Database(e) => EVMError::Database(e.into()), + EVMError::Custom(e) => EVMError::Custom(e), + EVMError::Precompile(e) => EVMError::Precompile(e), + }; // Ensure hash is calculated for error log, if not already done - BlockValidationError::EVM { hash: B256::default(), error: e.into() } + BlockValidationError::EVM { hash: B256::default(), error: Box::new(new_err) } })?; if !result.is_success() { @@ -579,10 +491,30 @@ where ) -> Result<(), BlockExecutionError> { let mut evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - let nonce = evm.db_mut().basic(sender).unwrap().unwrap_or_default().nonce; + let nonce = evm + .db_mut() + .basic(sender) + .map_err(|err| BscBlockExecutionError::ProviderInnerError { + error: Box::new(err.into()), + }) + .unwrap() + .unwrap_or_default() + .nonce; transaction.set_nonce(nonce); let hash = transaction.signature_hash(); - if hash != system_txs[0].signature_hash() { + if system_txs.is_empty() || hash != system_txs[0].signature_hash() { + // slash tx could fail and not in the block + if let Some(to) = transaction.to() { + if to == SLASH_CONTRACT.parse::
<Address>
().unwrap() && + (system_txs.is_empty() || + system_txs[0].to().unwrap_or_default() != + SLASH_CONTRACT.parse::<Address>
().unwrap()) + { + warn!("slash validator failed"); + return Ok(()); + } + } + debug!("unexpected transaction: {:?}", transaction); for tx in system_txs.iter() { debug!("left system tx: {:?}", tx); @@ -614,9 +546,16 @@ where block_env.basefee = U256::ZERO; // Execute transaction. - let ResultAndState { result, state } = evm.transact().map_err(move |e| { + let ResultAndState { result, state } = evm.transact().map_err(move |err| { + let new_err = match err { + EVMError::Transaction(e) => EVMError::Transaction(e), + EVMError::Header(e) => EVMError::Header(e), + EVMError::Database(e) => EVMError::Database(e.into()), + EVMError::Custom(e) => EVMError::Custom(e), + EVMError::Precompile(e) => EVMError::Precompile(e), + }; // Ensure hash is calculated for error log, if not already done - BlockValidationError::EVM { hash, error: e.into() } + BlockValidationError::EVM { hash, error: Box::new(new_err) } })?; evm.db_mut().commit(state); @@ -667,12 +606,9 @@ where }; // 2. get election info - if self.parlia().chain_spec().is_feynman_active_at_timestamp(header.timestamp) && + if self.chain_spec().is_feynman_active_at_timestamp(header.timestamp) && is_breathe_block(parent.timestamp, header.timestamp) && - !self - .parlia() - .chain_spec() - .is_on_feynman_at_timestamp(header.timestamp, parent.timestamp) + !self.chain_spec().is_on_feynman_at_timestamp(header.timestamp, parent.timestamp) { let (to, data) = self.parlia().get_max_elected_validators(); let bz = self.eth_call(to, data, env.clone())?; @@ -715,7 +651,7 @@ where number: BlockNumber, env: EnvWithHandlerCfg, ) -> (Vec
, Vec) { - if !self.parlia().chain_spec().is_luban_active_at_block(number) { + if !self.chain_spec().is_luban_active_at_block(number) { let (to, data) = self.parlia().get_current_validators_before_luban(number); let output = self.eth_call(to, data, env).unwrap(); @@ -732,7 +668,7 @@ where impl Executor for BscBlockExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + std::fmt::Display>, P: ParliaProvider, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; @@ -788,7 +724,7 @@ impl BscBatchExecutor { impl BatchExecutor for BscBatchExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + std::fmt::Display>, P: ParliaProvider, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; @@ -843,7 +779,144 @@ where self.batch_record.set_tip(tip); } + fn set_prune_modes(&mut self, prune_modes: PruneModes) { + self.batch_record.set_prune_modes(prune_modes); + } + fn size_hint(&self) -> Option { Some(self.executor.state.bundle_state.size_hint()) } } + +#[derive(Debug, Clone)] +pub struct SnapshotReader
<P>
{ + /// Extra provider for bsc + provider: Arc
<P>
, + /// Parlia consensus instance + parlia: Arc<Parlia>, +} + +impl
<P>
SnapshotReader
<P>
+where + P: ParliaProvider, +{ + pub fn new(provider: Arc
<P>
, parlia: Arc) -> Self { + Self { provider, parlia } + } + + pub fn snapshot( + &self, + header: &Header, + parent: Option<&Header>, + ) -> Result { + let mut cache = RECENT_SNAPS.write(); + + let mut header = header.clone(); + let mut block_number = header.number; + let mut block_hash = header.hash_slow(); + let mut skip_headers = Vec::new(); + + let snap: Option; + loop { + // Read from cache + if let Some(cached) = cache.get(&block_hash) { + snap = Some(cached.clone()); + break; + } + + // Read from db + if block_number % CHECKPOINT_INTERVAL == 0 { + if let Some(cached) = + self.provider.get_parlia_snapshot(block_hash).map_err(|err| { + BscBlockExecutionError::ProviderInnerError { error: err.into() } + })? + { + snap = Some(cached); + break; + } + } + + // If we're at the genesis, snapshot the initial state. + if block_number == 0 { + let ValidatorsInfo { consensus_addrs, vote_addrs } = + self.parlia.parse_validators_from_header(&header).map_err(|err| { + BscBlockExecutionError::ParliaConsensusInnerError { error: err.into() } + })?; + snap = Some(Snapshot::new( + consensus_addrs, + block_number, + block_hash, + self.parlia.epoch(), + vote_addrs, + )); + break; + } + + // No snapshot for this header, gather the header and move backward + skip_headers.push(header.clone()); + if let Some(parent) = parent { + block_number = parent.number; + block_hash = header.parent_hash; + header = parent.clone(); + } else if let Ok(h) = self.get_header_by_hash(header.parent_hash) { + block_number = h.number; + block_hash = header.parent_hash; + header = h; + } + } + + let mut snap = snap.ok_or_else(|| BscBlockExecutionError::SnapshotNotFound)?; + + // apply skip headers + skip_headers.reverse(); + for header in &skip_headers { + let ValidatorsInfo { consensus_addrs, vote_addrs } = if header.number > 0 && + header.number % self.parlia.epoch() == (snap.validators.len() / 2) as u64 + { + // change validator set + let checkpoint_header = + self.find_ancient_header(header, (snap.validators.len() / 2) as u64)?; + + self.parlia.parse_validators_from_header(&checkpoint_header).map_err(|err| { + BscBlockExecutionError::ParliaConsensusInnerError { error: err.into() } + })? + } else { + ValidatorsInfo::default() + }; + + let validator = self.parlia.recover_proposer(header).map_err(|err| { + BscBlockExecutionError::ParliaConsensusInnerError { error: err.into() } + })?; + let attestation = + self.parlia.get_vote_attestation_from_header(header).map_err(|err| { + BscBlockExecutionError::ParliaConsensusInnerError { error: err.into() } + })?; + + snap = snap + .apply(validator, header, consensus_addrs, vote_addrs, attestation) + .ok_or_else(|| BscBlockExecutionError::ApplySnapshotFailed)?; + } + + cache.put(snap.block_hash, snap.clone()); + Ok(snap) + } + + fn get_header_by_hash(&self, block_hash: B256) -> Result { + self.provider + .header(&block_hash) + .map_err(|err| BscBlockExecutionError::ProviderInnerError { error: err.into() })? 
+
+    fn get_header_by_hash(&self, block_hash: B256) -> Result<Header, BlockExecutionError> {
+        self.provider
+            .header(&block_hash)
+            .map_err(|err| BscBlockExecutionError::ProviderInnerError { error: err.into() })?
+            .ok_or_else(|| BscBlockExecutionError::UnknownHeader { block_hash }.into())
+    }
+
+    fn find_ancient_header(
+        &self,
+        header: &Header,
+        count: u64,
+    ) -> Result<Header, BlockExecutionError> {
+        let mut result = header.clone();
+        for _ in 0..count {
+            result = self.get_header_by_hash(result.parent_hash)?;
+        }
+        Ok(result)
+    }
+}
diff --git a/crates/bsc/evm/src/lib.rs b/crates/bsc/evm/src/lib.rs
index 1810fbb7c..4344f5944 100644
--- a/crates/bsc/evm/src/lib.rs
+++ b/crates/bsc/evm/src/lib.rs
@@ -9,13 +9,16 @@ use reth_chainspec::ChainSpec;
 use reth_evm::{ConfigureEvm, ConfigureEvmEnv};
 use reth_primitives::{
-    revm::{config::revm_spec, env::fill_tx_env},
     revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv},
-    Address, Head, Header, TransactionSigned, U256,
+    transaction::FillTxEnv,
+    Address, Bytes, Head, Header, TransactionSigned, U256,
 };
 use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector};
+use revm_primitives::Env;

-pub mod execute;
+mod config;
+pub use config::{revm_spec, revm_spec_by_timestamp_after_shanghai};
+mod execute;
 pub use execute::*;
 mod error;
 pub use error::BscBlockExecutionError;
@@ -29,11 +32,22 @@ mod pre_execution;
 pub struct BscEvmConfig;

 impl ConfigureEvmEnv for BscEvmConfig {
-    fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) {
-        fill_tx_env(tx_env, transaction, sender)
+    fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) {
+        transaction.fill_tx_env(tx_env, sender);
+    }
+
+    fn fill_tx_env_system_contract_call(
+        &self,
+        _env: &mut Env,
+        _caller: Address,
+        _contract: Address,
+        _data: Bytes,
+    ) {
+        // No system contract call on BSC
     }

     fn fill_cfg_env(
+        &self,
         cfg_env: &mut CfgEnvWithHandlerCfg,
         chain_spec: &ChainSpec,
         header: &Header,
@@ -41,7 +55,7 @@ impl ConfigureEvmEnv for BscEvmConfig {
     ) {
         let spec_id = revm_spec(
             chain_spec,
-            Head {
+            &Head {
                 number: header.number,
                 timestamp: header.timestamp,
                 difficulty: header.difficulty,
@@ -98,7 +112,7 @@ mod tests {
         let chain_spec = ChainSpec::default();
         let total_difficulty = U256::ZERO;

-        BscEvmConfig::fill_cfg_and_block_env(
+        BscEvmConfig::default().fill_cfg_and_block_env(
             &mut cfg_env,
             &mut block_env,
             &chain_spec,
diff --git a/crates/bsc/evm/src/patch_hertz.rs b/crates/bsc/evm/src/patch_hertz.rs
index c4b866209..d375c9006 100644
--- a/crates/bsc/evm/src/patch_hertz.rs
+++ b/crates/bsc/evm/src/patch_hertz.rs
@@ -1,447 +1,745 @@
-use crate::execute::BscEvmExecutor;
+use crate::{execute::BscEvmExecutor, BscBlockExecutionError};
+use lazy_static::lazy_static;
 use reth_errors::ProviderError;
 use reth_evm::ConfigureEvm;
-use reth_primitives::{address, b256, Address, Header, TransactionSigned, B256, U256};
-use reth_revm::{
-    db::{states::CacheAccount, AccountStatus::Destroyed},
-    State,
-};
+use reth_primitives::{address, b256, Address, TransactionSigned, B256, U256};
+use reth_revm::{db::states::StorageSlot, State};
 use revm_primitives::db::Database;
 use std::{collections::HashMap, str::FromStr};
-use tracing::log::trace;
+use tracing::trace;

-impl<EvmConfig> BscEvmExecutor<EvmConfig>
-where
-    EvmConfig: ConfigureEvm,
-{
-    pub(crate) fn patch_mainnet<DB>(
-        &self,
-        header: &Header,
-        transaction: &TransactionSigned,
-        state: &mut State<DB>,
-    ) where
-        DB: Database<Error = ProviderError>,
-    {
-        let patches = vec![
-            // patch 1: BlockNum 33851236, txIndex 89(patch before tx 89)
+struct StoragePatch {
+    address: Address,
+    storage: HashMap<U256, U256>,
+}
+
+lazy_static!
{ + static ref MAINNET_PATCHES_BEFORE_TX: HashMap = HashMap::from([ + // patch 1: BlockNum 33851236, txIndex 89 ( - b256!("022296e50021d7225b75f3873e7bc5a2bf6376a08079b4368f9dee81946d623b"), b256!("7eba4edc7c1806d6ee1691d43513838931de5c94f9da56ec865721b402f775b0"), - address!("00000000001f8b68515EfB546542397d3293CCfd"), - HashMap::from([ - ( - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - U256::from_str( - "0x00000000000000000000000052db206170b430da8223651d28830e56ba3cdc04", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000002", - ) - .unwrap(), - U256::from_str( - "0x000000000000000000000000bb45f138499734bf5c0948d490c65903676ea1de", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x65c95177950b486c2071bf2304da1427b9136564150fb97266ffb318b03a71cc", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x245e58a02bec784ccbdb9e022a84af83227a4125a22a5e68fcc596c7e436434e", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x1c4534c86090a60a9120f34c7b15254913c00bda3d4b276d6edb65c9f48a913f", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000004", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000019", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd1b4", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd1b5", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd1b6", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000005", - ) - .unwrap(), - U256::from_str( - "0x00000000000000000000000000000000000000000000000000000000000fc248", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000006", - ) - .unwrap(), - U256::from_str( - "0x00000000000000000000000000000000000000000000000000000000000fc132", - ) - .unwrap(), - ), - ]), + StoragePatch { + address: address!("00000000001f8b68515EfB546542397d3293CCfd"), + storage: HashMap::from([ + ( + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + U256::from_str( + "0x00000000000000000000000052db206170b430da8223651d28830e56ba3cdc04", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000002", + ) + .unwrap(), + U256::from_str( + "0x000000000000000000000000bb45f138499734bf5c0948d490c65903676ea1de", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x65c95177950b486c2071bf2304da1427b9136564150fb97266ffb318b03a71cc", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", 
+ ) + .unwrap(), + ), + ( + U256::from_str( + "0x245e58a02bec784ccbdb9e022a84af83227a4125a22a5e68fcc596c7e436434e", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x1c4534c86090a60a9120f34c7b15254913c00bda3d4b276d6edb65c9f48a913f", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000004", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000019", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd1b4", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd1b5", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd1b6", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000005", + ) + .unwrap(), + U256::from_str( + "0x00000000000000000000000000000000000000000000000000000000000fc248", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000006", + ) + .unwrap(), + U256::from_str( + "0x00000000000000000000000000000000000000000000000000000000000fc132", + ) + .unwrap(), + ), + ]), + } ), - // patch 2: BlockNum 33851236, txIndex 90(patch before tx 90) + // patch 2: BlockNum 33851236, txIndex 90 ( - b256!("022296e50021d7225b75f3873e7bc5a2bf6376a08079b4368f9dee81946d623b"), b256!("5217324f0711af744fe8e12d73f13fdb11805c8e29c0c095ac747b7e4563e935"), - address!("00000000001f8b68515EfB546542397d3293CCfd"), - HashMap::from([ - ( - U256::from_str( - "0xbcfc62ca570bdb58cf9828ac51ae8d7e063a1cc0fa1aee57691220a7cd78b1c8", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x30dce49ce1a4014301bf21aad0ee16893e4dcc4a4e4be8aa10e442dd13259837", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0xc0582628d787ee16fe03c8e5b5f5644d3b81989686f8312280b7a1f733145525", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0xfca5cf22ff2e8d58aece8e4370cce33cd0144d48d00f40a5841df4a42527694b", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0xb189302b37865d2ae522a492ff1f61a5addc1db44acbdcc4b6814c312c815f46", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0xfe1f1986775fc2ac905aeaecc7b1aa8b0d6722b852c90e26edacd2dac7382489", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - 
"0x36052a8ddb27fecd20e2e09da15494a0f2186bf8db36deebbbe701993f8c4aae", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x4959a566d8396b889ff4bc20e18d2497602e01e5c6013af5af7a7c4657ece3e2", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0xe0b5aeb100569add952966f803cb67aca86dc6ec8b638f5a49f9e0760efa9a7a", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x632467ad388b91583f956f76488afc42846e283c962cbb215d288033ffc4fb71", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x9ad4e69f52519f7b7b8ee5ae3326d57061b429428ea0c056dd32e7a7102e79a7", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x35e130c7071699eae5288b12374ef157a15e4294e2b3a352160b7c1cd4641d82", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0xa0d8279f845f63979dc292228adfa0bda117de27e44d90ac2adcd44465b225e7", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x9a100b70ffda9ed9769becdadca2b2936b217e3da4c9b9817bad30d85eab25ff", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x28d67156746295d901005e2d95ce589e7093decb638f8c132d9971fd0a37e176", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x297c4e115b5df76bcd5a1654b8032661680a1803e30a0774cb42bb01891e6d97", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x5f71b88f1032d27d8866948fc9c49525f3e584bdd52a66de6060a7b1f767326f", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0xe6d8ddf6a0bbeb4840f48f0c4ffda9affa4675354bdb7d721235297f5a094f54", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( - U256::from_str( - "0x30ba10aef6238bf19667aaa988b18b72adb4724c016e19eb64bbb52808d1a842", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", - ) - .unwrap(), - ), - ( + StoragePatch { + address: address!("00000000001f8b68515EfB546542397d3293CCfd"), + storage: HashMap::from([ + ( + U256::from_str( + "0xbcfc62ca570bdb58cf9828ac51ae8d7e063a1cc0fa1aee57691220a7cd78b1c8", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x30dce49ce1a4014301bf21aad0ee16893e4dcc4a4e4be8aa10e442dd13259837", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0xc0582628d787ee16fe03c8e5b5f5644d3b81989686f8312280b7a1f733145525", + ) + .unwrap(), + 
U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0xfca5cf22ff2e8d58aece8e4370cce33cd0144d48d00f40a5841df4a42527694b", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0xb189302b37865d2ae522a492ff1f61a5addc1db44acbdcc4b6814c312c815f46", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0xfe1f1986775fc2ac905aeaecc7b1aa8b0d6722b852c90e26edacd2dac7382489", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x36052a8ddb27fecd20e2e09da15494a0f2186bf8db36deebbbe701993f8c4aae", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x4959a566d8396b889ff4bc20e18d2497602e01e5c6013af5af7a7c4657ece3e2", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0xe0b5aeb100569add952966f803cb67aca86dc6ec8b638f5a49f9e0760efa9a7a", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x632467ad388b91583f956f76488afc42846e283c962cbb215d288033ffc4fb71", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x9ad4e69f52519f7b7b8ee5ae3326d57061b429428ea0c056dd32e7a7102e79a7", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x35e130c7071699eae5288b12374ef157a15e4294e2b3a352160b7c1cd4641d82", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0xa0d8279f845f63979dc292228adfa0bda117de27e44d90ac2adcd44465b225e7", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x9a100b70ffda9ed9769becdadca2b2936b217e3da4c9b9817bad30d85eab25ff", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x28d67156746295d901005e2d95ce589e7093decb638f8c132d9971fd0a37e176", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x297c4e115b5df76bcd5a1654b8032661680a1803e30a0774cb42bb01891e6d97", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x5f71b88f1032d27d8866948fc9c49525f3e584bdd52a66de6060a7b1f767326f", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0xe6d8ddf6a0bbeb4840f48f0c4ffda9affa4675354bdb7d721235297f5a094f54", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x30ba10aef6238bf19667aaa988b18b72adb4724c016e19eb64bbb52808d1a842", + ) + 
.unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0x9c6806a4d6a99e4869b9a4aaf80b0a3bf5f5240a1d6032ed82edf0e86f2a2467", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0xe8480d613bbf3b979aee2de4487496167735bb73df024d988e1795b3c7fa559a", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ( + U256::from_str( + "0xebfaec01f898f7f0e2abdb4b0aee3dfbf5ec2b287b1e92f9b62940f85d5f5bac", + ) + .unwrap(), + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + ), + ]), + } + ) + ]); + static ref MAINNET_PATCHES_AFTER_TX: HashMap = HashMap::from([ + // patch 1: BlockNum 33851236, txIndex 89 + ( + b256!("7eba4edc7c1806d6ee1691d43513838931de5c94f9da56ec865721b402f775b0"), + StoragePatch { + address: address!("00000000001f8b68515EfB546542397d3293CCfd"), + storage: HashMap::from([ + ( + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000002", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x65c95177950b486c2071bf2304da1427b9136564150fb97266ffb318b03a71cc", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x245e58a02bec784ccbdb9e022a84af83227a4125a22a5e68fcc596c7e436434e", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x1c4534c86090a60a9120f34c7b15254913c00bda3d4b276d6edb65c9f48a913f", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000005", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x0000000000000000000000000000000000000000000000000000000000000006", + ) + .unwrap(), + U256::ZERO, + ), + ]), + } + ), + // patch 2: BlockNum 33851236, txIndex 90 + ( + b256!("5217324f0711af744fe8e12d73f13fdb11805c8e29c0c095ac747b7e4563e935"), + StoragePatch { + address: address!("00000000001f8b68515EfB546542397d3293CCfd"), + storage: HashMap::from([ + ( + U256::from_str( + "0xbcfc62ca570bdb58cf9828ac51ae8d7e063a1cc0fa1aee57691220a7cd78b1c8", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x30dce49ce1a4014301bf21aad0ee16893e4dcc4a4e4be8aa10e442dd13259837", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0xc0582628d787ee16fe03c8e5b5f5644d3b81989686f8312280b7a1f733145525", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0xfca5cf22ff2e8d58aece8e4370cce33cd0144d48d00f40a5841df4a42527694b", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0xb189302b37865d2ae522a492ff1f61a5addc1db44acbdcc4b6814c312c815f46", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0xfe1f1986775fc2ac905aeaecc7b1aa8b0d6722b852c90e26edacd2dac7382489", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x36052a8ddb27fecd20e2e09da15494a0f2186bf8db36deebbbe701993f8c4aae", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x4959a566d8396b889ff4bc20e18d2497602e01e5c6013af5af7a7c4657ece3e2", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0xe0b5aeb100569add952966f803cb67aca86dc6ec8b638f5a49f9e0760efa9a7a", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x632467ad388b91583f956f76488afc42846e283c962cbb215d288033ffc4fb71", + ) + .unwrap(), + 
U256::ZERO, + ), + ( + U256::from_str( + "0x9ad4e69f52519f7b7b8ee5ae3326d57061b429428ea0c056dd32e7a7102e79a7", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x35e130c7071699eae5288b12374ef157a15e4294e2b3a352160b7c1cd4641d82", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0xa0d8279f845f63979dc292228adfa0bda117de27e44d90ac2adcd44465b225e7", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x9a100b70ffda9ed9769becdadca2b2936b217e3da4c9b9817bad30d85eab25ff", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x28d67156746295d901005e2d95ce589e7093decb638f8c132d9971fd0a37e176", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x297c4e115b5df76bcd5a1654b8032661680a1803e30a0774cb42bb01891e6d97", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x5f71b88f1032d27d8866948fc9c49525f3e584bdd52a66de6060a7b1f767326f", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0xe6d8ddf6a0bbeb4840f48f0c4ffda9affa4675354bdb7d721235297f5a094f54", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x30ba10aef6238bf19667aaa988b18b72adb4724c016e19eb64bbb52808d1a842", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0x9c6806a4d6a99e4869b9a4aaf80b0a3bf5f5240a1d6032ed82edf0e86f2a2467", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0xe8480d613bbf3b979aee2de4487496167735bb73df024d988e1795b3c7fa559a", + ) + .unwrap(), + U256::ZERO, + ), + ( + U256::from_str( + "0xebfaec01f898f7f0e2abdb4b0aee3dfbf5ec2b287b1e92f9b62940f85d5f5bac", + ) + .unwrap(), + U256::ZERO, + ), + ]), + } + ) + ]); + static ref CHAPEL_PATCHES_BEFORE_TX: HashMap = HashMap::from([ + // patch 1: BlockNum 35547779, txIndex 196 + ( + b256!("7ce9a3cf77108fcc85c1e84e88e363e3335eca515dfcf2feb2011729878b13a7"), + StoragePatch { + address: address!("89791428868131eb109e42340ad01eb8987526b2"), + storage: HashMap::from([( U256::from_str( - "0x9c6806a4d6a99e4869b9a4aaf80b0a3bf5f5240a1d6032ed82edf0e86f2a2467", + "0xf1e9242398de526b8dd9c25d38e65fbb01926b8940377762d7884b8b0dcdc3b0", ) .unwrap(), U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000f6a7831804efd2cd0a", ) .unwrap(), - ), - ( + )]), + }, + ), + // patch 2: BlockNum 35548081, txIndex 486 + ( + b256!("e3895eb95605d6b43ceec7876e6ff5d1c903e572bf83a08675cb684c047a695c"), + StoragePatch { + address: address!("89791428868131eb109e42340ad01eb8987526b2"), + storage: HashMap::from([( U256::from_str( - "0xe8480d613bbf3b979aee2de4487496167735bb73df024d988e1795b3c7fa559a", + "0xf1e9242398de526b8dd9c25d38e65fbb01926b8940377762d7884b8b0dcdc3b0", ) .unwrap(), U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000114be8ecea72b64003", ) .unwrap(), - ), - ( + )]), + }, + ), + ]); + static ref CHAPEL_PATCHES_AFTER_TX: HashMap = HashMap::from([ + // patch 1: BlockNum 35547779, txIndex 196 + ( + b256!("7ce9a3cf77108fcc85c1e84e88e363e3335eca515dfcf2feb2011729878b13a7"), + StoragePatch { + address: address!("89791428868131eb109e42340ad01eb8987526b2"), + storage: HashMap::from([( U256::from_str( - "0xebfaec01f898f7f0e2abdb4b0aee3dfbf5ec2b287b1e92f9b62940f85d5f5bac", + "0xf1e9242398de526b8dd9c25d38e65fbb01926b8940377762d7884b8b0dcdc3b0", ) .unwrap(), + U256::ZERO, + )]), + }, + ), + // patch 2: BlockNum 35548081, txIndex 486 + ( + b256!("e3895eb95605d6b43ceec7876e6ff5d1c903e572bf83a08675cb684c047a695c"), + StoragePatch { + address: 
address!("89791428868131eb109e42340ad01eb8987526b2"), + storage: HashMap::from([( U256::from_str( - "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xf1e9242398de526b8dd9c25d38e65fbb01926b8940377762d7884b8b0dcdc3b0", ) .unwrap(), - ), - ]), + U256::ZERO, + )]), + }, ), - ]; + ]); +} - apply_patch(header, transaction, state, patches); +impl BscEvmExecutor +where + EvmConfig: ConfigureEvm, +{ + pub(crate) fn patch_mainnet_before_tx( + &self, + transaction: &TransactionSigned, + state: &mut State, + ) where + DB: Database + std::fmt::Display>, + { + let tx_hash = transaction.recalculate_hash(); + if let Some(patch) = MAINNET_PATCHES_BEFORE_TX.get(&tx_hash) { + trace!("patch evm state for mainnet before tx {:?}", tx_hash); + + apply_patch(state, patch.address, &patch.storage); + } } - pub(crate) fn patch_chapel( + pub(crate) fn patch_chapel_before_tx( &self, - header: &Header, transaction: &TransactionSigned, state: &mut State, ) where - DB: Database, + DB: Database + std::fmt::Display>, { - let patches = vec![ - // patch 1: BlockNum 35547779, txIndex 196(patch before tx 196) - ( - b256!("1237cb09a7d08c187a78e777853b70be28a41bb188c5341987408623c1a4f4aa"), - b256!("7ce9a3cf77108fcc85c1e84e88e363e3335eca515dfcf2feb2011729878b13a7"), - address!("89791428868131eb109e42340ad01eb8987526b2"), - HashMap::from([( - U256::from_str( - "0xf1e9242398de526b8dd9c25d38e65fbb01926b8940377762d7884b8b0dcdc3b0", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000f6a7831804efd2cd0a", - ) - .unwrap(), - )]), - ), - // patch 2: BlockNum 35548081, txIndex 486(patch before tx 486) - ( - b256!("cdd38b3681c8f3f1da5569a893231466ab35f47d58ba85dbd7d9217f304983bf"), - b256!("e3895eb95605d6b43ceec7876e6ff5d1c903e572bf83a08675cb684c047a695c"), - address!("89791428868131eb109e42340ad01eb8987526b2"), - HashMap::from([( - U256::from_str( - "0xf1e9242398de526b8dd9c25d38e65fbb01926b8940377762d7884b8b0dcdc3b0", - ) - .unwrap(), - U256::from_str( - "0x0000000000000000000000000000000000000000000000114be8ecea72b64003", - ) - .unwrap(), - )]), - ), - ]; + let tx_hash = transaction.recalculate_hash(); + if let Some(patch) = CHAPEL_PATCHES_BEFORE_TX.get(&tx_hash) { + trace!("patch evm state for chapel before tx {:?}", tx_hash); - apply_patch(header, transaction, state, patches); + apply_patch(state, patch.address, &patch.storage); + } } -} -fn apply_patch( - header: &Header, - transaction: &TransactionSigned, - state: &mut State, - patches: Vec<(B256, B256, Address, HashMap)>, -) where - DB: Database, -{ - for (block_hash, tx_hash, address, patch) in patches { - if header.hash_slow() == block_hash && transaction.recalculate_hash() == tx_hash { - trace!("patch evm state at block {:?} tx {:?}", block_hash, tx_hash); + pub(crate) fn patch_mainnet_after_tx( + &self, + transaction: &TransactionSigned, + state: &mut State, + ) where + DB: Database + std::fmt::Display>, + { + let tx_hash = transaction.recalculate_hash(); + if let Some(patch) = MAINNET_PATCHES_AFTER_TX.get(&tx_hash) { + trace!("patch evm state for mainnet after tx {:?}", tx_hash); - let account = state.load_cache_account(address).unwrap().clone(); - let (info, mut storage) = account.into_components().0.unwrap(); - patch.into_iter().for_each(|(key, value)| { - storage.insert(key, value); - }); + apply_patch(state, patch.address, &patch.storage); + } + } - let mut account = CacheAccount::new_loaded(info, storage); - account.status = Destroyed; - state.cache.accounts.insert(address, account); + pub(crate) fn 
patch_chapel_after_tx<DB>(
+        &self,
+        transaction: &TransactionSigned,
+        state: &mut State<DB>,
+    ) where
+        DB: Database<Error: Into<ProviderError> + std::fmt::Display>,
+    {
+        let tx_hash = transaction.recalculate_hash();
+        if let Some(patch) = CHAPEL_PATCHES_AFTER_TX.get(&tx_hash) {
+            trace!("patch evm state for chapel after tx {:?}", tx_hash);
+
+            apply_patch(state, patch.address, &patch.storage);
+        }
+    }
 }
+
+fn apply_patch<DB>(state: &mut State<DB>, address: Address, storage: &HashMap<U256, U256>)
+where
+    DB: Database<Error: Into<ProviderError> + std::fmt::Display>,
+{
+    let account = state
+        .load_cache_account(address)
+        .map_err(|err| BscBlockExecutionError::ProviderInnerError { error: Box::new(err.into()) })
+        .unwrap();
+    let account_change = account.change(
+        account.account_info().unwrap_or_default(),
+        storage
+            .iter()
+            .map(|(key, value)| {
+                (
+                    *key,
+                    StorageSlot { previous_or_original_value: U256::ZERO, present_value: *value },
+                )
+            })
+            .collect(),
+    );
+
+    state.apply_transition(vec![(address, account_change)]);
+}
diff --git a/crates/bsc/evm/src/post_execution.rs b/crates/bsc/evm/src/post_execution.rs
index a7057a21d..a68b811c3 100644
--- a/crates/bsc/evm/src/post_execution.rs
+++ b/crates/bsc/evm/src/post_execution.rs
@@ -1,10 +1,11 @@
-use crate::{BscBlockExecutionError, BscBlockExecutor};
+use crate::{BscBlockExecutionError, BscBlockExecutor, SnapshotReader};
 use bitset::BitSet;
 use reth_bsc_consensus::{
     get_top_validators_by_voting_power, is_breathe_block, ElectedValidators,
     ValidatorElectionInfo, COLLECT_ADDITIONAL_VOTES_REWARD_RATIO, DIFF_INTURN,
     MAX_SYSTEM_REWARD, SYSTEM_REWARD_PERCENT,
 };
 use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError};
+use reth_ethereum_forks::BscHardforks;
 use reth_evm::ConfigureEvm;
 use reth_primitives::{
     hex,
@@ -13,10 +14,10 @@ use reth_primitives::{
     Address, BlockWithSenders, GotExpected, Header, Receipt, TransactionSigned, U256,
 };
 use reth_provider::ParliaProvider;
-use reth_revm::{bsc::SYSTEM_ADDRESS, db::AccountStatus};
+use reth_revm::bsc::SYSTEM_ADDRESS;
 use revm_primitives::{db::Database, EnvWithHandlerCfg};
 use std::collections::HashMap;
-use tracing::log::debug;
+use tracing::debug;

 /// Helper type for the input of post execution.
 #[allow(clippy::type_complexity)]
 pub(crate) struct PostExecutionInput {
@@ -30,7 +31,7 @@ impl<EvmConfig, DB, P> BscBlockExecutor<EvmConfig, DB, P>
 where
     EvmConfig: ConfigureEvm,
-    DB: Database<Error = ProviderError>,
+    DB: Database<Error: Into<ProviderError> + std::fmt::Display>,
     P: ParliaProvider,
 {
     /// Apply post execution state changes, including system txs and other state change.
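// Stepping back from the hunks below: the four patch helpers defined in
// patch_hertz.rs above are meant to bracket each transaction, consulting the
// *_BEFORE_TX map before a patched transaction executes and the *_AFTER_TX map
// (which zeroes the same slots) once it has been applied. A sketch of that
// call pattern, where `execute_transaction` is a placeholder for the
// executor's real per-transaction loop, not an API introduced by this diff:
//
//     for tx in block.transactions() {
//         self.patch_mainnet_before_tx(tx, &mut state); // pre-correct slots
//         execute_transaction(tx, &mut state)?;         // normal EVM execution
//         self.patch_mainnet_after_tx(tx, &mut state);  // zero them back out
//     }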
@@ -62,13 +63,12 @@ where )?; } - if self.parlia().chain_spec().is_feynman_active_at_timestamp(block.timestamp) { + if self.chain_spec().is_feynman_active_at_timestamp(block.timestamp) { // apply system contract upgrade self.upgrade_system_contracts(block.number, block.timestamp, parent.timestamp)?; } - if self.parlia().chain_spec().is_on_feynman_at_timestamp(block.timestamp, parent.timestamp) - { + if self.chain_spec().is_on_feynman_at_timestamp(block.timestamp, parent.timestamp) { self.init_feynman_contracts( validator, system_txs, @@ -81,7 +81,7 @@ where // slash validator if it's not inturn if block.difficulty != DIFF_INTURN { let spoiled_val = snap.inturn_validator(); - let signed_recently = if self.parlia().chain_spec().is_plato_active_at_block(number) { + let signed_recently = if self.chain_spec().is_plato_active_at_block(number) { snap.sign_recently(spoiled_val) } else { snap.recent_proposers.iter().any(|(_, v)| *v == spoiled_val) @@ -101,7 +101,7 @@ where self.distribute_incoming(header, system_txs, receipts, cumulative_gas_used, env.clone())?; - if self.parlia().chain_spec().is_plato_active_at_block(number) { + if self.chain_spec().is_plato_active_at_block(number) { self.distribute_finality_reward( header, system_txs, @@ -112,7 +112,7 @@ where } // update validator set after Feynman upgrade - if self.parlia().chain_spec().is_feynman_active_at_timestamp(header.timestamp) && + if self.chain_spec().is_feynman_active_at_timestamp(header.timestamp) && is_breathe_block(parent.timestamp, header.timestamp) && !self .parlia() @@ -159,7 +159,7 @@ where validators.sort(); let validator_num = validators.len(); - if self.parlia().chain_spec().is_on_luban_at_block(number) { + if self.chain_spec().is_on_luban_at_block(number) { vote_addrs_map = validators .iter() .cloned() @@ -171,7 +171,7 @@ where .into_iter() .flat_map(|v| { let mut bytes = v.to_vec(); - if self.parlia().chain_spec().is_luban_active_at_block(number) { + if self.chain_spec().is_luban_active_at_block(number) { bytes.extend_from_slice(vote_addrs_map[&v].as_ref()); } bytes @@ -265,24 +265,26 @@ where ) -> Result<(), BlockExecutionError> { let validator = header.beneficiary; - let system_account = self - .state - .load_cache_account(SYSTEM_ADDRESS) - .map_err(|err| BscBlockExecutionError::ProviderInnerError { error: err.into() })?; - if system_account.status == AccountStatus::LoadedNotExisting || - system_account.status == AccountStatus::DestroyedAgain + let system_account = self.state.load_cache_account(SYSTEM_ADDRESS).map_err(|err| { + BscBlockExecutionError::ProviderInnerError { error: Box::new(err.into()) } + })?; + + if header.number != 1 && + (system_account.account.is_none() || + system_account.account.as_ref().unwrap().info.balance == U256::ZERO) { return Ok(()); } let (mut block_reward, transition) = system_account.drain_balance(); self.state.apply_transition(vec![(SYSTEM_ADDRESS, transition)]); + + // if block reward is zero, no need to distribute if block_reward == 0 { return Ok(()); } - let mut balance_increment = HashMap::new(); - balance_increment.insert(validator, block_reward); + let balance_increment = HashMap::from([(validator, block_reward)]); self.state .increment_balances(balance_increment) .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; @@ -290,10 +292,13 @@ where let system_reward_balance = self .state .basic(SYSTEM_REWARD_CONTRACT.parse().unwrap()) + .map_err(|err| BscBlockExecutionError::ProviderInnerError { + error: Box::new(err.into()), + }) .unwrap() .unwrap_or_default() .balance; - if 
!self.parlia().chain_spec().is_kepler_active_at_timestamp(header.timestamp) && + if !self.chain_spec().is_kepler_active_at_timestamp(header.timestamp) && system_reward_balance < U256::from(MAX_SYSTEM_REWARD) { let reward_to_system = block_reward >> SYSTEM_REWARD_PERCENT; @@ -406,7 +411,8 @@ where ) -> Result<(), BlockExecutionError> { let justified_header = self.get_header_by_hash(attestation.data.target_hash)?; let parent = self.get_header_by_hash(justified_header.parent_hash)?; - let snapshot = self.snapshot(&parent, None)?; + let snapshot_reader = SnapshotReader::new(self.provider.clone(), self.parlia.clone()); + let snapshot = &(snapshot_reader.snapshot(&parent, None)?); let validators = &snapshot.validators; let validators_bit_set = BitSet::from_u64(attestation.vote_address_set); diff --git a/crates/bsc/evm/src/pre_execution.rs b/crates/bsc/evm/src/pre_execution.rs index 064114380..3f8931a04 100644 --- a/crates/bsc/evm/src/pre_execution.rs +++ b/crates/bsc/evm/src/pre_execution.rs @@ -1,4 +1,4 @@ -use crate::{BscBlockExecutionError, BscBlockExecutor}; +use crate::{BscBlockExecutionError, BscBlockExecutor, SnapshotReader}; use bitset::BitSet; use blst::{ min_pk::{PublicKey, Signature}, @@ -6,6 +6,7 @@ use blst::{ }; use reth_bsc_consensus::{DIFF_INTURN, DIFF_NOTURN}; use reth_errors::{BlockExecutionError, ProviderError}; +use reth_ethereum_forks::{BscHardforks, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_primitives::{ parlia::{Snapshot, VoteAddress, MAX_ATTESTATION_EXTRA_LENGTH}, @@ -19,7 +20,7 @@ const BLST_DST: &[u8] = b"BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_"; impl BscBlockExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + std::fmt::Display>, P: ParliaProvider, { /// Apply settings and verify headers before a new block is executed. @@ -55,7 +56,7 @@ where header: &Header, parent: &Header, ) -> Result<(), BlockExecutionError> { - if self.parlia().chain_spec().is_ramanujan_active_at_block(header.number) && + if self.chain_spec().is_ramanujan_active_at_block(header.number) && header.timestamp < parent.timestamp + self.parlia().period() + @@ -77,6 +78,10 @@ where header: &Header, parent: &Header, ) -> Result<(), BlockExecutionError> { + if !self.chain_spec().is_plato_active_at_block(header.number) { + return Ok(()); + } + let attestation = self.parlia().get_vote_attestation_from_header(header).map_err(|err| { BscBlockExecutionError::ParliaConsensusInnerError { error: err.into() } @@ -116,7 +121,8 @@ where // Get the target_number - 1 block's snapshot. let pre_target_header = &(self.get_header_by_hash(parent.parent_hash)?); - let snap = &(self.snapshot(pre_target_header, None)?); + let snapshot_reader = SnapshotReader::new(self.provider.clone(), self.parlia.clone()); + let snap = &(snapshot_reader.snapshot(pre_target_header, None)?); // query bls keys from snapshot. 
let validators_count = snap.validators.len(); diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index 1565567b5..ce8a79872 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -24,8 +24,13 @@ alloy-genesis.workspace = true alloy-primitives = { workspace = true, features = ["rand", "rlp"] } alloy-trie.workspace = true +# op +op-alloy-rpc-types = { workspace = true, optional = true } + + # misc once_cell.workspace = true +serde = { workspace = true, optional = true } serde_json.workspace = true derive_more.workspace = true @@ -39,13 +44,18 @@ alloy-genesis.workspace = true reth-rpc-types.workspace = true rand.workspace = true +# op +op-alloy-rpc-types.workspace = true + [features] +default = ["std"] bsc = [ "reth-ethereum-forks/bsc" ] -default = ["std"] optimism = [ - "reth-ethereum-forks/optimism" + "reth-ethereum-forks/optimism", + "serde", + "dep:op-alloy-rpc-types", ] opbnb = [ "reth-ethereum-forks/opbnb" diff --git a/crates/chainspec/src/constants/mod.rs b/crates/chainspec/src/constants/mod.rs index 9af4f946b..cde927189 100644 --- a/crates/chainspec/src/constants/mod.rs +++ b/crates/chainspec/src/constants/mod.rs @@ -10,40 +10,3 @@ pub(crate) const MAINNET_DEPOSIT_CONTRACT: DepositContract = DepositContract::ne #[cfg(feature = "optimism")] pub(crate) mod optimism; - -#[cfg(test)] -mod tests { - use alloy_eips::calc_next_block_base_fee; - - #[test] - fn calculate_base_fee_success() { - let base_fee = [ - 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, - 1, 2, - ]; - let gas_used = [ - 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, - 10000000, - ]; - let gas_limit = [ - 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, - 18000000, 18000000, - ]; - let next_base_fee = [ - 1125000000, 1083333333, 1053571428, 1179939062, 1116028649, 918084097, 1063811730, 1, - 2, 3, - ]; - - for i in 0..base_fee.len() { - assert_eq!( - next_base_fee[i], - calc_next_block_base_fee( - gas_used[i] as u128, - gas_limit[i] as u128, - base_fee[i] as u128, - crate::BaseFeeParams::ethereum(), - ) as u64 - ); - } - } -} diff --git a/crates/chainspec/src/constants/optimism.rs b/crates/chainspec/src/constants/optimism.rs index d4a1de6d0..1c32df6f3 100644 --- a/crates/chainspec/src/constants/optimism.rs +++ b/crates/chainspec/src/constants/optimism.rs @@ -44,105 +44,3 @@ pub(crate) const OP_CANYON_BASE_FEE_PARAMS: BaseFeeParams = BaseFeeParams { max_change_denominator: OP_MAINNET_EIP1559_BASE_FEE_MAX_CHANGE_DENOMINATOR_CANYON, elasticity_multiplier: OP_MAINNET_EIP1559_DEFAULT_ELASTICITY_MULTIPLIER, }; - -#[cfg(test)] -mod tests { - use super::*; - use alloy_eips::calc_next_block_base_fee; - - #[test] - fn calculate_optimism_base_fee_success() { - let base_fee = [ - 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, - 1, 2, - ]; - let gas_used = [ - 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, - 10000000, - ]; - let gas_limit = [ - 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, - 18000000, 18000000, - ]; - let next_base_fee = [ - 1100000048, 1080000000, 1065714297, 1167067046, 1128881311, 1028254188, 1098203452, 1, - 2, 3, - ]; - - for i in 0..base_fee.len() { - assert_eq!( - next_base_fee[i], - calc_next_block_base_fee( - gas_used[i] as u128, - gas_limit[i] as u128, - base_fee[i] as u128, - OP_BASE_FEE_PARAMS, - ) as u64 - ); - } - } - - #[test] - fn 
calculate_optimism_sepolia_base_fee_success() { - let base_fee = [ - 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, - 1, 2, - ]; - let gas_used = [ - 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, - 10000000, - ]; - let gas_limit = [ - 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, - 18000000, 18000000, - ]; - let next_base_fee = [ - 1100000048, 1080000000, 1065714297, 1167067046, 1128881311, 1028254188, 1098203452, 1, - 2, 3, - ]; - - for i in 0..base_fee.len() { - assert_eq!( - next_base_fee[i], - calc_next_block_base_fee( - gas_used[i] as u128, - gas_limit[i] as u128, - base_fee[i] as u128, - OP_SEPOLIA_BASE_FEE_PARAMS, - ) as u64 - ); - } - } - - #[test] - fn calculate_base_sepolia_base_fee_success() { - let base_fee = [ - 1000000000, 1000000000, 1000000000, 1072671875, 1059263476, 1049238967, 1049238967, 0, - 1, 2, - ]; - let gas_used = [ - 10000000, 10000000, 10000000, 9000000, 10001000, 0, 10000000, 10000000, 10000000, - 10000000, - ]; - let gas_limit = [ - 10000000, 12000000, 14000000, 10000000, 14000000, 2000000, 18000000, 18000000, - 18000000, 18000000, - ]; - let next_base_fee = [ - 1180000000, 1146666666, 1122857142, 1244299375, 1189416692, 1028254188, 1144836295, 1, - 2, 3, - ]; - - for i in 0..base_fee.len() { - assert_eq!( - next_base_fee[i], - calc_next_block_base_fee( - gas_used[i] as u128, - gas_limit[i] as u128, - base_fee[i] as u128, - BASE_SEPOLIA_BASE_FEE_PARAMS, - ) as u64 - ); - } - } -} diff --git a/crates/chainspec/src/lib.rs b/crates/chainspec/src/lib.rs index 6bd4bbe8b..f4f5020be 100644 --- a/crates/chainspec/src/lib.rs +++ b/crates/chainspec/src/lib.rs @@ -13,7 +13,7 @@ pub use alloy_chains::{Chain, ChainKind, NamedChain}; pub use info::ChainInfo; pub use spec::{ BaseFeeParams, BaseFeeParamsKind, ChainSpec, ChainSpecBuilder, DepositContract, - ForkBaseFeeParams, DEV, GOERLI, HOLESKY, MAINNET, SEPOLIA, + ForkBaseFeeParams, DEV, HOLESKY, MAINNET, SEPOLIA, }; #[cfg(feature = "optimism")] pub use spec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}; @@ -30,9 +30,6 @@ extern crate alloc; /// The chain info module. mod info; -/// Network related constants -pub mod net; - /// The chain spec module. 
mod spec; @@ -57,8 +54,8 @@ mod tests { #[test] fn test_named_id() { - let chain = Chain::from_named(NamedChain::Goerli); - assert_eq!(chain.id(), 5); + let chain = Chain::from_named(NamedChain::Holesky); + assert_eq!(chain.id(), 17000); } #[test] @@ -84,9 +81,9 @@ mod tests { #[test] fn test_into_u256() { - let chain = Chain::from_named(NamedChain::Goerli); + let chain = Chain::from_named(NamedChain::Holesky); let n: U256 = U256::from(chain.id()); - let expected = U256::from(5); + let expected = U256::from(17000); assert_eq!(n, expected); } diff --git a/crates/chainspec/src/net.rs b/crates/chainspec/src/net.rs deleted file mode 100644 index 7bbfa4412..000000000 --- a/crates/chainspec/src/net.rs +++ /dev/null @@ -1,308 +0,0 @@ -pub use reth_network_peers::{NodeRecord, NodeRecordParseError, TrustedPeer}; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - -// Ethereum bootnodes come from -// OP bootnodes come from - -/// Ethereum Foundation Go Bootnodes -pub static MAINNET_BOOTNODES: [&str; 4] = [ - "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", // bootnode-aws-ap-southeast-1-001 - "enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303", // bootnode-aws-us-east-1-001 - "enode://2b252ab6a1d0f971d9722cb839a42cb81db019ba44c08754628ab4a823487071b5695317c8ccd085219c3a03af063495b2f1da8d18218da2d6a82981b45e6ffc@65.108.70.101:30303", // bootnode-hetzner-hel - "enode://4aeb4ab6c14b23e2c4cfdce879c04b0748a20d8e9b59e25ded2a08143e265c6c25936e74cbc8e641e3312ca288673d91f2f93f8e277de3cfa444ecdaaf982052@157.90.35.166:30303", // bootnode-hetzner-fsn -]; - -/// Ethereum Foundation Sepolia Bootnodes -pub static SEPOLIA_BOOTNODES: [&str; 5] = [ - "enode://4e5e92199ee224a01932a377160aa432f31d0b351f84ab413a8e0a42f4f36476f8fb1cbe914af0d9aef0d51665c214cf653c651c4bbd9d5550a934f241f1682b@138.197.51.181:30303", // sepolia-bootnode-1-nyc3 - "enode://143e11fb766781d22d92a2e33f8f104cddae4411a122295ed1fdb6638de96a6ce65f5b7c964ba3763bba27961738fef7d3ecc739268f3e5e771fb4c87b6234ba@146.190.1.103:30303", // sepolia-bootnode-1-sfo3 - "enode://8b61dc2d06c3f96fddcbebb0efb29d60d3598650275dc469c22229d3e5620369b0d3dedafd929835fe7f489618f19f456fe7c0df572bf2d914a9f4e006f783a9@170.64.250.88:30303", // sepolia-bootnode-1-syd1 - "enode://10d62eff032205fcef19497f35ca8477bea0eadfff6d769a147e895d8b2b8f8ae6341630c645c30f5df6e67547c03494ced3d9c5764e8622a26587b083b028e8@139.59.49.206:30303", // sepolia-bootnode-1-blr1 - "enode://9e9492e2e8836114cc75f5b929784f4f46c324ad01daf87d956f98b3b6c5fcba95524d6e5cf9861dc96a2c8a171ea7105bb554a197455058de185fa870970c7c@138.68.123.152:30303", // sepolia-bootnode-1-ams3 -]; - -/// Görli Bootnodes -pub static GOERLI_BOOTNODES: [&str; 7] = [ - // Upstream bootnodes - "enode://011f758e6552d105183b1761c5e2dea0111bc20fd5f6422bc7f91e0fabbec9a6595caf6239b37feb773dddd3f87240d99d859431891e4a642cf2a0a9e6cbb98a@51.141.78.53:30303", - "enode://176b9417f511d05b6b2cf3e34b756cf0a7096b3094572a8f6ef4cdcb9d1f9d00683bf0f83347eebdf3b81c3521c2332086d9592802230bf528eaf606a1d9677b@13.93.54.137:30303", - "enode://46add44b9f13965f7b9875ac6b85f016f341012d84f975377573800a863526f4da19ae2c620ec73d11591fa9510e992ecc03ad0751f53cc02f7c7ed6d55c7291@94.237.54.114:30313", - 
"enode://b5948a2d3e9d486c4d75bf32713221c2bd6cf86463302339299bd227dc2e276cd5a1c7ca4f43a0e9122fe9af884efed563bd2a1fd28661f3b5f5ad7bf1de5949@18.218.250.66:30303", - - // Ethereum Foundation bootnode - "enode://a61215641fb8714a373c80edbfa0ea8878243193f57c96eeb44d0bc019ef295abd4e044fd619bfc4c59731a73fb79afe84e9ab6da0c743ceb479cbb6d263fa91@3.11.147.67:30303", - - // Goerli Initiative bootnodes - "enode://d4f764a48ec2a8ecf883735776fdefe0a3949eb0ca476bd7bc8d0954a9defe8fea15ae5da7d40b5d2d59ce9524a99daedadf6da6283fca492cc80b53689fb3b3@46.4.99.122:32109", - "enode://d2b720352e8216c9efc470091aa91ddafc53e222b32780f505c817ceef69e01d5b0b0797b69db254c586f493872352f5a022b4d8479a00fc92ec55f9ad46a27e@88.99.70.182:30303", -]; - -/// Ethereum Foundation Holesky Bootnodes -pub static HOLESKY_BOOTNODES: [&str; 2] = [ - "enode://ac906289e4b7f12df423d654c5a962b6ebe5b3a74cc9e06292a85221f9a64a6f1cfdd6b714ed6dacef51578f92b34c60ee91e9ede9c7f8fadc4d347326d95e2b@146.190.13.128:30303", - "enode://a3435a0155a3e837c02f5e7f5662a2f1fbc25b48e4dc232016e1c51b544cb5b4510ef633ea3278c0e970fa8ad8141e2d4d0f9f95456c537ff05fdf9b31c15072@178.128.136.233:30303", -]; - -#[cfg(feature = "optimism")] -/// OP stack mainnet boot nodes. -pub static OP_BOOTNODES: &[&str] = &[ - // OP Labs - "enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305", - "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305", - "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305", - // Base - "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301", - "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301", - "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301", - "enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301", - "enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301" -]; - -#[cfg(feature = "optimism")] -/// OP stack testnet boot nodes. 
-pub static OP_TESTNET_BOOTNODES: &[&str] = &[ - // OP Labs - "enode://2bd2e657bb3c8efffb8ff6db9071d9eb7be70d7c6d7d980ff80fc93b2629675c5f750bc0a5ef27cd788c2e491b8795a7e9a4a6e72178c14acc6753c0e5d77ae4@34.65.205.244:30305", - "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305", - "enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305", - // Base - "enode://548f715f3fc388a7c917ba644a2f16270f1ede48a5d88a4d14ea287cc916068363f3092e39936f1a3e7885198bef0e5af951f1d7b1041ce8ba4010917777e71f@18.210.176.114:30301", - "enode://6f10052847a966a725c9f4adf6716f9141155b99a0fb487fea3f51498f4c2a2cb8d534e680ee678f9447db85b93ff7c74562762c3714783a7233ac448603b25f@107.21.251.55:30301", -]; - -#[cfg(feature = "bsc")] -/// Bsc testnet boot nodes. -pub static BSC_MAINNET_BOOTNODES: &[&str] = &[ - "enode://433c8bfdf53a3e2268ccb1b829e47f629793291cbddf0c76ae626da802f90532251fc558e2e0d10d6725e759088439bf1cd4714716b03a259a35d4b2e4acfa7f@52.69.102.73:30311", - "enode://571bee8fb902a625942f10a770ccf727ae2ba1bab2a2b64e121594a99c9437317f6166a395670a00b7d93647eacafe598b6bbcef15b40b6d1a10243865a3e80f@35.73.84.120:30311", - "enode://fac42fb0ba082b7d1eebded216db42161163d42e4f52c9e47716946d64468a62da4ba0b1cac0df5e8bf1e5284861d757339751c33d51dfef318be5168803d0b5@18.203.152.54:30311", - "enode://3063d1c9e1b824cfbb7c7b6abafa34faec6bb4e7e06941d218d760acdd7963b274278c5c3e63914bd6d1b58504c59ec5522c56f883baceb8538674b92da48a96@34.250.32.100:30311", - "enode://ad78c64a4ade83692488aa42e4c94084516e555d3f340d9802c2bf106a3df8868bc46eae083d2de4018f40e8d9a9952c32a0943cd68855a9bc9fd07aac982a6d@34.204.214.24:30311", - "enode://5db798deb67df75d073f8e2953dad283148133acb520625ea804c9c4ad09a35f13592a762d8f89056248f3889f6dcc33490c145774ea4ff2966982294909b37a@107.20.191.97:30311", -]; - -#[cfg(feature = "bsc")] -/// Bsc testnet boot nodes. -pub static BSC_TESTNET_BOOTNODES: &[&str] = &[ - "enode://0637d1e62026e0c8685b1db0ca1c767c78c95c3fab64abc468d1a64b12ca4b530b46b8f80c915aec96f74f7ffc5999e8ad6d1484476f420f0c10e3d42361914b@52.199.214.252:30311", - "enode://df1e8eb59e42cad3c4551b2a53e31a7e55a2fdde1287babd1e94b0836550b489ba16c40932e4dacb16cba346bd442c432265a299c4aca63ee7bb0f832b9f45eb@52.51.80.128:30311", - "enode://ecd664250ca19b1074dcfbfb48576a487cc18d052064222a363adacd2650f8e08fb3db9de7a7aecb48afa410eaeb3285e92e516ead01fb62598553aed91ee15e@3.209.122.123:30311", - "enode://665cf77ca26a8421cfe61a52ac312958308d4912e78ce8e0f61d6902e4494d4cc38f9b0dd1b23a427a7a5734e27e5d9729231426b06bb9c73b56a142f83f6b68@52.72.123.113:30311", -]; - -#[cfg(all(feature = "optimism", feature = "opbnb"))] -/// OPBNB mainnet boot nodes. -pub static OPBNB_MAINNET_BOOTNODES: &[&str] = &[ - "enode://db109c6cac5c8b6225edd3176fc3764c58e0720950fe94c122c80978e706a9c9e976629b718e48b6306ea0f9126e5394d3424c9716c5703549e2e7eba216353b@52.193.218.151:30304", - "enode://afe18782053bb31fb7ea41e1acf659ab9bd1eec181fb97331f0a6b61871a469b4f75138f903c977796be1cc2a3c985d33150a396e878d3cd6e4723b6040ff9c0@52.195.105.192:30304", -]; - -#[cfg(all(feature = "optimism", feature = "opbnb"))] -/// OPBNB testnet boot nodes. 
-pub static OPBNB_TESTNET_BOOTNODES: &[&str] = &[ - "enode://217cfe091047a1c3f490e96d51e2f3bd90517a9be77b8a6033b31833a193aa6c33b6d07088c4980f462162635ffbccaa413dc28cb14c4f2b96af0dd97292411f@13.112.117.88:30304", - "enode://38c8913f87d64179bac23514ddb56a17f5b28f7e253b3825a10a2c8b9553c5df7d3b6c83a96948ad0466f384bf63236fd5e6bed6d6402156749b6b0899c82d47@54.199.235.83:30304", -]; - -/// Returns parsed mainnet nodes -pub fn mainnet_nodes() -> Vec { - parse_nodes(&MAINNET_BOOTNODES[..]) -} - -/// Returns parsed goerli nodes -pub fn goerli_nodes() -> Vec { - parse_nodes(&GOERLI_BOOTNODES[..]) -} - -/// Returns parsed sepolia nodes -pub fn sepolia_nodes() -> Vec { - parse_nodes(&SEPOLIA_BOOTNODES[..]) -} - -/// Returns parsed holesky nodes -pub fn holesky_nodes() -> Vec { - parse_nodes(&HOLESKY_BOOTNODES[..]) -} - -#[cfg(feature = "optimism")] -/// Returns parsed op-stack mainnet nodes -pub fn op_nodes() -> Vec { - parse_nodes(OP_BOOTNODES) -} - -#[cfg(feature = "optimism")] -/// Returns parsed op-stack testnet nodes -pub fn op_testnet_nodes() -> Vec { - parse_nodes(OP_TESTNET_BOOTNODES) -} - -#[cfg(all(feature = "optimism", feature = "opbnb"))] -/// Returns parsed opbnb testnet nodes -pub fn opbnb_testnet_nodes() -> Vec { - parse_nodes(OPBNB_TESTNET_BOOTNODES) -} - -#[cfg(all(feature = "optimism", feature = "opbnb"))] -/// Returns parsed opbnb mainnet nodes -pub fn opbnb_mainnet_nodes() -> Vec { - parse_nodes(OPBNB_MAINNET_BOOTNODES) -} - -#[cfg(feature = "optimism")] -/// Returns parsed op-stack base mainnet nodes -pub fn base_nodes() -> Vec { - parse_nodes(OP_BOOTNODES) -} - -#[cfg(feature = "optimism")] -/// Returns parsed op-stack base testnet nodes -pub fn base_testnet_nodes() -> Vec { - parse_nodes(OP_TESTNET_BOOTNODES) -} - -#[cfg(feature = "bsc")] -/// Returns parsed bsc mainnet nodes -pub fn bsc_mainnet_nodes() -> Vec { - parse_nodes(BSC_MAINNET_BOOTNODES) -} - -#[cfg(feature = "bsc")] -/// Returns parsed bsc mainnet nodes -pub fn bsc_testnet_nodes() -> Vec { - parse_nodes(BSC_TESTNET_BOOTNODES) -} - -/// Parses all the nodes -pub fn parse_nodes(nodes: impl IntoIterator>) -> Vec { - nodes.into_iter().map(|s| s.as_ref().parse().unwrap()).collect() -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_rlp::Decodable; - use rand::{thread_rng, Rng, RngCore}; - use std::net::{IpAddr, Ipv4Addr}; - - #[test] - fn test_mapped_ipv6() { - let mut rng = thread_rng(); - - let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); - let v6 = v4.to_ipv6_mapped(); - - let record = NodeRecord { - address: v6.into(), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - assert!(record.clone().convert_ipv4_mapped()); - assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); - } - - #[test] - fn test_mapped_ipv4() { - let mut rng = thread_rng(); - let v4: Ipv4Addr = "0.0.0.0".parse().unwrap(); - - let record = NodeRecord { - address: v4.into(), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - assert!(!record.clone().convert_ipv4_mapped()); - assert_eq!(record.into_ipv4_mapped().address, IpAddr::from(v4)); - } - - #[test] - fn test_noderecord_codec_ipv4() { - let mut rng = thread_rng(); - for _ in 0..100 { - let mut ip = [0u8; 4]; - rng.fill_bytes(&mut ip); - let record = NodeRecord { - address: IpAddr::V4(ip.into()), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); - assert_eq!(record, decoded); - } - } - - #[test] - fn test_noderecord_codec_ipv6() { - let 
mut rng = thread_rng(); - for _ in 0..100 { - let mut ip = [0u8; 16]; - rng.fill_bytes(&mut ip); - let record = NodeRecord { - address: IpAddr::V6(ip.into()), - tcp_port: rng.gen(), - udp_port: rng.gen(), - id: rng.gen(), - }; - - let decoded = NodeRecord::decode(&mut alloy_rlp::encode(record).as_slice()).unwrap(); - assert_eq!(record, decoded); - } - } - - #[test] - fn test_url_parse() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(node, NodeRecord { - address: IpAddr::V4([10, 3, 58, 6].into()), - tcp_port: 30303, - udp_port: 30301, - id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), - }) - } - - #[test] - fn test_node_display() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(url, &format!("{node}")); - } - - #[test] - fn test_node_display_discport() { - let url = "enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301"; - let node: NodeRecord = url.parse().unwrap(); - assert_eq!(url, &format!("{node}")); - } - - #[test] - fn test_node_serialize() { - let node = NodeRecord { - address: IpAddr::V4([10, 3, 58, 6].into()), - tcp_port: 30303u16, - udp_port: 30301u16, - id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), - }; - let ser = serde_json::to_string::(&node).expect("couldn't serialize"); - assert_eq!(ser, "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\"") - } - - #[test] - fn test_node_deserialize() { - let url = "\"enode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@10.3.58.6:30303?discport=30301\""; - let node: NodeRecord = serde_json::from_str(url).expect("couldn't deserialize"); - assert_eq!(node, NodeRecord { - address: IpAddr::V4([10, 3, 58, 6].into()), - tcp_port: 30303u16, - udp_port: 30301u16, - id: "6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0".parse().unwrap(), - }) - } -} diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index b03df5295..f010c540c 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -1,12 +1,6 @@ use crate::constants::MAINNET_DEPOSIT_CONTRACT; #[cfg(not(feature = "std"))] -use alloc::{ - collections::BTreeMap, - format, - string::{String, ToString}, - sync::Arc, - vec::Vec, -}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_chains::{Chain, ChainKind, NamedChain}; use alloy_genesis::Genesis; use alloy_primitives::{address, b256, Address, BlockNumber, B256, U256}; @@ -14,20 +8,20 @@ use alloy_trie::EMPTY_ROOT_HASH; use derive_more::From; use once_cell::sync::Lazy; use reth_ethereum_forks::{ - chains::ethereum::{GOERLI_HARDFORKS, HOLESKY_HARDFORKS, MAINNET_HARDFORKS, SEPOLIA_HARDFORKS}, - DisplayHardforks, ForkCondition, 
ForkFilter, ForkFilterKey, ForkHash, ForkId, Hardfork, Head, + ChainHardforks, DisplayHardforks, EthereumHardfork, EthereumHardforks, ForkCondition, + ForkFilter, ForkFilterKey, ForkHash, ForkId, Hardfork, Head, DEV_HARDFORKS, }; use reth_network_peers::NodeRecord; use reth_primitives_traits::{ constants::{ - EIP1559_INITIAL_BASE_FEE, EMPTY_OMMER_ROOT_HASH, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS, - EMPTY_WITHDRAWALS, + DEV_GENESIS_HASH, EIP1559_INITIAL_BASE_FEE, EMPTY_WITHDRAWALS, HOLESKY_GENESIS_HASH, + MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, }, Header, SealedHeader, }; use reth_trie_common::root::state_root_ref_unhashed; #[cfg(feature = "std")] -use std::{collections::BTreeMap, sync::Arc}; +use std::sync::Arc; #[cfg(feature = "optimism")] use crate::constants::optimism::{ @@ -35,15 +29,18 @@ use crate::constants::optimism::{ OP_CANYON_BASE_FEE_PARAMS, OP_SEPOLIA_BASE_FEE_PARAMS, OP_SEPOLIA_CANYON_BASE_FEE_PARAMS, }; pub use alloy_eips::eip1559::BaseFeeParams; +#[cfg(feature = "bsc")] +use reth_ethereum_forks::BscHardfork; #[cfg(feature = "optimism")] -use reth_ethereum_forks::chains::optimism::*; - -#[cfg(feature = "optimism")] -use crate::net::{base_nodes, base_testnet_nodes, op_nodes, op_testnet_nodes}; -use crate::net::{goerli_nodes, holesky_nodes, mainnet_nodes, sepolia_nodes}; - +use reth_ethereum_forks::OptimismHardfork; +use reth_network_peers::{ + base_nodes, base_testnet_nodes, holesky_nodes, mainnet_nodes, op_nodes, op_testnet_nodes, + sepolia_nodes, +}; #[cfg(feature = "bsc")] -pub(crate) use crate::net::{bsc_mainnet_nodes, bsc_testnet_nodes}; +use reth_network_peers::{bsc_mainnet_nodes, bsc_testnet_nodes}; +#[cfg(feature = "optimism")] +use reth_network_peers::{opbnb_mainnet_nodes, opbnb_testnet_nodes}; /// The BSC mainnet spec #[cfg(feature = "bsc")] @@ -56,38 +53,7 @@ pub static BSC_MAINNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| { "0d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5b" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: BTreeMap::from([ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Ramanujan, ForkCondition::Block(0)), - (Hardfork::Niels, ForkCondition::Block(0)), - (Hardfork::MirrorSync, ForkCondition::Block(5184000)), - (Hardfork::Bruno, ForkCondition::Block(13082000)), - (Hardfork::Euler, ForkCondition::Block(18907621)), - (Hardfork::Nano, ForkCondition::Block(21962149)), - (Hardfork::Moran, ForkCondition::Block(22107423)), - (Hardfork::Gibbs, ForkCondition::Block(23846001)), - (Hardfork::Planck, ForkCondition::Block(27281024)), - (Hardfork::Luban, ForkCondition::Block(29020050)), - (Hardfork::Plato, ForkCondition::Block(30720096)), - (Hardfork::Berlin, ForkCondition::Block(31302048)), - (Hardfork::London, ForkCondition::Block(31302048)), - (Hardfork::Hertz, ForkCondition::Block(31302048)), - (Hardfork::HertzFix, ForkCondition::Block(34140700)), - (Hardfork::Shanghai, ForkCondition::Timestamp(1705996800)), - (Hardfork::Kepler, ForkCondition::Timestamp(1705996800)), - (Hardfork::Feynman, ForkCondition::Timestamp(1713419340)), - (Hardfork::FeynmanFix, ForkCondition::Timestamp(1713419340)), - (Hardfork::Cancun,
ForkCondition::Timestamp(1718863500)), - (Hardfork::Haber, ForkCondition::Timestamp(1718863500)), - ]), + hardforks: BscHardfork::bsc_mainnet(), deposit_contract: None, base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::new(1, 1)), prune_delete_limit: 3500, @@ -106,39 +72,7 @@ pub static BSC_TESTNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| { "6d3c66c5357ec91d5c43af47e234a939b22557cbb552dc45bebbceeed90fbe34" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: BTreeMap::from([ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Ramanujan, ForkCondition::Block(1010000)), - (Hardfork::Niels, ForkCondition::Block(1014369)), - (Hardfork::MirrorSync, ForkCondition::Block(5582500)), - (Hardfork::Bruno, ForkCondition::Block(13837000)), - (Hardfork::Euler, ForkCondition::Block(19203503)), - (Hardfork::Gibbs, ForkCondition::Block(22800220)), - (Hardfork::Nano, ForkCondition::Block(23482428)), - (Hardfork::Moran, ForkCondition::Block(23603940)), - (Hardfork::Planck, ForkCondition::Block(28196022)), - (Hardfork::Luban, ForkCondition::Block(29295050)), - (Hardfork::Plato, ForkCondition::Block(29861024)), - (Hardfork::Berlin, ForkCondition::Block(31103030)), - (Hardfork::London, ForkCondition::Block(31103030)), - (Hardfork::Hertz, ForkCondition::Block(31103030)), - (Hardfork::HertzFix, ForkCondition::Block(35682300)), - (Hardfork::Shanghai, ForkCondition::Timestamp(1702972800)), - (Hardfork::Kepler, ForkCondition::Timestamp(1702972800)), - (Hardfork::Feynman, ForkCondition::Timestamp(1710136800)), - (Hardfork::FeynmanFix, ForkCondition::Timestamp(1711342800)), - (Hardfork::Cancun, ForkCondition::Timestamp(1713330442)), - (Hardfork::Haber, ForkCondition::Timestamp(1716962820)), - (Hardfork::HaberFix, ForkCondition::Timestamp(1719986788)), - ]), + hardforks: BscHardfork::bsc_testnet(), deposit_contract: None, base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::new(1, 1)), prune_delete_limit: 3500, @@ -146,24 +80,19 @@ pub static BSC_TESTNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| { .into() }); -#[cfg(all(feature = "optimism", feature = "opbnb"))] -pub(crate) use crate::net::{opbnb_mainnet_nodes, opbnb_testnet_nodes}; - /// The Ethereum mainnet spec pub static MAINNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| { ChainSpec { chain: Chain::mainnet(), genesis: serde_json::from_str(include_str!("../res/genesis/mainnet.json")) .expect("Can't deserialize Mainnet genesis json"), - genesis_hash: Some(b256!( - "d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" - )), + genesis_hash: Some(MAINNET_GENESIS_HASH), // paris_block_and_final_difficulty: Some(( 15537394, U256::from(58_750_003_716_598_352_816_469u128), )), - hardforks: MAINNET_HARDFORKS.into(), + hardforks: EthereumHardfork::mainnet().into(), // https://etherscan.io/tx/0xe75fb554e433e03763a1560646ee22dcb74e5274b34c5ad644e7c0f619a7e1d0 deposit_contract: Some(DepositContract::new( address!("00000000219ab540356cbb839cbe05303d7705fa"), @@ -176,42 +105,16 @@ pub static MAINNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| { .into() }); -/// The Goerli spec -pub static GOERLI: Lazy<Arc<ChainSpec>> = Lazy::new(|| { - ChainSpec { - chain: Chain::goerli(), - genesis:
serde_json::from_str(include_str!("../res/genesis/goerli.json")) - .expect("Can't deserialize Goerli genesis json"), - genesis_hash: Some(b256!( - "bf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a" - )), - // - paris_block_and_final_difficulty: Some((7382818, U256::from(10_790_000))), - hardforks: GOERLI_HARDFORKS.into(), - // https://goerli.etherscan.io/tx/0xa3c07dc59bfdb1bfc2d50920fed2ef2c1c4e0a09fe2325dbc14e07702f965a78 - deposit_contract: Some(DepositContract::new( - address!("ff50ed3d0ec03ac01d4c79aad74928bff48a7b2b"), - 4367322, - b256!("649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5"), - )), - base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), - prune_delete_limit: 1700, - } - .into() -}); - /// The Sepolia spec pub static SEPOLIA: Lazy<Arc<ChainSpec>> = Lazy::new(|| { ChainSpec { chain: Chain::sepolia(), genesis: serde_json::from_str(include_str!("../res/genesis/sepolia.json")) .expect("Can't deserialize Sepolia genesis json"), - genesis_hash: Some(b256!( - "25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9" - )), + genesis_hash: Some(SEPOLIA_GENESIS_HASH), // paris_block_and_final_difficulty: Some((1450409, U256::from(17_000_018_015_853_232u128))), - hardforks: SEPOLIA_HARDFORKS.into(), + hardforks: EthereumHardfork::sepolia().into(), // https://sepolia.etherscan.io/tx/0x025ecbf81a2f1220da6285d1701dc89fb5a956b62562ee922e1a9efd73eb4b14 deposit_contract: Some(DepositContract::new( address!("7f02c3e3c98b133055b8b348b2ac625669ed295d"), @@ -230,11 +133,9 @@ pub static HOLESKY: Lazy<Arc<ChainSpec>> = Lazy::new(|| { chain: Chain::holesky(), genesis: serde_json::from_str(include_str!("../res/genesis/holesky.json")) .expect("Can't deserialize Holesky genesis json"), - genesis_hash: Some(b256!( - "b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4" - )), + genesis_hash: Some(HOLESKY_GENESIS_HASH), paris_block_and_final_difficulty: Some((0, U256::from(1))), - hardforks: HOLESKY_HARDFORKS.into(), + hardforks: EthereumHardfork::holesky().into(), deposit_contract: Some(DepositContract::new( address!("4242424242424242424242424242424242424242"), 0, @@ -255,36 +156,9 @@ pub static DEV: Lazy<Arc<ChainSpec>> = Lazy::new(|| { chain: Chain::dev(), genesis: serde_json::from_str(include_str!("../res/genesis/dev.json")) .expect("Can't deserialize Dev testnet genesis json"), - genesis_hash: Some(b256!( - "2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c" - )), + genesis_hash: Some(DEV_GENESIS_HASH), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: BTreeMap::from([ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Dao, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(0)), - (Hardfork::London, ForkCondition::Block(0)), - ( - Hardfork::Paris, - ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::from(0) }, - ), - (Hardfork::Shanghai, ForkCondition::Timestamp(0)), - (Hardfork::Cancun, ForkCondition::Timestamp(0)), - #[cfg(feature = "optimism")] - (Hardfork::Regolith, ForkCondition::Timestamp(0)), - #[cfg(feature = "optimism")] - (Hardfork::Bedrock, ForkCondition::Block(0)), -
#[cfg(feature = "optimism")] - (Hardfork::Ecotone, ForkCondition::Timestamp(0)), - ]), + hardforks: DEV_HARDFORKS.clone(), base_fee_params: BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), deposit_contract: None, // TODO: do we even have? ..Default::default() @@ -305,11 +179,11 @@ pub static OP_MAINNET: Lazy> = Lazy::new(|| { "7ca38a1916c42007829c55e69d3e9a73265554b586a499015373241b8a3fa48b" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: OP_MAINNET_HARDFORKS.into(), + hardforks: OptimismHardfork::op_mainnet(), base_fee_params: BaseFeeParamsKind::Variable( vec![ - (Hardfork::London, OP_BASE_FEE_PARAMS), - (Hardfork::Canyon, OP_CANYON_BASE_FEE_PARAMS), + (EthereumHardfork::London.boxed(), OP_BASE_FEE_PARAMS), + (OptimismHardfork::Canyon.boxed(), OP_CANYON_BASE_FEE_PARAMS), ] .into(), ), @@ -330,11 +204,11 @@ pub static OP_SEPOLIA: Lazy> = Lazy::new(|| { "102de6ffb001480cc9b8b548fd05c34cd4f46ae4aa91759393db90ea0409887d" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: OP_SEPOLIA_HARDFORKS.into(), + hardforks: OptimismHardfork::op_sepolia(), base_fee_params: BaseFeeParamsKind::Variable( vec![ - (Hardfork::London, OP_SEPOLIA_BASE_FEE_PARAMS), - (Hardfork::Canyon, OP_SEPOLIA_CANYON_BASE_FEE_PARAMS), + (EthereumHardfork::London.boxed(), OP_SEPOLIA_BASE_FEE_PARAMS), + (OptimismHardfork::Canyon.boxed(), OP_SEPOLIA_CANYON_BASE_FEE_PARAMS), ] .into(), ), @@ -355,11 +229,11 @@ pub static BASE_SEPOLIA: Lazy> = Lazy::new(|| { "0dcc9e089e30b90ddfc55be9a37dd15bc551aeee999d2e2b51414c54eaf934e4" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: BASE_SEPOLIA_HARDFORKS.into(), + hardforks: OptimismHardfork::base_sepolia(), base_fee_params: BaseFeeParamsKind::Variable( vec![ - (Hardfork::London, BASE_SEPOLIA_BASE_FEE_PARAMS), - (Hardfork::Canyon, BASE_SEPOLIA_CANYON_BASE_FEE_PARAMS), + (EthereumHardfork::London.boxed(), BASE_SEPOLIA_BASE_FEE_PARAMS), + (OptimismHardfork::Canyon.boxed(), BASE_SEPOLIA_CANYON_BASE_FEE_PARAMS), ] .into(), ), @@ -380,11 +254,11 @@ pub static BASE_MAINNET: Lazy> = Lazy::new(|| { "f712aa9241cc24369b143cf6dce85f0902a9731e70d66818a3a5845b296c73dd" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: BASE_MAINNET_HARDFORKS.into(), + hardforks: OptimismHardfork::base_mainnet(), base_fee_params: BaseFeeParamsKind::Variable( vec![ - (Hardfork::London, OP_BASE_FEE_PARAMS), - (Hardfork::Canyon, OP_CANYON_BASE_FEE_PARAMS), + (EthereumHardfork::London.boxed(), OP_BASE_FEE_PARAMS), + (OptimismHardfork::Canyon.boxed(), OP_CANYON_BASE_FEE_PARAMS), ] .into(), ), @@ -405,36 +279,9 @@ pub static OPBNB_MAINNET: Lazy> = Lazy::new(|| { "4dd61178c8b0f01670c231597e7bcb368e84545acd46d940a896d6a791dd6df4" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: BTreeMap::from([ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(0)), - (Hardfork::London, ForkCondition::Block(0)), - (Hardfork::ArrowGlacier, ForkCondition::Block(0)), - (Hardfork::GrayGlacier, ForkCondition::Block(0)), - ( - Hardfork::Paris, - ForkCondition::TTD { 
fork_block: Some(0), total_difficulty: U256::from(0) }, - ), - (Hardfork::Bedrock, ForkCondition::Block(0)), - (Hardfork::Regolith, ForkCondition::Timestamp(0)), - (Hardfork::Fermat, ForkCondition::Timestamp(1701151200)), - (Hardfork::Shanghai, ForkCondition::Timestamp(1718870400)), /* Jun-20-2024 08:00 AM - * +UTC */ - (Hardfork::Canyon, ForkCondition::Timestamp(1718870400)), // Jun-20-2024 08:00 AM +UTC - (Hardfork::Cancun, ForkCondition::Timestamp(1718871600)), // Jun-20-2024 08:20 AM +UTC - (Hardfork::Ecotone, ForkCondition::Timestamp(1718871600)), // Jun-20-2024 08:20 AM +UTC - (Hardfork::Haber, ForkCondition::Timestamp(1718872200)), // Jun-20-2024 08:30 AM +UTC - ]), + hardforks: OptimismHardfork::opbnb_mainnet(), base_fee_params: BaseFeeParamsKind::Variable( - vec![(Hardfork::London, BaseFeeParams::ethereum())].into(), + vec![(EthereumHardfork::London.boxed(), BaseFeeParams::ethereum())].into(), ), prune_delete_limit: 0, ..Default::default() @@ -453,36 +300,9 @@ pub static OPBNB_TESTNET: Lazy<Arc<ChainSpec>> = Lazy::new(|| { "51fa57729dfb1c27542c21b06cb72a0459c57440ceb43a465dae1307cd04fe80" )), paris_block_and_final_difficulty: Some((0, U256::from(0))), - hardforks: BTreeMap::from([ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(0)), - (Hardfork::London, ForkCondition::Block(0)), - (Hardfork::ArrowGlacier, ForkCondition::Block(0)), - (Hardfork::GrayGlacier, ForkCondition::Block(0)), - ( - Hardfork::Paris, - ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::from(0) }, - ), - (Hardfork::Bedrock, ForkCondition::Block(0)), - (Hardfork::Regolith, ForkCondition::Timestamp(0)), - (Hardfork::PreContractForkBlock, ForkCondition::Block(5805494)), - (Hardfork::Fermat, ForkCondition::Timestamp(1698991506)), - (Hardfork::Shanghai, ForkCondition::Timestamp(1715753400)), - (Hardfork::Canyon, ForkCondition::Timestamp(1715753400)), - (Hardfork::Cancun, ForkCondition::Timestamp(1715754600)), - (Hardfork::Ecotone, ForkCondition::Timestamp(1715754600)), - (Hardfork::Haber, ForkCondition::Timestamp(1717048800)), - ]), + hardforks: OptimismHardfork::opbnb_testnet(), base_fee_params: BaseFeeParamsKind::Variable( - vec![(Hardfork::London, BaseFeeParams::ethereum())].into(), + vec![(EthereumHardfork::London.boxed(), BaseFeeParams::ethereum())].into(), ), prune_delete_limit: 0, ..Default::default() @@ -501,6 +321,12 @@ pub enum BaseFeeParamsKind { Variable(ForkBaseFeeParams), } +impl Default for BaseFeeParamsKind { + fn default() -> Self { + BaseFeeParams::ethereum().into() + } +} + impl From<BaseFeeParams> for BaseFeeParamsKind { fn from(params: BaseFeeParams) -> Self { Self::Constant(params) @@ -516,7 +342,15 @@ impl From<ForkBaseFeeParams> for BaseFeeParamsKind { /// A type alias to a vector of tuples of [Hardfork] and [`BaseFeeParams`], sorted by [Hardfork] /// activation order. This is used to specify dynamic EIP-1559 parameters for chains like Optimism.
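// [Editor's note: illustrative sketch, not part of the upstream diff.] Because the
// schedule described above is sorted by activation order, the reverse scan in the
// accessor further down returns the parameters of the newest fork that is already
// active. Assuming that accessor is ChainSpec::base_fee_params_at_timestamp (only
// its body is visible in this hunk), usage on the OP mainnet spec from this file
// would look like:
fn op_eip1559_params(timestamp: u64) -> BaseFeeParams {
    // Before Canyon activates this yields OP_BASE_FEE_PARAMS; afterwards the
    // later (Canyon) entry wins and OP_CANYON_BASE_FEE_PARAMS is returned.
    OP_MAINNET.base_fee_params_at_timestamp(timestamp)
}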
#[derive(Clone, Debug, PartialEq, Eq, From)] -pub struct ForkBaseFeeParams(Vec<(Hardfork, BaseFeeParams)>); +pub struct ForkBaseFeeParams(Vec<(Box<dyn Hardfork>, BaseFeeParams)>); + +impl core::ops::Deref for ChainSpec { + type Target = ChainHardforks; + + fn deref(&self) -> &Self::Target { + &self.hardforks + } +} /// An Ethereum chain specification. /// @@ -539,12 +373,12 @@ pub struct ChainSpec { /// The genesis block pub genesis: Genesis, - /// The block at which [`Hardfork::Paris`] was activated and the final difficulty at this - /// block. + /// The block at which [`EthereumHardfork::Paris`] was activated and the final difficulty at + /// this block. pub paris_block_and_final_difficulty: Option<(u64, U256)>, /// The active hard forks and their activation conditions - pub hardforks: BTreeMap<Hardfork, ForkCondition>, + pub hardforks: ChainHardforks, /// The deposit contract deployed for `PoS` pub deposit_contract: Option<DepositContract>, @@ -615,7 +449,7 @@ impl ChainSpec { #[inline] #[cfg(feature = "optimism")] pub fn is_optimism(&self) -> bool { - self.chain.is_optimism() || self.hardforks.contains_key(&Hardfork::Bedrock) + self.chain.is_optimism() || self.hardforks.get(OptimismHardfork::Bedrock).is_some() } /// Returns `true` if this chain contains Optimism configuration. @@ -646,7 +480,7 @@ impl ChainSpec { // If shanghai is activated, initialize the header with an empty withdrawals hash, and // empty withdrawals list. let withdrawals_root = self - .fork(Hardfork::Shanghai) + .fork(EthereumHardfork::Shanghai) .active_at_timestamp(self.genesis.timestamp) .then_some(EMPTY_WITHDRAWALS); @@ -671,12 +505,6 @@ impl ChainSpec { }; Header { - parent_hash: B256::ZERO, - number: 0, - transactions_root: EMPTY_TRANSACTIONS, - ommers_hash: EMPTY_OMMER_ROOT_HASH, - receipts_root: EMPTY_RECEIPTS, - logs_bloom: Default::default(), gas_limit: self.genesis.gas_limit as u64, difficulty: self.genesis.difficulty, nonce: self.genesis.nonce, @@ -685,13 +513,13 @@ impl ChainSpec { timestamp: self.genesis.timestamp, mix_hash: self.genesis.mix_hash, beneficiary: self.genesis.coinbase, - gas_used: Default::default(), base_fee_per_gas, withdrawals_root, parent_beacon_block_root, blob_gas_used, excess_blob_gas, requests_root, + ..Default::default() } } @@ -707,7 +535,7 @@ impl ChainSpec { self.genesis.base_fee_per_gas.map(|fee| fee as u64).unwrap_or(EIP1559_INITIAL_BASE_FEE); // If London is activated at genesis, we set the initial base fee as per EIP-1559. - self.fork(Hardfork::London).active_at_block(0).then_some(genesis_base_fee) + self.hardforks.fork(EthereumHardfork::London).active_at_block(0).then_some(genesis_base_fee) } /// Get the [`BaseFeeParams`] for the chain at the given timestamp. @@ -719,7 +547,7 @@ impl ChainSpec { // first one that corresponds to a hardfork that is active at the // given timestamp. for (fork, params) in bf_params.iter().rev() { - if self.is_fork_active_at_timestamp(*fork, timestamp) { + if self.hardforks.is_fork_active_at_timestamp(fork.clone(), timestamp) { return *params; } } @@ -738,7 +566,7 @@ impl ChainSpec { // first one that corresponds to a hardfork that is active at the // given timestamp.
for (fork, params) in bf_params.iter().rev() { - if self.is_fork_active_at_block(*fork, block_number) { + if self.hardforks.is_fork_active_at_block(fork.clone(), block_number) { return *params } } @@ -775,282 +603,55 @@ impl ChainSpec { } /// Get the fork filter for the given hardfork - pub fn hardfork_fork_filter(&self, fork: Hardfork) -> Option<ForkFilter> { - match self.fork(fork) { + pub fn hardfork_fork_filter<H: Hardfork + Clone>(&self, fork: H) -> Option<ForkFilter> { + match self.hardforks.fork(fork.clone()) { ForkCondition::Never => None, - _ => Some(self.fork_filter(self.satisfy(self.fork(fork)))), + _ => Some(self.fork_filter(self.satisfy(self.hardforks.fork(fork)))), } } - /// Returns the forks in this specification and their activation conditions. - pub const fn hardforks(&self) -> &BTreeMap<Hardfork, ForkCondition> { - &self.hardforks - } - /// Returns the hardfork display helper. pub fn display_hardforks(&self) -> DisplayHardforks { DisplayHardforks::new( - self.hardforks(), + &self.hardforks, self.paris_block_and_final_difficulty.map(|(block, _)| block), ) } /// Get the fork id for the given hardfork. #[inline] - pub fn hardfork_fork_id(&self, fork: Hardfork) -> Option<ForkId> { - match self.fork(fork) { + pub fn hardfork_fork_id<H: Hardfork>(&self, fork: H) -> Option<ForkId> { + let condition = self.hardforks.fork(fork); match condition { ForkCondition::Never => None, - _ => Some(self.fork_id(&self.satisfy(self.fork(fork)))), + _ => Some(self.fork_id(&self.satisfy(condition))), } } - /// Convenience method to get the fork id for [`Hardfork::Shanghai`] from a given chainspec. + /// Convenience method to get the fork id for [`EthereumHardfork::Shanghai`] from a given + /// chainspec. #[inline] pub fn shanghai_fork_id(&self) -> Option<ForkId> { - self.hardfork_fork_id(Hardfork::Shanghai) + self.hardfork_fork_id(EthereumHardfork::Shanghai) } - /// Convenience method to get the fork id for [`Hardfork::Cancun`] from a given chainspec. + /// Convenience method to get the fork id for [`EthereumHardfork::Cancun`] from a given + /// chainspec. #[inline] pub fn cancun_fork_id(&self) -> Option<ForkId> { - self.hardfork_fork_id(Hardfork::Cancun) + self.hardfork_fork_id(EthereumHardfork::Cancun) } /// Convenience method to get the latest fork id from the chainspec. Panics if chainspec has no /// hardforks. #[inline] pub fn latest_fork_id(&self) -> ForkId { - self.hardfork_fork_id(*self.hardforks().last_key_value().unwrap().0).unwrap() - } - - /// Get the fork condition for the given fork. - pub fn fork(&self, fork: Hardfork) -> ForkCondition { - self.hardforks.get(&fork).copied().unwrap_or(ForkCondition::Never) - } - - /// Get an iterator of all hardforks with their respective activation conditions. - pub fn forks_iter(&self) -> impl Iterator<Item = (Hardfork, ForkCondition)> + '_ { - let mut hardforks: Vec<(Hardfork, ForkCondition)> = - self.hardforks.iter().map(|(f, c)| (*f, *c)).collect(); - hardforks.sort_by(|(f1, c1), (f2, c2)| match (c1, c2) { - (ForkCondition::Block(b1), ForkCondition::Block(b2)) => b1.cmp(b2), - (ForkCondition::Timestamp(t1), ForkCondition::Timestamp(t2)) => t1.cmp(t2), - _ => f1.cmp(f2), - }); - hardforks.into_iter() - } - - /// Convenience method to check if a fork is active at a given timestamp.
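// [Editor's note: illustrative sketch, not part of the upstream diff.] With the
// per-spec convenience helpers below removed, activation checks resolve through
// the ChainHardforks that ChainSpec now derefs to (see the Deref impl added above):
fn shanghai_at_genesis(spec: &ChainSpec) -> bool {
    // Equivalent to the removed ChainSpec::is_shanghai_active_at_timestamp.
    spec.fork(EthereumHardfork::Shanghai).active_at_timestamp(spec.genesis.timestamp)
}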
- #[inline] - pub fn is_fork_active_at_timestamp(&self, fork: Hardfork, timestamp: u64) -> bool { - self.fork(fork).active_at_timestamp(timestamp) - } - - /// Convenience method to check if a fork is active at a given block number - #[inline] - pub fn is_fork_active_at_block(&self, fork: Hardfork, block_number: u64) -> bool { - self.fork(fork).active_at_block(block_number) - } - - /// Convenience method to check if [`Hardfork::Shanghai`] is active at a given timestamp. - #[inline] - pub fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(Hardfork::Shanghai, timestamp) - } - - /// Convenience method to check if [`Hardfork::Cancun`] is active at a given timestamp. - #[inline] - pub fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(Hardfork::Cancun, timestamp) - } - - /// Convenience method to check if [`Hardfork::Prague`] is active at a given timestamp. - #[inline] - pub fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(Hardfork::Prague, timestamp) - } - - /// Convenience method to check if [`Hardfork::Byzantium`] is active at a given block number. - #[inline] - pub fn is_byzantium_active_at_block(&self, block_number: u64) -> bool { - self.fork(Hardfork::Byzantium).active_at_block(block_number) - } - - /// Convenience method to check if [`Hardfork::SpuriousDragon`] is active at a given block - /// number. - #[inline] - pub fn is_spurious_dragon_active_at_block(&self, block_number: u64) -> bool { - self.fork(Hardfork::SpuriousDragon).active_at_block(block_number) - } - - /// Convenience method to check if [`Hardfork::Homestead`] is active at a given block number. - #[inline] - pub fn is_homestead_active_at_block(&self, block_number: u64) -> bool { - self.fork(Hardfork::Homestead).active_at_block(block_number) - } - - /// The Paris hardfork (merge) is activated via block number. If we have knowledge of the block, - /// this function will return true if the block number is greater than or equal to the Paris - /// (merge) block. - pub fn is_paris_active_at_block(&self, block_number: u64) -> Option<bool> { - self.paris_block_and_final_difficulty.map(|(paris_block, _)| block_number >= paris_block) - } - - /// Convenience method to check if [`Hardfork::Bedrock`] is active at a given block number. - #[cfg(feature = "optimism")] - #[inline] - pub fn is_bedrock_active_at_block(&self, block_number: u64) -> bool { - self.fork(Hardfork::Bedrock).active_at_block(block_number) - } - - /// Convenience method to check if [`Hardfork::Ramanujan`] is firstly active at a given block. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_on_ramanujan_at_block(&self, block_number: u64) -> bool { - self.fork(Hardfork::Ramanujan).transitions_at_block(block_number) - } - - /// Convenience method to check if [`Hardfork::Ramanujan`] is active at a given block. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_ramanujan_active_at_block(&self, block_number: u64) -> bool { - self.is_fork_active_at_block(Hardfork::Ramanujan, block_number) - } - - /// Convenience method to check if [`Hardfork::Euler`] is firstly active at a given block. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_on_euler_at_block(&self, block_number: u64) -> bool { - self.fork(Hardfork::Euler).transitions_at_block(block_number) - } - - /// Convenience method to check if [`Hardfork::Euler`] is active at a given block.
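// [Editor's note: illustrative sketch, not part of the upstream diff.] The BSC-only
// `is_on_*` helpers rely on ForkCondition::transitions_at_block, which is true only
// at the exact activation height, while the `is_*_active` variants stay true from
// activation onwards. With a hypothetical fork activating at block 10:
fn transition_vs_active() {
    let cond = ForkCondition::Block(10); // assumed activation height
    assert!(cond.transitions_at_block(10));  // "is_on_*": only the activation block
    assert!(!cond.transitions_at_block(11)); // one block later it is no transition
    assert!(cond.active_at_block(10) && cond.active_at_block(11)); // "is_*_active"
}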
- #[cfg(feature = "bsc")] - #[inline] - pub fn is_euler_active_at_block(&self, block_number: u64) -> bool { - self.is_fork_active_at_block(Hardfork::Euler, block_number) - } - - /// Convenience method to check if [`Hardfork::Planck`] is firstly active at a given block. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_on_planck_at_block(&self, block_number: u64) -> bool { - self.fork(Hardfork::Planck).transitions_at_block(block_number) - } - - /// Convenience method to check if [`Hardfork::Planck`] is active at a given block. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_planck_active_at_block(&self, block_number: u64) -> bool { - self.is_fork_active_at_block(Hardfork::Planck, block_number) - } - - /// Convenience method to check if [`Hardfork::Luban`] is firstly active at a given block. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_on_luban_at_block(&self, block_number: u64) -> bool { - self.fork(Hardfork::Luban).transitions_at_block(block_number) - } - - /// Convenience method to check if [`Hardfork::Luban`] is active at a given block. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_luban_active_at_block(&self, block_number: u64) -> bool { - self.is_fork_active_at_block(Hardfork::Luban, block_number) - } - - /// Convenience method to check if [`Hardfork::Plato`] is firstly active at a given block. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_on_plato_at_block(&self, block_number: u64) -> bool { - self.fork(Hardfork::Plato).transitions_at_block(block_number) - } - - /// Convenience method to check if [`Hardfork::Plato`] is active at a given block. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_plato_active_at_block(&self, block_number: u64) -> bool { - self.is_fork_active_at_block(Hardfork::Plato, block_number) - } - - /// Convenience method to check if [`Hardfork::Kepler`] is firstly active at a given timestamp - /// and parent timestamp. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_on_kepler_at_timestamp(&self, timestamp: u64, parent_timestamp: u64) -> bool { - self.fork(Hardfork::Kepler).transitions_at_timestamp(timestamp, parent_timestamp) - } - - /// Convenience method to check if [`Hardfork::Kepler`] is active at a given timestamp. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_kepler_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(Hardfork::Kepler, timestamp) - } - - /// Convenience method to check if [`Hardfork::Feynman`] is firstly active at a given timestamp - /// and parent timestamp. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_on_feynman_at_timestamp(&self, timestamp: u64, parent_timestamp: u64) -> bool { - self.fork(Hardfork::Feynman).transitions_at_timestamp(timestamp, parent_timestamp) - } - - /// Convenience method to check if [`Hardfork::Feynman`] is active at a given timestamp. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_feynman_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(Hardfork::Feynman, timestamp) - } - - /// Convenience method to check if [`Hardfork::FeynmanFix`] is firstly active at a given - /// timestamp and parent timestamp. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_on_feynman_fix_at_timestamp(&self, timestamp: u64, parent_timestamp: u64) -> bool { - self.fork(Hardfork::FeynmanFix).transitions_at_timestamp(timestamp, parent_timestamp) - } - - /// Convenience method to check if [`Hardfork::FeynmanFix`] is active at a given timestamp. 
- #[cfg(feature = "bsc")] - #[inline] - pub fn is_feynman_fix_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(Hardfork::FeynmanFix, timestamp) - } - - /// Convenience method to check if [`Hardfork::Haber`] is firstly active at a given timestamp - /// and parent timestamp. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_on_haber_at_timestamp(&self, timestamp: u64, parent_timestamp: u64) -> bool { - self.fork(Hardfork::Haber).transitions_at_timestamp(timestamp, parent_timestamp) - } - - /// Convenience method to check if [`Hardfork::Haber`] is active at a given timestamp. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_haber_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(Hardfork::Haber, timestamp) - } - - /// Convenience method to check if [`Hardfork::HaberFix`] is firstly active at a given timestamp - /// and parent timestamp. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_on_haber_fix_at_timestamp(&self, timestamp: u64, parent_timestamp: u64) -> bool { - self.fork(Hardfork::HaberFix).transitions_at_timestamp(timestamp, parent_timestamp) - } - - /// Convenience method to check if [`Hardfork::HaberFix`] is active at a given timestamp. - #[cfg(feature = "bsc")] - #[inline] - pub fn is_haber_fix_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(Hardfork::HaberFix, timestamp) + self.hardfork_fork_id(self.hardforks.last().unwrap().0).unwrap() } /// Creates a [`ForkFilter`] for the block described by [Head]. pub fn fork_filter(&self, head: Head) -> ForkFilter { - let forks = self.forks_iter().filter_map(|(_, condition)| { + let forks = self.hardforks.forks_iter().filter_map(|(_, condition)| { // We filter out TTD-based forks w/o a pre-known block since those do not show up in the // fork filter. Some(match condition { @@ -1070,7 +671,7 @@ impl ChainSpec { let mut current_applied = 0; // handle all block forks before handling timestamp based forks. see: https://eips.ethereum.org/EIPS/eip-6122 - for (_, cond) in self.forks_iter() { + for (_, cond) in self.hardforks.forks_iter() { // handle block based forks and the sepolia merge netsplit block edge case (TTD // ForkCondition with Some(block)) if let ForkCondition::Block(block) | @@ -1092,7 +693,7 @@ impl ChainSpec { // timestamp are ALWAYS applied after the merge. // // this filter ensures that no block-based forks are returned - for timestamp in self.forks_iter().filter_map(|(_, cond)| { + for timestamp in self.hardforks.forks_iter().filter_map(|(_, cond)| { cond.as_timestamp().filter(|time| time > &self.genesis.timestamp) }) { // Skip Fermat hardfork for opbnb @@ -1141,7 +742,7 @@ impl ChainSpec { /// /// Note: this returns None if the `ChainSpec` is not configured with a TTD/Timestamp fork. pub(crate) fn last_block_fork_before_merge_or_timestamp(&self) -> Option { - let mut hardforks_iter = self.forks_iter().peekable(); + let mut hardforks_iter = self.hardforks.forks_iter().peekable(); while let Some((_, curr_cond)) = hardforks_iter.next() { if let Some((_, next_cond)) = hardforks_iter.peek() { // peek and find the first occurrence of ForkCondition::TTD (merge) , or in @@ -1184,16 +785,11 @@ impl ChainSpec { let chain = self.chain; match chain.try_into().ok()? 
{ C::Mainnet => Some(mainnet_nodes()), - C::Goerli => Some(goerli_nodes()), C::Sepolia => Some(sepolia_nodes()), C::Holesky => Some(holesky_nodes()), - #[cfg(feature = "optimism")] C::Base => Some(base_nodes()), - #[cfg(feature = "optimism")] C::Optimism => Some(op_nodes()), - #[cfg(feature = "optimism")] C::BaseGoerli | C::BaseSepolia => Some(base_testnet_nodes()), - #[cfg(feature = "optimism")] C::OptimismSepolia | C::OptimismGoerli | C::OptimismKovan => Some(op_testnet_nodes()), #[cfg(feature = "bsc")] C::BNBSmartChain => Some(bsc_mainnet_nodes()), @@ -1212,40 +808,43 @@ impl From<Genesis> for ChainSpec { fn from(genesis: Genesis) -> Self { #[cfg(feature = "optimism")] let optimism_genesis_info = OptimismGenesisInfo::extract_from(&genesis); + #[cfg(feature = "optimism")] + let genesis_info = + optimism_genesis_info.optimism_chain_info.genesis_info.unwrap_or_default(); // Block-based hardforks let hardfork_opts = [ - (Hardfork::Homestead, genesis.config.homestead_block), - (Hardfork::Dao, genesis.config.dao_fork_block), - (Hardfork::Tangerine, genesis.config.eip150_block), - (Hardfork::SpuriousDragon, genesis.config.eip155_block), - (Hardfork::Byzantium, genesis.config.byzantium_block), - (Hardfork::Constantinople, genesis.config.constantinople_block), - (Hardfork::Petersburg, genesis.config.petersburg_block), - (Hardfork::Istanbul, genesis.config.istanbul_block), - (Hardfork::MuirGlacier, genesis.config.muir_glacier_block), - (Hardfork::Berlin, genesis.config.berlin_block), - (Hardfork::London, genesis.config.london_block), - (Hardfork::ArrowGlacier, genesis.config.arrow_glacier_block), - (Hardfork::GrayGlacier, genesis.config.gray_glacier_block), + (EthereumHardfork::Homestead.boxed(), genesis.config.homestead_block), + (EthereumHardfork::Dao.boxed(), genesis.config.dao_fork_block), + (EthereumHardfork::Tangerine.boxed(), genesis.config.eip150_block), + (EthereumHardfork::SpuriousDragon.boxed(), genesis.config.eip155_block), + (EthereumHardfork::Byzantium.boxed(), genesis.config.byzantium_block), + (EthereumHardfork::Constantinople.boxed(), genesis.config.constantinople_block), + (EthereumHardfork::Petersburg.boxed(), genesis.config.petersburg_block), + (EthereumHardfork::Istanbul.boxed(), genesis.config.istanbul_block), + (EthereumHardfork::MuirGlacier.boxed(), genesis.config.muir_glacier_block), + (EthereumHardfork::Berlin.boxed(), genesis.config.berlin_block), + (EthereumHardfork::London.boxed(), genesis.config.london_block), + (EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block), + (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block), #[cfg(feature = "optimism")] - (Hardfork::Bedrock, optimism_genesis_info.bedrock_block), + (OptimismHardfork::Bedrock.boxed(), genesis_info.bedrock_block), ]; let mut hardforks = hardfork_opts - .iter() - .filter_map(|(hardfork, opt)| opt.map(|block| (*hardfork, ForkCondition::Block(block)))) - .collect::<BTreeMap<_, _>>(); + .into_iter() + .filter_map(|(hardfork, opt)| opt.map(|block| (hardfork, ForkCondition::Block(block)))) + .collect::<Vec<_>>(); // Paris let paris_block_and_final_difficulty = if let Some(ttd) = genesis.config.terminal_total_difficulty { - hardforks.insert( - Hardfork::Paris, + hardforks.push(( + EthereumHardfork::Paris.boxed(), ForkCondition::TTD { total_difficulty: ttd, fork_block: genesis.config.merge_netsplit_block, }, - ); + )); genesis.config.merge_netsplit_block.map(|block| (block, ttd)) } else { @@ -1254,28 +853,45 @@ impl From<Genesis> for ChainSpec { // Time-based hardforks let time_hardfork_opts = [ -
(Hardfork::Shanghai, genesis.config.shanghai_time), - (Hardfork::Cancun, genesis.config.cancun_time), - (Hardfork::Prague, genesis.config.prague_time), + (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), + (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time), + (EthereumHardfork::Prague.boxed(), genesis.config.prague_time), #[cfg(feature = "optimism")] - (Hardfork::Regolith, optimism_genesis_info.regolith_time), + (OptimismHardfork::Regolith.boxed(), genesis_info.regolith_time), #[cfg(feature = "optimism")] - (Hardfork::Canyon, optimism_genesis_info.canyon_time), + (OptimismHardfork::Canyon.boxed(), genesis_info.canyon_time), #[cfg(feature = "optimism")] - (Hardfork::Ecotone, optimism_genesis_info.ecotone_time), + (OptimismHardfork::Ecotone.boxed(), genesis_info.ecotone_time), #[cfg(feature = "optimism")] - (Hardfork::Fjord, optimism_genesis_info.fjord_time), + (OptimismHardfork::Fjord.boxed(), genesis_info.fjord_time), ]; let time_hardforks = time_hardfork_opts - .iter() + .into_iter() .filter_map(|(hardfork, opt)| { - opt.map(|time| (*hardfork, ForkCondition::Timestamp(time))) + opt.map(|time| (hardfork, ForkCondition::Timestamp(time))) }) - .collect::<BTreeMap<_, _>>(); + .collect::<Vec<_>>(); hardforks.extend(time_hardforks); + // Uses ethereum or optimism main chains to find proper order + #[cfg(not(feature = "optimism"))] + let mainnet_hardforks: ChainHardforks = EthereumHardfork::mainnet().into(); + #[cfg(not(feature = "optimism"))] + let mainnet_order = mainnet_hardforks.forks_iter(); + #[cfg(feature = "optimism")] + let mainnet_hardforks = OptimismHardfork::op_mainnet(); + #[cfg(feature = "optimism")] + let mainnet_order = mainnet_hardforks.forks_iter(); + + let mut ordered_hardforks = Vec::with_capacity(hardforks.len()); + for (hardfork, _) in mainnet_order { + if let Some(pos) = hardforks.iter().position(|(e, _)| **e == *hardfork) { + ordered_hardforks.push(hardforks[pos].clone()); + } + } + // NOTE: in full node, we prune all receipts except the deposit contract's. We do not // have the deployment block in the genesis file, so we use block zero. We use the same // deposit topic as the mainnet contract if we have the deposit contract address in the @@ -1288,7 +904,7 @@ impl From<Genesis> for ChainSpec { chain: genesis.config.chain_id.into(), genesis, genesis_hash: None, - hardforks, + hardforks: ChainHardforks::new(hardforks), paris_block_and_final_difficulty, deposit_contract, #[cfg(feature = "optimism")] @@ -1303,7 +919,7 @@ impl From<Genesis> for ChainSpec { pub struct ChainSpecBuilder { chain: Option<Chain>, genesis: Option<Genesis>, - hardforks: BTreeMap<Hardfork, ForkCondition>, + hardforks: ChainHardforks, } impl ChainSpecBuilder { @@ -1315,7 +931,9 @@ impl ChainSpecBuilder { hardforks: MAINNET.hardforks.clone(), } } +} +impl ChainSpecBuilder { /// Set the chain ID pub const fn chain(mut self, chain: Chain) -> Self { self.chain = Some(chain); self } @@ -1329,14 +947,14 @@ impl ChainSpecBuilder { } /// Add the given fork with the given activation condition to the spec. - pub fn with_fork(mut self, fork: Hardfork, condition: ForkCondition) -> Self { + pub fn with_fork(mut self, fork: EthereumHardfork, condition: ForkCondition) -> Self { self.hardforks.insert(fork, condition); self } /// Remove the given fork from the spec. - pub fn without_fork(mut self, fork: Hardfork) -> Self { - self.hardforks.remove(&fork); + pub fn without_fork(mut self, fork: EthereumHardfork) -> Self { + self.hardforks.remove(fork); self } @@ -1345,77 +963,77 @@ impl ChainSpecBuilder { /// Does not set the merge netsplit block.
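// [Editor's note: illustrative sketch, not part of the upstream diff.] Typical use
// of the builder after this change; ChainSpec::builder() and Genesis::default()
// are the entry points exercised by the tests below, and each *_activated helper
// pulls in every earlier fork at genesis:
fn dev_like_spec() -> ChainSpec {
    ChainSpec::builder()
        .chain(Chain::dev())
        .genesis(Genesis::default())
        .cancun_activated() // implies Paris, London, ... down to Frontier
        .build()
}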
pub fn paris_at_ttd(self, ttd: U256) -> Self { self.with_fork( - Hardfork::Paris, + EthereumHardfork::Paris, ForkCondition::TTD { total_difficulty: ttd, fork_block: None }, ) } /// Enable Frontier at genesis. pub fn frontier_activated(mut self) -> Self { - self.hardforks.insert(Hardfork::Frontier, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Frontier, ForkCondition::Block(0)); self } /// Enable Homestead at genesis. pub fn homestead_activated(mut self) -> Self { self = self.frontier_activated(); - self.hardforks.insert(Hardfork::Homestead, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Homestead, ForkCondition::Block(0)); self } /// Enable Tangerine at genesis. pub fn tangerine_whistle_activated(mut self) -> Self { self = self.homestead_activated(); - self.hardforks.insert(Hardfork::Tangerine, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Tangerine, ForkCondition::Block(0)); self } /// Enable Spurious Dragon at genesis. pub fn spurious_dragon_activated(mut self) -> Self { self = self.tangerine_whistle_activated(); - self.hardforks.insert(Hardfork::SpuriousDragon, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::SpuriousDragon, ForkCondition::Block(0)); self } /// Enable Byzantium at genesis. pub fn byzantium_activated(mut self) -> Self { self = self.spurious_dragon_activated(); - self.hardforks.insert(Hardfork::Byzantium, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Byzantium, ForkCondition::Block(0)); self } /// Enable Constantinople at genesis. pub fn constantinople_activated(mut self) -> Self { self = self.byzantium_activated(); - self.hardforks.insert(Hardfork::Constantinople, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Constantinople, ForkCondition::Block(0)); self } /// Enable Petersburg at genesis. pub fn petersburg_activated(mut self) -> Self { self = self.constantinople_activated(); - self.hardforks.insert(Hardfork::Petersburg, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Petersburg, ForkCondition::Block(0)); self } /// Enable Istanbul at genesis. pub fn istanbul_activated(mut self) -> Self { self = self.petersburg_activated(); - self.hardforks.insert(Hardfork::Istanbul, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Istanbul, ForkCondition::Block(0)); self } /// Enable Berlin at genesis. pub fn berlin_activated(mut self) -> Self { self = self.istanbul_activated(); - self.hardforks.insert(Hardfork::Berlin, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::Berlin, ForkCondition::Block(0)); self } /// Enable London at genesis. pub fn london_activated(mut self) -> Self { self = self.berlin_activated(); - self.hardforks.insert(Hardfork::London, ForkCondition::Block(0)); + self.hardforks.insert(EthereumHardfork::London, ForkCondition::Block(0)); self } @@ -1423,7 +1041,7 @@ impl ChainSpecBuilder { pub fn paris_activated(mut self) -> Self { self = self.london_activated(); self.hardforks.insert( - Hardfork::Paris, + EthereumHardfork::Paris, ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }, ); self @@ -1432,14 +1050,14 @@ impl ChainSpecBuilder { /// Enable Shanghai at genesis. pub fn shanghai_activated(mut self) -> Self { self = self.paris_activated(); - self.hardforks.insert(Hardfork::Shanghai, ForkCondition::Timestamp(0)); + self.hardforks.insert(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0)); self } /// Enable Cancun at genesis. 
pub fn cancun_activated(mut self) -> Self { self = self.shanghai_activated(); - self.hardforks.insert(Hardfork::Cancun, ForkCondition::Timestamp(0)); + self.hardforks.insert(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)); self } @@ -1447,7 +1065,7 @@ impl ChainSpecBuilder { #[cfg(feature = "optimism")] pub fn bedrock_activated(mut self) -> Self { self = self.paris_activated(); - self.hardforks.insert(Hardfork::Bedrock, ForkCondition::Block(0)); + self.hardforks.insert(OptimismHardfork::Bedrock, ForkCondition::Block(0)); self } @@ -1455,7 +1073,7 @@ impl ChainSpecBuilder { #[cfg(feature = "optimism")] pub fn regolith_activated(mut self) -> Self { self = self.bedrock_activated(); - self.hardforks.insert(Hardfork::Regolith, ForkCondition::Timestamp(0)); + self.hardforks.insert(OptimismHardfork::Regolith, ForkCondition::Timestamp(0)); self } @@ -1464,8 +1082,8 @@ impl ChainSpecBuilder { pub fn canyon_activated(mut self) -> Self { self = self.regolith_activated(); // Canyon also activates changes from L1's Shanghai hardfork - self.hardforks.insert(Hardfork::Shanghai, ForkCondition::Timestamp(0)); - self.hardforks.insert(Hardfork::Canyon, ForkCondition::Timestamp(0)); + self.hardforks.insert(EthereumHardfork::Shanghai, ForkCondition::Timestamp(0)); + self.hardforks.insert(OptimismHardfork::Canyon, ForkCondition::Timestamp(0)); self } @@ -1473,8 +1091,8 @@ impl ChainSpecBuilder { #[cfg(feature = "optimism")] pub fn ecotone_activated(mut self) -> Self { self = self.canyon_activated(); - self.hardforks.insert(Hardfork::Cancun, ForkCondition::Timestamp(0)); - self.hardforks.insert(Hardfork::Ecotone, ForkCondition::Timestamp(0)); + self.hardforks.insert(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)); + self.hardforks.insert(OptimismHardfork::Ecotone, ForkCondition::Timestamp(0)); self } @@ -1482,7 +1100,7 @@ impl ChainSpecBuilder { #[cfg(feature = "optimism")] pub fn fjord_activated(mut self) -> Self { self = self.ecotone_activated(); - self.hardforks.insert(Hardfork::Fjord, ForkCondition::Timestamp(0)); + self.hardforks.insert(OptimismHardfork::Fjord, ForkCondition::Timestamp(0)); self } @@ -1494,9 +1112,9 @@ impl ChainSpecBuilder { /// [`Self::genesis`]) pub fn build(self) -> ChainSpec { let paris_block_and_final_difficulty = { - self.hardforks.get(&Hardfork::Paris).and_then(|cond| { + self.hardforks.get(EthereumHardfork::Paris).and_then(|cond| { if let ForkCondition::TTD { fork_block, total_difficulty } = cond { - fork_block.map(|fork_block| (fork_block, *total_difficulty)) + fork_block.map(|fork_block| (fork_block, total_difficulty)) } else { None } @@ -1542,100 +1160,73 @@ impl DepositContract { } } +/// Genesis info for Optimism. 
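// [Editor's note: illustrative sketch, not part of the upstream diff.] The struct
// below is now deserialized from the genesis config's extra fields via
// op_alloy_rpc_types instead of reading each key by hand. The keys involved are the
// ones named in the removed code; a genesis carrying them might look like this
// (values hypothetical):
//
//   "config": {
//     "bedrockBlock": 0,
//     "regolithTime": 0,
//     "optimism": {
//       "eip1559Elasticity": 6,
//       "eip1559Denominator": 50,
//       "eip1559DenominatorCanyon": 250
//     }
//   }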
#[cfg(feature = "optimism")] +#[derive(Default, Debug, serde::Deserialize)] +#[serde(rename_all = "camelCase")] struct OptimismGenesisInfo { - bedrock_block: Option, - regolith_time: Option, - canyon_time: Option, - ecotone_time: Option, - fjord_time: Option, + optimism_chain_info: op_alloy_rpc_types::genesis::OptimismChainInfo, + #[serde(skip)] base_fee_params: BaseFeeParamsKind, } #[cfg(feature = "optimism")] impl OptimismGenesisInfo { fn extract_from(genesis: &Genesis) -> Self { - let optimism_config = - genesis.config.extra_fields.get("optimism").and_then(|value| value.as_object()); - - let eip1559_elasticity = optimism_config - .and_then(|config| config.get("eip1559Elasticity")) - .and_then(|value| value.as_u64()); - - let eip1559_denominator = optimism_config - .and_then(|config| config.get("eip1559Denominator")) - .and_then(|value| value.as_u64()); - - let eip1559_denominator_canyon = optimism_config - .and_then(|config| config.get("eip1559DenominatorCanyon")) - .and_then(|value| value.as_u64()); - - let base_fee_params = if let (Some(elasticity), Some(denominator)) = - (eip1559_elasticity, eip1559_denominator) - { - if let Some(canyon_denominator) = eip1559_denominator_canyon { - BaseFeeParamsKind::Variable( - vec![ - ( - Hardfork::London, - BaseFeeParams::new(denominator as u128, elasticity as u128), - ), - ( - Hardfork::Canyon, - BaseFeeParams::new(canyon_denominator as u128, elasticity as u128), - ), - ] - .into(), - ) - } else { - BaseFeeParams::new(denominator as u128, elasticity as u128).into() - } - } else { - BaseFeeParams::ethereum().into() + let mut info = Self { + optimism_chain_info: op_alloy_rpc_types::genesis::OptimismChainInfo::extract_from( + &genesis.config.extra_fields, + ) + .unwrap_or_default(), + ..Default::default() }; + if let Some(optimism_base_fee_info) = &info.optimism_chain_info.base_fee_info { + if let (Some(elasticity), Some(denominator)) = ( + optimism_base_fee_info.eip1559_elasticity, + optimism_base_fee_info.eip1559_denominator, + ) { + let base_fee_params = if let Some(canyon_denominator) = + optimism_base_fee_info.eip1559_denominator_canyon + { + BaseFeeParamsKind::Variable( + vec![ + ( + EthereumHardfork::London.boxed(), + BaseFeeParams::new(denominator as u128, elasticity as u128), + ), + ( + OptimismHardfork::Canyon.boxed(), + BaseFeeParams::new(canyon_denominator as u128, elasticity as u128), + ), + ] + .into(), + ) + } else { + BaseFeeParams::new(denominator as u128, elasticity as u128).into() + }; - Self { - bedrock_block: genesis - .config - .extra_fields - .get("bedrockBlock") - .and_then(|value| value.as_u64()), - regolith_time: genesis - .config - .extra_fields - .get("regolithTime") - .and_then(|value| value.as_u64()), - canyon_time: genesis - .config - .extra_fields - .get("canyonTime") - .and_then(|value| value.as_u64()), - ecotone_time: genesis - .config - .extra_fields - .get("ecotoneTime") - .and_then(|value| value.as_u64()), - fjord_time: genesis - .config - .extra_fields - .get("fjordTime") - .and_then(|value| value.as_u64()), - base_fee_params, + info.base_fee_params = base_fee_params; + } } + + info } } #[cfg(test)] mod tests { + use super::*; use alloy_chains::Chain; use alloy_genesis::{ChainConfig, GenesisAccount}; + use alloy_primitives::{b256, hex}; + use core::ops::Deref; use reth_ethereum_forks::{ForkCondition, ForkHash, ForkId, Head}; use reth_trie_common::TrieAccount; - - use super::*; - use alloy_primitives::{b256, hex}; use std::{collections::HashMap, str::FromStr}; + #[cfg(feature = "optimism")] + use 
reth_ethereum_forks::OptimismHardforks; + fn test_fork_ids(spec: &ChainSpec, cases: &[(Head, ForkId)]) { for (block, expected_id) in cases { let computed_id = spec.fork_id(block); @@ -1647,14 +1238,14 @@ mod tests { } } - fn test_hardfork_fork_ids(spec: &ChainSpec, cases: &[(Hardfork, ForkId)]) { + fn test_hardfork_fork_ids(spec: &ChainSpec, cases: &[(EthereumHardfork, ForkId)]) { for (hardfork, expected_id) in cases { if let Some(computed_id) = spec.hardfork_fork_id(*hardfork) { assert_eq!( expected_id, &computed_id, "Expected fork ID {expected_id:?}, computed fork ID {computed_id:?} for hardfork {hardfork}" ); - if matches!(hardfork, Hardfork::Shanghai) { + if matches!(hardfork, EthereumHardfork::Shanghai) { if let Some(shanghai_id) = spec.shanghai_fork_id() { assert_eq!( expected_id, &shanghai_id, @@ -1700,8 +1291,8 @@ Post-merge hard forks (timestamp based): let spec = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(Genesis::default()) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Shanghai, ForkCondition::Never) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Shanghai, ForkCondition::Never) .build(); assert_eq!( spec.display_hardforks().to_string(), @@ -1716,21 +1307,21 @@ Post-merge hard forks (timestamp based): let spec = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(Genesis::default()) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Homestead, ForkCondition::Block(0)) - .with_fork(Hardfork::Tangerine, ForkCondition::Block(0)) - .with_fork(Hardfork::SpuriousDragon, ForkCondition::Block(0)) - .with_fork(Hardfork::Byzantium, ForkCondition::Block(0)) - .with_fork(Hardfork::Constantinople, ForkCondition::Block(0)) - .with_fork(Hardfork::Istanbul, ForkCondition::Block(0)) - .with_fork(Hardfork::MuirGlacier, ForkCondition::Block(0)) - .with_fork(Hardfork::Berlin, ForkCondition::Block(0)) - .with_fork(Hardfork::London, ForkCondition::Block(0)) - .with_fork(Hardfork::ArrowGlacier, ForkCondition::Block(0)) - .with_fork(Hardfork::GrayGlacier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Tangerine, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::SpuriousDragon, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Byzantium, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Constantinople, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Istanbul, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::MuirGlacier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Berlin, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::London, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::ArrowGlacier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::GrayGlacier, ForkCondition::Block(0)) .build(); - assert_eq!(spec.hardforks().len(), 12, "12 forks should be active."); + assert_eq!(spec.deref().len(), 12, "12 forks should be active."); assert_eq!( spec.fork_id(&Head { number: 1, ..Default::default() }), ForkId { hash: ForkHash::from(spec.genesis_hash()), next: 0 }, @@ -1744,16 +1335,16 @@ Post-merge hard forks (timestamp based): let unique_spec = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(empty_genesis.clone()) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Homestead, ForkCondition::Block(1)) + .with_fork(EthereumHardfork::Frontier,
ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(1)) .build(); let duplicate_spec = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(empty_genesis) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Homestead, ForkCondition::Block(1)) - .with_fork(Hardfork::Tangerine, ForkCondition::Block(1)) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(1)) + .with_fork(EthereumHardfork::Tangerine, ForkCondition::Block(1)) .build(); assert_eq!( @@ -1770,9 +1361,9 @@ Post-merge hard forks (timestamp based): let happy_path_case = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(empty_genesis.clone()) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Homestead, ForkCondition::Block(73)) - .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(11313123)) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(73)) + .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(11313123)) .build(); let happy_path_head = happy_path_case.satisfy(ForkCondition::Timestamp(11313123)); let happy_path_expected = Head { number: 73, timestamp: 11313123, ..Default::default() }; @@ -1784,10 +1375,10 @@ Post-merge hard forks (timestamp based): let multiple_timestamp_fork_case = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(empty_genesis.clone()) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Homestead, ForkCondition::Block(73)) - .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(11313123)) - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(11313398)) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(73)) + .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(11313123)) + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(11313398)) .build(); let multi_timestamp_head = multiple_timestamp_fork_case.satisfy(ForkCondition::Timestamp(11313398)); @@ -1801,7 +1392,7 @@ Post-merge hard forks (timestamp based): let no_block_fork_case = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(empty_genesis.clone()) - .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(11313123)) + .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(11313123)) .build(); let no_block_fork_head = no_block_fork_case.satisfy(ForkCondition::Timestamp(11313123)); let no_block_fork_expected = Head { number: 0, timestamp: 11313123, ..Default::default() }; @@ -1813,16 +1404,16 @@ Post-merge hard forks (timestamp based): let fork_cond_ttd_blocknum_case = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(empty_genesis.clone()) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Homestead, ForkCondition::Block(73)) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(73)) .with_fork( - Hardfork::Paris, + EthereumHardfork::Paris, ForkCondition::TTD { fork_block: Some(101), total_difficulty: U256::from(10_790_000), }, ) - .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(11313123)) + .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(11313123)) .build(); let fork_cond_ttd_blocknum_head = fork_cond_ttd_blocknum_case.satisfy(ForkCondition::Timestamp(11313123)); @@ -1839,8 +1430,8 @@ Post-merge hard forks 
(timestamp based): let fork_cond_block_only_case = ChainSpec::builder() .chain(Chain::mainnet()) .genesis(empty_genesis) - .with_fork(Hardfork::Frontier, ForkCondition::Block(0)) - .with_fork(Hardfork::Homestead, ForkCondition::Block(73)) + .with_fork(EthereumHardfork::Frontier, ForkCondition::Block(0)) + .with_fork(EthereumHardfork::Homestead, ForkCondition::Block(73)) .build(); let fork_cond_block_only_head = fork_cond_block_only_case.satisfy(ForkCondition::Block(73)); let fork_cond_block_only_expected = Head { number: 73, ..Default::default() }; @@ -1868,117 +1459,69 @@ Post-merge hard forks (timestamp based): &MAINNET, &[ ( - Hardfork::Frontier, + EthereumHardfork::Frontier, ForkId { hash: ForkHash([0xfc, 0x64, 0xec, 0x04]), next: 1150000 }, ), ( - Hardfork::Homestead, + EthereumHardfork::Homestead, ForkId { hash: ForkHash([0x97, 0xc2, 0xc3, 0x4c]), next: 1920000 }, ), - (Hardfork::Dao, ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 }), ( - Hardfork::Tangerine, + EthereumHardfork::Dao, + ForkId { hash: ForkHash([0x91, 0xd1, 0xf9, 0x48]), next: 2463000 }, + ), + ( + EthereumHardfork::Tangerine, ForkId { hash: ForkHash([0x7a, 0x64, 0xda, 0x13]), next: 2675000 }, ), ( - Hardfork::SpuriousDragon, + EthereumHardfork::SpuriousDragon, ForkId { hash: ForkHash([0x3e, 0xdd, 0x5b, 0x10]), next: 4370000 }, ), ( - Hardfork::Byzantium, + EthereumHardfork::Byzantium, ForkId { hash: ForkHash([0xa0, 0x0b, 0xc3, 0x24]), next: 7280000 }, ), ( - Hardfork::Constantinople, + EthereumHardfork::Constantinople, ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 }, ), ( - Hardfork::Petersburg, + EthereumHardfork::Petersburg, ForkId { hash: ForkHash([0x66, 0x8d, 0xb0, 0xaf]), next: 9069000 }, ), ( - Hardfork::Istanbul, + EthereumHardfork::Istanbul, ForkId { hash: ForkHash([0x87, 0x9d, 0x6e, 0x30]), next: 9200000 }, ), ( - Hardfork::MuirGlacier, + EthereumHardfork::MuirGlacier, ForkId { hash: ForkHash([0xe0, 0x29, 0xe9, 0x91]), next: 12244000 }, ), ( - Hardfork::Berlin, + EthereumHardfork::Berlin, ForkId { hash: ForkHash([0x0e, 0xb4, 0x40, 0xf6]), next: 12965000 }, ), ( - Hardfork::London, + EthereumHardfork::London, ForkId { hash: ForkHash([0xb7, 0x15, 0x07, 0x7d]), next: 13773000 }, ), ( - Hardfork::ArrowGlacier, + EthereumHardfork::ArrowGlacier, ForkId { hash: ForkHash([0x20, 0xc3, 0x27, 0xfc]), next: 15050000 }, ), ( - Hardfork::GrayGlacier, + EthereumHardfork::GrayGlacier, ForkId { hash: ForkHash([0xf0, 0xaf, 0xd0, 0xe3]), next: 1681338455 }, ), ( - Hardfork::Shanghai, + EthereumHardfork::Shanghai, ForkId { hash: ForkHash([0xdc, 0xe9, 0x6c, 0x2d]), next: 1710338135 }, ), - (Hardfork::Cancun, ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 0 }), - ], - ); - } - - #[test] - fn goerli_hardfork_fork_ids() { - test_hardfork_fork_ids( - &GOERLI, - &[ - ( - Hardfork::Frontier, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Hardfork::Homestead, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Hardfork::Tangerine, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Hardfork::SpuriousDragon, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Hardfork::Byzantium, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Hardfork::Constantinople, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Hardfork::Petersburg, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - 
Hardfork::Istanbul, - ForkId { hash: ForkHash([0xc2, 0x5e, 0xfa, 0x5c]), next: 4460644 }, - ), - ( - Hardfork::Berlin, - ForkId { hash: ForkHash([0x75, 0x7a, 0x1c, 0x47]), next: 5062605 }, - ), - ( - Hardfork::London, - ForkId { hash: ForkHash([0xb8, 0xc6, 0x29, 0x9d]), next: 1678832736 }, - ), ( - Hardfork::Shanghai, - ForkId { hash: ForkHash([0xf9, 0x84, 0x3a, 0xbf]), next: 1705473120 }, + EthereumHardfork::Cancun, + ForkId { hash: ForkHash([0x9f, 0x3d, 0x22, 0x54]), next: 0 }, ), - (Hardfork::Cancun, ForkId { hash: ForkHash([0x70, 0xcc, 0x14, 0xe2]), next: 0 }), ], ); } @@ -1989,54 +1532,57 @@ Post-merge hard forks (timestamp based): &SEPOLIA, &[ ( - Hardfork::Frontier, + EthereumHardfork::Frontier, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Homestead, + EthereumHardfork::Homestead, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Tangerine, + EthereumHardfork::Tangerine, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::SpuriousDragon, + EthereumHardfork::SpuriousDragon, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Byzantium, + EthereumHardfork::Byzantium, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Constantinople, + EthereumHardfork::Constantinople, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Petersburg, + EthereumHardfork::Petersburg, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Istanbul, + EthereumHardfork::Istanbul, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Berlin, + EthereumHardfork::Berlin, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::London, + EthereumHardfork::London, ForkId { hash: ForkHash([0xfe, 0x33, 0x66, 0xe7]), next: 1735371 }, ), ( - Hardfork::Paris, + EthereumHardfork::Paris, ForkId { hash: ForkHash([0xb9, 0x6c, 0xbd, 0x13]), next: 1677557088 }, ), ( - Hardfork::Shanghai, + EthereumHardfork::Shanghai, ForkId { hash: ForkHash([0xf7, 0xf9, 0xbc, 0x08]), next: 1706655072 }, ), - (Hardfork::Cancun, ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 0 }), + ( + EthereumHardfork::Cancun, + ForkId { hash: ForkHash([0x88, 0xcf, 0x81, 0xd9]), next: 0 }, + ), ], ); } @@ -2155,63 +1701,6 @@ Post-merge hard forks (timestamp based): ) } - #[test] - fn goerli_forkids() { - test_fork_ids( - &GOERLI, - &[ - ( - Head { number: 0, ..Default::default() }, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Head { number: 1561650, ..Default::default() }, - ForkId { hash: ForkHash([0xa3, 0xf5, 0xab, 0x08]), next: 1561651 }, - ), - ( - Head { number: 1561651, ..Default::default() }, - ForkId { hash: ForkHash([0xc2, 0x5e, 0xfa, 0x5c]), next: 4460644 }, - ), - ( - Head { number: 4460643, ..Default::default() }, - ForkId { hash: ForkHash([0xc2, 0x5e, 0xfa, 0x5c]), next: 4460644 }, - ), - ( - Head { number: 4460644, ..Default::default() }, - ForkId { hash: ForkHash([0x75, 0x7a, 0x1c, 0x47]), next: 5062605 }, - ), - ( - Head { number: 5062605, ..Default::default() }, - ForkId { hash: ForkHash([0xb8, 0xc6, 0x29, 0x9d]), next: 1678832736 }, - ), - ( - Head { number: 6000000, timestamp: 1678832735, ..Default::default() }, - ForkId { hash: ForkHash([0xb8, 0xc6, 0x29, 0x9d]), next: 1678832736 }, - ), - // First Shanghai block - ( - Head { number: 6000001, timestamp: 1678832736, ..Default::default() }, - ForkId { 
hash: ForkHash([0xf9, 0x84, 0x3a, 0xbf]), next: 1705473120 }, - ), - // Future Shanghai block - ( - Head { number: 6500002, timestamp: 1678832736, ..Default::default() }, - ForkId { hash: ForkHash([0xf9, 0x84, 0x3a, 0xbf]), next: 1705473120 }, - ), - // First Cancun block - ( - Head { number: 6500003, timestamp: 1705473120, ..Default::default() }, - ForkId { hash: ForkHash([0x70, 0xcc, 0x14, 0xe2]), next: 0 }, - ), - // Future Cancun block - ( - Head { number: 6500003, timestamp: 2705473120, ..Default::default() }, - ForkId { hash: ForkHash([0x70, 0xcc, 0x14, 0xe2]), next: 0 }, - ), - ], - ); - } - #[test] fn sepolia_forkids() { test_fork_ids( @@ -2537,8 +2026,8 @@ Post-merge hard forks (timestamp based): cancun_time: u64, ) -> ChainSpec { builder - .with_fork(Hardfork::Shanghai, ForkCondition::Timestamp(shanghai_time)) - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(cancun_time)) + .with_fork(EthereumHardfork::Shanghai, ForkCondition::Timestamp(shanghai_time)) + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(cancun_time)) .build() } @@ -2593,14 +2082,14 @@ Post-merge hard forks (timestamp based): let terminal_block_ttd = U256::from(58750003716598352816469_u128); let terminal_block_difficulty = U256::from(11055787484078698_u128); assert!(!chainspec - .fork(Hardfork::Paris) + .fork(EthereumHardfork::Paris) .active_at_ttd(terminal_block_ttd, terminal_block_difficulty)); // Check that Paris is active on first PoS block #15537394. let first_pos_block_ttd = U256::from(58750003716598352816469_u128); let first_pos_difficulty = U256::ZERO; assert!(chainspec - .fork(Hardfork::Paris) + .fork(EthereumHardfork::Paris) .active_at_ttd(first_pos_block_ttd, first_pos_difficulty)); } @@ -2676,55 +2165,64 @@ Post-merge hard forks (timestamp based): // assert a bunch of hardforks that should be set assert_eq!( - chainspec.hardforks.get(&Hardfork::Homestead).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::Homestead).unwrap(), + ForkCondition::Block(0) + ); + assert_eq!( + chainspec.hardforks.get(EthereumHardfork::Tangerine).unwrap(), + ForkCondition::Block(0) ); assert_eq!( - chainspec.hardforks.get(&Hardfork::Tangerine).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::SpuriousDragon).unwrap(), + ForkCondition::Block(0) ); assert_eq!( - chainspec.hardforks.get(&Hardfork::SpuriousDragon).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::Byzantium).unwrap(), + ForkCondition::Block(0) ); assert_eq!( - chainspec.hardforks.get(&Hardfork::Byzantium).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::Constantinople).unwrap(), + ForkCondition::Block(0) ); assert_eq!( - chainspec.hardforks.get(&Hardfork::Constantinople).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::Petersburg).unwrap(), + ForkCondition::Block(0) ); assert_eq!( - chainspec.hardforks.get(&Hardfork::Petersburg).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::Istanbul).unwrap(), + ForkCondition::Block(0) ); - assert_eq!(chainspec.hardforks.get(&Hardfork::Istanbul).unwrap(), &ForkCondition::Block(0)); assert_eq!( - chainspec.hardforks.get(&Hardfork::MuirGlacier).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::MuirGlacier).unwrap(), + ForkCondition::Block(0) ); - assert_eq!(chainspec.hardforks.get(&Hardfork::Berlin).unwrap(), &ForkCondition::Block(0)); - 
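// The ForkHash bytes asserted throughout these tables come from EIP-2124: a
// CRC32 checksum over the genesis hash followed by each fork activation value
// (block number or timestamp) in ascending order, already deduplicated and
// with genesis-block forks excluded. A minimal sketch of that computation,
// assuming the `crc32fast` and `hex-literal` crates (the dependency choice is
// illustrative, not from this diff):
fn fork_hash(genesis_hash: [u8; 32], activations: &[u64]) -> [u8; 4] {
    let mut hasher = crc32fast::Hasher::new();
    hasher.update(&genesis_hash);
    for activation in activations {
        // Each fork is mixed in as an 8-byte big-endian integer.
        hasher.update(&activation.to_be_bytes());
    }
    hasher.finalize().to_be_bytes()
}

fn main() {
    // Mainnet genesis hash with Homestead (1_150_000) applied should yield the
    // 0x97c2c34c value asserted in the mainnet table above.
    let genesis =
        hex_literal::hex!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3");
    println!("{:02x?}", fork_hash(genesis, &[1_150_000]));
}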
assert_eq!(chainspec.hardforks.get(&Hardfork::London).unwrap(), &ForkCondition::Block(0)); assert_eq!( - chainspec.hardforks.get(&Hardfork::ArrowGlacier).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::Berlin).unwrap(), + ForkCondition::Block(0) ); assert_eq!( - chainspec.hardforks.get(&Hardfork::GrayGlacier).unwrap(), - &ForkCondition::Block(0) + chainspec.hardforks.get(EthereumHardfork::London).unwrap(), + ForkCondition::Block(0) + ); + assert_eq!( + chainspec.hardforks.get(EthereumHardfork::ArrowGlacier).unwrap(), + ForkCondition::Block(0) + ); + assert_eq!( + chainspec.hardforks.get(EthereumHardfork::GrayGlacier).unwrap(), + ForkCondition::Block(0) ); // including time based hardforks assert_eq!( - chainspec.hardforks.get(&Hardfork::Shanghai).unwrap(), - &ForkCondition::Timestamp(0) + chainspec.hardforks.get(EthereumHardfork::Shanghai).unwrap(), + ForkCondition::Timestamp(0) ); // including time based hardforks assert_eq!( - chainspec.hardforks.get(&Hardfork::Cancun).unwrap(), - &ForkCondition::Timestamp(1) + chainspec.hardforks.get(EthereumHardfork::Cancun).unwrap(), + ForkCondition::Timestamp(1) ); // alloc key -> expected rlp mapping @@ -2820,14 +2318,14 @@ Post-merge hard forks (timestamp based): hex!("9a6049ac535e3dc7436c189eaa81c73f35abd7f282ab67c32944ff0301d63360").into(); assert_eq!(chainspec.genesis_header().state_root, expected_state_root); let hard_forks = vec![ - Hardfork::Byzantium, - Hardfork::Homestead, - Hardfork::Istanbul, - Hardfork::Petersburg, - Hardfork::Constantinople, + EthereumHardfork::Byzantium, + EthereumHardfork::Homestead, + EthereumHardfork::Istanbul, + EthereumHardfork::Petersburg, + EthereumHardfork::Constantinople, ]; - for ref fork in hard_forks { - assert_eq!(chainspec.hardforks.get(fork).unwrap(), &ForkCondition::Block(0)); + for fork in hard_forks { + assert_eq!(chainspec.hardforks.get(fork).unwrap(), ForkCondition::Block(0)); } let expected_hash: B256 = @@ -3084,7 +2582,7 @@ Post-merge hard forks (timestamp based): #[test] fn holesky_paris_activated_at_genesis() { assert!(HOLESKY - .fork(Hardfork::Paris) + .fork(EthereumHardfork::Paris) .active_at_ttd(HOLESKY.genesis.difficulty, HOLESKY.genesis.difficulty)); } @@ -3138,13 +2636,16 @@ Post-merge hard forks (timestamp based): chain: Chain::mainnet(), genesis: Genesis::default(), genesis_hash: None, - hardforks: BTreeMap::from([(Hardfork::Frontier, ForkCondition::Never)]), + hardforks: ChainHardforks::new(vec![( + EthereumHardfork::Frontier.boxed(), + ForkCondition::Never, + )]), paris_block_and_final_difficulty: None, deposit_contract: None, ..Default::default() }; - assert_eq!(spec.hardfork_fork_id(Hardfork::Frontier), None); + assert_eq!(spec.hardfork_fork_id(EthereumHardfork::Frontier), None); } #[test] @@ -3153,13 +2654,16 @@ Post-merge hard forks (timestamp based): chain: Chain::mainnet(), genesis: Genesis::default(), genesis_hash: None, - hardforks: BTreeMap::from([(Hardfork::Shanghai, ForkCondition::Never)]), + hardforks: ChainHardforks::new(vec![( + EthereumHardfork::Shanghai.boxed(), + ForkCondition::Never, + )]), paris_block_and_final_difficulty: None, deposit_contract: None, ..Default::default() }; - assert_eq!(spec.hardfork_fork_filter(Hardfork::Shanghai), None); + assert_eq!(spec.hardfork_fork_filter(EthereumHardfork::Shanghai), None); } #[test] @@ -3277,17 +2781,17 @@ Post-merge hard forks (timestamp based): BaseFeeParamsKind::Constant(BaseFeeParams::new(70, 60)) ); - assert!(!chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 0)); - 
assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, 0)); + assert!(!chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 0)); - assert!(chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 10)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 20)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 30)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 40)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, 50)); + assert!(chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 10)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 30)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 40)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 50)); } #[cfg(feature = "optimism")] @@ -3338,24 +2842,101 @@ Post-merge hard forks (timestamp based): chain_spec.base_fee_params, BaseFeeParamsKind::Variable( vec![ - (Hardfork::London, BaseFeeParams::new(70, 60)), - (Hardfork::Canyon, BaseFeeParams::new(80, 60)), + (EthereumHardfork::London.boxed(), BaseFeeParams::new(70, 60)), + (OptimismHardfork::Canyon.boxed(), BaseFeeParams::new(80, 60)), ] .into() ) ); - assert!(!chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 0)); - assert!(!chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, 0)); - - assert!(chain_spec.is_fork_active_at_block(Hardfork::Bedrock, 10)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, 20)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, 30)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, 40)); - assert!(chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, 50)); + assert!(!chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 0)); + assert!(!chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 0)); + + assert!(chain_spec.is_fork_active_at_block(OptimismHardfork::Bedrock, 10)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, 30)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, 40)); + assert!(chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, 50)); + } + + #[cfg(feature = "optimism")] + #[test] + fn parse_genesis_optimism_with_variable_base_fee_params() { + use 
op_alloy_rpc_types::genesis::OptimismBaseFeeInfo;
+
+        let geth_genesis = r#"
+        {
+          "config": {
+            "chainId": 8453,
+            "homesteadBlock": 0,
+            "eip150Block": 0,
+            "eip155Block": 0,
+            "eip158Block": 0,
+            "byzantiumBlock": 0,
+            "constantinopleBlock": 0,
+            "petersburgBlock": 0,
+            "istanbulBlock": 0,
+            "muirGlacierBlock": 0,
+            "berlinBlock": 0,
+            "londonBlock": 0,
+            "arrowGlacierBlock": 0,
+            "grayGlacierBlock": 0,
+            "mergeNetsplitBlock": 0,
+            "bedrockBlock": 0,
+            "regolithTime": 15,
+            "terminalTotalDifficulty": 0,
+            "terminalTotalDifficultyPassed": true,
+            "optimism": {
+              "eip1559Elasticity": 6,
+              "eip1559Denominator": 50
+            }
+          }
+        }
+        "#;
+        let genesis: Genesis = serde_json::from_str(geth_genesis).unwrap();
+        let chainspec = ChainSpec::from(genesis.clone());
+
+        let actual_chain_id = genesis.config.chain_id;
+        assert_eq!(actual_chain_id, 8453);
+
+        assert_eq!(
+            chainspec.hardforks.get(EthereumHardfork::Istanbul),
+            Some(ForkCondition::Block(0))
+        );
+
+        let actual_bedrock_block = genesis.config.extra_fields.get("bedrockBlock");
+        assert_eq!(actual_bedrock_block, Some(serde_json::Value::from(0)).as_ref());
+        let actual_canyon_timestamp = genesis.config.extra_fields.get("canyonTime");
+        assert_eq!(actual_canyon_timestamp, None);
+
+        assert!(genesis.config.terminal_total_difficulty_passed);
+
+        let optimism_object = genesis.config.extra_fields.get("optimism").unwrap();
+        let optimism_base_fee_info =
+            serde_json::from_value::<OptimismBaseFeeInfo>(optimism_object.clone()).unwrap();
+
+        assert_eq!(
+            optimism_base_fee_info,
+            OptimismBaseFeeInfo {
+                eip1559_elasticity: Some(6),
+                eip1559_denominator: Some(50),
+                eip1559_denominator_canyon: None,
+            }
+        );
+        assert_eq!(
+            chainspec.base_fee_params,
+            BaseFeeParamsKind::Constant(BaseFeeParams {
+                max_change_denominator: 50,
+                elasticity_multiplier: 6,
+            })
+        );
+
+        assert!(chainspec.is_fork_active_at_block(OptimismHardfork::Bedrock, 0));
+
+        assert!(chainspec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20));
+    }
+
     #[test]
diff --git a/crates/cli/cli/Cargo.toml b/crates/cli/cli/Cargo.toml
new file mode 100644
index 000000000..83ea9da6f
--- /dev/null
+++ b/crates/cli/cli/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "reth-cli"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+
+[lints]
+
+
+[dependencies]
+# reth
+reth-cli-runner.workspace = true
+reth-chainspec.workspace = true
+eyre.workspace = true
+
+# misc
+clap.workspace = true
diff --git a/crates/cli/cli/src/chainspec.rs b/crates/cli/cli/src/chainspec.rs
new file mode 100644
index 000000000..4c1b4372f
--- /dev/null
+++ b/crates/cli/cli/src/chainspec.rs
@@ -0,0 +1,25 @@
+use clap::builder::TypedValueParser;
+use reth_chainspec::ChainSpec;
+use std::sync::Arc;
+
+/// Trait for parsing chain specifications.
+///
+/// This trait extends [`clap::builder::TypedValueParser`] to provide a parser for chain
+/// specifications. Implementers of this trait must provide a list of supported chains and a
+/// function to parse a given string into a [`ChainSpec`].
+pub trait ChainSpecParser: TypedValueParser<Value = Arc<ChainSpec>> + Default {
+    /// List of supported chains.
+    const SUPPORTED_CHAINS: &'static [&'static str];
+
+    /// Parses the given string into a [`ChainSpec`].
+    ///
+    /// # Arguments
+    ///
+    /// * `s` - A string slice that holds the chain spec to be parsed.
+    ///
+    /// # Errors
+    ///
+    /// This function will return an error if the input string cannot be parsed into a valid
+    /// [`ChainSpec`].
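// A sketch of what an implementation of this trait might look like. The
// `EthChainSpecParser` name and its two-chain table are assumptions for
// illustration; only `ChainSpecParser` itself comes from this diff.
use std::{ffi::OsStr, sync::Arc};

use clap::builder::TypedValueParser;
use reth_chainspec::{ChainSpec, MAINNET, SEPOLIA};
use reth_cli::chainspec::ChainSpecParser;

#[derive(Clone, Debug, Default)]
struct EthChainSpecParser;

impl TypedValueParser for EthChainSpecParser {
    type Value = Arc<ChainSpec>;

    fn parse_ref(
        &self,
        cmd: &clap::Command,
        _arg: Option<&clap::Arg>,
        value: &OsStr,
    ) -> Result<Self::Value, clap::Error> {
        // Delegate to the fallible parser below and map errors into clap's type.
        let s = value
            .to_str()
            .ok_or_else(|| clap::Error::new(clap::error::ErrorKind::InvalidUtf8).with_cmd(cmd))?;
        ChainSpecParser::parse(self, s)
            .map_err(|_| clap::Error::new(clap::error::ErrorKind::InvalidValue).with_cmd(cmd))
    }
}

impl ChainSpecParser for EthChainSpecParser {
    const SUPPORTED_CHAINS: &'static [&'static str] = &["mainnet", "sepolia"];

    fn parse(&self, s: &str) -> eyre::Result<Arc<ChainSpec>> {
        match s {
            "mainnet" => Ok(MAINNET.clone()),
            "sepolia" => Ok(SEPOLIA.clone()),
            _ => eyre::bail!("unsupported chain: {s}"),
        }
    }
}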
+    fn parse(&self, s: &str) -> eyre::Result<Arc<ChainSpec>>;
+}
diff --git a/crates/cli/cli/src/lib.rs b/crates/cli/cli/src/lib.rs
new file mode 100644
index 000000000..9e078e82f
--- /dev/null
+++ b/crates/cli/cli/src/lib.rs
@@ -0,0 +1,70 @@
+//! Cli abstraction for reth based nodes.
+
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
+    html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
+    issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
+)]
+#![cfg_attr(not(test), warn(unused_crate_dependencies))]
+#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+
+use std::{borrow::Cow, ffi::OsString};
+
+use reth_cli_runner::CliRunner;
+
+use clap::{Error, Parser};
+
+pub mod chainspec;
+
+/// Reth based node cli.
+///
+/// This trait is supposed to be implemented by the main struct of the CLI.
+///
+/// It provides commonly used functionality for running commands and information about the CLI,
+/// such as the name and version.
+pub trait RethCli: Sized {
+    /// The name of the implementation, e.g. `reth`, `op-reth`, etc.
+    fn name(&self) -> Cow<'static, str>;
+
+    /// The version of the node, such as `reth/v1.0.0`.
+    fn version(&self) -> Cow<'static, str>;
+
+    /// Parse args from the iterator returned by [`std::env::args_os()`].
+    fn parse_args() -> Result<Self, Error>
+    where
+        Self: Parser + Sized,
+    {
+        <Self as Parser>::try_parse_from(std::env::args_os())
+    }
+
+    /// Parse args from the given iterator.
+    fn try_parse_from<I, T>(itr: I) -> Result<Self, Error>
+    where
+        Self: Parser + Sized,
+        I: IntoIterator<Item = T>,
+        T: Into<OsString> + Clone,
+    {
+        <Self as Parser>::try_parse_from(itr)
+    }
+
+    /// Runs the given closure with a newly created [`CliRunner`].
+    fn with_runner<F, R>(self, f: F) -> R
+    where
+        F: FnOnce(Self, CliRunner) -> R,
+    {
+        let runner = CliRunner::default();
+
+        f(self, runner)
+    }
+
+    /// Parses and executes a command.
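// A sketch of a downstream binary wiring this trait up: `MyNodeCli` and its
// flag are invented for illustration. `execute` (defined just below) parses
// `std::env::args_os()` and hands the parsed CLI plus a `CliRunner` to the
// given closure.
use std::borrow::Cow;

use clap::Parser;
use reth_cli::RethCli;

#[derive(Debug, Parser)]
struct MyNodeCli {
    /// Illustrative flag.
    #[arg(long)]
    verbose: bool,
}

impl RethCli for MyNodeCli {
    fn name(&self) -> Cow<'static, str> {
        "my-reth".into()
    }

    fn version(&self) -> Cow<'static, str> {
        concat!("my-reth/v", env!("CARGO_PKG_VERSION")).into()
    }
}

fn main() -> Result<(), clap::Error> {
    MyNodeCli::execute(|cli, _runner| {
        if cli.verbose {
            println!("starting {} {}", cli.name(), cli.version());
        }
    })
}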
+    fn execute<F, R>(f: F) -> Result<R, Error>
+    where
+        Self: Parser + Sized,
+        F: FnOnce(Self, CliRunner) -> R,
+    {
+        let cli = Self::parse_args()?;
+
+        Ok(cli.with_runner(f))
+    }
+}
diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml
new file mode 100644
index 000000000..1bb1a4e00
--- /dev/null
+++ b/crates/cli/commands/Cargo.toml
@@ -0,0 +1,78 @@
+[package]
+name = "reth-cli-commands"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+
+[lints]
+
+[dependencies]
+reth-beacon-consensus.workspace = true
+reth-chainspec.workspace = true
+reth-cli-runner.workspace = true
+reth-cli-util.workspace = true
+reth-config.workspace = true
+reth-consensus.workspace = true
+reth-db = { workspace = true, features = ["mdbx"] }
+reth-db-api.workspace = true
+reth-db-common.workspace = true
+reth-downloaders.workspace = true
+reth-evm.workspace = true
+reth-exex.workspace = true
+reth-fs-util.workspace = true
+reth-network = { workspace = true, features = ["serde"] }
+reth-network-p2p.workspace = true
+reth-node-core.workspace = true
+reth-primitives.workspace = true
+reth-provider.workspace = true
+reth-prune.workspace = true
+reth-stages.workspace = true
+reth-static-file-types.workspace = true
+reth-static-file.workspace = true
+reth-trie = { workspace = true, features = ["metrics"] }
+
+tokio.workspace = true
+itertools.workspace = true
+
+# misc
+ahash = "0.8"
+human_bytes = "0.4.1"
+eyre.workspace = true
+clap = { workspace = true, features = ["derive", "env"] }
+serde.workspace = true
+serde_json.workspace = true
+tracing.workspace = true
+backon.workspace = true
+
+# io
+fdlimit.workspace = true
+confy.workspace = true
+toml = { workspace = true, features = ["display"] }
+
+# tui
+comfy-table = "7.0"
+crossterm = "0.27.0"
+ratatui = { version = "0.27", default-features = false, features = [
+    "crossterm",
+] }
+
+# metrics
+metrics-process.workspace = true
+
+# reth test-vectors
+proptest = { workspace = true, optional = true }
+arbitrary = { workspace = true, optional = true }
+proptest-arbitrary-interop = { workspace = true, optional = true }
+
+[features]
+default = []
+dev = [
+    "dep:proptest",
+    "dep:arbitrary",
+    "dep:proptest-arbitrary-interop",
+    "reth-primitives/arbitrary",
+    "reth-db-api/arbitrary"
+]
diff --git a/bin/reth/src/commands/common.rs b/crates/cli/commands/src/common.rs
similarity index 93%
rename from bin/reth/src/commands/common.rs
rename to crates/cli/commands/src/common.rs
index 31c0329a5..b382f7312 100644
--- a/bin/reth/src/commands/common.rs
+++ b/crates/cli/commands/src/common.rs
@@ -65,7 +65,11 @@ impl EnvironmentArgs {
         }
 
         let config_path = self.config.clone().unwrap_or_else(|| data_dir.config());
-        let mut config: Config = confy::load_path(config_path).unwrap_or_default();
+        let mut config: Config = confy::load_path(config_path)
+            .inspect_err(
+                |err| warn!(target: "reth::cli", %err, "Failed to load config file, using default"),
+            )
+            .unwrap_or_default();
 
         // Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to
         if config.stages.etl.dir.is_none() {
@@ -105,7 +109,10 @@ impl EnvironmentArgs {
         static_file_provider: StaticFileProvider,
     ) -> eyre::Result<ProviderFactory<Arc<DatabaseEnv>>> {
         let has_receipt_pruning = config.prune.as_ref().map_or(false, |a| a.has_receipts_pruning());
-        let factory = ProviderFactory::new(db, self.chain.clone(), static_file_provider);
+        let prune_modes =
+            config.prune.as_ref().map(|prune|
prune.segments.clone()).unwrap_or_default(); + let factory = ProviderFactory::new(db, self.chain.clone(), static_file_provider) + .with_prune_modes(prune_modes.clone()); info!(target: "reth::cli", "Verifying storage consistency."); @@ -119,8 +126,6 @@ impl EnvironmentArgs { return Ok(factory) } - let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); - // Highly unlikely to happen, and given its destructive nature, it's better to panic // instead. assert_ne!(unwind_target, PipelineTarget::Unwind(0), "A static file <> database inconsistency was found that would trigger an unwind to block 0"); diff --git a/bin/reth/src/commands/config_cmd.rs b/crates/cli/commands/src/config_cmd.rs similarity index 100% rename from bin/reth/src/commands/config_cmd.rs rename to crates/cli/commands/src/config_cmd.rs diff --git a/bin/reth/src/commands/db/checksum.rs b/crates/cli/commands/src/db/checksum.rs similarity index 97% rename from bin/reth/src/commands/db/checksum.rs rename to crates/cli/commands/src/db/checksum.rs index 6aa6b69e6..abc183da4 100644 --- a/bin/reth/src/commands/db/checksum.rs +++ b/crates/cli/commands/src/db/checksum.rs @@ -1,11 +1,9 @@ -use crate::{ - commands::db::get::{maybe_json_value_parser, table_key}, - utils::DbTool, -}; +use crate::db::get::{maybe_json_value_parser, table_key}; use ahash::RandomState; use clap::Parser; use reth_db::{DatabaseEnv, RawKey, RawTable, RawValue, TableViewer, Tables}; use reth_db_api::{cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx}; +use reth_db_common::DbTool; use std::{ hash::{BuildHasher, Hasher}, sync::Arc, diff --git a/bin/reth/src/commands/db/clear.rs b/crates/cli/commands/src/db/clear.rs similarity index 96% rename from bin/reth/src/commands/db/clear.rs rename to crates/cli/commands/src/db/clear.rs index 76c1b97e3..b9edf458d 100644 --- a/bin/reth/src/commands/db/clear.rs +++ b/crates/cli/commands/src/db/clear.rs @@ -5,8 +5,8 @@ use reth_db_api::{ table::Table, transaction::{DbTx, DbTxMut}, }; -use reth_primitives::{static_file::find_fixed_range, StaticFileSegment}; use reth_provider::{ProviderFactory, StaticFileProviderFactory}; +use reth_static_file_types::{find_fixed_range, StaticFileSegment}; /// The arguments for the `reth db clear` command #[derive(Parser, Debug)] diff --git a/bin/reth/src/commands/db/diff.rs b/crates/cli/commands/src/db/diff.rs similarity index 99% rename from bin/reth/src/commands/db/diff.rs rename to crates/cli/commands/src/db/diff.rs index 246b107fa..e025c4648 100644 --- a/bin/reth/src/commands/db/diff.rs +++ b/crates/cli/commands/src/db/diff.rs @@ -1,11 +1,11 @@ -use crate::{ - args::DatabaseArgs, - dirs::{DataDirPath, PlatformPath}, - utils::DbTool, -}; use clap::Parser; use reth_db::{open_db_read_only, tables_to_generic, DatabaseEnv, Tables}; use reth_db_api::{cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx}; +use reth_db_common::DbTool; +use reth_node_core::{ + args::DatabaseArgs, + dirs::{DataDirPath, PlatformPath}, +}; use std::{ collections::HashMap, fmt::Debug, diff --git a/bin/reth/src/commands/db/get.rs b/crates/cli/commands/src/db/get.rs similarity index 90% rename from bin/reth/src/commands/db/get.rs rename to crates/cli/commands/src/db/get.rs index 699a31471..9b5b011d3 100644 --- a/bin/reth/src/commands/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -1,15 +1,18 @@ -use crate::utils::DbTool; use clap::Parser; use reth_db::{ - static_file::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask, ReceiptMask, TransactionMask}, - 
tables, RawKey, RawTable, Receipts, TableViewer, Transactions, + static_file::{ + ColumnSelectorOne, ColumnSelectorTwo, HeaderMask, ReceiptMask, SidecarMask, TransactionMask, + }, + tables, RawKey, RawTable, Receipts, Sidecars, TableViewer, Transactions, }; use reth_db_api::{ database::Database, table::{Decompress, DupSort, Table}, }; -use reth_primitives::{BlockHash, Header, StaticFileSegment}; +use reth_db_common::DbTool; +use reth_primitives::{BlobSidecars, BlockHash, Header}; use reth_provider::StaticFileProviderFactory; +use reth_static_file_types::StaticFileSegment; use tracing::error; /// The arguments for the `reth db get` command @@ -71,6 +74,10 @@ impl Command { table_key::(&key)?, ::Value>>::MASK, ), + StaticFileSegment::Sidecars => ( + table_key::(&key)?, + >::MASK, + ), }; let content = tool.provider_factory.static_file_provider().find_static_file( @@ -112,6 +119,12 @@ impl Command { )?; println!("{}", serde_json::to_string_pretty(&receipt)?); } + StaticFileSegment::Sidecars => { + let sc = <::Value>::decompress( + content[0].as_slice(), + )?; + println!("{}", serde_json::to_string_pretty(&sc)?); + } } } } diff --git a/bin/reth/src/commands/db/list.rs b/crates/cli/commands/src/db/list.rs similarity index 99% rename from bin/reth/src/commands/db/list.rs rename to crates/cli/commands/src/db/list.rs index dd1a1846a..ed337bdcf 100644 --- a/bin/reth/src/commands/db/list.rs +++ b/crates/cli/commands/src/db/list.rs @@ -1,9 +1,9 @@ use super::tui::DbListTUI; -use crate::utils::{DbTool, ListFilter}; use clap::Parser; use eyre::WrapErr; use reth_db::{DatabaseEnv, RawValue, TableViewer, Tables}; use reth_db_api::{database::Database, table::Table}; +use reth_db_common::{DbTool, ListFilter}; use reth_primitives::hex; use std::{cell::RefCell, sync::Arc}; use tracing::error; diff --git a/bin/reth/src/commands/db/mod.rs b/crates/cli/commands/src/db/mod.rs similarity index 92% rename from bin/reth/src/commands/db/mod.rs rename to crates/cli/commands/src/db/mod.rs index fcafcc41a..de1f1cc38 100644 --- a/bin/reth/src/commands/db/mod.rs +++ b/crates/cli/commands/src/db/mod.rs @@ -1,11 +1,7 @@ -//! Database debugging tool - -use crate::{ - commands::common::{AccessRights, Environment, EnvironmentArgs}, - utils::DbTool, -}; +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::{Parser, Subcommand}; use reth_db::version::{get_db_version, DatabaseVersionError, DB_VERSION}; +use reth_db_common::DbTool; use std::io::{self, Write}; mod checksum; @@ -71,6 +67,16 @@ impl Command { let db_path = data_dir.db(); let static_files_path = data_dir.static_files(); + // ensure the provided datadir exist + eyre::ensure!( + data_dir.data_dir().is_dir(), + "Datadir does not exist: {:?}", + data_dir.data_dir() + ); + + // ensure the provided database exist + eyre::ensure!(db_path.is_dir(), "Database does not exist: {:?}", db_path); + match self.command { // TODO: We'll need to add this on the DB trait. 
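// The `eyre::ensure!` guards added above fail fast with a readable message
// instead of letting the database open fail later with something opaque. The
// same pattern in isolation (the paths here are invented):
use std::path::Path;

fn validate_datadir(data_dir: &Path) -> eyre::Result<()> {
    eyre::ensure!(data_dir.is_dir(), "Datadir does not exist: {data_dir:?}");
    let db_path = data_dir.join("db");
    eyre::ensure!(db_path.is_dir(), "Database does not exist: {db_path:?}");
    Ok(())
}

fn main() -> eyre::Result<()> {
    validate_datadir(Path::new("/var/lib/reth/mainnet"))
}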
Subcommands::Stats(command) => { diff --git a/bin/reth/src/commands/db/stats.rs b/crates/cli/commands/src/db/stats.rs similarity index 98% rename from bin/reth/src/commands/db/stats.rs rename to crates/cli/commands/src/db/stats.rs index 517b9c9e5..37f7d617b 100644 --- a/bin/reth/src/commands/db/stats.rs +++ b/crates/cli/commands/src/db/stats.rs @@ -1,4 +1,4 @@ -use crate::{commands::db::checksum::ChecksumViewer, utils::DbTool}; +use crate::db::checksum::ChecksumViewer; use clap::Parser; use comfy_table::{Cell, Row, Table as ComfyTable}; use eyre::WrapErr; @@ -6,10 +6,11 @@ use human_bytes::human_bytes; use itertools::Itertools; use reth_db::{mdbx, static_file::iter_static_files, DatabaseEnv, TableViewer, Tables}; use reth_db_api::database::Database; +use reth_db_common::DbTool; use reth_fs_util as fs; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_primitives::static_file::{find_fixed_range, SegmentRangeInclusive}; use reth_provider::providers::StaticFileProvider; +use reth_static_file_types::{find_fixed_range, SegmentRangeInclusive}; use std::{sync::Arc, time::Duration}; #[derive(Parser, Debug)] diff --git a/bin/reth/src/commands/db/tui.rs b/crates/cli/commands/src/db/tui.rs similarity index 100% rename from bin/reth/src/commands/db/tui.rs rename to crates/cli/commands/src/db/tui.rs diff --git a/bin/reth/src/commands/dump_genesis.rs b/crates/cli/commands/src/dump_genesis.rs similarity index 93% rename from bin/reth/src/commands/dump_genesis.rs rename to crates/cli/commands/src/dump_genesis.rs index 70b95e736..ae425ca8c 100644 --- a/bin/reth/src/commands/dump_genesis.rs +++ b/crates/cli/commands/src/dump_genesis.rs @@ -1,7 +1,7 @@ //! Command that dumps genesis block JSON configuration to stdout -use crate::args::utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS}; use clap::Parser; use reth_chainspec::ChainSpec; +use reth_node_core::args::utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS}; use std::sync::Arc; /// Dumps genesis block JSON configuration to stdout diff --git a/bin/reth/src/commands/init_cmd.rs b/crates/cli/commands/src/init_cmd.rs similarity index 91% rename from bin/reth/src/commands/init_cmd.rs rename to crates/cli/commands/src/init_cmd.rs index 22657f0c0..933527cc5 100644 --- a/bin/reth/src/commands/init_cmd.rs +++ b/crates/cli/commands/src/init_cmd.rs @@ -1,6 +1,6 @@ //! Command that initializes the node from a genesis file. -use crate::commands::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; use reth_provider::BlockHashReader; use tracing::info; diff --git a/bin/reth/src/commands/init_state.rs b/crates/cli/commands/src/init_state.rs similarity index 96% rename from bin/reth/src/commands/init_state.rs rename to crates/cli/commands/src/init_state.rs index dbf45e581..af26d15e0 100644 --- a/bin/reth/src/commands/init_state.rs +++ b/crates/cli/commands/src/init_state.rs @@ -1,6 +1,6 @@ //! Command that initializes the node from a genesis file. -use crate::commands::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; use reth_config::config::EtlConfig; use reth_db_api::database::Database; diff --git a/crates/cli/commands/src/lib.rs b/crates/cli/commands/src/lib.rs new file mode 100644 index 000000000..16767544e --- /dev/null +++ b/crates/cli/commands/src/lib.rs @@ -0,0 +1,22 @@ +//! Commonly used reth CLI commands. 
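// The point of this crate split is that downstream node binaries can compose
// the command modules listed just below into their own clap trees. An invented
// example of such a composition; the re-used command type names are assumed
// from the renamed files above, not guaranteed by this diff:
use clap::{Parser, Subcommand};

#[derive(Debug, Parser)]
struct Cli {
    #[command(subcommand)]
    command: Commands,
}

#[derive(Debug, Subcommand)]
enum Commands {
    /// Database debugging utilities from reth-cli-commands.
    Db(reth_cli_commands::db::Command),
    /// Dump genesis JSON to stdout.
    DumpGenesis(reth_cli_commands::dump_genesis::DumpGenesisCommand),
    /// Prune according to the config, without limits.
    Prune(reth_cli_commands::prune::PruneCommand),
}

fn main() {
    let _cli = Cli::parse();
}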
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +pub mod common; +pub mod config_cmd; +pub mod db; +pub mod dump_genesis; +pub mod init_cmd; +pub mod init_state; +pub mod p2p; +pub mod prune; +pub mod recover; +pub mod stage; +#[cfg(feature = "dev")] +pub mod test_vectors; diff --git a/bin/reth/src/commands/p2p/mod.rs b/crates/cli/commands/src/p2p.rs similarity index 61% rename from bin/reth/src/commands/p2p/mod.rs rename to crates/cli/commands/src/p2p.rs index 290a0a0b0..0fdefac8b 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ b/crates/cli/commands/src/p2p.rs @@ -1,29 +1,21 @@ //! P2P Debugging tool -use crate::{ - args::{ - get_secret_key, - utils::{chain_help, chain_value_parser, hash_or_num_value_parser, SUPPORTED_CHAINS}, - DatabaseArgs, DiscoveryArgs, NetworkArgs, - }, - utils::get_single_header, -}; use backon::{ConstantBuilder, Retryable}; use clap::{Parser, Subcommand}; -use discv5::ListenConfig; use reth_chainspec::ChainSpec; +use reth_cli_util::{get_secret_key, hash_or_num_value_parser}; use reth_config::Config; -use reth_db::create_db; use reth_network::NetworkConfigBuilder; use reth_network_p2p::bodies::client::BodiesClient; -use reth_node_core::args::DatadirArgs; -use reth_primitives::BlockHashOrNumber; -use reth_provider::{providers::StaticFileProvider, ProviderFactory}; -use std::{ - net::{IpAddr, SocketAddrV4, SocketAddrV6}, - path::PathBuf, - sync::Arc, +use reth_node_core::{ + args::{ + utils::{chain_help, chain_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, DatadirArgs, NetworkArgs, + }, + utils::get_single_header, }; +use reth_primitives::BlockHashOrNumber; +use std::{path::PathBuf, sync::Arc}; /// `reth p2p` command #[derive(Debug, Parser)] @@ -80,18 +72,12 @@ pub enum Subcommands { impl Command { /// Execute `p2p` command pub async fn execute(&self) -> eyre::Result<()> { - let tempdir = tempfile::TempDir::new()?; - let noop_db = Arc::new(create_db(tempdir.into_path(), self.db.database_args())?); - - // add network name to data dir let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain); let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); let mut config: Config = confy::load_path(&config_path).unwrap_or_default(); - for peer in &self.network.trusted_peers { - config.peers.trusted_nodes.insert(peer.resolve().await?); - } + config.peers.trusted_nodes.extend(self.network.resolve_trusted_peers().await?); if config.peers.trusted_nodes.is_empty() && self.network.trusted_only { eyre::bail!("No trusted nodes. 
Set trusted peer with `--trusted-peer ` or set `--trusted-only` to `false`") @@ -106,62 +92,20 @@ impl Command { let rlpx_socket = (self.network.addr, self.network.port).into(); let boot_nodes = self.chain.bootnodes().unwrap_or_default(); - let network = NetworkConfigBuilder::new(p2p_secret_key) + let net = NetworkConfigBuilder::new(p2p_secret_key) .peer_config(config.peers_config_with_basic_nodes_from_file(None)) .external_ip_resolver(self.network.nat) .chain_spec(self.chain.clone()) .disable_discv4_discovery_if(self.chain.chain.is_optimism()) .boot_nodes(boot_nodes.clone()) .apply(|builder| { - self.network - .discovery - .apply_to_builder(builder, rlpx_socket) - .map_discv5_config_builder(|builder| { - let DiscoveryArgs { - discv5_addr, - discv5_addr_ipv6, - discv5_port, - discv5_port_ipv6, - discv5_lookup_interval, - discv5_bootstrap_lookup_interval, - discv5_bootstrap_lookup_countdown, - .. - } = self.network.discovery; - - // Use rlpx address if none given - let discv5_addr_ipv4 = discv5_addr.or(match self.network.addr { - IpAddr::V4(ip) => Some(ip), - IpAddr::V6(_) => None, - }); - let discv5_addr_ipv6 = discv5_addr_ipv6.or(match self.network.addr { - IpAddr::V4(_) => None, - IpAddr::V6(ip) => Some(ip), - }); - - builder - .discv5_config( - discv5::ConfigBuilder::new(ListenConfig::from_two_sockets( - discv5_addr_ipv4 - .map(|addr| SocketAddrV4::new(addr, discv5_port)), - discv5_addr_ipv6.map(|addr| { - SocketAddrV6::new(addr, discv5_port_ipv6, 0, 0) - }), - )) - .build(), - ) - .add_unsigned_boot_nodes(boot_nodes.into_iter()) - .lookup_interval(discv5_lookup_interval) - .bootstrap_lookup_interval(discv5_bootstrap_lookup_interval) - .bootstrap_lookup_countdown(discv5_bootstrap_lookup_countdown) - }) + self.network.discovery.apply_to_builder(builder, rlpx_socket, boot_nodes) }) - .build(Arc::new(ProviderFactory::new( - noop_db, - self.chain.clone(), - StaticFileProvider::read_write(data_dir.static_files())?, - ))) - .start_network() + .build_with_noop_provider() + .manager() .await?; + let network = net.handle().clone(); + tokio::task::spawn(net); let fetch_client = network.fetch_client().await?; let retries = self.retries.max(1); diff --git a/crates/cli/commands/src/prune.rs b/crates/cli/commands/src/prune.rs new file mode 100644 index 000000000..6cc5e033b --- /dev/null +++ b/crates/cli/commands/src/prune.rs @@ -0,0 +1,42 @@ +//! Command that runs pruning without any limits. +use crate::common::{AccessRights, Environment, EnvironmentArgs}; +use clap::Parser; +use reth_prune::PrunerBuilder; +use reth_static_file::StaticFileProducer; +use tracing::info; + +/// Prunes according to the configuration without any limits +#[derive(Debug, Parser)] +pub struct PruneCommand { + #[command(flatten)] + env: EnvironmentArgs, +} + +impl PruneCommand { + /// Execute the `prune` command + pub async fn execute(self) -> eyre::Result<()> { + let Environment { config, provider_factory, .. } = self.env.init(AccessRights::RW)?; + let prune_config = config.prune.unwrap_or_default(); + + // Copy data from database to static files + info!(target: "reth::cli", "Copying data from database to static files..."); + let static_file_producer = + StaticFileProducer::new(provider_factory.clone(), prune_config.segments.clone()); + let lowest_static_file_height = static_file_producer.lock().copy_to_static_files()?.min(); + info!(target: "reth::cli", ?lowest_static_file_height, "Copied data from database to static files"); + + // Delete data which has been copied to static files. 
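// Standalone view of the pruning step that follows: the builder calls are the
// ones used below, while the free-function wrapper, the `PruneConfig` path,
// and the exact generic bounds are assumptions.
use reth_config::config::PruneConfig;
use reth_db_api::database::Database;
use reth_provider::ProviderFactory;
use reth_prune::PrunerBuilder;

fn prune_without_limits<DB: Database>(
    factory: ProviderFactory<DB>,
    prune_config: PruneConfig,
    prune_tip: u64,
) -> eyre::Result<()> {
    let mut pruner = PrunerBuilder::new(prune_config)
        // Lift the per-run deletion cap; this command intentionally runs
        // unthrottled.
        .prune_delete_limit(usize::MAX)
        .build(factory);
    pruner.run(prune_tip)?;
    Ok(())
}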
+ if let Some(prune_tip) = lowest_static_file_height { + info!(target: "reth::cli", ?prune_tip, ?prune_config, "Pruning data from database..."); + // Run the pruner according to the configuration, and don't enforce any limits on it + let mut pruner = PrunerBuilder::new(prune_config) + .prune_delete_limit(usize::MAX) + .build(provider_factory); + + pruner.run(prune_tip)?; + info!(target: "reth::cli", "Pruned data from database"); + } + + Ok(()) + } +} diff --git a/bin/reth/src/commands/recover/mod.rs b/crates/cli/commands/src/recover/mod.rs similarity index 100% rename from bin/reth/src/commands/recover/mod.rs rename to crates/cli/commands/src/recover/mod.rs diff --git a/bin/reth/src/commands/recover/storage_tries.rs b/crates/cli/commands/src/recover/storage_tries.rs similarity index 96% rename from bin/reth/src/commands/recover/storage_tries.rs rename to crates/cli/commands/src/recover/storage_tries.rs index b1dbbfa88..2b4087144 100644 --- a/bin/reth/src/commands/recover/storage_tries.rs +++ b/crates/cli/commands/src/recover/storage_tries.rs @@ -1,4 +1,4 @@ -use crate::commands::common::{AccessRights, Environment, EnvironmentArgs}; +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; use reth_cli_runner::CliContext; use reth_db::tables; diff --git a/bin/reth/src/commands/stage/drop.rs b/crates/cli/commands/src/stage/drop.rs similarity index 96% rename from bin/reth/src/commands/stage/drop.rs rename to crates/cli/commands/src/stage/drop.rs index 8297eafef..8278185df 100644 --- a/bin/reth/src/commands/stage/drop.rs +++ b/crates/cli/commands/src/stage/drop.rs @@ -1,18 +1,17 @@ //! Database debugging tool - -use crate::{ - args::StageEnum, - commands::common::{AccessRights, Environment, EnvironmentArgs}, - utils::DbTool, -}; +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; use itertools::Itertools; use reth_db::{static_file::iter_static_files, tables, DatabaseEnv}; use reth_db_api::transaction::DbTxMut; -use reth_db_common::init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}; -use reth_primitives::{static_file::find_fixed_range, StaticFileSegment}; +use reth_db_common::{ + init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}, + DbTool, +}; +use reth_node_core::args::StageEnum; use reth_provider::{providers::StaticFileWriter, StaticFileProviderFactory}; use reth_stages::StageId; +use reth_static_file_types::{find_fixed_range, StaticFileSegment}; /// `reth drop-stage` command #[derive(Debug, Parser)] diff --git a/bin/reth/src/commands/stage/dump/execution.rs b/crates/cli/commands/src/stage/dump/execution.rs similarity index 86% rename from bin/reth/src/commands/stage/dump/execution.rs rename to crates/cli/commands/src/stage/dump/execution.rs index b3ec8ad33..61fc5e41c 100644 --- a/bin/reth/src/commands/stage/dump/execution.rs +++ b/crates/cli/commands/src/stage/dump/execution.rs @@ -1,26 +1,32 @@ use super::setup; -use crate::{macros::block_executor, utils::DbTool}; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{ cursor::DbCursorRO, database::Database, table::TableImporter, transaction::DbTx, }; +use reth_db_common::DbTool; +use reth_evm::{execute::BlockExecutorProvider, noop::NoopBlockExecutorProvider}; use reth_node_core::dirs::{ChainPath, DataDirPath}; -use reth_provider::{providers::StaticFileProvider, ChainSpecProvider, ProviderFactory}; +use reth_provider::{providers::StaticFileProvider, ProviderFactory}; use reth_stages::{stages::ExecutionStage, Stage, StageCheckpoint, 
UnwindInput}; use tracing::info; -pub(crate) async fn dump_execution_stage( +pub(crate) async fn dump_execution_stage( db_tool: &DbTool, from: u64, to: u64, output_datadir: ChainPath, should_run: bool, -) -> eyre::Result<()> { + executor: E, +) -> eyre::Result<()> +where + DB: Database, + E: BlockExecutorProvider, +{ let (output_db, tip_block_number) = setup(from, to, &output_datadir.db(), db_tool)?; import_tables_with_range(&output_db, db_tool, from, to)?; - unwind_and_copy(db_tool, from, tip_block_number, &output_db).await?; + unwind_and_copy(db_tool, from, tip_block_number, &output_db)?; if should_run { dry_run( @@ -31,8 +37,8 @@ pub(crate) async fn dump_execution_stage( ), to, from, - ) - .await?; + executor, + )?; } Ok(()) @@ -119,7 +125,7 @@ fn import_tables_with_range( /// Dry-run an unwind to FROM block, so we can get the `PlainStorageState` and /// `PlainAccountState` safely. There might be some state dependency from an address /// which hasn't been changed in the given range. -async fn unwind_and_copy( +fn unwind_and_copy( db_tool: &DbTool, from: u64, tip_block_number: u64, @@ -127,8 +133,7 @@ async fn unwind_and_copy( ) -> eyre::Result<()> { let provider = db_tool.provider_factory.provider_rw()?; - let executor = block_executor!(db_tool.chain()); - let mut exec_stage = ExecutionStage::new_with_executor(executor); + let mut exec_stage = ExecutionStage::new_with_executor(NoopBlockExecutorProvider::default()); exec_stage.unwind( &provider, @@ -150,18 +155,18 @@ async fn unwind_and_copy( } /// Try to re-execute the stage without committing -async fn dry_run( +fn dry_run( output_provider_factory: ProviderFactory, to: u64, from: u64, -) -> eyre::Result<()> { + executor: E, +) -> eyre::Result<()> +where + DB: Database, + E: BlockExecutorProvider, +{ info!(target: "reth::cli", "Executing stage. 
[dry-run]"); - #[cfg(feature = "bsc")] - let executor = - block_executor!(output_provider_factory.chain_spec(), output_provider_factory.clone()); - #[cfg(not(feature = "bsc"))] - let executor = block_executor!(output_provider_factory.chain_spec()); let mut exec_stage = ExecutionStage::new_with_executor(executor); let input = diff --git a/bin/reth/src/commands/stage/dump/hashing_account.rs b/crates/cli/commands/src/stage/dump/hashing_account.rs similarity index 96% rename from bin/reth/src/commands/stage/dump/hashing_account.rs rename to crates/cli/commands/src/stage/dump/hashing_account.rs index 2e50a8ad6..025899231 100644 --- a/bin/reth/src/commands/stage/dump/hashing_account.rs +++ b/crates/cli/commands/src/stage/dump/hashing_account.rs @@ -1,8 +1,8 @@ use super::setup; -use crate::utils::DbTool; use eyre::Result; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; +use reth_db_common::DbTool; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_primitives::BlockNumber; use reth_provider::{providers::StaticFileProvider, ProviderFactory}; @@ -38,8 +38,7 @@ pub(crate) async fn dump_hashing_account_stage( ), to, from, - ) - .await?; + )?; } Ok(()) @@ -71,7 +70,7 @@ fn unwind_and_copy( } /// Try to re-execute the stage straight away -async fn dry_run( +fn dry_run( output_provider_factory: ProviderFactory, to: u64, from: u64, diff --git a/bin/reth/src/commands/stage/dump/hashing_storage.rs b/crates/cli/commands/src/stage/dump/hashing_storage.rs similarity index 96% rename from bin/reth/src/commands/stage/dump/hashing_storage.rs rename to crates/cli/commands/src/stage/dump/hashing_storage.rs index 1dfd722f5..ad6298887 100644 --- a/bin/reth/src/commands/stage/dump/hashing_storage.rs +++ b/crates/cli/commands/src/stage/dump/hashing_storage.rs @@ -1,8 +1,8 @@ use super::setup; -use crate::utils::DbTool; use eyre::Result; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; +use reth_db_common::DbTool; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_provider::{providers::StaticFileProvider, ProviderFactory}; use reth_stages::{stages::StorageHashingStage, Stage, StageCheckpoint, UnwindInput}; @@ -28,8 +28,7 @@ pub(crate) async fn dump_hashing_storage_stage( ), to, from, - ) - .await?; + )?; } Ok(()) @@ -66,7 +65,7 @@ fn unwind_and_copy( } /// Try to re-execute the stage straight away -async fn dry_run( +fn dry_run( output_provider_factory: ProviderFactory, to: u64, from: u64, diff --git a/bin/reth/src/commands/stage/dump/merkle.rs b/crates/cli/commands/src/stage/dump/merkle.rs similarity index 90% rename from bin/reth/src/commands/stage/dump/merkle.rs rename to crates/cli/commands/src/stage/dump/merkle.rs index fa345bb47..2d13c1515 100644 --- a/bin/reth/src/commands/stage/dump/merkle.rs +++ b/crates/cli/commands/src/stage/dump/merkle.rs @@ -1,20 +1,21 @@ use super::setup; -use crate::{macros::block_executor, utils::DbTool}; use eyre::Result; use reth_config::config::EtlConfig; use reth_db::{tables, DatabaseEnv}; use reth_db_api::{database::Database, table::TableImporter}; +use reth_db_common::DbTool; +use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_node_core::dirs::{ChainPath, DataDirPath}; use reth_primitives::BlockNumber; use reth_provider::{providers::StaticFileProvider, ProviderFactory}; -use reth_prune_types::PruneModes; +use reth_prune::PruneModes; use reth_stages::{ stages::{ - AccountHashingStage, ExecutionStage, ExecutionStageThresholds, 
MerkleStage, - StorageHashingStage, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, + AccountHashingStage, ExecutionStage, MerkleStage, StorageHashingStage, + MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, }, - Stage, StageCheckpoint, UnwindInput, + ExecutionStageThresholds, Stage, StageCheckpoint, UnwindInput, }; use tracing::info; @@ -43,7 +44,7 @@ pub(crate) async fn dump_merkle_stage( ) })??; - unwind_and_copy(db_tool, (from, to), tip_block_number, &output_db).await?; + unwind_and_copy(db_tool, (from, to), tip_block_number, &output_db)?; if should_run { dry_run( @@ -54,15 +55,14 @@ pub(crate) async fn dump_merkle_stage( ), to, from, - ) - .await?; + )?; } Ok(()) } /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. -async fn unwind_and_copy( +fn unwind_and_copy( db_tool: &DbTool, range: (u64, u64), tip_block_number: u64, @@ -86,11 +86,9 @@ async fn unwind_and_copy( MerkleStage::default_unwind().unwind(&provider, unwind)?; - let executor = block_executor!(db_tool.chain()); - // Bring Plainstate to TO (hashing stage execution requires it) let mut exec_stage = ExecutionStage::new( - executor, + NoopBlockExecutorProvider::default(), // Not necessary for unwinding. ExecutionStageThresholds { max_blocks: Some(u64::MAX), max_changes: None, @@ -142,7 +140,7 @@ async fn unwind_and_copy( } /// Try to re-execute the stage straight away -async fn dry_run( +fn dry_run( output_provider_factory: ProviderFactory, to: u64, from: u64, diff --git a/bin/reth/src/commands/stage/dump/mod.rs b/crates/cli/commands/src/stage/dump/mod.rs similarity index 77% rename from bin/reth/src/commands/stage/dump/mod.rs rename to crates/cli/commands/src/stage/dump/mod.rs index 287708b00..7366ff998 100644 --- a/bin/reth/src/commands/stage/dump/mod.rs +++ b/crates/cli/commands/src/stage/dump/mod.rs @@ -1,20 +1,19 @@ //! Database debugging tool - -use crate::{ - commands::common::{AccessRights, Environment, EnvironmentArgs}, - dirs::DataDirPath, - utils::DbTool, -}; - -use crate::args::DatadirArgs; +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; +use reth_chainspec::ChainSpec; use reth_db::{init_db, mdbx::DatabaseArguments, tables, DatabaseEnv}; use reth_db_api::{ cursor::DbCursorRO, database::Database, models::ClientVersion, table::TableImporter, transaction::DbTx, }; -use reth_node_core::dirs::PlatformPath; -use std::path::PathBuf; +use reth_db_common::DbTool; +use reth_evm::execute::BlockExecutorProvider; +use reth_node_core::{ + args::DatadirArgs, + dirs::{DataDirPath, PlatformPath}, +}; +use std::{path::PathBuf, sync::Arc}; use tracing::info; mod hashing_storage; @@ -77,16 +76,29 @@ macro_rules! handle_stage { let output_datadir = output_datadir.with_chain($tool.chain().chain, DatadirArgs::default()); $stage_fn($tool, *from, *to, output_datadir, *dry_run).await? }}; + + ($stage_fn:ident, $tool:expr, $command:expr, $executor:expr) => {{ + let StageCommand { output_datadir, from, to, dry_run, .. } = $command; + let output_datadir = output_datadir.with_chain($tool.chain().chain, DatadirArgs::default()); + $stage_fn($tool, *from, *to, output_datadir, *dry_run, $executor).await? + }}; } impl Command { /// Execute `dump-stage` command - pub async fn execute(self) -> eyre::Result<()> { + pub async fn execute(self, executor: F) -> eyre::Result<()> + where + E: BlockExecutorProvider, + F: FnOnce(Arc) -> E, + { let Environment { provider_factory, .. 
} = self.env.init(AccessRights::RO)?; let tool = DbTool::new(provider_factory)?; match &self.command { - Stages::Execution(cmd) => handle_stage!(dump_execution_stage, &tool, cmd), + Stages::Execution(cmd) => { + let executor = executor(tool.chain()); + handle_stage!(dump_execution_stage, &tool, cmd, executor) + } Stages::StorageHashing(cmd) => handle_stage!(dump_hashing_storage_stage, &tool, cmd), Stages::AccountHashing(cmd) => handle_stage!(dump_hashing_account_stage, &tool, cmd), Stages::Merkle(cmd) => handle_stage!(dump_merkle_stage, &tool, cmd), diff --git a/bin/reth/src/commands/stage/mod.rs b/crates/cli/commands/src/stage/mod.rs similarity index 73% rename from bin/reth/src/commands/stage/mod.rs rename to crates/cli/commands/src/stage/mod.rs index 8f514295e..e0365c879 100644 --- a/bin/reth/src/commands/stage/mod.rs +++ b/crates/cli/commands/src/stage/mod.rs @@ -1,7 +1,11 @@ //! `reth stage` command +use std::sync::Arc; + use clap::{Parser, Subcommand}; +use reth_chainspec::ChainSpec; use reth_cli_runner::CliContext; +use reth_evm::execute::BlockExecutorProvider; pub mod drop; pub mod dump; @@ -35,11 +39,15 @@ pub enum Subcommands { impl Command { /// Execute `stage` command - pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { + pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> + where + E: BlockExecutorProvider, + F: FnOnce(Arc) -> E, + { match self.command { - Subcommands::Run(command) => command.execute(ctx).await, + Subcommands::Run(command) => command.execute(ctx, executor).await, Subcommands::Drop(command) => command.execute().await, - Subcommands::Dump(command) => command.execute().await, + Subcommands::Dump(command) => command.execute(executor).await, Subcommands::Unwind(command) => command.execute().await, } } diff --git a/bin/reth/src/commands/stage/run.rs b/crates/cli/commands/src/stage/run.rs similarity index 84% rename from bin/reth/src/commands/stage/run.rs rename to crates/cli/commands/src/stage/run.rs index a13476f52..2a2dd6f8a 100644 --- a/bin/reth/src/commands/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -1,30 +1,31 @@ //! Main `stage` command //! //! 
Stage debugging tool - -use crate::{ - args::{get_secret_key, NetworkArgs, StageEnum}, - commands::common::{AccessRights, Environment, EnvironmentArgs}, - macros::block_executor, - prometheus_exporter, -}; +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; +use reth_chainspec::ChainSpec; use reth_cli_runner::CliContext; +use reth_cli_util::get_secret_key; use reth_config::config::{HashingConfig, SenderRecoveryConfig, TransactionLookupConfig}; use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; +use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; +use reth_node_core::{ + args::{NetworkArgs, StageEnum}, + prometheus_exporter, +}; use reth_provider::{ ChainSpecProvider, StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory, StaticFileWriter, }; use reth_stages::{ stages::{ - AccountHashingStage, BodyStage, ExecutionStage, ExecutionStageThresholds, - IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, - StorageHashingStage, TransactionLookupStage, + AccountHashingStage, BodyStage, ExecutionStage, IndexAccountHistoryStage, + IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, + TransactionLookupStage, }, - ExecInput, ExecOutput, Stage, StageExt, UnwindInput, UnwindOutput, + ExecInput, ExecOutput, ExecutionStageThresholds, Stage, StageExt, UnwindInput, UnwindOutput, }; use std::{any::Any, net::SocketAddr, sync::Arc, time::Instant}; use tracing::*; @@ -83,7 +84,11 @@ pub struct Command { impl Command { /// Execute `stage` command - pub async fn execute(self, ctx: CliContext) -> eyre::Result<()> { + pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> + where + E: BlockExecutorProvider, + F: FnOnce(Arc) -> E, + { // Raise the fd limit of the process. // Does not do anything on windows. 
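With this change the stage runner no longer constructs an executor itself via the removed `block_executor!` macro; the binary injects one through the new closure parameter. A minimal call-site sketch, assuming the Ethereum provider from `reth_node_ethereum` (a name outside this diff; any `BlockExecutorProvider` works):

use reth_node_ethereum::EthExecutorProvider;

// Hypothetical wiring in a node binary: `execute` only invokes the closure
// for stages that actually execute blocks (e.g. the Execution stage).
async fn run(cmd: Command, ctx: CliContext) -> eyre::Result<()> {
    cmd.execute(ctx, EthExecutorProvider::ethereum).await
}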
let _ = fdlimit::raise_fd_limit(); @@ -118,12 +123,7 @@ impl Command { let mut config = config; config.peers.trusted_nodes_only = self.network.trusted_only; - if !self.network.trusted_peers.is_empty() { - for peer in &self.network.trusted_peers { - let peer = peer.resolve().await?; - config.peers.trusted_nodes.insert(peer); - } - } + config.peers.trusted_nodes.extend(self.network.resolve_trusted_peers().await?); let network_secret_path = self .network @@ -168,28 +168,21 @@ impl Command { })), None, ), - StageEnum::Execution => { - #[cfg(feature = "bsc")] - let executor = - block_executor!(provider_factory.chain_spec(), provider_factory.clone()); - #[cfg(not(feature = "bsc"))] - let executor = block_executor!(provider_factory.chain_spec()); - ( - Box::new(ExecutionStage::new( - executor, - ExecutionStageThresholds { - max_blocks: Some(batch_size), - max_changes: None, - max_cumulative_gas: None, - max_duration: None, - }, - config.stages.merkle.clean_threshold, - prune_modes, - ExExManagerHandle::empty(), - )), - None, - ) - } + StageEnum::Execution => ( + Box::new(ExecutionStage::new( + executor(provider_factory.chain_spec()), + ExecutionStageThresholds { + max_blocks: Some(batch_size), + max_changes: None, + max_cumulative_gas: None, + max_duration: None, + }, + config.stages.merkle.clean_threshold, + prune_modes, + ExExManagerHandle::empty(), + )), + None, + ), StageEnum::TxLookup => ( Box::new(TransactionLookupStage::new( TransactionLookupConfig { chunk_size: batch_size }, diff --git a/bin/reth/src/commands/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs similarity index 73% rename from bin/reth/src/commands/stage/unwind.rs rename to crates/cli/commands/src/stage/unwind.rs index 157f33bff..7659fdfc1 100644 --- a/bin/reth/src/commands/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -1,11 +1,13 @@ //! Unwinding a certain block range +use crate::common::{AccessRights, Environment, EnvironmentArgs}; use clap::{Parser, Subcommand}; use reth_beacon_consensus::EthBeaconConsensus; use reth_config::Config; use reth_consensus::Consensus; use reth_db_api::database::Database; use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader}; +use reth_evm::noop::NoopBlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_node_core::args::NetworkArgs; use reth_primitives::{BlockHashOrNumber, BlockNumber, B256}; @@ -13,22 +15,17 @@ use reth_provider::{ BlockExecutionWriter, BlockNumReader, ChainSpecProvider, FinalizedBlockReader, FinalizedBlockWriter, ProviderFactory, StaticFileProviderFactory, }; -use reth_prune_types::PruneModes; +use reth_prune::PruneModes; use reth_stages::{ - sets::DefaultStages, - stages::{ExecutionStage, ExecutionStageThresholds}, - Pipeline, StageSet, + sets::{DefaultStages, OfflineStages}, + stages::ExecutionStage, + ExecutionStageThresholds, Pipeline, StageSet, }; use reth_static_file::StaticFileProducer; use std::{ops::RangeInclusive, sync::Arc}; use tokio::sync::watch; use tracing::info; -use crate::{ - commands::common::{AccessRights, Environment, EnvironmentArgs}, - macros::block_executor, -}; - /// `reth stage unwind` command #[derive(Debug, Parser)] pub struct Command { @@ -40,6 +37,11 @@ pub struct Command { #[command(subcommand)] command: Subcommands, + + /// If this is enabled, then all stages except headers, bodies, and sender recovery will be + /// unwound. 
+ #[arg(long)] + offline: bool, } impl Command { @@ -52,17 +54,31 @@ impl Command { eyre::bail!("Cannot unwind genesis block") } - // Only execute a pipeline unwind if the start of the range overlaps the existing static - // files. If that's the case, then copy all available data from MDBX to static files, and - // only then, proceed with the unwind. - if let Some(highest_static_block) = provider_factory + let highest_static_file_block = provider_factory .static_file_provider() .get_highest_static_files() .max() - .filter(|highest_static_file_block| highest_static_file_block >= range.start()) - { - info!(target: "reth::cli", ?range, ?highest_static_block, "Executing a pipeline unwind."); - let mut pipeline = self.build_pipeline(config, provider_factory.clone()).await?; + .filter(|highest_static_file_block| highest_static_file_block >= range.start()); + + // Execute a pipeline unwind if the start of the range overlaps the existing static + // files. If that's the case, then copy all available data from MDBX to static files, and + // only then, proceed with the unwind. + // + // We also execute a pipeline unwind if `offline` is specified, because we need to only + // unwind the data associated with offline stages. + if highest_static_file_block.is_some() || self.offline { + if self.offline { + info!(target: "reth::cli", "Performing an unwind for offline-only data!"); + } + + if let Some(highest_static_file_block) = highest_static_file_block { + info!(target: "reth::cli", ?range, ?highest_static_file_block, "Executing a pipeline unwind."); + } else { + info!(target: "reth::cli", ?range, "Executing a pipeline unwind."); + } + + // This will build an offline-only pipeline if the `offline` flag is enabled + let mut pipeline = self.build_pipeline(config, provider_factory)?; // Move all applicable data from database to static files. 
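            // Copies any data that only exists in MDBX into the static files, so the
            // subsequent unwind operates on a consistent view of both stores.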
pipeline.move_to_static_files()?; @@ -87,12 +103,12 @@ impl Command { provider.commit()?; } - println!("Unwound {} blocks", range.count()); + info!(target: "reth::cli", range=?range.clone(), count=range.count(), "Unwound blocks"); Ok(()) } - async fn build_pipeline( + fn build_pipeline( self, config: Config, provider_factory: ProviderFactory>, @@ -103,11 +119,18 @@ impl Command { let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let executor = block_executor!(provider_factory.chain_spec()); - let pipeline = Pipeline::builder() - .with_tip_sender(tip_tx) - .add_stages( + // Unwinding does not require a valid executor + let executor = NoopBlockExecutorProvider::default(); + + let builder = if self.offline { + Pipeline::builder().add_stages( + OfflineStages::new(executor, config.stages, PruneModes::default()) + .builder() + .disable(reth_stages::StageId::SenderRecovery), + ) + } else { + Pipeline::builder().with_tip_sender(tip_tx).add_stages( DefaultStages::new( provider_factory.clone(), tip_rx, @@ -131,10 +154,12 @@ impl Command { ExExManagerHandle::empty(), )), ) - .build( - provider_factory.clone(), - StaticFileProducer::new(provider_factory, PruneModes::default()), - ); + }; + + let pipeline = builder.build( + provider_factory.clone(), + StaticFileProducer::new(provider_factory, PruneModes::default()), + ); Ok(pipeline) } } diff --git a/bin/reth/src/commands/test_vectors/mod.rs b/crates/cli/commands/src/test_vectors/mod.rs similarity index 100% rename from bin/reth/src/commands/test_vectors/mod.rs rename to crates/cli/commands/src/test_vectors/mod.rs diff --git a/bin/reth/src/commands/test_vectors/tables.rs b/crates/cli/commands/src/test_vectors/tables.rs similarity index 100% rename from bin/reth/src/commands/test_vectors/tables.rs rename to crates/cli/commands/src/test_vectors/tables.rs diff --git a/crates/cli/util/Cargo.toml b/crates/cli/util/Cargo.toml new file mode 100644 index 000000000..f38421bc0 --- /dev/null +++ b/crates/cli/util/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "reth-cli-util" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +exclude.workspace = true + +[dependencies] +reth-fs-util.workspace = true +reth-network.workspace = true +reth-primitives.workspace = true +secp256k1.workspace = true +thiserror.workspace = true +eyre.workspace = true + +[dev-dependencies] +proptest.workspace = true + +[lints] +workspace = true diff --git a/crates/cli/util/src/lib.rs b/crates/cli/util/src/lib.rs new file mode 100644 index 000000000..39d7b7f98 --- /dev/null +++ b/crates/cli/util/src/lib.rs @@ -0,0 +1,17 @@ +//! This crate defines a set of commonly used cli utils. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +/// Helper function to load a secret key from a file. +pub mod load_secret_key; +pub use load_secret_key::get_secret_key; + +/// Cli parsers functions. 
+pub mod parsers;
+pub use parsers::{hash_or_num_value_parser, parse_duration_from_secs, parse_socket_address};
diff --git a/crates/node-core/src/args/secret_key.rs b/crates/cli/util/src/load_secret_key.rs
similarity index 100%
rename from crates/node-core/src/args/secret_key.rs
rename to crates/cli/util/src/load_secret_key.rs
diff --git a/crates/cli/util/src/parsers.rs b/crates/cli/util/src/parsers.rs
new file mode 100644
index 000000000..5e7c8c785
--- /dev/null
+++ b/crates/cli/util/src/parsers.rs
@@ -0,0 +1,96 @@
+use reth_primitives::{BlockHashOrNumber, B256};
+use std::{
+    net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs},
+    str::FromStr,
+    time::Duration,
+};
+
+/// Helper to parse a [Duration] from seconds
+pub fn parse_duration_from_secs(arg: &str) -> eyre::Result<Duration> {
+    let seconds = arg.parse()?;
+    Ok(Duration::from_secs(seconds))
+}
+
+/// Parse [`BlockHashOrNumber`]
+pub fn hash_or_num_value_parser(value: &str) -> eyre::Result<BlockHashOrNumber> {
+    match B256::from_str(value) {
+        Ok(hash) => Ok(BlockHashOrNumber::Hash(hash)),
+        Err(_) => Ok(BlockHashOrNumber::Number(value.parse()?)),
+    }
+}
+
+/// Error thrown while parsing a socket address.
+#[derive(thiserror::Error, Debug)]
+pub enum SocketAddressParsingError {
+    /// Failed to convert the string into a socket addr
+    #[error("could not parse socket address: {0}")]
+    Io(#[from] std::io::Error),
+    /// Input must not be empty
+    #[error("cannot parse socket address from empty string")]
+    Empty,
+    /// Failed to parse the address
+    #[error("could not parse socket address from {0}")]
+    Parse(String),
+    /// Failed to parse port
+    #[error("could not parse port: {0}")]
+    Port(#[from] std::num::ParseIntError),
+}
+
+/// Parse a [`SocketAddr`] from a `str`.
+///
+/// The following formats are checked:
+///
+/// - If the value can be parsed as a `u16` or starts with `:` it is considered a port, and the
+///   hostname is set to `localhost`.
+/// - If the value contains `:` it is assumed to be the format `<host>:<port>`
+/// - Otherwise it is assumed to be a hostname
+///
+/// An error is returned if the value is empty.
+pub fn parse_socket_address(value: &str) -> eyre::Result<SocketAddr, SocketAddressParsingError> {
+    if value.is_empty() {
+        return Err(SocketAddressParsingError::Empty)
+    }
+
+    if let Some(port) = value.strip_prefix(':').or_else(|| value.strip_prefix("localhost:")) {
+        let port: u16 = port.parse()?;
+        return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port))
+    }
+    if let Ok(port) = value.parse::<u16>() {
+        return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port))
+    }
+    value
+        .to_socket_addrs()?
+        .next()
+        .ok_or_else(|| SocketAddressParsingError::Parse(value.to_string()))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use proptest::prelude::Rng;
+    use secp256k1::rand::thread_rng;
+
+    #[test]
+    fn parse_socket_addresses() {
+        for value in ["localhost:9000", ":9000", "9000"] {
+            let socket_addr = parse_socket_address(value)
+                .unwrap_or_else(|_| panic!("could not parse socket address: {value}"));
+
+            assert!(socket_addr.ip().is_loopback());
+            assert_eq!(socket_addr.port(), 9000);
+        }
+    }
+
+    #[test]
+    fn parse_socket_address_random() {
+        let port: u16 = thread_rng().gen();
+
+        for value in [format!("localhost:{port}"), format!(":{port}"), port.to_string()] {
+            let socket_addr = parse_socket_address(&value)
+                .unwrap_or_else(|_| panic!("could not parse socket address: {value}"));
+
+            assert!(socket_addr.ip().is_loopback());
+            assert_eq!(socket_addr.port(), port);
+        }
+    }
+}
diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml
index 06658719d..fb6eb7ef1 100644
--- a/crates/config/Cargo.toml
+++ b/crates/config/Cargo.toml
@@ -12,8 +12,9 @@ workspace = true
 [dependencies]
 # reth
-reth-network.workspace = true
+reth-network-types = { workspace = true, features = ["serde"] }
 reth-prune-types.workspace = true
+reth-stages-types.workspace = true
 reth-primitives = { workspace = true, optional = true }
 # serde
@@ -30,4 +31,4 @@ toml.workspace = true
 [features]
 bsc = [
     "reth-primitives/bsc"
-]
+]
\ No newline at end of file
diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs
index a23f576b5..45cdf512c 100644
--- a/crates/config/src/config.rs
+++ b/crates/config/src/config.rs
@@ -1,8 +1,9 @@
 //! Configuration files.
-use reth_network::{PeersConfig, SessionsConfig};
+use reth_network_types::{PeersConfig, SessionsConfig};
 #[cfg(feature = "bsc")]
 use reth_primitives::parlia::ParliaConfig;
 use reth_prune_types::PruneModes;
+use reth_stages_types::ExecutionStageThresholds;
 use serde::{Deserialize, Deserializer, Serialize};
 use std::{
     ffi::OsStr,
@@ -57,7 +58,7 @@ impl Config {
     }
 
     /// Sets the pruning configuration.
-    pub fn update_prune_confing(&mut self, prune_config: PruneConfig) {
+    pub fn update_prune_config(&mut self, prune_config: PruneConfig) {
         self.prune = Some(prune_config);
     }
 }
@@ -220,6 +221,17 @@ impl Default for ExecutionConfig {
     }
 }
 
+impl From<ExecutionConfig> for ExecutionStageThresholds {
+    fn from(config: ExecutionConfig) -> Self {
+        Self {
+            max_blocks: config.max_blocks,
+            max_changes: config.max_changes,
+            max_cumulative_gas: config.max_cumulative_gas,
+            max_duration: config.max_duration,
+        }
+    }
+}
+
 /// Hashing stage configuration.
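With this `From` impl in place, execution stage thresholds can be taken straight from the deserialized TOML config. A small sketch; the `config.stages.execution` path follows the `Config` layout in this crate, and the surrounding setup is assumed:

use reth_config::Config;
use reth_stages_types::ExecutionStageThresholds;

fn execution_thresholds(config: &Config) -> ExecutionStageThresholds {
    // ExecutionConfig -> ExecutionStageThresholds via the conversion above.
    config.stages.execution.clone().into()
}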
#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq, Serialize)] #[serde(default)] diff --git a/crates/consensus/auto-seal/src/client.rs b/crates/consensus/auto-seal/src/client.rs index b9befa738..d55cf6443 100644 --- a/crates/consensus/auto-seal/src/client.rs +++ b/crates/consensus/auto-seal/src/client.rs @@ -4,11 +4,11 @@ use crate::Storage; use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, download::DownloadClient, - headers::client::{HeadersClient, HeadersFut, HeadersRequest}, + headers::client::{HeadersClient, HeadersDirection, HeadersFut, HeadersRequest}, priority::Priority, }; use reth_network_peers::{PeerId, WithPeerId}; -use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection, B256}; +use reth_primitives::{BlockBody, BlockHashOrNumber, Header, B256}; use std::fmt::Debug; use tracing::{trace, warn}; diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index 543e70b74..59078c209 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -16,7 +16,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use reth_beacon_consensus::BeaconEngineMessage; -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_engine_primitives::EngineTypes; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; @@ -369,6 +369,7 @@ impl StorageInner { body: transactions, ommers: ommers.clone(), withdrawals: withdrawals.clone(), + sidecars: None, requests: requests.clone(), } .with_recovered_senders() diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index bf74df0f7..079963e78 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -30,6 +30,7 @@ reth-static-file.workspace = true reth-tokio-util.workspace = true reth-engine-primitives.workspace = true reth-network-p2p.workspace = true +reth-bsc-consensus = { workspace = true, optional = true } # async @@ -80,3 +81,7 @@ optimism = [ "reth-blockchain-tree/optimism", "reth-rpc/optimism", ] + +bsc = [ + "reth-bsc-consensus/bsc" +] \ No newline at end of file diff --git a/crates/consensus/beacon/src/engine/forkchoice.rs b/crates/consensus/beacon/src/engine/forkchoice.rs index afd19f607..ba09dff6c 100644 --- a/crates/consensus/beacon/src/engine/forkchoice.rs +++ b/crates/consensus/beacon/src/engine/forkchoice.rs @@ -3,7 +3,7 @@ use reth_rpc_types::engine::{ForkchoiceState, PayloadStatusEnum}; /// The struct that keeps track of the received forkchoice state and their status. #[derive(Debug, Clone, Default)] -pub(crate) struct ForkchoiceStateTracker { +pub struct ForkchoiceStateTracker { /// The latest forkchoice state that we received. /// /// Caution: this can be invalid. @@ -20,7 +20,7 @@ impl ForkchoiceStateTracker { /// /// If the status is `VALID`, we also update the last valid forkchoice state and set the /// `sync_target` to `None`, since we're now fully synced. - pub(crate) fn set_latest(&mut self, state: ForkchoiceState, status: ForkchoiceStatus) { + pub fn set_latest(&mut self, state: ForkchoiceState, status: ForkchoiceStatus) { if status.is_valid() { self.set_valid(state); } else if status.is_syncing() { @@ -76,7 +76,7 @@ impl ForkchoiceStateTracker { } /// Returns the last received `ForkchoiceState` to which we need to sync. 
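Since the tracker is now public, the semantics documented above can be exercised directly. A sketch, assuming `ForkchoiceState` derives `Default` (an all-zero state is fine for illustration):

let state = ForkchoiceState::default();
let mut tracker = ForkchoiceStateTracker::default();

// A SYNCING status records the state as the target we still need to reach.
tracker.set_latest(state, ForkchoiceStatus::Syncing);
assert!(tracker.sync_target_state().is_some());

// A VALID status means we are fully synced, which clears the sync target.
tracker.set_latest(state, ForkchoiceStatus::Valid);
assert!(tracker.sync_target_state().is_none());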
- pub(crate) const fn sync_target_state(&self) -> Option { + pub const fn sync_target_state(&self) -> Option { self.last_syncing } @@ -139,9 +139,12 @@ impl From for ForkchoiceStatus { /// A helper type to check represent hashes of a [`ForkchoiceState`] #[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub(crate) enum ForkchoiceStateHash { +pub enum ForkchoiceStateHash { + /// Head hash of the [`ForkchoiceState`]. Head(B256), + /// Safe hash of the [`ForkchoiceState`]. Safe(B256), + /// Finalized hash of the [`ForkchoiceState`]. Finalized(B256), } diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index 0f0d9e1da..0cffc67b3 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -87,7 +87,7 @@ where /// Sends a transition configuration exchange message to the beacon consensus engine. /// /// See also - pub async fn transition_configuration_exchanged(&self) { + pub fn transition_configuration_exchanged(&self) { let _ = self.to_engine.send(BeaconEngineMessage::TransitionConfigurationExchanged); } diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 43c47ef41..b52812b53 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -101,6 +101,7 @@ impl StaticFileHook { headers: Some(finalized_block_number), receipts: Some(finalized_block_number), transactions: Some(finalized_block_number), + sidecars: Some(finalized_block_number), })?; // Check if the moving data to static files has been requested. diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index 2a37c6001..fbe6bf462 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -14,7 +14,8 @@ use tracing::warn; const INVALID_HEADER_HIT_EVICTION_THRESHOLD: u8 = 128; /// Keeps track of invalid headers. -pub(crate) struct InvalidHeaderCache { +#[derive(Debug)] +pub struct InvalidHeaderCache { /// This maps a header hash to a reference to its invalid ancestor. headers: LruMap, /// Metrics for the cache. @@ -22,7 +23,8 @@ pub(crate) struct InvalidHeaderCache { } impl InvalidHeaderCache { - pub(crate) fn new(max_length: u32) -> Self { + /// Invalid header cache constructor. + pub fn new(max_length: u32) -> Self { Self { headers: LruMap::new(ByLength::new(max_length)), metrics: Default::default() } } @@ -34,7 +36,7 @@ impl InvalidHeaderCache { /// /// If this is called, the hit count for the entry is incremented. /// If the hit count exceeds the threshold, the entry is evicted and `None` is returned. - pub(crate) fn get(&mut self, hash: &B256) -> Option> { + pub fn get(&mut self, hash: &B256) -> Option> { { let entry = self.headers.get(hash)?; entry.hit_count += 1; @@ -49,7 +51,7 @@ impl InvalidHeaderCache { } /// Inserts an invalid block into the cache, with a given invalid ancestor. - pub(crate) fn insert_with_invalid_ancestor( + pub fn insert_with_invalid_ancestor( &mut self, header_hash: B256, invalid_ancestor: Arc
<Header>
, diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index 052b275c1..f58f620b4 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -48,7 +48,7 @@ impl OnForkChoiceUpdated { /// Creates a new instance of `OnForkChoiceUpdated` if the forkchoice update succeeded and no /// payload attributes were provided. - pub(crate) fn valid(status: PayloadStatus) -> Self { + pub fn valid(status: PayloadStatus) -> Self { Self { forkchoice_status: ForkchoiceStatus::from_payload_status(&status.status), fut: Either::Left(futures::future::ready(Ok(ForkchoiceUpdated::new(status)))), @@ -57,7 +57,7 @@ impl OnForkChoiceUpdated { /// Creates a new instance of `OnForkChoiceUpdated` with the given payload status, if the /// forkchoice update failed due to an invalid payload. - pub(crate) fn with_invalid(status: PayloadStatus) -> Self { + pub fn with_invalid(status: PayloadStatus) -> Self { Self { forkchoice_status: ForkchoiceStatus::from_payload_status(&status.status), fut: Either::Left(futures::future::ready(Ok(ForkchoiceUpdated::new(status)))), @@ -66,7 +66,7 @@ impl OnForkChoiceUpdated { /// Creates a new instance of `OnForkChoiceUpdated` if the forkchoice update failed because the /// given state is considered invalid - pub(crate) fn invalid_state() -> Self { + pub fn invalid_state() -> Self { Self { forkchoice_status: ForkchoiceStatus::Invalid, fut: Either::Left(futures::future::ready(Err(ForkchoiceUpdateError::InvalidState))), @@ -75,7 +75,7 @@ impl OnForkChoiceUpdated { /// Creates a new instance of `OnForkChoiceUpdated` if the forkchoice update was successful but /// payload attributes were invalid. - pub(crate) fn invalid_payload_attributes() -> Self { + pub fn invalid_payload_attributes() -> Self { Self { // This is valid because this is only reachable if the state and payload is valid forkchoice_status: ForkchoiceStatus::Valid, @@ -86,7 +86,7 @@ impl OnForkChoiceUpdated { } /// If the forkchoice update was successful and no payload attributes were provided, this method - pub(crate) const fn updated_with_pending_payload_id( + pub const fn updated_with_pending_payload_id( payload_status: PayloadStatus, pending_payload_id: oneshot::Receiver>, ) -> Self { diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 72f71b286..9673f6205 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -53,7 +53,7 @@ pub use error::{ }; mod invalid_headers; -use invalid_headers::InvalidHeaderCache; +pub use invalid_headers::InvalidHeaderCache; mod event; pub use event::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; @@ -62,13 +62,12 @@ mod handle; pub use handle::BeaconConsensusEngineHandle; mod forkchoice; -pub use forkchoice::ForkchoiceStatus; -use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker}; +pub use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus}; mod metrics; use metrics::EngineMetrics; -pub(crate) mod sync; +pub mod sync; use sync::{EngineSyncController, EngineSyncEvent}; /// Hooks for running during the main loop of @@ -89,6 +88,18 @@ const MAX_INVALID_HEADERS: u32 = 512u32; /// If the distance exceeds this threshold, the pipeline will be used for sync. pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; +/// Represents a pending forkchoice update. 
+/// +/// This type encapsulates the necessary components for a pending forkchoice update +/// in the context of a beacon consensus engine. +/// +/// It consists of: +/// - The current fork choice state. +/// - Optional payload attributes specific to the engine type. +/// - Sender for the result of an oneshot channel, conveying the outcome of the fork choice update. +type PendingForkchoiceUpdate = + (ForkchoiceState, Option, oneshot::Sender>); + /// The beacon consensus engine is the driver that switches between historical and live sync. /// /// The beacon consensus engine is itself driven by messages from the Consensus Layer, which are @@ -190,12 +201,7 @@ where /// It is recorded if we cannot process the forkchoice update because /// a hook with database read-write access is active. /// This is a temporary solution to always process missed FCUs. - #[allow(clippy::type_complexity)] - pending_forkchoice_update: Option<( - ForkchoiceState, - Option, - oneshot::Sender>, - )>, + pending_forkchoice_update: Option>, /// Tracks the header of invalid payloads that were rejected by the engine because they're /// invalid. invalid_headers: InvalidHeaderCache, @@ -2160,9 +2166,8 @@ mod tests { provider .insert_block( b.clone().try_seal_with_senders().expect("invalid tx signature in block"), - None, ) - .map(|_| ()) + .map(drop) }) .expect("failed to insert"); provider.commit().unwrap(); @@ -2492,7 +2497,7 @@ mod tests { use super::*; use alloy_genesis::Genesis; use reth_db::test_utils::create_test_static_files_dir; - use reth_primitives::{Hardfork, U256}; + use reth_primitives::{EthereumHardfork, U256}; use reth_provider::{ providers::StaticFileProvider, test_utils::blocks::BlockchainTestData, }; @@ -2721,9 +2726,9 @@ mod tests { async fn payload_pre_merge() { let data = BlockchainTestData::default(); let mut block1 = data.blocks[0].0.block.clone(); - block1 - .header - .set_difficulty(MAINNET.fork(Hardfork::Paris).ttd().unwrap() - U256::from(1)); + block1.header.set_difficulty( + MAINNET.fork(EthereumHardfork::Paris).ttd().unwrap() - U256::from(1), + ); block1 = block1.unseal().seal_slow(); let (block2, exec_result2) = data.blocks[1].clone(); let mut block2 = block2.unseal().block; diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 76a6d0e81..0be1b2d11 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -1,10 +1,13 @@ //! Sync management for the engine implementation. 
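For reference, a hedged sketch of how a tuple of this alias' shape is parked and later drained; the engine's actual replay logic lives elsewhere, and the helper names here are illustrative only:

// `slot` stands in for the engine's `pending_forkchoice_update` field.
fn park<A>(slot: &mut Option<PendingForkchoiceUpdate<A>>, update: PendingForkchoiceUpdate<A>) {
    // Assumption in this sketch: a newer missed FCU replaces any older one.
    *slot = Some(update);
}

fn drain<A>(slot: &mut Option<PendingForkchoiceUpdate<A>>) -> Option<PendingForkchoiceUpdate<A>> {
    slot.take()
}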
+#[cfg(not(feature = "bsc"))] +use crate::EthBeaconConsensus; use crate::{ - engine::metrics::EngineSyncMetrics, BeaconConsensusEngineEvent, - ConsensusEngineLiveSyncProgress, EthBeaconConsensus, + engine::metrics::EngineSyncMetrics, BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, }; use futures::FutureExt; +#[cfg(feature = "bsc")] +use reth_bsc_consensus::Parlia; use reth_chainspec::ChainSpec; use reth_db_api::database::Database; use reth_network_p2p::{ @@ -16,6 +19,7 @@ use reth_primitives::{BlockNumber, SealedBlock, B256}; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; use reth_tasks::TaskSpawner; use reth_tokio_util::EventSender; + use std::{ cmp::{Ordering, Reverse}, collections::{binary_heap::PeekMut, BinaryHeap}, @@ -76,11 +80,15 @@ where chain_spec: Arc, event_sender: EventSender, ) -> Self { + #[cfg(not(feature = "bsc"))] + let full_block_client = + FullBlockClient::new(client, Arc::new(EthBeaconConsensus::new(chain_spec))); + #[cfg(feature = "bsc")] + let full_block_client = + FullBlockClient::new(client, Arc::new(Parlia::new(chain_spec, Default::default()))); + Self { - full_block_client: FullBlockClient::new( - client, - Arc::new(EthBeaconConsensus::new(chain_spec)), - ), + full_block_client, pipeline_task_spawner, pipeline_state: PipelineState::Idle(Some(pipeline)), pending_pipeline_target: None, diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index f58063d3e..7b5ee65ee 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -393,7 +393,8 @@ where let externals = TreeExternals::new(provider_factory.clone(), consensus, executor_factory); let config = BlockchainTreeConfig::new(1, 2, 3, 2); let tree = Arc::new(ShareableBlockchainTree::new( - BlockchainTree::new(externals, config, None).expect("failed to create tree"), + BlockchainTree::new(externals, config, PruneModes::default()) + .expect("failed to create tree"), )); let latest = self.base_config.chain_spec.genesis_header().seal_slow(); let blockchain_provider = diff --git a/crates/consensus/common/src/calc.rs b/crates/consensus/common/src/calc.rs index 27320700b..e4b2abc13 100644 --- a/crates/consensus/common/src/calc.rs +++ b/crates/consensus/common/src/calc.rs @@ -1,4 +1,4 @@ -use reth_chainspec::{Chain, ChainSpec, Hardfork}; +use reth_chainspec::{ChainSpec, EthereumHardfork}; use reth_primitives::{constants::ETH_TO_WEI, BlockNumber, U256}; /// Calculates the base block reward. @@ -26,9 +26,7 @@ pub fn base_block_reward( block_difficulty: U256, total_difficulty: U256, ) -> Option { - if chain_spec.fork(Hardfork::Paris).active_at_ttd(total_difficulty, block_difficulty) || - chain_spec.chain == Chain::goerli() - { + if chain_spec.fork(EthereumHardfork::Paris).active_at_ttd(total_difficulty, block_difficulty) { None } else { Some(base_block_reward_pre_merge(chain_spec, block_number)) @@ -39,9 +37,9 @@ pub fn base_block_reward( /// /// Caution: The caller must ensure that the block number is before the merge. 
pub fn base_block_reward_pre_merge(chain_spec: &ChainSpec, block_number: BlockNumber) -> u128 { - if chain_spec.fork(Hardfork::Constantinople).active_at_block(block_number) { + if chain_spec.fork(EthereumHardfork::Constantinople).active_at_block(block_number) { ETH_TO_WEI * 2 - } else if chain_spec.fork(Hardfork::Byzantium).active_at_block(block_number) { + } else if chain_spec.fork(EthereumHardfork::Byzantium).active_at_block(block_number) { ETH_TO_WEI * 3 } else { ETH_TO_WEI * 5 diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 7ef56384f..7d2e110d9 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,6 +1,6 @@ //! Collection of methods for block validation. -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; use reth_primitives::{ constants::{ @@ -8,7 +8,7 @@ use reth_primitives::{ MAXIMUM_EXTRA_DATA_SIZE, }, eip4844::calculate_excess_blob_gas, - GotExpected, Hardfork, Header, SealedBlock, SealedHeader, + EthereumHardfork, GotExpected, Header, SealedBlock, SealedHeader, }; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. @@ -29,7 +29,7 @@ pub fn validate_header_base_fee( header: &SealedHeader, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { - if chain_spec.fork(Hardfork::London).active_at_block(header.number) && + if chain_spec.is_fork_active_at_block(EthereumHardfork::London, header.number) && header.base_fee_per_gas.is_none() { return Err(ConsensusError::BaseFeeMissing) @@ -152,8 +152,9 @@ pub fn validate_4844_header_standalone(header: &SealedHeader) -> Result<(), Cons /// This must be 32 bytes or fewer; formally Hx. 
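As a concrete check of the schedule above (a doc-test-style sketch; the mainnet activation heights, Byzantium at block 4,370,000 and Constantinople at 7,280,000, are facts outside this diff):

use reth_chainspec::MAINNET;
use reth_primitives::constants::ETH_TO_WEI;

// Pre-Byzantium blocks pay 5 ETH.
assert_eq!(base_block_reward_pre_merge(&MAINNET, 1_000_000), ETH_TO_WEI * 5);
// Byzantium cut the reward to 3 ETH.
assert_eq!(base_block_reward_pre_merge(&MAINNET, 5_000_000), ETH_TO_WEI * 3);
// Constantinople cut it again to 2 ETH.
assert_eq!(base_block_reward_pre_merge(&MAINNET, 8_000_000), ETH_TO_WEI * 2);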
#[inline] pub fn validate_header_extradata(header: &Header) -> Result<(), ConsensusError> { - if header.extra_data.len() > MAXIMUM_EXTRA_DATA_SIZE { - Err(ConsensusError::ExtraDataExceedsMax { len: header.extra_data.len() }) + let extradata_len = header.extra_data.len(); + if extradata_len > MAXIMUM_EXTRA_DATA_SIZE { + Err(ConsensusError::ExtraDataExceedsMax { len: extradata_len }) } else { Ok(()) } @@ -192,11 +193,11 @@ pub fn validate_against_parent_eip1559_base_fee( parent: &SealedHeader, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { - if chain_spec.fork(Hardfork::London).active_at_block(header.number) { + if chain_spec.fork(EthereumHardfork::London).active_at_block(header.number) { let base_fee = header.base_fee_per_gas.ok_or(ConsensusError::BaseFeeMissing)?; let expected_base_fee = - if chain_spec.fork(Hardfork::London).transitions_at_block(header.number) { + if chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number) { reth_primitives::constants::EIP1559_INITIAL_BASE_FEE } else { // This BaseFeeMissing will not happen as previous blocks are checked to have @@ -447,6 +448,7 @@ mod tests { body, ommers, withdrawals: None, + sidecars: None, requests: None, }, parent, diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 2855ecc51..f472da06b 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -13,14 +13,15 @@ reth.workspace = true reth-chainspec.workspace = true reth-primitives.workspace = true reth-tracing.workspace = true -reth-db.workspace = true +reth-db = { workspace = true, features = ["test-utils"] } reth-rpc.workspace = true reth-rpc-layer.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-provider.workspace = true -reth-node-builder.workspace = true +reth-node-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-stages-types.workspace = true +reth-network-peers.workspace = true jsonrpsee.workspace = true diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 925b09b91..e55a9a24b 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -58,15 +58,12 @@ where let mut nodes: Vec> = Vec::with_capacity(num_nodes); for idx in 0..num_nodes { - let mut node_config = NodeConfig::test() + let node_config = NodeConfig::test() .with_chain(chain_spec.clone()) .with_network(network_config.clone()) .with_unused_ports() - .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()); - - if is_dev { - node_config = node_config.dev(); - } + .with_rpc(RpcServerArgs::default().with_unused_ports().with_http()) + .set_dev(is_dev); let span = span!(Level::INFO, "node", idx); let _enter = span.enter(); diff --git a/crates/e2e-test-utils/src/network.rs b/crates/e2e-test-utils/src/network.rs index b575d7001..e5791afd7 100644 --- a/crates/e2e-test-utils/src/network.rs +++ b/crates/e2e-test-utils/src/network.rs @@ -1,6 +1,9 @@ use futures_util::StreamExt; -use reth::network::{NetworkEvent, NetworkEvents, NetworkHandle, PeersInfo}; -use reth_chainspec::net::NodeRecord; +use reth::{ + network::{NetworkEvent, NetworkEvents, NetworkHandle, PeersInfo}, + rpc::types::PeerId, +}; +use reth_network_peers::NodeRecord; use reth_tokio_util::EventStream; use reth_tracing::tracing::info; @@ -23,7 +26,7 @@ impl NetworkTestContext { match self.network_events.next().await { Some(NetworkEvent::PeerAdded(_)) => (), - _ => panic!("Expected a peer added event"), + ev => 
panic!("Expected a peer added event, got: {ev:?}"), } } @@ -32,13 +35,17 @@ impl NetworkTestContext { self.network.local_node_record() } - /// Expects a session to be established - pub async fn expect_session(&mut self) { - match self.network_events.next().await { - Some(NetworkEvent::SessionEstablished { remote_addr, .. }) => { - info!(?remote_addr, "Session established") + /// Awaits the next event for an established session. + pub async fn next_session_established(&mut self) -> Option { + while let Some(ev) = self.network_events.next().await { + match ev { + NetworkEvent::SessionEstablished { peer_id, .. } => { + info!("Session established with peer: {:?}", peer_id); + return Some(peer_id) + } + _ => continue, } - _ => panic!("Expected session established event"), } + None } } diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 524a5d556..684e0f401 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -52,11 +52,11 @@ where }) } + /// Establish a connection to the node pub async fn connect(&mut self, node: &mut NodeTestContext) { self.network.add_peer(node.network.record()).await; - node.network.add_peer(self.network.record()).await; - node.network.expect_session().await; - self.network.expect_session().await; + node.network.next_session_established().await; + self.network.next_session_established().await; } /// Advances the chain `length` blocks. diff --git a/crates/e2e-test-utils/src/rpc.rs b/crates/e2e-test-utils/src/rpc.rs index 09f161a91..b05d5df89 100644 --- a/crates/e2e-test-utils/src/rpc.rs +++ b/crates/e2e-test-utils/src/rpc.rs @@ -1,8 +1,13 @@ use alloy_consensus::TxEnvelope; use alloy_network::eip2718::Decodable2718; -use reth::{api::FullNodeComponents, builder::rpc::RpcRegistry, rpc::api::DebugApiServer}; +use reth::{ + builder::{rpc::RpcRegistry, FullNodeComponents}, + rpc::{ + api::{eth::helpers::EthTransactions, DebugApiServer}, + server_types::eth::EthResult, + }, +}; use reth_primitives::{Bytes, B256}; -use reth_rpc::eth::{error::EthResult, EthTransactions}; pub struct RpcTestContext { pub inner: RpcRegistry, diff --git a/crates/e2e-test-utils/src/transaction.rs b/crates/e2e-test-utils/src/transaction.rs index b278c9636..0719c7733 100644 --- a/crates/e2e-test-utils/src/transaction.rs +++ b/crates/e2e-test-utils/src/transaction.rs @@ -1,5 +1,6 @@ use alloy_consensus::{ - BlobTransactionSidecar, SidecarBuilder, SimpleCoder, TxEip4844Variant, TxEnvelope, + BlobTransactionSidecar, EnvKzgSettings, SidecarBuilder, SimpleCoder, TxEip4844Variant, + TxEnvelope, }; use alloy_network::{eip2718::Encodable2718, EthereumWallet, TransactionBuilder}; use alloy_rpc_types::{TransactionInput, TransactionRequest}; @@ -7,7 +8,7 @@ use alloy_signer_local::PrivateKeySigner; use eyre::Ok; use reth_primitives::{hex, Address, Bytes, U256}; -use reth_primitives::{constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, B256}; +use reth_primitives::B256; pub struct TransactionTestContext; @@ -71,12 +72,12 @@ impl TransactionTestContext { /// Validates the sidecar of a given tx envelope and returns the versioned hashes pub fn validate_sidecar(tx: TxEnvelope) -> Vec { - let proof_setting = MAINNET_KZG_TRUSTED_SETUP.clone(); + let proof_setting = EnvKzgSettings::Default; match tx { TxEnvelope::Eip4844(signed) => match signed.tx() { TxEip4844Variant::TxEip4844WithSidecar(tx) => { - tx.validate_blob(&proof_setting).unwrap(); + tx.validate_blob(proof_setting.get()).unwrap(); tx.sidecar.versioned_hashes().collect() } _ => panic!("Expected 
Eip4844 transaction with sidecar"), diff --git a/crates/e2e-test-utils/src/wallet.rs b/crates/e2e-test-utils/src/wallet.rs index f8a4230ee..d24ee2d3f 100644 --- a/crates/e2e-test-utils/src/wallet.rs +++ b/crates/e2e-test-utils/src/wallet.rs @@ -36,7 +36,7 @@ impl Wallet { let mut wallets = Vec::with_capacity(self.amount); for idx in 0..self.amount { let builder = - builder.clone().derivation_path(&format!("{derivation_path}{idx}")).unwrap(); + builder.clone().derivation_path(format!("{derivation_path}{idx}")).unwrap(); let wallet = builder.build().unwrap().with_chain_id(Some(self.chain_id)); wallets.push(wallet) } diff --git a/crates/engine-primitives/Cargo.toml b/crates/engine/primitives/Cargo.toml similarity index 100% rename from crates/engine-primitives/Cargo.toml rename to crates/engine/primitives/Cargo.toml diff --git a/crates/engine-primitives/src/error.rs b/crates/engine/primitives/src/error.rs similarity index 100% rename from crates/engine-primitives/src/error.rs rename to crates/engine/primitives/src/error.rs diff --git a/crates/engine-primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs similarity index 100% rename from crates/engine-primitives/src/lib.rs rename to crates/engine/primitives/src/lib.rs diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml new file mode 100644 index 000000000..33ea676b8 --- /dev/null +++ b/crates/engine/tree/Cargo.toml @@ -0,0 +1,80 @@ +[package] +name = "reth-engine-tree" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-beacon-consensus.workspace = true +reth-blockchain-tree.workspace = true +reth-blockchain-tree-api.workspace = true +reth-chainspec.workspace = true +reth-consensus.workspace = true +reth-db.workspace = true +reth-db-api.workspace = true +reth-engine-primitives.workspace = true +reth-errors.workspace = true +reth-ethereum-consensus.workspace = true +reth-evm.workspace = true +reth-network-p2p.workspace = true +reth-payload-builder.workspace = true +reth-payload-primitives.workspace = true +reth-payload-validator.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-prune.workspace = true +reth-prune-types.workspace = true +reth-revm.workspace = true +reth-rpc-types.workspace = true +reth-stages-api.workspace = true +reth-static-file.workspace = true +reth-tasks.workspace = true +reth-tokio-util.workspace = true +reth-trie.workspace = true +revm.workspace = true + +# common +futures.workspace = true +tokio = { workspace = true, features = ["macros", "sync"] } +tokio-stream = { workspace = true, features = ["sync"] } + + +# metrics +metrics.workspace = true +reth-metrics = { workspace = true, features = ["common"] } + +# misc +aquamarine.workspace = true +parking_lot.workspace = true +tracing.workspace = true + +# optional deps for test-utils +reth-stages = { workspace = true, optional = true } +reth-tracing = { workspace = true, optional = true } + +[dev-dependencies] +# reth +reth-db = { workspace = true, features = ["test-utils"] } +reth-network-p2p = { workspace = true, features = ["test-utils"] } +reth-prune-types.workspace = true +reth-stages = { workspace = true, features = ["test-utils"] } +reth-tracing.workspace = true + +assert_matches.workspace = true + +[features] +test-utils = [ + "reth-db/test-utils", + "reth-network-p2p/test-utils", + "reth-stages/test-utils", + "reth-tracing" +] + 
+bsc = []
\ No newline at end of file
diff --git a/crates/engine/tree/src/backfill.rs b/crates/engine/tree/src/backfill.rs
new file mode 100644
index 000000000..24153bed2
--- /dev/null
+++ b/crates/engine/tree/src/backfill.rs
@@ -0,0 +1,290 @@
+//! It is expected that the node has two sync modes:
+//!
+//! - Backfill sync: Sync to a certain block height in stages, e.g. download data from p2p then
+//!   execute that range.
+//! - Live sync: In this mode the node keeps up with the latest tip and listens for new
+//!   requests from the consensus client.
+//!
+//! These modes are mutually exclusive and the node can only be in one mode at a time.
+
+use futures::FutureExt;
+use reth_db_api::database::Database;
+use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult};
+use reth_tasks::TaskSpawner;
+use std::task::{ready, Context, Poll};
+use tokio::sync::oneshot;
+use tracing::trace;
+
+/// Backfill sync mode functionality.
+pub trait BackfillSync: Send + Sync {
+    /// Performs a backfill action.
+    fn on_action(&mut self, action: BackfillAction);
+
+    /// Polls the pipeline for completion.
+    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<BackfillEvent>;
+}
+
+/// The backfill actions that can be performed.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum BackfillAction {
+    /// Start backfilling with the given target.
+    Start(PipelineTarget),
+}
+
+/// The events that can be emitted on backfill sync.
+#[derive(Debug)]
+pub enum BackfillEvent {
+    /// Backfill sync idle.
+    Idle,
+    /// Backfill sync started.
+    Started(PipelineTarget),
+    /// Backfill sync finished.
+    ///
+    /// If this is returned, backfill sync is idle.
+    Finished(Result<ControlFlow, PipelineError>),
+    /// Sync task was dropped after it was started; the result could not be received because the
+    /// channel closed. This would indicate a panicked task.
+    TaskDropped(String),
+}
+
+/// Pipeline sync.
+#[derive(Debug)]
+pub struct PipelineSync<DB>
+where
+    DB: Database,
+{
+    /// The type that can spawn the pipeline task.
+    pipeline_task_spawner: Box<dyn TaskSpawner>,
+    /// The current state of the pipeline.
+    /// The pipeline is used for large ranges.
+    pipeline_state: PipelineState<DB>,
+    /// Pending target block for the pipeline to sync.
+    pending_pipeline_target: Option<PipelineTarget>,
+}
+
+impl<DB> PipelineSync<DB>
+where
+    DB: Database + 'static,
+{
+    /// Create a new instance.
+    pub fn new(pipeline: Pipeline<DB>, pipeline_task_spawner: Box<dyn TaskSpawner>) -> Self {
+        Self {
+            pipeline_task_spawner,
+            pipeline_state: PipelineState::Idle(Some(pipeline)),
+            pending_pipeline_target: None,
+        }
+    }
+
+    /// Returns `true` if a pipeline target is queued and will be triggered on the next `poll`.
+    #[allow(dead_code)]
+    const fn is_pipeline_sync_pending(&self) -> bool {
+        self.pending_pipeline_target.is_some() && self.pipeline_state.is_idle()
+    }
+
+    /// Returns `true` if the pipeline is idle.
+    const fn is_pipeline_idle(&self) -> bool {
+        self.pipeline_state.is_idle()
+    }
+
+    /// Returns `true` if the pipeline is active.
+    const fn is_pipeline_active(&self) -> bool {
+        !self.is_pipeline_idle()
+    }
+
+    /// Sets a new target to sync the pipeline to.
+    ///
+    /// Ensures the target is not the zero hash.
+    fn set_pipeline_sync_target(&mut self, target: PipelineTarget) {
+        if target.sync_target().is_some_and(|target| target.is_zero()) {
+            trace!(
+                target: "consensus::engine::sync",
+                "Pipeline target cannot be zero hash."
+ ); + // precaution to never sync to the zero hash + return + } + self.pending_pipeline_target = Some(target); + } + + /// This will spawn the pipeline if it is idle and a target is set or if the pipeline is set to + /// run continuously. + fn try_spawn_pipeline(&mut self) -> Option { + match &mut self.pipeline_state { + PipelineState::Idle(pipeline) => { + let target = self.pending_pipeline_target.take()?; + let (tx, rx) = oneshot::channel(); + + let pipeline = pipeline.take().expect("exists"); + self.pipeline_task_spawner.spawn_critical_blocking( + "pipeline task", + Box::pin(async move { + let result = pipeline.run_as_fut(Some(target)).await; + let _ = tx.send(result); + }), + ); + self.pipeline_state = PipelineState::Running(rx); + + Some(BackfillEvent::Started(target)) + } + PipelineState::Running(_) => None, + } + } + + /// Advances the pipeline state. + /// + /// This checks for the result in the channel, or returns pending if the pipeline is idle. + fn poll_pipeline(&mut self, cx: &mut Context<'_>) -> Poll { + let res = match self.pipeline_state { + PipelineState::Idle(_) => return Poll::Pending, + PipelineState::Running(ref mut fut) => { + ready!(fut.poll_unpin(cx)) + } + }; + let ev = match res { + Ok((_, result)) => BackfillEvent::Finished(result), + Err(why) => { + // failed to receive the pipeline + BackfillEvent::TaskDropped(why.to_string()) + } + }; + Poll::Ready(ev) + } +} + +impl BackfillSync for PipelineSync +where + DB: Database + 'static, +{ + fn on_action(&mut self, event: BackfillAction) { + match event { + BackfillAction::Start(target) => self.set_pipeline_sync_target(target), + } + } + + fn poll(&mut self, cx: &mut Context<'_>) -> Poll { + // try to spawn a pipeline if a target is set + if let Some(event) = self.try_spawn_pipeline() { + return Poll::Ready(event) + } + + // make sure we poll the pipeline if it's active, and return any ready pipeline events + if !self.is_pipeline_idle() { + // advance the pipeline + if let Poll::Ready(event) = self.poll_pipeline(cx) { + return Poll::Ready(event) + } + } + + Poll::Pending + } +} + +/// The possible pipeline states within the sync controller. +/// +/// [`PipelineState::Idle`] means that the pipeline is currently idle. +/// [`PipelineState::Running`] means that the pipeline is currently running. +/// +/// NOTE: The differentiation between these two states is important, because when the pipeline is +/// running, it acquires the write lock over the database. This means that we cannot forward to the +/// blockchain tree any messages that would result in database writes, since it would result in a +/// deadlock. +#[derive(Debug)] +enum PipelineState { + /// Pipeline is idle. + Idle(Option>), + /// Pipeline is running and waiting for a response + Running(oneshot::Receiver>), +} + +impl PipelineState { + /// Returns `true` if the state matches idle. 
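The zero-hash guard above can be checked with the `TestHarness` defined in the tests below; a sketch:

let TestHarness { mut pipeline_sync, .. } = TestHarness::new(10, 5);

// A zero sync target is rejected outright, so nothing is queued.
pipeline_sync.on_action(BackfillAction::Start(PipelineTarget::Sync(B256::ZERO)));
assert!(!pipeline_sync.is_pipeline_sync_pending());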
+ const fn is_idle(&self) -> bool { + matches!(self, Self::Idle(_)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::{insert_headers_into_client, TestPipelineBuilder}; + use assert_matches::assert_matches; + use futures::poll; + use reth_chainspec::{ChainSpecBuilder, MAINNET}; + use reth_db::{mdbx::DatabaseEnv, test_utils::TempDatabase}; + use reth_network_p2p::test_utils::TestFullBlockClient; + use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, BlockNumber, Header, B256}; + use reth_stages::ExecOutput; + use reth_stages_api::StageCheckpoint; + use reth_tasks::TokioTaskExecutor; + use std::{collections::VecDeque, future::poll_fn, sync::Arc}; + + struct TestHarness { + pipeline_sync: PipelineSync>>, + tip: B256, + } + + impl TestHarness { + fn new(total_blocks: usize, pipeline_done_after: u64) -> Self { + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(MAINNET.genesis.clone()) + .paris_activated() + .build(), + ); + + // force the pipeline to be "done" after `pipeline_done_after` blocks + let pipeline = TestPipelineBuilder::new() + .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { + checkpoint: StageCheckpoint::new(BlockNumber::from(pipeline_done_after)), + done: true, + })])) + .build(chain_spec); + + let pipeline_sync = PipelineSync::new(pipeline, Box::::default()); + let client = TestFullBlockClient::default(); + let header = Header { + base_fee_per_gas: Some(7), + gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, + ..Default::default() + } + .seal_slow(); + insert_headers_into_client(&client, header, 0..total_blocks); + + let tip = client.highest_block().expect("there should be blocks here").hash(); + + Self { pipeline_sync, tip } + } + } + + #[tokio::test] + async fn pipeline_started_and_finished() { + const TOTAL_BLOCKS: usize = 10; + const PIPELINE_DONE_AFTER: u64 = 5; + let TestHarness { mut pipeline_sync, tip } = + TestHarness::new(TOTAL_BLOCKS, PIPELINE_DONE_AFTER); + + let sync_future = poll_fn(|cx| pipeline_sync.poll(cx)); + let next_event = poll!(sync_future); + + // sync target not set, pipeline not started + assert_matches!(next_event, Poll::Pending); + + pipeline_sync.on_action(BackfillAction::Start(PipelineTarget::Sync(tip))); + + let sync_future = poll_fn(|cx| pipeline_sync.poll(cx)); + let next_event = poll!(sync_future); + + // sync target set, pipeline started + assert_matches!(next_event, Poll::Ready(BackfillEvent::Started(target)) => { + assert_eq!(target.sync_target().unwrap(), tip); + }); + + // the next event should be the pipeline finishing in a good state + let sync_future = poll_fn(|cx| pipeline_sync.poll(cx)); + let next_ready = sync_future.await; + assert_matches!(next_ready, BackfillEvent::Finished(result) => { + assert_matches!(result, Ok(control_flow) => assert_eq!(control_flow, ControlFlow::Continue { block_number: PIPELINE_DONE_AFTER })); + }); + } +} diff --git a/crates/engine/tree/src/chain.rs b/crates/engine/tree/src/chain.rs new file mode 100644 index 000000000..e3f764bea --- /dev/null +++ b/crates/engine/tree/src/chain.rs @@ -0,0 +1,218 @@ +use crate::backfill::{BackfillAction, BackfillEvent, BackfillSync}; +use futures::Stream; +use reth_stages_api::PipelineTarget; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + +/// The type that drives the chain forward. 
+///
+/// A state machine that orchestrates the components responsible for advancing the chain.
+///
+/// ## Control flow
+///
+/// The [`ChainOrchestrator`] is responsible for controlling the backfill sync and additional hooks.
+/// It polls the given `handler`, which is responsible for advancing the chain; how it does so is
+/// up to the handler. However, due to database restrictions (e.g. exclusive write access), the
+/// following invariants apply:
+/// - If the handler requests a backfill run (e.g. [`BackfillAction::Start`]), the handler must
+///   ensure that while the backfill sync is running, no other write access is granted.
+/// - At any time the [`ChainOrchestrator`] can request exclusive write access to the database
+///   (e.g. if pruning is required), but will not do so until the handler has acknowledged the
+///   request for write access.
+///
+/// The [`ChainOrchestrator`] polls the [`ChainHandler`] to advance the chain and handles the
+/// emitted events. Requests and events are passed to the [`ChainHandler`] via
+/// [`ChainHandler::on_event`].
+#[must_use = "Stream does nothing unless polled"]
+#[derive(Debug)]
+pub struct ChainOrchestrator<T, P>
+where
+    T: ChainHandler,
+    P: BackfillSync,
+{
+    /// The handler for advancing the chain.
+    handler: T,
+    /// Controls backfill sync.
+    backfill_sync: P,
+}
+
+impl<T, P> ChainOrchestrator<T, P>
+where
+    T: ChainHandler + Unpin,
+    P: BackfillSync + Unpin,
+{
+    /// Creates a new [`ChainOrchestrator`] with the given handler and backfill sync.
+    pub const fn new(handler: T, backfill_sync: P) -> Self {
+        Self { handler, backfill_sync }
+    }
+
+    /// Returns the handler
+    pub const fn handler(&self) -> &T {
+        &self.handler
+    }
+
+    /// Returns a mutable reference to the handler
+    pub fn handler_mut(&mut self) -> &mut T {
+        &mut self.handler
+    }
+
+    /// Internal function used to advance the chain.
+    ///
+    /// Polls the `ChainOrchestrator` for the next event.
+    #[tracing::instrument(level = "debug", name = "ChainOrchestrator::poll", skip(self, cx))]
+    fn poll_next_event(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<ChainEvent<T::Event>> {
+        let this = self.get_mut();
+
+        // This loop polls the components
+        //
+        // 1. Polls the backfill sync to completion, if active.
+        // 2. Advances the chain by polling the handler.
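+        // Backfill is polled before the handler so that, while a backfill run holds
+        // exclusive write access to the database, its completion is observed before
+        // the handler is advanced again (see the invariants in the type docs).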
+        'outer: loop {
+            // try to poll the backfill sync to completion, if active
+            match this.backfill_sync.poll(cx) {
+                Poll::Ready(backfill_sync_event) => match backfill_sync_event {
+                    BackfillEvent::Idle => {}
+                    BackfillEvent::Started(_) => {
+                        // notify handler that backfill sync started
+                        this.handler.on_event(FromOrchestrator::BackfillSyncStarted);
+                        return Poll::Ready(ChainEvent::BackfillSyncStarted);
+                    }
+                    BackfillEvent::Finished(res) => {
+                        return match res {
+                            Ok(event) => {
+                                tracing::debug!(?event, "backfill sync finished");
+                                // notify handler that backfill sync finished
+                                this.handler.on_event(FromOrchestrator::BackfillSyncFinished);
+                                Poll::Ready(ChainEvent::BackfillSyncFinished)
+                            }
+                            Err(err) => {
+                                tracing::error!(%err, "backfill sync failed");
+                                Poll::Ready(ChainEvent::FatalError)
+                            }
+                        }
+                    }
+                    BackfillEvent::TaskDropped(err) => {
+                        tracing::error!(%err, "backfill sync task dropped");
+                        return Poll::Ready(ChainEvent::FatalError);
+                    }
+                },
+                Poll::Pending => {}
+            }
+
+            // poll the handler for the next event
+            match this.handler.poll(cx) {
+                Poll::Ready(handler_event) => {
+                    match handler_event {
+                        HandlerEvent::BackfillSync(target) => {
+                            // trigger backfill sync and start polling it
+                            this.backfill_sync.on_action(BackfillAction::Start(target));
+                            continue 'outer
+                        }
+                        HandlerEvent::Event(ev) => {
+                            // bubble up the event
+                            return Poll::Ready(ChainEvent::Handler(ev));
+                        }
+                    }
+                }
+                Poll::Pending => {
+                    // no more events to process
+                    break 'outer
+                }
+            }
+        }
+
+        Poll::Pending
+    }
+}
+
+impl<T, P> Stream for ChainOrchestrator<T, P>
+where
+    T: ChainHandler + Unpin,
+    P: BackfillSync + Unpin,
+{
+    type Item = ChainEvent<T::Event>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        self.as_mut().poll_next_event(cx).map(Some)
+    }
+}
+
+/// Represents the sync mode the chain is operating in.
+#[derive(Debug, Default)]
+enum SyncMode {
+    #[default]
+    Handler,
+    Backfill,
+}
+
+/// Event emitted by the [`ChainOrchestrator`]
+///
+/// These are meant to be used for observability and debugging purposes.
+#[derive(Debug)]
+pub enum ChainEvent<T> {
+    /// Backfill sync started
+    BackfillSyncStarted,
+    /// Backfill sync finished
+    BackfillSyncFinished,
+    /// Fatal error
+    FatalError,
+    /// Event emitted by the handler
+    Handler(T),
+}
+
+/// A trait that advances the chain by handling actions.
+///
+/// This is intended to implement the chain consensus logic, for example the `engine` API.
+pub trait ChainHandler: Send + Sync {
+    /// Event generated by this handler that the orchestrator can bubble up.
+    type Event: Send;
+
+    /// Informs the handler about an event from the [`ChainOrchestrator`].
+    fn on_event(&mut self, event: FromOrchestrator);
+
+    /// Polls for actions that [`ChainOrchestrator`] should handle.
+    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<HandlerEvent<Self::Event>>;
+}
+
+/// Events/Requests that the [`ChainHandler`] can emit to the [`ChainOrchestrator`].
+#[derive(Clone, Debug)]
+pub enum HandlerEvent<T> {
+    /// Request to start a backfill sync
+    BackfillSync(PipelineTarget),
+    /// Other event emitted by the handler
+    Event(T),
+}
+
+/// Internal events issued by the [`ChainOrchestrator`].
+#[derive(Clone, Debug)]
+pub enum FromOrchestrator {
+    /// Invoked when backfill sync finished
+    BackfillSyncFinished,
+    /// Invoked when backfill sync started
+    BackfillSyncStarted,
+}
+
+/// Represents the state of the chain.
+#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)]
+pub enum OrchestratorState {
+    /// Orchestrator has exclusive write access to the database.
+/// Represents the state of the chain.
+#[derive(Clone, Copy, PartialEq, Eq, Default, Debug)]
+pub enum OrchestratorState {
+    /// Orchestrator has exclusive write access to the database.
+    BackfillSyncActive,
+    /// Node is actively processing the chain.
+    #[default]
+    Idle,
+}
+
+impl OrchestratorState {
+    /// Returns `true` if the state is [`OrchestratorState::BackfillSyncActive`].
+    pub const fn is_backfill_sync_active(&self) -> bool {
+        matches!(self, Self::BackfillSyncActive)
+    }
+
+    /// Returns `true` if the state is [`OrchestratorState::Idle`].
+    pub const fn is_idle(&self) -> bool {
+        matches!(self, Self::Idle)
+    }
+}
diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs
new file mode 100644
index 000000000..f777ce59e
--- /dev/null
+++ b/crates/engine/tree/src/download.rs
@@ -0,0 +1,419 @@
+//! Handler that can download blocks on demand (e.g. from the network).
+
+use crate::{engine::DownloadRequest, metrics::BlockDownloaderMetrics};
+use futures::FutureExt;
+use reth_consensus::Consensus;
+use reth_network_p2p::{
+    bodies::client::BodiesClient,
+    full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient},
+    headers::client::HeadersClient,
+};
+use reth_primitives::{SealedBlock, SealedBlockWithSenders, B256};
+use std::{
+    cmp::{Ordering, Reverse},
+    collections::{binary_heap::PeekMut, BinaryHeap, HashSet},
+    sync::Arc,
+    task::{Context, Poll},
+};
+use tracing::trace;
+
+/// A trait that can download blocks on demand.
+pub trait BlockDownloader: Send + Sync {
+    /// Handle an action.
+    fn on_action(&mut self, action: DownloadAction);
+
+    /// Advance in-progress requests, if any.
+    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<DownloadOutcome>;
+}
+
+/// Actions that can be performed by the block downloader.
+#[derive(Debug)]
+pub enum DownloadAction {
+    /// Stop downloading blocks.
+    Clear,
+    /// Download given blocks
+    Download(DownloadRequest),
+}
+
+/// Outcome of downloaded blocks.
+#[derive(Debug)]
+pub enum DownloadOutcome {
+    /// Downloaded blocks.
+    Blocks(Vec<SealedBlockWithSenders>),
+}
+
+/// Basic [`BlockDownloader`].
+pub struct BasicBlockDownloader<Client>
+where
+    Client: HeadersClient + BodiesClient + Clone + Unpin + 'static,
+{
+    /// A downloader that can download full blocks from the network.
+    full_block_client: FullBlockClient<Client>,
+    /// In-flight full block requests in progress.
+    inflight_full_block_requests: Vec<FetchFullBlockFuture<Client>>,
+    /// In-flight full block _range_ requests in progress.
+    inflight_block_range_requests: Vec<FetchFullBlockRangeFuture<Client>>,
+    /// Buffered blocks from downloads - this is a min-heap of blocks, using the block number for
+    /// ordering. This means the blocks will be popped from the heap with ascending block numbers.
+    set_buffered_blocks: BinaryHeap<Reverse<OrderedSealedBlockWithSenders>>,
+    /// Engine download metrics.
+    metrics: BlockDownloaderMetrics,
+}
+
+impl<Client> BasicBlockDownloader<Client>
+where
+    Client: HeadersClient + BodiesClient + Clone + Unpin + 'static,
+{
+    /// Create a new instance
+    pub fn new(client: Client, consensus: Arc<dyn Consensus>) -> Self {
+        Self {
+            full_block_client: FullBlockClient::new(client, consensus),
+            inflight_full_block_requests: Vec::new(),
+            inflight_block_range_requests: Vec::new(),
+            set_buffered_blocks: BinaryHeap::new(),
+            metrics: BlockDownloaderMetrics::default(),
+        }
+    }
+
+    /// Clears the stored inflight requests.
+    fn clear(&mut self) {
+        self.inflight_full_block_requests.clear();
+        self.inflight_block_range_requests.clear();
+        self.set_buffered_blocks.clear();
+        self.update_block_download_metrics();
+    }
+
+    /// Processes a download request.
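+    ///
+    /// A hypothetical usage sketch (`downloader`, `hashes`, and `tip` are placeholders):
+    ///
+    /// ```ignore
+    /// downloader.on_action(DownloadAction::Download(DownloadRequest::BlockSet(hashes)));
+    /// downloader.on_action(DownloadAction::Download(DownloadRequest::BlockRange(tip, 32)));
+    /// ```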
+    fn download(&mut self, request: DownloadRequest) {
+        match request {
+            DownloadRequest::BlockSet(hashes) => self.download_block_set(hashes),
+            DownloadRequest::BlockRange(hash, count) => self.download_block_range(hash, count),
+        }
+    }
+
+    /// Processes a block set download request.
+    fn download_block_set(&mut self, hashes: HashSet<B256>) {
+        for hash in hashes {
+            self.download_full_block(hash);
+        }
+    }
+
+    /// Processes a block range download request.
+    fn download_block_range(&mut self, hash: B256, count: u64) {
+        if count == 1 {
+            self.download_full_block(hash);
+        } else {
+            trace!(
+                target: "consensus::engine",
+                ?hash,
+                ?count,
+                "start downloading full block range."
+            );
+
+            let request = self.full_block_client.get_full_block_range(hash, count);
+            self.inflight_block_range_requests.push(request);
+        }
+    }
+
+    /// Starts requesting a full block from the network.
+    ///
+    /// Returns `true` if the request was started, `false` if there's already a request for the
+    /// given hash.
+    fn download_full_block(&mut self, hash: B256) -> bool {
+        if self.is_inflight_request(hash) {
+            return false
+        }
+        trace!(
+            target: "consensus::engine::sync",
+            ?hash,
+            "Start downloading full block"
+        );
+
+        let request = self.full_block_client.get_full_block(hash);
+        self.inflight_full_block_requests.push(request);
+
+        self.update_block_download_metrics();
+
+        true
+    }
+
+    /// Returns true if there's already a request for the given hash.
+    fn is_inflight_request(&self, hash: B256) -> bool {
+        self.inflight_full_block_requests.iter().any(|req| *req.hash() == hash)
+    }
+
+    /// Sets the metrics for the active downloads.
+    fn update_block_download_metrics(&self) {
+        self.metrics.active_block_downloads.set(self.inflight_full_block_requests.len() as f64);
+        // TODO: full block range metrics
+    }
+}
+
+impl<Client> BlockDownloader for BasicBlockDownloader<Client>
+where
+    Client: HeadersClient + BodiesClient + Clone + Unpin + 'static,
+{
+    /// Handles incoming download actions.
+    fn on_action(&mut self, action: DownloadAction) {
+        match action {
+            DownloadAction::Clear => self.clear(),
+            DownloadAction::Download(request) => self.download(request),
+        }
+    }
+
+    /// Advances the download process.
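+    ///
+    /// This advances all in-flight single-block and range requests, pushes completed
+    /// downloads into the min-heap, and, once the heap is non-empty, drains it and
+    /// returns the unique blocks in ascending block-number order.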
+    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<DownloadOutcome> {
+        // advance all full block requests
+        for idx in (0..self.inflight_full_block_requests.len()).rev() {
+            let mut request = self.inflight_full_block_requests.swap_remove(idx);
+            if let Poll::Ready(block) = request.poll_unpin(cx) {
+                trace!(target: "consensus::engine", block=?block.num_hash(), "Received single full block, buffering");
+                self.set_buffered_blocks.push(Reverse(block.into()));
+            } else {
+                // still pending
+                self.inflight_full_block_requests.push(request);
+            }
+        }
+
+        // advance all full block range requests
+        for idx in (0..self.inflight_block_range_requests.len()).rev() {
+            let mut request = self.inflight_block_range_requests.swap_remove(idx);
+            if let Poll::Ready(blocks) = request.poll_unpin(cx) {
+                trace!(target: "consensus::engine", len=?blocks.len(), first=?blocks.first().map(|b| b.num_hash()), last=?blocks.last().map(|b| b.num_hash()), "Received full block range, buffering");
+                self.set_buffered_blocks.extend(
+                    blocks
+                        .into_iter()
+                        .map(|b| {
+                            let senders = b.senders().unwrap_or_default();
+                            OrderedSealedBlockWithSenders(SealedBlockWithSenders {
+                                block: b,
+                                senders,
+                            })
+                        })
+                        .map(Reverse),
+                );
+            } else {
+                // still pending
+                self.inflight_block_range_requests.push(request);
+            }
+        }
+
+        self.update_block_download_metrics();
+
+        if self.set_buffered_blocks.is_empty() {
+            return Poll::Pending;
+        }
+
+        // drain all unique elements of the block buffer if there are any
+        let mut downloaded_blocks: Vec<SealedBlockWithSenders> =
+            Vec::with_capacity(self.set_buffered_blocks.len());
+        while let Some(block) = self.set_buffered_blocks.pop() {
+            // peek ahead and pop duplicates
+            while let Some(peek) = self.set_buffered_blocks.peek_mut() {
+                if peek.0 .0.hash() == block.0 .0.hash() {
+                    PeekMut::pop(peek);
+                } else {
+                    break
+                }
+            }
+            downloaded_blocks.push(block.0.into());
+        }
+        Poll::Ready(DownloadOutcome::Blocks(downloaded_blocks))
+    }
+}
+
+/// A wrapper type around [`SealedBlockWithSenders`] that implements the [`Ord`]
+/// trait by block number.
+#[derive(Debug, Clone, PartialEq, Eq)]
+struct OrderedSealedBlockWithSenders(SealedBlockWithSenders);
+
+impl PartialOrd for OrderedSealedBlockWithSenders {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for OrderedSealedBlockWithSenders {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.0.number.cmp(&other.0.number)
+    }
+}
+
+impl From<SealedBlock> for OrderedSealedBlockWithSenders {
+    fn from(block: SealedBlock) -> Self {
+        let senders = block.senders().unwrap_or_default();
+        Self(SealedBlockWithSenders { block, senders })
+    }
+}
+
+impl From<OrderedSealedBlockWithSenders> for SealedBlockWithSenders {
+    fn from(value: OrderedSealedBlockWithSenders) -> Self {
+        let senders = value.0.senders;
+        Self { block: value.0.block, senders }
+    }
+}
+
+/// A [`BlockDownloader`] that does nothing.
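+///
+/// Useful as a placeholder when on-demand block downloads are not required; its `poll`
+/// never yields a [`DownloadOutcome`].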
+#[derive(Debug, Clone, Default)]
+#[non_exhaustive]
+pub struct NoopBlockDownloader;
+
+impl BlockDownloader for NoopBlockDownloader {
+    fn on_action(&mut self, _event: DownloadAction) {}
+
+    fn poll(&mut self, _cx: &mut Context<'_>) -> Poll<DownloadOutcome> {
+        Poll::Pending
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_utils::insert_headers_into_client;
+    use assert_matches::assert_matches;
+    #[cfg(not(feature = "bsc"))]
+    use reth_beacon_consensus::EthBeaconConsensus;
+    use reth_chainspec::{ChainSpecBuilder, MAINNET};
+    use reth_network_p2p::test_utils::TestFullBlockClient;
+    use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, Header};
+    use std::{future::poll_fn, sync::Arc};
+
+    struct TestHarness {
+        block_downloader: BasicBlockDownloader<TestFullBlockClient>,
+        client: TestFullBlockClient,
+    }
+
+    impl TestHarness {
+        fn new(total_blocks: usize) -> Self {
+            let chain_spec = Arc::new(
+                ChainSpecBuilder::default()
+                    .chain(MAINNET.chain)
+                    .genesis(MAINNET.genesis.clone())
+                    .paris_activated()
+                    .build(),
+            );
+
+            let client = TestFullBlockClient::default();
+            let header = Header {
+                base_fee_per_gas: Some(7),
+                gas_limit: ETHEREUM_BLOCK_GAS_LIMIT,
+                ..Default::default()
+            }
+            .seal_slow();
+
+            insert_headers_into_client(&client, header, 0..total_blocks);
+
+            #[cfg(feature = "bsc")]
+            let consensus = Arc::new(reth_consensus::test_utils::TestConsensus::default());
+            #[cfg(not(feature = "bsc"))]
+            let consensus = Arc::new(EthBeaconConsensus::new(chain_spec));
+
+            let block_downloader = BasicBlockDownloader::new(client.clone(), consensus);
+            Self { block_downloader, client }
+        }
+    }
+
+    #[tokio::test]
+    async fn block_downloader_range_request() {
+        const TOTAL_BLOCKS: usize = 10;
+        let TestHarness { mut block_downloader, client } = TestHarness::new(TOTAL_BLOCKS);
+        let tip = client.highest_block().expect("there should be blocks here");
+
+        // send block range download request
+        block_downloader.on_action(DownloadAction::Download(DownloadRequest::BlockRange(
+            tip.hash(),
+            tip.number,
+        )));
+
+        // ensure we have one in-flight range request
+        assert_eq!(block_downloader.inflight_block_range_requests.len(), 1);
+
+        // ensure the range request is made correctly
+        let first_req = block_downloader.inflight_block_range_requests.first().unwrap();
+        assert_eq!(first_req.start_hash(), tip.hash());
+        assert_eq!(first_req.count(), tip.number);
+
+        // poll downloader
+        let sync_future = poll_fn(|cx| block_downloader.poll(cx));
+        let next_ready = sync_future.await;
+
+        assert_matches!(next_ready, DownloadOutcome::Blocks(blocks) => {
+            // ensure all blocks were obtained
+            assert_eq!(blocks.len(), TOTAL_BLOCKS);
+
+            // ensure they are in ascending order
+            for num in 1..=TOTAL_BLOCKS {
+                assert_eq!(blocks[num - 1].number, num as u64);
+            }
+        });
+    }
+
+    #[tokio::test]
+    async fn block_downloader_set_request() {
+        const TOTAL_BLOCKS: usize = 2;
+        let TestHarness { mut block_downloader, client } = TestHarness::new(TOTAL_BLOCKS);
+
+        let tip = client.highest_block().expect("there should be blocks here");
+
+        // send block set download request
+        block_downloader.on_action(DownloadAction::Download(DownloadRequest::BlockSet(
+            HashSet::from([tip.hash(), tip.parent_hash]),
+        )));
+
+        // ensure we have TOTAL_BLOCKS in-flight full block requests
+        assert_eq!(block_downloader.inflight_full_block_requests.len(), TOTAL_BLOCKS);
+
+        // poll downloader
+        let sync_future = poll_fn(|cx| block_downloader.poll(cx));
+        let next_ready = sync_future.await;
+
+        assert_matches!(next_ready, DownloadOutcome::Blocks(blocks) => {
+            // ensure all blocks were obtained
+            assert_eq!(blocks.len(), TOTAL_BLOCKS);
+
+            // ensure they are in ascending order
+            for num in 1..=TOTAL_BLOCKS {
+                assert_eq!(blocks[num - 1].number, num as u64);
+            }
+        });
+    }
+
+    #[tokio::test]
+    async fn block_downloader_clear_request() {
+        const TOTAL_BLOCKS: usize = 10;
+        let TestHarness { mut block_downloader, client } = TestHarness::new(TOTAL_BLOCKS);
+
+        let tip = client.highest_block().expect("there should be blocks here");
+
+        // send block range download request
+        block_downloader.on_action(DownloadAction::Download(DownloadRequest::BlockRange(
+            tip.hash(),
+            tip.number,
+        )));
+
+        // send block set download request
+        let download_set = HashSet::from([tip.hash(), tip.parent_hash]);
+        block_downloader
+            .on_action(DownloadAction::Download(DownloadRequest::BlockSet(download_set.clone())));
+
+        // ensure we have one in-flight range request
+        assert_eq!(block_downloader.inflight_block_range_requests.len(), 1);
+
+        // ensure the range request is made correctly
+        let first_req = block_downloader.inflight_block_range_requests.first().unwrap();
+        assert_eq!(first_req.start_hash(), tip.hash());
+        assert_eq!(first_req.count(), tip.number);
+
+        // ensure we have download_set.len() in-flight full block requests
+        assert_eq!(block_downloader.inflight_full_block_requests.len(), download_set.len());
+
+        // send clear request
+        block_downloader.on_action(DownloadAction::Clear);
+
+        // ensure we have no in-flight range requests
+        assert_eq!(block_downloader.inflight_block_range_requests.len(), 0);
+
+        // ensure we have no in-flight full block requests
+        assert_eq!(block_downloader.inflight_full_block_requests.len(), 0);
+    }
+}
diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs
new file mode 100644
index 000000000..9b965e892
--- /dev/null
+++ b/crates/engine/tree/src/engine.rs
@@ -0,0 +1,226 @@
+//! An engine API handler for the chain.
+
+use crate::{
+    chain::{ChainHandler, FromOrchestrator, HandlerEvent},
+    download::{BlockDownloader, DownloadAction, DownloadOutcome},
+    tree::TreeEvent,
+};
+use futures::{Stream, StreamExt};
+use reth_beacon_consensus::BeaconEngineMessage;
+use reth_engine_primitives::EngineTypes;
+use reth_primitives::{SealedBlockWithSenders, B256};
+use std::{
+    collections::HashSet,
+    sync::mpsc::Sender,
+    task::{Context, Poll},
+};
+use tokio::sync::mpsc::UnboundedReceiver;
+
+/// Advances the chain based on incoming requests.
+///
+/// This is a general purpose request handler with network access.
+/// This type listens for incoming messages and processes them via the configured request handler.
+///
+/// ## Overview
+///
+/// This type is an orchestrator for incoming messages and is responsible for delegating requests
+/// received from the CL to the handler.
+///
+/// It is responsible for handling the following:
+/// - Downloading blocks on demand from the network if requested by the [`EngineApiRequestHandler`].
+///
+/// The core logic is part of the [`EngineRequestHandler`], which is responsible for processing the
+/// incoming requests.
+#[derive(Debug)]
+pub struct EngineHandler<T, S, D> {
+    /// Processes requests.
+    ///
+    /// This type is responsible for processing incoming requests.
+    handler: T,
+    /// Receiver for incoming requests that need to be processed.
+    incoming_requests: S,
+    /// A downloader to download blocks on demand.
+    downloader: D,
+}
+
+impl<T, S, D> EngineHandler<T, S, D> {
+    /// Creates a new [`EngineHandler`] with the given handler and downloader.
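+    ///
+    /// A minimal wiring sketch; `request_handler`, `downloader`, and `requests` are
+    /// hypothetical values of suitable types:
+    ///
+    /// ```ignore
+    /// let engine_handler = EngineHandler::new(request_handler, downloader, requests);
+    /// ```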
+    pub const fn new(handler: T, downloader: D, incoming_requests: S) -> Self
+    where
+        T: EngineRequestHandler,
+    {
+        Self { handler, incoming_requests, downloader }
+    }
+}
+
+impl<T, S, D> ChainHandler for EngineHandler<T, S, D>
+where
+    T: EngineRequestHandler,
+    S: Stream<Item = T::Request> + Send + Sync + Unpin + 'static,
+    D: BlockDownloader,
+{
+    type Event = T::Event;
+
+    fn on_event(&mut self, event: FromOrchestrator) {
+        // delegate event to the handler
+        self.handler.on_event(event.into());
+    }
+
+    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<HandlerEvent<Self::Event>> {
+        loop {
+            // drain the handler first
+            while let Poll::Ready(ev) = self.handler.poll(cx) {
+                match ev {
+                    RequestHandlerEvent::Idle => break,
+                    RequestHandlerEvent::HandlerEvent(ev) => {
+                        return match ev {
+                            HandlerEvent::BackfillSync(target) => {
+                                // bubble up the backfill sync request
+                                self.downloader.on_action(DownloadAction::Clear);
+                                Poll::Ready(HandlerEvent::BackfillSync(target))
+                            }
+                            HandlerEvent::Event(ev) => {
+                                // bubble up the event
+                                Poll::Ready(HandlerEvent::Event(ev))
+                            }
+                        }
+                    }
+                    RequestHandlerEvent::Download(req) => {
+                        // delegate download request to the downloader
+                        self.downloader.on_action(DownloadAction::Download(req));
+                    }
+                }
+            }
+
+            // pop the next incoming request
+            if let Poll::Ready(Some(req)) = self.incoming_requests.poll_next_unpin(cx) {
+                // and delegate the request to the handler
+                self.handler.on_event(FromEngine::Request(req));
+                // skip downloading in this iteration to allow the handler to process the request
+                continue
+            }
+
+            // advance the downloader
+            if let Poll::Ready(DownloadOutcome::Blocks(blocks)) = self.downloader.poll(cx) {
+                // delegate the downloaded blocks to the handler
+                self.handler.on_event(FromEngine::DownloadedBlocks(blocks));
+                continue
+            }
+
+            return Poll::Pending
+        }
+    }
+}
+
+/// A type that processes incoming requests (e.g. requests from the consensus layer, engine API).
+pub trait EngineRequestHandler: Send + Sync {
+    /// Event type this handler can emit.
+    type Event: Send;
+    /// The request type this handler can process.
+    type Request;
+
+    /// Informs the handler about an event from the [`EngineHandler`].
+    fn on_event(&mut self, event: FromEngine<Self::Request>);
+
+    /// Advances the handler.
+    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<RequestHandlerEvent<Self::Event>>;
+}
+
+/// An [`EngineRequestHandler`] that processes engine API requests by delegating to an execution
+/// task.
+///
+/// This type is responsible for advancing the chain during live sync (following the tip of the
+/// chain).
+///
+/// It advances the chain based on received engine API requests by delegating them to the tree
+/// executor.
+///
+/// There are two types of requests that can be processed:
+///
+/// - `on_new_payload`: Executes the payload and inserts it into the tree. These are allowed to be
+///   processed concurrently.
+/// - `on_forkchoice_updated`: Updates the fork choice based on the new head. These require write
+///   access to the database and are skipped if the handler can't acquire exclusive access to the
+///   database.
+///
+/// In case required blocks are missing, the handler will request them from the network by emitting
+/// a download request upstream.
+#[derive(Debug)]
+pub struct EngineApiRequestHandler<T: EngineTypes> {
+    /// Channel to send messages to the tree to execute the payload.
+    to_tree: Sender<FromEngine<BeaconEngineMessage<T>>>,
+    /// Channel to receive messages from the tree.
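+    /// Events received on this channel are intended to be bubbled up to the
+    /// [`EngineHandler`] as [`RequestHandlerEvent`]s.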
+    from_tree: UnboundedReceiver<EngineApiEvent>,
+}
+
+impl<T> EngineApiRequestHandler<T>
+where
+    T: EngineTypes,
+{
+    /// Creates a new [`EngineApiRequestHandler`] from the given channels.
+    pub const fn new(
+        to_tree: Sender<FromEngine<BeaconEngineMessage<T>>>,
+        from_tree: UnboundedReceiver<EngineApiEvent>,
+    ) -> Self {
+        Self { to_tree, from_tree }
+    }
+}
+
+impl<T> EngineRequestHandler for EngineApiRequestHandler<T>
+where
+    T: EngineTypes,
+{
+    type Event = EngineApiEvent;
+    type Request = BeaconEngineMessage<T>;
+
+    fn on_event(&mut self, event: FromEngine<Self::Request>) {
+        // delegate to the tree
+        let _ = self.to_tree.send(event);
+    }
+
+    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<RequestHandlerEvent<Self::Event>> {
+        todo!("poll tree")
+    }
+}
+
+/// Events emitted by the engine API handler.
+#[derive(Debug)]
+pub enum EngineApiEvent {
+    /// Bubbled from tree.
+    FromTree(TreeEvent),
+}
+
+/// Messages sent from the [`EngineHandler`] to the [`EngineRequestHandler`].
+#[derive(Debug)]
+pub enum FromEngine<Req> {
+    /// Event from the top level orchestrator.
+    Event(FromOrchestrator),
+    /// Request from the engine.
+    Request(Req),
+    /// Downloaded blocks from the network.
+    DownloadedBlocks(Vec<SealedBlockWithSenders>),
+}
+
+impl<Req> From<FromOrchestrator> for FromEngine<Req> {
+    fn from(event: FromOrchestrator) -> Self {
+        Self::Event(event)
+    }
+}
+
+/// Requests produced by an [`EngineRequestHandler`].
+#[derive(Debug)]
+pub enum RequestHandlerEvent<T> {
+    /// The handler is idle.
+    Idle,
+    /// An event emitted by the handler.
+    HandlerEvent(HandlerEvent<T>),
+    /// Request to download blocks.
+    Download(DownloadRequest),
+}
+
+/// A request to download blocks from the network.
+#[derive(Debug)]
+pub enum DownloadRequest {
+    /// Download the given set of blocks.
+    BlockSet(HashSet<B256>),
+    /// Download the given range of blocks.
+    BlockRange(B256, u64),
+}
diff --git a/crates/engine/tree/src/lib.rs b/crates/engine/tree/src/lib.rs
new file mode 100644
index 000000000..8f40119b2
--- /dev/null
+++ b/crates/engine/tree/src/lib.rs
@@ -0,0 +1,35 @@
+//! This crate includes the core components for advancing a reth chain.
+//!
+//! ## Feature Flags
+//!
+//! - `test-utils`: Export utilities for testing
+
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
+    html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
+    issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
+)]
+#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+// #![cfg_attr(not(test), warn(unused_crate_dependencies))]
+#![allow(missing_docs, dead_code, missing_debug_implementations, unused_variables)] // TODO rm
+
+/// Re-export of the blockchain tree API.
+pub use reth_blockchain_tree_api::*;
+
+/// Support for backfill sync mode.
+pub mod backfill;
+/// The type that drives the chain forward.
+pub mod chain;
+/// Support for downloading blocks on demand for live sync.
+pub mod download;
+/// Engine Api chain handler support.
+pub mod engine;
+/// Metrics support.
+pub mod metrics;
+/// The background writer task for batch db writes.
+pub mod persistence;
+/// Support for interacting with the blockchain tree.
+pub mod tree;
+
+#[cfg(any(test, feature = "test-utils"))]
+pub mod test_utils;
diff --git a/crates/engine/tree/src/metrics.rs b/crates/engine/tree/src/metrics.rs
new file mode 100644
index 000000000..9579affe6
--- /dev/null
+++ b/crates/engine/tree/src/metrics.rs
@@ -0,0 +1,9 @@
+use reth_metrics::{metrics::Gauge, Metrics};
+
+/// Metrics for the `BasicBlockDownloader`.
+#[derive(Metrics)]
+#[metrics(scope = "consensus.engine.beacon")]
+pub(crate) struct BlockDownloaderMetrics {
+    /// How many blocks are currently being downloaded.
+    pub(crate) active_block_downloads: Gauge,
+}
diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs
new file mode 100644
index 000000000..23b3a5827
--- /dev/null
+++ b/crates/engine/tree/src/persistence.rs
@@ -0,0 +1,266 @@
+#![allow(dead_code)]
+
+use crate::tree::ExecutedBlock;
+use reth_db::database::Database;
+use reth_errors::ProviderResult;
+use reth_primitives::B256;
+use reth_provider::{
+    bundle_state::HashedStateChanges, BlockWriter, HistoryWriter, OriginalValuesKnown,
+    ProviderFactory, StageCheckpointWriter, StateWriter,
+};
+use reth_prune::{PruneProgress, Pruner};
+use std::sync::mpsc::{Receiver, SendError, Sender};
+use tokio::sync::oneshot;
+use tracing::debug;
+
+/// Writes parts of reth's in-memory tree state to the database.
+///
+/// This is meant to be a spawned task that listens for various incoming persistence operations,
+/// performs those actions on disk, and returns the result in a channel.
+///
+/// There are two types of operations this task can perform:
+/// - Writing executed blocks to disk, returning the hash of the latest block that was inserted.
+/// - Removing blocks from disk, returning the removed blocks.
+///
+/// This should be spawned in its own thread with [`std::thread::spawn`], since this performs
+/// blocking database operations in an endless loop.
+#[derive(Debug)]
+pub struct Persistence<DB> {
+    /// The db / static file provider to use
+    provider: ProviderFactory<DB>,
+    /// Incoming requests to persist stuff
+    incoming: Receiver<PersistenceAction>,
+    /// The pruner
+    pruner: Pruner<DB>,
+}
+
+impl<DB: Database> Persistence<DB> {
+    /// Create a new persistence task
+    const fn new(
+        provider: ProviderFactory<DB>,
+        incoming: Receiver<PersistenceAction>,
+        pruner: Pruner<DB>,
+    ) -> Self {
+        Self { provider, incoming, pruner }
+    }
+
+    /// Writes the cloned tree state to the database
+    fn write(&self, blocks: Vec<ExecutedBlock>) -> ProviderResult<()> {
+        let provider_rw = self.provider.provider_rw()?;
+
+        if blocks.is_empty() {
+            debug!(target: "tree::persistence", "Attempted to write empty block range");
+            return Ok(())
+        }
+
+        let first_number = blocks.first().unwrap().block().number;
+
+        let last = blocks.last().unwrap().block();
+        let last_block_number = last.number;
+
+        // TODO: remove all the clones and do performant / batched writes for each type of object
+        // instead of a loop over all blocks,
+        // meaning:
+        // * blocks
+        // * state
+        // * hashed state
+        // * trie updates (cannot naively extend, need helper)
+        // * indices (already done basically)
+        // Insert the blocks
+        for block in blocks {
+            let sealed_block =
+                block.block().clone().try_with_senders_unchecked(block.senders().clone()).unwrap();
+            provider_rw.insert_block(sealed_block)?;
+
+            // Write state and changesets to the database.
+            // Must be written after blocks because of the receipt lookup.
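+            // (receipts are keyed by transaction number, which is only assigned once the
+            // block body indices have been inserted)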
+            let execution_outcome = block.execution_outcome().clone();
+            execution_outcome.write_to_storage(
+                provider_rw.tx_ref(),
+                None,
+                OriginalValuesKnown::No,
+            )?;
+
+            // insert hashes and intermediate merkle nodes
+            {
+                let trie_updates = block.trie_updates().clone();
+                let hashed_state = block.hashed_state();
+                HashedStateChanges(hashed_state.clone()).write_to_db(provider_rw.tx_ref())?;
+                trie_updates.write_to_database(provider_rw.tx_ref())?;
+            }
+
+            // update history indices
+            provider_rw.update_history_indices(first_number..=last_block_number)?;
+
+            // Update pipeline progress
+            provider_rw.update_pipeline_stages(last_block_number, false)?;
+        }
+
+        debug!(target: "tree::persistence", range = ?first_number..=last_block_number, "Appended blocks");
+
+        Ok(())
+    }
+
+    /// Removes the blocks above the given block number from the database, returning them.
+    fn remove_blocks_above(&self, _block_number: u64) -> Vec<ExecutedBlock> {
+        todo!("implement this")
+    }
+
+    /// Prunes block data before the given block number according to the configured prune
+    /// configuration.
+    fn prune_before(&mut self, block_num: u64) -> PruneProgress {
+        // TODO: doing this properly depends on pruner segment changes
+        self.pruner.run(block_num).expect("todo: handle errors")
+    }
+
+    /// Removes static file related data from the database, depending on the current block height
+    /// in existing static files.
+    fn clean_static_file_duplicates(&self) {
+        todo!("implement this")
+    }
+}
+
+impl<DB> Persistence<DB>
+where
+    DB: Database + 'static,
+{
+    /// Create a new persistence task, spawning it, and returning a [`PersistenceHandle`].
+    fn spawn_new(provider: ProviderFactory<DB>, pruner: Pruner<DB>) -> PersistenceHandle {
+        let (tx, rx) = std::sync::mpsc::channel();
+        let task = Self::new(provider, rx, pruner);
+        std::thread::Builder::new()
+            .name("Persistence Task".to_string())
+            .spawn(|| task.run())
+            .unwrap();
+
+        PersistenceHandle::new(tx)
+    }
+}
+
+impl<DB> Persistence<DB>
+where
+    DB: Database,
+{
+    /// This is the main loop, that will listen to persistence events and perform the requested
+    /// database actions.
+    fn run(mut self) {
+        // If the receiver errors then senders have disconnected, so the loop should end.
+        while let Ok(action) = self.incoming.recv() {
+            match action {
+                PersistenceAction::RemoveBlocksAbove((new_tip_num, sender)) => {
+                    let output = self.remove_blocks_above(new_tip_num);
+
+                    // we ignore the error because the caller may or may not care about the result
+                    let _ = sender.send(output);
+                }
+                PersistenceAction::SaveBlocks((blocks, sender)) => {
+                    if blocks.is_empty() {
+                        todo!("return error or something");
+                    }
+                    let last_block_hash = blocks.last().unwrap().block().hash();
+                    self.write(blocks).unwrap();
+
+                    // we ignore the error because the caller may or may not care about the result
+                    let _ = sender.send(last_block_hash);
+                }
+                PersistenceAction::PruneBefore((block_num, sender)) => {
+                    let res = self.prune_before(block_num);
+
+                    // we ignore the error because the caller may or may not care about the result
+                    let _ = sender.send(res);
+                }
+                PersistenceAction::CleanStaticFileDuplicates(sender) => {
+                    self.clean_static_file_duplicates();
+
+                    // we ignore the error because the caller may or may not care about the result
+                    let _ = sender.send(());
+                }
+            }
+        }
+    }
+}
+
+/// A signal to the persistence task that part of the tree state can be persisted.
+#[derive(Debug)]
+pub enum PersistenceAction {
+    /// The section of tree state that should be persisted. These blocks are expected in order of
+    /// increasing block number.
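+    ///
+    /// The sender half of the channel receives the hash of the last block in the batch
+    /// once it has been written.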
+    SaveBlocks((Vec<ExecutedBlock>, oneshot::Sender<B256>)),
+
+    /// Removes the blocks above the given block number from the database.
+    RemoveBlocksAbove((u64, oneshot::Sender<Vec<ExecutedBlock>>)),
+
+    /// Prune associated block data before the given block number, according to already-configured
+    /// prune modes.
+    PruneBefore((u64, oneshot::Sender<PruneProgress>)),
+
+    /// Trigger a read of static file data, and delete data depending on the highest block in each
+    /// static file segment.
+    CleanStaticFileDuplicates(oneshot::Sender<()>),
+}
+
+/// A handle to the persistence task.
+#[derive(Debug, Clone)]
+pub struct PersistenceHandle {
+    /// The channel used to communicate with the persistence task
+    sender: Sender<PersistenceAction>,
+}
+
+impl PersistenceHandle {
+    /// Create a new [`PersistenceHandle`] from a [`Sender<PersistenceAction>`].
+    pub const fn new(sender: Sender<PersistenceAction>) -> Self {
+        Self { sender }
+    }
+
+    /// Sends a specific [`PersistenceAction`] in the contained channel. The caller is responsible
+    /// for creating any channels for the given action.
+    pub fn send_action(
+        &self,
+        action: PersistenceAction,
+    ) -> Result<(), SendError<PersistenceAction>> {
+        self.sender.send(action)
+    }
+
+    /// Tells the persistence task to save a certain list of finalized blocks. The blocks are
+    /// assumed to be ordered by block number.
+    ///
+    /// This returns the latest hash that has been saved, allowing removal of that block and any
+    /// previous blocks from in-memory data structures.
+    pub async fn save_blocks(&self, blocks: Vec<ExecutedBlock>) -> B256 {
+        let (tx, rx) = oneshot::channel();
+        self.sender
+            .send(PersistenceAction::SaveBlocks((blocks, tx)))
+            .expect("should be able to send");
+        rx.await.expect("todo: err handling")
+    }
+
+    /// Tells the persistence task to remove blocks above a certain block number. The removed
+    /// blocks are returned by the task.
+    pub async fn remove_blocks_above(&self, block_num: u64) -> Vec<ExecutedBlock> {
+        let (tx, rx) = oneshot::channel();
+        self.sender
+            .send(PersistenceAction::RemoveBlocksAbove((block_num, tx)))
+            .expect("should be able to send");
+        rx.await.expect("todo: err handling")
+    }
+
+    /// Tells the persistence task to remove block data before the given block number, according
+    /// to the configured prune config.
+    pub async fn prune_before(&self, block_num: u64) -> PruneProgress {
+        let (tx, rx) = oneshot::channel();
+        self.sender
+            .send(PersistenceAction::PruneBefore((block_num, tx)))
+            .expect("should be able to send");
+        rx.await.expect("todo: err handling")
+    }
+
+    /// Tells the persistence task to read static file data, and delete data depending on the
+    /// highest block in each static file segment.
+    pub async fn clean_static_file_duplicates(&self) {
+        let (tx, rx) = oneshot::channel();
+        self.sender
+            .send(PersistenceAction::CleanStaticFileDuplicates(tx))
+            .expect("should be able to send");
+        rx.await.expect("todo: err handling")
+    }
+}
diff --git a/crates/engine/tree/src/test_utils.rs b/crates/engine/tree/src/test_utils.rs
new file mode 100644
index 000000000..0a5fbd5ad
--- /dev/null
+++ b/crates/engine/tree/src/test_utils.rs
@@ -0,0 +1,77 @@
+use reth_chainspec::ChainSpec;
+use reth_db::{mdbx::DatabaseEnv, test_utils::TempDatabase};
+use reth_network_p2p::test_utils::TestFullBlockClient;
+use reth_primitives::{BlockBody, SealedHeader, B256};
+use reth_provider::{test_utils::create_test_provider_factory_with_chain_spec, ExecutionOutcome};
+use reth_prune_types::PruneModes;
+use reth_stages::{test_utils::TestStages, ExecOutput, StageError};
+use reth_stages_api::Pipeline;
+use reth_static_file::StaticFileProducer;
+use std::{collections::VecDeque, ops::Range, sync::Arc};
+use tokio::sync::watch;
+
+/// Test pipeline builder.
+#[derive(Default)]
+pub struct TestPipelineBuilder {
+    pipeline_exec_outputs: VecDeque<Result<ExecOutput, StageError>>,
+    executor_results: Vec<ExecutionOutcome>,
+}
+
+impl TestPipelineBuilder {
+    /// Create a new [`TestPipelineBuilder`].
+    pub const fn new() -> Self {
+        Self { pipeline_exec_outputs: VecDeque::new(), executor_results: Vec::new() }
+    }
+
+    /// Set the pipeline execution outputs to use for the test consensus engine.
+    pub fn with_pipeline_exec_outputs(
+        mut self,
+        pipeline_exec_outputs: VecDeque<Result<ExecOutput, StageError>>,
+    ) -> Self {
+        self.pipeline_exec_outputs = pipeline_exec_outputs;
+        self
+    }
+
+    /// Set the executor results to use for the test consensus engine.
+    #[allow(dead_code)]
+    pub fn with_executor_results(mut self, executor_results: Vec<ExecutionOutcome>) -> Self {
+        self.executor_results = executor_results;
+        self
+    }
+
+    /// Builds the pipeline.
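+    ///
+    /// A hypothetical sketch of a test pipeline whose execution stage immediately
+    /// reports completion (`chain_spec` and `tip_number` are assumed to exist):
+    ///
+    /// ```ignore
+    /// let pipeline = TestPipelineBuilder::new()
+    ///     .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput {
+    ///         checkpoint: StageCheckpoint::new(tip_number),
+    ///         done: true,
+    ///     })]))
+    ///     .build(chain_spec);
+    /// ```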
+    pub fn build(self, chain_spec: Arc<ChainSpec>) -> Pipeline<Arc<TempDatabase<DatabaseEnv>>> {
+        reth_tracing::init_test_tracing();
+
+        // Setup pipeline
+        let (tip_tx, _tip_rx) = watch::channel(B256::default());
+        let pipeline = Pipeline::builder()
+            .add_stages(TestStages::new(self.pipeline_exec_outputs, Default::default()))
+            .with_tip_sender(tip_tx);
+
+        let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec);
+
+        let static_file_producer =
+            StaticFileProducer::new(provider_factory.clone(), PruneModes::default());
+
+        pipeline.build(provider_factory, static_file_producer)
+    }
+}
+
+/// Inserts sequential headers into the client, each built on top of the previous one,
+/// starting from the given header.
+pub(crate) fn insert_headers_into_client(
+    client: &TestFullBlockClient,
+    genesis_header: SealedHeader,
+    range: Range<usize>,
+) {
+    let mut sealed_header = genesis_header;
+    let body = BlockBody::default();
+    for _ in range {
+        let (mut header, hash) = sealed_header.split();
+        // update to the next header
+        header.parent_hash = hash;
+        header.number += 1;
+        header.timestamp += 1;
+        sealed_header = header.seal_slow();
+        client.insert(sealed_header.clone(), body.clone());
+    }
+}
diff --git a/crates/engine/tree/src/tree/memory_overlay.rs b/crates/engine/tree/src/tree/memory_overlay.rs
new file mode 100644
index 000000000..f11eece8e
--- /dev/null
+++ b/crates/engine/tree/src/tree/memory_overlay.rs
@@ -0,0 +1,135 @@
+use super::ExecutedBlock;
+use reth_errors::ProviderResult;
+use reth_primitives::{Account, Address, BlockNumber, Bytecode, StorageKey, StorageValue, B256};
+use reth_provider::{
+    AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider,
+};
+use reth_trie::{updates::TrieUpdates, AccountProof};
+use revm::db::BundleState;
+
+/// A state provider that stores references to in-memory blocks along with their state as well as
+/// the historical state provider for fallback lookups.
+#[derive(Debug)]
+pub struct MemoryOverlayStateProvider<H> {
+    /// The collection of executed parent blocks.
+    in_memory: Vec<ExecutedBlock>,
+    /// Historical state provider for state lookups that are not found in in-memory blocks.
+    historical: H,
+}
+
+impl<H> MemoryOverlayStateProvider<H> {
+    /// Create new memory overlay state provider.
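+    ///
+    /// A hypothetical sketch; `blocks` holds in-memory [`ExecutedBlock`]s ordered oldest
+    /// to newest and `historical` is a state provider anchored at their parent:
+    ///
+    /// ```ignore
+    /// let overlay = MemoryOverlayStateProvider::new(blocks, historical);
+    /// let account = overlay.basic_account(address)?;
+    /// ```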
+    pub const fn new(in_memory: Vec<ExecutedBlock>, historical: H) -> Self {
+        Self { in_memory, historical }
+    }
+}
+
+impl<H> BlockHashReader for MemoryOverlayStateProvider<H>
+where
+    H: BlockHashReader,
+{
+    fn block_hash(&self, number: BlockNumber) -> ProviderResult<Option<B256>> {
+        for block in self.in_memory.iter().rev() {
+            if block.block.number == number {
+                return Ok(Some(block.block.hash()))
+            }
+        }
+
+        self.historical.block_hash(number)
+    }
+
+    fn canonical_hashes_range(
+        &self,
+        start: BlockNumber,
+        end: BlockNumber,
+    ) -> ProviderResult<Vec<B256>> {
+        let range = start..end;
+        let mut earliest_block_number = None;
+        let mut in_memory_hashes = Vec::new();
+        for block in self.in_memory.iter().rev() {
+            if range.contains(&block.block.number) {
+                in_memory_hashes.insert(0, block.block.hash());
+                earliest_block_number = Some(block.block.number);
+            }
+        }
+
+        let mut hashes =
+            self.historical.canonical_hashes_range(start, earliest_block_number.unwrap_or(end))?;
+        hashes.append(&mut in_memory_hashes);
+        Ok(hashes)
+    }
+}
+
+impl<H> AccountReader for MemoryOverlayStateProvider<H>
+where
+    H: AccountReader + Send,
+{
+    fn basic_account(&self, address: Address) -> ProviderResult<Option<Account>> {
+        for block in self.in_memory.iter().rev() {
+            if let Some(account) = block.execution_output.account(&address) {
+                return Ok(account)
+            }
+        }
+
+        self.historical.basic_account(address)
+    }
+}
+
+impl<H> StateRootProvider for MemoryOverlayStateProvider<H>
+where
+    H: StateRootProvider + Send,
+{
+    fn state_root(&self, bundle_state: &BundleState) -> ProviderResult<B256> {
+        todo!()
+    }
+
+    fn state_root_with_updates(
+        &self,
+        bundle_state: &BundleState,
+    ) -> ProviderResult<(B256, TrieUpdates)> {
+        todo!()
+    }
+}
+
+impl<H> StateProofProvider for MemoryOverlayStateProvider<H>
+where
+    H: StateProofProvider + Send,
+{
+    fn proof(
+        &self,
+        state: &BundleState,
+        address: Address,
+        slots: &[B256],
+    ) -> ProviderResult<AccountProof> {
+        todo!()
+    }
+}
+
+impl<H> StateProvider for MemoryOverlayStateProvider<H>
+where
+    H: StateProvider + Send,
+{
+    fn storage(
+        &self,
+        address: Address,
+        storage_key: StorageKey,
+    ) -> ProviderResult<Option<StorageValue>> {
+        for block in self.in_memory.iter().rev() {
+            if let Some(value) = block.execution_output.storage(&address, storage_key.into()) {
+                return Ok(Some(value))
+            }
+        }
+
+        self.historical.storage(address, storage_key)
+    }
+
+    fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult<Option<Bytecode>> {
+        for block in self.in_memory.iter().rev() {
+            if let Some(contract) = block.execution_output.bytecode(&code_hash) {
+                return Ok(Some(contract))
+            }
+        }
+
+        self.historical.bytecode_by_hash(code_hash)
+    }
+}
diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs
new file mode 100644
index 000000000..8afed3104
--- /dev/null
+++ b/crates/engine/tree/src/tree/mod.rs
@@ -0,0 +1,757 @@
+use crate::{
+    backfill::BackfillAction,
+    chain::FromOrchestrator,
+    engine::{DownloadRequest, EngineApiEvent, FromEngine},
+};
+use reth_beacon_consensus::{
+    BeaconEngineMessage, ForkchoiceStateTracker, InvalidHeaderCache, OnForkChoiceUpdated,
+};
+use reth_blockchain_tree::{
+    error::InsertBlockErrorKind, BlockAttachment, BlockBuffer, BlockStatus,
+};
+use reth_blockchain_tree_api::{error::InsertBlockError, InsertPayloadOk};
+use reth_consensus::{Consensus, PostExecutionInput};
+use reth_engine_primitives::EngineTypes;
+use reth_errors::{ConsensusError, ProviderResult};
+use reth_evm::execute::{BlockExecutorProvider, Executor};
+use reth_payload_primitives::PayloadTypes;
+use reth_payload_validator::ExecutionPayloadValidator;
+use reth_primitives::{
+    Address, Block, BlockNumber, GotExpected, Receipts, Requests, SealedBlock,
+    SealedBlockWithSenders, B256, U256,
+};
+use reth_provider::{
+    BlockReader, ExecutionOutcome, StateProvider, StateProviderFactory, StateRootProvider,
+};
+use reth_revm::database::StateProviderDatabase;
+use reth_rpc_types::{
+    engine::{
+        CancunPayloadFields, ForkchoiceState, PayloadStatus, PayloadStatusEnum,
+        PayloadValidationError,
+    },
+    ExecutionPayload,
+};
+use reth_trie::{updates::TrieUpdates, HashedPostState};
+use std::{
+    collections::{BTreeMap, HashMap},
+    marker::PhantomData,
+    sync::{mpsc::Receiver, Arc},
+};
+use tokio::sync::mpsc::UnboundedSender;
+use tracing::*;
+
+mod memory_overlay;
+pub use memory_overlay::MemoryOverlayStateProvider;
+
+/// Represents an executed block stored in-memory.
+#[derive(Clone, Debug)]
+pub struct ExecutedBlock {
+    block: Arc<SealedBlock>,
+    senders: Arc<Vec<Address>>,
+    execution_output: Arc<ExecutionOutcome>,
+    hashed_state: Arc<HashedPostState>,
+    trie: Arc<TrieUpdates>,
+}
+
+impl ExecutedBlock {
+    /// Returns a reference to the executed block.
+    pub(crate) fn block(&self) -> &SealedBlock {
+        &self.block
+    }
+
+    /// Returns a reference to the block's senders.
+    pub(crate) fn senders(&self) -> &Vec<Address> {
+        &self.senders
+    }
+
+    /// Returns a reference to the block's execution outcome.
+    pub(crate) fn execution_outcome(&self) -> &ExecutionOutcome {
+        &self.execution_output
+    }
+
+    /// Returns a reference to the hashed state result of the execution outcome.
+    pub(crate) fn hashed_state(&self) -> &HashedPostState {
+        &self.hashed_state
+    }
+
+    /// Returns a reference to the trie updates for the block.
+    pub(crate) fn trie_updates(&self) -> &TrieUpdates {
+        &self.trie
+    }
+}
+
+/// Keeps track of the state of the tree.
+#[derive(Debug, Default)]
+pub struct TreeState {
+    /// All executed blocks by hash.
+    blocks_by_hash: HashMap<B256, ExecutedBlock>,
+    /// Executed blocks grouped by their respective block number.
+    blocks_by_number: BTreeMap<BlockNumber, Vec<ExecutedBlock>>,
+}
+
+impl TreeState {
+    /// Returns the in-memory block with the given hash, if it exists.
+    fn block_by_hash(&self, hash: B256) -> Option<Arc<SealedBlock>> {
+        self.blocks_by_hash.get(&hash).map(|b| b.block.clone())
+    }
+
+    /// Insert executed block into the state.
+    fn insert_executed(&mut self, executed: ExecutedBlock) {
+        self.blocks_by_number.entry(executed.block.number).or_default().push(executed.clone());
+        let existing = self.blocks_by_hash.insert(executed.block.hash(), executed);
+        debug_assert!(existing.is_none(), "inserted duplicate block");
+    }
+
+    /// Remove blocks before the specified block number.
+    pub(crate) fn remove_before(&mut self, block_number: BlockNumber) {
+        while self
+            .blocks_by_number
+            .first_key_value()
+            .map(|entry| entry.0 < &block_number)
+            .unwrap_or_default()
+        {
+            let (_, to_remove) = self.blocks_by_number.pop_first().unwrap();
+            for block in to_remove {
+                let block_hash = block.block.hash();
+                let removed = self.blocks_by_hash.remove(&block_hash);
+                debug_assert!(
+                    removed.is_some(),
+                    "attempted to remove non-existing block {block_hash}"
+                );
+            }
+        }
+    }
+}
+
+/// Tracks the state of the engine api internals.
+///
+/// This type is shareable.
+#[derive(Debug)]
+pub struct EngineApiTreeState {
+    /// Tracks the state of the blockchain tree.
+    tree_state: TreeState,
+    /// Tracks the forkchoice state updates received by the CL.
+    forkchoice_state_tracker: ForkchoiceStateTracker,
+    /// Buffer of detached blocks.
+    buffer: BlockBuffer,
+    /// Tracks the header of invalid payloads that were rejected by the engine because they're
+    /// invalid.
+    invalid_headers: InvalidHeaderCache,
+}
+
+impl EngineApiTreeState {
+    fn new(block_buffer_limit: u32, max_invalid_header_cache_length: u32) -> Self {
+        Self {
+            invalid_headers: InvalidHeaderCache::new(max_invalid_header_cache_length),
+            buffer: BlockBuffer::new(block_buffer_limit),
+            tree_state: TreeState::default(),
+            forkchoice_state_tracker: ForkchoiceStateTracker::default(),
+        }
+    }
+}
+
+/// The type responsible for processing engine API requests.
+///
+/// TODO: design: should the engine handler functions also accept the response channel, or return
+/// the result and have the caller redirect the response?
+pub trait EngineApiTreeHandler {
+    /// The engine type that this handler is for.
+    type Engine: EngineTypes;
+
+    /// Invoked when previously requested blocks were downloaded.
+    fn on_downloaded(&mut self, blocks: Vec<SealedBlockWithSenders>) -> Option<TreeEvent>;
+
+    /// When the Consensus layer receives a new block via the consensus gossip protocol,
+    /// the transactions in the block are sent to the execution layer in the form of a
+    /// [`ExecutionPayload`]. The Execution layer executes the transactions and validates the
+    /// state in the block header, then passes validation data back to the Consensus layer, which
+    /// adds the block to the head of its own blockchain and attests to it.
+    /// The block is then broadcast over the consensus p2p network in the form of a
+    /// "Beacon block".
+    ///
+    /// These responses should adhere to the [Engine API Spec for
+    /// `engine_newPayload`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification).
+    ///
+    /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and
+    /// returns an error if an internal error occurred.
+    fn on_new_payload(
+        &mut self,
+        payload: ExecutionPayload,
+        cancun_fields: Option<CancunPayloadFields>,
+    ) -> ProviderResult<TreeOutcome<PayloadStatus>>;
+
+    /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree
+    /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid
+    /// chain.
+    ///
+    /// These responses should adhere to the [Engine API Spec for
+    /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1).
+    ///
+    /// Returns an error if an internal error occurred, like a database error.
+    fn on_forkchoice_updated(
+        &mut self,
+        state: ForkchoiceState,
+        attrs: Option<<Self::Engine as PayloadTypes>::PayloadAttributes>,
+    ) -> ProviderResult<TreeOutcome<OnForkChoiceUpdated>>;
+}
+
+/// The outcome of a tree operation.
+#[derive(Debug)]
+pub struct TreeOutcome<T> {
+    /// The outcome of the operation.
+    pub outcome: T,
+    /// An optional event to tell the caller to do something.
+    pub event: Option<TreeEvent>,
+}
+
+impl<T> TreeOutcome<T> {
+    /// Create new tree outcome.
+    pub const fn new(outcome: T) -> Self {
+        Self { outcome, event: None }
+    }
+
+    /// Set event on the outcome.
+    pub fn with_event(mut self, event: TreeEvent) -> Self {
+        self.event = Some(event);
+        self
+    }
+}
+
+/// Events that can be emitted by the [`EngineApiTreeHandler`].
+#[derive(Debug)]
+pub enum TreeEvent {
+    /// Tree action is needed.
+    TreeAction(TreeAction),
+    /// Backfill action is needed.
+    BackfillAction(BackfillAction),
+    /// Block download is needed.
+    Download(DownloadRequest),
+}
+
+/// The actions that can be performed on the tree.
+#[derive(Debug)]
+pub enum TreeAction {
+    /// Make target canonical.
+    MakeCanonical(B256),
+}
+
+/// The implementation of the [`EngineApiTreeHandler`].
+#[derive(Debug)]
+pub struct EngineApiTreeHandlerImpl<P, E, T: EngineTypes> {
+    provider: P,
+    executor_provider: E,
+    consensus: Arc<dyn Consensus>,
+    payload_validator: ExecutionPayloadValidator,
+    state: EngineApiTreeState,
+    incoming: Receiver<FromEngine<BeaconEngineMessage<T>>>,
+    outgoing: UnboundedSender<EngineApiEvent>,
+    /// (tmp) The flag indicating whether the pipeline is active.
+    is_pipeline_active: bool,
+    _marker: PhantomData<T>,
+}
+
+impl<P, E, T> EngineApiTreeHandlerImpl<P, E, T>
+where
+    P: BlockReader + StateProviderFactory + Clone + 'static,
+    E: BlockExecutorProvider,
+    T: EngineTypes + 'static,
+{
+    #[allow(clippy::too_many_arguments)]
+    fn new(
+        provider: P,
+        executor_provider: E,
+        consensus: Arc<dyn Consensus>,
+        payload_validator: ExecutionPayloadValidator,
+        incoming: Receiver<FromEngine<BeaconEngineMessage<T>>>,
+        outgoing: UnboundedSender<EngineApiEvent>,
+        state: EngineApiTreeState,
+    ) -> Self {
+        Self {
+            provider,
+            executor_provider,
+            consensus,
+            payload_validator,
+            incoming,
+            outgoing,
+            is_pipeline_active: false,
+            state,
+            _marker: PhantomData,
+        }
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    fn spawn_new(
+        provider: P,
+        executor_provider: E,
+        consensus: Arc<dyn Consensus>,
+        payload_validator: ExecutionPayloadValidator,
+        incoming: Receiver<FromEngine<BeaconEngineMessage<T>>>,
+        state: EngineApiTreeState,
+    ) -> UnboundedSender<EngineApiEvent> {
+        let (outgoing, rx) = tokio::sync::mpsc::unbounded_channel();
+        let task = Self::new(
+            provider,
+            executor_provider,
+            consensus,
+            payload_validator,
+            incoming,
+            outgoing.clone(),
+            state,
+        );
+        std::thread::Builder::new().name("Tree Task".to_string()).spawn(|| task.run()).unwrap();
+        outgoing
+    }
+
+    fn run(mut self) {
+        loop {
+            while let Ok(msg) = self.incoming.recv() {
+                match msg {
+                    FromEngine::Event(event) => match event {
+                        FromOrchestrator::BackfillSyncFinished => {
+                            todo!()
+                        }
+                        FromOrchestrator::BackfillSyncStarted => {
+                            todo!()
+                        }
+                    },
+                    FromEngine::Request(request) => match request {
+                        BeaconEngineMessage::ForkchoiceUpdated { state, payload_attrs, tx } => {
+                            let output = self.on_forkchoice_updated(state, payload_attrs);
+                            if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(Into::into))
+                            {
+                                error!("Failed to send event: {err:?}");
+                            }
+                        }
+                        BeaconEngineMessage::NewPayload { payload, cancun_fields, tx } => {
+                            let output = self.on_new_payload(payload, cancun_fields);
+                            if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(|e| {
+                                reth_beacon_consensus::BeaconOnNewPayloadError::Internal(Box::new(
+                                    e,
+                                ))
+                            })) {
+                                error!("Failed to send event: {err:?}");
+                            }
+                        }
+                        BeaconEngineMessage::TransitionConfigurationExchanged => {
+                            todo!()
+                        }
+                    },
+                    FromEngine::DownloadedBlocks(blocks) => {
+                        if let Some(event) = self.on_downloaded(blocks) {
+                            if let Err(err) = self.outgoing.send(EngineApiEvent::FromTree(event)) {
+                                error!("Failed to send event: {err:?}");
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    /// Return block from database or in-memory state by hash.
+    fn block_by_hash(&self, hash: B256) -> ProviderResult<Option<Block>> {
+        // check database first
+        let mut block = self.provider.block_by_hash(hash)?;
+        if block.is_none() {
+            // Note: it's fine to return the unsealed block because the caller already has
+            // the hash
+            block = self
+                .state
+                .tree_state
+                .block_by_hash(hash)
+                // TODO: clone for compatibility. should we return an Arc here?
+                .map(|block| block.as_ref().clone().unseal());
+        }
+        Ok(block)
+    }
+
+    /// Return a state provider with references to in-memory blocks that overlay database state.
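+    ///
+    /// In-memory blocks are collected by walking parent hashes from the requested block
+    /// back to the first ancestor that is not held in memory; the returned provider then
+    /// overlays those blocks on top of the historical state at that ancestor.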
+    fn state_provider(
+        &self,
+        hash: B256,
+    ) -> ProviderResult<MemoryOverlayStateProvider<Box<dyn StateProvider>>> {
+        let mut in_memory = Vec::new();
+        let mut parent_hash = hash;
+        while let Some(executed) = self.state.tree_state.blocks_by_hash.get(&parent_hash) {
+            parent_hash = executed.block.parent_hash;
+            in_memory.insert(0, executed.clone());
+        }
+
+        let historical = self.provider.state_by_block_hash(parent_hash)?;
+        Ok(MemoryOverlayStateProvider::new(in_memory, historical))
+    }
+
+    /// Return the parent hash of the lowest buffered ancestor for the requested block, if there
+    /// are any buffered ancestors. If there are no buffered ancestors, and the block itself does
+    /// not exist in the buffer, this returns the hash that is passed in.
+    ///
+    /// Returns the parent hash of the block itself if the block is buffered and has no other
+    /// buffered ancestors.
+    fn lowest_buffered_ancestor_or(&self, hash: B256) -> B256 {
+        self.state
+            .buffer
+            .lowest_ancestor(&hash)
+            .map(|block| block.parent_hash)
+            .unwrap_or_else(|| hash)
+    }
+
+    /// If validation fails, the response MUST contain the latest valid hash:
+    ///
+    /// - The block hash of the ancestor of the invalid payload satisfying the following two
+    ///   conditions:
+    ///   - It is fully validated and deemed VALID
+    ///   - Any other ancestor of the invalid payload with a higher blockNumber is INVALID
+    /// - 0x0000000000000000000000000000000000000000000000000000000000000000 if the above
+    ///   conditions are satisfied by a `PoW` block.
+    /// - null if client software cannot determine the ancestor of the invalid payload satisfying
+    ///   the above conditions.
+    fn latest_valid_hash_for_invalid_payload(
+        &mut self,
+        parent_hash: B256,
+    ) -> ProviderResult<Option<B256>> {
+        // Check if parent exists in side chain or in canonical chain.
+        if self.block_by_hash(parent_hash)?.is_some() {
+            return Ok(Some(parent_hash))
+        }
+
+        // iterate over ancestors in the invalid cache
+        // until we encounter the first valid ancestor
+        let mut current_hash = parent_hash;
+        let mut current_header = self.state.invalid_headers.get(&current_hash);
+        while let Some(header) = current_header {
+            current_hash = header.parent_hash;
+            current_header = self.state.invalid_headers.get(&current_hash);
+
+            // If current_header is None, then the current_hash does not have an invalid
+            // ancestor in the cache, check its presence in blockchain tree
+            if current_header.is_none() && self.block_by_hash(current_hash)?.is_some() {
+                return Ok(Some(current_hash))
+            }
+        }
+        Ok(None)
+    }
+
+    /// Prepares the invalid payload response for the given hash, checking the
+    /// database for the parent hash and populating the payload status with the latest valid hash
+    /// according to the engine API spec.
+    fn prepare_invalid_response(&mut self, mut parent_hash: B256) -> ProviderResult<PayloadStatus> {
+        // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal
+        // PoW block, which we need to identify by looking at the parent's block difficulty
+        if let Some(parent) = self.block_by_hash(parent_hash)? {
+            if !parent.is_zero_difficulty() {
+                parent_hash = B256::ZERO;
+            }
+        }
+
+        let valid_parent_hash = self.latest_valid_hash_for_invalid_payload(parent_hash)?;
+        Ok(PayloadStatus::from_status(PayloadStatusEnum::Invalid {
+            validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(),
+        })
+        .with_latest_valid_hash(valid_parent_hash.unwrap_or_default()))
+    }
+
+    /// Checks if the given `check` hash points to an invalid header, inserting the given `head`
+    /// block into the invalid header cache if the `check` hash has a known invalid ancestor.
+    ///
+    /// Returns a payload status response according to the engine API spec if the block is known
+    /// to be invalid.
+    fn check_invalid_ancestor_with_head(
+        &mut self,
+        check: B256,
+        head: B256,
+    ) -> ProviderResult<Option<PayloadStatus>> {
+        // check if the check hash was previously marked as invalid
+        let Some(header) = self.state.invalid_headers.get(&check) else { return Ok(None) };
+
+        // populate the latest valid hash field
+        let status = self.prepare_invalid_response(header.parent_hash)?;
+
+        // insert the head block into the invalid header cache
+        self.state.invalid_headers.insert_with_invalid_ancestor(head, header);
+
+        Ok(Some(status))
+    }
+
+    /// Checks if the given `head` points to an invalid header, which requires a specific response
+    /// to a forkchoice update.
+    fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult<Option<PayloadStatus>> {
+        // check if the head was previously marked as invalid
+        let Some(header) = self.state.invalid_headers.get(&head) else { return Ok(None) };
+        // populate the latest valid hash field
+        Ok(Some(self.prepare_invalid_response(header.parent_hash)?))
+    }
+
+    /// Validate if the block is correct and satisfies all the consensus rules that concern the
+    /// header and block body itself.
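+    ///
+    /// Concretely, this runs the consensus checks that do not require execution:
+    /// total-difficulty validation, header validation, and pre-execution block
+    /// validation.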
+    fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> {
+        if let Err(e) = self.consensus.validate_header_with_total_difficulty(block, U256::MAX) {
+            error!(
+                ?block,
+                "Failed to validate total difficulty for block {}: {e}",
+                block.header.hash()
+            );
+            return Err(e)
+        }
+
+        if let Err(e) = self.consensus.validate_header(block) {
+            error!(?block, "Failed to validate header {}: {e}", block.header.hash());
+            return Err(e)
+        }
+
+        if let Err(e) = self.consensus.validate_block_pre_execution(block) {
+            error!(?block, "Failed to validate block {}: {e}", block.header.hash());
+            return Err(e)
+        }
+
+        Ok(())
+    }
+
+    fn buffer_block_without_senders(&mut self, block: SealedBlock) -> Result<(), InsertBlockError> {
+        match block.try_seal_with_senders() {
+            Ok(block) => self.buffer_block(block),
+            Err(block) => Err(InsertBlockError::sender_recovery_error(block)),
+        }
+    }
+
+    fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> {
+        if let Err(err) = self.validate_block(&block) {
+            return Err(InsertBlockError::consensus_error(err, block.block))
+        }
+        self.state.buffer.insert_block(block);
+        Ok(())
+    }
+
+    fn insert_block_without_senders(
+        &mut self,
+        block: SealedBlock,
+    ) -> Result<InsertPayloadOk, InsertBlockError> {
+        match block.try_seal_with_senders() {
+            Ok(block) => self.insert_block(block),
+            Err(block) => Err(InsertBlockError::sender_recovery_error(block)),
+        }
+    }
+
+    fn insert_block(
+        &mut self,
+        block: SealedBlockWithSenders,
+    ) -> Result<InsertPayloadOk, InsertBlockError> {
+        self.insert_block_inner(block.clone())
+            .map_err(|kind| InsertBlockError::new(block.block, kind))
+    }
+
+    fn insert_block_inner(
+        &mut self,
+        block: SealedBlockWithSenders,
+    ) -> Result<InsertPayloadOk, InsertBlockErrorKind> {
+        if self.block_by_hash(block.hash())?.is_some() {
+            let attachment = BlockAttachment::Canonical; // TODO: remove or revise attachment
+            return Ok(InsertPayloadOk::AlreadySeen(BlockStatus::Valid(attachment)))
+        }
+
+        // validate block consensus rules
+        self.validate_block(&block)?;
+
+        let state_provider = self.state_provider(block.parent_hash).unwrap();
+        let executor =
+            self.executor_provider.executor(StateProviderDatabase::new(&state_provider));
+
+        let block_number = block.number;
+        let block_hash = block.hash();
+        let block = block.unseal();
+        let output = executor.execute((&block, U256::MAX).into()).unwrap();
+        self.consensus.validate_block_post_execution(
+            &block,
+            PostExecutionInput::new(&output.receipts, &output.requests),
+        )?;
+
+        // TODO: change StateRootProvider API to accept hashed post state
+        let hashed_state = HashedPostState::from_bundle_state(&output.state.state);
+
+        let (state_root, trie_output) = state_provider.state_root_with_updates(&output.state)?;
+        if state_root != block.state_root {
+            return Err(ConsensusError::BodyStateRootDiff(
+                GotExpected { got: state_root, expected: block.state_root }.into(),
+            )
+            .into())
+        }
+
+        let executed = ExecutedBlock {
+            block: Arc::new(block.block.seal(block_hash)),
+            senders: Arc::new(block.senders),
+            execution_output: Arc::new(ExecutionOutcome::new(
+                output.state,
+                Receipts::from(output.receipts),
+                block_number,
+                vec![Requests::from(output.requests)],
+            )),
+            hashed_state: Arc::new(hashed_state),
+            trie: Arc::new(trie_output),
+        };
+        self.state.tree_state.insert_executed(executed);
+
+        let attachment = BlockAttachment::Canonical; // TODO: remove or revise attachment
+        Ok(InsertPayloadOk::Inserted(BlockStatus::Valid(attachment)))
+    }
+
+    /// Pre-validates a forkchoice update and checks whether it can be processed.
+    /// Pre-validate forkchoice update and check whether it can be processed.
+    ///
+    /// This method returns the update outcome if validation fails or
+    /// the node is syncing and the update cannot be processed at the moment.
+    fn pre_validate_forkchoice_update(
+        &mut self,
+        state: ForkchoiceState,
+    ) -> ProviderResult<Option<OnForkChoiceUpdated>> {
+        if state.head_block_hash.is_zero() {
+            return Ok(Some(OnForkChoiceUpdated::invalid_state()))
+        }
+
+        // check if the new head hash is connected to any ancestor that we previously marked as
+        // invalid
+        let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash);
+        if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? {
+            return Ok(Some(OnForkChoiceUpdated::with_invalid(status)))
+        }
+
+        if self.is_pipeline_active {
+            // We can only process new forkchoice updates if the pipeline is idle, since it requires
+            // exclusive access to the database
+            trace!(target: "consensus::engine", "Pipeline is syncing, skipping forkchoice update");
+            return Ok(Some(OnForkChoiceUpdated::syncing()))
+        }
+
+        Ok(None)
+    }
+}
+
+impl<P, E, T> EngineApiTreeHandler for EngineApiTreeHandlerImpl<P, E, T>
+where
+    P: BlockReader + StateProviderFactory + Clone + 'static,
+    E: BlockExecutorProvider,
+    T: EngineTypes + 'static,
+{
+    type Engine = T;
+
+    fn on_downloaded(&mut self, _blocks: Vec<SealedBlockWithSenders>) -> Option<TreeEvent> {
+        todo!()
+    }
+
+    fn on_new_payload(
+        &mut self,
+        payload: ExecutionPayload,
+        cancun_fields: Option<CancunPayloadFields>,
+    ) -> ProviderResult<TreeOutcome<PayloadStatus>> {
+        // Ensures that the given payload does not violate any consensus rules that concern the
+        // block's layout, like:
+        //    - missing or invalid base fee
+        //    - invalid extra data
+        //    - invalid transactions
+        //    - incorrect hash
+        //    - the versioned hashes passed with the payload do not exactly match transaction
+        //      versioned hashes
+        //    - the block does not contain blob transactions if it is pre-cancun
+        //
+        // This validates the following engine API rule:
+        //
+        // 3. Given the expected array of blob versioned hashes client software **MUST** run its
+        //    validation by taking the following steps:
+        //
+        //    1. Obtain the actual array by concatenating blob versioned hashes lists
+        //       (`tx.blob_versioned_hashes`) of each [blob
+        //       transaction](https://eips.ethereum.org/EIPS/eip-4844#new-transaction-type) included
+        //       in the payload, respecting the order of inclusion. If the payload has no blob
+        //       transactions the expected array **MUST** be `[]`.
+        //
+        //    2. Return `{status: INVALID, latestValidHash: null, validationError: errorMessage |
+        //       null}` if the expected and the actual arrays don't match.
+        //
+        // This validation **MUST** be instantly run in all cases even during active sync process.
+        let parent_hash = payload.parent_hash();
+        let block = match self
+            .payload_validator
+            .ensure_well_formed_payload(payload, cancun_fields.into())
+        {
+            Ok(block) => block,
+            Err(error) => {
+                error!(target: "engine::tree", %error, "Invalid payload");
+                // we need to convert the error to a payload status (response to the CL)
+
+                let latest_valid_hash =
+                    if error.is_block_hash_mismatch() || error.is_invalid_versioned_hashes() {
+                        // Engine-API rules:
+                        // > `latestValidHash: null` if the blockHash validation has failed ()
+                        // > `latestValidHash: null` if the expected and the actual arrays don't match ()
+                        None
+                    } else {
+                        self.latest_valid_hash_for_invalid_payload(parent_hash)?
+                    };
+
+                let status = PayloadStatusEnum::from(error);
+                return Ok(TreeOutcome::new(PayloadStatus::new(status, latest_valid_hash)))
+            }
+        };
+
+        let block_hash = block.hash();
+        let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_hash);
+        if lowest_buffered_ancestor == block_hash {
+            lowest_buffered_ancestor = block.parent_hash;
+        }
+
+        // now check the block itself
+        if let Some(status) =
+            self.check_invalid_ancestor_with_head(lowest_buffered_ancestor, block_hash)?
+        {
+            return Ok(TreeOutcome::new(status))
+        }
+
+        let status = if self.is_pipeline_active {
+            self.buffer_block_without_senders(block).unwrap();
+            PayloadStatus::from_status(PayloadStatusEnum::Syncing)
+        } else {
+            let mut latest_valid_hash = None;
+            let status = match self.insert_block_without_senders(block).unwrap() {
+                InsertPayloadOk::Inserted(BlockStatus::Valid(_)) |
+                InsertPayloadOk::AlreadySeen(BlockStatus::Valid(_)) => {
+                    latest_valid_hash = Some(block_hash);
+                    PayloadStatusEnum::Valid
+                }
+                InsertPayloadOk::Inserted(BlockStatus::Disconnected { .. }) |
+                InsertPayloadOk::AlreadySeen(BlockStatus::Disconnected { .. }) => {
+                    // TODO: isn't this check redundant?
+                    // check if the block's parent is already marked as invalid
+                    // if let Some(status) = self
+                    //     .check_invalid_ancestor_with_head(block.parent_hash, block.hash())
+                    //     .map_err(|error| {
+                    //         InsertBlockError::new(block, InsertBlockErrorKind::Provider(error))
+                    //     })?
+                    // {
+                    //     return Ok(status)
+                    // }
+
+                    // not known to be invalid, but we don't know anything else
+                    PayloadStatusEnum::Syncing
+                }
+            };
+            PayloadStatus::new(status, latest_valid_hash)
+        };
+
+        let mut outcome = TreeOutcome::new(status);
+        if outcome.outcome.is_valid() {
+            if let Some(target) = self.state.forkchoice_state_tracker.sync_target_state() {
+                if target.head_block_hash == block_hash {
+                    outcome = outcome
+                        .with_event(TreeEvent::TreeAction(TreeAction::MakeCanonical(block_hash)));
+                }
+            }
+        }
+        Ok(outcome)
+    }
+
+    fn on_forkchoice_updated(
+        &mut self,
+        state: ForkchoiceState,
+        attrs: Option<<Self::Engine as EngineTypes>::PayloadAttributes>,
+    ) -> ProviderResult<TreeOutcome<OnForkChoiceUpdated>> {
+        if let Some(on_updated) = self.pre_validate_forkchoice_update(state)? {
+            self.state.forkchoice_state_tracker.set_latest(state, on_updated.forkchoice_status());
+            return Ok(TreeOutcome::new(on_updated))
+        }
+
+        todo!()
+    }
+}
diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml
new file mode 100644
index 000000000..26d504a74
--- /dev/null
+++ b/crates/engine/util/Cargo.toml
@@ -0,0 +1,42 @@
+[package]
+name = "reth-engine-util"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+
+[lints]
+workspace = true
+
+[dependencies]
+# reth
+reth-fs-util.workspace = true
+reth-rpc.workspace = true
+reth-rpc-types.workspace = true
+reth-engine-primitives.workspace = true
+reth-beacon-consensus.workspace = true
+
+# async
+tokio-util.workspace = true
+pin-project.workspace = true
+
+# misc
+eyre.workspace = true
+
+# io
+serde.workspace = true
+serde_json.workspace = true
+
+# tracing
+tracing.workspace = true
+
+# async
+futures.workspace = true
+
+[features]
+optimism = [
+    "reth-rpc/optimism",
+    "reth-beacon-consensus/optimism",
+]
diff --git a/crates/node-core/src/engine/engine_store.rs b/crates/engine/util/src/engine_store.rs
similarity index 100%
rename from crates/node-core/src/engine/engine_store.rs
rename to crates/engine/util/src/engine_store.rs
diff --git a/crates/node-core/src/engine/mod.rs b/crates/engine/util/src/lib.rs
similarity index 100%
rename from crates/node-core/src/engine/mod.rs
rename to crates/engine/util/src/lib.rs
diff --git a/crates/node-core/src/engine/skip_fcu.rs b/crates/engine/util/src/skip_fcu.rs
similarity index 100%
rename from crates/node-core/src/engine/skip_fcu.rs
rename to crates/engine/util/src/skip_fcu.rs
diff --git a/crates/node-core/src/engine/skip_new_payload.rs b/crates/engine/util/src/skip_new_payload.rs
similarity index 100%
rename from crates/node-core/src/engine/skip_new_payload.rs
rename to crates/engine/util/src/skip_new_payload.rs
diff --git a/crates/ethereum-forks/Cargo.toml b/crates/ethereum-forks/Cargo.toml
index 56437c233..cc2c51b2b 100644
--- a/crates/ethereum-forks/Cargo.toml
+++ b/crates/ethereum-forks/Cargo.toml
@@ -23,11 +23,15 @@ crc = "3"
 # misc
 serde = { workspace = true, features = ["derive"], optional = true }
 thiserror-no-std = { workspace = true, default-features = false }
+once_cell.workspace = true
+dyn-clone.workspace = true
+rustc-hash.workspace = true
 
 # arbitrary utils
 arbitrary = { workspace = true, features = ["derive"], optional = true }
 proptest = { workspace = true, optional = true }
 proptest-derive = { workspace = true, optional = true }
+auto_impl.workspace = true
 
 [dev-dependencies]
 arbitrary = { workspace = true, features = ["derive"] }
diff --git a/crates/ethereum-forks/src/chains/dev.rs b/crates/ethereum-forks/src/chains/dev.rs
deleted file mode 100644
index 866be0dd4..000000000
--- a/crates/ethereum-forks/src/chains/dev.rs
+++ /dev/null
@@ -1,23 +0,0 @@
-use crate::{ForkCondition, Hardfork};
-use alloy_primitives::uint;
-
-/// Dev hardforks
-pub const DEV_HARDFORKS: [(Hardfork, ForkCondition); 14] = [
-    (Hardfork::Frontier, ForkCondition::Block(0)),
-    (Hardfork::Homestead, ForkCondition::Block(0)),
-    (Hardfork::Dao, ForkCondition::Block(0)),
-    (Hardfork::Tangerine, ForkCondition::Block(0)),
-    (Hardfork::SpuriousDragon, ForkCondition::Block(0)),
-    (Hardfork::Byzantium, ForkCondition::Block(0)),
-    (Hardfork::Constantinople, ForkCondition::Block(0)),
-    (Hardfork::Petersburg, ForkCondition::Block(0)),
-    (Hardfork::Istanbul, ForkCondition::Block(1561651)),
-
(Hardfork::Berlin, ForkCondition::Block(4460644)), - (Hardfork::London, ForkCondition::Block(5062605)), - ( - Hardfork::Paris, - ForkCondition::TTD { fork_block: None, total_difficulty: uint!(10_790_000_U256) }, - ), - (Hardfork::Shanghai, ForkCondition::Timestamp(1678832736)), - (Hardfork::Cancun, ForkCondition::Timestamp(1705473120)), -]; diff --git a/crates/ethereum-forks/src/chains/ethereum.rs b/crates/ethereum-forks/src/chains/ethereum.rs deleted file mode 100644 index 6db4d95fc..000000000 --- a/crates/ethereum-forks/src/chains/ethereum.rs +++ /dev/null @@ -1,94 +0,0 @@ -use crate::{ForkCondition, Hardfork}; -use alloy_primitives::{uint, U256}; - -/// Ethereum mainnet hardforks -pub const MAINNET_HARDFORKS: [(Hardfork, ForkCondition); 17] = [ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(1150000)), - (Hardfork::Dao, ForkCondition::Block(1920000)), - (Hardfork::Tangerine, ForkCondition::Block(2463000)), - (Hardfork::SpuriousDragon, ForkCondition::Block(2675000)), - (Hardfork::Byzantium, ForkCondition::Block(4370000)), - (Hardfork::Constantinople, ForkCondition::Block(7280000)), - (Hardfork::Petersburg, ForkCondition::Block(7280000)), - (Hardfork::Istanbul, ForkCondition::Block(9069000)), - (Hardfork::MuirGlacier, ForkCondition::Block(9200000)), - (Hardfork::Berlin, ForkCondition::Block(12244000)), - (Hardfork::London, ForkCondition::Block(12965000)), - (Hardfork::ArrowGlacier, ForkCondition::Block(13773000)), - (Hardfork::GrayGlacier, ForkCondition::Block(15050000)), - ( - Hardfork::Paris, - ForkCondition::TTD { - fork_block: None, - total_difficulty: uint!(58_750_000_000_000_000_000_000_U256), - }, - ), - (Hardfork::Shanghai, ForkCondition::Timestamp(1681338455)), - (Hardfork::Cancun, ForkCondition::Timestamp(1710338135)), -]; - -/// Ethereum Goerli hardforks -pub const GOERLI_HARDFORKS: [(Hardfork, ForkCondition); 14] = [ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Dao, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(1561651)), - (Hardfork::Berlin, ForkCondition::Block(4460644)), - (Hardfork::London, ForkCondition::Block(5062605)), - ( - Hardfork::Paris, - ForkCondition::TTD { fork_block: None, total_difficulty: uint!(10_790_000_U256) }, - ), - (Hardfork::Shanghai, ForkCondition::Timestamp(1678832736)), - (Hardfork::Cancun, ForkCondition::Timestamp(1705473120)), -]; - -/// Ethereum Sepolia hardforks -pub const SEPOLIA_HARDFORKS: [(Hardfork, ForkCondition); 15] = [ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Dao, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(0)), - (Hardfork::London, ForkCondition::Block(0)), - ( - Hardfork::Paris, - ForkCondition::TTD { - fork_block: Some(1735371), - total_difficulty: uint!(17_000_000_000_000_000_U256), - }, - ), - 
(Hardfork::Shanghai, ForkCondition::Timestamp(1677557088)), - (Hardfork::Cancun, ForkCondition::Timestamp(1706655072)), -]; - -/// Ethereum Holesky hardforks -pub const HOLESKY_HARDFORKS: [(Hardfork, ForkCondition); 15] = [ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Dao, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(0)), - (Hardfork::London, ForkCondition::Block(0)), - (Hardfork::Paris, ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }), - (Hardfork::Shanghai, ForkCondition::Timestamp(1696000704)), - (Hardfork::Cancun, ForkCondition::Timestamp(1707305664)), -]; diff --git a/crates/ethereum-forks/src/chains/mod.rs b/crates/ethereum-forks/src/chains/mod.rs deleted file mode 100644 index ef775777f..000000000 --- a/crates/ethereum-forks/src/chains/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -/// Ethereum chains -pub mod ethereum; - -/// Optimism chains -#[cfg(feature = "optimism")] -pub mod optimism; - -/// Dev chain -pub mod dev; diff --git a/crates/ethereum-forks/src/chains/optimism.rs b/crates/ethereum-forks/src/chains/optimism.rs deleted file mode 100644 index 37af4a19f..000000000 --- a/crates/ethereum-forks/src/chains/optimism.rs +++ /dev/null @@ -1,105 +0,0 @@ -use crate::{ForkCondition, Hardfork}; -use alloy_primitives::U256; - -/// Optimism mainnet hardforks -pub const OP_MAINNET_HARDFORKS: [(Hardfork, ForkCondition); 21] = [ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(3950000)), - (Hardfork::London, ForkCondition::Block(105235063)), - (Hardfork::ArrowGlacier, ForkCondition::Block(105235063)), - (Hardfork::GrayGlacier, ForkCondition::Block(105235063)), - ( - Hardfork::Paris, - ForkCondition::TTD { fork_block: Some(105235063), total_difficulty: U256::ZERO }, - ), - (Hardfork::Bedrock, ForkCondition::Block(105235063)), - (Hardfork::Regolith, ForkCondition::Timestamp(0)), - (Hardfork::Shanghai, ForkCondition::Timestamp(1704992401)), - (Hardfork::Canyon, ForkCondition::Timestamp(1704992401)), - (Hardfork::Cancun, ForkCondition::Timestamp(1710374401)), - (Hardfork::Ecotone, ForkCondition::Timestamp(1710374401)), - (Hardfork::Fjord, ForkCondition::Timestamp(1720627201)), -]; - -/// Optimism Sepolia hardforks -pub const OP_SEPOLIA_HARDFORKS: [(Hardfork, ForkCondition); 21] = [ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - 
(Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(0)), - (Hardfork::London, ForkCondition::Block(0)), - (Hardfork::ArrowGlacier, ForkCondition::Block(0)), - (Hardfork::GrayGlacier, ForkCondition::Block(0)), - (Hardfork::Paris, ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }), - (Hardfork::Bedrock, ForkCondition::Block(0)), - (Hardfork::Regolith, ForkCondition::Timestamp(0)), - (Hardfork::Shanghai, ForkCondition::Timestamp(1699981200)), - (Hardfork::Canyon, ForkCondition::Timestamp(1699981200)), - (Hardfork::Cancun, ForkCondition::Timestamp(1708534800)), - (Hardfork::Ecotone, ForkCondition::Timestamp(1708534800)), - (Hardfork::Fjord, ForkCondition::Timestamp(1716998400)), -]; - -/// Base Sepolia hardforks -pub const BASE_SEPOLIA_HARDFORKS: [(Hardfork, ForkCondition); 21] = [ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(0)), - (Hardfork::London, ForkCondition::Block(0)), - (Hardfork::ArrowGlacier, ForkCondition::Block(0)), - (Hardfork::GrayGlacier, ForkCondition::Block(0)), - (Hardfork::Paris, ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }), - (Hardfork::Bedrock, ForkCondition::Block(0)), - (Hardfork::Regolith, ForkCondition::Timestamp(0)), - (Hardfork::Shanghai, ForkCondition::Timestamp(1699981200)), - (Hardfork::Canyon, ForkCondition::Timestamp(1699981200)), - (Hardfork::Cancun, ForkCondition::Timestamp(1708534800)), - (Hardfork::Ecotone, ForkCondition::Timestamp(1708534800)), - (Hardfork::Fjord, ForkCondition::Timestamp(1716998400)), -]; - -/// Base Mainnet hardforks -pub const BASE_MAINNET_HARDFORKS: [(Hardfork, ForkCondition); 21] = [ - (Hardfork::Frontier, ForkCondition::Block(0)), - (Hardfork::Homestead, ForkCondition::Block(0)), - (Hardfork::Tangerine, ForkCondition::Block(0)), - (Hardfork::SpuriousDragon, ForkCondition::Block(0)), - (Hardfork::Byzantium, ForkCondition::Block(0)), - (Hardfork::Constantinople, ForkCondition::Block(0)), - (Hardfork::Petersburg, ForkCondition::Block(0)), - (Hardfork::Istanbul, ForkCondition::Block(0)), - (Hardfork::MuirGlacier, ForkCondition::Block(0)), - (Hardfork::Berlin, ForkCondition::Block(0)), - (Hardfork::London, ForkCondition::Block(0)), - (Hardfork::ArrowGlacier, ForkCondition::Block(0)), - (Hardfork::GrayGlacier, ForkCondition::Block(0)), - (Hardfork::Paris, ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }), - (Hardfork::Bedrock, ForkCondition::Block(0)), - (Hardfork::Regolith, ForkCondition::Timestamp(0)), - (Hardfork::Shanghai, ForkCondition::Timestamp(1704992401)), - (Hardfork::Canyon, ForkCondition::Timestamp(1704992401)), - (Hardfork::Cancun, ForkCondition::Timestamp(1710374401)), - (Hardfork::Ecotone, ForkCondition::Timestamp(1710374401)), - (Hardfork::Fjord, ForkCondition::Timestamp(1720627201)), -]; diff --git a/crates/ethereum-forks/src/display.rs b/crates/ethereum-forks/src/display.rs index d68f8cd95..5b459b8de 100644 --- a/crates/ethereum-forks/src/display.rs +++ b/crates/ethereum-forks/src/display.rs @@ -6,9 +6,7 @@ use alloc::{ vec::Vec, }; -use 
crate::{ForkCondition, Hardfork}; -#[cfg(feature = "std")] -use std::collections::BTreeMap; +use crate::{hardforks::Hardforks, ForkCondition}; /// A container to pretty-print a hardfork. /// @@ -146,27 +144,22 @@ impl core::fmt::Display for DisplayHardforks { impl DisplayHardforks { /// Creates a new [`DisplayHardforks`] from an iterator of hardforks. - pub fn new( - hardforks: &BTreeMap, - known_paris_block: Option, - ) -> Self { + pub fn new(hardforks: &H, known_paris_block: Option) -> Self { let mut pre_merge = Vec::new(); let mut with_merge = Vec::new(); let mut post_merge = Vec::new(); - for (fork, condition) in hardforks { + for (fork, condition) in hardforks.forks_iter() { let mut display_fork = - DisplayFork { name: fork.to_string(), activated_at: *condition, eip: None }; + DisplayFork { name: fork.name().to_string(), activated_at: condition, eip: None }; match condition { ForkCondition::Block(_) => { pre_merge.push(display_fork); } ForkCondition::TTD { total_difficulty, .. } => { - display_fork.activated_at = ForkCondition::TTD { - fork_block: known_paris_block, - total_difficulty: *total_difficulty, - }; + display_fork.activated_at = + ForkCondition::TTD { fork_block: known_paris_block, total_difficulty }; with_merge.push(display_fork); } ForkCondition::Timestamp(_) => { diff --git a/crates/ethereum-forks/src/hardfork.rs b/crates/ethereum-forks/src/hardfork.rs deleted file mode 100644 index 1d66df0a6..000000000 --- a/crates/ethereum-forks/src/hardfork.rs +++ /dev/null @@ -1,750 +0,0 @@ -use alloy_chains::Chain; -use core::{ - fmt, - fmt::{Display, Formatter}, - str::FromStr, -}; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -#[cfg(not(feature = "std"))] -use alloc::{format, string::String}; - -/// Represents the consensus type of a blockchain fork. -/// -/// This enum defines two variants: `ProofOfWork` for hardforks that use a proof-of-work consensus -/// mechanism, and `ProofOfStake` for hardforks that use a proof-of-stake consensus mechanism. -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum ConsensusType { - /// Indicates a proof-of-work consensus mechanism. - ProofOfWork, - /// Indicates a proof-of-stake consensus mechanism. - ProofOfStake, -} - -/// The name of an Ethereum hardfork. -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)] -#[non_exhaustive] -pub enum Hardfork { - /// Frontier: . - Frontier, - /// Homestead: . - Homestead, - /// The DAO fork: . - Dao, - /// Tangerine: . - Tangerine, - /// Spurious Dragon: . - SpuriousDragon, - /// Byzantium: . - Byzantium, - /// Constantinople: . - Constantinople, - /// Petersburg: . - Petersburg, - /// Istanbul: . - Istanbul, - /// Muir Glacier: . - MuirGlacier, - /// BSC `Ramanujan` hardfork - Ramanujan, - /// BSC `Niels` hardfork - Niels, - /// BSC `MirrorSync` hardfork - MirrorSync, - /// BSC `Bruno` hardfork - Bruno, - /// BSC `Euler` hardfork - Euler, - /// BSC `Nano` hardfork - Nano, - /// BSC `Moran` hardfork - Moran, - /// BSC `Gibbs` hardfork - Gibbs, - /// BSC `Planck` hardfork - Planck, - /// BSC `Luban` hardfork - Luban, - /// BSC `Plato` hardfork - Plato, - /// Berlin: . - Berlin, - /// London: . - London, - /// BSC `Hertz` hardfork - Hertz, - /// BSC `HertzFix` hardfork - HertzFix, - /// Arrow Glacier: . - ArrowGlacier, - /// Gray Glacier: . - GrayGlacier, - /// Paris: . - Paris, - /// Bedrock: . - #[cfg(feature = "optimism")] - Bedrock, - /// Regolith: . 
- #[cfg(feature = "optimism")] - Regolith, - /// Shanghai: . - Shanghai, - /// BSC `Kepler` hardfork - Kepler, - /// BSC `Feynman` hardfork - Feynman, - /// BSC `FeynmanFix` hardfork - FeynmanFix, - /// `Fermat` - #[cfg(all(feature = "optimism", feature = "opbnb"))] - Fermat, - /// Canyon: - /// . - #[cfg(feature = "optimism")] - Canyon, - // ArbOS11, - /// Cancun. - Cancun, - /// Ecotone: . - #[cfg(feature = "optimism")] - Ecotone, - /// `PreContractForkBlock` - #[cfg(all(feature = "optimism", feature = "opbnb"))] - PreContractForkBlock, - // ArbOS20Atlas, - /// BSC `Haber` hardfork - Haber, - /// BSC `Haber` hardfork - HaberFix, - - // Upcoming - /// Prague: - Prague, - /// Fjord: - #[cfg(feature = "optimism")] - Fjord, -} - -impl Hardfork { - /// Retrieves the consensus type for the specified hardfork. - pub fn consensus_type(&self) -> ConsensusType { - if *self >= Self::Paris { - ConsensusType::ProofOfStake - } else { - ConsensusType::ProofOfWork - } - } - - /// Checks if the hardfork uses Proof of Stake consensus. - pub fn is_proof_of_stake(&self) -> bool { - matches!(self.consensus_type(), ConsensusType::ProofOfStake) - } - - /// Checks if the hardfork uses Proof of Work consensus. - pub fn is_proof_of_work(&self) -> bool { - matches!(self.consensus_type(), ConsensusType::ProofOfWork) - } - - /// Retrieves the activation block for the specified hardfork on the given chain. - pub fn activation_block(&self, chain: Chain) -> Option { - if chain == Chain::mainnet() { - return self.mainnet_activation_block(); - } - if chain == Chain::sepolia() { - return self.sepolia_activation_block(); - } - if chain == Chain::holesky() { - return self.holesky_activation_block(); - } - - #[cfg(feature = "optimism")] - { - if chain == Chain::base_sepolia() { - return self.base_sepolia_activation_block(); - } - if chain == Chain::base_mainnet() { - return self.base_mainnet_activation_block(); - } - } - - None - } - - /// Retrieves the activation block for the specified hardfork on the Ethereum mainnet. - pub const fn mainnet_activation_block(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier => Some(0), - Self::Homestead => Some(1150000), - Self::Dao => Some(1920000), - Self::Tangerine => Some(2463000), - Self::SpuriousDragon => Some(2675000), - Self::Byzantium => Some(4370000), - Self::Constantinople | Self::Petersburg => Some(7280000), - Self::Istanbul => Some(9069000), - Self::MuirGlacier => Some(9200000), - Self::Berlin => Some(12244000), - Self::London => Some(12965000), - Self::ArrowGlacier => Some(13773000), - Self::GrayGlacier => Some(15050000), - Self::Paris => Some(15537394), - Self::Shanghai => Some(17034870), - Self::Cancun => Some(19426587), - - _ => None, - } - } - - /// Retrieves the activation block for the specified hardfork on the Sepolia testnet. - pub const fn sepolia_activation_block(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Paris => Some(1735371), - Self::Shanghai => Some(2990908), - Self::Cancun => Some(5187023), - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier => Some(0), - _ => None, - } - } - - /// Retrieves the activation block for the specified hardfork on the Arbitrum Sepolia testnet. 
- pub const fn arbitrum_sepolia_activation_block(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris => Some(0), - Self::Shanghai => Some(10653737), - // Hardfork::ArbOS11 => Some(10653737), - Self::Cancun => Some(18683405), - // Hardfork::ArbOS20Atlas => Some(18683405), - _ => None, - } - } - - /// Retrieves the activation block for the specified hardfork on the Arbitrum One mainnet. - pub const fn arbitrum_activation_block(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris => Some(0), - Self::Shanghai => Some(184097479), - // Hardfork::ArbOS11 => Some(184097479), - Self::Cancun => Some(190301729), - // Hardfork::ArbOS20Atlas => Some(190301729), - _ => None, - } - } - - /// Retrieves the activation block for the specified hardfork on the Base Sepolia testnet. - #[cfg(feature = "optimism")] - pub const fn base_sepolia_activation_block(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris | - Self::Bedrock | - Self::Regolith => Some(0), - Self::Shanghai | Self::Canyon => Some(2106456), - Self::Cancun | Self::Ecotone => Some(6383256), - Self::Fjord => Some(10615056), - _ => None, - } - } - - /// Retrieves the activation block for the specified hardfork on the Base mainnet. - #[cfg(feature = "optimism")] - pub const fn base_mainnet_activation_block(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris | - Self::Bedrock | - Self::Regolith => Some(0), - Self::Shanghai | Self::Canyon => Some(9101527), - Self::Cancun | Self::Ecotone => Some(11188936), - _ => None, - } - } - - /// Retrieves the activation block for the specified hardfork on the holesky testnet. - const fn holesky_activation_block(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris => Some(0), - Self::Shanghai => Some(6698), - Self::Cancun => Some(894733), - _ => None, - } - } - - /// Retrieves the activation timestamp for the specified hardfork on the given chain. 
- pub fn activation_timestamp(&self, chain: Chain) -> Option { - if chain == Chain::mainnet() { - return self.mainnet_activation_timestamp(); - } - if chain == Chain::sepolia() { - return self.sepolia_activation_timestamp(); - } - if chain == Chain::holesky() { - return self.holesky_activation_timestamp(); - } - #[cfg(feature = "optimism")] - { - if chain == Chain::base_sepolia() { - return self.base_sepolia_activation_timestamp(); - } - if chain == Chain::base_mainnet() { - return self.base_mainnet_activation_timestamp(); - } - } - - None - } - - /// Retrieves the activation timestamp for the specified hardfork on the Ethereum mainnet. - pub const fn mainnet_activation_timestamp(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier => Some(1438226773), - Self::Homestead => Some(1457938193), - Self::Dao => Some(1468977640), - Self::Tangerine => Some(1476753571), - Self::SpuriousDragon => Some(1479788144), - Self::Byzantium => Some(1508131331), - Self::Constantinople | Self::Petersburg => Some(1551340324), - Self::Istanbul => Some(1575807909), - Self::MuirGlacier => Some(1577953849), - Self::Berlin => Some(1618481223), - Self::London => Some(1628166822), - Self::ArrowGlacier => Some(1639036523), - Self::GrayGlacier => Some(1656586444), - Self::Paris => Some(1663224162), - Self::Shanghai => Some(1681338455), - Self::Cancun => Some(1710338135), - - // upcoming hardforks - _ => None, - } - } - - /// Retrieves the activation timestamp for the specified hardfork on the Sepolia testnet. - pub const fn sepolia_activation_timestamp(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris => Some(1633267481), - Self::Shanghai => Some(1677557088), - Self::Cancun => Some(1706655072), - _ => None, - } - } - - /// Retrieves the activation timestamp for the specified hardfork on the Holesky testnet. - pub const fn holesky_activation_timestamp(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Shanghai => Some(1696000704), - Self::Cancun => Some(1707305664), - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris => Some(1695902100), - _ => None, - } - } - - /// Retrieves the activation timestamp for the specified hardfork on the Arbitrum Sepolia - /// testnet. - pub const fn arbitrum_sepolia_activation_timestamp(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris => Some(1692726996), - Self::Shanghai => Some(1706634000), - // Hardfork::ArbOS11 => Some(1706634000), - Self::Cancun => Some(1709229600), - // Hardfork::ArbOS20Atlas => Some(1709229600), - _ => None, - } - } - - /// Retrieves the activation timestamp for the specified hardfork on the Arbitrum One mainnet. 
- pub const fn arbitrum_activation_timestamp(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris => Some(1622240000), - Self::Shanghai => Some(1708804873), - // Hardfork::ArbOS11 => Some(1708804873), - Self::Cancun => Some(1710424089), - // Hardfork::ArbOS20Atlas => Some(1710424089), - _ => None, - } - } - - /// Retrieves the activation timestamp for the specified hardfork on the Base Sepolia testnet. - #[cfg(feature = "optimism")] - pub const fn base_sepolia_activation_timestamp(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris | - Self::Bedrock | - Self::Regolith => Some(1695768288), - Self::Shanghai | Self::Canyon => Some(1699981200), - Self::Cancun | Self::Ecotone => Some(1708534800), - Self::Fjord => Some(1716998400), - _ => None, - } - } - - /// Retrieves the activation timestamp for the specified hardfork on the Base mainnet. - #[cfg(feature = "optimism")] - pub const fn base_mainnet_activation_timestamp(&self) -> Option { - #[allow(unreachable_patterns)] - match self { - Self::Frontier | - Self::Homestead | - Self::Dao | - Self::Tangerine | - Self::SpuriousDragon | - Self::Byzantium | - Self::Constantinople | - Self::Petersburg | - Self::Istanbul | - Self::MuirGlacier | - Self::Berlin | - Self::London | - Self::ArrowGlacier | - Self::GrayGlacier | - Self::Paris | - Self::Bedrock | - Self::Regolith => Some(1686789347), - Self::Shanghai | Self::Canyon => Some(1704992401), - Self::Cancun | Self::Ecotone => Some(1710374401), - Self::Fjord => Some(1720627201), - _ => None, - } - } -} - -impl FromStr for Hardfork { - type Err = String; - - fn from_str(s: &str) -> Result { - Ok(match s.to_lowercase().as_str() { - "frontier" => Self::Frontier, - "homestead" => Self::Homestead, - "dao" => Self::Dao, - "tangerine" => Self::Tangerine, - "spuriousdragon" => Self::SpuriousDragon, - "byzantium" => Self::Byzantium, - "constantinople" => Self::Constantinople, - "petersburg" => Self::Petersburg, - "istanbul" => Self::Istanbul, - "muirglacier" => Self::MuirGlacier, - "berlin" => Self::Berlin, - "london" => Self::London, - "arrowglacier" => Self::ArrowGlacier, - "grayglacier" => Self::GrayGlacier, - "paris" => Self::Paris, - "shanghai" => Self::Shanghai, - "cancun" => Self::Cancun, - #[cfg(feature = "optimism")] - "bedrock" => Self::Bedrock, - #[cfg(feature = "optimism")] - "regolith" => Self::Regolith, - #[cfg(all(feature = "optimism", feature = "opbnb"))] - "precontractforkblock" => Self::PreContractForkBlock, - #[cfg(all(feature = "optimism", feature = "opbnb"))] - "fermat" => Self::Fermat, - #[cfg(all(feature = "optimism", feature = "opbnb"))] - "haber" => Self::Haber, - #[cfg(feature = "optimism")] - "canyon" => Self::Canyon, - #[cfg(feature = "optimism")] - "ecotone" => Self::Ecotone, - #[cfg(feature = "optimism")] - "fjord" => Self::Fjord, - "prague" => Self::Prague, - // "arbos11" => Self::ArbOS11, - // "arbos20atlas" => Self::ArbOS20Atlas, - _ => return Err(format!("Unknown 
hardfork: {s}")), - }) - } -} - -impl Display for Hardfork { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{self:?}") - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn check_hardfork_from_str() { - let hardfork_str = [ - "frOntier", - "homEstead", - "dao", - "tAngerIne", - "spurIousdrAgon", - "byzAntium", - "constantinople", - "petersburg", - "istanbul", - "muirglacier", - "bErlin", - "lonDon", - "arrowglacier", - "grayglacier", - "PARIS", - "ShAnGhAI", - "CaNcUn", - "PrAguE", - ]; - let expected_hardforks = [ - Hardfork::Frontier, - Hardfork::Homestead, - Hardfork::Dao, - Hardfork::Tangerine, - Hardfork::SpuriousDragon, - Hardfork::Byzantium, - Hardfork::Constantinople, - Hardfork::Petersburg, - Hardfork::Istanbul, - Hardfork::MuirGlacier, - Hardfork::Berlin, - Hardfork::London, - Hardfork::ArrowGlacier, - Hardfork::GrayGlacier, - Hardfork::Paris, - Hardfork::Shanghai, - Hardfork::Cancun, - Hardfork::Prague, - ]; - - let hardforks: Vec = - hardfork_str.iter().map(|h| Hardfork::from_str(h).unwrap()).collect(); - - assert_eq!(hardforks, expected_hardforks); - } - - #[test] - #[cfg(feature = "optimism")] - fn check_op_hardfork_from_str() { - let hardfork_str = ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD"]; - let expected_hardforks = [ - Hardfork::Bedrock, - Hardfork::Regolith, - Hardfork::Canyon, - Hardfork::Ecotone, - Hardfork::Fjord, - ]; - - let hardforks: Vec = - hardfork_str.iter().map(|h| Hardfork::from_str(h).unwrap()).collect(); - - assert_eq!(hardforks, expected_hardforks); - } - - #[test] - fn check_nonexistent_hardfork_from_str() { - assert!(Hardfork::from_str("not a hardfork").is_err()); - } - - #[test] - fn check_consensus_type() { - let pow_hardforks = [ - Hardfork::Frontier, - Hardfork::Homestead, - Hardfork::Dao, - Hardfork::Tangerine, - Hardfork::SpuriousDragon, - Hardfork::Byzantium, - Hardfork::Constantinople, - Hardfork::Petersburg, - Hardfork::Istanbul, - Hardfork::MuirGlacier, - Hardfork::Berlin, - Hardfork::London, - Hardfork::ArrowGlacier, - Hardfork::GrayGlacier, - ]; - - let pos_hardforks = [Hardfork::Paris, Hardfork::Shanghai, Hardfork::Cancun]; - - #[cfg(feature = "optimism")] - let op_hardforks = [ - Hardfork::Bedrock, - Hardfork::Regolith, - Hardfork::Canyon, - Hardfork::Ecotone, - Hardfork::Fjord, - ]; - - for hardfork in &pow_hardforks { - assert_eq!(hardfork.consensus_type(), ConsensusType::ProofOfWork); - assert!(!hardfork.is_proof_of_stake()); - assert!(hardfork.is_proof_of_work()); - } - - for hardfork in &pos_hardforks { - assert_eq!(hardfork.consensus_type(), ConsensusType::ProofOfStake); - assert!(hardfork.is_proof_of_stake()); - assert!(!hardfork.is_proof_of_work()); - } - - #[cfg(feature = "optimism")] - for hardfork in &op_hardforks { - assert_eq!(hardfork.consensus_type(), ConsensusType::ProofOfStake); - assert!(hardfork.is_proof_of_stake()); - assert!(!hardfork.is_proof_of_work()); - } - } -} diff --git a/crates/ethereum-forks/src/hardfork/bsc.rs b/crates/ethereum-forks/src/hardfork/bsc.rs new file mode 100644 index 000000000..060ed5e20 --- /dev/null +++ b/crates/ethereum-forks/src/hardfork/bsc.rs @@ -0,0 +1,290 @@ +use crate::{hardfork, ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; +use alloy_chains::Chain; +use core::{ + any::Any, + fmt::{self, Display, Formatter}, + str::FromStr, +}; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +hardfork!( + /// The name of a bsc hardfork. 
+ /// + /// When building a list of hardforks for a chain, it's still expected to mix with [`EthereumHardfork`]. + BscHardfork { + /// BSC `Ramanujan` hardfork + Ramanujan, + /// BSC `Niels` hardfork + Niels, + /// BSC `MirrorSync` hardfork + MirrorSync, + /// BSC `Bruno` hardfork + Bruno, + /// BSC `Euler` hardfork + Euler, + /// BSC `Nano` hardfork + Nano, + /// BSC `Moran` hardfork + Moran, + /// BSC `Gibbs` hardfork + Gibbs, + /// BSC `Planck` hardfork + Planck, + /// BSC `Luban` hardfork + Luban, + /// BSC `Plato` hardfork + Plato, + /// BSC `Hertz` hardfork + Hertz, + /// BSC `HertzFix` hardfork + HertzFix, + /// BSC `Kepler` hardfork + Kepler, + /// BSC `Feynman` hardfork + Feynman, + /// BSC `FeynmanFix` hardfork + FeynmanFix, + /// BSC `Haber` hardfork + Haber, + /// BSC `HaberFix` hardfork + HaberFix, + } +); + +impl BscHardfork { + /// Retrieves the activation block for the specified hardfork on the given chain. + pub fn activation_block(self, fork: H, chain: Chain) -> Option { + if chain == Chain::bsc_mainnet() { + return Self::bsc_mainnet_activation_block(fork) + } + if chain == Chain::bsc_testnet() { + return Self::bsc_testnet_activation_block(fork) + } + + None + } + + /// Retrieves the activation timestamp for the specified hardfork on the given chain. + pub fn activation_timestamp(self, fork: H, chain: Chain) -> Option { + if chain == Chain::bsc_mainnet() { + return Self::bsc_mainnet_activation_timestamp(fork) + } + if chain == Chain::bsc_testnet() { + return Self::bsc_testnet_activation_timestamp(fork) + } + + None + } + + /// Retrieves the activation block for the specified hardfork on the BSC mainnet. + pub fn bsc_mainnet_activation_block(fork: H) -> Option { + match_hardfork( + fork, + |fork| match fork { + EthereumHardfork::Frontier | + EthereumHardfork::Homestead | + EthereumHardfork::Tangerine | + EthereumHardfork::SpuriousDragon | + EthereumHardfork::Byzantium | + EthereumHardfork::Constantinople | + EthereumHardfork::Petersburg | + EthereumHardfork::Istanbul | + EthereumHardfork::MuirGlacier => Some(0), + EthereumHardfork::Berlin | EthereumHardfork::London => Some(31302048), + _ => None, + }, + |fork| match fork { + Self::Ramanujan | Self::Niels => Some(0), + Self::MirrorSync => Some(5184000), + Self::Bruno => Some(13082000), + Self::Euler => Some(18907621), + Self::Nano => Some(21962149), + Self::Moran => Some(22107423), + Self::Gibbs => Some(23846001), + Self::Planck => Some(27281024), + Self::Luban => Some(29020050), + Self::Plato => Some(30720096), + Self::Hertz => Some(31302048), + Self::HertzFix => Some(34140700), + _ => None, + }, + ) + } + + /// Retrieves the activation block for the specified hardfork on the BSC testnet. 
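Not part of the patch: `bsc_mainnet_activation_block` above goes through `match_hardfork` (defined near the bottom of this file), which dispatches on the concrete type behind `dyn Hardfork` via `Any`, because trait objects cannot be matched on directly. A minimal sketch of that pattern, with stand-in enums and one block number taken from the table above:

use std::any::Any;

enum Eth {
    Berlin,
}

enum Bsc {
    Hertz,
}

// Try the Ethereum table first, then the BSC table, mirroring the two
// closures passed to `match_hardfork`.
fn activation_block(fork: &dyn Any) -> Option<u64> {
    if let Some(eth) = fork.downcast_ref::<Eth>() {
        return match eth {
            Eth::Berlin => Some(31302048),
        };
    }
    fork.downcast_ref::<Bsc>().map(|bsc| match bsc {
        Bsc::Hertz => 31302048,
    })
}

fn main() {
    assert_eq!(activation_block(&Eth::Berlin), Some(31302048));
    assert_eq!(activation_block(&Bsc::Hertz), Some(31302048));
    // Any other type falls through both downcasts.
    assert_eq!(activation_block(&0u8), None);
}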
+ pub fn bsc_testnet_activation_block(fork: H) -> Option { + match_hardfork( + fork, + |fork| match fork { + EthereumHardfork::Frontier | + EthereumHardfork::Homestead | + EthereumHardfork::Tangerine | + EthereumHardfork::SpuriousDragon | + EthereumHardfork::Byzantium | + EthereumHardfork::Constantinople | + EthereumHardfork::Petersburg | + EthereumHardfork::Istanbul | + EthereumHardfork::MuirGlacier => Some(0), + EthereumHardfork::Berlin | EthereumHardfork::London => Some(31103030), + _ => None, + }, + |fork| match fork { + Self::Ramanujan => Some(1010000), + Self::Niels => Some(1014369), + Self::MirrorSync => Some(5582500), + Self::Bruno => Some(13837000), + Self::Euler => Some(19203503), + Self::Gibbs => Some(22800220), + Self::Nano => Some(23482428), + Self::Moran => Some(23603940), + Self::Planck => Some(28196022), + Self::Luban => Some(29295050), + Self::Plato => Some(29861024), + Self::Hertz => Some(31103030), + Self::HertzFix => Some(35682300), + _ => None, + }, + ) + } + + /// Retrieves the activation timestamp for the specified hardfork on the BSC mainnet. + pub fn bsc_mainnet_activation_timestamp(fork: H) -> Option { + match_hardfork( + fork, + |fork| match fork { + EthereumHardfork::Shanghai => Some(1705996800), + EthereumHardfork::Cancun => Some(1718863500), + _ => None, + }, + |fork| match fork { + Self::Kepler => Some(1705996800), + Self::Feynman | Self::FeynmanFix => Some(1713419340), + Self::Haber => Some(1718863500), + _ => None, + }, + ) + } + + /// Retrieves the activation timestamp for the specified hardfork on the BSC testnet. + pub fn bsc_testnet_activation_timestamp(fork: H) -> Option { + match_hardfork( + fork, + |fork| match fork { + EthereumHardfork::Shanghai => Some(1702972800), + EthereumHardfork::Cancun => Some(1713330442), + _ => None, + }, + |fork| match fork { + Self::Kepler => Some(1702972800), + Self::Feynman => Some(1710136800), + Self::FeynmanFix => Some(1711342800), + Self::Haber => Some(1716962820), + Self::HaberFix => Some(1719986788), + _ => None, + }, + ) + } + + /// Bsc mainnet list of hardforks. 
+ pub fn bsc_mainnet() -> ChainHardforks { + ChainHardforks::new(vec![ + (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)), + (Self::Ramanujan.boxed(), ForkCondition::Block(0)), + (Self::Niels.boxed(), ForkCondition::Block(0)), + (Self::MirrorSync.boxed(), ForkCondition::Block(5184000)), + (Self::Bruno.boxed(), ForkCondition::Block(13082000)), + (Self::Euler.boxed(), ForkCondition::Block(18907621)), + (Self::Nano.boxed(), ForkCondition::Block(21962149)), + (Self::Moran.boxed(), ForkCondition::Block(22107423)), + (Self::Gibbs.boxed(), ForkCondition::Block(23846001)), + (Self::Planck.boxed(), ForkCondition::Block(27281024)), + (Self::Luban.boxed(), ForkCondition::Block(29020050)), + (Self::Plato.boxed(), ForkCondition::Block(30720096)), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(31302048)), + (EthereumHardfork::London.boxed(), ForkCondition::Block(31302048)), + (Self::Hertz.boxed(), ForkCondition::Block(31302048)), + (Self::HertzFix.boxed(), ForkCondition::Block(34140700)), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1705996800)), + (Self::Kepler.boxed(), ForkCondition::Timestamp(1705996800)), + (Self::Feynman.boxed(), ForkCondition::Timestamp(1713419340)), + (Self::FeynmanFix.boxed(), ForkCondition::Timestamp(1713419340)), + (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1718863500)), + (Self::Haber.boxed(), ForkCondition::Timestamp(1718863500)), + ]) + } + + /// Bsc testnet list of hardforks. 
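An editorial aside, not from the patch: the list above is kept in activation order, so Berlin, London, and Hertz, which all activate at block 31302048 on BSC mainnet, sit next to each other, and an "is this fork active" query reduces to comparing against the stored condition. A sketch with `Cond` standing in for `ForkCondition`:

#[derive(Clone, Copy)]
enum Cond {
    Block(u64),
    Timestamp(u64),
}

fn active_at_block(cond: Cond, block: u64) -> bool {
    match cond {
        Cond::Block(activation) => block >= activation,
        // Time-gated forks cannot be decided from a block number alone.
        Cond::Timestamp(_) => false,
    }
}

fn main() {
    // Mirrors the BSC mainnet entries above: Berlin, London, and Hertz
    // all activate in the same block.
    let forks = [
        ("Berlin", Cond::Block(31302048)),
        ("London", Cond::Block(31302048)),
        ("Hertz", Cond::Block(31302048)),
    ];
    assert!(forks.iter().all(|&(_, cond)| active_at_block(cond, 31302048)));
    assert!(!active_at_block(Cond::Block(31302048), 31302047));
    // Kepler is timestamp-gated (1705996800), so a block-number query says no.
    assert!(!active_at_block(Cond::Timestamp(1705996800), 31302048));
}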
+ pub fn bsc_testnet() -> ChainHardforks { + ChainHardforks::new(vec![ + (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)), + (Self::Ramanujan.boxed(), ForkCondition::Block(1010000)), + (Self::Niels.boxed(), ForkCondition::Block(1014369)), + (Self::MirrorSync.boxed(), ForkCondition::Block(5582500)), + (Self::Bruno.boxed(), ForkCondition::Block(13837000)), + (Self::Euler.boxed(), ForkCondition::Block(19203503)), + (Self::Gibbs.boxed(), ForkCondition::Block(22800220)), + (Self::Nano.boxed(), ForkCondition::Block(23482428)), + (Self::Moran.boxed(), ForkCondition::Block(23603940)), + (Self::Planck.boxed(), ForkCondition::Block(28196022)), + (Self::Luban.boxed(), ForkCondition::Block(29295050)), + (Self::Plato.boxed(), ForkCondition::Block(29861024)), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(31103030)), + (EthereumHardfork::London.boxed(), ForkCondition::Block(31103030)), + (Self::Hertz.boxed(), ForkCondition::Block(31103030)), + (Self::HertzFix.boxed(), ForkCondition::Block(35682300)), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1702972800)), + (Self::Kepler.boxed(), ForkCondition::Timestamp(1702972800)), + (Self::Feynman.boxed(), ForkCondition::Timestamp(1710136800)), + (Self::FeynmanFix.boxed(), ForkCondition::Timestamp(1711342800)), + (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1713330442)), + (Self::Haber.boxed(), ForkCondition::Timestamp(1716962820)), + (Self::HaberFix.boxed(), ForkCondition::Timestamp(1719986788)), + ]) + } +} + +/// Match helper method since it's not possible to match on `dyn Hardfork` +fn match_hardfork(fork: H, hardfork_fn: HF, bsc_hardfork_fn: BHF) -> Option +where + H: Hardfork, + HF: Fn(&EthereumHardfork) -> Option, + BHF: Fn(&BscHardfork) -> Option, +{ + let fork: &dyn Any = ⋔ + if let Some(fork) = fork.downcast_ref::() { + return hardfork_fn(fork) + } + fork.downcast_ref::().and_then(bsc_hardfork_fn) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_match_hardfork() { + assert_eq!(BscHardfork::bsc_mainnet_activation_block(EthereumHardfork::Cancun), None); + assert_eq!( + BscHardfork::bsc_mainnet_activation_timestamp(EthereumHardfork::Cancun), + Some(1718863500) + ); + assert_eq!(BscHardfork::bsc_mainnet_activation_timestamp(BscHardfork::HaberFix), None); + } +} diff --git a/crates/ethereum-forks/src/hardfork/dev.rs b/crates/ethereum-forks/src/hardfork/dev.rs new file mode 100644 index 000000000..4b422141b --- /dev/null +++ b/crates/ethereum-forks/src/hardfork/dev.rs @@ -0,0 +1,32 @@ +use crate::{ChainHardforks, EthereumHardfork, ForkCondition}; +use alloy_primitives::U256; +use once_cell::sync::Lazy; + +/// Dev hardforks +pub static DEV_HARDFORKS: Lazy = Lazy::new(|| { + ChainHardforks::new(vec![ + (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Dao.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), + 
(EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::London.boxed(), ForkCondition::Block(0)), + ( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { fork_block: None, total_difficulty: U256::ZERO }, + ), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(0)), + (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(0)), + #[cfg(feature = "optimism")] + (crate::OptimismHardfork::Regolith.boxed(), ForkCondition::Timestamp(0)), + #[cfg(feature = "optimism")] + (crate::OptimismHardfork::Bedrock.boxed(), ForkCondition::Block(0)), + #[cfg(feature = "optimism")] + (crate::OptimismHardfork::Ecotone.boxed(), ForkCondition::Timestamp(0)), + ]) +}); diff --git a/crates/ethereum-forks/src/hardfork/ethereum.rs b/crates/ethereum-forks/src/hardfork/ethereum.rs new file mode 100644 index 000000000..7a2618c3c --- /dev/null +++ b/crates/ethereum-forks/src/hardfork/ethereum.rs @@ -0,0 +1,418 @@ +use crate::{hardfork, ChainHardforks, ForkCondition, Hardfork}; +use alloy_chains::Chain; +use alloy_primitives::{uint, U256}; +use core::{ + fmt, + fmt::{Display, Formatter}, + str::FromStr, +}; +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +hardfork!( + /// The name of an Ethereum hardfork. + EthereumHardfork { + /// Frontier: . + Frontier, + /// Homestead: . + Homestead, + /// The DAO fork: . + Dao, + /// Tangerine: . + Tangerine, + /// Spurious Dragon: . + SpuriousDragon, + /// Byzantium: . + Byzantium, + /// Constantinople: . + Constantinople, + /// Petersburg: . + Petersburg, + /// Istanbul: . + Istanbul, + /// Muir Glacier: . + MuirGlacier, + /// Berlin: . + Berlin, + /// London: . + London, + /// Arrow Glacier: . + ArrowGlacier, + /// Gray Glacier: . + GrayGlacier, + /// Paris: . + Paris, + /// Shanghai: . + Shanghai, + /// Cancun. + Cancun, + /// Prague: + Prague, + } +); + +impl EthereumHardfork { + /// Retrieves the activation block for the specified hardfork on the given chain. + pub fn activation_block(&self, chain: Chain) -> Option { + if chain == Chain::mainnet() { + return self.mainnet_activation_block() + } + if chain == Chain::sepolia() { + return self.sepolia_activation_block() + } + if chain == Chain::holesky() { + return self.holesky_activation_block() + } + + None + } + + /// Retrieves the activation block for the specified hardfork on the Ethereum mainnet. + pub const fn mainnet_activation_block(&self) -> Option { + match self { + Self::Frontier => Some(0), + Self::Homestead => Some(1150000), + Self::Dao => Some(1920000), + Self::Tangerine => Some(2463000), + Self::SpuriousDragon => Some(2675000), + Self::Byzantium => Some(4370000), + Self::Constantinople | Self::Petersburg => Some(7280000), + Self::Istanbul => Some(9069000), + Self::MuirGlacier => Some(9200000), + Self::Berlin => Some(12244000), + Self::London => Some(12965000), + Self::ArrowGlacier => Some(13773000), + Self::GrayGlacier => Some(15050000), + Self::Paris => Some(15537394), + Self::Shanghai => Some(17034870), + Self::Cancun => Some(19426587), + _ => None, + } + } + + /// Retrieves the activation block for the specified hardfork on the Sepolia testnet. 
+ pub const fn sepolia_activation_block(&self) -> Option { + match self { + Self::Paris => Some(1735371), + Self::Shanghai => Some(2990908), + Self::Cancun => Some(5187023), + Self::Frontier | + Self::Homestead | + Self::Dao | + Self::Tangerine | + Self::SpuriousDragon | + Self::Byzantium | + Self::Constantinople | + Self::Petersburg | + Self::Istanbul | + Self::MuirGlacier | + Self::Berlin | + Self::London | + Self::ArrowGlacier | + Self::GrayGlacier => Some(0), + _ => None, + } + } + + /// Retrieves the activation block for the specified hardfork on the holesky testnet. + const fn holesky_activation_block(&self) -> Option { + match self { + Self::Dao | + Self::Tangerine | + Self::SpuriousDragon | + Self::Byzantium | + Self::Constantinople | + Self::Petersburg | + Self::Istanbul | + Self::MuirGlacier | + Self::Berlin | + Self::London | + Self::ArrowGlacier | + Self::GrayGlacier | + Self::Paris => Some(0), + Self::Shanghai => Some(6698), + Self::Cancun => Some(894733), + _ => None, + } + } + + /// Retrieves the activation block for the specified hardfork on the Arbitrum Sepolia testnet. + pub const fn arbitrum_sepolia_activation_block(&self) -> Option { + match self { + Self::Frontier | + Self::Homestead | + Self::Dao | + Self::Tangerine | + Self::SpuriousDragon | + Self::Byzantium | + Self::Constantinople | + Self::Petersburg | + Self::Istanbul | + Self::MuirGlacier | + Self::Berlin | + Self::London | + Self::ArrowGlacier | + Self::GrayGlacier | + Self::Paris => Some(0), + Self::Shanghai => Some(10653737), + // Hardfork::ArbOS11 => Some(10653737), + Self::Cancun => Some(18683405), + // Hardfork::ArbOS20Atlas => Some(18683405), + _ => None, + } + } + + /// Retrieves the activation block for the specified hardfork on the Arbitrum One mainnet. + pub const fn arbitrum_activation_block(&self) -> Option { + match self { + Self::Frontier | + Self::Homestead | + Self::Dao | + Self::Tangerine | + Self::SpuriousDragon | + Self::Byzantium | + Self::Constantinople | + Self::Petersburg | + Self::Istanbul | + Self::MuirGlacier | + Self::Berlin | + Self::London | + Self::ArrowGlacier | + Self::GrayGlacier | + Self::Paris => Some(0), + Self::Shanghai => Some(184097479), + // Hardfork::ArbOS11 => Some(184097479), + Self::Cancun => Some(190301729), + // Hardfork::ArbOS20Atlas => Some(190301729), + _ => None, + } + } + + /// Retrieves the activation timestamp for the specified hardfork on the given chain. + pub fn activation_timestamp(&self, chain: Chain) -> Option { + if chain == Chain::mainnet() { + return self.mainnet_activation_timestamp() + } + if chain == Chain::sepolia() { + return self.sepolia_activation_timestamp() + } + if chain == Chain::holesky() { + return self.holesky_activation_timestamp() + } + + None + } + + /// Retrieves the activation timestamp for the specified hardfork on the Ethereum mainnet. 
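Not part of the patch: the chain-dispatch pattern used by `activation_timestamp` above, reduced to a toy `Chain` enum in place of `alloy_chains::Chain`; the timestamps are the Shanghai and Cancun values from the tables in this file:

enum Chain {
    Mainnet,
    Sepolia,
    Holesky,
}

enum Fork {
    Shanghai,
    Cancun,
}

fn activation_timestamp(fork: Fork, chain: Chain) -> Option<u64> {
    // One table per chain; an unknown chain would fall through to `None`,
    // matching the tail return of the real implementation.
    let (shanghai, cancun) = match chain {
        Chain::Mainnet => (1681338455, 1710338135),
        Chain::Sepolia => (1677557088, 1706655072),
        Chain::Holesky => (1696000704, 1707305664),
    };
    Some(match fork {
        Fork::Shanghai => shanghai,
        Fork::Cancun => cancun,
    })
}

fn main() {
    assert_eq!(activation_timestamp(Fork::Cancun, Chain::Mainnet), Some(1710338135));
    assert_eq!(activation_timestamp(Fork::Shanghai, Chain::Sepolia), Some(1677557088));
    assert_eq!(activation_timestamp(Fork::Cancun, Chain::Holesky), Some(1707305664));
}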
+
+    /// Retrieves the activation timestamp for the specified hardfork on the Ethereum mainnet.
+    pub const fn mainnet_activation_timestamp(&self) -> Option<u64> {
+        match self {
+            Self::Frontier => Some(1438226773),
+            Self::Homestead => Some(1457938193),
+            Self::Dao => Some(1468977640),
+            Self::Tangerine => Some(1476753571),
+            Self::SpuriousDragon => Some(1479788144),
+            Self::Byzantium => Some(1508131331),
+            Self::Constantinople | Self::Petersburg => Some(1551340324),
+            Self::Istanbul => Some(1575807909),
+            Self::MuirGlacier => Some(1577953849),
+            Self::Berlin => Some(1618481223),
+            Self::London => Some(1628166822),
+            Self::ArrowGlacier => Some(1639036523),
+            Self::GrayGlacier => Some(1656586444),
+            Self::Paris => Some(1663224162),
+            Self::Shanghai => Some(1681338455),
+            Self::Cancun => Some(1710338135),
+
+            // upcoming hardforks
+            _ => None,
+        }
+    }
+
+    /// Retrieves the activation timestamp for the specified hardfork on the Sepolia testnet.
+    pub const fn sepolia_activation_timestamp(&self) -> Option<u64> {
+        match self {
+            Self::Frontier |
+            Self::Homestead |
+            Self::Dao |
+            Self::Tangerine |
+            Self::SpuriousDragon |
+            Self::Byzantium |
+            Self::Constantinople |
+            Self::Petersburg |
+            Self::Istanbul |
+            Self::MuirGlacier |
+            Self::Berlin |
+            Self::London |
+            Self::ArrowGlacier |
+            Self::GrayGlacier |
+            Self::Paris => Some(1633267481),
+            Self::Shanghai => Some(1677557088),
+            Self::Cancun => Some(1706655072),
+            _ => None,
+        }
+    }
+
+    /// Retrieves the activation timestamp for the specified hardfork on the Holesky testnet.
+    pub const fn holesky_activation_timestamp(&self) -> Option<u64> {
+        match self {
+            Self::Shanghai => Some(1696000704),
+            Self::Cancun => Some(1707305664),
+            Self::Frontier |
+            Self::Homestead |
+            Self::Dao |
+            Self::Tangerine |
+            Self::SpuriousDragon |
+            Self::Byzantium |
+            Self::Constantinople |
+            Self::Petersburg |
+            Self::Istanbul |
+            Self::MuirGlacier |
+            Self::Berlin |
+            Self::London |
+            Self::ArrowGlacier |
+            Self::GrayGlacier |
+            Self::Paris => Some(1695902100),
+            _ => None,
+        }
+    }
+
+    /// Retrieves the activation timestamp for the specified hardfork on the Arbitrum Sepolia
+    /// testnet.
+    pub const fn arbitrum_sepolia_activation_timestamp(&self) -> Option<u64> {
+        match self {
+            Self::Frontier |
+            Self::Homestead |
+            Self::Dao |
+            Self::Tangerine |
+            Self::SpuriousDragon |
+            Self::Byzantium |
+            Self::Constantinople |
+            Self::Petersburg |
+            Self::Istanbul |
+            Self::MuirGlacier |
+            Self::Berlin |
+            Self::London |
+            Self::ArrowGlacier |
+            Self::GrayGlacier |
+            Self::Paris => Some(1692726996),
+            Self::Shanghai => Some(1706634000),
+            // Hardfork::ArbOS11 => Some(1706634000),
+            Self::Cancun => Some(1709229600),
+            // Hardfork::ArbOS20Atlas => Some(1709229600),
+            _ => None,
+        }
+    }
+
+    /// Retrieves the activation timestamp for the specified hardfork on the Arbitrum One mainnet.
+    pub const fn arbitrum_activation_timestamp(&self) -> Option<u64> {
+        match self {
+            Self::Frontier |
+            Self::Homestead |
+            Self::Dao |
+            Self::Tangerine |
+            Self::SpuriousDragon |
+            Self::Byzantium |
+            Self::Constantinople |
+            Self::Petersburg |
+            Self::Istanbul |
+            Self::MuirGlacier |
+            Self::Berlin |
+            Self::London |
+            Self::ArrowGlacier |
+            Self::GrayGlacier |
+            Self::Paris => Some(1622240000),
+            Self::Shanghai => Some(1708804873),
+            // Hardfork::ArbOS11 => Some(1708804873),
+            Self::Cancun => Some(1710424089),
+            // Hardfork::ArbOS20Atlas => Some(1710424089),
+            _ => None,
+        }
+    }
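Block and timestamp tables describe the same activation points from two angles; for instance (a sketch using the mainnet values above):

    // Shanghai on mainnet: both views of the same activation.
    assert_eq!(EthereumHardfork::Shanghai.mainnet_activation_block(), Some(17034870));
    assert_eq!(EthereumHardfork::Shanghai.mainnet_activation_timestamp(), Some(1681338455));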
+
+    /// Ethereum mainnet list of hardforks.
+    pub const fn mainnet() -> [(Self, ForkCondition); 17] {
+        [
+            (Self::Frontier, ForkCondition::Block(0)),
+            (Self::Homestead, ForkCondition::Block(1150000)),
+            (Self::Dao, ForkCondition::Block(1920000)),
+            (Self::Tangerine, ForkCondition::Block(2463000)),
+            (Self::SpuriousDragon, ForkCondition::Block(2675000)),
+            (Self::Byzantium, ForkCondition::Block(4370000)),
+            (Self::Constantinople, ForkCondition::Block(7280000)),
+            (Self::Petersburg, ForkCondition::Block(7280000)),
+            (Self::Istanbul, ForkCondition::Block(9069000)),
+            (Self::MuirGlacier, ForkCondition::Block(9200000)),
+            (Self::Berlin, ForkCondition::Block(12244000)),
+            (Self::London, ForkCondition::Block(12965000)),
+            (Self::ArrowGlacier, ForkCondition::Block(13773000)),
+            (Self::GrayGlacier, ForkCondition::Block(15050000)),
+            (
+                Self::Paris,
+                ForkCondition::TTD {
+                    fork_block: None,
+                    total_difficulty: uint!(58_750_000_000_000_000_000_000_U256),
+                },
+            ),
+            (Self::Shanghai, ForkCondition::Timestamp(1681338455)),
+            (Self::Cancun, ForkCondition::Timestamp(1710338135)),
+        ]
+    }
+
+    /// Ethereum sepolia list of hardforks.
+    pub const fn sepolia() -> [(Self, ForkCondition); 15] {
+        [
+            (Self::Frontier, ForkCondition::Block(0)),
+            (Self::Homestead, ForkCondition::Block(0)),
+            (Self::Dao, ForkCondition::Block(0)),
+            (Self::Tangerine, ForkCondition::Block(0)),
+            (Self::SpuriousDragon, ForkCondition::Block(0)),
+            (Self::Byzantium, ForkCondition::Block(0)),
+            (Self::Constantinople, ForkCondition::Block(0)),
+            (Self::Petersburg, ForkCondition::Block(0)),
+            (Self::Istanbul, ForkCondition::Block(0)),
+            (Self::MuirGlacier, ForkCondition::Block(0)),
+            (Self::Berlin, ForkCondition::Block(0)),
+            (Self::London, ForkCondition::Block(0)),
+            (
+                Self::Paris,
+                ForkCondition::TTD {
+                    fork_block: Some(1735371),
+                    total_difficulty: uint!(17_000_000_000_000_000_U256),
+                },
+            ),
+            (Self::Shanghai, ForkCondition::Timestamp(1677557088)),
+            (Self::Cancun, ForkCondition::Timestamp(1706655072)),
+        ]
+    }
+
+    /// Ethereum holesky list of hardforks.
+    pub const fn holesky() -> [(Self, ForkCondition); 15] {
+        [
+            (Self::Frontier, ForkCondition::Block(0)),
+            (Self::Homestead, ForkCondition::Block(0)),
+            (Self::Dao, ForkCondition::Block(0)),
+            (Self::Tangerine, ForkCondition::Block(0)),
+            (Self::SpuriousDragon, ForkCondition::Block(0)),
+            (Self::Byzantium, ForkCondition::Block(0)),
+            (Self::Constantinople, ForkCondition::Block(0)),
+            (Self::Petersburg, ForkCondition::Block(0)),
+            (Self::Istanbul, ForkCondition::Block(0)),
+            (Self::MuirGlacier, ForkCondition::Block(0)),
+            (Self::Berlin, ForkCondition::Block(0)),
+            (Self::London, ForkCondition::Block(0)),
+            (Self::Paris, ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }),
+            (Self::Shanghai, ForkCondition::Timestamp(1696000704)),
+            (Self::Cancun, ForkCondition::Timestamp(1707305664)),
+        ]
+    }
+}
+
+impl<const N: usize> From<[(EthereumHardfork, ForkCondition); N]> for ChainHardforks {
+    fn from(list: [(EthereumHardfork, ForkCondition); N]) -> Self {
+        Self::new(
+            list.into_iter()
+                .map(|(fork, cond)| (Box::new(fork) as Box<dyn Hardfork>, cond))
+                .collect(),
+        )
+    }
+}
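The `From` impl is what lets the const arrays feed directly into a fork set (a sketch; it relies on the `EthereumHardforks` helper implemented for `ChainHardforks` later in this diff):

    use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, EthereumHardforks};

    // The const arrays convert straight into a `ChainHardforks` via `From`.
    let forks: ChainHardforks = EthereumHardfork::mainnet().into();
    assert!(forks.is_shanghai_active_at_timestamp(1681338455));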
diff --git a/crates/ethereum-forks/src/hardfork/macros.rs b/crates/ethereum-forks/src/hardfork/macros.rs
new file mode 100644
index 000000000..780c15f6e
--- /dev/null
+++ b/crates/ethereum-forks/src/hardfork/macros.rs
@@ -0,0 +1,52 @@
+/// Macro that defines different variants of a chain specific enum. See [`crate::Hardfork`] as an
+/// example.
+#[macro_export]
+macro_rules! hardfork {
+    ($(#[$enum_meta:meta])* $enum:ident { $( $(#[$meta:meta])* $variant:ident ),* $(,)? }) => {
+        $(#[$enum_meta])*
+        #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+        #[derive(Debug, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
+        pub enum $enum {
+            $( $(#[$meta])* $variant ),*
+        }
+
+        impl $enum {
+            /// Returns variant as `str`.
+            pub const fn name(&self) -> &'static str {
+                match self {
+                    $( $enum::$variant => stringify!($variant), )*
+                }
+            }
+
+            /// Boxes `self` and returns it as `Box<dyn Hardfork>`.
+            pub fn boxed(self) -> Box<dyn Hardfork> {
+                Box::new(self)
+            }
+        }
+
+        impl FromStr for $enum {
+            type Err = String;
+
+            fn from_str(s: &str) -> Result<Self, Self::Err> {
+                match s.to_lowercase().as_str() {
+                    $(
+                        s if s == stringify!($variant).to_lowercase() => Ok($enum::$variant),
+                    )*
+                    _ => return Err(format!("Unknown hardfork: {s}")),
+                }
+            }
+        }
+
+        impl Hardfork for $enum {
+            fn name(&self) -> &'static str {
+                self.name()
+            }
+        }
+
+        impl Display for $enum {
+            fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+                write!(f, "{self:?}")
+            }
+        }
+    }
+}
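For reference, this is how a downstream crate might instantiate the macro for its own chain. `MyChainHardfork` and its variants are hypothetical names for illustration only; the same imports as `ethereum.rs` (`fmt`, `Display`, `Formatter`, `FromStr`, `Hardfork`) are assumed in scope:

    hardfork!(
        /// The name of a hypothetical chain's hardfork.
        MyChainHardfork {
            /// Initial launch.
            Genesis,
            /// First upgrade.
            Aurora,
        }
    );

    // The macro derives `name()`, `FromStr` (case-insensitive), `Display`,
    // `boxed()`, and the `Hardfork` impl:
    assert_eq!(MyChainHardfork::Aurora.name(), "Aurora");
    assert_eq!("aurora".parse::<MyChainHardfork>().unwrap(), MyChainHardfork::Aurora);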
diff --git a/crates/ethereum-forks/src/hardfork/mod.rs b/crates/ethereum-forks/src/hardfork/mod.rs
new file mode 100644
index 000000000..08f9703f7
--- /dev/null
+++ b/crates/ethereum-forks/src/hardfork/mod.rs
@@ -0,0 +1,129 @@
+mod macros;
+
+mod ethereum;
+pub use ethereum::EthereumHardfork;
+
+mod optimism;
+pub use optimism::OptimismHardfork;
+
+mod bsc;
+pub use bsc::BscHardfork;
+
+mod dev;
+pub use dev::DEV_HARDFORKS;
+
+use core::{
+    any::Any,
+    hash::{Hash, Hasher},
+};
+use dyn_clone::DynClone;
+
+#[cfg(not(feature = "std"))]
+use alloc::{format, string::String};
+
+/// Generic hardfork trait.
+#[auto_impl::auto_impl(&, Box)]
+pub trait Hardfork: Any + DynClone + Send + Sync + 'static {
+    /// Fork name.
+    fn name(&self) -> &'static str;
+}
+
+dyn_clone::clone_trait_object!(Hardfork);
+
+impl core::fmt::Debug for dyn Hardfork + 'static {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        f.debug_struct(self.name()).finish()
+    }
+}
+
+impl PartialEq for dyn Hardfork + 'static {
+    fn eq(&self, other: &Self) -> bool {
+        self.name() == other.name()
+    }
+}
+
+impl Eq for dyn Hardfork + 'static {}
+
+impl Hash for dyn Hardfork + 'static {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.name().hash(state)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::hardfork::optimism::OptimismHardfork;
+    use std::str::FromStr;
+
+    #[test]
+    fn check_hardfork_from_str() {
+        let hardfork_str = [
+            "frOntier",
+            "homEstead",
+            "dao",
+            "tAngerIne",
+            "spurIousdrAgon",
+            "byzAntium",
+            "constantinople",
+            "petersburg",
+            "istanbul",
+            "muirglacier",
+            "bErlin",
+            "lonDon",
+            "arrowglacier",
+            "grayglacier",
+            "PARIS",
+            "ShAnGhAI",
+            "CaNcUn",
+            "PrAguE",
+        ];
+        let expected_hardforks = [
+            EthereumHardfork::Frontier,
+            EthereumHardfork::Homestead,
+            EthereumHardfork::Dao,
+            EthereumHardfork::Tangerine,
+            EthereumHardfork::SpuriousDragon,
+            EthereumHardfork::Byzantium,
+            EthereumHardfork::Constantinople,
+            EthereumHardfork::Petersburg,
+            EthereumHardfork::Istanbul,
+            EthereumHardfork::MuirGlacier,
+            EthereumHardfork::Berlin,
+            EthereumHardfork::London,
+            EthereumHardfork::ArrowGlacier,
+            EthereumHardfork::GrayGlacier,
+            EthereumHardfork::Paris,
+            EthereumHardfork::Shanghai,
+            EthereumHardfork::Cancun,
+            EthereumHardfork::Prague,
+        ];
+
+        let hardforks: Vec<EthereumHardfork> =
+            hardfork_str.iter().map(|h| EthereumHardfork::from_str(h).unwrap()).collect();
+
+        assert_eq!(hardforks, expected_hardforks);
+    }
+
+    #[test]
+    fn check_op_hardfork_from_str() {
+        let hardfork_str = ["beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD"];
+        let expected_hardforks = [
+            OptimismHardfork::Bedrock,
+            OptimismHardfork::Regolith,
+            OptimismHardfork::Canyon,
+            OptimismHardfork::Ecotone,
+            OptimismHardfork::Fjord,
+        ];
+
+        let hardforks: Vec<OptimismHardfork> =
+            hardfork_str.iter().map(|h| OptimismHardfork::from_str(h).unwrap()).collect();
+
+        assert_eq!(hardforks, expected_hardforks);
+    }
+
+    #[test]
+    fn check_nonexistent_hardfork_from_str() {
+        assert!(EthereumHardfork::from_str("not a hardfork").is_err());
+    }
+}
diff --git a/crates/ethereum-forks/src/hardfork/optimism.rs b/crates/ethereum-forks/src/hardfork/optimism.rs
new file mode 100644
index 000000000..1d75ea78a
--- /dev/null
+++ b/crates/ethereum-forks/src/hardfork/optimism.rs
@@ -0,0 +1,518 @@
+use crate::{hardfork, ChainHardforks, EthereumHardfork, ForkCondition, Hardfork};
+use alloy_chains::Chain;
+use alloy_primitives::U256;
+use core::{
+    any::Any,
+    fmt::{self, Display, Formatter},
+    str::FromStr,
+};
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};
+
+hardfork!(
+    /// The name of an optimism hardfork.
+    ///
+    /// When building a list of hardforks for a chain, it's still expected to mix with
+    /// [`EthereumHardfork`].
+    OptimismHardfork {
+        /// Bedrock.
+        Bedrock,
+        /// Regolith.
+        Regolith,
+        /// `Fermat`
+        Fermat,
+        /// Canyon.
+        Canyon,
+        /// Ecotone.
+        Ecotone,
+        /// `PreContractForkBlock`
+        PreContractForkBlock,
+        /// `Haber`
+        Haber,
+        /// Fjord.
+        Fjord,
+    }
+);
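Note that the `dyn Hardfork` impls above compare and hash by `name()` alone, so two boxed forks are equal whenever their variant names match, even across different enums; a sketch:

    let a = EthereumHardfork::Shanghai.boxed();
    let b = EthereumHardfork::Shanghai.boxed();
    // Equality goes through `name()`, not the concrete enum type or pointer.
    assert!(*a == *b);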
+
+impl OptimismHardfork {
+    /// Retrieves the activation block for the specified hardfork on the given chain.
+    pub fn activation_block<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
+        if chain == Chain::base_sepolia() {
+            return Self::base_sepolia_activation_block(fork)
+        }
+        if chain == Chain::base_mainnet() {
+            return Self::base_mainnet_activation_block(fork)
+        }
+        if chain == Chain::opbnb_mainnet() {
+            return Self::opbnb_mainnet_activation_block(fork)
+        }
+        if chain == Chain::opbnb_testnet() {
+            return Self::opbnb_testnet_activation_block(fork)
+        }
+
+        None
+    }
+
+    /// Retrieves the activation timestamp for the specified hardfork on the given chain.
+    pub fn activation_timestamp<H: Hardfork>(self, fork: H, chain: Chain) -> Option<u64> {
+        if chain == Chain::base_sepolia() {
+            return Self::base_sepolia_activation_timestamp(fork)
+        }
+        if chain == Chain::base_mainnet() {
+            return Self::base_mainnet_activation_timestamp(fork)
+        }
+        if chain == Chain::opbnb_mainnet() {
+            return Self::opbnb_mainnet_activation_timestamp(fork)
+        }
+        if chain == Chain::opbnb_testnet() {
+            return Self::opbnb_testnet_activation_timestamp(fork)
+        }
+
+        None
+    }
+
+    /// Retrieves the activation block for the specified hardfork on the Base Sepolia testnet.
+    pub fn base_sepolia_activation_block<H: Hardfork>(fork: H) -> Option<u64> {
+        match_hardfork(
+            fork,
+            |fork| match fork {
+                EthereumHardfork::Frontier |
+                EthereumHardfork::Homestead |
+                EthereumHardfork::Dao |
+                EthereumHardfork::Tangerine |
+                EthereumHardfork::SpuriousDragon |
+                EthereumHardfork::Byzantium |
+                EthereumHardfork::Constantinople |
+                EthereumHardfork::Petersburg |
+                EthereumHardfork::Istanbul |
+                EthereumHardfork::MuirGlacier |
+                EthereumHardfork::Berlin |
+                EthereumHardfork::London |
+                EthereumHardfork::ArrowGlacier |
+                EthereumHardfork::GrayGlacier |
+                EthereumHardfork::Paris |
+                EthereumHardfork::Shanghai => Some(2106456),
+                EthereumHardfork::Cancun => Some(6383256),
+                _ => None,
+            },
+            |fork| match fork {
+                Self::Bedrock | Self::Regolith => Some(0),
+                Self::Canyon => Some(2106456),
+                Self::Ecotone => Some(6383256),
+                Self::Fjord => Some(10615056),
+                _ => None,
+            },
+        )
+    }
+
+    /// Retrieves the activation block for the specified hardfork on the Base mainnet.
+    pub fn base_mainnet_activation_block<H: Hardfork>(fork: H) -> Option<u64> {
+        match_hardfork(
+            fork,
+            |fork| match fork {
+                EthereumHardfork::Frontier |
+                EthereumHardfork::Homestead |
+                EthereumHardfork::Dao |
+                EthereumHardfork::Tangerine |
+                EthereumHardfork::SpuriousDragon |
+                EthereumHardfork::Byzantium |
+                EthereumHardfork::Constantinople |
+                EthereumHardfork::Petersburg |
+                EthereumHardfork::Istanbul |
+                EthereumHardfork::MuirGlacier |
+                EthereumHardfork::Berlin |
+                EthereumHardfork::London |
+                EthereumHardfork::ArrowGlacier |
+                EthereumHardfork::GrayGlacier |
+                EthereumHardfork::Paris |
+                EthereumHardfork::Shanghai => Some(9101527),
+                EthereumHardfork::Cancun => Some(11188936),
+                _ => None,
+            },
+            |fork| match fork {
+                Self::Bedrock | Self::Regolith => Some(0),
+                Self::Canyon => Some(9101527),
+                Self::Ecotone => Some(11188936),
+                _ => None,
+            },
+        )
+    }
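Because the lookups accept any `H: Hardfork`, Ethereum and OP forks share a single entry point (values from the Base mainnet table above; this mirrors the `test_match_hardfork` test later in this file):

    assert_eq!(
        OptimismHardfork::base_mainnet_activation_block(EthereumHardfork::Cancun),
        Some(11188936)
    );
    assert_eq!(
        OptimismHardfork::base_mainnet_activation_block(OptimismHardfork::Ecotone),
        Some(11188936)
    );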
+
+    /// Retrieves the activation block for the specified hardfork on the opBNB mainnet.
+    pub fn opbnb_mainnet_activation_block<H: Hardfork>(fork: H) -> Option<u64> {
+        match_hardfork(
+            fork,
+            |fork| match fork {
+                EthereumHardfork::Frontier |
+                EthereumHardfork::Homestead |
+                EthereumHardfork::Tangerine |
+                EthereumHardfork::SpuriousDragon |
+                EthereumHardfork::Byzantium |
+                EthereumHardfork::Constantinople |
+                EthereumHardfork::Petersburg |
+                EthereumHardfork::Istanbul |
+                EthereumHardfork::MuirGlacier |
+                EthereumHardfork::Berlin |
+                EthereumHardfork::London |
+                EthereumHardfork::ArrowGlacier |
+                EthereumHardfork::GrayGlacier |
+                EthereumHardfork::Paris => Some(0),
+                _ => None,
+            },
+            |fork| match fork {
+                Self::Bedrock => Some(0),
+                _ => None,
+            },
+        )
+    }
+
+    /// Retrieves the activation block for the specified hardfork on the opBNB testnet.
+    pub fn opbnb_testnet_activation_block<H: Hardfork>(fork: H) -> Option<u64> {
+        match_hardfork(
+            fork,
+            |fork| match fork {
+                EthereumHardfork::Frontier |
+                EthereumHardfork::Homestead |
+                EthereumHardfork::Tangerine |
+                EthereumHardfork::SpuriousDragon |
+                EthereumHardfork::Byzantium |
+                EthereumHardfork::Constantinople |
+                EthereumHardfork::Petersburg |
+                EthereumHardfork::Istanbul |
+                EthereumHardfork::MuirGlacier |
+                EthereumHardfork::Berlin |
+                EthereumHardfork::London |
+                EthereumHardfork::ArrowGlacier |
+                EthereumHardfork::GrayGlacier |
+                EthereumHardfork::Paris => Some(0),
+                _ => None,
+            },
+            |fork| match fork {
+                Self::Bedrock => Some(0),
+                Self::PreContractForkBlock => Some(5805494),
+                _ => None,
+            },
+        )
+    }
+
+    /// Retrieves the activation timestamp for the specified hardfork on the Base Sepolia testnet.
+    pub fn base_sepolia_activation_timestamp<H: Hardfork>(fork: H) -> Option<u64> {
+        match_hardfork(
+            fork,
+            |fork| match fork {
+                EthereumHardfork::Frontier |
+                EthereumHardfork::Homestead |
+                EthereumHardfork::Dao |
+                EthereumHardfork::Tangerine |
+                EthereumHardfork::SpuriousDragon |
+                EthereumHardfork::Byzantium |
+                EthereumHardfork::Constantinople |
+                EthereumHardfork::Petersburg |
+                EthereumHardfork::Istanbul |
+                EthereumHardfork::MuirGlacier |
+                EthereumHardfork::Berlin |
+                EthereumHardfork::London |
+                EthereumHardfork::ArrowGlacier |
+                EthereumHardfork::GrayGlacier |
+                EthereumHardfork::Paris |
+                EthereumHardfork::Shanghai => Some(1699981200),
+                EthereumHardfork::Cancun => Some(1708534800),
+                _ => None,
+            },
+            |fork| match fork {
+                Self::Bedrock | Self::Regolith => Some(1695768288),
+                Self::Canyon => Some(1699981200),
+                Self::Ecotone => Some(1708534800),
+                Self::Fjord => Some(1716998400),
+                _ => None,
+            },
+        )
+    }
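The OP-stack forks deliberately mirror their L1 counterparts' activation times, which these tables encode twice; for example, on Base Sepolia Canyon activates together with Shanghai (both 1699981200):

    assert_eq!(
        OptimismHardfork::base_sepolia_activation_timestamp(OptimismHardfork::Canyon),
        OptimismHardfork::base_sepolia_activation_timestamp(EthereumHardfork::Shanghai)
    );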
+
+    /// Retrieves the activation timestamp for the specified hardfork on the Base mainnet.
+    pub fn base_mainnet_activation_timestamp<H: Hardfork>(fork: H) -> Option<u64> {
+        match_hardfork(
+            fork,
+            |fork| match fork {
+                EthereumHardfork::Frontier |
+                EthereumHardfork::Homestead |
+                EthereumHardfork::Dao |
+                EthereumHardfork::Tangerine |
+                EthereumHardfork::SpuriousDragon |
+                EthereumHardfork::Byzantium |
+                EthereumHardfork::Constantinople |
+                EthereumHardfork::Petersburg |
+                EthereumHardfork::Istanbul |
+                EthereumHardfork::MuirGlacier |
+                EthereumHardfork::Berlin |
+                EthereumHardfork::London |
+                EthereumHardfork::ArrowGlacier |
+                EthereumHardfork::GrayGlacier |
+                EthereumHardfork::Paris |
+                EthereumHardfork::Shanghai => Some(1704992401),
+                EthereumHardfork::Cancun => Some(1710374401),
+                _ => None,
+            },
+            |fork| match fork {
+                Self::Bedrock | Self::Regolith => Some(1686789347),
+                Self::Canyon => Some(1704992401),
+                Self::Ecotone => Some(1710374401),
+                Self::Fjord => Some(1720627201),
+                _ => None,
+            },
+        )
+    }
+
+    /// Retrieves the activation timestamp for the specified hardfork on the opBNB mainnet.
+    pub fn opbnb_mainnet_activation_timestamp<H: Hardfork>(fork: H) -> Option<u64> {
+        match_hardfork(
+            fork,
+            |fork| match fork {
+                EthereumHardfork::Shanghai => Some(1718870400),
+                EthereumHardfork::Cancun => Some(1718871600),
+                _ => None,
+            },
+            |fork| match fork {
+                Self::Regolith => Some(0),
+                Self::Fermat => Some(1701151200),
+                Self::Canyon => Some(1718870400),
+                Self::Ecotone => Some(1718871600),
+                Self::Haber => Some(1718872200),
+                _ => None,
+            },
+        )
+    }
+
+    /// Retrieves the activation timestamp for the specified hardfork on the opBNB testnet.
+    pub fn opbnb_testnet_activation_timestamp<H: Hardfork>(fork: H) -> Option<u64> {
+        match_hardfork(
+            fork,
+            |fork| match fork {
+                EthereumHardfork::Shanghai => Some(1715753400),
+                EthereumHardfork::Cancun => Some(1715754600),
+                _ => None,
+            },
+            |fork| match fork {
+                Self::Regolith => Some(0),
+                Self::Fermat => Some(1698991506),
+                Self::Canyon => Some(1715753400),
+                Self::Ecotone => Some(1715754600),
+                Self::Haber => Some(1717048800),
+                _ => None,
+            },
+        )
+    }
+
+    /// Optimism mainnet list of hardforks.
+ pub fn op_mainnet() -> ChainHardforks { + ChainHardforks::new(vec![ + (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(3950000)), + (EthereumHardfork::London.boxed(), ForkCondition::Block(105235063)), + (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(105235063)), + (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(105235063)), + ( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { fork_block: Some(105235063), total_difficulty: U256::ZERO }, + ), + (Self::Bedrock.boxed(), ForkCondition::Block(105235063)), + (Self::Regolith.boxed(), ForkCondition::Timestamp(0)), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1704992401)), + (Self::Canyon.boxed(), ForkCondition::Timestamp(1704992401)), + (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1710374401)), + (Self::Ecotone.boxed(), ForkCondition::Timestamp(1710374401)), + (Self::Fjord.boxed(), ForkCondition::Timestamp(1720627201)), + ]) + } + + /// Optimism sepolia list of hardforks. + pub fn op_sepolia() -> ChainHardforks { + ChainHardforks::new(vec![ + (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::London.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)), + ( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }, + ), + (Self::Bedrock.boxed(), ForkCondition::Block(0)), + (Self::Regolith.boxed(), ForkCondition::Timestamp(0)), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1699981200)), + (Self::Canyon.boxed(), ForkCondition::Timestamp(1699981200)), + (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1708534800)), + (Self::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)), + (Self::Fjord.boxed(), ForkCondition::Timestamp(1716998400)), + ]) + } + + /// Base sepolia list of hardforks. 
+ pub fn base_sepolia() -> ChainHardforks { + ChainHardforks::new(vec![ + (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::London.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)), + ( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }, + ), + (Self::Bedrock.boxed(), ForkCondition::Block(0)), + (Self::Regolith.boxed(), ForkCondition::Timestamp(0)), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1699981200)), + (Self::Canyon.boxed(), ForkCondition::Timestamp(1699981200)), + (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1708534800)), + (Self::Ecotone.boxed(), ForkCondition::Timestamp(1708534800)), + (Self::Fjord.boxed(), ForkCondition::Timestamp(1716998400)), + ]) + } + + /// Base mainnet list of hardforks. + pub fn base_mainnet() -> ChainHardforks { + ChainHardforks::new(vec![ + (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::London.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)), + (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)), + ( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO }, + ), + (Self::Bedrock.boxed(), ForkCondition::Block(0)), + (Self::Regolith.boxed(), ForkCondition::Timestamp(0)), + (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1704992401)), + (Self::Canyon.boxed(), ForkCondition::Timestamp(1704992401)), + (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1710374401)), + (Self::Ecotone.boxed(), ForkCondition::Timestamp(1710374401)), + (Self::Fjord.boxed(), ForkCondition::Timestamp(1720627201)), + ]) + } + + /// opBNB mainnet list of hardforks. 
+    pub fn opbnb_mainnet() -> ChainHardforks {
+        ChainHardforks::new(vec![
+            (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)),
+            (
+                EthereumHardfork::Paris.boxed(),
+                ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO },
+            ),
+            (Self::Bedrock.boxed(), ForkCondition::Block(0)),
+            (Self::Regolith.boxed(), ForkCondition::Timestamp(0)),
+            (Self::Fermat.boxed(), ForkCondition::Timestamp(1701151200)),
+            (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1718870400)),
+            (Self::Canyon.boxed(), ForkCondition::Timestamp(1718870400)),
+            (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1718871600)),
+            (Self::Ecotone.boxed(), ForkCondition::Timestamp(1718871600)),
+            (Self::Haber.boxed(), ForkCondition::Timestamp(1718872200)),
+        ])
+    }
+
+    /// opBNB testnet list of hardforks.
+    pub fn opbnb_testnet() -> ChainHardforks {
+        ChainHardforks::new(vec![
+            (EthereumHardfork::Frontier.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Homestead.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Tangerine.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::SpuriousDragon.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Byzantium.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Constantinople.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::London.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::ArrowGlacier.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::GrayGlacier.boxed(), ForkCondition::Block(0)),
+            (
+                EthereumHardfork::Paris.boxed(),
+                ForkCondition::TTD { fork_block: Some(0), total_difficulty: U256::ZERO },
+            ),
+            (Self::Bedrock.boxed(), ForkCondition::Block(0)),
+            (Self::Regolith.boxed(), ForkCondition::Timestamp(0)),
+            (Self::PreContractForkBlock.boxed(), ForkCondition::Block(5805494)),
+            (Self::Fermat.boxed(), ForkCondition::Timestamp(1698991506)),
+            (EthereumHardfork::Shanghai.boxed(), ForkCondition::Timestamp(1715753400)),
+            (Self::Canyon.boxed(), ForkCondition::Timestamp(1715753400)),
+            (EthereumHardfork::Cancun.boxed(), ForkCondition::Timestamp(1715754600)),
+            (Self::Ecotone.boxed(), ForkCondition::Timestamp(1715754600)),
+            (Self::Haber.boxed(), ForkCondition::Timestamp(1717048800)),
+        ])
+    }
+}
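A usage sketch for these per-chain constructors, assuming the `ChainHardforks` activation helpers defined later in this diff:

    let forks = OptimismHardfork::op_mainnet();
    // Bedrock is block-activated; Regolith activates by timestamp from genesis.
    assert!(forks.is_fork_active_at_block(OptimismHardfork::Bedrock, 105235063));
    assert!(forks.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 0));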
+
+/// Match helper method since it's not possible to match on `dyn Hardfork`
+fn match_hardfork<H, HF, OHF>(fork: H, hardfork_fn: HF, optimism_hardfork_fn: OHF) -> Option<u64>
+where
+    H: Hardfork,
+    HF: Fn(&EthereumHardfork) -> Option<u64>,
+    OHF: Fn(&OptimismHardfork) -> Option<u64>,
+{
+    let fork: &dyn Any = &fork;
+    if let Some(fork) = fork.downcast_ref::<EthereumHardfork>() {
+        return hardfork_fn(fork)
+    }
+    fork.downcast_ref::<OptimismHardfork>().and_then(optimism_hardfork_fn)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_match_hardfork() {
+        assert_eq!(
+            OptimismHardfork::base_mainnet_activation_block(EthereumHardfork::Cancun),
+            Some(11188936)
+        );
+        assert_eq!(
+            OptimismHardfork::base_mainnet_activation_block(OptimismHardfork::Canyon),
+            Some(9101527)
+        );
+    }
+}
diff --git a/crates/ethereum-forks/src/hardforks/bsc.rs b/crates/ethereum-forks/src/hardforks/bsc.rs
new file mode 100644
index 000000000..69418d4ba
--- /dev/null
+++ b/crates/ethereum-forks/src/hardforks/bsc.rs
@@ -0,0 +1,112 @@
+use crate::{BscHardfork, ChainHardforks, EthereumHardforks};
+
+/// Extends [`crate::EthereumHardforks`] with bsc helper methods.
+pub trait BscHardforks: EthereumHardforks {
+    /// Convenience method to check if [`BscHardfork::Ramanujan`] is firstly active at a given
+    /// block.
+    fn is_on_ramanujan_at_block(&self, block_number: u64) -> bool {
+        self.fork(BscHardfork::Ramanujan).transitions_at_block(block_number)
+    }
+
+    /// Convenience method to check if [`BscHardfork::Ramanujan`] is active at a given block.
+    fn is_ramanujan_active_at_block(&self, block_number: u64) -> bool {
+        self.is_fork_active_at_block(BscHardfork::Ramanujan, block_number)
+    }
+
+    /// Convenience method to check if [`BscHardfork::Euler`] is firstly active at a given block.
+    fn is_on_euler_at_block(&self, block_number: u64) -> bool {
+        self.fork(BscHardfork::Euler).transitions_at_block(block_number)
+    }
+
+    /// Convenience method to check if [`BscHardfork::Euler`] is active at a given block.
+    fn is_euler_active_at_block(&self, block_number: u64) -> bool {
+        self.is_fork_active_at_block(BscHardfork::Euler, block_number)
+    }
+
+    /// Convenience method to check if [`BscHardfork::Planck`] is firstly active at a given block.
+    fn is_on_planck_at_block(&self, block_number: u64) -> bool {
+        self.fork(BscHardfork::Planck).transitions_at_block(block_number)
+    }
+
+    /// Convenience method to check if [`BscHardfork::Planck`] is active at a given block.
+    fn is_planck_active_at_block(&self, block_number: u64) -> bool {
+        self.is_fork_active_at_block(BscHardfork::Planck, block_number)
+    }
+
+    /// Convenience method to check if [`BscHardfork::Luban`] is firstly active at a given block.
+    fn is_on_luban_at_block(&self, block_number: u64) -> bool {
+        self.fork(BscHardfork::Luban).transitions_at_block(block_number)
+    }
+
+    /// Convenience method to check if [`BscHardfork::Luban`] is active at a given block.
+    fn is_luban_active_at_block(&self, block_number: u64) -> bool {
+        self.is_fork_active_at_block(BscHardfork::Luban, block_number)
+    }
+
+    /// Convenience method to check if [`BscHardfork::Plato`] is firstly active at a given block.
+    fn is_on_plato_at_block(&self, block_number: u64) -> bool {
+        self.fork(BscHardfork::Plato).transitions_at_block(block_number)
+    }
+
+    /// Convenience method to check if [`BscHardfork::Plato`] is active at a given block.
+    fn is_plato_active_at_block(&self, block_number: u64) -> bool {
+        self.is_fork_active_at_block(BscHardfork::Plato, block_number)
+    }
+
+    /// Convenience method to check if [`BscHardfork::Kepler`] is firstly active at a given
+    /// timestamp and parent timestamp.
+ fn is_on_kepler_at_timestamp(&self, timestamp: u64, parent_timestamp: u64) -> bool { + self.fork(BscHardfork::Kepler).transitions_at_timestamp(timestamp, parent_timestamp) + } + + /// Convenience method to check if [`BscHardfork::Kepler`] is active at a given timestamp. + fn is_kepler_active_at_timestamp(&self, timestamp: u64) -> bool { + self.is_fork_active_at_timestamp(BscHardfork::Kepler, timestamp) + } + + /// Convenience method to check if [`BscHardfork::Feynman`] is firstly active at a given + /// timestamp and parent timestamp. + fn is_on_feynman_at_timestamp(&self, timestamp: u64, parent_timestamp: u64) -> bool { + self.fork(BscHardfork::Feynman).transitions_at_timestamp(timestamp, parent_timestamp) + } + + /// Convenience method to check if [`BscHardfork::Feynman`] is active at a given timestamp. + fn is_feynman_active_at_timestamp(&self, timestamp: u64) -> bool { + self.is_fork_active_at_timestamp(BscHardfork::Feynman, timestamp) + } + + /// Convenience method to check if [`BscHardfork::FeynmanFix`] is firstly active at a given + /// timestamp and parent timestamp. + fn is_on_feynman_fix_at_timestamp(&self, timestamp: u64, parent_timestamp: u64) -> bool { + self.fork(BscHardfork::FeynmanFix).transitions_at_timestamp(timestamp, parent_timestamp) + } + + /// Convenience method to check if [`BscHardfork::FeynmanFix`] is active at a given timestamp. + fn is_feynman_fix_active_at_timestamp(&self, timestamp: u64) -> bool { + self.is_fork_active_at_timestamp(BscHardfork::FeynmanFix, timestamp) + } + + /// Convenience method to check if [`BscHardfork::Haber`] is firstly active at a given timestamp + /// and parent timestamp. + fn is_on_haber_at_timestamp(&self, timestamp: u64, parent_timestamp: u64) -> bool { + self.fork(BscHardfork::Haber).transitions_at_timestamp(timestamp, parent_timestamp) + } + + /// Convenience method to check if [`BscHardfork::Haber`] is active at a given timestamp. + fn is_haber_active_at_timestamp(&self, timestamp: u64) -> bool { + self.is_fork_active_at_timestamp(BscHardfork::Haber, timestamp) + } + + /// Convenience method to check if [`BscHardfork::HaberFix`] is firstly active at a given + /// timestamp and parent timestamp. + fn is_on_haber_fix_at_timestamp(&self, timestamp: u64, parent_timestamp: u64) -> bool { + self.fork(BscHardfork::HaberFix).transitions_at_timestamp(timestamp, parent_timestamp) + } + + /// Convenience method to check if [`BscHardfork::HaberFix`] is active at a given timestamp. + fn is_haber_fix_active_at_timestamp(&self, timestamp: u64) -> bool { + self.is_fork_active_at_timestamp(BscHardfork::HaberFix, timestamp) + } +} + +impl BscHardforks for ChainHardforks {} diff --git a/crates/ethereum-forks/src/hardforks/ethereum.rs b/crates/ethereum-forks/src/hardforks/ethereum.rs new file mode 100644 index 000000000..3b4c860ad --- /dev/null +++ b/crates/ethereum-forks/src/hardforks/ethereum.rs @@ -0,0 +1,56 @@ +use crate::{ + hardforks::{ChainHardforks, Hardforks}, + EthereumHardfork, ForkCondition, +}; + +/// Helper methods for Ethereum forks. +pub trait EthereumHardforks: Hardforks { + /// Convenience method to check if [`EthereumHardfork::Shanghai`] is active at a given + /// timestamp. + fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { + self.is_fork_active_at_timestamp(EthereumHardfork::Shanghai, timestamp) + } + + /// Convenience method to check if [`EthereumHardfork::Cancun`] is active at a given timestamp. 
+    fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool {
+        self.is_fork_active_at_timestamp(EthereumHardfork::Cancun, timestamp)
+    }
+
+    /// Convenience method to check if [`EthereumHardfork::Prague`] is active at a given timestamp.
+    fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool {
+        self.is_fork_active_at_timestamp(EthereumHardfork::Prague, timestamp)
+    }
+
+    /// Convenience method to check if [`EthereumHardfork::Byzantium`] is active at a given block
+    /// number.
+    fn is_byzantium_active_at_block(&self, block_number: u64) -> bool {
+        self.fork(EthereumHardfork::Byzantium).active_at_block(block_number)
+    }
+
+    /// Convenience method to check if [`EthereumHardfork::SpuriousDragon`] is active at a given
+    /// block number.
+    fn is_spurious_dragon_active_at_block(&self, block_number: u64) -> bool {
+        self.fork(EthereumHardfork::SpuriousDragon).active_at_block(block_number)
+    }
+
+    /// Convenience method to check if [`EthereumHardfork::Homestead`] is active at a given block
+    /// number.
+    fn is_homestead_active_at_block(&self, block_number: u64) -> bool {
+        self.fork(EthereumHardfork::Homestead).active_at_block(block_number)
+    }
+
+    /// The Paris hardfork (merge) is activated via block number. If we have knowledge of the
+    /// block, this function will return true if the block number is greater than or equal to the
+    /// Paris (merge) block.
+    fn is_paris_active_at_block(&self, block_number: u64) -> Option<bool> {
+        match self.fork(EthereumHardfork::Paris) {
+            ForkCondition::Block(paris_block) => Some(block_number >= paris_block),
+            ForkCondition::TTD { fork_block, .. } => {
+                fork_block.map(|paris_block| block_number >= paris_block)
+            }
+            _ => None,
+        }
+    }
+}
+
+impl EthereumHardforks for ChainHardforks {}
diff --git a/crates/ethereum-forks/src/hardforks/mod.rs b/crates/ethereum-forks/src/hardforks/mod.rs
new file mode 100644
index 000000000..cc89a882c
--- /dev/null
+++ b/crates/ethereum-forks/src/hardforks/mod.rs
@@ -0,0 +1,134 @@
+/// Ethereum helper methods
+mod ethereum;
+pub use ethereum::EthereumHardforks;
+
+/// Optimism helper methods
+mod optimism;
+pub use optimism::OptimismHardforks;
+
+mod bsc;
+pub use bsc::BscHardforks;
+
+use crate::{ForkCondition, Hardfork};
+use rustc_hash::FxHashMap;
+
+/// Generic trait over a set of ordered hardforks
+pub trait Hardforks: Default + Clone {
+    /// Retrieves [`ForkCondition`] from `fork`. If `fork` is not present, returns
+    /// [`ForkCondition::Never`].
+    fn fork<H: Hardfork>(&self, fork: H) -> ForkCondition;
+
+    /// Get an iterator of all hardforks with their respective activation conditions.
+    fn forks_iter(&self) -> impl Iterator<Item = (&dyn Hardfork, ForkCondition)>;
+
+    /// Convenience method to check if a fork is active at a given timestamp.
+    fn is_fork_active_at_timestamp<H: Hardfork>(&self, fork: H, timestamp: u64) -> bool {
+        self.fork(fork).active_at_timestamp(timestamp)
+    }
+
+    /// Convenience method to check if a fork is active at a given block number.
+    fn is_fork_active_at_block<H: Hardfork>(&self, fork: H, block_number: u64) -> bool {
+        self.fork(fork).active_at_block(block_number)
+    }
+}
+
+/// Ordered list of a chain's hardforks that implement [`Hardfork`].
+#[derive(Default, Clone, PartialEq, Eq)]
+pub struct ChainHardforks {
+    forks: Vec<(Box<dyn Hardfork>, ForkCondition)>,
+    map: FxHashMap<&'static str, ForkCondition>,
+}
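The struct keeps both an ordered `Vec` (deterministic iteration in activation order) and a name-keyed `FxHashMap` (constant-time condition lookups). A usage sketch relying on the methods defined just below:

    let forks: ChainHardforks = EthereumHardfork::mainnet().into();
    assert_eq!(forks.fork(EthereumHardfork::London), ForkCondition::Block(12965000));
    // Unknown forks fall back to `ForkCondition::Never` rather than panicking.
    assert_eq!(forks.fork(OptimismHardfork::Bedrock), ForkCondition::Never);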
+
+impl ChainHardforks {
+    /// Creates a new [`ChainHardforks`] from a list which **must be ordered** by activation.
+    ///
+    /// Equivalent Ethereum hardforks **must be included** as well.
+    pub fn new(forks: Vec<(Box<dyn Hardfork>, ForkCondition)>) -> Self {
+        let map = forks.iter().map(|(fork, condition)| (fork.name(), *condition)).collect();
+
+        Self { forks, map }
+    }
+
+    /// Total number of hardforks.
+    pub fn len(&self) -> usize {
+        self.forks.len()
+    }
+
+    /// Checks if the fork list is empty.
+    pub fn is_empty(&self) -> bool {
+        self.forks.is_empty()
+    }
+
+    /// Retrieves [`ForkCondition`] from `fork`. If `fork` is not present, returns
+    /// [`ForkCondition::Never`].
+    pub fn fork<H: Hardfork>(&self, fork: H) -> ForkCondition {
+        self.get(fork).unwrap_or(ForkCondition::Never)
+    }
+
+    /// Retrieves [`ForkCondition`] from `fork` if it exists, otherwise `None`.
+    pub fn get<H: Hardfork>(&self, fork: H) -> Option<ForkCondition> {
+        self.map.get(fork.name()).copied()
+    }
+
+    /// Get an iterator of all hardforks with their respective activation conditions.
+    pub fn forks_iter(&self) -> impl Iterator<Item = (&dyn Hardfork, ForkCondition)> {
+        self.forks.iter().map(|(f, b)| (&**f, *b))
+    }
+
+    /// Get last hardfork from the list.
+    pub fn last(&self) -> Option<(Box<dyn Hardfork>, ForkCondition)> {
+        self.forks.last().map(|(f, b)| (f.clone(), *b))
+    }
+
+    /// Convenience method to check if a fork is active at a given timestamp.
+    pub fn is_fork_active_at_timestamp<H: Hardfork>(&self, fork: H, timestamp: u64) -> bool {
+        self.fork(fork).active_at_timestamp(timestamp)
+    }
+
+    /// Convenience method to check if a fork is active at a given block number.
+    pub fn is_fork_active_at_block<H: Hardfork>(&self, fork: H, block_number: u64) -> bool {
+        self.fork(fork).active_at_block(block_number)
+    }
+
+    /// Inserts `fork` into list, updating with a new [`ForkCondition`] if it already exists.
+    pub fn insert<H: Hardfork>(&mut self, fork: H, condition: ForkCondition) {
+        match self.map.entry(fork.name()) {
+            std::collections::hash_map::Entry::Occupied(mut entry) => {
+                *entry.get_mut() = condition;
+                if let Some((_, inner)) =
+                    self.forks.iter_mut().find(|(inner, _)| inner.name() == fork.name())
+                {
+                    *inner = condition;
+                }
+            }
+            std::collections::hash_map::Entry::Vacant(entry) => {
+                entry.insert(condition);
+                self.forks.push((Box::new(fork), condition));
+            }
+        }
+    }
+
+    /// Removes `fork` from list.
+    pub fn remove<H: Hardfork>(&mut self, fork: H) {
+        self.forks.retain(|(inner_fork, _)| inner_fork.name() != fork.name());
+        self.map.remove(fork.name());
+    }
+}
+
+impl Hardforks for ChainHardforks {
+    fn fork<H: Hardfork>(&self, fork: H) -> ForkCondition {
+        self.fork(fork)
+    }
+
+    fn forks_iter(&self) -> impl Iterator<Item = (&dyn Hardfork, ForkCondition)> {
+        self.forks_iter()
+    }
+}
+
+impl core::fmt::Debug for ChainHardforks {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        f.debug_struct("ChainHardforks")
+            .field("0", &self.forks_iter().map(|(hf, cond)| (hf.name(), cond)).collect::<Vec<_>>())
+            .finish()
+    }
+}
diff --git a/crates/ethereum-forks/src/hardforks/optimism.rs b/crates/ethereum-forks/src/hardforks/optimism.rs
new file mode 100644
index 000000000..39b2bf4ab
--- /dev/null
+++ b/crates/ethereum-forks/src/hardforks/optimism.rs
@@ -0,0 +1,12 @@
+use crate::{ChainHardforks, EthereumHardforks, OptimismHardfork};
+
+/// Extends [`crate::EthereumHardforks`] with optimism helper methods.
+pub trait OptimismHardforks: EthereumHardforks {
+    /// Convenience method to check if [`OptimismHardfork::Bedrock`] is active at a given block
+    /// number.
+ fn is_bedrock_active_at_block(&self, block_number: u64) -> bool { + self.fork(OptimismHardfork::Bedrock).active_at_block(block_number) + } +} + +impl OptimismHardforks for ChainHardforks {} diff --git a/crates/ethereum-forks/src/lib.rs b/crates/ethereum-forks/src/lib.rs index 1a7e0f56e..98ff7e36a 100644 --- a/crates/ethereum-forks/src/lib.rs +++ b/crates/ethereum-forks/src/lib.rs @@ -12,8 +12,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] @@ -24,19 +22,18 @@ mod display; mod forkcondition; mod forkid; mod hardfork; +mod hardforks; mod head; pub use forkid::{ EnrForkIdEntry, ForkFilter, ForkFilterKey, ForkHash, ForkId, ForkTransition, ValidationError, }; -pub use hardfork::Hardfork; +pub use hardfork::{BscHardfork, EthereumHardfork, Hardfork, OptimismHardfork, DEV_HARDFORKS}; pub use head::Head; pub use display::DisplayHardforks; pub use forkcondition::ForkCondition; - -/// Chains hardforks -pub mod chains; +pub use hardforks::*; #[cfg(any(test, feature = "arbitrary"))] pub use arbitrary; diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml new file mode 100644 index 000000000..18b5f9a47 --- /dev/null +++ b/crates/ethereum/cli/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "reth-ethereum-cli" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] diff --git a/crates/ethereum/cli/src/lib.rs b/crates/ethereum/cli/src/lib.rs new file mode 100644 index 000000000..c55b2ab38 --- /dev/null +++ b/crates/ethereum/cli/src/lib.rs @@ -0,0 +1,9 @@ +//! Reth CLI implementation. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index 210f54461..09d9a6636 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -8,7 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use reth_chainspec::{Chain, ChainSpec, Hardfork}; +use reth_chainspec::{ChainSpec, EthereumHardfork, EthereumHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_consensus_common::validation::{ validate_4844_header_standalone, validate_against_parent_4844, @@ -51,7 +51,7 @@ impl EthBeaconConsensus { ) -> Result<(), ConsensusError> { // Determine the parent gas limit, considering elasticity multiplier on the London fork. 
let parent_gas_limit = - if self.chain_spec.fork(Hardfork::London).transitions_at_block(header.number) { + if self.chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number) { parent.gas_limit * self.chain_spec .base_fee_params_at_timestamp(header.timestamp) @@ -153,7 +153,7 @@ impl Consensus for EthBeaconConsensus { ) -> Result<(), ConsensusError> { let is_post_merge = self .chain_spec - .fork(Hardfork::Paris) + .fork(EthereumHardfork::Paris) .active_at_ttd(total_difficulty, header.difficulty); if is_post_merge { @@ -198,10 +198,9 @@ impl Consensus for EthBeaconConsensus { }) } - // Goerli and early OP exception: - // * If the network is goerli pre-merge, ignore the extradata check, since we do not - // support clique. Same goes for OP blocks below Bedrock. - if self.chain_spec.chain != Chain::goerli() && !self.chain_spec.is_optimism() { + // Early OP exception: + // * If the network is pre-Bedrock OP, ignore the extradata check. + if !self.chain_spec.is_optimism() { validate_header_extradata(header)?; } } diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index 1566ec176..523bed077 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -1,4 +1,4 @@ -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; use reth_primitives::{ gas_spent_by_transactions, BlockWithSenders, Bloom, GotExpected, Receipt, Request, B256, @@ -14,6 +14,16 @@ pub fn validate_block_post_execution( receipts: &[Receipt], requests: &[Request], ) -> Result<(), ConsensusError> { + // Check if gas used matches the value set in header. + let cumulative_gas_used = + receipts.last().map(|receipt| receipt.cumulative_gas_used).unwrap_or(0); + if block.gas_used != cumulative_gas_used { + return Err(ConsensusError::BlockGasUsed { + gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, + gas_spent_by_tx: gas_spent_by_transactions(receipts), + }) + } + // Before Byzantium, receipts contained state root that would mean that expensive // operation as hashing that is required for state root got calculated in every // transaction This was replaced with is_success flag. @@ -27,16 +37,6 @@ pub fn validate_block_post_execution( } } - // Check if gas used matches the value set in header. 
- let cumulative_gas_used = - receipts.last().map(|receipt| receipt.cumulative_gas_used).unwrap_or(0); - if block.gas_used != cumulative_gas_used { - return Err(ConsensusError::BlockGasUsed { - gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, - gas_spent_by_tx: gas_spent_by_transactions(receipts), - }) - } - // Validate that the header requests root matches the calculated requests root if chain_spec.is_prague_active_at_timestamp(block.timestamp) { let Some(header_requests_root) = block.header.requests_root else { diff --git a/crates/ethereum/engine-primitives/Cargo.toml b/crates/ethereum/engine-primitives/Cargo.toml index 231c7f640..8a1f25808 100644 --- a/crates/ethereum/engine-primitives/Cargo.toml +++ b/crates/ethereum/engine-primitives/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-evm-ethereum.workspace = true reth-primitives.workspace = true reth-engine-primitives.workspace = true reth-payload-primitives.workspace = true diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index 8976f0caa..f9fde7028 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -2,10 +2,11 @@ use alloy_rlp::Encodable; use reth_chainspec::ChainSpec; +use reth_evm_ethereum::revm_spec_by_timestamp_after_merge; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_primitives::{ - constants::EIP1559_INITIAL_BASE_FEE, revm::config::revm_spec_by_timestamp_after_merge, Address, - BlobTransactionSidecar, Hardfork, Header, SealedBlock, Withdrawals, B256, U256, + constants::EIP1559_INITIAL_BASE_FEE, Address, BlobTransactionSidecar, EthereumHardfork, Header, + SealedBlock, Withdrawals, B256, U256, }; use reth_rpc_types::engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, @@ -266,7 +267,7 @@ impl PayloadBuilderAttributes for EthPayloadBuilderAttributes { // If we are on the London fork boundary, we need to multiply the parent's gas limit by the // elasticity multiplier to get the new gas limit. 
-        if chain_spec.fork(Hardfork::London).transitions_at_block(parent.number + 1) {
+        if chain_spec.fork(EthereumHardfork::London).transitions_at_block(parent.number + 1) {
             let elasticity_multiplier =
                 chain_spec.base_fee_params_at_timestamp(self.timestamp()).elasticity_multiplier;
diff --git a/crates/ethereum/engine/Cargo.toml b/crates/ethereum/engine/Cargo.toml
new file mode 100644
index 000000000..05fbc4386
--- /dev/null
+++ b/crates/ethereum/engine/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "reth-ethereum-engine"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+
+[lints]
+workspace = true
+
+[dependencies]
+# reth
+reth-beacon-consensus.workspace = true
+reth-chainspec.workspace = true
+reth-db-api.workspace = true
+reth-engine-tree.workspace = true
+reth-ethereum-engine-primitives.workspace = true
+reth-network-p2p.workspace = true
+reth-stages-api.workspace = true
+reth-tasks.workspace = true
+
+# async
+futures.workspace = true
+pin-project.workspace = true
+tokio = { workspace = true, features = ["sync"] }
+tokio-stream.workspace = true
+
+[dev-dependencies]
+reth-engine-tree = { workspace = true, features = ["test-utils"] }
diff --git a/crates/ethereum/engine/src/lib.rs b/crates/ethereum/engine/src/lib.rs
new file mode 100644
index 000000000..8cb60de59
--- /dev/null
+++ b/crates/ethereum/engine/src/lib.rs
@@ -0,0 +1,12 @@
+//! Ethereum engine implementation.
+
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
+    html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
+    issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
+)]
+#![cfg_attr(not(test), warn(unused_crate_dependencies))]
+#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+
+/// Ethereum engine service.
+pub mod service;
diff --git a/crates/ethereum/engine/src/service.rs b/crates/ethereum/engine/src/service.rs
new file mode 100644
index 000000000..0abf352ee
--- /dev/null
+++ b/crates/ethereum/engine/src/service.rs
@@ -0,0 +1,138 @@
+use futures::{ready, StreamExt};
+use pin_project::pin_project;
+use reth_beacon_consensus::{BeaconEngineMessage, EthBeaconConsensus};
+use reth_chainspec::ChainSpec;
+use reth_db_api::database::Database;
+use reth_engine_tree::{
+    backfill::PipelineSync,
+    chain::ChainOrchestrator,
+    download::BasicBlockDownloader,
+    engine::{EngineApiEvent, EngineApiRequestHandler, EngineHandler, FromEngine},
+};
+use reth_ethereum_engine_primitives::EthEngineTypes;
+use reth_network_p2p::{bodies::client::BodiesClient, headers::client::HeadersClient};
+use reth_stages_api::Pipeline;
+use reth_tasks::TaskSpawner;
+use std::{
+    future::Future,
+    pin::Pin,
+    sync::{mpsc::Sender, Arc},
+    task::{Context, Poll},
+};
+use tokio::sync::mpsc::UnboundedReceiver;
+use tokio_stream::wrappers::UnboundedReceiverStream;
+
+/// Alias for Ethereum chain orchestrator.
+type EthServiceType<DB, Client> = ChainOrchestrator<
+    EngineHandler<
+        EngineApiRequestHandler<EthEngineTypes>,
+        UnboundedReceiverStream<BeaconEngineMessage<EthEngineTypes>>,
+        BasicBlockDownloader<Client>,
+    >,
+    PipelineSync<DB>,
+>;
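Since `EthService` (below) implements `Future`, the orchestrator loop is driven simply by awaiting or spawning it. A hedged sketch, assuming a fully constructed `service` and a tokio runtime:

    // let service: EthService<DB, Client> = EthService::new(/* ... */);
    // tokio::spawn(async move {
    //     if let Err(err) = service.await {
    //         eprintln!("eth service exited with error: {err:?}");
    //     }
    // });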
+
+/// The type that drives the Ethereum chain forward and communicates progress.
+#[pin_project]
+#[allow(missing_debug_implementations)]
+pub struct EthService<DB, Client>
+where
+    DB: Database + 'static,
+    Client: HeadersClient + BodiesClient + Clone + Unpin + 'static,
+{
+    orchestrator: EthServiceType<DB, Client>,
+}
+
+impl<DB, Client> EthService<DB, Client>
+where
+    DB: Database + 'static,
+    Client: HeadersClient + BodiesClient + Clone + Unpin + 'static,
+{
+    /// Constructor for `EthService`.
+    pub fn new(
+        chain_spec: Arc<ChainSpec>,
+        client: Client,
+        to_tree: Sender<FromEngine<BeaconEngineMessage<EthEngineTypes>>>,
+        from_tree: UnboundedReceiver<EngineApiEvent>,
+        incoming_requests: UnboundedReceiverStream<BeaconEngineMessage<EthEngineTypes>>,
+        pipeline: Pipeline<DB>,
+        pipeline_task_spawner: Box<dyn TaskSpawner>,
+    ) -> Self {
+        let consensus = Arc::new(EthBeaconConsensus::new(chain_spec));
+        let downloader = BasicBlockDownloader::new(client, consensus);
+
+        let engine_handler = EngineApiRequestHandler::new(to_tree, from_tree);
+        let handler = EngineHandler::new(engine_handler, downloader, incoming_requests);
+
+        let backfill_sync = PipelineSync::new(pipeline, pipeline_task_spawner);
+
+        Self { orchestrator: ChainOrchestrator::new(handler, backfill_sync) }
+    }
+}
+
+impl<DB, Client> Future for EthService<DB, Client>
+where
+    DB: Database + 'static,
+    Client: HeadersClient + BodiesClient + Clone + Unpin + 'static,
+{
+    type Output = Result<(), EthServiceError>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        // Call poll on the inner orchestrator.
+        let mut orchestrator = self.project().orchestrator;
+        loop {
+            match ready!(StreamExt::poll_next_unpin(&mut orchestrator, cx)) {
+                Some(_event) => continue,
+                None => return Poll::Ready(Ok(())),
+            }
+        }
+    }
+}
+
+/// Potential error returned by `EthService`.
+#[derive(Debug)]
+pub struct EthServiceError {}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use reth_chainspec::{ChainSpecBuilder, MAINNET};
+    use reth_engine_tree::test_utils::TestPipelineBuilder;
+    use reth_ethereum_engine_primitives::EthEngineTypes;
+    use reth_network_p2p::test_utils::TestFullBlockClient;
+    use reth_tasks::TokioTaskExecutor;
+    use std::sync::{mpsc::channel, Arc};
+    use tokio::sync::mpsc::unbounded_channel;
+
+    #[test]
+    fn eth_chain_orchestrator_build() {
+        let chain_spec = Arc::new(
+            ChainSpecBuilder::default()
+                .chain(MAINNET.chain)
+                .genesis(MAINNET.genesis.clone())
+                .paris_activated()
+                .build(),
+        );
+
+        let client = TestFullBlockClient::default();
+
+        let (_tx, rx) = unbounded_channel::<BeaconEngineMessage<EthEngineTypes>>();
+        let incoming_requests = UnboundedReceiverStream::new(rx);
+
+        let pipeline = TestPipelineBuilder::new().build(chain_spec.clone());
+        let pipeline_task_spawner = Box::<TokioTaskExecutor>::default();
+
+        let (to_tree_tx, _to_tree_rx) = channel();
+        let (_from_tree_tx, from_tree_rx) = unbounded_channel();
+
+        let _eth_chain_orchestrator = EthService::new(
+            chain_spec,
+            client,
+            to_tree_tx,
+            from_tree_rx,
+            incoming_requests,
+            pipeline,
+            pipeline_task_spawner,
+        );
+    }
+}
diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml
index 1d996e5d3..7ea2e4b58 100644
--- a/crates/ethereum/evm/Cargo.toml
+++ b/crates/ethereum/evm/Cargo.toml
@@ -13,6 +13,7 @@ workspace = true
 [dependencies]
 # Reth
 reth-chainspec.workspace = true
+reth-ethereum-forks.workspace = true
 reth-evm.workspace = true
 reth-primitives.workspace = true
 reth-revm.workspace = true
diff --git a/crates/ethereum/evm/src/config.rs b/crates/ethereum/evm/src/config.rs
new file mode 100644
index 000000000..77082b1f7
--- /dev/null
+++ b/crates/ethereum/evm/src/config.rs
@@ -0,0 +1,220 @@
+use reth_chainspec::{ChainSpec, EthereumHardforks};
+use reth_ethereum_forks::{EthereumHardfork, Head};
+
+/// Returns the spec id at the given timestamp.
+/// +/// Note: This is only intended to be used after the merge, when hardforks are activated by +/// timestamp. +pub fn revm_spec_by_timestamp_after_merge( + chain_spec: &ChainSpec, + timestamp: u64, +) -> revm_primitives::SpecId { + if chain_spec.is_prague_active_at_timestamp(timestamp) { + revm_primitives::PRAGUE + } else if chain_spec.is_cancun_active_at_timestamp(timestamp) { + revm_primitives::CANCUN + } else if chain_spec.is_shanghai_active_at_timestamp(timestamp) { + revm_primitives::SHANGHAI + } else { + revm_primitives::MERGE + } +} + +/// return `revm_spec` from spec configuration. +pub fn revm_spec(chain_spec: &ChainSpec, block: &Head) -> revm_primitives::SpecId { + if chain_spec.fork(EthereumHardfork::Prague).active_at_head(block) { + revm_primitives::PRAGUE + } else if chain_spec.fork(EthereumHardfork::Cancun).active_at_head(block) { + revm_primitives::CANCUN + } else if chain_spec.fork(EthereumHardfork::Shanghai).active_at_head(block) { + revm_primitives::SHANGHAI + } else if chain_spec.fork(EthereumHardfork::Paris).active_at_head(block) { + revm_primitives::MERGE + } else if chain_spec.fork(EthereumHardfork::London).active_at_head(block) { + revm_primitives::LONDON + } else if chain_spec.fork(EthereumHardfork::Berlin).active_at_head(block) { + revm_primitives::BERLIN + } else if chain_spec.fork(EthereumHardfork::Istanbul).active_at_head(block) { + revm_primitives::ISTANBUL + } else if chain_spec.fork(EthereumHardfork::Petersburg).active_at_head(block) { + revm_primitives::PETERSBURG + } else if chain_spec.fork(EthereumHardfork::Byzantium).active_at_head(block) { + revm_primitives::BYZANTIUM + } else if chain_spec.fork(EthereumHardfork::SpuriousDragon).active_at_head(block) { + revm_primitives::SPURIOUS_DRAGON + } else if chain_spec.fork(EthereumHardfork::Tangerine).active_at_head(block) { + revm_primitives::TANGERINE + } else if chain_spec.fork(EthereumHardfork::Homestead).active_at_head(block) { + revm_primitives::HOMESTEAD + } else if chain_spec.fork(EthereumHardfork::Frontier).active_at_head(block) { + revm_primitives::FRONTIER + } else { + panic!( + "invalid hardfork chainspec: expected at least one hardfork, got {:?}", + chain_spec.hardforks + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::U256; + use reth_chainspec::{ChainSpecBuilder, MAINNET}; + + #[test] + fn test_revm_spec_by_timestamp_after_merge() { + assert_eq!( + revm_spec_by_timestamp_after_merge( + &ChainSpecBuilder::mainnet().cancun_activated().build(), + 0 + ), + revm_primitives::CANCUN + ); + assert_eq!( + revm_spec_by_timestamp_after_merge( + &ChainSpecBuilder::mainnet().shanghai_activated().build(), + 0 + ), + revm_primitives::SHANGHAI + ); + assert_eq!( + revm_spec_by_timestamp_after_merge(&ChainSpecBuilder::mainnet().build(), 0), + revm_primitives::MERGE + ); + } + + #[test] + fn test_to_revm_spec() { + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().cancun_activated().build(), &Head::default()), + revm_primitives::CANCUN + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().shanghai_activated().build(), &Head::default()), + revm_primitives::SHANGHAI + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().paris_activated().build(), &Head::default()), + revm_primitives::MERGE + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().london_activated().build(), &Head::default()), + revm_primitives::LONDON + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().berlin_activated().build(), &Head::default()), + revm_primitives::BERLIN + ); + assert_eq!( + 
revm_spec(&ChainSpecBuilder::mainnet().istanbul_activated().build(), &Head::default()), + revm_primitives::ISTANBUL + ); + assert_eq!( + revm_spec( + &ChainSpecBuilder::mainnet().petersburg_activated().build(), + &Head::default() + ), + revm_primitives::PETERSBURG + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().byzantium_activated().build(), &Head::default()), + revm_primitives::BYZANTIUM + ); + assert_eq!( + revm_spec( + &ChainSpecBuilder::mainnet().spurious_dragon_activated().build(), + &Head::default() + ), + revm_primitives::SPURIOUS_DRAGON + ); + assert_eq!( + revm_spec( + &ChainSpecBuilder::mainnet().tangerine_whistle_activated().build(), + &Head::default() + ), + revm_primitives::TANGERINE + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().homestead_activated().build(), &Head::default()), + revm_primitives::HOMESTEAD + ); + assert_eq!( + revm_spec(&ChainSpecBuilder::mainnet().frontier_activated().build(), &Head::default()), + revm_primitives::FRONTIER + ); + } + + #[test] + fn test_eth_spec() { + assert_eq!( + revm_spec(&MAINNET, &Head { timestamp: 1710338135, ..Default::default() }), + revm_primitives::CANCUN + ); + assert_eq!( + revm_spec(&MAINNET, &Head { timestamp: 1681338455, ..Default::default() }), + revm_primitives::SHANGHAI + ); + + assert_eq!( + revm_spec( + &MAINNET, + &Head { + total_difficulty: U256::from(58_750_000_000_000_000_000_010_u128), + difficulty: U256::from(10_u128), + ..Default::default() + } + ), + revm_primitives::MERGE + ); + // TTD trumps the block number + assert_eq!( + revm_spec( + &MAINNET, + &Head { + number: 15537394 - 10, + total_difficulty: U256::from(58_750_000_000_000_000_000_010_u128), + difficulty: U256::from(10_u128), + ..Default::default() + } + ), + revm_primitives::MERGE + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 15537394 - 10, ..Default::default() }), + revm_primitives::LONDON + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 12244000 + 10, ..Default::default() }), + revm_primitives::BERLIN + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 12244000 - 10, ..Default::default() }), + revm_primitives::ISTANBUL + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 7280000 + 10, ..Default::default() }), + revm_primitives::PETERSBURG + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 7280000 - 10, ..Default::default() }), + revm_primitives::BYZANTIUM + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 2675000 + 10, ..Default::default() }), + revm_primitives::SPURIOUS_DRAGON + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 2675000 - 10, ..Default::default() }), + revm_primitives::TANGERINE + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 1150000 + 10, ..Default::default() }), + revm_primitives::HOMESTEAD + ); + assert_eq!( + revm_spec(&MAINNET, &Head { number: 1150000 - 10, ..Default::default() }), + revm_primitives::FRONTIER + ); + } +} diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 7276e14f4..45ed8c874 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -4,40 +4,37 @@ use crate::{ dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, EthEvmConfig, }; -use reth_chainspec::{ChainSpec, MAINNET}; +use reth_chainspec::{ChainSpec, EthereumHardforks, MAINNET}; use reth_ethereum_consensus::validate_block_post_execution; use reth_evm::{ execute::{ BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, BlockValidationError, Executor, 
ProviderError, }, + system_calls::{ + apply_beacon_root_contract_call, apply_consolidation_requests_contract_call, + apply_withdrawal_requests_contract_call, + }, ConfigureEvm, }; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ - BlockNumber, BlockWithSenders, Hardfork, Header, Receipt, Request, Withdrawals, U256, + BlockNumber, BlockWithSenders, EthereumHardfork, Header, Receipt, Request, U256, }; use reth_prune_types::PruneModes; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, - state_change::{ - apply_beacon_root_contract_call, apply_blockhashes_update, - apply_withdrawal_requests_contract_call, post_block_balance_increments, - }, + state_change::{apply_blockhashes_update, post_block_balance_increments}, Evm, State, }; use revm_primitives::{ db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, + BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, ResultAndState, }; -#[cfg(not(feature = "std"))] -use alloc::{sync::Arc, vec, vec::Vec}; - #[cfg(feature = "std")] -use std::sync::Arc; - +use std::{fmt::Display, sync::Arc, vec, vec::Vec}; /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] pub struct EthExecutorProvider { @@ -70,7 +67,7 @@ where { fn eth_executor(&self, db: DB) -> EthBlockExecutor where - DB: Database, + DB: Database>, { EthBlockExecutor::new( self.chain_spec.clone(), @@ -84,25 +81,27 @@ impl BlockExecutorProvider for EthExecutorProvider where EvmConfig: ConfigureEvm, { - type Executor> = EthBlockExecutor; + type Executor + Display>> = + EthBlockExecutor; - type BatchExecutor> = EthBatchExecutor; + type BatchExecutor + Display>> = + EthBatchExecutor; fn executor(&self, db: DB) -> Self::Executor where - DB: Database, + DB: Database + Display>, { self.eth_executor(db) } - fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor + fn batch_executor(&self, db: DB) -> Self::BatchExecutor where - DB: Database, + DB: Database + Display>, { let executor = self.eth_executor(db); EthBatchExecutor { executor, - batch_record: BlockBatchRecord::new(prune_modes), + batch_record: BlockBatchRecord::default(), stats: BlockExecutorStats::default(), } } @@ -145,10 +144,12 @@ where mut evm: Evm<'_, Ext, &mut State>, ) -> Result where - DB: Database, + DB: Database, + DB::Error: Into + std::fmt::Display, { // apply pre execution changes apply_beacon_root_contract_call( + &self.evm_config, &self.chain_spec, block.timestamp, block.number, @@ -178,14 +179,21 @@ where .into()) } - EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender); + self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); // Execute transaction. 
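
The executor hunks here relax the database bound from a fixed `Error = ProviderError` to `Error: Into<ProviderError> + Display`, so custom databases can keep their own error types. A self-contained sketch of why both bounds are used (toy error types, not reth's):

```rust
use std::fmt;

#[derive(Debug)]
struct ProviderError(String);

#[derive(Debug)]
struct MyDbError(&'static str);

impl fmt::Display for MyDbError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "db failure: {}", self.0)
    }
}

impl From<MyDbError> for ProviderError {
    fn from(e: MyDbError) -> Self {
        ProviderError(e.to_string())
    }
}

// `Display` lets the executor log the raw error; `Into<ProviderError>`
// normalizes it at the boundary.
fn execute<E: Into<ProviderError> + fmt::Display>(res: Result<(), E>) -> Result<(), ProviderError> {
    res.map_err(|e| {
        eprintln!("execution failed: {e}"); // uses Display
        e.into()                            // uses Into<ProviderError>
    })
}

fn main() {
    let err = execute(Err(MyDbError("missing trie node"))).unwrap_err();
    println!("{}", err.0);
}
```
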
let ResultAndState { result, state } = evm.transact().map_err(move |err| { + let new_err = match err { + EVMError::Transaction(e) => EVMError::Transaction(e), + EVMError::Header(e) => EVMError::Header(e), + EVMError::Database(e) => EVMError::Database(e.into()), + EVMError::Custom(e) => EVMError::Custom(e), + EVMError::Precompile(e) => EVMError::Precompile(e), + }; // Ensure hash is calculated for error log, if not already done BlockValidationError::EVM { hash: transaction.recalculate_hash(), - error: err.into(), + error: Box::new(new_err), } })?; evm.db_mut().commit(state); @@ -215,9 +223,14 @@ where crate::eip6110::parse_deposits_from_receipts(&self.chain_spec, &receipts)?; // Collect all EIP-7685 requests - let withdrawal_requests = apply_withdrawal_requests_contract_call(&mut evm)?; + let withdrawal_requests = + apply_withdrawal_requests_contract_call(&self.evm_config, &mut evm)?; - [deposit_requests, withdrawal_requests].concat() + // Collect all EIP-7251 requests + let consolidation_requests = + apply_consolidation_requests_contract_call(&self.evm_config, &mut evm)?; + + [deposit_requests, withdrawal_requests, consolidation_requests].concat() } else { vec![] }; @@ -260,7 +273,7 @@ impl EthBlockExecutor { impl EthBlockExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + Display>, { /// Configures a new evm configuration and block environment for the given block. /// @@ -270,7 +283,7 @@ where fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); let mut block_env = BlockEnv::default(); - EvmConfig::fill_cfg_and_block_env( + self.executor.evm_config.fill_cfg_and_block_env( &mut cfg, &mut block_env, self.chain_spec(), @@ -322,19 +335,11 @@ where block: &BlockWithSenders, total_difficulty: U256, ) -> Result<(), BlockExecutionError> { - let mut balance_increments = post_block_balance_increments( - self.chain_spec(), - block.number, - block.difficulty, - block.beneficiary, - block.timestamp, - total_difficulty, - &block.ommers, - block.withdrawals.as_ref().map(Withdrawals::as_ref), - ); + let mut balance_increments = + post_block_balance_increments(self.chain_spec(), block, total_difficulty); // Irregular state change at Ethereum DAO hardfork - if self.chain_spec().fork(Hardfork::Dao).transitions_at_block(block.number) { + if self.chain_spec().fork(EthereumHardfork::Dao).transitions_at_block(block.number) { // drain balances from hardcoded addresses. let drained_balance: u128 = self .state @@ -358,19 +363,17 @@ where impl Executor for EthBlockExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + std::fmt::Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; type Output = BlockExecutionOutput; type Error = BlockExecutionError; - /// Executes the block and commits the state changes. + /// Executes the block and commits the changes to the internal state. /// /// Returns the receipts of the transactions in the block. /// /// Returns an error if the block could not be executed or failed verification. - /// - /// State changes are committed to the database. 
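
The `transact` error handling above rebuilds the error variant by variant because the error enum is generic over the database error type: only the `Database` payload actually needs conversion. A reduced sketch of that remapping with a toy two-variant enum standing in for revm's `EVMError`:

```rust
// Toy stand-in for a generic error enum whose DB variant must be converted.
enum EvmError<E> {
    Transaction(String),
    Database(E),
}

fn map_db_err<E, F: From<E>>(err: EvmError<E>) -> EvmError<F> {
    match err {
        // Non-database variants carry no `E`, so they move across unchanged.
        EvmError::Transaction(msg) => EvmError::Transaction(msg),
        // Only the database payload gets converted.
        EvmError::Database(e) => EvmError::Database(F::from(e)),
    }
}

fn main() {
    let e: EvmError<&str> = EvmError::Database("missing account");
    let mapped: EvmError<String> = map_db_err(e);
    if let EvmError::Database(s) = mapped {
        println!("{s}");
    }
    let t: EvmError<&str> = EvmError::Transaction("nonce too low".to_string());
    let _t: EvmError<String> = map_db_err(t);
}
```
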
fn execute(mut self, input: Self::Input<'_>) -> Result { let BlockExecutionInput { block, total_difficulty } = input; let EthExecuteOutput { receipts, requests, gas_used } = @@ -414,7 +417,7 @@ impl EthBatchExecutor { impl BatchExecutor for EthBatchExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; type Output = ExecutionOutcome; @@ -459,6 +462,10 @@ where self.batch_record.set_tip(tip); } + fn set_prune_modes(&mut self, prune_modes: PruneModes) { + self.batch_record.set_prune_modes(prune_modes); + } + fn size_hint(&self) -> Option { Some(self.executor.state.bundle_state.size_hint()) } @@ -537,7 +544,7 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) .build(), ); @@ -554,6 +561,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }, senders: vec![], @@ -585,6 +593,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }, senders: vec![], @@ -634,7 +643,7 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) .build(), ); @@ -642,7 +651,7 @@ mod tests { // attempt to execute an empty block with parent beacon block root, this should not fail provider - .batch_executor(StateProviderDatabase::new(&db), PruneModes::none()) + .batch_executor(StateProviderDatabase::new(&db)) .execute_and_verify_one( ( &BlockWithSenders { @@ -651,6 +660,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }, senders: vec![], @@ -677,7 +687,7 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) .build(), ); @@ -692,8 +702,7 @@ mod tests { ..Header::default() }; - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); // attempt to execute an empty block with parent beacon block root, this should not fail executor @@ -705,6 +714,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }, senders: vec![], @@ -730,14 +740,13 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(0)) + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(0)) .build(), ); let mut header = chain_spec.genesis_header(); let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); // attempt to execute the genesis block with non-zero parent beacon block root, expect err header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); @@ -750,6 +759,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }, senders: vec![], @@ -777,6 +787,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + sidecars: None, 
requests: None, }, senders: vec![], @@ -817,15 +828,14 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Cancun, ForkCondition::Timestamp(1)) + .with_fork(EthereumHardfork::Cancun, ForkCondition::Timestamp(1)) .build(), ); let provider = executor_provider(chain_spec); // execute header - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); // Now execute a block with the fixed header, ensure that it does not fail executor @@ -837,6 +847,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }, senders: vec![], @@ -887,13 +898,12 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Prague, ForkCondition::Never) + .with_fork(EthereumHardfork::Prague, ForkCondition::Never) .build(), ); let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); // construct the header for block one let header = Header { timestamp: 1, number: 1, ..Header::default() }; @@ -908,6 +918,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }, senders: vec![], @@ -940,14 +951,13 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Prague, ForkCondition::Timestamp(0)) + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) .build(), ); let header = chain_spec.genesis_header(); let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); // attempt to execute genesis block, this should not fail executor @@ -959,6 +969,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }, senders: vec![], @@ -992,7 +1003,7 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Prague, ForkCondition::Timestamp(1)) + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(1)) .build(), ); @@ -1004,8 +1015,7 @@ mod tests { ..Header::default() }; let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); // attempt to execute the fork activation block, this should not fail executor @@ -1017,6 +1027,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }, senders: vec![], @@ -1055,13 +1066,12 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Prague, ForkCondition::Timestamp(1)) + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(1)) .build(), ); let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); let header = Header { parent_hash: B256::random(), @@ -1081,6 +1091,7 @@ mod tests { body: vec![], ommers: vec![], 
withdrawals: None, + sidecars: None, requests: None, }, senders: vec![], @@ -1114,7 +1125,7 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Prague, ForkCondition::Timestamp(0)) + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) .build(), ); @@ -1123,8 +1134,7 @@ mod tests { let header_hash = header.hash_slow(); let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); // attempt to execute the genesis block, this should not fail executor @@ -1136,6 +1146,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }, senders: vec![], @@ -1175,6 +1186,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }, senders: vec![], @@ -1217,6 +1229,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }, senders: vec![], @@ -1251,7 +1264,7 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Prague, ForkCondition::Timestamp(0)) + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) .build(), ); @@ -1306,6 +1319,7 @@ mod tests { body: vec![tx], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, } .with_recovered_senders() @@ -1332,7 +1346,7 @@ mod tests { let chain_spec = Arc::new( ChainSpecBuilder::from(&*MAINNET) .shanghai_activated() - .with_fork(Hardfork::Prague, ForkCondition::Timestamp(0)) + .with_fork(EthereumHardfork::Prague, ForkCondition::Timestamp(0)) .build(), ); @@ -1394,6 +1408,7 @@ mod tests { body: vec![tx], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, } .with_recovered_senders() diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 4134849ea..cd8398ebe 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -12,14 +12,14 @@ #[cfg(not(feature = "std"))] extern crate alloc; -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, Head}; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::{ - revm::{config::revm_spec, env::fill_tx_env}, - revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, - Address, Head, Header, TransactionSigned, U256, -}; +use reth_primitives::{transaction::FillTxEnv, Address, Header, TransactionSigned, U256}; use reth_revm::{Database, EvmBuilder}; +use revm_primitives::{AnalysisKind, Bytes, CfgEnvWithHandlerCfg, Env, TxEnv, TxKind}; + +mod config; +pub use config::{revm_spec, revm_spec_by_timestamp_after_merge}; pub mod execute; @@ -35,19 +35,16 @@ pub mod eip6110; pub struct EthEvmConfig; impl ConfigureEvmEnv for EthEvmConfig { - fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { - fill_tx_env(tx_env, transaction, sender) - } - fn fill_cfg_env( + &self, cfg_env: &mut CfgEnvWithHandlerCfg, chain_spec: &ChainSpec, header: &Header, total_difficulty: U256, ) { - let spec_id = revm_spec( + let spec_id = config::revm_spec( chain_spec, - Head { + &Head { number: header.number, timestamp: header.timestamp, difficulty: header.difficulty, @@ -61,6 +58,50 @@ impl ConfigureEvmEnv for EthEvmConfig { cfg_env.handler_cfg.spec_id = spec_id; } + + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { + 
transaction.fill_tx_env(tx_env, sender); + } + + fn fill_tx_env_system_contract_call( + &self, + env: &mut Env, + caller: Address, + contract: Address, + data: Bytes, + ) { + #[allow(clippy::needless_update)] // side-effect of optimism fields + let tx = TxEnv { + caller, + transact_to: TxKind::Call(contract), + // Explicitly set nonce to None so revm does not do any nonce checks + nonce: None, + gas_limit: 30_000_000, + value: U256::ZERO, + data, + // Setting the gas price to zero enforces that no value is transferred as part of the + // call, and that the call will not count against the block's gas limit + gas_price: U256::ZERO, + // The chain ID check is not relevant here and is disabled if set to None + chain_id: None, + // Setting the gas priority fee to None ensures the effective gas price is derived from + // the `gas_price` field, which we need to be zero + gas_priority_fee: None, + access_list: Vec::new(), + // blob fields can be None for this tx + blob_hashes: Vec::new(), + max_fee_per_blob_gas: None, + // TODO remove this once this crate is no longer built with optimism + ..Default::default() + }; + env.tx = tx; + + // ensure the block gas limit is >= the tx + env.block.gas_limit = U256::from(env.tx.gas_limit); + + // disable the base fee check for this call by setting the base fee to zero + env.block.basefee = U256::ZERO; + } } impl ConfigureEvm for EthEvmConfig { @@ -77,7 +118,12 @@ impl ConfigureEvm for EthEvmConfig { #[cfg(test)] mod tests { use super::*; - use reth_primitives::revm_primitives::{BlockEnv, CfgEnv, SpecId}; + use reth_chainspec::ChainSpec; + use reth_primitives::{ + revm_primitives::{BlockEnv, CfgEnv, SpecId}, + Header, U256, + }; + use revm_primitives::CfgEnvWithHandlerCfg; #[test] #[ignore] @@ -88,7 +134,7 @@ mod tests { let chain_spec = ChainSpec::default(); let total_difficulty = U256::ZERO; - EthEvmConfig::fill_cfg_and_block_env( + EthEvmConfig::default().fill_cfg_and_block_env( &mut cfg_env, &mut block_env, &chain_spec, diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 54e54a0eb..f053b35b9 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -43,3 +43,7 @@ futures.workspace = true tokio.workspace = true futures-util.workspace = true serde_json.workspace = true + +[features] +default = [] +test-utils = ["reth-node-builder/test-utils"] diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index 990c6f0bf..0e289cfd3 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -1,12 +1,14 @@ -use crate::utils::EthNode; +use std::sync::Arc; + use alloy_genesis::Genesis; use alloy_primitives::{b256, hex}; use futures::StreamExt; -use reth::rpc::eth::EthTransactions; +use reth::rpc::api::eth::helpers::EthTransactions; use reth_chainspec::ChainSpec; use reth_e2e_test_utils::setup; use reth_provider::CanonStateSubscriptions; -use std::sync::Arc; + +use crate::utils::EthNode; #[tokio::test] async fn can_run_dev_node() -> eyre::Result<()> { @@ -17,7 +19,7 @@ async fn can_run_dev_node() -> eyre::Result<()> { Ok(()) } -async fn assert_chain_advances(mut node: EthNode) { +async fn assert_chain_advances(node: EthNode) { let mut notifications = node.inner.provider.canonical_state_stream(); // submit tx through rpc diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 1e53180d9..b229d7630 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -10,12 +10,16 @@ 
#![allow(clippy::useless_let_if_seq)] use reth_basic_payload_builder::{ - commit_withdrawals, is_better_payload, post_block_withdrawal_requests_contract_call, - pre_block_beacon_root_contract_call, BuildArguments, BuildOutcome, PayloadBuilder, + commit_withdrawals, is_better_payload, BuildArguments, BuildOutcome, PayloadBuilder, PayloadConfig, WithdrawalsOutcome, }; use reth_errors::RethError; -use reth_evm::ConfigureEvm; +use reth_evm::{ + system_calls::{ + post_block_withdrawal_requests_contract_call, pre_block_beacon_root_contract_call, + }, + ConfigureEvm, +}; use reth_evm_ethereum::{eip6110::parse_deposits_from_receipts, EthEvmConfig}; use reth_execution_types::ExecutionOutcome; use reth_payload_builder::{ @@ -27,8 +31,8 @@ use reth_primitives::{ }, eip4844::calculate_excess_blob_gas, proofs::{self, calculate_requests_root}, - revm::env::tx_env_with_recovered, - Block, Header, IntoRecoveredTransaction, Receipt, EMPTY_OMMER_ROOT_HASH, U256, + Block, EthereumHardforks, Header, IntoRecoveredTransaction, Receipt, EMPTY_OMMER_ROOT_HASH, + U256, }; use reth_provider::StateProviderFactory; use reth_revm::{database::StateProviderDatabase, state_change::apply_blockhashes_update}; @@ -114,11 +118,13 @@ where // apply eip-4788 pre block contract call pre_block_beacon_root_contract_call( &mut db, + &self.evm_config, &chain_spec, - block_number, &initialized_cfg, &initialized_block_env, - &attributes, + block_number, + attributes.timestamp, + attributes.parent_beacon_block_root, ) .map_err(|err| { warn!(target: "payload_builder", @@ -126,7 +132,7 @@ where %err, "failed to apply beacon root contract call for empty payload" ); - err + PayloadBuilderError::Internal(err.into()) })?; // apply eip-2935 blockhashes update @@ -189,22 +195,25 @@ where } // Calculate the requests and the requests root. - let (requests, requests_root) = - if chain_spec.is_prague_active_at_timestamp(attributes.timestamp) { - // We do not calculate the EIP-6110 deposit requests because there are no - // transactions in an empty payload. - let withdrawal_requests = post_block_withdrawal_requests_contract_call( - &mut db, - &initialized_cfg, - &initialized_block_env, - )?; - - let requests = withdrawal_requests; - let requests_root = calculate_requests_root(&requests); - (Some(requests.into()), Some(requests_root)) - } else { - (None, None) - }; + let (requests, requests_root) = if chain_spec + .is_prague_active_at_timestamp(attributes.timestamp) + { + // We do not calculate the EIP-6110 deposit requests because there are no + // transactions in an empty payload. 
+            let withdrawal_requests = post_block_withdrawal_requests_contract_call::<EvmConfig, _>(
+                &self.evm_config,
+                &mut db,
+                &initialized_cfg,
+                &initialized_block_env,
+            )
+            .map_err(|err| PayloadBuilderError::Internal(err.into()))?;
+
+            let requests = withdrawal_requests;
+            let requests_root = calculate_requests_root(&requests);
+            (Some(requests.into()), Some(requests_root))
+        } else {
+            (None, None)
+        };

         let header = Header {
             parent_hash: parent_block.hash(),
@@ -230,7 +239,8 @@
             requests_root,
         };

-        let block = Block { header, body: vec![], ommers: vec![], withdrawals, requests };
+        let block =
+            Block { header, body: vec![], ommers: vec![], withdrawals, sidecars: None, requests };
         let sealed_block = block.seal_slow();

         Ok(EthBuiltPayload::new(attributes.payload_id(), sealed_block, U256::ZERO))
@@ -288,12 +298,22 @@
     // apply eip-4788 pre block contract call
     pre_block_beacon_root_contract_call(
         &mut db,
+        &evm_config,
         &chain_spec,
-        block_number,
         &initialized_cfg,
         &initialized_block_env,
-        &attributes,
-    )?;
+        block_number,
+        attributes.timestamp,
+        attributes.parent_beacon_block_root,
+    )
+    .map_err(|err| {
+        warn!(target: "payload_builder",
+            parent_hash=%parent_block.hash(),
+            %err,
+            "failed to apply beacon root contract call for payload"
+        );
+        PayloadBuilderError::Internal(err.into())
+    })?;

     // apply eip-2935 blockhashes update
     apply_blockhashes_update(
@@ -342,7 +362,7 @@
         let env = EnvWithHandlerCfg::new_with_cfg_env(
             initialized_cfg.clone(),
             initialized_block_env.clone(),
-            tx_env_with_recovered(&tx),
+            evm_config.tx_env(&tx),
         );

         // Configure the environment for the block.
@@ -426,10 +446,12 @@
     let deposit_requests = parse_deposits_from_receipts(&chain_spec, receipts.iter().flatten())
         .map_err(|err| PayloadBuilderError::Internal(RethError::Execution(err.into())))?;
     let withdrawal_requests = post_block_withdrawal_requests_contract_call(
+        &evm_config,
         &mut db,
         &initialized_cfg,
         &initialized_block_env,
-    )?;
+    )
+    .map_err(|err| PayloadBuilderError::Internal(err.into()))?;

     let requests = [deposit_requests, withdrawal_requests].concat();
     let requests_root = calculate_requests_root(&requests);
@@ -514,7 +536,8 @@
     };

     // seal the block
-    let block = Block { header, body: executed_txs, ommers: vec![], withdrawals, requests };
+    let block =
+        Block { header, body: executed_txs, ommers: vec![], withdrawals, sidecars: None, requests };
     let sealed_block = block.seal_slow();

     debug!(target: "payload_builder", ?sealed_block, "sealed built block");
diff --git a/crates/etl/src/lib.rs b/crates/etl/src/lib.rs
index 137a96fff..0b1bd129c 100644
--- a/crates/etl/src/lib.rs
+++ b/crates/etl/src/lib.rs
@@ -164,6 +164,14 @@
     }
 }

+/// Type alias for the items stored in the heap of [`EtlIter`].
+///
+/// Each item in the heap is a tuple containing:
+/// - A `Reverse` tuple of a key-value pair (`Vec<u8>`, `Vec<u8>`), used to maintain the heap in
+///   ascending order of keys.
+/// - An index (`usize`) representing the source file from which the key-value pair was read.
+type HeapItem = (Reverse<(Vec<u8>, Vec<u8>)>, usize);
+
 /// `EtlIter` is an iterator for traversing through sorted key-value pairs in a collection of ETL
 /// files. These files are created using the [`Collector`] and contain data where keys are encoded
 /// and values are compressed.
@@ -174,8 +182,7 @@
 #[derive(Debug)]
 pub struct EtlIter<'a> {
     /// Heap managing the next items to be iterated.
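
The `HeapItem` alias added above backs a classic k-way merge: the heap holds the next `(key, value)` from each sorted file, wrapped in `Reverse` so Rust's max-heap pops the smallest key, along with the index of the file to refill from. A runnable sketch with in-memory "files" in place of ETL files:

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;

// Merge several individually sorted vectors, mirroring EtlIter's heap usage.
fn kway_merge(files: Vec<Vec<(Vec<u8>, Vec<u8>)>>) -> Vec<(Vec<u8>, Vec<u8>)> {
    let mut cursors: Vec<_> = files.into_iter().map(|f| f.into_iter()).collect();
    let mut heap: BinaryHeap<(Reverse<(Vec<u8>, Vec<u8>)>, usize)> = BinaryHeap::new();

    // Seed the heap with the first entry of every file.
    for (idx, cur) in cursors.iter_mut().enumerate() {
        if let Some(kv) = cur.next() {
            heap.push((Reverse(kv), idx));
        }
    }

    let mut out = Vec::new();
    while let Some((Reverse(kv), idx)) = heap.pop() {
        out.push(kv);
        // Refill from the file the popped entry came from.
        if let Some(next) = cursors[idx].next() {
            heap.push((Reverse(next), idx));
        }
    }
    out
}

fn main() {
    let merged = kway_merge(vec![
        vec![(b"a".to_vec(), b"1".to_vec()), (b"c".to_vec(), b"3".to_vec())],
        vec![(b"b".to_vec(), b"2".to_vec())],
    ]);
    assert_eq!(merged[1].0, b"b".to_vec());
    println!("merged {} entries", merged.len());
}
```
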
-    #[allow(clippy::type_complexity)]
-    heap: BinaryHeap<(Reverse<(Vec<u8>, Vec<u8>)>, usize)>,
+    heap: BinaryHeap<HeapItem>,
     /// Reference to the vector of ETL files being iterated over.
     files: &'a mut Vec<EtlFile>,
 }
diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml
index 23f7e1b25..ab3383719 100644
--- a/crates/evm/Cargo.toml
+++ b/crates/evm/Cargo.toml
@@ -21,7 +21,7 @@
 reth-storage-errors.workspace = true
 reth-execution-types.workspace = true
 revm.workspace = true
-
+alloy-eips.workspace = true
 auto_impl.workspace = true
 futures-util.workspace = true
 parking_lot = { workspace = true, optional = true }
diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs
index 9cc4d2ec1..1fdee9856 100644
--- a/crates/evm/execution-errors/src/lib.rs
+++ b/crates/evm/execution-errors/src/lib.rs
@@ -98,6 +98,14 @@ pub enum BlockValidationError {
         /// The error message.
         message: String,
     },
+    /// EVM error during consolidation requests contract call [EIP-7251]
+    ///
+    /// [EIP-7251]: https://eips.ethereum.org/EIPS/eip-7251
+    #[error("failed to apply consolidation requests contract call: {message}")]
+    ConsolidationRequestsContractCall {
+        /// The error message.
+        message: String,
+    },
     /// Error when decoding deposit requests from receipts [EIP-6110]
     ///
     /// [EIP-6110]: https://eips.ethereum.org/EIPS/eip-6110
diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs
index babcbaa9c..0dec57836 100644
--- a/crates/evm/execution-types/src/chain.rs
+++ b/crates/evm/execution-types/src/chain.rs
@@ -94,6 +94,11 @@ impl Chain {
         &self.execution_outcome
     }

+    /// Get mutable execution outcome of this chain
+    pub fn execution_outcome_mut(&mut self) -> &mut ExecutionOutcome {
+        &mut self.execution_outcome
+    }
+
     /// Prepends the given state to the current state.
     pub fn prepend_state(&mut self, state: BundleState) {
         self.execution_outcome.prepend_state(state);
diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs
index 2c8edfd29..f6af36d2e 100644
--- a/crates/evm/src/either.rs
+++ b/crates/evm/src/either.rs
@@ -1,5 +1,7 @@
 //!
Helper type that represents one of two possible executor types +use std::fmt::Display; + use crate::execute::{ BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, }; @@ -18,13 +20,15 @@ where A: BlockExecutorProvider, B: BlockExecutorProvider, { - type Executor> = Either, B::Executor>; - type BatchExecutor> = + type Executor + Display>> = + Either, B::Executor>; + + type BatchExecutor + Display>> = Either, B::BatchExecutor>; fn executor(&self, db: DB) -> Self::Executor where - DB: Database, + DB: Database + Display>, { match self { Self::Left(a) => Either::Left(a.executor(db)), @@ -32,13 +36,13 @@ where } } - fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor + fn batch_executor(&self, db: DB) -> Self::BatchExecutor where - DB: Database, + DB: Database + Display>, { match self { - Self::Left(a) => Either::Left(a.batch_executor(db, prune_modes)), - Self::Right(b) => Either::Right(b.batch_executor(db, prune_modes)), + Self::Left(a) => Either::Left(a.batch_executor(db)), + Self::Right(b) => Either::Right(b.batch_executor(db)), } } } @@ -57,7 +61,7 @@ where Output = BlockExecutionOutput, Error = BlockExecutionError, >, - DB: Database, + DB: Database + Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; type Output = BlockExecutionOutput; @@ -85,7 +89,7 @@ where Output = ExecutionOutcome, Error = BlockExecutionError, >, - DB: Database, + DB: Database + Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; type Output = ExecutionOutcome; @@ -112,6 +116,13 @@ where } } + fn set_prune_modes(&mut self, prune_modes: PruneModes) { + match self { + Self::Left(a) => a.set_prune_modes(prune_modes), + Self::Right(b) => b.set_prune_modes(prune_modes), + } + } + fn size_hint(&self) -> Option { match self { Self::Left(a) => a.size_hint(), diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index cea9a63a1..c3ca2ba9a 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -5,6 +5,7 @@ use reth_primitives::{parlia::Snapshot, BlockNumber, BlockWithSenders, Receipt, use reth_prune_types::PruneModes; use revm::db::BundleState; use revm_primitives::db::Database; +use std::fmt::Display; #[cfg(not(feature = "std"))] use alloc::vec::Vec; @@ -84,6 +85,11 @@ pub trait BatchExecutor { /// This can be used to optimize state pruning during execution. fn set_tip(&mut self, tip: BlockNumber); + /// Set the prune modes. + /// + /// They are used to determine which parts of the state should be kept during execution. + fn set_prune_modes(&mut self, prune_modes: PruneModes); + /// The size hint of the batch's tracked state size. /// /// This is used to optimize DB commits depending on the size of the state. @@ -95,7 +101,7 @@ pub trait BatchExecutor { /// Contains the state changes, transaction receipts, and total gas used in the block. /// /// TODO(mattsse): combine with `ExecutionOutcome` -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq)] pub struct BlockExecutionOutput { /// The changed state of the block after execution. pub state: BundleState, @@ -146,7 +152,7 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// /// It is not expected to validate the state trie root, this must be done by the caller using /// the returned state. 
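
The `either.rs` changes above rely on the usual static-dispatch trick: `Either` implements the executor traits by forwarding every method with a two-arm match, which is why each newly added trait method (such as `set_prune_modes`) has to be mirrored in both arms. A minimal sketch of the pattern (toy trait, not reth's):

```rust
trait Executor {
    fn run(&self) -> u64;
}

enum Either<A, B> {
    Left(A),
    Right(B),
}

// Forward every trait method to whichever variant is present.
impl<A: Executor, B: Executor> Executor for Either<A, B> {
    fn run(&self) -> u64 {
        match self {
            Self::Left(a) => a.run(),
            Self::Right(b) => b.run(),
        }
    }
}

struct Fast;
struct Slow;
impl Executor for Fast { fn run(&self) -> u64 { 1 } }
impl Executor for Slow { fn run(&self) -> u64 { 2 } }

fn main() {
    let e: Either<Fast, Slow> = Either::Left(Fast);
    assert_eq!(e.run(), 1);
    println!("ok");
}
```
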
- type Executor>: for<'a> Executor< + type Executor + Display>>: for<'a> Executor< DB, Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, Output = BlockExecutionOutput, @@ -154,7 +160,7 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { >; /// An executor that can execute a batch of blocks given a database. - type BatchExecutor>: for<'a> BatchExecutor< + type BatchExecutor + Display>>: for<'a> BatchExecutor< DB, Input<'a> = BlockExecutionInput<'a, BlockWithSenders>, Output = ExecutionOutcome, @@ -166,18 +172,15 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// This is used to execute a single block and get the changed state. fn executor(&self, db: DB) -> Self::Executor where - DB: Database; + DB: Database + Display>; /// Creates a new batch executor with the given database and pruning modes. /// /// Batch executor is used to execute multiple blocks in sequence and keep track of the state /// during historical sync which involves executing multiple blocks in sequence. - /// - /// The pruning modes are used to determine which parts of the state should be kept during - /// execution. - fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor + fn batch_executor(&self, db: DB) -> Self::BatchExecutor where - DB: Database; + DB: Database + Display>; } #[cfg(test)] @@ -191,19 +194,19 @@ mod tests { struct TestExecutorProvider; impl BlockExecutorProvider for TestExecutorProvider { - type Executor> = TestExecutor; - type BatchExecutor> = TestExecutor; + type Executor + Display>> = TestExecutor; + type BatchExecutor + Display>> = TestExecutor; fn executor(&self, _db: DB) -> Self::Executor where - DB: Database, + DB: Database + Display>, { TestExecutor(PhantomData) } - fn batch_executor(&self, _db: DB, _prune_modes: PruneModes) -> Self::BatchExecutor + fn batch_executor(&self, _db: DB) -> Self::BatchExecutor where - DB: Database, + DB: Database + Display>, { TestExecutor(PhantomData) } @@ -238,6 +241,10 @@ mod tests { todo!() } + fn set_prune_modes(&mut self, _prune_modes: PruneModes) { + todo!() + } + fn size_hint(&self) -> Option { None } @@ -253,6 +260,7 @@ mod tests { body: vec![], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }; let block = BlockWithSenders::new(block, Default::default()).unwrap(); diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index a3e643e88..445d9625f 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -12,21 +12,27 @@ #[cfg(not(feature = "std"))] extern crate alloc; +use core::ops::Deref; + use reth_chainspec::ChainSpec; -use reth_primitives::{revm::env::fill_block_env, Address, Header, TransactionSigned, U256}; +use reth_primitives::{Address, Header, TransactionSigned, TransactionSignedEcRecovered, U256}; use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; -use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, SpecId, TxEnv}; +use revm_primitives::{ + BlockEnv, Bytes, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv, +}; pub mod either; pub mod execute; pub mod noop; pub mod provider; +pub mod system_calls; #[cfg(any(test, feature = "test-utils"))] /// test helpers for mocking executor pub mod test_utils; /// Trait for configuring the EVM for executing full blocks. +#[auto_impl::auto_impl(&, Arc)] pub trait ConfigureEvm: ConfigureEvmEnv { /// Associated type for the default external context that should be configured for the EVM. 
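
The `BlockExecutorProvider` bounds rewritten above keep the executor generic over the database on the associated type itself, so one provider value can hand out executors for any database satisfying the bounds. A stripped-down sketch of that shape using a generic associated type, with `Display` standing in for the real bounds (hypothetical names throughout):

```rust
use std::fmt::Display;

trait Provider {
    // Generic associated type: the executor is parameterized by the DB
    // at the use site, not when the provider is defined.
    type Executor<DB: Display>;

    fn executor<DB: Display>(&self, db: DB) -> Self::Executor<DB>;
}

struct EchoProvider;

struct EchoExecutor<DB>(DB);

impl Provider for EchoProvider {
    type Executor<DB: Display> = EchoExecutor<DB>;

    fn executor<DB: Display>(&self, db: DB) -> Self::Executor<DB> {
        EchoExecutor(db)
    }
}

fn main() {
    let exec = EchoProvider.executor("in-memory db");
    println!("executing against {}", exec.0);
}
```
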
type DefaultExternalContext<'a>; @@ -98,29 +104,71 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// This represents the set of methods used to configure the EVM's environment before block /// execution. +/// +/// Default trait method implementation is done w.r.t. L1. +#[auto_impl::auto_impl(&, Arc)] pub trait ConfigureEvmEnv: Send + Sync + Unpin + Clone + 'static { + /// Returns a [`TxEnv`] from a [`TransactionSignedEcRecovered`]. + fn tx_env(&self, transaction: &TransactionSignedEcRecovered) -> TxEnv { + let mut tx_env = TxEnv::default(); + self.fill_tx_env(&mut tx_env, transaction.deref(), transaction.signer()); + tx_env + } + /// Fill transaction environment from a [`TransactionSigned`] and the given sender address. - fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address); + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address); + + /// Fill transaction environment with a system contract call. + fn fill_tx_env_system_contract_call( + &self, + env: &mut Env, + caller: Address, + contract: Address, + data: Bytes, + ); /// Fill [`CfgEnvWithHandlerCfg`] fields according to the chain spec and given header fn fill_cfg_env( + &self, cfg_env: &mut CfgEnvWithHandlerCfg, chain_spec: &ChainSpec, header: &Header, total_difficulty: U256, ); + /// Fill [`BlockEnv`] field according to the chain spec and given header + fn fill_block_env(&self, block_env: &mut BlockEnv, header: &Header, after_merge: bool) { + block_env.number = U256::from(header.number); + block_env.coinbase = header.beneficiary; + block_env.timestamp = U256::from(header.timestamp); + if after_merge { + block_env.prevrandao = Some(header.mix_hash); + block_env.difficulty = U256::ZERO; + } else { + block_env.difficulty = header.difficulty; + block_env.prevrandao = None; + } + block_env.basefee = U256::from(header.base_fee_per_gas.unwrap_or_default()); + block_env.gas_limit = U256::from(header.gas_limit); + + // EIP-4844 excess blob gas of this block, introduced in Cancun + if let Some(excess_blob_gas) = header.excess_blob_gas { + block_env.set_blob_excess_gas_and_price(excess_blob_gas); + } + } + /// Convenience function to call both [`fill_cfg_env`](ConfigureEvmEnv::fill_cfg_env) and - /// [`fill_block_env`]. + /// [`ConfigureEvmEnv::fill_block_env`]. fn fill_cfg_and_block_env( + &self, cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, chain_spec: &ChainSpec, header: &Header, total_difficulty: U256, ) { - Self::fill_cfg_env(cfg, chain_spec, header, total_difficulty); + self.fill_cfg_env(cfg, chain_spec, header, total_difficulty); let after_merge = cfg.handler_cfg.spec_id >= SpecId::MERGE; - fill_block_env(block_env, chain_spec, header, after_merge); + self.fill_block_env(block_env, header, after_merge); } } diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index fdee35239..80a2b76de 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -1,5 +1,7 @@ //! A no operation block executor implementation. 
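
The new `fill_block_env` default method above encodes the post-merge header rules: difficulty is forced to zero and randomness comes from the header's `mix_hash` via `prevrandao`, while pre-London headers simply carry no base fee. A self-contained sketch with trimmed-down header and env types (not revm's):

```rust
#[derive(Default)]
struct Header {
    number: u64,
    timestamp: u64,
    difficulty: u64,
    mix_hash: [u8; 32],
    base_fee_per_gas: Option<u64>,
}

#[derive(Default)]
struct BlockEnv {
    number: u64,
    timestamp: u64,
    difficulty: u64,
    prevrandao: Option<[u8; 32]>,
    basefee: u64,
}

fn fill_block_env(env: &mut BlockEnv, header: &Header, after_merge: bool) {
    env.number = header.number;
    env.timestamp = header.timestamp;
    if after_merge {
        // Post-merge: PoW difficulty is retired; randomness is the mix hash.
        env.prevrandao = Some(header.mix_hash);
        env.difficulty = 0;
    } else {
        env.difficulty = header.difficulty;
        env.prevrandao = None;
    }
    // Pre-London headers have no base fee, hence the default of zero.
    env.basefee = header.base_fee_per_gas.unwrap_or_default();
}

fn main() {
    let mut env = BlockEnv::default();
    fill_block_env(&mut env, &Header { difficulty: 7, ..Default::default() }, true);
    assert_eq!(env.difficulty, 0);
    println!("prevrandao set: {}", env.prevrandao.is_some());
}
```
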
+use std::fmt::Display; + use reth_execution_errors::BlockExecutionError; use reth_execution_types::ExecutionOutcome; use reth_primitives::{BlockNumber, BlockWithSenders, Receipt}; @@ -19,20 +21,20 @@ const UNAVAILABLE_FOR_NOOP: &str = "execution unavailable for noop"; pub struct NoopBlockExecutorProvider; impl BlockExecutorProvider for NoopBlockExecutorProvider { - type Executor> = Self; + type Executor + Display>> = Self; - type BatchExecutor> = Self; + type BatchExecutor + Display>> = Self; fn executor(&self, _: DB) -> Self::Executor where - DB: Database, + DB: Database + Display>, { Self } - fn batch_executor(&self, _: DB, _: PruneModes) -> Self::BatchExecutor + fn batch_executor(&self, _: DB) -> Self::BatchExecutor where - DB: Database, + DB: Database + Display>, { Self } @@ -63,6 +65,8 @@ impl BatchExecutor for NoopBlockExecutorProvider { fn set_tip(&mut self, _: BlockNumber) {} + fn set_prune_modes(&mut self, _: PruneModes) {} + fn size_hint(&self) -> Option { None } diff --git a/crates/evm/src/provider.rs b/crates/evm/src/provider.rs index abf04be89..2e73ff2fa 100644 --- a/crates/evm/src/provider.rs +++ b/crates/evm/src/provider.rs @@ -6,13 +6,13 @@ use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; /// A provider type that knows chain specific information required to configure a -/// [CfgEnvWithHandlerCfg]. +/// [`CfgEnvWithHandlerCfg`]. /// /// This type is mainly used to provide required data to configure the EVM environment that is /// usually stored on disk. #[auto_impl::auto_impl(&, Arc)] pub trait EvmEnvProvider: Send + Sync { - /// Fills the [CfgEnvWithHandlerCfg] and [BlockEnv] fields with values specific to the given + /// Fills the [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the given /// [BlockHashOrNumber]. fn fill_env_at( &self, @@ -24,7 +24,7 @@ pub trait EvmEnvProvider: Send + Sync { where EvmConfig: ConfigureEvmEnv; - /// Fills the default [CfgEnvWithHandlerCfg] and [BlockEnv] fields with values specific to the + /// Fills the default [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the /// given [Header]. fn env_with_header( &self, @@ -40,7 +40,7 @@ pub trait EvmEnvProvider: Send + Sync { Ok((cfg, block_env)) } - /// Fills the [CfgEnvWithHandlerCfg] and [BlockEnv] fields with values specific to the given + /// Fills the [`CfgEnvWithHandlerCfg`] and [BlockEnv] fields with values specific to the given /// [Header]. fn fill_env_with_header( &self, @@ -52,21 +52,7 @@ pub trait EvmEnvProvider: Send + Sync { where EvmConfig: ConfigureEvmEnv; - /// Fills the [BlockEnv] fields with values specific to the given [BlockHashOrNumber]. - fn fill_block_env_at( - &self, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - ) -> ProviderResult<()>; - - /// Fills the [BlockEnv] fields with values specific to the given [Header]. - fn fill_block_env_with_header( - &self, - block_env: &mut BlockEnv, - header: &Header, - ) -> ProviderResult<()>; - - /// Fills the [CfgEnvWithHandlerCfg] fields with values specific to the given + /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given /// [BlockHashOrNumber]. fn fill_cfg_env_at( &self, @@ -77,7 +63,7 @@ pub trait EvmEnvProvider: Send + Sync { where EvmConfig: ConfigureEvmEnv; - /// Fills the [CfgEnvWithHandlerCfg] fields with values specific to the given [Header]. + /// Fills the [`CfgEnvWithHandlerCfg`] fields with values specific to the given [Header]. 
fn fill_cfg_env_with_header( &self, cfg: &mut CfgEnvWithHandlerCfg, diff --git a/crates/evm/src/system_calls.rs b/crates/evm/src/system_calls.rs new file mode 100644 index 000000000..9d493f517 --- /dev/null +++ b/crates/evm/src/system_calls.rs @@ -0,0 +1,402 @@ +//! System contract call functions. + +use crate::ConfigureEvm; +use alloy_eips::{ + eip4788::BEACON_ROOTS_ADDRESS, + eip7002::{WithdrawalRequest, WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS}, + eip7251::{ConsolidationRequest, CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS}, +}; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +use reth_primitives::{Buf, Request}; +use revm::{interpreter::Host, Database, DatabaseCommit, Evm}; +use revm_primitives::{ + Address, BlockEnv, Bytes, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, FixedBytes, + ResultAndState, B256, +}; + +/// Apply the [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) pre block contract call. +/// +/// This constructs a new [Evm] with the given DB, and environment +/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the pre block contract call. +/// +/// This uses [`apply_beacon_root_contract_call`] to ultimately apply the beacon root contract state +/// change. +#[allow(clippy::too_many_arguments)] +pub fn pre_block_beacon_root_contract_call( + db: &mut DB, + evm_config: &EvmConfig, + chain_spec: &ChainSpec, + initialized_cfg: &CfgEnvWithHandlerCfg, + initialized_block_env: &BlockEnv, + block_number: u64, + block_timestamp: u64, + parent_beacon_block_root: Option, +) -> Result<(), BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: std::fmt::Display, + EvmConfig: ConfigureEvm, +{ + // apply pre-block EIP-4788 contract call + let mut evm_pre_block = Evm::builder() + .with_db(db) + .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( + initialized_cfg.clone(), + initialized_block_env.clone(), + Default::default(), + )) + .build(); + + // initialize a block from the env, because the pre block call needs the block itself + apply_beacon_root_contract_call( + evm_config, + chain_spec, + block_timestamp, + block_number, + parent_beacon_block_root, + &mut evm_pre_block, + ) +} + +/// Applies the pre-block call to the [EIP-4788] beacon block root contract, using the given block, +/// [`ChainSpec`], EVM. +/// +/// If Cancun is not activated or the block is the genesis block, then this is a no-op, and no +/// state changes are made. 
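
The beacon-root helper defined in the next hunk is gated before any state is touched: pre-Cancun blocks skip the call entirely, and at genesis the parent beacon root must be zero with no call performed. A compact, runnable sketch of just that gating logic (toy types; the real check lives inside `apply_beacon_root_contract_call` below):

```rust
#[derive(Debug, PartialEq)]
enum Gate {
    Skip,           // pre-Cancun, or genesis with a zero root: no system call
    Call([u8; 32]), // perform the system call with this parent beacon root
}

fn beacon_root_gate(
    cancun_active: bool,
    block_number: u64,
    parent_beacon_block_root: Option<[u8; 32]>,
) -> Result<Gate, String> {
    if !cancun_active {
        return Ok(Gate::Skip);
    }
    // Post-Cancun headers must carry the field.
    let root = parent_beacon_block_root.ok_or("missing parent beacon block root")?;
    if block_number == 0 {
        // EIP-4788: at genesis the root must be zero and no call is made.
        if root != [0u8; 32] {
            return Err("genesis parent beacon block root must be zero".into());
        }
        return Ok(Gate::Skip);
    }
    Ok(Gate::Call(root))
}

fn main() {
    assert_eq!(beacon_root_gate(false, 5, None), Ok(Gate::Skip));
    assert_eq!(beacon_root_gate(true, 0, Some([0u8; 32])), Ok(Gate::Skip));
    assert!(beacon_root_gate(true, 0, Some([1u8; 32])).is_err());
    println!("ok");
}
```
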
+/// +/// [EIP-4788]: https://eips.ethereum.org/EIPS/eip-4788 +#[inline] +pub fn apply_beacon_root_contract_call( + evm_config: &EvmConfig, + chain_spec: &ChainSpec, + block_timestamp: u64, + block_number: u64, + parent_beacon_block_root: Option, + evm: &mut Evm<'_, EXT, DB>, +) -> Result<(), BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: core::fmt::Display, + EvmConfig: ConfigureEvm, +{ + if !chain_spec.is_cancun_active_at_timestamp(block_timestamp) { + return Ok(()) + } + + let parent_beacon_block_root = + parent_beacon_block_root.ok_or(BlockValidationError::MissingParentBeaconBlockRoot)?; + + // if the block number is zero (genesis block) then the parent beacon block root must + // be 0x0 and no system transaction may occur as per EIP-4788 + if block_number == 0 { + if parent_beacon_block_root != B256::ZERO { + return Err(BlockValidationError::CancunGenesisParentBeaconBlockRootNotZero { + parent_beacon_block_root, + } + .into()) + } + return Ok(()) + } + + // get previous env + let previous_env = Box::new(evm.context.env().clone()); + + // modify env for pre block call + evm_config.fill_tx_env_system_contract_call( + &mut evm.context.evm.env, + alloy_eips::eip4788::SYSTEM_ADDRESS, + BEACON_ROOTS_ADDRESS, + parent_beacon_block_root.0.into(), + ); + + let mut state = match evm.transact() { + Ok(res) => res.state, + Err(e) => { + evm.context.evm.env = previous_env; + return Err(BlockValidationError::BeaconRootContractCall { + parent_beacon_block_root: Box::new(parent_beacon_block_root), + message: e.to_string(), + } + .into()) + } + }; + + state.remove(&alloy_eips::eip4788::SYSTEM_ADDRESS); + state.remove(&evm.block().coinbase); + + evm.context.evm.db.commit(state); + + // re-set the previous env + evm.context.evm.env = previous_env; + + Ok(()) +} + +/// Apply the [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) post block contract call. +/// +/// This constructs a new [Evm] with the given DB, and environment +/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the post block contract call. +/// +/// This uses [`apply_withdrawal_requests_contract_call`] to ultimately calculate the +/// [requests](Request). +pub fn post_block_withdrawal_requests_contract_call( + evm_config: &EvmConfig, + db: &mut DB, + initialized_cfg: &CfgEnvWithHandlerCfg, + initialized_block_env: &BlockEnv, +) -> Result, BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: std::fmt::Display, + EvmConfig: ConfigureEvm, +{ + // apply post-block EIP-7002 contract call + let mut evm_post_block = Evm::builder() + .with_db(db) + .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( + initialized_cfg.clone(), + initialized_block_env.clone(), + Default::default(), + )) + .build(); + + // initialize a block from the env, because the post block call needs the block itself + apply_withdrawal_requests_contract_call::(evm_config, &mut evm_post_block) +} + +/// Applies the post-block call to the EIP-7002 withdrawal requests contract. +/// +/// If Prague is not active at the given timestamp, then this is a no-op, and an empty vector is +/// returned. Otherwise, the withdrawal requests are returned. 
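
All of these system-call helpers share one discipline, visible above: snapshot the EVM environment, overwrite it for the system transaction, and restore the snapshot on every exit path, success or failure. A generic sketch of that save/restore shape, with a plain value standing in for the EVM env:

```rust
// Run `f` with a temporarily replaced environment value, always restoring
// the original afterwards, mirroring how the system calls box up
// `evm.context.env()` and put it back.
fn with_temp_env<T, E>(
    env: &mut Vec<u8>,
    temp: Vec<u8>,
    f: impl FnOnce(&mut Vec<u8>) -> Result<T, E>,
) -> Result<T, E> {
    let previous = std::mem::replace(env, temp);
    let result = f(env);
    *env = previous; // restored on both the Ok and Err paths
    result
}

fn main() {
    let mut env = b"block env".to_vec();
    let out: Result<usize, ()> =
        with_temp_env(&mut env, b"system call env".to_vec(), |e| Ok(e.len()));
    assert_eq!(out, Ok(15));
    assert_eq!(env, b"block env");
    println!("env restored");
}
```
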
+#[inline] +pub fn apply_withdrawal_requests_contract_call( + evm_config: &EvmConfig, + evm: &mut Evm<'_, EXT, DB>, +) -> Result, BlockExecutionError> +where + DB: Database + DatabaseCommit, + DB::Error: core::fmt::Display, + EvmConfig: ConfigureEvm, +{ + // get previous env + let previous_env = Box::new(evm.context.env().clone()); + + // Fill transaction environment with the EIP-7002 withdrawal requests contract message data. + // + // This requirement for the withdrawal requests contract call defined by + // [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) is: + // + // At the end of processing any execution block where `block.timestamp >= FORK_TIMESTAMP` (i.e. + // after processing all transactions and after performing the block body withdrawal requests + // validations), call the contract as `SYSTEM_ADDRESS`. + evm_config.fill_tx_env_system_contract_call( + &mut evm.context.evm.env, + alloy_eips::eip7002::SYSTEM_ADDRESS, + WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, + Bytes::new(), + ); + + let ResultAndState { result, mut state } = match evm.transact() { + Ok(res) => res, + Err(e) => { + evm.context.evm.env = previous_env; + return Err(BlockValidationError::WithdrawalRequestsContractCall { + message: format!("execution failed: {e}"), + } + .into()) + } + }; + + // cleanup the state + state.remove(&alloy_eips::eip7002::SYSTEM_ADDRESS); + state.remove(&evm.block().coinbase); + evm.context.evm.db.commit(state); + + // re-set the previous env + evm.context.evm.env = previous_env; + + let mut data = match result { + ExecutionResult::Success { output, .. } => Ok(output.into_data()), + ExecutionResult::Revert { output, .. } => { + Err(BlockValidationError::WithdrawalRequestsContractCall { + message: format!("execution reverted: {output}"), + }) + } + ExecutionResult::Halt { reason, .. } => { + Err(BlockValidationError::WithdrawalRequestsContractCall { + message: format!("execution halted: {reason:?}"), + }) + } + }?; + + // Withdrawals are encoded as a series of withdrawal requests, each with the following + // format: + // + // +------+--------+--------+ + // | addr | pubkey | amount | + // +------+--------+--------+ + // 20 48 8 + + const WITHDRAWAL_REQUEST_SIZE: usize = 20 + 48 + 8; + let mut withdrawal_requests = Vec::with_capacity(data.len() / WITHDRAWAL_REQUEST_SIZE); + while data.has_remaining() { + if data.remaining() < WITHDRAWAL_REQUEST_SIZE { + return Err(BlockValidationError::WithdrawalRequestsContractCall { + message: "invalid withdrawal request length".to_string(), + } + .into()) + } + + let mut source_address = Address::ZERO; + data.copy_to_slice(source_address.as_mut_slice()); + + let mut validator_pubkey = FixedBytes::<48>::ZERO; + data.copy_to_slice(validator_pubkey.as_mut_slice()); + + let amount = data.get_u64(); + + withdrawal_requests.push(Request::WithdrawalRequest(WithdrawalRequest { + source_address, + validator_pubkey, + amount, + })); + } + + Ok(withdrawal_requests) +} + +/// Apply the [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) post block contract call. +/// +/// This constructs a new [Evm] with the given DB, and environment +/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the post block contract call. +/// +/// This uses [`apply_consolidation_requests_contract_call`] to ultimately calculate the +/// [requests](Request). 
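
The decoding loop in `apply_withdrawal_requests_contract_call` above treats the predeploy's return data as fixed-width records: 20 bytes of address, 48 bytes of validator pubkey, then a big-endian `u64` amount (matching `Buf::get_u64`); the consolidation variant below does the same with a 20+48+48 layout. A dependency-free sketch of the withdrawal parsing, with the record layout taken from the diff:

```rust
struct WithdrawalRequest {
    source_address: [u8; 20],
    validator_pubkey: [u8; 48],
    amount: u64,
}

const RECORD_SIZE: usize = 20 + 48 + 8;

fn parse_withdrawal_requests(mut data: &[u8]) -> Result<Vec<WithdrawalRequest>, String> {
    let mut out = Vec::with_capacity(data.len() / RECORD_SIZE);
    while !data.is_empty() {
        if data.len() < RECORD_SIZE {
            return Err("invalid withdrawal request length".into());
        }
        let mut source_address = [0u8; 20];
        source_address.copy_from_slice(&data[..20]);
        let mut validator_pubkey = [0u8; 48];
        validator_pubkey.copy_from_slice(&data[20..68]);
        // The amount is big-endian, as read by `Buf::get_u64`.
        let amount = u64::from_be_bytes(data[68..76].try_into().unwrap());
        out.push(WithdrawalRequest { source_address, validator_pubkey, amount });
        data = &data[RECORD_SIZE..];
    }
    Ok(out)
}

fn main() {
    let mut record = vec![0u8; RECORD_SIZE];
    record[68..76].copy_from_slice(&42u64.to_be_bytes());
    let reqs = parse_withdrawal_requests(&record).unwrap();
    assert_eq!(reqs[0].amount, 42);
    println!(
        "parsed {} request(s); pubkey len {}",
        reqs.len(),
        reqs[0].validator_pubkey.len()
    );
}
```
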
+/// Apply the [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) post block contract call.
+///
+/// This constructs a new [Evm] with the given DB, and environment
+/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the post block contract call.
+///
+/// This uses [`apply_consolidation_requests_contract_call`] to ultimately calculate the
+/// [requests](Request).
+pub fn post_block_consolidation_requests_contract_call<EvmConfig, DB>(
+    evm_config: &EvmConfig,
+    db: &mut DB,
+    initialized_cfg: &CfgEnvWithHandlerCfg,
+    initialized_block_env: &BlockEnv,
+) -> Result<Vec<Request>, BlockExecutionError>
+where
+    DB: Database + DatabaseCommit,
+    DB::Error: std::fmt::Display,
+    EvmConfig: ConfigureEvm,
+{
+    // apply post-block EIP-7251 contract call
+    let mut evm_post_block = Evm::builder()
+        .with_db(db)
+        .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env(
+            initialized_cfg.clone(),
+            initialized_block_env.clone(),
+            Default::default(),
+        ))
+        .build();
+
+    // initialize a block from the env, because the post block call needs the block itself
+    apply_consolidation_requests_contract_call::<EvmConfig, (), DB>(evm_config, &mut evm_post_block)
+}
+
+/// Applies the post-block call to the EIP-7251 consolidation requests contract.
+///
+/// If Prague is not active at the given timestamp, then this is a no-op, and an empty vector is
+/// returned. Otherwise, the consolidation requests are returned.
+#[inline]
+pub fn apply_consolidation_requests_contract_call<EvmConfig, EXT, DB>(
+    evm_config: &EvmConfig,
+    evm: &mut Evm<'_, EXT, DB>,
+) -> Result<Vec<Request>, BlockExecutionError>
+where
+    DB: Database + DatabaseCommit,
+    DB::Error: core::fmt::Display,
+    EvmConfig: ConfigureEvm,
+{
+    // get previous env
+    let previous_env = Box::new(evm.context.env().clone());
+
+    // Fill transaction environment with the EIP-7251 consolidation requests contract message data.
+    //
+    // This requirement for the consolidation requests contract call defined by
+    // [EIP-7251](https://eips.ethereum.org/EIPS/eip-7251) is:
+    //
+    // At the end of processing any execution block where block.timestamp >= FORK_TIMESTAMP (i.e.
+    // after processing all transactions and after performing the block body requests validations)
+    // client software MUST [..] call the contract as `SYSTEM_ADDRESS` with empty input data to
+    // trigger the system subroutine execution.
+    evm_config.fill_tx_env_system_contract_call(
+        &mut evm.context.evm.env,
+        // EIP-7251 defines the same system address as EIP-7002, hence the reuse of the
+        // eip7002 constant here.
+        alloy_eips::eip7002::SYSTEM_ADDRESS,
+        CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS,
+        Bytes::new(),
+    );
+
+    let ResultAndState { result, mut state } = match evm.transact() {
+        Ok(res) => res,
+        Err(e) => {
+            evm.context.evm.env = previous_env;
+            return Err(BlockValidationError::ConsolidationRequestsContractCall {
+                message: format!("execution failed: {e}"),
+            }
+            .into())
+        }
+    };
+
+    // cleanup the state
+    state.remove(&alloy_eips::eip7002::SYSTEM_ADDRESS);
+    state.remove(&evm.block().coinbase);
+    evm.context.evm.db.commit(state);
+
+    // re-set the previous env
+    evm.context.evm.env = previous_env;
+
+    let mut data = match result {
+        ExecutionResult::Success { output, .. } => Ok(output.into_data()),
+        ExecutionResult::Revert { output, .. } => {
+            Err(BlockValidationError::ConsolidationRequestsContractCall {
+                message: format!("execution reverted: {output}"),
+            })
+        }
+        ExecutionResult::Halt { reason, .. } => {
+            Err(BlockValidationError::ConsolidationRequestsContractCall {
+                message: format!("execution halted: {reason:?}"),
+            })
+        }
+    }?;
+
+    // Consolidations are encoded as a series of consolidation requests, each with the following
+    // format:
+    //
+    // +------+--------+---------------+
+    // | addr | pubkey | target pubkey |
+    // +------+--------+---------------+
+    //   20       48          48
+
+    const CONSOLIDATION_REQUEST_SIZE: usize = 20 + 48 + 48;
+    let mut consolidation_requests = Vec::with_capacity(data.len() / CONSOLIDATION_REQUEST_SIZE);
+    while data.has_remaining() {
+        if data.remaining() < CONSOLIDATION_REQUEST_SIZE {
+            return Err(BlockValidationError::ConsolidationRequestsContractCall {
+                message: "invalid consolidation request length".to_string(),
+            }
+            .into())
+        }
+
+        let mut source_address = Address::ZERO;
+        data.copy_to_slice(source_address.as_mut_slice());
+
+        let mut source_pubkey = FixedBytes::<48>::ZERO;
+        data.copy_to_slice(source_pubkey.as_mut_slice());
+
+        let mut target_pubkey = FixedBytes::<48>::ZERO;
+        data.copy_to_slice(target_pubkey.as_mut_slice());
+
+        consolidation_requests.push(Request::ConsolidationRequest(ConsolidationRequest {
+            source_address,
+            source_pubkey,
+            target_pubkey,
+        }));
+    }
+
+    Ok(consolidation_requests)
+}
diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs
index b9786fe1c..67edbce41 100644
--- a/crates/evm/src/test_utils.rs
+++ b/crates/evm/src/test_utils.rs
@@ -10,7 +10,7 @@ use reth_primitives::{BlockNumber, BlockWithSenders, Receipt};
 use reth_prune_types::PruneModes;
 use reth_storage_errors::provider::ProviderError;
 use revm_primitives::db::Database;
-use std::sync::Arc;
+use std::{fmt::Display, sync::Arc};
 
 /// A [`BlockExecutorProvider`] that returns mocked execution results.
 #[derive(Clone, Debug, Default)]
@@ -26,20 +26,20 @@ impl MockExecutorProvider {
     }
 
 impl BlockExecutorProvider for MockExecutorProvider {
-    type Executor<DB: Database<Error = ProviderError>> = Self;
+    type Executor<DB: Database<Error: Into<ProviderError> + Display>> = Self;
 
-    type BatchExecutor<DB: Database<Error = ProviderError>> = Self;
+    type BatchExecutor<DB: Database<Error: Into<ProviderError> + Display>> = Self;
 
     fn executor<DB>(&self, _: DB) -> Self::Executor<DB>
     where
-        DB: Database<Error = ProviderError>,
+        DB: Database<Error: Into<ProviderError> + Display>,
     {
         self.clone()
     }
 
-    fn batch_executor<DB>(&self, _: DB, _: PruneModes) -> Self::BatchExecutor<DB>
+    fn batch_executor<DB>(&self, _: DB) -> Self::BatchExecutor<DB>
     where
-        DB: Database<Error = ProviderError>,
+        DB: Database<Error: Into<ProviderError> + Display>,
     {
         self.clone()
     }
@@ -78,6 +78,8 @@ impl<DB> BatchExecutor<DB> for MockExecutorProvider {
 
     fn set_tip(&mut self, _: BlockNumber) {}
 
+    fn set_prune_modes(&mut self, _: PruneModes) {}
+
     fn size_hint(&self) -> Option<usize> {
         None
     }
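This hunk reflects an API change: prune modes are no longer passed to `batch_executor` at construction time, but applied afterwards through a `set_prune_modes` setter on `BatchExecutor`. A hedged sketch of what a migrated call site might look like, using only the trait surface shown in this diff:

```rust
use reth_evm::execute::{BatchExecutor, BlockExecutorProvider};
use reth_prune_types::PruneModes;
use reth_storage_errors::provider::ProviderError;
use revm_primitives::db::Database;
use std::fmt::Display;

/// Sketch of a call site on the new API: build the executor from the database
/// alone, then configure prune modes separately.
fn make_batch_executor<P, DB>(
    provider: &P,
    db: DB,
    prune_modes: PruneModes,
) -> P::BatchExecutor<DB>
where
    P: BlockExecutorProvider,
    DB: Database<Error: Into<ProviderError> + Display>,
{
    // Was: provider.batch_executor(db, prune_modes)
    let mut executor = provider.batch_executor(db);
    executor.set_prune_modes(prune_modes);
    executor
}
```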
diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml
index 5bbf177d0..ec86deb6f 100644
--- a/crates/exex/exex/Cargo.toml
+++ b/crates/exex/exex/Cargo.toml
@@ -19,11 +19,16 @@ reth-metrics.workspace = true
 reth-node-api.workspace = true
 reth-node-core.workspace = true
 reth-primitives.workspace = true
+reth-primitives-traits.workspace = true
 reth-provider.workspace = true
 reth-tasks.workspace = true
 reth-tracing.workspace = true
 reth-network.workspace = true
 reth-payload-builder.workspace = true
+reth-evm.workspace = true
+reth-prune-types.workspace = true
+reth-revm.workspace = true
+reth-stages-api.workspace = true
 
 ## async
 tokio.workspace = true
@@ -34,6 +39,18 @@ eyre.workspace = true
 metrics.workspace = true
 serde = { workspace = true, optional = true }
 
+[dev-dependencies]
+reth-chainspec.workspace = true
+reth-evm-ethereum.workspace = true
+reth-testing-utils.workspace = true
+reth-blockchain-tree.workspace = true
+reth-db-common.workspace = true
+reth-node-api.workspace = true
+reth-provider = { workspace = true, features = ["test-utils"] }
+reth-db-api.workspace = true
+
+secp256k1.workspace = true
+
 [features]
 default = []
 serde = ["dep:serde", "reth-provider/serde"]
diff --git a/crates/exex/exex/src/backfill.rs b/crates/exex/exex/src/backfill.rs
new file mode 100644
index 000000000..22dde6930
--- /dev/null
+++ b/crates/exex/exex/src/backfill.rs
@@ -0,0 +1,524 @@
+use reth_evm::execute::{
+    BatchExecutor, BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor,
+};
+use reth_node_api::FullNodeComponents;
+use reth_primitives::{Block, BlockNumber, BlockWithSenders, Receipt};
+use reth_primitives_traits::format_gas_throughput;
+use reth_provider::{
+    BlockReader, Chain, HeaderProvider, ProviderError, StateProviderFactory, TransactionVariant,
+};
+use reth_prune_types::PruneModes;
+use reth_revm::database::StateProviderDatabase;
+use reth_stages_api::ExecutionStageThresholds;
+use reth_tracing::tracing::{debug, trace};
+use std::{
+    ops::RangeInclusive,
+    time::{Duration, Instant},
+};
+
+/// Factory for creating new backfill jobs.
+#[derive(Debug, Clone)]
+pub struct BackfillJobFactory<E, P> {
+    executor: E,
+    provider: P,
+    prune_modes: PruneModes,
+    thresholds: ExecutionStageThresholds,
+}
+
+impl<E, P> BackfillJobFactory<E, P> {
+    /// Creates a new [`BackfillJobFactory`].
+    pub fn new(executor: E, provider: P) -> Self {
+        Self {
+            executor,
+            provider,
+            prune_modes: PruneModes::none(),
+            thresholds: ExecutionStageThresholds::default(),
+        }
+    }
+
+    /// Sets the prune modes
+    pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self {
+        self.prune_modes = prune_modes;
+        self
+    }
+
+    /// Sets the thresholds
+    pub const fn with_thresholds(mut self, thresholds: ExecutionStageThresholds) -> Self {
+        self.thresholds = thresholds;
+        self
+    }
+}
+
+impl<E: Clone, P: Clone> BackfillJobFactory<E, P> {
+    /// Creates a new backfill job for the given range.
+    pub fn backfill(&self, range: RangeInclusive<BlockNumber>) -> BackfillJob<E, P> {
+        BackfillJob {
+            executor: self.executor.clone(),
+            provider: self.provider.clone(),
+            prune_modes: self.prune_modes.clone(),
+            range,
+            thresholds: self.thresholds.clone(),
+        }
+    }
+}
+
+impl BackfillJobFactory<(), ()> {
+    /// Creates a new [`BackfillJobFactory`] from [`FullNodeComponents`].
+    pub fn new_from_components<Node: FullNodeComponents>(
+        components: Node,
+    ) -> BackfillJobFactory<Node::Executor, Node::Provider> {
+        BackfillJobFactory::<_, _>::new(
+            components.block_executor().clone(),
+            components.provider().clone(),
+        )
+    }
+}
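Before the job types themselves, a hedged usage sketch of the factory API introduced above: create a factory, optionally cap batch sizes via the thresholds, and drain the resulting job (the `Iterator` impl is defined just below). The bounds and the range values here are illustrative only.

```rust
use reth_exex::BackfillJobFactory;
use reth_stages_api::ExecutionStageThresholds;

/// Sketch: backfill blocks 1..=100 in batches of at most 50 blocks.
fn run_backfill<E, P>(executor: E, provider: P) -> eyre::Result<()>
where
    E: reth_evm::execute::BlockExecutorProvider + Clone,
    P: reth_provider::HeaderProvider
        + reth_provider::BlockReader
        + reth_provider::StateProviderFactory
        + Clone,
{
    let factory = BackfillJobFactory::new(executor, provider)
        .with_thresholds(ExecutionStageThresholds { max_blocks: Some(50), ..Default::default() });

    // Each iterator item is a `Chain` covering one executed batch.
    for chain in factory.backfill(1..=100) {
        let chain = chain?;
        println!("backfilled {} blocks", chain.blocks().len());
    }
    Ok(())
}
```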
+/// Backfill job started for a specific range.
+///
+/// It implements [`Iterator`] that executes blocks in batches according to the provided thresholds
+/// and yields a [`Chain`] for each batch.
+#[derive(Debug)]
+pub struct BackfillJob<E, P> {
+    executor: E,
+    provider: P,
+    prune_modes: PruneModes,
+    thresholds: ExecutionStageThresholds,
+    range: RangeInclusive<BlockNumber>,
+}
+
+impl<E, P> Iterator for BackfillJob<E, P>
+where
+    E: BlockExecutorProvider,
+    P: HeaderProvider + BlockReader + StateProviderFactory,
+{
+    type Item = Result<Chain, BlockExecutionError>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.range.is_empty() {
+            return None
+        }
+
+        Some(self.execute_range())
+    }
+}
+
+impl<E, P> BackfillJob<E, P>
+where
+    E: BlockExecutorProvider,
+    P: BlockReader + HeaderProvider + StateProviderFactory,
+{
+    fn execute_range(&mut self) -> Result<Chain, BlockExecutionError> {
+        let mut executor = self.executor.batch_executor(StateProviderDatabase::new(
+            self.provider.history_by_block_number(self.range.start().saturating_sub(1))?,
+        ));
+        executor.set_prune_modes(self.prune_modes.clone());
+
+        let mut fetch_block_duration = Duration::default();
+        let mut execution_duration = Duration::default();
+        let mut cumulative_gas = 0;
+        let batch_start = Instant::now();
+
+        let mut blocks = Vec::new();
+        for block_number in self.range.clone() {
+            // Fetch the block
+            let fetch_block_start = Instant::now();
+
+            let td = self
+                .provider
+                .header_td_by_number(block_number)?
+                .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?;
+
+            // we need the block's transactions along with their hashes
+            let block = self
+                .provider
+                .sealed_block_with_senders(block_number.into(), TransactionVariant::WithHash)?
+                .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?;
+
+            fetch_block_duration += fetch_block_start.elapsed();
+
+            cumulative_gas += block.gas_used;
+
+            // Configure the executor to use the current state.
+            trace!(target: "exex::backfill", number = block_number, txs = block.body.len(), "Executing block");
+
+            // Execute the block
+            let execute_start = Instant::now();
+
+            // Unseal the block for execution
+            let (block, senders) = block.into_components();
+            let (unsealed_header, hash) = block.header.split();
+            let block = Block {
+                header: unsealed_header,
+                body: block.body,
+                ommers: block.ommers,
+                withdrawals: block.withdrawals,
+                sidecars: block.sidecars,
+                requests: block.requests,
+            }
+            .with_senders_unchecked(senders);
+
+            executor.execute_and_verify_one((&block, td).into())?;
+            execution_duration += execute_start.elapsed();
+
+            // TODO(alexey): report gas metrics using `block.header.gas_used`
+
+            // Seal the block back and save it
+            blocks.push(block.seal(hash));
+
+            // Check if we should commit now
+            let bundle_size_hint = executor.size_hint().unwrap_or_default() as u64;
+            if self.thresholds.is_end_of_batch(
+                block_number - *self.range.start(),
+                bundle_size_hint,
+                cumulative_gas,
+                batch_start.elapsed(),
+            ) {
+                break
+            }
+        }
+
+        let last_block_number = blocks.last().expect("blocks should not be empty").number;
+        debug!(
+            target: "exex::backfill",
+            range = ?*self.range.start()..=last_block_number,
+            block_fetch = ?fetch_block_duration,
+            execution = ?execution_duration,
+            throughput = format_gas_throughput(cumulative_gas, execution_duration),
+            "Finished executing block range"
+        );
+        self.range = last_block_number + 1..=*self.range.end();
+
+        let chain = Chain::new(blocks, executor.finalize(), None);
+        Ok(chain)
+    }
+}
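The batch boundary above is decided by `ExecutionStageThresholds::is_end_of_batch`, which short-circuits on whichever limit trips first; a `None` field disables that limit. A small illustration of that behavior, with made-up values and assuming the four-argument signature used in `execute_range` (blocks processed, bundle size hint, cumulative gas, elapsed time):

```rust
use reth_stages_api::ExecutionStageThresholds;
use std::time::Duration;

#[test]
fn thresholds_end_of_batch_illustration() {
    let thresholds = ExecutionStageThresholds {
        max_blocks: Some(1_000),
        max_changes: None,
        max_cumulative_gas: Some(30_000_000 * 100),
        max_duration: Some(Duration::from_secs(10)),
    };

    // Block cap reached: end the batch even though gas and time are fine.
    assert!(thresholds.is_end_of_batch(1_000, 0, 0, Duration::ZERO));
    // All limits comfortably under their caps: keep going.
    assert!(!thresholds.is_end_of_batch(10, 0, 0, Duration::ZERO));
}
```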
+impl<E, P> BackfillJob<E, P> {
+    /// Converts the backfill job into a single block backfill job.
+    pub fn into_single_blocks(self) -> SingleBlockBackfillJob<E, P> {
+        self.into()
+    }
+}
+
+impl<E, P> From<BackfillJob<E, P>> for SingleBlockBackfillJob<E, P> {
+    fn from(value: BackfillJob<E, P>) -> Self {
+        Self { executor: value.executor, provider: value.provider, range: value.range }
+    }
+}
+
+/// Single-block backfill job started for a specific range.
+///
+/// It implements [`Iterator`] which executes a block each time the
+/// iterator is advanced and yields a ([`BlockWithSenders`], [`BlockExecutionOutput`]) pair.
+#[derive(Debug)]
+pub struct SingleBlockBackfillJob<E, P> {
+    executor: E,
+    provider: P,
+    range: RangeInclusive<BlockNumber>,
+}
+
+impl<E, P> Iterator for SingleBlockBackfillJob<E, P>
+where
+    E: BlockExecutorProvider,
+    P: HeaderProvider + BlockReader + StateProviderFactory,
+{
+    type Item = Result<(BlockWithSenders, BlockExecutionOutput<Receipt>), BlockExecutionError>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.range.next().map(|block_number| self.execute_block(block_number))
+    }
+}
+
+impl<E, P> SingleBlockBackfillJob<E, P>
+where
+    E: BlockExecutorProvider,
+    P: HeaderProvider + BlockReader + StateProviderFactory,
+{
+    fn execute_block(
+        &self,
+        block_number: u64,
+    ) -> Result<(BlockWithSenders, BlockExecutionOutput<Receipt>), BlockExecutionError> {
+        let td = self
+            .provider
+            .header_td_by_number(block_number)?
+            .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?;
+
+        // Fetch the block with senders for execution.
+        let block_with_senders = self
+            .provider
+            .block_with_senders(block_number.into(), TransactionVariant::WithHash)?
+            .ok_or_else(|| ProviderError::HeaderNotFound(block_number.into()))?;
+
+        // Configure the executor to use the previous block's state.
+        let executor = self.executor.executor(StateProviderDatabase::new(
+            self.provider.history_by_block_number(block_number.saturating_sub(1))?,
+        ));
+
+        trace!(target: "exex::backfill", number = block_number, txs = block_with_senders.block.body.len(), "Executing block");
+
+        let block_execution_output = executor.execute((&block_with_senders, td).into())?;
+
+        Ok((block_with_senders, block_execution_output))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::BackfillJobFactory;
+    use eyre::OptionExt;
+    use reth_blockchain_tree::noop::NoopBlockchainTree;
+    use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork, MAINNET};
+    use reth_db_common::init::init_genesis;
+    use reth_evm::execute::{
+        BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor,
+    };
+    use reth_evm_ethereum::execute::EthExecutorProvider;
+    use reth_primitives::{
+        b256, constants::ETH_TO_WEI, public_key_to_address, Address, Block, BlockWithSenders,
+        Genesis, GenesisAccount, Header, Receipt, Requests, SealedBlockWithSenders, Transaction,
+        TxEip2930, TxKind, U256,
+    };
+    use reth_provider::{
+        providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec,
+        BlockWriter, ExecutionOutcome, LatestStateProviderRef, ProviderFactory,
+    };
+    use reth_revm::database::StateProviderDatabase;
+    use reth_testing_utils::generators::{self, sign_tx_with_key_pair};
+    use secp256k1::Keypair;
+    use std::sync::Arc;
+
+    fn to_execution_outcome(
+        block_number: u64,
+        block_execution_output: &BlockExecutionOutput<Receipt>,
+    ) -> ExecutionOutcome {
+        ExecutionOutcome {
+            bundle: block_execution_output.state.clone(),
+            receipts: block_execution_output.receipts.clone().into(),
+            first_block: block_number,
+            requests: vec![Requests(block_execution_output.requests.clone())],
+            snapshots: vec![],
+        }
+    }
+
+    fn chain_spec(address: Address) -> Arc<ChainSpec> {
+        // Create a chain spec with a genesis state that contains the
+        // provided sender
+        Arc::new(
+            ChainSpecBuilder::default()
+                .chain(MAINNET.chain)
+                .genesis(Genesis {
+                    alloc: [(
+                        address,
+                        GenesisAccount { balance: U256::from(ETH_TO_WEI), ..Default::default() },
+                    )]
+                    .into(),
+                    ..MAINNET.genesis.clone()
+                })
+                .paris_activated()
+                .build(),
+        )
+    }
+
+    fn execute_block_and_commit_to_database<DB>(
+        provider_factory: &ProviderFactory<DB>,
+        chain_spec: Arc<ChainSpec>,
+        block: &BlockWithSenders,
+    ) -> eyre::Result<BlockExecutionOutput<Receipt>>
+    where
+        DB: reth_db_api::database::Database,
+    {
+        let provider = provider_factory.provider()?;
+
+        // Execute the block to produce a block execution output
+        let mut block_execution_output = EthExecutorProvider::ethereum(chain_spec)
+            .executor(StateProviderDatabase::new(LatestStateProviderRef::new(
+                provider.tx_ref(),
+                provider.static_file_provider().clone(),
+            )))
+            .execute(BlockExecutionInput { block, total_difficulty: U256::ZERO })?;
+        block_execution_output.state.reverts.sort();
+
+        // Convert the block execution output to an execution outcome for committing to the
+        // database
+        let execution_outcome = to_execution_outcome(block.number, &block_execution_output);
+
+        // Commit the block's execution outcome to the database
+        let provider_rw = provider_factory.provider_rw()?;
+        let block = block.clone().seal_slow();
+        provider_rw.append_blocks_with_state(
+            vec![block],
+            execution_outcome,
+            Default::default(),
+            Default::default(),
+        )?;
+        provider_rw.commit()?;
+
+        Ok(block_execution_output)
+    }
+
+    fn blocks_and_execution_outputs<DB>(
+        provider_factory: ProviderFactory<DB>,
+        chain_spec: Arc<ChainSpec>,
+        key_pair: Keypair,
+    ) -> eyre::Result<Vec<(SealedBlockWithSenders, BlockExecutionOutput<Receipt>)>>
+    where
+        DB: reth_db_api::database::Database,
+    {
+        // First block has a transaction that transfers some ETH to zero address
+        let block1 = Block {
+            header: Header {
+                parent_hash: chain_spec.genesis_hash(),
+                receipts_root: b256!(
+                    "d3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e"
+                ),
+                difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"),
+                number: 1,
+                gas_limit: 21000,
+                gas_used: 21000,
+                ..Default::default()
+            },
+            body: vec![sign_tx_with_key_pair(
+                key_pair,
+                Transaction::Eip2930(TxEip2930 {
+                    chain_id: chain_spec.chain.id(),
+                    nonce: 0,
+                    gas_limit: 21000,
+                    gas_price: 1_500_000_000,
+                    to: TxKind::Call(Address::ZERO),
+                    value: U256::from(0.1 * ETH_TO_WEI as f64),
+                    ..Default::default()
+                }),
+            )],
+            ..Default::default()
+        }
+        .with_recovered_senders()
+        .ok_or_eyre("failed to recover senders")?;
+
+        // Second block resends the same transaction with increased nonce
+        let block2 = Block {
+            header: Header {
+                parent_hash: block1.header.hash_slow(),
+                receipts_root: b256!(
+                    "d3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e"
+                ),
+                difficulty: chain_spec.fork(EthereumHardfork::Paris).ttd().expect("Paris TTD"),
+                number: 2,
+                gas_limit: 21000,
+                gas_used: 21000,
+                ..Default::default()
+            },
+            body: vec![sign_tx_with_key_pair(
+                key_pair,
+                Transaction::Eip2930(TxEip2930 {
+                    chain_id: chain_spec.chain.id(),
+                    nonce: 1,
+                    gas_limit: 21000,
+                    gas_price: 1_500_000_000,
+                    to: TxKind::Call(Address::ZERO),
+                    value: U256::from(0.1 * ETH_TO_WEI as f64),
+                    ..Default::default()
+                }),
+            )],
+            ..Default::default()
+        }
+        .with_recovered_senders()
+        .ok_or_eyre("failed to recover senders")?;
+
+        let block_output1 =
+            execute_block_and_commit_to_database(&provider_factory, chain_spec.clone(), &block1)?;
+        let block_output2 =
+            execute_block_and_commit_to_database(&provider_factory, chain_spec, &block2)?;
+
+        let block1 = block1.seal_slow();
+        let block2 = block2.seal_slow();
+
+        Ok(vec![(block1, block_output1), (block2, block_output2)])
+    }
+
+    #[test]
+    fn test_backfill() -> eyre::Result<()> {
+        reth_tracing::init_test_tracing();
+
+        // Create a key pair for the sender
+        let key_pair = Keypair::new_global(&mut generators::rng());
+        let address = public_key_to_address(key_pair.public_key());
+
+        let chain_spec = chain_spec(address);
+
+        let executor = EthExecutorProvider::ethereum(chain_spec.clone());
+        let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone());
+        init_genesis(provider_factory.clone())?;
+        let blockchain_db = BlockchainProvider::new(
+            provider_factory.clone(),
+            Arc::new(NoopBlockchainTree::default()),
+        )?;
+
+        let blocks_and_execution_outputs =
+            blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?;
+        let (block, block_execution_output) = blocks_and_execution_outputs.first().unwrap();
+        let execution_outcome = to_execution_outcome(block.number, block_execution_output);
+
+        // Backfill the first block
+        let factory = BackfillJobFactory::new(executor, blockchain_db);
+        let job = factory.backfill(1..=1);
+        let chains = job.collect::<Result<Vec<_>, _>>()?;
+
+        // Assert that the backfill job produced the same chain as we got before when we were
+        // executing only the first block
+        assert_eq!(chains.len(), 1);
+        let mut chain = chains.into_iter().next().unwrap();
+        chain.execution_outcome_mut().bundle.reverts.sort();
+        assert_eq!(chain.blocks(), &[(1, block.clone())].into());
+        assert_eq!(chain.execution_outcome(), &execution_outcome);
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_single_block_backfill() -> eyre::Result<()> {
+        reth_tracing::init_test_tracing();
+
+        // Create a key pair for the sender
+        let key_pair = Keypair::new_global(&mut generators::rng());
+        let address = public_key_to_address(key_pair.public_key());
+
+        let chain_spec = chain_spec(address);
+
+        let executor = EthExecutorProvider::ethereum(chain_spec.clone());
+        let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone());
+        init_genesis(provider_factory.clone())?;
+        let blockchain_db = BlockchainProvider::new(
+            provider_factory.clone(),
+            Arc::new(NoopBlockchainTree::default()),
+        )?;
+
+        let blocks_and_execution_outcomes =
+            blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?;
+
+        // Backfill the first block
+        let factory = BackfillJobFactory::new(executor, blockchain_db);
+        let job = factory.backfill(1..=1);
+        let single_job = job.into_single_blocks();
+        let block_execution_it = single_job.into_iter();
+
+        // Assert that the backfill job only produces a single block
+        let blocks_and_outcomes = block_execution_it.collect::<Vec<_>>();
+        assert_eq!(blocks_and_outcomes.len(), 1);
+
+        // Assert that the backfill job single block iterator produces the expected output for
+        // each block
+        for (i, res) in blocks_and_outcomes.into_iter().enumerate() {
+            let (block, mut execution_output) = res?;
+            execution_output.state.reverts.sort();
+
+            let sealed_block_with_senders = blocks_and_execution_outcomes[i].0.clone();
+            let expected_block = sealed_block_with_senders.unseal();
+            let expected_output = &blocks_and_execution_outcomes[i].1;
+
+            assert_eq!(block, expected_block);
+            assert_eq!(&execution_output, expected_output);
+        }
+
+        Ok(())
+    }
+}
diff --git a/crates/exex/exex/src/lib.rs b/crates/exex/exex/src/lib.rs
index a7661d855..5f859accc 100644
--- a/crates/exex/exex/src/lib.rs
+++ b/crates/exex/exex/src/lib.rs
@@ -34,6 +34,9 @@
 #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![cfg_attr(not(test), warn(unused_crate_dependencies))] +mod backfill; +pub use backfill::*; + mod context; pub use context::*; diff --git a/crates/exex/test-utils/Cargo.toml b/crates/exex/test-utils/Cargo.toml index b7db9a98f..b5b62471b 100644 --- a/crates/exex/test-utils/Cargo.toml +++ b/crates/exex/test-utils/Cargo.toml @@ -24,11 +24,11 @@ reth-exex.workspace = true reth-network.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true -reth-node-builder.workspace = true +reth-node-builder = { workspace = true, features = ["test-utils"] } reth-node-ethereum.workspace = true reth-payload-builder.workspace = true reth-primitives.workspace = true -reth-provider.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } reth-tasks.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } diff --git a/crates/exex/types/Cargo.toml b/crates/exex/types/Cargo.toml index 8797376da..e03b63342 100644 --- a/crates/exex/types/Cargo.toml +++ b/crates/exex/types/Cargo.toml @@ -12,4 +12,4 @@ description = "Commonly used types for exex usage in reth." workspace = true [dependencies] -alloy-primitives.workspace = true \ No newline at end of file +alloy-primitives.workspace = true diff --git a/crates/net/banlist/src/ban_list.rs b/crates/net/banlist/src/ban_list.rs deleted file mode 100644 index e69de29bb..000000000 diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 50311fd3c..7c14eac9b 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -214,9 +214,8 @@ impl Discv4 { /// ``` /// # use std::io; /// use rand::thread_rng; - /// use reth_chainspec::net::NodeRecord; /// use reth_discv4::{Discv4, Discv4Config}; - /// use reth_network_peers::{pk2id, PeerId}; + /// use reth_network_peers::{pk2id, NodeRecord, PeerId}; /// use secp256k1::SECP256K1; /// use std::{net::SocketAddr, str::FromStr}; /// # async fn t() -> io::Result<()> { @@ -1537,7 +1536,7 @@ impl Discv4Service { /// - timestamp is expired (lower than current local UNIX timestamp) fn ensure_not_expired(&self, timestamp: u64) -> Result<(), ()> { // ensure the timestamp is a valid UNIX timestamp - let _ = i64::try_from(timestamp).map_err(|_| ())?; + let _ = i64::try_from(timestamp).map_err(drop)?; let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); if self.config.enforce_expiration_timestamps && timestamp < now { @@ -2288,8 +2287,8 @@ mod tests { use alloy_primitives::hex; use alloy_rlp::{Decodable, Encodable}; use rand::{thread_rng, Rng}; - use reth_chainspec::net::mainnet_nodes; use reth_ethereum_forks::{EnrForkIdEntry, ForkHash}; + use reth_network_peers::mainnet_nodes; use std::future::poll_fn; #[tokio::test] diff --git a/crates/net/discv5/src/enr.rs b/crates/net/discv5/src/enr.rs index eb8b6be00..bb49f72e8 100644 --- a/crates/net/discv5/src/enr.rs +++ b/crates/net/discv5/src/enr.rs @@ -58,7 +58,7 @@ mod tests { use super::*; use alloy_rlp::Encodable; use discv5::enr::{CombinedKey, EnrKey}; - use reth_chainspec::{Hardfork, MAINNET}; + use reth_chainspec::{EthereumHardfork, MAINNET}; use reth_network_peers::NodeRecord; #[test] @@ -84,7 +84,7 @@ mod tests { let key = CombinedKey::generate_secp256k1(); let mut buf = Vec::new(); - let fork_id = MAINNET.hardfork_fork_id(Hardfork::Frontier); + let fork_id = MAINNET.hardfork_fork_id(EthereumHardfork::Frontier); fork_id.unwrap().encode(&mut buf); let enr = Enr::builder() diff --git a/crates/net/dns/src/lib.rs b/crates/net/dns/src/lib.rs index 
f07fde2e4..55d93c459 100644 --- a/crates/net/dns/src/lib.rs +++ b/crates/net/dns/src/lib.rs @@ -415,7 +415,7 @@ mod tests { use alloy_rlp::{Decodable, Encodable}; use enr::EnrKey; use reth_chainspec::MAINNET; - use reth_ethereum_forks::{ForkHash, Hardfork}; + use reth_ethereum_forks::{EthereumHardfork, ForkHash}; use secp256k1::rand::thread_rng; use std::{future::poll_fn, net::Ipv4Addr}; @@ -513,7 +513,7 @@ mod tests { resolver.insert(link.domain.clone(), root.to_string()); let mut builder = Enr::builder(); - let fork_id = MAINNET.hardfork_fork_id(Hardfork::Frontier).unwrap(); + let fork_id = MAINNET.hardfork_fork_id(EthereumHardfork::Frontier).unwrap(); builder .ip4(Ipv4Addr::LOCALHOST) .udp4(30303) diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 4f009b445..f17ce036d 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -33,7 +33,7 @@ alloy-rlp.workspace = true futures.workspace = true futures-util.workspace = true pin-project.workspace = true -tokio = { workspace = true, features = ["sync"] } +tokio = { workspace = true, features = ["sync", "fs", "io-util"] } tokio-stream.workspace = true tokio-util = { workspace = true, features = ["codec"] } diff --git a/crates/net/downloaders/src/bodies/test_utils.rs b/crates/net/downloaders/src/bodies/test_utils.rs index b730510e8..4bc482cad 100644 --- a/crates/net/downloaders/src/bodies/test_utils.rs +++ b/crates/net/downloaders/src/bodies/test_utils.rs @@ -24,6 +24,7 @@ pub(crate) fn zip_blocks<'a>( body: body.transactions, ommers: body.ommers, withdrawals: body.withdrawals, + sidecars: body.sidecars, requests: body.requests, }) } diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs index 9f566598a..eaf392677 100644 --- a/crates/net/downloaders/src/file_client.rs +++ b/crates/net/downloaders/src/file_client.rs @@ -5,13 +5,12 @@ use reth_network_p2p::{ bodies::client::{BodiesClient, BodiesFut}, download::DownloadClient, error::RequestError, - headers::client::{HeadersClient, HeadersFut, HeadersRequest}, + headers::client::{HeadersClient, HeadersDirection, HeadersFut, HeadersRequest}, priority::Priority, }; use reth_network_peers::PeerId; use reth_primitives::{ - BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Header, HeadersDirection, SealedHeader, - B256, + BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, Header, SealedHeader, B256, }; use std::{collections::HashMap, io, path::Path}; use thiserror::Error; @@ -228,16 +227,7 @@ impl FromReader for FileClient { // add to the internal maps headers.insert(block.header.number, block.header.clone()); hash_to_number.insert(block_hash, block.header.number); - bodies.insert( - block_hash, - BlockBody { - transactions: block.body, - ommers: block.ommers, - withdrawals: block.withdrawals, - sidecars: None, - requests: block.requests, - }, - ); + bodies.insert(block_hash, block.into()); if log_interval == 0 { trace!(target: "downloaders::file", diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs index e123ce712..c9e4a51c2 100644 --- a/crates/net/downloaders/src/headers/reverse_headers.rs +++ b/crates/net/downloaders/src/headers/reverse_headers.rs @@ -10,16 +10,14 @@ use reth_consensus::Consensus; use reth_network_p2p::{ error::{DownloadError, DownloadResult, PeerRequestResult}, headers::{ - client::{HeadersClient, HeadersRequest}, + client::{HeadersClient, HeadersDirection, HeadersRequest}, 
downloader::{validate_header_download, HeaderDownloader, SyncTarget}, error::{HeadersDownloaderError, HeadersDownloaderResult}, }, priority::Priority, }; use reth_network_peers::PeerId; -use reth_primitives::{ - BlockHashOrNumber, BlockNumber, GotExpected, Header, HeadersDirection, SealedHeader, B256, -}; +use reth_primitives::{BlockHashOrNumber, BlockNumber, GotExpected, Header, SealedHeader, B256}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ cmp::{Ordering, Reverse}, diff --git a/crates/net/downloaders/src/test_utils/mod.rs b/crates/net/downloaders/src/test_utils/mod.rs index 905a1d8ed..04c2b974d 100644 --- a/crates/net/downloaders/src/test_utils/mod.rs +++ b/crates/net/downloaders/src/test_utils/mod.rs @@ -33,7 +33,7 @@ pub(crate) fn generate_bodies( transactions: block.body, ommers: block.ommers, withdrawals: block.withdrawals, - sidecars: None, + sidecars: block.sidecars, requests: block.requests, }, ) diff --git a/crates/net/ecies/src/codec.rs b/crates/net/ecies/src/codec.rs index 54250e102..c3e9b8d58 100644 --- a/crates/net/ecies/src/codec.rs +++ b/crates/net/ecies/src/codec.rs @@ -1,3 +1,5 @@ +//! This contains the main codec for `RLPx` ECIES messages + use crate::{algorithm::ECIES, ECIESError, EgressECIESValue, IngressECIESValue}; use alloy_primitives::{bytes::BytesMut, B512 as PeerId}; use secp256k1::SecretKey; @@ -7,14 +9,14 @@ use tracing::{instrument, trace}; /// Tokio codec for ECIES #[derive(Debug)] -pub(crate) struct ECIESCodec { +pub struct ECIESCodec { ecies: ECIES, state: ECIESState, } /// Current ECIES state of a connection #[derive(Clone, Copy, Debug, PartialEq, Eq)] -enum ECIESState { +pub enum ECIESState { /// The first stage of the ECIES handshake, where each side of the connection sends an auth /// message containing the ephemeral public key, signature of the public key, nonce, and other /// metadata. @@ -23,7 +25,12 @@ enum ECIESState { /// The second stage of the ECIES handshake, where each side of the connection sends an ack /// message containing the nonce and other metadata. Ack, + + /// The third stage of the ECIES handshake, where header is parsed, message integrity checks + /// performed, and message is decrypted. Header, + + /// The final stage, where the ECIES message is actually read and returned by the ECIES codec. 
Body, } diff --git a/crates/net/ecies/src/lib.rs b/crates/net/ecies/src/lib.rs index 378398d6b..f766b48b2 100644 --- a/crates/net/ecies/src/lib.rs +++ b/crates/net/ecies/src/lib.rs @@ -16,7 +16,7 @@ pub mod util; mod error; pub use error::ECIESError; -mod codec; +pub mod codec; use alloy_primitives::{ bytes::{Bytes, BytesMut}, diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index d30f072ff..671883dae 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -16,6 +16,9 @@ workspace = true reth-chainspec.workspace = true reth-codecs-derive.workspace = true reth-primitives.workspace = true + +# ethereum +alloy-chains = { workspace = true, features = ["rlp"] } alloy-rlp = { workspace = true, features = ["derive"] } alloy-genesis.workspace = true @@ -27,26 +30,23 @@ serde = { workspace = true, optional = true } # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { workspace = true, optional = true } -proptest-derive = { workspace = true, optional = true } proptest-arbitrary-interop = { workspace = true, optional = true } [dev-dependencies] reth-primitives = { workspace = true, features = ["arbitrary"] } - +alloy-chains = { workspace = true, features = ["arbitrary"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true -proptest-derive.workspace = true proptest-arbitrary-interop.workspace = true +proptest-derive.workspace = true rand.workspace = true [features] -default = ["serde"] -serde = ["dep:serde"] arbitrary = [ "reth-primitives/arbitrary", + "alloy-chains/arbitrary", "dep:arbitrary", "dep:proptest", - "dep:proptest-derive", "dep:proptest-arbitrary-interop", ] - +serde = ["dep:serde"] diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index d0720229e..68e3e86c3 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -1,14 +1,10 @@ //! Implements the `GetBlockHeaders`, `GetBlockBodies`, `BlockHeaders`, and `BlockBodies` message //! types. +use crate::HeadersDirection; use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; use reth_codecs_derive::{add_arbitrary_tests, derive_arbitrary}; -#[cfg(any(test, feature = "arbitrary"))] -use reth_primitives::generate_valid_header; -use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection, B256}; - -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; +use reth_primitives::{BlockBody, BlockHashOrNumber, Header, B256}; /// A request for a peer to return block headers starting at the requested block. /// The peer must return at most [`limit`](#structfield.limit) headers. @@ -21,7 +17,7 @@ use serde::{Deserialize, Serialize}; /// in the direction specified by [`reverse`](#structfield.reverse). #[derive_arbitrary(rlp)] #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RlpEncodable, RlpDecodable)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct GetBlockHeaders { /// The block number or hash that the peer should start returning headers from. pub start_block: BlockHashOrNumber, @@ -41,7 +37,7 @@ pub struct GetBlockHeaders { /// The response to [`GetBlockHeaders`], containing headers if any headers were found. 
#[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[add_arbitrary_tests(rlp, 10)] pub struct BlockHeaders( /// The requested headers. @@ -55,7 +51,7 @@ impl<'a> arbitrary::Arbitrary<'a> for BlockHeaders { let mut headers = Vec::with_capacity(headers_count); for _ in 0..headers_count { - headers.push(generate_valid_header( + headers.push(reth_primitives::generate_valid_header( u.arbitrary()?, u.arbitrary()?, u.arbitrary()?, @@ -77,7 +73,7 @@ impl From> for BlockHeaders { /// A request for a peer to return block bodies for the given block hashes. #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct GetBlockBodies( /// The block hashes to request bodies for. pub Vec, @@ -93,7 +89,7 @@ impl From> for GetBlockBodies { /// any were found. #[derive_arbitrary(rlp, 16)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct BlockBodies( /// The requested block bodies, each of which should correspond to a hash in the request. pub Vec, @@ -107,11 +103,14 @@ impl From> for BlockBodies { #[cfg(test)] mod tests { - use crate::{message::RequestPair, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders}; + use crate::{ + message::RequestPair, BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, + HeadersDirection, + }; use alloy_rlp::{Decodable, Encodable}; use reth_primitives::{ - hex, BlockHashOrNumber, Header, HeadersDirection, Signature, Transaction, - TransactionSigned, TxKind, TxLegacy, U256, + hex, BlockHashOrNumber, Header, Signature, Transaction, TransactionSigned, TxKind, + TxLegacy, U256, }; use std::str::FromStr; diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index e880e2441..9c9bddbd5 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -22,13 +22,10 @@ use proptest::{collection::vec, prelude::*}; #[cfg(feature = "arbitrary")] use proptest_arbitrary_interop::arb; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - /// This informs peers of new blocks that have appeared on the network. #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct NewBlockHashes( /// New block hashes and the block number for each blockhash. /// Clients should request blocks using a [`GetBlockBodies`](crate::GetBlockBodies) message. @@ -52,7 +49,7 @@ impl NewBlockHashes { /// A block hash _and_ a block number. 
#[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct BlockHashNumber { /// The block hash pub hash: B256, @@ -75,7 +72,7 @@ impl From for Vec { /// A new block with the current total difficulty, which includes the difficulty of the returned /// block. #[derive(Clone, Debug, PartialEq, Eq, RlpEncodable, RlpDecodable, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive_arbitrary(rlp, 25)] #[rlp(trailing)] pub struct NewBlock { @@ -93,7 +90,7 @@ pub struct NewBlock { /// in a block. #[derive_arbitrary(rlp, 10)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Transactions( /// New transactions for the peer to include in its mempool. pub Vec, @@ -298,7 +295,7 @@ impl From for NewPooledTransactionHashes { /// but have not been included in a block. #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct NewPooledTransactionHashes66( /// Transaction hashes for new transactions that have appeared on the network. /// Clients should request the transactions with the given hashes using a @@ -315,7 +312,7 @@ impl From> for NewPooledTransactionHashes66 { /// Same as [`NewPooledTransactionHashes66`] but extends that that beside the transaction hashes, /// the node sends the transaction types and their sizes (as defined in EIP-2718) as well. #[derive(Clone, Debug, PartialEq, Eq, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct NewPooledTransactionHashes68 { /// Transaction types for new transactions that have appeared on the network. /// diff --git a/crates/primitives/src/header.rs b/crates/net/eth-wire-types/src/header.rs similarity index 98% rename from crates/primitives/src/header.rs rename to crates/net/eth-wire-types/src/header.rs index ea80328e7..607d6ba3e 100644 --- a/crates/primitives/src/header.rs +++ b/crates/net/eth-wire-types/src/header.rs @@ -2,10 +2,7 @@ use alloy_rlp::{Decodable, Encodable}; use bytes::BufMut; -use reth_codecs::derive_arbitrary; -use serde::{Deserialize, Serialize}; - -pub use reth_primitives_traits::{Header, HeaderError, SealedHeader}; +use reth_codecs_derive::derive_arbitrary; /// Represents the direction for a headers request depending on the `reverse` field of the request. /// > The response must contain a number of block headers, of rising number when reverse is 0, @@ -18,7 +15,8 @@ pub use reth_primitives_traits::{Header, HeaderError, SealedHeader}; /// /// See also #[derive_arbitrary(rlp)] -#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Default, Serialize, Deserialize)] +#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Default)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum HeadersDirection { /// Falling block number. 
Falling, @@ -87,10 +85,9 @@ impl From for bool { #[cfg(test)] mod tests { - use crate::{ - address, b256, bloom, bytes, hex, Address, Bytes, Header, HeadersDirection, B256, U256, - }; + use super::*; use alloy_rlp::{Decodable, Encodable}; + use reth_primitives::{address, b256, bloom, bytes, hex, Address, Bytes, Header, B256, U256}; use std::str::FromStr; // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 diff --git a/crates/net/eth-wire-types/src/lib.rs b/crates/net/eth-wire-types/src/lib.rs index d1079294b..d95ed51b3 100644 --- a/crates/net/eth-wire-types/src/lib.rs +++ b/crates/net/eth-wire-types/src/lib.rs @@ -6,8 +6,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![allow(clippy::needless_lifetimes)] // side effect of optimism fields #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] @@ -20,6 +18,9 @@ pub use version::EthVersion; pub mod message; pub use message::{EthMessage, EthMessageID, ProtocolMessage}; +pub mod header; +pub use header::*; + pub mod blocks; pub use blocks::*; diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index a68d5273e..3b3dbf622 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -16,8 +16,6 @@ use crate::{EthVersion, SharedTransactions}; use alloy_rlp::{length_of_length, Decodable, Encodable, Header}; use reth_primitives::bytes::{Buf, BufMut}; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; use std::{fmt::Debug, sync::Arc}; /// [`MAX_MESSAGE_SIZE`] is the maximum cap on the size of a protocol message. @@ -37,7 +35,7 @@ pub enum MessageError { /// An `eth` protocol message, containing a message ID and payload. #[derive(Clone, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct ProtocolMessage { /// The unique identifier representing the type of the Ethereum message. pub message_type: EthMessageID, @@ -187,7 +185,7 @@ impl From for ProtocolBroadcastMessage { /// it, `NewPooledTransactionHashes` is renamed as [`NewPooledTransactionHashes66`] and /// [`NewPooledTransactionHashes68`] is defined. #[derive(Clone, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum EthMessage { /// Represents a Status message required for the protocol handshake. Status(Status), @@ -343,7 +341,7 @@ impl Encodable for EthBroadcastMessage { /// Represents message IDs for eth protocol messages. #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum EthMessageID { /// Status message. Status = 0x00, @@ -450,7 +448,7 @@ impl TryFrom for EthMessageID { /// This can represent either a request or a response, since both include a message payload and /// request id. 
#[derive(Clone, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct RequestPair { /// id for the contained request or response message pub request_id: u64, diff --git a/crates/net/eth-wire-types/src/receipts.rs b/crates/net/eth-wire-types/src/receipts.rs index 4816f8554..cbe74f964 100644 --- a/crates/net/eth-wire-types/src/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -4,13 +4,10 @@ use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use reth_codecs_derive::derive_arbitrary; use reth_primitives::{ReceiptWithBloom, B256}; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - /// A request for transaction receipts from the given block hashes. #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct GetReceipts( /// The block hashes to request receipts for. pub Vec, @@ -20,7 +17,7 @@ pub struct GetReceipts( /// requested. #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Receipts( /// Each receipt hash should correspond to a block hash in the request. pub Vec>, diff --git a/crates/net/eth-wire-types/src/state.rs b/crates/net/eth-wire-types/src/state.rs index 5f3dc8339..aa1e064d0 100644 --- a/crates/net/eth-wire-types/src/state.rs +++ b/crates/net/eth-wire-types/src/state.rs @@ -4,15 +4,12 @@ use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use reth_codecs_derive::derive_arbitrary; use reth_primitives::{Bytes, B256}; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - /// A request for state tree nodes corresponding to the given hashes. /// This message was removed in `eth/67`, only clients running `eth/66` or earlier will respond to /// this message. #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct GetNodeData(pub Vec); /// The response to [`GetNodeData`], containing the state tree nodes or contract bytecode @@ -22,7 +19,7 @@ pub struct GetNodeData(pub Vec); /// This message was removed in `eth/67`. 
#[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct NodeData(pub Vec); #[cfg(test)] diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index b42131a49..71bfab780 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -1,11 +1,10 @@ use crate::EthVersion; +use alloy_chains::{Chain, NamedChain}; use alloy_genesis::Genesis; use alloy_rlp::{RlpDecodable, RlpEncodable}; -use reth_chainspec::{Chain, ChainSpec, NamedChain, MAINNET}; +use reth_chainspec::{ChainSpec, MAINNET}; use reth_codecs_derive::derive_arbitrary; -use reth_primitives::{hex, ForkId, Hardfork, Head, B256, U256}; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; +use reth_primitives::{hex, EthereumHardfork, ForkId, Head, B256, U256}; use std::fmt::{Debug, Display}; /// The status message is used in the eth protocol handshake to ensure that peers are on the same @@ -15,7 +14,7 @@ use std::fmt::{Debug, Display}; /// hash. This information should be treated as untrusted. #[derive_arbitrary(rlp)] #[derive(Copy, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Status { /// The current protocol version. For example, peers running `eth/66` would have a version of /// 66. @@ -142,7 +141,7 @@ impl Default for Status { blockhash: mainnet_genesis, genesis: mainnet_genesis, forkid: MAINNET - .hardfork_fork_id(Hardfork::Frontier) + .hardfork_fork_id(EthereumHardfork::Frontier) .expect("The Frontier hardfork should always exist"), } } @@ -152,7 +151,7 @@ impl Default for Status { /// /// # Example /// ``` -/// use reth_chainspec::{Chain, Hardfork, MAINNET}; +/// use reth_chainspec::{Chain, EthereumHardfork, MAINNET}; /// use reth_eth_wire_types::{EthVersion, Status}; /// use reth_primitives::{B256, MAINNET_GENESIS_HASH, U256}; /// @@ -163,7 +162,7 @@ impl Default for Status { /// .total_difficulty(U256::from(100)) /// .blockhash(B256::from(MAINNET_GENESIS_HASH)) /// .genesis(B256::from(MAINNET_GENESIS_HASH)) -/// .forkid(MAINNET.hardfork_fork_id(Hardfork::Paris).unwrap()) +/// .forkid(MAINNET.hardfork_fork_id(EthereumHardfork::Paris).unwrap()) /// .build(); /// /// assert_eq!( @@ -174,7 +173,7 @@ impl Default for Status { /// total_difficulty: U256::from(100), /// blockhash: B256::from(MAINNET_GENESIS_HASH), /// genesis: B256::from(MAINNET_GENESIS_HASH), -/// forkid: MAINNET.hardfork_fork_id(Hardfork::Paris).unwrap(), +/// forkid: MAINNET.hardfork_fork_id(EthereumHardfork::Paris).unwrap(), /// } /// ); /// ``` @@ -233,7 +232,7 @@ mod tests { use alloy_rlp::{Decodable, Encodable}; use rand::Rng; use reth_chainspec::{Chain, ChainSpec, ForkCondition, NamedChain}; - use reth_primitives::{hex, ForkHash, ForkId, Hardfork, Head, B256, U256}; + use reth_primitives::{hex, EthereumHardfork, ForkHash, ForkId, Head, B256, U256}; use std::str::FromStr; #[test] @@ -368,12 +367,12 @@ mod tests { // add a few hardforks let hardforks = vec![ - (Hardfork::Tangerine, ForkCondition::Block(1)), - (Hardfork::SpuriousDragon, ForkCondition::Block(2)), - (Hardfork::Byzantium, ForkCondition::Block(3)), - (Hardfork::MuirGlacier, ForkCondition::Block(5)), - (Hardfork::London, ForkCondition::Block(8)), - 
(Hardfork::Shanghai, ForkCondition::Timestamp(13)), + (EthereumHardfork::Tangerine, ForkCondition::Block(1)), + (EthereumHardfork::SpuriousDragon, ForkCondition::Block(2)), + (EthereumHardfork::Byzantium, ForkCondition::Block(3)), + (EthereumHardfork::MuirGlacier, ForkCondition::Block(5)), + (EthereumHardfork::London, ForkCondition::Block(8)), + (EthereumHardfork::Shanghai, ForkCondition::Timestamp(13)), ]; let mut chainspec = ChainSpec::builder().genesis(genesis).chain(Chain::from_id(1337)); diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index d0a42d49b..a5bf40b79 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -7,13 +7,10 @@ use reth_primitives::{ transaction::TransactionConversionError, PooledTransactionsElement, TransactionSigned, B256, }; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - /// A list of transaction hashes that the peer would like transaction bodies for. #[derive_arbitrary(rlp)] #[derive(Clone, Debug, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper, Default)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct GetPooledTransactions( /// The transaction hashes to request transaction bodies for. pub Vec, @@ -48,7 +45,7 @@ where Deref, Constructor, )] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct PooledTransactions( /// The transaction bodies, each of which should correspond to a requested hash. pub Vec, diff --git a/crates/net/eth-wire-types/src/upgrade_status.rs b/crates/net/eth-wire-types/src/upgrade_status.rs index 1031d1751..3a473587c 100644 --- a/crates/net/eth-wire-types/src/upgrade_status.rs +++ b/crates/net/eth-wire-types/src/upgrade_status.rs @@ -3,13 +3,12 @@ use alloy_rlp::{RlpDecodable, RlpEncodable}; use reth_codecs_derive::derive_arbitrary; -use serde::{Deserialize, Serialize}; /// UpdateStatus packet introduced in BSC to notify peers whether to broadcast transaction or not. /// It is used during the p2p handshake. #[derive_arbitrary(rlp)] #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct UpgradeStatus { /// Extension for support customized features for BSC. pub extension: UpgradeStatusExtension, @@ -19,7 +18,7 @@ pub struct UpgradeStatus { /// This flag currently is ignored, and will be supported later. #[derive_arbitrary(rlp)] #[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct UpgradeStatusExtension { // TODO: support disable_peer_tx_broadcast flag /// To notify a peer to disable the broadcast of transactions or not. 
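A pattern repeated throughout the eth-wire-types hunks above: the cfg-gated `use serde::{Deserialize, Serialize};` import is dropped in favor of fully qualified paths inside `cfg_attr`. A minimal sketch of the resulting shape, with a hypothetical `Example` type; because the derive paths are fully qualified, nothing is imported unconditionally, so builds without the `serde` feature no longer produce unused-import or `unused_crate_dependencies` warnings:

```rust
// No `#[cfg(feature = "serde")] use serde::...` import is needed; the type
// compiles identically with or without the `serde` feature enabled.
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[derive(Clone, Debug, PartialEq, Eq, Default)]
pub struct Example {
    /// Payload carried by the message.
    pub payload: Vec<u8>,
}
```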
diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index ad8a73195..3bf4bc5af 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -18,7 +18,6 @@ reth-codecs.workspace = true reth-primitives.workspace = true reth-ecies.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } -reth-discv4.workspace = true reth-eth-wire-types.workspace = true reth-network-peers.workspace = true @@ -29,7 +28,7 @@ bytes.workspace = true derive_more.workspace = true thiserror.workspace = true serde = { workspace = true, optional = true } -tokio = { workspace = true, features = ["full"] } +tokio = { workspace = true, features = ["macros", "net", "sync", "time"] } tokio-util = { workspace = true, features = ["io", "codec"] } futures.workspace = true tokio-stream.workspace = true @@ -45,6 +44,7 @@ reth-primitives = { workspace = true, features = ["arbitrary"] } reth-tracing.workspace = true test-fuzz.workspace = true +tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } tokio-util = { workspace = true, features = ["io", "codec"] } rand.workspace = true secp256k1 = { workspace = true, features = [ @@ -58,16 +58,16 @@ proptest.workspace = true proptest-arbitrary-interop.workspace = true proptest-derive.workspace = true async-stream.workspace = true +serde.workspace = true [features] -default = ["serde"] arbitrary = [ "reth-primitives/arbitrary", + "reth-eth-wire-types/arbitrary", "dep:arbitrary", ] -optimism = ["reth-primitives/optimism"] bsc = ["reth-primitives/bsc"] -serde = ["dep:serde"] +serde = ["dep:serde", "reth-eth-wire-types/serde"] [[test]] name = "fuzz_roundtrip" diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index 6696f2486..0ec50b782 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -412,12 +412,12 @@ mod tests { use crate::{ broadcast::BlockHashNumber, errors::{EthHandshakeError, EthStreamError}, + hello::DEFAULT_TCP_PORT, p2pstream::{ProtocolVersion, UnauthedP2PStream}, EthMessage, EthStream, EthVersion, HelloMessageWithProtocols, PassthroughCodec, Status, }; use futures::{SinkExt, StreamExt}; use reth_chainspec::NamedChain; - use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_ecies::stream::ECIESStream; use reth_network_peers::pk2id; use reth_primitives::{ForkFilter, Head, B256, U256}; @@ -688,7 +688,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "bitcoind/1.0.0".to_string(), protocols: vec![EthVersion::Eth67.into()], - port: DEFAULT_DISCOVERY_PORT, + port: DEFAULT_TCP_PORT, id: pk2id(&server_key.public_key(SECP256K1)), }; @@ -716,7 +716,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "bitcoind/1.0.0".to_string(), protocols: vec![EthVersion::Eth67.into()], - port: DEFAULT_DISCOVERY_PORT, + port: DEFAULT_TCP_PORT, id: pk2id(&client_key.public_key(SECP256K1)), }; diff --git a/crates/net/eth-wire/src/hello.rs b/crates/net/eth-wire/src/hello.rs index fbdffecec..2e95e2c7e 100644 --- a/crates/net/eth-wire/src/hello.rs +++ b/crates/net/eth-wire/src/hello.rs @@ -1,10 +1,14 @@ use crate::{capability::Capability, EthVersion, ProtocolVersion}; use alloy_rlp::{RlpDecodable, RlpEncodable}; use reth_codecs::derive_arbitrary; -use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_network_peers::PeerId; use reth_primitives::constants::RETH_CLIENT_VERSION; +/// The default tcp port for p2p. 
+/// +/// Note: this is the same as discovery port: `DEFAULT_DISCOVERY_PORT` +pub(crate) const DEFAULT_TCP_PORT: u16 = 30303; + use crate::protocol::Protocol; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; @@ -29,6 +33,8 @@ pub struct HelloMessageWithProtocols { /// The list of supported capabilities and their versions. pub protocols: Vec, /// The port that the client is listening on, zero indicates the client is not listening. + /// + /// By default this is `30303` which is the same as the default discovery port. pub port: u16, /// The secp256k1 public key corresponding to the node's private key. pub id: PeerId, @@ -200,7 +206,7 @@ impl HelloMessageBuilder { protocols: protocols.unwrap_or_else(|| { vec![EthVersion::Eth68.into(), EthVersion::Eth67.into(), EthVersion::Eth66.into()] }), - port: port.unwrap_or(DEFAULT_DISCOVERY_PORT), + port: port.unwrap_or(DEFAULT_TCP_PORT), id, } } @@ -208,14 +214,12 @@ impl HelloMessageBuilder { #[cfg(test)] mod tests { - use alloy_rlp::{Decodable, Encodable, EMPTY_STRING_CODE}; - use reth_discv4::DEFAULT_DISCOVERY_PORT; - use reth_network_peers::pk2id; - use secp256k1::{SecretKey, SECP256K1}; - use crate::{ capability::Capability, p2pstream::P2PMessage, EthVersion, HelloMessage, ProtocolVersion, }; + use alloy_rlp::{Decodable, Encodable, EMPTY_STRING_CODE}; + use reth_network_peers::pk2id; + use secp256k1::{SecretKey, SECP256K1}; #[test] fn test_hello_encoding_round_trip() { @@ -225,7 +229,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "reth/0.1.0".to_string(), capabilities: vec![Capability::new_static("eth", EthVersion::Eth67 as usize)], - port: DEFAULT_DISCOVERY_PORT, + port: 30303, id, }); @@ -245,7 +249,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "reth/0.1.0".to_string(), capabilities: vec![Capability::new_static("eth", EthVersion::Eth67 as usize)], - port: DEFAULT_DISCOVERY_PORT, + port: 30303, id, }); @@ -264,7 +268,7 @@ mod tests { protocol_version: ProtocolVersion::V5, client_version: "reth/0.1.0".to_string(), capabilities: vec![Capability::new_static("eth", EthVersion::Eth67 as usize)], - port: DEFAULT_DISCOVERY_PORT, + port: 30303, id, }); diff --git a/crates/net/eth-wire/src/lib.rs b/crates/net/eth-wire/src/lib.rs index 3830baa1b..e96a27077 100644 --- a/crates/net/eth-wire/src/lib.rs +++ b/crates/net/eth-wire/src/lib.rs @@ -11,8 +11,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub mod capability; diff --git a/crates/net/eth-wire/src/p2pstream.rs b/crates/net/eth-wire/src/p2pstream.rs index 23f106da9..fb80048f0 100644 --- a/crates/net/eth-wire/src/p2pstream.rs +++ b/crates/net/eth-wire/src/p2pstream.rs @@ -478,11 +478,10 @@ where // // It's possible we already tried to RLP decode this, but it was snappy // compressed, so we need to RLP decode it again. 
-        let reason = DisconnectReason::decode(&mut &decompress_buf[1..]).map_err(|err| {
+        let reason = DisconnectReason::decode(&mut &decompress_buf[1..]).inspect_err(|&err| {
             debug!(
                 %err,
                 msg=%hex::encode(&decompress_buf[1..]),
                 "Failed to decode disconnect message from peer"
             );
-            err
         })?;
         return Poll::Ready(Some(Err(P2PStreamError::Disconnected(reason))))
     }
diff --git a/crates/net/eth-wire/src/test_utils.rs b/crates/net/eth-wire/src/test_utils.rs
index 466bc0f1c..2d74cd184 100644
--- a/crates/net/eth-wire/src/test_utils.rs
+++ b/crates/net/eth-wire/src/test_utils.rs
@@ -1,10 +1,10 @@
 //! Utilities for testing p2p protocol.
 
 use crate::{
-    EthVersion, HelloMessageWithProtocols, P2PStream, ProtocolVersion, Status, UnauthedP2PStream,
+    hello::DEFAULT_TCP_PORT, EthVersion, HelloMessageWithProtocols, P2PStream, ProtocolVersion,
+    Status, UnauthedP2PStream,
 };
 use reth_chainspec::Chain;
-use reth_discv4::DEFAULT_DISCOVERY_PORT;
 use reth_network_peers::pk2id;
 use reth_primitives::{ForkFilter, Head, B256, U256};
 use secp256k1::{SecretKey, SECP256K1};
@@ -22,7 +22,7 @@ pub fn eth_hello() -> (HelloMessageWithProtocols, SecretKey) {
         protocol_version: ProtocolVersion::V5,
         client_version: "eth/1.0.0".to_string(),
         protocols,
-        port: DEFAULT_DISCOVERY_PORT,
+        port: DEFAULT_TCP_PORT,
         id: pk2id(&server_key.public_key(SECP256K1)),
     };
     (hello, server_key)
diff --git a/crates/net/eth-wire/tests/fuzz_roundtrip.rs b/crates/net/eth-wire/tests/fuzz_roundtrip.rs
index f20d0397c..ec55fc448 100644
--- a/crates/net/eth-wire/tests/fuzz_roundtrip.rs
+++ b/crates/net/eth-wire/tests/fuzz_roundtrip.rs
@@ -1,8 +1,5 @@
 //! Round-trip encoding fuzzing for the `eth-wire` crate.
 
-// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged
-#![allow(unknown_lints, non_local_definitions)]
-
 use alloy_rlp::{Decodable, Encodable};
 use serde::Serialize;
 use std::fmt::Debug;
diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs
index 6c6f8036d..8efaec5f0 100644
--- a/crates/net/network-api/src/lib.rs
+++ b/crates/net/network-api/src/lib.rs
@@ -66,9 +66,14 @@ pub trait PeersInfo: Send + Sync {
 /// Provides an API for managing the peers of the network.
 pub trait Peers: PeersInfo {
-    /// Adds a peer to the peer set.
-    fn add_peer(&self, peer: PeerId, addr: SocketAddr) {
-        self.add_peer_kind(peer, PeerKind::Basic, addr);
+    /// Adds a peer to the peer set with only a TCP `SocketAddr`.
+    fn add_peer(&self, peer: PeerId, tcp_addr: SocketAddr) {
+        self.add_peer_kind(peer, PeerKind::Static, tcp_addr, None);
+    }
+
+    /// Adds a peer to the peer set with TCP and UDP `SocketAddr`.
+    fn add_peer_with_udp(&self, peer: PeerId, tcp_addr: SocketAddr, udp_addr: SocketAddr) {
+        self.add_peer_kind(peer, PeerKind::Static, tcp_addr, Some(udp_addr));
     }
 
     /// Adds a trusted [`PeerId`] to the peer set.
@@ -76,13 +81,24 @@ pub trait Peers: PeersInfo {
     /// This allows marking a peer as trusted without having to know the peer's address.
     fn add_trusted_peer_id(&self, peer: PeerId);
 
-    /// Adds a trusted peer to the peer set.
-    fn add_trusted_peer(&self, peer: PeerId, addr: SocketAddr) {
-        self.add_peer_kind(peer, PeerKind::Trusted, addr);
+    /// Adds a trusted peer to the peer set with only a TCP `SocketAddr`.
+    fn add_trusted_peer(&self, peer: PeerId, tcp_addr: SocketAddr) {
+        self.add_peer_kind(peer, PeerKind::Trusted, tcp_addr, None);
+    }
+
+    /// Adds a trusted peer with TCP and UDP `SocketAddr` to the peer set.
+    fn add_trusted_peer_with_udp(&self, peer: PeerId, tcp_addr: SocketAddr, udp_addr: SocketAddr) {
+        self.add_peer_kind(peer, PeerKind::Trusted, tcp_addr, Some(udp_addr));
     }
 
     /// Adds a peer to the known peer set, with the given kind.
-    fn add_peer_kind(&self, peer: PeerId, kind: PeerKind, addr: SocketAddr);
+    fn add_peer_kind(
+        &self,
+        peer: PeerId,
+        kind: PeerKind,
+        tcp_addr: SocketAddr,
+        udp_addr: Option<SocketAddr>,
+    );
 
     /// Returns the rpc [`PeerInfo`] for all connected [`PeerKind::Trusted`] peers.
     fn get_trusted_peers(
@@ -147,6 +163,8 @@ pub enum PeerKind {
     /// Basic peer kind.
     #[default]
     Basic,
+    /// Static peer, added via JSON-RPC.
+    Static,
     /// Trusted peer.
     Trusted,
 }
@@ -157,6 +175,11 @@ impl PeerKind {
         matches!(self, Self::Trusted)
     }
 
+    /// Returns `true` if the peer is static.
+    pub const fn is_static(&self) -> bool {
+        matches!(self, Self::Static)
+    }
+
     /// Returns `true` if the peer is basic.
     pub const fn is_basic(&self) -> bool {
         matches!(self, Self::Basic)
@@ -172,6 +195,10 @@ pub struct PeerInfo {
     pub remote_id: PeerId,
     /// The client's name and version
     pub client_version: Arc<str>,
+    /// The peer's enode
+    pub enode: String,
+    /// The peer's enr
+    pub enr: Option<String>,
     /// The peer's address we're connected to
     pub remote_addr: SocketAddr,
     /// The local address of the connection
@@ -184,6 +211,8 @@ pub struct PeerInfo {
     pub status: Arc<Status>,
     /// The timestamp when the session to that peer has been established.
     pub session_established: Instant,
+    /// The peer's connection kind
+    pub kind: PeerKind,
 }
 
 /// The direction of the connection.
diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs
index 745613f40..a74204a3f 100644
--- a/crates/net/network-api/src/noop.rs
+++ b/crates/net/network-api/src/noop.rs
@@ -71,7 +71,14 @@ impl PeersInfo for NoopNetwork {
 impl Peers for NoopNetwork {
     fn add_trusted_peer_id(&self, _peer: PeerId) {}
 
-    fn add_peer_kind(&self, _peer: PeerId, _kind: PeerKind, _addr: SocketAddr) {}
+    fn add_peer_kind(
+        &self,
+        _peer: PeerId,
+        _kind: PeerKind,
+        _tcp_addr: SocketAddr,
+        _udp_addr: Option<SocketAddr>,
+    ) {
+    }
 
     async fn get_peers_by_kind(&self, _kind: PeerKind) -> Result<Vec<PeerInfo>, NetworkError> {
         Ok(vec![])
diff --git a/crates/net/network-types/Cargo.toml b/crates/net/network-types/Cargo.toml
new file mode 100644
index 000000000..66c1f4d84
--- /dev/null
+++ b/crates/net/network-types/Cargo.toml
@@ -0,0 +1,30 @@
+[package]
+name = "reth-network-types"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+description = "Commonly used network types"
+
+[lints]
+workspace = true
+
+[dependencies]
+# reth
+reth-network-api.workspace = true
+reth-network-peers.workspace = true
+reth-net-banlist.workspace = true
+
+# io
+serde = { workspace = true, optional = true }
+humantime-serde = { workspace = true, optional = true }
+serde_json = { workspace = true }
+
+# misc
+tracing.workspace = true
+
+[features]
+serde = ["dep:serde", "dep:humantime-serde"]
+test-utils = []
diff --git a/crates/net/network-types/src/backoff.rs b/crates/net/network-types/src/backoff.rs
new file mode 100644
index 000000000..8ee9f68a4
--- /dev/null
+++ b/crates/net/network-types/src/backoff.rs
@@ -0,0 +1,27 @@
+/// Describes the type of backoff that should be applied.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum BackoffKind {
+    /// Use the lowest configured backoff duration.
+ /// + /// This applies to connection problems where there is a chance that they will be resolved + /// after the short duration. + Low, + /// Use a slightly higher duration to put a peer in timeout + /// + /// This applies to more severe connection problems where there is a lower chance that they + /// will be resolved. + Medium, + /// Use the max configured backoff duration. + /// + /// This is intended for spammers, or bad peers in general. + High, +} + +// === impl BackoffKind === + +impl BackoffKind { + /// Returns true if the backoff is considered severe. + pub const fn is_severe(&self) -> bool { + matches!(self, Self::Medium | Self::High) + } +} diff --git a/crates/net/network-types/src/lib.rs b/crates/net/network-types/src/lib.rs new file mode 100644 index 000000000..5b075d609 --- /dev/null +++ b/crates/net/network-types/src/lib.rs @@ -0,0 +1,24 @@ +//! Commonly used networking types. +//! +//! ## Feature Flags +//! +//! - `serde` (default): Enable serde support + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +/// Types related to peering. +pub mod peers; +pub use peers::{ConnectionsConfig, PeersConfig, ReputationChangeWeights}; + +pub mod session; +pub use session::{SessionLimits, SessionsConfig}; + +/// [`BackoffKind`] definition. +mod backoff; +pub use backoff::BackoffKind; diff --git a/crates/net/network-types/src/peers/config.rs b/crates/net/network-types/src/peers/config.rs new file mode 100644 index 000000000..5143c4c6f --- /dev/null +++ b/crates/net/network-types/src/peers/config.rs @@ -0,0 +1,292 @@ +//! Configuration for peering. + +use crate::{BackoffKind, ReputationChangeWeights}; +use reth_net_banlist::BanList; +use reth_network_peers::NodeRecord; +use std::{ + collections::HashSet, + io::{self, ErrorKind}, + path::Path, + time::Duration, +}; +use tracing::info; + +/// Maximum number of available slots for outbound sessions. +pub const DEFAULT_MAX_COUNT_PEERS_OUTBOUND: u32 = 100; + +/// Maximum number of available slots for inbound sessions. +pub const DEFAULT_MAX_COUNT_PEERS_INBOUND: u32 = 30; + +/// Maximum number of available slots for concurrent outgoing dials. +/// +/// This restricts how many outbound dials can be performed concurrently. +pub const DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS: usize = 15; + +/// The durations to use when a backoff should be applied to a peer. +/// +/// See also [`BackoffKind`]. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct PeerBackoffDurations { + /// Applies to connection problems where there is a chance that they will be resolved after the + /// short duration. + #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] + pub low: Duration, + /// Applies to more severe connection problems where there is a lower chance that they will be + /// resolved. + #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] + pub medium: Duration, + /// Intended for spammers, or bad peers in general. + #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] + pub high: Duration, + /// Maximum total backoff duration. 
+ #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] + pub max: Duration, +} + +impl PeerBackoffDurations { + /// Returns the corresponding [`Duration`] + pub const fn backoff(&self, kind: BackoffKind) -> Duration { + match kind { + BackoffKind::Low => self.low, + BackoffKind::Medium => self.medium, + BackoffKind::High => self.high, + } + } + + /// Returns the timestamp until which we should backoff. + /// + /// The Backoff duration is capped by the configured maximum backoff duration. + pub fn backoff_until(&self, kind: BackoffKind, backoff_counter: u8) -> std::time::Instant { + let backoff_time = self.backoff(kind); + let backoff_time = backoff_time + backoff_time * backoff_counter as u32; + let now = std::time::Instant::now(); + now + backoff_time.min(self.max) + } + + /// Returns durations for testing. + #[cfg(any(test, feature = "test-utils"))] + pub const fn test() -> Self { + Self { + low: Duration::from_millis(200), + medium: Duration::from_millis(200), + high: Duration::from_millis(200), + max: Duration::from_millis(200), + } + } +} + +impl Default for PeerBackoffDurations { + fn default() -> Self { + Self { + low: Duration::from_secs(30), + // 3min + medium: Duration::from_secs(60 * 3), + // 15min + high: Duration::from_secs(60 * 15), + // 1h + max: Duration::from_secs(60 * 60), + } + } +} + +/// Tracks stats about connected nodes +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize), serde(default))] +pub struct ConnectionsConfig { + /// Maximum allowed outbound connections. + pub max_outbound: usize, + /// Maximum allowed inbound connections. + pub max_inbound: usize, + /// Maximum allowed concurrent outbound dials. + #[cfg_attr(feature = "serde", serde(default))] + pub max_concurrent_outbound_dials: usize, +} + +impl Default for ConnectionsConfig { + fn default() -> Self { + Self { + max_outbound: DEFAULT_MAX_COUNT_PEERS_OUTBOUND as usize, + max_inbound: DEFAULT_MAX_COUNT_PEERS_INBOUND as usize, + max_concurrent_outbound_dials: DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS, + } + } +} + +/// Config type for initiating a `PeersManager` instance. +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "serde", serde(default))] +pub struct PeersConfig { + /// How often to recheck free slots for outbound connections. + #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] + pub refill_slots_interval: Duration, + /// Trusted nodes to connect to or accept from + pub trusted_nodes: HashSet, + /// Connect to or accept from trusted nodes only? + #[cfg_attr(feature = "serde", serde(alias = "connect_trusted_nodes_only"))] + pub trusted_nodes_only: bool, + /// Maximum number of backoff attempts before we give up on a peer and dropping. + /// + /// The max time spent of a peer before it's removed from the set is determined by the + /// configured backoff duration and the max backoff count. + /// + /// With a backoff counter of 5 and a backoff duration of 1h, the minimum time spent of the + /// peer in the table is the sum of all backoffs (1h + 2h + 3h + 4h + 5h = 15h). + /// + /// Note: this does not apply to trusted peers. + pub max_backoff_count: u8, + /// Basic nodes to connect to. + #[cfg_attr(feature = "serde", serde(skip))] + pub basic_nodes: HashSet, + /// How long to ban bad peers. + #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] + pub ban_duration: Duration, + /// Restrictions on `PeerIds` and Ips. 
+ #[cfg_attr(feature = "serde", serde(skip))] + pub ban_list: BanList, + /// Restrictions on connections. + pub connection_info: ConnectionsConfig, + /// How to weigh reputation changes. + pub reputation_weights: ReputationChangeWeights, + /// How long to backoff peers that we are failed to connect to for non-fatal reasons. + /// + /// The backoff duration increases with number of backoff attempts. + pub backoff_durations: PeerBackoffDurations, +} + +impl Default for PeersConfig { + fn default() -> Self { + Self { + refill_slots_interval: Duration::from_millis(5_000), + connection_info: Default::default(), + reputation_weights: Default::default(), + ban_list: Default::default(), + // Ban peers for 12h + ban_duration: Duration::from_secs(60 * 60 * 12), + backoff_durations: Default::default(), + trusted_nodes: Default::default(), + trusted_nodes_only: false, + basic_nodes: Default::default(), + max_backoff_count: 5, + } + } +} + +impl PeersConfig { + /// A set of `peer_ids` and ip addr that we want to never connect to + pub fn with_ban_list(mut self, ban_list: BanList) -> Self { + self.ban_list = ban_list; + self + } + + /// Configure how long to ban bad peers + pub const fn with_ban_duration(mut self, ban_duration: Duration) -> Self { + self.ban_duration = ban_duration; + self + } + + /// Maximum allowed outbound connections. + pub const fn with_max_outbound(mut self, max_outbound: usize) -> Self { + self.connection_info.max_outbound = max_outbound; + self + } + + /// Maximum allowed inbound connections with optional update. + pub const fn with_max_inbound_opt(mut self, max_inbound: Option) -> Self { + if let Some(max_inbound) = max_inbound { + self.connection_info.max_inbound = max_inbound; + } + self + } + + /// Maximum allowed outbound connections with optional update. + pub const fn with_max_outbound_opt(mut self, max_outbound: Option) -> Self { + if let Some(max_outbound) = max_outbound { + self.connection_info.max_outbound = max_outbound; + } + self + } + + /// Maximum allowed inbound connections. + pub const fn with_max_inbound(mut self, max_inbound: usize) -> Self { + self.connection_info.max_inbound = max_inbound; + self + } + + /// Maximum allowed concurrent outbound dials. + pub const fn with_max_concurrent_dials(mut self, max_concurrent_outbound_dials: usize) -> Self { + self.connection_info.max_concurrent_outbound_dials = max_concurrent_outbound_dials; + self + } + + /// Nodes to always connect to. + pub fn with_trusted_nodes(mut self, nodes: HashSet) -> Self { + self.trusted_nodes = nodes; + self + } + + /// Connect only to trusted nodes. + pub const fn with_trusted_nodes_only(mut self, trusted_only: bool) -> Self { + self.trusted_nodes_only = trusted_only; + self + } + + /// Nodes available at launch. + pub fn with_basic_nodes(mut self, nodes: HashSet) -> Self { + self.basic_nodes = nodes; + self + } + + /// Configures the max allowed backoff count. + pub const fn with_max_backoff_count(mut self, max_backoff_count: u8) -> Self { + self.max_backoff_count = max_backoff_count; + self + } + + /// Configures how to weigh reputation changes. 
+    pub const fn with_reputation_weights(
+        mut self,
+        reputation_weights: ReputationChangeWeights,
+    ) -> Self {
+        self.reputation_weights = reputation_weights;
+        self
+    }
+
+    /// Configures how long to back off peers that we failed to connect to for non-fatal reasons.
+    pub const fn with_backoff_durations(mut self, backoff_durations: PeerBackoffDurations) -> Self {
+        self.backoff_durations = backoff_durations;
+        self
+    }
+
+    /// Returns the maximum number of peers, inbound and outbound.
+    pub const fn max_peers(&self) -> usize {
+        self.connection_info.max_outbound + self.connection_info.max_inbound
+    }
+
+    /// Reads the nodes available at launch from the given file. Ignored if `None`.
+    pub fn with_basic_nodes_from_file(
+        self,
+        optional_file: Option<impl AsRef<Path>>,
+    ) -> Result<Self, io::Error> {
+        let Some(file_path) = optional_file else { return Ok(self) };
+        let reader = match std::fs::File::open(file_path.as_ref()) {
+            Ok(file) => io::BufReader::new(file),
+            Err(e) if e.kind() == ErrorKind::NotFound => return Ok(self),
+            Err(e) => Err(e)?,
+        };
+        info!(target: "net::peers", file = %file_path.as_ref().display(), "Loading saved peers");
+        let nodes: HashSet<NodeRecord> = serde_json::from_reader(reader)?;
+        Ok(self.with_basic_nodes(nodes))
+    }
+
+    /// Returns settings for testing.
+    #[cfg(any(test, feature = "test-utils"))]
+    pub fn test() -> Self {
+        Self {
+            refill_slots_interval: Duration::from_millis(100),
+            backoff_durations: PeerBackoffDurations::test(),
+            ..Default::default()
+        }
+    }
+}
diff --git a/crates/net/network-types/src/peers/mod.rs b/crates/net/network-types/src/peers/mod.rs
new file mode 100644
index 000000000..4b195750b
--- /dev/null
+++ b/crates/net/network-types/src/peers/mod.rs
@@ -0,0 +1,5 @@
+pub mod reputation;
+pub use reputation::ReputationChangeWeights;
+
+pub mod config;
+pub use config::{ConnectionsConfig, PeersConfig};
diff --git a/crates/net/network/src/peers/reputation.rs b/crates/net/network-types/src/peers/reputation.rs
similarity index 92%
rename from crates/net/network/src/peers/reputation.rs
rename to crates/net/network-types/src/peers/reputation.rs
index 9d3ec256b..13fac4c1e 100644
--- a/crates/net/network/src/peers/reputation.rs
+++ b/crates/net/network-types/src/peers/reputation.rs
@@ -3,13 +3,13 @@
 use reth_network_api::{Reputation, ReputationChangeKind};
 
 /// The default reputation of a peer
-pub(crate) const DEFAULT_REPUTATION: Reputation = 0;
+pub const DEFAULT_REPUTATION: Reputation = 0;
 
 /// The minimal unit we're measuring reputation
 const REPUTATION_UNIT: i32 = -1024;
 
 /// The reputation value below which new connection from/to peers are rejected.
-pub(crate) const BANNED_REPUTATION: i32 = 50 * REPUTATION_UNIT;
+pub const BANNED_REPUTATION: i32 = 50 * REPUTATION_UNIT;
 
 /// The reputation change to apply to a peer that dropped the connection.
 const REMOTE_DISCONNECT_REPUTATION_CHANGE: i32 = 4 * REPUTATION_UNIT;
@@ -42,11 +42,11 @@ const BAD_ANNOUNCEMENT_REPUTATION_CHANGE: i32 = REPUTATION_UNIT;
 /// This gives a trusted peer more leeway when interacting with the node, which is useful for in
 /// custom setups. By not setting this to `0` we still allow trusted peer penalization but less than
 /// untrusted peers.
-pub(crate) const MAX_TRUSTED_PEER_REPUTATION_CHANGE: Reputation = 2 * REPUTATION_UNIT;
+pub const MAX_TRUSTED_PEER_REPUTATION_CHANGE: Reputation = 2 * REPUTATION_UNIT;
 
 /// Returns `true` if the given reputation is below the [`BANNED_REPUTATION`] threshold
 #[inline]
-pub(crate) const fn is_banned_reputation(reputation: i32) -> bool {
+pub const fn is_banned_reputation(reputation: i32) -> bool {
     reputation < BANNED_REPUTATION
 }
 
@@ -80,7 +80,7 @@ pub struct ReputationChangeWeights {
 impl ReputationChangeWeights {
     /// Returns the quantifiable [`ReputationChange`] for the given [`ReputationChangeKind`] using
     /// the configured weights
-    pub(crate) fn change(&self, kind: ReputationChangeKind) -> ReputationChange {
+    pub fn change(&self, kind: ReputationChangeKind) -> ReputationChange {
         match kind {
             ReputationChangeKind::BadMessage => self.bad_message.into(),
             ReputationChangeKind::BadBlock => self.bad_block.into(),
@@ -115,14 +115,14 @@ impl Default for ReputationChangeWeights {
 
 /// Represents a change in a peer's reputation.
 #[derive(Debug, Copy, Clone, Default)]
-pub(crate) struct ReputationChange(Reputation);
+pub struct ReputationChange(Reputation);
 
 // === impl ReputationChange ===
 
 impl ReputationChange {
     /// Helper type for easier conversion
     #[inline]
-    pub(crate) const fn as_i32(self) -> Reputation {
+    pub const fn as_i32(self) -> Reputation {
         self.0
     }
 }
diff --git a/crates/net/network/src/session/config.rs b/crates/net/network-types/src/session/config.rs
similarity index 65%
rename from crates/net/network/src/session/config.rs
rename to crates/net/network-types/src/session/config.rs
index 6c7fc282d..941448eff 100644
--- a/crates/net/network/src/session/config.rs
+++ b/crates/net/network-types/src/session/config.rs
@@ -1,9 +1,6 @@
-//! Configuration types for [`SessionManager`](crate::session::SessionManager).
+//! Configuration types for the peer session manager.
 
-use crate::{
-    peers::{DEFAULT_MAX_COUNT_PEERS_INBOUND, DEFAULT_MAX_COUNT_PEERS_OUTBOUND},
-    session::{Direction, ExceedsSessionLimit},
-};
+use crate::peers::config::{DEFAULT_MAX_COUNT_PEERS_INBOUND, DEFAULT_MAX_COUNT_PEERS_OUTBOUND};
 use std::time::Duration;
 
 /// Default request timeout for a single request.
@@ -29,7 +26,7 @@ const DEFAULT_MAX_PEERS: usize =
 /// With maxed out peers, this will allow for 3 messages per session (average)
 const DEFAULT_SESSION_EVENT_BUFFER_SIZE: usize = DEFAULT_MAX_PEERS * 2;
 
-/// Configuration options when creating a [`SessionManager`](crate::session::SessionManager).
+/// Configuration options for peer session management.
 #[derive(Debug, Clone, PartialEq, Eq)]
 #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
 #[cfg_attr(feature = "serde", serde(default))]
@@ -111,10 +108,14 @@ impl SessionsConfig {
 #[derive(Debug, Clone, Default, PartialEq, Eq)]
 #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
 pub struct SessionLimits {
-    max_pending_inbound: Option<u32>,
-    max_pending_outbound: Option<u32>,
-    max_established_inbound: Option<u32>,
-    max_established_outbound: Option<u32>,
+    /// Maximum allowed pending inbound connections.
+    pub max_pending_inbound: Option<u32>,
+    /// Maximum allowed pending outbound connections.
+    pub max_pending_outbound: Option<u32>,
+    /// Maximum allowed established inbound connections.
+    pub max_established_inbound: Option<u32>,
+    /// Maximum allowed established outbound connections.
+    pub max_established_outbound: Option<u32>,
 }
 
 impl SessionLimits {
@@ -143,107 +144,10 @@ impl SessionLimits {
     }
 }
 
-/// Keeps track of all sessions.
-#[derive(Debug, Clone)] -pub struct SessionCounter { - /// Limits to enforce. - limits: SessionLimits, - /// Number of pending incoming sessions. - pending_inbound: u32, - /// Number of pending outgoing sessions. - pending_outbound: u32, - /// Number of active inbound sessions. - active_inbound: u32, - /// Number of active outbound sessions. - active_outbound: u32, -} - -// === impl SessionCounter === - -impl SessionCounter { - pub(crate) const fn new(limits: SessionLimits) -> Self { - Self { - limits, - pending_inbound: 0, - pending_outbound: 0, - active_inbound: 0, - active_outbound: 0, - } - } - - pub(crate) fn inc_pending_inbound(&mut self) { - self.pending_inbound += 1; - } - - pub(crate) fn inc_pending_outbound(&mut self) { - self.pending_outbound += 1; - } - - pub(crate) fn dec_pending(&mut self, direction: &Direction) { - match direction { - Direction::Outgoing(_) => { - self.pending_outbound -= 1; - } - Direction::Incoming => { - self.pending_inbound -= 1; - } - } - } - - pub(crate) fn inc_active(&mut self, direction: &Direction) { - match direction { - Direction::Outgoing(_) => { - self.active_outbound += 1; - } - Direction::Incoming => { - self.active_inbound += 1; - } - } - } - - pub(crate) fn dec_active(&mut self, direction: &Direction) { - match direction { - Direction::Outgoing(_) => { - self.active_outbound -= 1; - } - Direction::Incoming => { - self.active_inbound -= 1; - } - } - } - - pub(crate) const fn ensure_pending_outbound(&self) -> Result<(), ExceedsSessionLimit> { - Self::ensure(self.pending_outbound, self.limits.max_pending_outbound) - } - - pub(crate) const fn ensure_pending_inbound(&self) -> Result<(), ExceedsSessionLimit> { - Self::ensure(self.pending_inbound, self.limits.max_pending_inbound) - } - - const fn ensure(current: u32, limit: Option) -> Result<(), ExceedsSessionLimit> { - if let Some(limit) = limit { - if current >= limit { - return Err(ExceedsSessionLimit(limit)) - } - } - Ok(()) - } -} - #[cfg(test)] mod tests { use super::*; - #[test] - fn test_limits() { - let mut limits = SessionCounter::new(SessionLimits::default().with_max_pending_inbound(2)); - assert!(limits.ensure_pending_outbound().is_ok()); - limits.inc_pending_inbound(); - assert!(limits.ensure_pending_inbound().is_ok()); - limits.inc_pending_inbound(); - assert!(limits.ensure_pending_inbound().is_err()); - } - #[test] fn scale_session_event_buffer() { let config = SessionsConfig::default().with_upscaled_event_buffer(10); diff --git a/crates/net/network-types/src/session/mod.rs b/crates/net/network-types/src/session/mod.rs new file mode 100644 index 000000000..a5b613189 --- /dev/null +++ b/crates/net/network-types/src/session/mod.rs @@ -0,0 +1,4 @@ +//! Peer sessions configuration. 
+ +pub mod config; +pub use config::{SessionLimits, SessionsConfig}; diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 439d23bfd..695af130a 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -14,6 +14,7 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true +reth-fs-util.workspace = true reth-primitives.workspace = true reth-net-banlist.workspace = true reth-network-api.workspace = true @@ -25,10 +26,12 @@ reth-eth-wire.workspace = true reth-ecies.workspace = true reth-tasks.workspace = true reth-transaction-pool.workspace = true -reth-provider.workspace = true +reth-storage-api.workspace = true +reth-provider = { workspace = true, optional = true } reth-tokio-util.workspace = true reth-consensus.workspace = true -reth-network-peers.workspace = true +reth-network-peers = { workspace = true, features = ["net"] } +reth-network-types.workspace = true # ethereum enr = { workspace = true, features = ["serde", "rust-secp256k1"] } @@ -75,6 +78,7 @@ reth-primitives = { workspace = true, features = ["test-utils"] } # integration tests reth-network = { workspace = true, features = ["test-utils"] } reth-network-p2p = { workspace = true, features = ["test-utils"] } +reth-network-types = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true @@ -95,8 +99,8 @@ criterion = { workspace = true, features = ["async_tokio", "html_reports"] } [features] default = ["serde"] geth-tests = [] -serde = ["dep:serde", "dep:humantime-serde", "secp256k1/serde", "enr/serde", "dep:serde_json"] -test-utils = ["reth-provider/test-utils", "dep:tempfile", "reth-transaction-pool/test-utils"] +serde = ["dep:serde", "dep:humantime-serde", "secp256k1/serde", "enr/serde", "dep:serde_json", "reth-network-types/serde"] +test-utils = ["dep:reth-provider", "reth-provider?/test-utils", "dep:tempfile", "reth-transaction-pool/test-utils", "reth-network-types/test-utils"] bsc = ["reth-eth-wire/bsc"] [[bench]] diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 151421b4a..b197fc55f 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -3,22 +3,18 @@ use crate::{ error::NetworkError, import::{BlockImport, ProofOfStakeBlockImport}, - peers::PeersConfig, - session::SessionsConfig, transactions::TransactionsManagerConfig, NetworkHandle, NetworkManager, }; -use reth_chainspec::{ - net::{mainnet_nodes, sepolia_nodes, TrustedPeer}, - ChainSpec, MAINNET, -}; +use reth_chainspec::{ChainSpec, MAINNET}; use reth_discv4::{Discv4Config, Discv4ConfigBuilder, NatResolver, DEFAULT_DISCOVERY_ADDRESS}; use reth_discv5::NetworkStackId; use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; -use reth_network_peers::{pk2id, PeerId}; +use reth_network_peers::{mainnet_nodes, pk2id, sepolia_nodes, PeerId, TrustedPeer}; +use reth_network_types::{PeersConfig, SessionsConfig}; use reth_primitives::{ForkFilter, Head}; -use reth_provider::{BlockReader, HeaderProvider}; +use reth_storage_api::{BlockNumReader, BlockReader, HeaderProvider}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use secp256k1::SECP256K1; use std::{collections::HashSet, net::SocketAddr, sync::Arc}; @@ -92,6 +88,11 @@ impl NetworkConfig<()> { pub fn builder(secret_key: SecretKey) -> NetworkConfigBuilder { NetworkConfigBuilder::new(secret_key) } + + /// Convenience method for creating the corresponding 
builder type with a random secret key. + pub fn builder_with_rng_secret_key() -> NetworkConfigBuilder { + NetworkConfigBuilder::with_rng_secret_key() + } } impl NetworkConfig { @@ -118,6 +119,16 @@ impl NetworkConfig { } } +impl NetworkConfig +where + C: BlockNumReader, +{ + /// Convenience method for calling [`NetworkManager::new`]. + pub async fn manager(self) -> Result, NetworkError> { + NetworkManager::new(self).await + } +} + impl NetworkConfig where C: BlockReader + HeaderProvider + Clone + Unpin + 'static, @@ -180,6 +191,12 @@ pub struct NetworkConfigBuilder { #[allow(missing_docs)] impl NetworkConfigBuilder { + /// Create a new builder instance with a random secret key. + pub fn with_rng_secret_key() -> Self { + Self::new(rng_secret_key()) + } + + /// Create a new builder instance with the given secret key. pub fn new(secret_key: SecretKey) -> Self { Self { secret_key, @@ -216,6 +233,11 @@ impl NetworkConfigBuilder { pk2id(&self.secret_key.public_key(SECP256K1)) } + /// Returns the configured [`SecretKey`], from which the node's identity is derived. + pub const fn secret_key(&self) -> &SecretKey { + &self.secret_key + } + /// Sets the chain spec. pub fn chain_spec(mut self, chain_spec: Arc) -> Self { self.chain_spec = chain_spec; @@ -411,36 +433,6 @@ impl NetworkConfigBuilder { } } - /// Calls a closure on [`reth_discv5::ConfigBuilder`], if discv5 discovery is enabled and the - /// builder has been set. - /// ``` - /// use reth_chainspec::MAINNET; - /// use reth_network::NetworkConfigBuilder; - /// use reth_provider::test_utils::NoopProvider; - /// use secp256k1::{rand::thread_rng, SecretKey}; - /// - /// let sk = SecretKey::new(&mut thread_rng()); - /// let fork_id = MAINNET.latest_fork_id(); - /// let network_config = NetworkConfigBuilder::new(sk) - /// .map_discv5_config_builder(|builder| builder.fork(b"eth", fork_id)) - /// .build(NoopProvider::default()); - /// ``` - pub fn map_discv5_config_builder( - mut self, - f: impl FnOnce(reth_discv5::ConfigBuilder) -> reth_discv5::ConfigBuilder, - ) -> Self { - if let Some(mut builder) = self.discovery_v5_builder { - if let Some(network_stack_id) = NetworkStackId::id(&self.chain_spec) { - let fork_id = self.chain_spec.latest_fork_id(); - builder = builder.fork(network_stack_id, fork_id); - } - - self.discovery_v5_builder = Some(f(builder)); - } - - self - } - /// Adds a new additional protocol to the `RLPx` sub-protocol list. pub fn add_rlpx_sub_protocol(mut self, protocol: impl IntoRlpxSubProtocol) -> Self { self.extra_protocols.push(protocol); @@ -461,11 +453,10 @@ impl NetworkConfigBuilder { /// Convenience function for creating a [`NetworkConfig`] with a noop provider that does /// nothing. 
- #[cfg(any(test, feature = "test-utils"))] pub fn build_with_noop_provider( self, - ) -> NetworkConfig { - self.build(reth_provider::test_utils::NoopProvider::default()) + ) -> NetworkConfig { + self.build(Default::default()) } /// Consumes the type and creates the actual [`NetworkConfig`] @@ -480,7 +471,7 @@ impl NetworkConfigBuilder { secret_key, mut dns_discovery_config, discovery_v4_builder, - discovery_v5_builder, + mut discovery_v5_builder, boot_nodes, discovery_addr, listener_addr, @@ -497,6 +488,15 @@ impl NetworkConfigBuilder { transactions_manager_config, } = self; + discovery_v5_builder = discovery_v5_builder.map(|mut builder| { + if let Some(network_stack_id) = NetworkStackId::id(&chain_spec) { + let fork_id = chain_spec.latest_fork_id(); + builder = builder.fork(network_stack_id, fork_id) + } + + builder + }); + let listener_addr = listener_addr.unwrap_or(DEFAULT_DISCOVERY_ADDRESS); let mut hello_message = @@ -585,7 +585,6 @@ mod tests { use reth_dns_discovery::tree::LinkEntry; use reth_primitives::ForkHash; use reth_provider::test_utils::NoopProvider; - use std::collections::BTreeMap; fn builder() -> NetworkConfigBuilder { let secret_key = SecretKey::new(&mut thread_rng()); @@ -609,7 +608,7 @@ mod tests { let mut chain_spec = Arc::clone(&MAINNET); // remove any `next` fields we would have by removing all hardforks - Arc::make_mut(&mut chain_spec).hardforks = BTreeMap::new(); + Arc::make_mut(&mut chain_spec).hardforks = Default::default(); // check that the forkid is initialized with the genesis and no other forks let genesis_fork_hash = ForkHash::from(chain_spec.genesis_hash()); diff --git a/crates/net/network/src/discovery.rs b/crates/net/network/src/discovery.rs index c320c7fe1..2e51a6b71 100644 --- a/crates/net/network/src/discovery.rs +++ b/crates/net/network/src/discovery.rs @@ -4,6 +4,7 @@ use crate::{ cache::LruMap, error::{NetworkError, ServiceKind}, manager::DiscoveredEvent, + peers::PeerAddr, }; use enr::Enr; use futures::StreamExt; @@ -40,7 +41,7 @@ pub struct Discovery { /// All nodes discovered via discovery protocol. /// /// These nodes can be ephemeral and are updated via the discovery protocol. - discovered_nodes: LruMap, + discovered_nodes: LruMap, /// Local ENR of the discovery v4 service (discv5 ENR has same [`PeerId`]). 
local_enr: NodeRecord, /// Handler to interact with the Discovery v4 service @@ -204,12 +205,14 @@ impl Discovery { /// Processes an incoming [`NodeRecord`] update from a discovery service fn on_node_record_update(&mut self, record: NodeRecord, fork_id: Option) { - let id = record.id; - let addr = record.tcp_addr(); + let peer_id = record.id; + let tcp_addr = record.tcp_addr(); + let udp_addr = record.udp_addr(); + let addr = PeerAddr::new(tcp_addr, Some(udp_addr)); _ = - self.discovered_nodes.get_or_insert(id, || { + self.discovered_nodes.get_or_insert(peer_id, || { self.queued_events.push_back(DiscoveryEvent::NewNode( - DiscoveredEvent::EventQueued { peer_id: id, socket_addr: addr, fork_id }, + DiscoveredEvent::EventQueued { peer_id, addr, fork_id }, )); addr @@ -224,8 +227,8 @@ impl Discovery { DiscoveryUpdate::EnrForkId(node, fork_id) => { self.queued_events.push_back(DiscoveryEvent::EnrForkId(node.id, fork_id)) } - DiscoveryUpdate::Removed(node) => { - self.discovered_nodes.remove(&node); + DiscoveryUpdate::Removed(peer_id) => { + self.discovered_nodes.remove(&peer_id); } DiscoveryUpdate::Batch(updates) => { for update in updates { @@ -427,7 +430,7 @@ mod tests { assert_eq!( DiscoveryEvent::NewNode(DiscoveredEvent::EventQueued { peer_id: discv4_id_2, - socket_addr: discv4_enr_2.tcp_addr(), + addr: PeerAddr::new(discv4_enr_2.tcp_addr(), Some(discv4_enr_2.udp_addr())), fork_id: None }), event_node_1 @@ -435,7 +438,7 @@ mod tests { assert_eq!( DiscoveryEvent::NewNode(DiscoveredEvent::EventQueued { peer_id: discv4_id_1, - socket_addr: discv4_enr_1.tcp_addr(), + addr: PeerAddr::new(discv4_enr_1.tcp_addr(), Some(discv4_enr_1.udp_addr())), fork_id: None }), event_node_2 diff --git a/crates/net/network/src/error.rs b/crates/net/network/src/error.rs index 9019a79f2..d5e0f4537 100644 --- a/crates/net/network/src/error.rs +++ b/crates/net/network/src/error.rs @@ -6,6 +6,7 @@ use reth_eth_wire::{ errors::{EthHandshakeError, EthStreamError, P2PHandshakeError, P2PStreamError}, DisconnectReason, }; +use reth_network_types::BackoffKind; use std::{fmt, io, io::ErrorKind, net::SocketAddr}; /// Service kind. @@ -104,34 +105,6 @@ pub(crate) trait SessionError: fmt::Debug + fmt::Display { fn should_backoff(&self) -> Option; } -/// Describes the type of backoff should be applied. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum BackoffKind { - /// Use the lowest configured backoff duration. - /// - /// This applies to connection problems where there is a chance that they will be resolved - /// after the short duration. - Low, - /// Use a slightly higher duration to put a peer in timeout - /// - /// This applies to more severe connection problems where there is a lower chance that they - /// will be resolved. - Medium, - /// Use the max configured backoff duration. - /// - /// This is intended for spammers, or bad peers in general. - High, -} - -// === impl BackoffKind === - -impl BackoffKind { - /// Returns true if the backoff is considered severe. 
- pub(crate) const fn is_severe(&self) -> bool { - matches!(self, Self::Medium | Self::High) - } -} - impl SessionError for EthStreamError { fn merits_discovery_ban(&self) -> bool { match self { diff --git a/crates/net/network/src/eth_requests.rs b/crates/net/network/src/eth_requests.rs index 2c9e2f4d0..8ee317554 100644 --- a/crates/net/network/src/eth_requests.rs +++ b/crates/net/network/src/eth_requests.rs @@ -7,13 +7,13 @@ use crate::{ use alloy_rlp::Encodable; use futures::StreamExt; use reth_eth_wire::{ - BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, GetNodeData, GetReceipts, NodeData, - Receipts, + BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders, GetNodeData, GetReceipts, + HeadersDirection, NodeData, Receipts, }; use reth_network_p2p::error::RequestResult; use reth_network_peers::PeerId; -use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection}; -use reth_provider::{BlockReader, HeaderProvider, ReceiptProvider}; +use reth_primitives::{BlockBody, BlockHashOrNumber, Header}; +use reth_storage_api::{BlockReader, HeaderProvider, ReceiptProvider}; use std::{ future::Future, pin::Pin, @@ -66,8 +66,12 @@ pub struct EthRequestHandler { impl EthRequestHandler { /// Create a new instance pub fn new(client: C, peers: PeersHandle, incoming: Receiver) -> Self { - let metrics = Default::default(); - Self { client, peers, incoming_requests: ReceiverStream::new(incoming), metrics } + Self { + client, + peers, + incoming_requests: ReceiverStream::new(incoming), + metrics: Default::default(), + } } } @@ -124,11 +128,7 @@ where total_bytes += header.length(); headers.push(header); - if headers.len() >= MAX_HEADERS_SERVE { - break - } - - if total_bytes > SOFT_RESPONSE_LIMIT { + if headers.len() >= MAX_HEADERS_SERVE || total_bytes > SOFT_RESPONSE_LIMIT { break } } else { @@ -163,22 +163,12 @@ where for hash in request.0 { if let Some(block) = self.client.block_by_hash(hash).unwrap_or_default() { - let body = BlockBody { - transactions: block.body, - ommers: block.ommers, - withdrawals: block.withdrawals, - sidecars: None, - requests: block.requests, - }; + let body: BlockBody = block.into(); total_bytes += body.length(); bodies.push(body); - if bodies.len() >= MAX_BODIES_SERVE { - break - } - - if total_bytes > SOFT_RESPONSE_LIMIT { + if bodies.len() >= MAX_BODIES_SERVE || total_bytes > SOFT_RESPONSE_LIMIT { break } } else { @@ -213,11 +203,7 @@ where total_bytes += receipt.length(); receipts.push(receipt); - if receipts.len() >= MAX_RECEIPTS_SERVE { - break - } - - if total_bytes > SOFT_RESPONSE_LIMIT { + if receipts.len() >= MAX_RECEIPTS_SERVE || total_bytes > SOFT_RESPONSE_LIMIT { break } } else { diff --git a/crates/net/network/src/lib.rs b/crates/net/network/src/lib.rs index 224bc6926..5ccffda4f 100644 --- a/crates/net/network/src/lib.rs +++ b/crates/net/network/src/lib.rs @@ -46,8 +46,8 @@ //! //! ``` //! # async fn launch() { -//! use reth_chainspec::net::mainnet_nodes; //! use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; +//! use reth_network_peers::mainnet_nodes; //! use reth_provider::test_utils::NoopProvider; //! //! // This block provider implementation is used for testing purposes. @@ -71,8 +71,8 @@ //! ### Configure all components of the Network with the [`NetworkBuilder`] //! //! ``` -//! use reth_chainspec::net::mainnet_nodes; //! use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; +//! use reth_network_peers::mainnet_nodes; //! use reth_provider::test_utils::NoopProvider; //! 
use reth_transaction_pool::TransactionPool; //! async fn launch(pool: Pool) { @@ -143,12 +143,12 @@ pub use fetch::FetchClient; pub use manager::{NetworkEvent, NetworkManager}; pub use message::PeerRequest; pub use network::{NetworkEvents, NetworkHandle, NetworkProtocols}; -pub use peers::PeersConfig; pub use session::{ ActiveSessionHandle, ActiveSessionMessage, Direction, PeerInfo, PendingSessionEvent, PendingSessionHandle, PendingSessionHandshakeError, SessionCommand, SessionEvent, SessionId, - SessionLimits, SessionManager, SessionsConfig, + SessionManager, }; pub use transactions::{FilterAnnouncement, MessageFilter, ValidateTx68}; pub use reth_eth_wire::{DisconnectReason, HelloMessageWithProtocols}; +pub use reth_network_types::{PeersConfig, SessionsConfig}; diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index acdc533ce..a3098982a 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -31,7 +31,7 @@ use crate::{ DisconnectMetrics, NetworkMetrics, NETWORK_PEER_SCOPE, NETWORK_POOL_TRANSACTIONS_SCOPE, }, network::{NetworkHandle, NetworkHandleMessage}, - peers::{PeersHandle, PeersManager}, + peers::{PeerAddr, PeersHandle, PeersManager}, poll_nested_stream_with_budget, protocol::IntoRlpxSubProtocol, session::SessionManager, @@ -46,16 +46,18 @@ use reth_eth_wire::{ capability::{Capabilities, CapabilityMessage}, DisconnectReason, EthVersion, Status, }; +use reth_fs_util::{self as fs, FsPathError}; use reth_metrics::common::mpsc::UnboundedMeteredSender; -use reth_network_api::{EthProtocolInfo, NetworkStatus, ReputationChangeKind}; +use reth_network_api::{EthProtocolInfo, NetworkStatus, PeerInfo, ReputationChangeKind}; use reth_network_peers::{NodeRecord, PeerId}; use reth_primitives::ForkId; -use reth_provider::{BlockNumReader, BlockReader}; +use reth_storage_api::BlockNumReader; use reth_tasks::shutdown::GracefulShutdown; use reth_tokio_util::EventSender; use secp256k1::SecretKey; use std::{ net::SocketAddr, + path::Path, pin::Pin, sync::{ atomic::{AtomicU64, AtomicUsize, Ordering}, @@ -291,8 +293,8 @@ where /// components of the network /// /// ``` - /// use reth_chainspec::net::mainnet_nodes; /// use reth_network::{config::rng_secret_key, NetworkConfig, NetworkManager}; + /// use reth_network_peers::mainnet_nodes; /// use reth_provider::test_utils::NoopProvider; /// use reth_transaction_pool::TransactionPool; /// async fn launch(pool: Pool) { @@ -347,6 +349,11 @@ where self.swarm.state().peers().iter_peers() } + /// Returns the number of peers in the peer set. + pub fn num_known_peers(&self) -> usize { + self.swarm.state().peers().num_known_peers() + } + /// Returns a new [`PeersHandle`] that can be cloned and shared. /// /// The [`PeersHandle`] can be used to interact with the network's peer set. @@ -354,6 +361,18 @@ where self.swarm.state().peers().handle() } + /// Collect the peers from the [`NetworkManager`] and write them to the given + /// `persistent_peers_file`. + pub fn write_peers_to_file(&self, persistent_peers_file: &Path) -> Result<(), FsPathError> { + let known_peers = self.all_peers().collect::>(); + let known_peers = serde_json::to_string_pretty(&known_peers).map_err(|e| { + FsPathError::WriteJson { source: e, path: persistent_peers_file.to_path_buf() } + })?; + persistent_peers_file.parent().map(fs::create_dir_all).transpose()?; + fs::write(persistent_peers_file, known_peers)?; + Ok(()) + } + /// Returns a new [`FetchClient`] that can be cloned and shared. 
/// /// The [`FetchClient`] is the entrypoint for sending requests to the network. @@ -614,7 +633,7 @@ where } } NetworkHandleMessage::RemovePeer(peer_id, kind) => { - self.swarm.state_mut().remove_peer(peer_id, kind); + self.swarm.state_mut().remove_peer_kind(peer_id, kind); } NetworkHandleMessage::DisconnectPeer(peer_id, reason) => { self.swarm.sessions_mut().disconnect(peer_id, reason); @@ -656,17 +675,17 @@ where } } NetworkHandleMessage::GetPeerInfos(tx) => { - let _ = tx.send(self.swarm.sessions_mut().get_peer_info()); + let _ = tx.send(self.get_peer_infos()); } NetworkHandleMessage::GetPeerInfoById(peer_id, tx) => { - let _ = tx.send(self.swarm.sessions_mut().get_peer_info_by_id(peer_id)); + let _ = tx.send(self.get_peer_info_by_id(peer_id)); } NetworkHandleMessage::GetPeerInfosByIds(peer_ids, tx) => { - let _ = tx.send(self.swarm.sessions().get_peer_infos_by_ids(peer_ids)); + let _ = tx.send(self.get_peer_infos_by_ids(peer_ids)); } NetworkHandleMessage::GetPeerInfosByPeerKind(kind, tx) => { - let peers = self.swarm.state().peers().peers_by_kind(kind); - let _ = tx.send(self.swarm.sessions().get_peer_infos_by_ids(peers)); + let peer_ids = self.swarm.state().peers().peers_by_kind(kind); + let _ = tx.send(self.get_peer_infos_by_ids(peer_ids)); } NetworkHandleMessage::AddRlpxSubProtocol(proto) => self.add_rlpx_sub_protocol(proto), NetworkHandleMessage::GetTransactionsHandle(tx) => { @@ -917,6 +936,42 @@ where } } + /// Returns [`PeerInfo`] for all connected peers + fn get_peer_infos(&self) -> Vec { + self.swarm + .sessions() + .active_sessions() + .iter() + .filter_map(|(&peer_id, session)| { + self.swarm + .state() + .peers() + .peer_by_id(peer_id) + .map(|(record, kind)| session.peer_info(&record, kind)) + }) + .collect() + } + + /// Returns [`PeerInfo`] for a given peer. + /// + /// Returns `None` if there's no active session to the peer. + fn get_peer_info_by_id(&self, peer_id: PeerId) -> Option { + self.swarm.sessions().active_sessions().get(&peer_id).and_then(|session| { + self.swarm + .state() + .peers() + .peer_by_id(peer_id) + .map(|(record, kind)| session.peer_info(&record, kind)) + }) + } + + /// Returns [`PeerInfo`] for a given peers. + /// + /// Ignore the non-active peer. + fn get_peer_infos_by_ids(&self, peer_ids: impl IntoIterator) -> Vec { + peer_ids.into_iter().filter_map(|peer_id| self.get_peer_info_by_id(peer_id)).collect() + } + /// Updates the metrics for active,established connections #[inline] fn update_active_connection_metrics(&self) { @@ -942,7 +997,7 @@ where impl NetworkManager where - C: BlockReader + Unpin, + C: BlockNumReader + Unpin, { /// Drives the [`NetworkManager`] future until a [`GracefulShutdown`] signal is received. 
/// @@ -971,7 +1026,7 @@ where impl Future for NetworkManager where - C: BlockReader + Unpin, + C: BlockNumReader + Unpin, { type Output = (); @@ -1082,7 +1137,7 @@ pub enum NetworkEvent { #[derive(Debug, Clone, PartialEq, Eq)] pub enum DiscoveredEvent { - EventQueued { peer_id: PeerId, socket_addr: SocketAddr, fork_id: Option }, + EventQueued { peer_id: PeerId, addr: PeerAddr, fork_id: Option }, } #[derive(Debug, Default)] diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 23e424173..3144312cf 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -3,7 +3,7 @@ use crate::{ discovery::DiscoveryEvent, manager::NetworkEvent, message::{EngineMessage, PeerRequest}, - peers::PeersHandle, + peers::{PeerAddr, PeersHandle}, protocol::RlpxSubProtocol, swarm::NetworkConnectionState, transactions::TransactionsHandle, @@ -272,7 +272,14 @@ impl Peers for NetworkHandle { /// Sends a message to the [`NetworkManager`](crate::NetworkManager) to add a peer to the known /// set, with the given kind. - fn add_peer_kind(&self, peer: PeerId, kind: PeerKind, addr: SocketAddr) { + fn add_peer_kind( + &self, + peer: PeerId, + kind: PeerKind, + tcp_addr: SocketAddr, + udp_addr: Option, + ) { + let addr = PeerAddr::new(tcp_addr, udp_addr); self.send_message(NetworkHandleMessage::AddPeerAddress(peer, kind, addr)); } @@ -437,7 +444,7 @@ pub(crate) enum NetworkHandleMessage { /// Marks a peer as trusted. AddTrustedPeerId(PeerId), /// Adds an address for a peer, including its ID, kind, and socket address. - AddPeerAddress(PeerId, PeerKind, SocketAddr), + AddPeerAddress(PeerId, PeerKind, PeerAddr), /// Removes a peer from the peerset corresponding to the given kind. RemovePeer(PeerId, PeerKind), /// Disconnects a connection to a peer if it exists, optionally providing a disconnect reason. diff --git a/crates/net/network/src/peers/manager.rs b/crates/net/network/src/peers.rs similarity index 84% rename from crates/net/network/src/peers/manager.rs rename to crates/net/network/src/peers.rs index bde0bd066..419502262 100644 --- a/crates/net/network/src/peers/manager.rs +++ b/crates/net/network/src/peers.rs @@ -1,12 +1,7 @@ +//! 
Peer related implementations + use crate::{ - error::{BackoffKind, SessionError}, - peers::{ - reputation::{ - is_banned_reputation, DEFAULT_REPUTATION, MAX_TRUSTED_PEER_REPUTATION_CHANGE, - }, - ReputationChangeWeights, DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS, - DEFAULT_MAX_COUNT_PEERS_INBOUND, DEFAULT_MAX_COUNT_PEERS_OUTBOUND, - }, + error::SessionError, session::{Direction, PendingSessionHandshakeError}, swarm::NetworkConnectionState, }; @@ -15,13 +10,21 @@ use reth_eth_wire::{errors::EthStreamError, DisconnectReason}; use reth_net_banlist::BanList; use reth_network_api::{PeerKind, ReputationChangeKind}; use reth_network_peers::{NodeRecord, PeerId}; +use reth_network_types::{ + peers::{ + config::PeerBackoffDurations, + reputation::{ + is_banned_reputation, DEFAULT_REPUTATION, MAX_TRUSTED_PEER_REPUTATION_CHANGE, + }, + }, + ConnectionsConfig, PeersConfig, ReputationChangeWeights, +}; use reth_primitives::ForkId; use std::{ collections::{hash_map::Entry, HashMap, HashSet, VecDeque}, fmt::Display, - io::{self, ErrorKind}, + io::{self}, net::{IpAddr, SocketAddr}, - path::Path, task::{Context, Poll}, time::Duration, }; @@ -31,7 +34,7 @@ use tokio::{ time::{Instant, Interval}, }; use tokio_stream::wrappers::UnboundedReceiverStream; -use tracing::{info, trace}; +use tracing::trace; /// A communication channel to the [`PeersManager`] to apply manual changes to the peer set. #[derive(Clone, Debug)] @@ -152,13 +155,17 @@ impl PeersManager { let mut peers = HashMap::with_capacity(trusted_nodes.len() + basic_nodes.len()); let mut trusted_peer_ids = HashSet::with_capacity(trusted_nodes.len()); - for NodeRecord { address, tcp_port, udp_port: _, id } in trusted_nodes { + for NodeRecord { address, tcp_port, udp_port, id } in trusted_nodes { trusted_peer_ids.insert(id); - peers.entry(id).or_insert_with(|| Peer::trusted(SocketAddr::from((address, tcp_port)))); + peers.entry(id).or_insert_with(|| { + Peer::trusted(PeerAddr::new_with_ports(address, tcp_port, Some(udp_port))) + }); } - for NodeRecord { address, tcp_port, udp_port: _, id } in basic_nodes { - peers.entry(id).or_insert_with(|| Peer::new(SocketAddr::from((address, tcp_port)))); + for NodeRecord { address, tcp_port, udp_port, id } in basic_nodes { + peers.entry(id).or_insert_with(|| { + Peer::new(PeerAddr::new_with_ports(address, tcp_port, Some(udp_port))) + }); } Self { @@ -170,7 +177,7 @@ impl PeersManager { reputation_weights, refill_slots_interval: tokio::time::interval(refill_slots_interval), release_interval: tokio::time::interval_at(now + unban_interval, unban_interval), - connection_info, + connection_info: ConnectionInfo::new(connection_info), ban_list, backed_off_peers: Default::default(), ban_duration, @@ -195,7 +202,29 @@ impl PeersManager { /// Returns an iterator over all peers pub(crate) fn iter_peers(&self) -> impl Iterator + '_ { - self.peers.iter().map(|(peer_id, v)| NodeRecord::new(v.addr, *peer_id)) + self.peers.iter().map(|(peer_id, v)| { + NodeRecord::new_with_ports( + v.addr.tcp.ip(), + v.addr.tcp.port(), + v.addr.udp.map(|addr| addr.port()), + *peer_id, + ) + }) + } + + /// Returns the `NodeRecord` and `PeerKind` for the given peer id + pub(crate) fn peer_by_id(&self, peer_id: PeerId) -> Option<(NodeRecord, PeerKind)> { + self.peers.get(&peer_id).map(|v| { + ( + NodeRecord::new_with_ports( + v.addr.tcp.ip(), + v.addr.tcp.port(), + v.addr.udp.map(|addr| addr.port()), + peer_id, + ), + v.kind, + ) + }) } /// Returns an iterator over all peer ids for peers with the given kind @@ -238,9 +267,7 @@ impl PeersManager { 
return Err(InboundConnectionError::IpBanned) } - if (!self.connection_info.has_in_capacity() || self.connection_info.max_inbound == 0) && - self.trusted_peer_ids.is_empty() - { + if !self.connection_info.has_in_capacity() && self.trusted_peer_ids.is_empty() { // if we don't have any inbound slots and no trusted peers, we don't accept any new // connections return Err(InboundConnectionError::ExceedsCapacity) @@ -324,33 +351,24 @@ impl PeersManager { peer.state = PeerConnectionState::In; is_trusted = is_trusted || peer.is_trusted(); - - // if a peer is not trusted and we don't have capacity for more inbound connections, - // disconnecting the peer - if !is_trusted && !has_in_capacity { - self.queued_actions.push_back(PeerAction::Disconnect { - peer_id, - reason: Some(DisconnectReason::TooManyPeers), - }); - } } Entry::Vacant(entry) => { // peer is missing in the table, we add it but mark it as to be removed after // disconnect, because we only know the outgoing port - let mut peer = Peer::with_state(addr, PeerConnectionState::In); + let mut peer = Peer::with_state(PeerAddr::tcp(addr), PeerConnectionState::In); peer.remove_after_disconnect = true; entry.insert(peer); self.queued_actions.push_back(PeerAction::PeerAdded(peer_id)); - - // disconnect the peer if we don't have capacity for more inbound connections - if !is_trusted && !has_in_capacity { - self.queued_actions.push_back(PeerAction::Disconnect { - peer_id, - reason: Some(DisconnectReason::TooManyPeers), - }); - } } } + + // disconnect the peer if we don't have capacity for more inbound connections + if !is_trusted && !has_in_capacity { + self.queued_actions.push_back(PeerAction::Disconnect { + peer_id, + reason: Some(DisconnectReason::TooManyPeers), + }); + } } /// Bans the peer temporarily with the configured ban timeout @@ -662,7 +680,7 @@ impl PeersManager { /// Called for a newly discovered peer. /// /// If the peer already exists, then the address, kind and `fork_id` will be updated. - pub(crate) fn add_peer(&mut self, peer_id: PeerId, addr: SocketAddr, fork_id: Option) { + pub(crate) fn add_peer(&mut self, peer_id: PeerId, addr: PeerAddr, fork_id: Option) { self.add_peer_kind(peer_id, PeerKind::Basic, addr, fork_id) } @@ -675,7 +693,7 @@ impl PeersManager { /// /// If the peer already exists, then the address and kind will be updated. 
#[allow(dead_code)] - pub(crate) fn add_trusted_peer(&mut self, peer_id: PeerId, addr: SocketAddr) { + pub(crate) fn add_trusted_peer(&mut self, peer_id: PeerId, addr: PeerAddr) { self.add_peer_kind(peer_id, PeerKind::Trusted, addr, None) } @@ -686,10 +704,10 @@ impl PeersManager { &mut self, peer_id: PeerId, kind: PeerKind, - addr: SocketAddr, + addr: PeerAddr, fork_id: Option, ) { - if self.ban_list.is_banned(&peer_id, &addr.ip()) { + if self.ban_list.is_banned(&peer_id, &addr.tcp.ip()) { return } @@ -708,7 +726,7 @@ impl PeersManager { } } Entry::Vacant(entry) => { - trace!(target: "net::peers", ?peer_id, ?addr, "discovered new node"); + trace!(target: "net::peers", ?peer_id, ?addr.tcp, "discovered new node"); let mut peer = Peer::with_kind(addr, kind); peer.fork_id = fork_id; entry.insert(peer); @@ -812,7 +830,7 @@ impl PeersManager { return } - // as long as there a slots available fill them with the best peers + // as long as there are slots available fill them with the best peers while self.connection_info.has_out_capacity() { let action = { let (peer_id, peer) = match self.best_unconnected() { @@ -823,7 +841,7 @@ impl PeersManager { trace!(target: "net::peers", ?peer_id, addr=?peer.addr, "schedule outbound connection"); peer.state = PeerConnectionState::PendingOut; - PeerAction::Connect { peer_id, remote_addr: peer.addr } + PeerAction::Connect { peer_id, remote_addr: peer.addr.tcp } }; self.connection_info.inc_pending_out(); @@ -861,7 +879,7 @@ impl PeersManager { while let Poll::Ready(Some(cmd)) = self.handle_rx.poll_next_unpin(cx) { match cmd { PeerCommand::Add(peer_id, addr) => { - self.add_peer(peer_id, addr, None); + self.add_peer(peer_id, PeerAddr::tcp(addr), None); } PeerCommand::Remove(peer) => self.remove_peer(peer), PeerCommand::ReputationChange(peer_id, rep) => { @@ -918,42 +936,37 @@ impl Default for PeersManager { } /// Tracks stats about connected nodes -#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize), serde(default))] +#[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct ConnectionInfo { /// Counter for currently occupied slots for active outbound connections. - #[cfg_attr(feature = "serde", serde(skip))] num_outbound: usize, /// Counter for pending outbound connections. - #[cfg_attr(feature = "serde", serde(skip))] num_pending_out: usize, /// Counter for currently occupied slots for active inbound connections. - #[cfg_attr(feature = "serde", serde(skip))] num_inbound: usize, /// Counter for pending inbound connections. - #[cfg_attr(feature = "serde", serde(skip))] num_pending_in: usize, - /// Maximum allowed outbound connections. - max_outbound: usize, - /// Maximum allowed inbound connections. - max_inbound: usize, - /// Maximum allowed concurrent outbound dials. - #[cfg_attr(feature = "serde", serde(default))] - max_concurrent_outbound_dials: usize, + /// Restrictions on number of connections. + config: ConnectionsConfig, } // === impl ConnectionInfo === impl ConnectionInfo { + /// Returns a new [`ConnectionInfo`] with the given config. + const fn new(config: ConnectionsConfig) -> Self { + Self { config, num_outbound: 0, num_pending_out: 0, num_inbound: 0, num_pending_in: 0 } + } + /// Returns `true` if there's still capacity for a new outgoing connection. 
     const fn has_out_capacity(&self) -> bool {
-        self.num_pending_out < self.max_concurrent_outbound_dials &&
-            self.num_outbound < self.max_outbound
+        self.num_pending_out < self.config.max_concurrent_outbound_dials &&
+            self.num_outbound < self.config.max_outbound
     }

     /// Returns `true` if there's still capacity for a new incoming connection.
     const fn has_in_capacity(&self) -> bool {
-        self.num_inbound < self.max_inbound
+        self.num_inbound < self.config.max_inbound
     }

     fn decr_state(&mut self, state: PeerConnectionState) {
@@ -998,25 +1011,43 @@ impl ConnectionInfo {
     }
 }

-impl Default for ConnectionInfo {
-    fn default() -> Self {
-        Self {
-            num_outbound: 0,
-            num_inbound: 0,
-            max_outbound: DEFAULT_MAX_COUNT_PEERS_OUTBOUND as usize,
-            max_inbound: DEFAULT_MAX_COUNT_PEERS_INBOUND as usize,
-            max_concurrent_outbound_dials: DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS,
-            num_pending_out: 0,
-            num_pending_in: 0,
-        }
+/// Represents a peer's address information.
+///
+/// # Fields
+///
+/// - `tcp`: A `SocketAddr` representing the peer's data transfer address.
+/// - `udp`: An optional `SocketAddr` representing the peer's discovery address. `None` if the
+///   peer is connecting to us directly or its UDP port is the same as the TCP port.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct PeerAddr {
+    tcp: SocketAddr,
+    udp: Option<SocketAddr>,
+}
+
+impl PeerAddr {
+    /// Returns a new `PeerAddr` with the given `tcp` and `udp` addresses.
+    pub const fn new(tcp: SocketAddr, udp: Option<SocketAddr>) -> Self {
+        Self { tcp, udp }
+    }
+
+    /// Returns a new `PeerAddr` with a `tcp` address only.
+    pub const fn tcp(tcp: SocketAddr) -> Self {
+        Self { tcp, udp: None }
+    }
+
+    /// Returns a new `PeerAddr` with the given `tcp` and `udp` ports.
+    fn new_with_ports(ip: IpAddr, tcp_port: u16, udp_port: Option<u16>) -> Self {
+        let tcp = SocketAddr::new(ip, tcp_port);
+        let udp = udp_port.map(|port| SocketAddr::new(ip, port));
+        Self::new(tcp, udp)
     }
 }

 /// Tracks info about a single peer.
 #[derive(Debug, Clone)]
 pub struct Peer {
-    /// Where to reach the peer
-    addr: SocketAddr,
+    /// Where to reach the peer.
+    addr: PeerAddr,
     /// Reputation of the peer.
     reputation: i32,
     /// The state of the connection, if any.
@@ -1029,18 +1060,19 @@ pub struct Peer {
     kind: PeerKind,
     /// Whether the peer is currently backed off.
     backed_off: bool,
-    /// Counts number of times the peer was backed off due to a severe [`BackoffKind`].
+    /// Counts number of times the peer was backed off due to a severe
+    /// [`reth_network_types::BackoffKind`].
    severe_backoff_counter: u8,
 }

 // === impl Peer ===

 impl Peer {
-    fn new(addr: SocketAddr) -> Self {
+    fn new(addr: PeerAddr) -> Self {
         Self::with_state(addr, Default::default())
     }

-    fn trusted(addr: SocketAddr) -> Self {
+    fn trusted(addr: PeerAddr) -> Self {
         Self { kind: PeerKind::Trusted, ..Self::new(addr) }
     }

@@ -1049,7 +1081,7 @@ impl Peer {
         self.reputation
     }

-    fn with_state(addr: SocketAddr, state: PeerConnectionState) -> Self {
+    fn with_state(addr: PeerAddr, state: PeerConnectionState) -> Self {
         Self {
             addr,
             state,
@@ -1062,7 +1094,7 @@ impl Peer {
         }
     }

-    fn with_kind(addr: SocketAddr, kind: PeerKind) -> Self {
+    fn with_kind(addr: PeerAddr, kind: PeerKind) -> Self {
         Self { kind, ..Self::new(addr) }
     }

@@ -1263,265 +1295,6 @@ pub enum PeerAction {
     PeerRemoved(PeerId),
 }

-/// Config type for initiating a [`PeersManager`] instance.
-#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "serde", serde(default))] -pub struct PeersConfig { - /// How often to recheck free slots for outbound connections. - #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] - pub refill_slots_interval: Duration, - /// Trusted nodes to connect to or accept from - pub trusted_nodes: HashSet, - /// Connect to or accept from trusted nodes only? - #[cfg_attr(feature = "serde", serde(alias = "connect_trusted_nodes_only"))] - pub trusted_nodes_only: bool, - /// Maximum number of backoff attempts before we give up on a peer and dropping. - /// - /// The max time spent of a peer before it's removed from the set is determined by the - /// configured backoff duration and the max backoff count. - /// - /// With a backoff counter of 5 and a backoff duration of 1h, the minimum time spent of the - /// peer in the table is the sum of all backoffs (1h + 2h + 3h + 4h + 5h = 15h). - /// - /// Note: this does not apply to trusted peers. - pub max_backoff_count: u8, - /// Basic nodes to connect to. - #[cfg_attr(feature = "serde", serde(skip))] - pub basic_nodes: HashSet, - /// How long to ban bad peers. - #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] - pub ban_duration: Duration, - /// Restrictions on `PeerIds` and Ips. - #[cfg_attr(feature = "serde", serde(skip))] - pub ban_list: BanList, - /// Restrictions on connections. - pub connection_info: ConnectionInfo, - /// How to weigh reputation changes. - pub reputation_weights: ReputationChangeWeights, - /// How long to backoff peers that are we failed to connect to for non-fatal reasons, such as - /// [`DisconnectReason::TooManyPeers`]. - /// - /// The backoff duration increases with number of backoff attempts. - pub backoff_durations: PeerBackoffDurations, -} - -impl Default for PeersConfig { - fn default() -> Self { - Self { - refill_slots_interval: Duration::from_millis(5_000), - connection_info: Default::default(), - reputation_weights: Default::default(), - ban_list: Default::default(), - // Ban peers for 12h - ban_duration: Duration::from_secs(60 * 60 * 12), - backoff_durations: Default::default(), - trusted_nodes: Default::default(), - trusted_nodes_only: false, - basic_nodes: Default::default(), - max_backoff_count: 5, - } - } -} - -impl PeersConfig { - /// A set of `peer_ids` and ip addr that we want to never connect to - pub fn with_ban_list(mut self, ban_list: BanList) -> Self { - self.ban_list = ban_list; - self - } - - /// Configure how long to ban bad peers - pub const fn with_ban_duration(mut self, ban_duration: Duration) -> Self { - self.ban_duration = ban_duration; - self - } - - /// Maximum occupied slots for outbound connections. - pub const fn with_max_pending_outbound(mut self, num_outbound: usize) -> Self { - self.connection_info.num_outbound = num_outbound; - self - } - - /// Maximum occupied slots for inbound connections. - pub const fn with_max_pending_inbound(mut self, num_inbound: usize) -> Self { - self.connection_info.num_inbound = num_inbound; - self - } - - /// Maximum allowed outbound connections. - pub const fn with_max_outbound(mut self, max_outbound: usize) -> Self { - self.connection_info.max_outbound = max_outbound; - self - } - - /// Maximum allowed inbound connections with optional update. 
- pub const fn with_max_inbound_opt(mut self, max_inbound: Option) -> Self { - if let Some(max_inbound) = max_inbound { - self.connection_info.max_inbound = max_inbound; - } - self - } - - /// Maximum allowed outbound connections with optional update. - pub const fn with_max_outbound_opt(mut self, max_outbound: Option) -> Self { - if let Some(max_outbound) = max_outbound { - self.connection_info.max_outbound = max_outbound; - } - self - } - - /// Maximum allowed inbound connections. - pub const fn with_max_inbound(mut self, max_inbound: usize) -> Self { - self.connection_info.max_inbound = max_inbound; - self - } - - /// Maximum allowed concurrent outbound dials. - pub const fn with_max_concurrent_dials(mut self, max_concurrent_outbound_dials: usize) -> Self { - self.connection_info.max_concurrent_outbound_dials = max_concurrent_outbound_dials; - self - } - - /// Nodes to always connect to. - pub fn with_trusted_nodes(mut self, nodes: HashSet) -> Self { - self.trusted_nodes = nodes; - self - } - - /// Connect only to trusted nodes. - pub const fn with_trusted_nodes_only(mut self, trusted_only: bool) -> Self { - self.trusted_nodes_only = trusted_only; - self - } - - /// Nodes available at launch. - pub fn with_basic_nodes(mut self, nodes: HashSet) -> Self { - self.basic_nodes = nodes; - self - } - - /// Configures the max allowed backoff count. - pub const fn with_max_backoff_count(mut self, max_backoff_count: u8) -> Self { - self.max_backoff_count = max_backoff_count; - self - } - - /// Configures how to weigh reputation changes. - pub const fn with_reputation_weights( - mut self, - reputation_weights: ReputationChangeWeights, - ) -> Self { - self.reputation_weights = reputation_weights; - self - } - - /// Configures how long to backoff peers that are we failed to connect to for non-fatal reasons - pub const fn with_backoff_durations(mut self, backoff_durations: PeerBackoffDurations) -> Self { - self.backoff_durations = backoff_durations; - self - } - - /// Returns the maximum number of peers, inbound and outbound. - pub const fn max_peers(&self) -> usize { - self.connection_info.max_outbound + self.connection_info.max_inbound - } - - /// Read from file nodes available at launch. Ignored if None. - pub fn with_basic_nodes_from_file( - self, - optional_file: Option>, - ) -> Result { - let Some(file_path) = optional_file else { return Ok(self) }; - let reader = match std::fs::File::open(file_path.as_ref()) { - Ok(file) => io::BufReader::new(file), - Err(e) if e.kind() == ErrorKind::NotFound => return Ok(self), - Err(e) => Err(e)?, - }; - info!(target: "net::peers", file = %file_path.as_ref().display(), "Loading saved peers"); - let nodes: HashSet = serde_json::from_reader(reader)?; - Ok(self.with_basic_nodes(nodes)) - } - - /// Returns settings for testing - #[cfg(test)] - fn test() -> Self { - Self { - refill_slots_interval: Duration::from_millis(100), - backoff_durations: PeerBackoffDurations::test(), - ..Default::default() - } - } -} - -/// The durations to use when a backoff should be applied to a peer. -/// -/// See also [`BackoffKind`]. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct PeerBackoffDurations { - /// Applies to connection problems where there is a chance that they will be resolved after the - /// short duration. 
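The removed `PeerBackoffDurations` below now lives in `reth-network-types` (imported at the top of this file as `peers::config::PeerBackoffDurations`). Its `backoff_until` scales the base duration linearly with the severe-backoff counter and caps the result at `max`. A minimal sketch of that arithmetic, using the default `medium` (3min) and `max` (1h) values quoted below:

```rust
use std::time::Duration;

// Sketch of the scaling in `backoff_until`: total = base + base * counter,
// capped at the configured maximum.
fn scaled_backoff(base: Duration, counter: u8, max: Duration) -> Duration {
    let scaled = base + base * counter as u32;
    scaled.min(max)
}

fn main() {
    let medium = Duration::from_secs(60 * 3); // default `medium`: 3min
    let max = Duration::from_secs(60 * 60);   // default `max`: 1h
    // Second severe backoff: 3min + 2 * 3min = 9min.
    assert_eq!(scaled_backoff(medium, 2, max), Duration::from_secs(60 * 9));
    // Large counters are capped at the maximum.
    assert_eq!(scaled_backoff(medium, 200, max), max);
}
```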
- #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] - pub low: Duration, - /// Applies to more severe connection problems where there is a lower chance that they will be - /// resolved. - #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] - pub medium: Duration, - /// Intended for spammers, or bad peers in general. - #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] - pub high: Duration, - /// Maximum total backoff duration. - #[cfg_attr(feature = "serde", serde(with = "humantime_serde"))] - pub max: Duration, -} - -impl PeerBackoffDurations { - /// Returns the corresponding [`Duration`] - pub const fn backoff(&self, kind: BackoffKind) -> Duration { - match kind { - BackoffKind::Low => self.low, - BackoffKind::Medium => self.medium, - BackoffKind::High => self.high, - } - } - - /// Returns the timestamp until which we should backoff. - /// - /// The Backoff duration is capped by the configured maximum backoff duration. - pub fn backoff_until(&self, kind: BackoffKind, backoff_counter: u8) -> std::time::Instant { - let backoff_time = self.backoff(kind); - let backoff_time = backoff_time + backoff_time * backoff_counter as u32; - let now = std::time::Instant::now(); - now + backoff_time.min(self.max) - } - - /// Returns durations for testing. - #[cfg(test)] - const fn test() -> Self { - Self { - low: Duration::from_millis(200), - medium: Duration::from_millis(200), - high: Duration::from_millis(200), - max: Duration::from_millis(200), - } - } -} - -impl Default for PeerBackoffDurations { - fn default() -> Self { - Self { - low: Duration::from_secs(30), - // 3min - medium: Duration::from_secs(60 * 3), - // 15min - high: Duration::from_secs(60 * 15), - // 1h - max: Duration::from_secs(60 * 60), - } - } -} - /// Error thrown when a incoming connection is rejected right away #[derive(Debug, Error, PartialEq, Eq)] pub enum InboundConnectionError { @@ -1541,11 +1314,9 @@ impl Display for InboundConnectionError { mod tests { use super::PeersManager; use crate::{ - error::BackoffKind, peers::{ - manager::{ConnectionInfo, PeerBackoffDurations, PeerConnectionState}, - reputation::DEFAULT_REPUTATION, - InboundConnectionError, PeerAction, + ConnectionInfo, InboundConnectionError, PeerAction, PeerAddr, PeerBackoffDurations, + PeerConnectionState, }, session::PendingSessionHandshakeError, PeersConfig, @@ -1558,6 +1329,7 @@ mod tests { use reth_net_banlist::BanList; use reth_network_api::{Direction, ReputationChangeKind}; use reth_network_peers::PeerId; + use reth_network_types::{peers::reputation::DEFAULT_REPUTATION, BackoffKind}; use reth_primitives::B512; use std::{ collections::HashSet, @@ -1592,7 +1364,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -1607,6 +1379,37 @@ mod tests { } _ => unreachable!(), } + + let (record, _) = peers.peer_by_id(peer).unwrap(); + assert_eq!(record.tcp_addr(), socket_addr); + assert_eq!(record.udp_addr(), socket_addr); + } + + #[tokio::test] + async fn test_insert_udp() { + let peer = PeerId::random(); + let tcp_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); + let udp_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8009); + let mut peers = PeersManager::default(); + peers.add_peer(peer, PeerAddr::new(tcp_addr, 
Some(udp_addr)), None); + + match event!(peers) { + PeerAction::PeerAdded(peer_id) => { + assert_eq!(peer_id, peer); + } + _ => unreachable!(), + } + match event!(peers) { + PeerAction::Connect { peer_id, remote_addr } => { + assert_eq!(peer_id, peer); + assert_eq!(remote_addr, tcp_addr); + } + _ => unreachable!(), + } + + let (record, _) = peers.peer_by_id(peer).unwrap(); + assert_eq!(record.tcp_addr(), tcp_addr); + assert_eq!(record.udp_addr(), udp_addr); } #[tokio::test] @@ -1615,7 +1418,7 @@ mod tests { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); peers.ban_peer(peer); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::BanPeer { peer_id } => { @@ -1637,7 +1440,7 @@ mod tests { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); peers.ban_peer(peer); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::BanPeer { peer_id } => { @@ -1674,7 +1477,7 @@ mod tests { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::new(PeersConfig::test()); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -1733,7 +1536,7 @@ mod tests { let backoff_durations = PeerBackoffDurations::test(); let config = PeersConfig { backoff_durations, ..PeersConfig::test() }; let mut peers = PeersManager::new(config); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -1790,7 +1593,7 @@ mod tests { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let config = PeersConfig::test(); let mut peers = PeersManager::new(config); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); let peer_struct = peers.peers.get_mut(&peer).unwrap(); let backoff_timestamp = peers @@ -1807,7 +1610,7 @@ mod tests { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let config = PeersConfig::default(); let mut peers = PeersManager::new(config); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); let peer_struct = peers.peers.get_mut(&peer).unwrap(); // Simulate a peer that was already backed off once @@ -1835,7 +1638,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -1892,7 +1695,7 @@ mod tests { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let config = PeersConfig::test(); let mut peers = PeersManager::new(config.clone()); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); let peer_struct = peers.peers.get_mut(&peer).unwrap(); // Simulate a peer that was already backed off once @@ -1946,7 +1749,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = 
PeersManager::default(); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2058,7 +1861,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2106,7 +1909,7 @@ mod tests { peers.add_trusted_peer_id(trusted); // saturate the inbound slots - for i in 0..peers.connection_info.max_inbound { + for i in 0..peers.connection_info.config.max_inbound { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, i as u8)), 8008); assert!(peers.on_incoming_pending_session(socket_addr.ip()).is_ok()); let peer_id = PeerId::random(); @@ -2176,7 +1979,7 @@ mod tests { // to increase by 1 peers.on_incoming_session_established(peer, socket_addr); let p = peers.peers.get_mut(&peer).expect("peer not found"); - assert_eq!(p.addr, socket_addr); + assert_eq!(p.addr.tcp, socket_addr); assert_eq!(peers.connection_info.num_pending_in, 0); assert_eq!(peers.connection_info.num_inbound, 1); @@ -2191,7 +1994,7 @@ mod tests { peers.on_already_connected(Direction::Incoming); let p = peers.peers.get_mut(&peer).expect("peer not found"); - assert_eq!(p.addr, socket_addr); + assert_eq!(p.addr.tcp, socket_addr); assert_eq!(peers.connection_info.num_pending_in, 0); assert_eq!(peers.connection_info.num_inbound, 1); } @@ -2201,7 +2004,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - peers.add_trusted_peer(peer, socket_addr); + peers.add_trusted_peer(peer, PeerAddr::tcp(socket_addr)); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2253,7 +2056,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); assert_eq!(peers.get_reputation(&peer), Some(0)); peers.apply_reputation_change(&peer, ReputationChangeKind::Other(1024)); @@ -2268,7 +2071,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2305,7 +2108,7 @@ mod tests { let p = peers.peers.get(&peer).unwrap(); assert_eq!(p.state, PeerConnectionState::PendingOut); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); let p = peers.peers.get(&peer).unwrap(); assert_eq!(p.state, PeerConnectionState::PendingOut); @@ -2318,7 +2121,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2353,7 +2156,7 @@ mod tests { let peer = PeerId::random(); let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); let mut peers = PeersManager::default(); - 
peers.add_peer(peer, socket_addr, None); + peers.add_peer(peer, PeerAddr::tcp(socket_addr), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2387,7 +2190,7 @@ mod tests { let ban_list = BanList::new(HashSet::new(), vec![ip]); let config = PeersConfig::default().with_ban_list(ban_list); let mut peer_manager = PeersManager::new(config); - peer_manager.add_peer(B512::default(), socket_addr, None); + peer_manager.add_peer(B512::default(), PeerAddr::tcp(socket_addr), None); assert!(peer_manager.peers.is_empty()); } @@ -2404,7 +2207,7 @@ mod tests { match a { Ok(_) => panic!(), Err(err) => match err { - super::InboundConnectionError::IpBanned {} => { + InboundConnectionError::IpBanned {} => { assert_eq!(peer_manager.connection_info.num_pending_in, 0) } _ => unreachable!(), @@ -2490,7 +2293,7 @@ mod tests { let basic_peer = PeerId::random(); let basic_sock = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8009); - peers.add_peer(basic_peer, basic_sock, None); + peers.add_peer(basic_peer, PeerAddr::tcp(basic_sock), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2530,7 +2333,7 @@ mod tests { let basic_peer = PeerId::random(); let basic_sock = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8009); - peers.add_peer(basic_peer, basic_sock, None); + peers.add_peer(basic_peer, PeerAddr::tcp(basic_sock), None); match event!(peers) { PeerAction::PeerAdded(peer_id) => { @@ -2638,7 +2441,7 @@ mod tests { let config = PeersConfig::test(); let mut peer_manager = PeersManager::new(config); let peer_id = PeerId::random(); - peer_manager.add_peer(peer_id, socket_addr, None); + peer_manager.add_peer(peer_id, PeerAddr::tcp(socket_addr), None); tokio::time::sleep(Duration::from_secs(1)).await; peer_manager.tick(); @@ -2693,7 +2496,7 @@ mod tests { assert!(peer.remove_after_disconnect); // trigger discovery manually while the peer is still connected - peers.add_peer(peer_id, addr, None); + peers.add_peer(peer_id, PeerAddr::tcp(addr), None); peers.on_active_session_gracefully_closed(peer_id); @@ -2709,7 +2512,7 @@ mod tests { let mut peers = PeersManager::default(); peers.on_incoming_pending_session(addr.ip()).unwrap(); - peers.add_peer(peer_id, addr, None); + peers.add_peer(peer_id, PeerAddr::tcp(addr), None); match event!(peers) { PeerAction::PeerAdded(_) => {} @@ -2737,7 +2540,7 @@ mod tests { let mut peers = PeersManager::default(); peers.on_incoming_pending_session(addr.ip()).unwrap(); - peers.add_peer(peer_id, addr, None); + peers.add_peer(peer_id, PeerAddr::tcp(addr), None); match event!(peers) { PeerAction::PeerAdded(_) => {} @@ -2768,9 +2571,9 @@ mod tests { let config = PeersConfig::default(); let mut peer_manager = PeersManager::new(config); let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)); - let socket_addr = SocketAddr::new(ip, 8008); - for _ in 0..peer_manager.connection_info.max_concurrent_outbound_dials * 2 { - peer_manager.add_peer(PeerId::random(), socket_addr, None); + let peer_addr = PeerAddr::tcp(SocketAddr::new(ip, 8008)); + for _ in 0..peer_manager.connection_info.config.max_concurrent_outbound_dials * 2 { + peer_manager.add_peer(PeerId::random(), peer_addr, None); } peer_manager.fill_outbound_slots(); @@ -2779,7 +2582,7 @@ mod tests { .iter() .filter(|ev| matches!(ev, PeerAction::Connect { .. 
})) .count(); - assert_eq!(dials, peer_manager.connection_info.max_concurrent_outbound_dials); + assert_eq!(dials, peer_manager.connection_info.config.max_concurrent_outbound_dials); } #[tokio::test] @@ -2787,21 +2590,21 @@ mod tests { let config = PeersConfig::default(); let mut peer_manager = PeersManager::new(config); let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)); - let socket_addr = SocketAddr::new(ip, 8008); + let peer_addr = PeerAddr::tcp(SocketAddr::new(ip, 8008)); // add more peers than allowed - for _ in 0..peer_manager.connection_info.max_concurrent_outbound_dials * 2 { - peer_manager.add_peer(PeerId::random(), socket_addr, None); + for _ in 0..peer_manager.connection_info.config.max_concurrent_outbound_dials * 2 { + peer_manager.add_peer(PeerId::random(), peer_addr, None); } - for _ in 0..peer_manager.connection_info.max_concurrent_outbound_dials * 2 { + for _ in 0..peer_manager.connection_info.config.max_concurrent_outbound_dials * 2 { match event!(peer_manager) { PeerAction::PeerAdded(_) => {} _ => unreachable!(), } } - for _ in 0..peer_manager.connection_info.max_concurrent_outbound_dials { + for _ in 0..peer_manager.connection_info.config.max_concurrent_outbound_dials { match event!(peer_manager) { PeerAction::Connect { .. } => {} _ => unreachable!(), @@ -2813,7 +2616,7 @@ mod tests { // all dialed connections should be in 'PendingOut' state let dials = peer_manager.connection_info.num_pending_out; - assert_eq!(dials, peer_manager.connection_info.max_concurrent_outbound_dials); + assert_eq!(dials, peer_manager.connection_info.config.max_concurrent_outbound_dials); let num_pendingout_states = peer_manager .peers @@ -2823,7 +2626,7 @@ mod tests { .collect::>(); assert_eq!( num_pendingout_states.len(), - peer_manager.connection_info.max_concurrent_outbound_dials + peer_manager.connection_info.config.max_concurrent_outbound_dials ); // establish dialed connections diff --git a/crates/net/network/src/peers/mod.rs b/crates/net/network/src/peers/mod.rs deleted file mode 100644 index fafb2d762..000000000 --- a/crates/net/network/src/peers/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -//! Peer related implementations - -mod manager; -mod reputation; - -pub(crate) use manager::InboundConnectionError; -pub use manager::{ConnectionInfo, Peer, PeerAction, PeersConfig, PeersHandle, PeersManager}; -pub use reputation::ReputationChangeWeights; -pub use reth_network_api::PeerKind; - -/// Maximum number of available slots for outbound sessions. -pub const DEFAULT_MAX_COUNT_PEERS_OUTBOUND: u32 = 100; - -/// Maximum number of available slots for inbound sessions. -pub const DEFAULT_MAX_COUNT_PEERS_INBOUND: u32 = 30; - -/// Maximum number of available slots for concurrent outgoing dials. -/// -/// This restricts how many outbound dials can be performed concurrently. 
-pub const DEFAULT_MAX_COUNT_CONCURRENT_OUTBOUND_DIALS: usize = 15; diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index cd558e853..c75185ba0 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -3,7 +3,6 @@ use crate::{ message::{NewBlockMessage, PeerMessage, PeerRequest, PeerResponse, PeerResponseResult}, session::{ - config::INITIAL_REQUEST_TIMEOUT, conn::EthRlpxConnection, handle::{ActiveSessionMessage, SessionCommand}, SessionId, @@ -20,6 +19,7 @@ use reth_eth_wire::{ use reth_metrics::common::mpsc::MeteredPollSender; use reth_network_p2p::error::RequestError; use reth_network_peers::PeerId; +use reth_network_types::session::config::INITIAL_REQUEST_TIMEOUT; use rustc_hash::FxHashMap; use std::{ collections::VecDeque, @@ -763,10 +763,7 @@ fn calculate_new_timeout(current_timeout: Duration, estimated_rtt: Duration) -> #[cfg(test)] mod tests { use super::*; - use crate::session::{ - config::PROTOCOL_BREACH_REQUEST_TIMEOUT, handle::PendingSessionEvent, - start_pending_incoming_session, - }; + use crate::session::{handle::PendingSessionEvent, start_pending_incoming_session}; use reth_chainspec::MAINNET; use reth_ecies::stream::ECIESStream; use reth_eth_wire::{ @@ -774,7 +771,8 @@ mod tests { UnauthedEthStream, UnauthedP2PStream, }; use reth_network_peers::pk2id; - use reth_primitives::{ForkFilter, Hardfork}; + use reth_network_types::session::config::PROTOCOL_BREACH_REQUEST_TIMEOUT; + use reth_primitives::{EthereumHardfork, ForkFilter}; use secp256k1::{SecretKey, SECP256K1}; use tokio::{ net::{TcpListener, TcpStream}, @@ -924,7 +922,7 @@ mod tests { local_peer_id, status: StatusBuilder::default().build(), fork_filter: MAINNET - .hardfork_fork_filter(Hardfork::Frontier) + .hardfork_fork_filter(EthereumHardfork::Frontier) .expect("The Frontier fork filter should exist on mainnet"), } } diff --git a/crates/net/network/src/session/counter.rs b/crates/net/network/src/session/counter.rs new file mode 100644 index 000000000..0d8f764f2 --- /dev/null +++ b/crates/net/network/src/session/counter.rs @@ -0,0 +1,106 @@ +use reth_network_api::Direction; +use reth_network_types::SessionLimits; + +use super::ExceedsSessionLimit; + +/// Keeps track of all sessions. +#[derive(Debug, Clone)] +pub struct SessionCounter { + /// Limits to enforce. + limits: SessionLimits, + /// Number of pending incoming sessions. + pending_inbound: u32, + /// Number of pending outgoing sessions. + pending_outbound: u32, + /// Number of active inbound sessions. + active_inbound: u32, + /// Number of active outbound sessions. 
+ active_outbound: u32, +} + +// === impl SessionCounter === + +impl SessionCounter { + pub(crate) const fn new(limits: SessionLimits) -> Self { + Self { + limits, + pending_inbound: 0, + pending_outbound: 0, + active_inbound: 0, + active_outbound: 0, + } + } + + pub(crate) fn inc_pending_inbound(&mut self) { + self.pending_inbound += 1; + } + + pub(crate) fn inc_pending_outbound(&mut self) { + self.pending_outbound += 1; + } + + pub(crate) fn dec_pending(&mut self, direction: &Direction) { + match direction { + Direction::Outgoing(_) => { + self.pending_outbound -= 1; + } + Direction::Incoming => { + self.pending_inbound -= 1; + } + } + } + + pub(crate) fn inc_active(&mut self, direction: &Direction) { + match direction { + Direction::Outgoing(_) => { + self.active_outbound += 1; + } + Direction::Incoming => { + self.active_inbound += 1; + } + } + } + + pub(crate) fn dec_active(&mut self, direction: &Direction) { + match direction { + Direction::Outgoing(_) => { + self.active_outbound -= 1; + } + Direction::Incoming => { + self.active_inbound -= 1; + } + } + } + + pub(crate) const fn ensure_pending_outbound(&self) -> Result<(), ExceedsSessionLimit> { + Self::ensure(self.pending_outbound, self.limits.max_pending_outbound) + } + + pub(crate) const fn ensure_pending_inbound(&self) -> Result<(), ExceedsSessionLimit> { + Self::ensure(self.pending_inbound, self.limits.max_pending_inbound) + } + + const fn ensure(current: u32, limit: Option) -> Result<(), ExceedsSessionLimit> { + if let Some(limit) = limit { + if current >= limit { + return Err(ExceedsSessionLimit(limit)) + } + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_limits() { + let mut limits = SessionCounter::new(SessionLimits::default().with_max_pending_inbound(2)); + assert!(limits.ensure_pending_outbound().is_ok()); + limits.inc_pending_inbound(); + assert!(limits.ensure_pending_inbound().is_ok()); + limits.inc_pending_inbound(); + assert!(limits.ensure_pending_inbound().is_err()); + } +} diff --git a/crates/net/network/src/session/handle.rs b/crates/net/network/src/session/handle.rs index b28b1e27e..4c1a5e531 100644 --- a/crates/net/network/src/session/handle.rs +++ b/crates/net/network/src/session/handle.rs @@ -11,8 +11,8 @@ use reth_eth_wire::{ errors::EthStreamError, DisconnectReason, EthVersion, Status, }; -use reth_network_api::PeerInfo; -use reth_network_peers::PeerId; +use reth_network_api::{PeerInfo, PeerKind}; +use reth_network_peers::{NodeRecord, PeerId}; use std::{io, net::SocketAddr, sync::Arc, time::Instant}; use tokio::sync::{ mpsc::{self, error::SendError}, @@ -136,10 +136,12 @@ impl ActiveSessionHandle { } /// Extracts the [`PeerInfo`] from the session handle. - pub(crate) fn peer_info(&self) -> PeerInfo { + pub(crate) fn peer_info(&self, record: &NodeRecord, kind: PeerKind) -> PeerInfo { PeerInfo { remote_id: self.remote_id, direction: self.direction, + enode: record.to_string(), + enr: None, remote_addr: self.remote_addr, local_addr: self.local_addr, capabilities: self.capabilities.clone(), @@ -147,6 +149,7 @@ impl ActiveSessionHandle { eth_version: self.version, status: self.status.clone(), session_established: self.established, + kind, } } } diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 6ce4e97aa..fc6f0e6a1 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -1,10 +1,7 @@ //! Support for handling peer sessions. 
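`SessionCounter::ensure` treats a `None` limit as unlimited and rejects once the current count reaches the configured bound, which is why the test above errors on the second pending inbound session when the limit is 2. A self-contained model of that check (simplified types, not the crate's API):

```rust
// `None` means "no configured limit"; a `Some(limit)` rejects at `current >= limit`.
fn ensure(current: u32, limit: Option<u32>) -> Result<(), u32> {
    if let Some(limit) = limit {
        if current >= limit {
            return Err(limit);
        }
    }
    Ok(())
}

fn main() {
    assert!(ensure(1, Some(2)).is_ok());   // one slot still free
    assert!(ensure(2, Some(2)).is_err());  // at the limit: reject
    assert!(ensure(1_000, None).is_ok());  // unlimited
}
```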
-use crate::{ - message::PeerMessage, - metrics::SessionManagerMetrics, - session::{active::ActiveSession, config::SessionCounter}, -}; +use crate::{message::PeerMessage, metrics::SessionManagerMetrics, session::active::ActiveSession}; +use counter::SessionCounter; use futures::{future::Either, io, FutureExt, StreamExt}; use reth_ecies::{stream::ECIESStream, ECIESError}; use reth_eth_wire::{ @@ -15,6 +12,7 @@ use reth_eth_wire::{ }; use reth_metrics::common::mpsc::MeteredPollSender; use reth_network_peers::PeerId; +use reth_network_types::SessionsConfig; use reth_primitives::{ForkFilter, ForkId, ForkTransition, Head}; use reth_tasks::TaskSpawner; use rustc_hash::FxHashMap; @@ -37,12 +35,11 @@ use tokio_util::sync::PollSender; use tracing::{debug, instrument, trace}; mod active; -mod config; mod conn; +mod counter; mod handle; pub use crate::message::PeerRequestSender; use crate::protocol::{IntoRlpxSubProtocol, RlpxSubProtocolHandlers, RlpxSubProtocols}; -pub use config::{SessionLimits, SessionsConfig}; pub use handle::{ ActiveSessionHandle, ActiveSessionMessage, PendingSessionEvent, PendingSessionHandle, SessionCommand, @@ -173,6 +170,11 @@ impl SessionManager { self.secret_key } + /// Returns a borrowed reference to the active sessions. + pub const fn active_sessions(&self) -> &HashMap { + &self.active_sessions + } + /// Returns the session hello message. pub fn hello_message(&self) -> HelloMessageWithProtocols { self.hello_message.clone() @@ -590,35 +592,6 @@ impl SessionManager { } } } - - /// Returns [`PeerInfo`] for all connected peers - pub(crate) fn get_peer_info(&self) -> Vec { - self.active_sessions.values().map(ActiveSessionHandle::peer_info).collect() - } - - /// Returns [`PeerInfo`] for a given peer. - /// - /// Returns `None` if there's no active session to the peer. - pub(crate) fn get_peer_info_by_id(&self, peer_id: PeerId) -> Option { - self.active_sessions.get(&peer_id).map(ActiveSessionHandle::peer_info) - } - /// Returns [`PeerInfo`] for a given peer. - /// - /// Returns `None` if there's no active session to the peer. 
- pub(crate) fn get_peer_infos_by_ids( - &self, - peer_ids: impl IntoIterator, - ) -> Vec { - let mut infos = Vec::new(); - for peer_id in peer_ids { - if let Some(info) = - self.active_sessions.get(&peer_id).map(ActiveSessionHandle::peer_info) - { - infos.push(info); - } - } - infos - } } /// Events produced by the [`SessionManager`] @@ -1006,10 +979,7 @@ async fn authenticate_stream( (eth_stream.into(), their_status) } else { // Multiplex the stream with the extra protocols - let (mut multiplex_stream, their_status) = RlpxProtocolMultiplexer::new(p2p_stream) - .into_eth_satellite_stream(status, fork_filter) - .await - .unwrap(); + let mut multiplex_stream = RlpxProtocolMultiplexer::new(p2p_stream); // install additional handlers for handler in extra_handlers.into_iter() { @@ -1022,6 +992,19 @@ async fn authenticate_stream( .ok(); } + let (multiplex_stream, their_status) = + match multiplex_stream.into_eth_satellite_stream(status, fork_filter).await { + Ok((multiplex_stream, their_status)) => (multiplex_stream, their_status), + Err(err) => { + return PendingSessionEvent::Disconnected { + remote_addr, + session_id, + direction, + error: Some(PendingSessionHandshakeError::Eth(err)), + } + } + }; + (multiplex_stream.into(), their_status) }; diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index afbf05dde..3dce15cfc 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -9,7 +9,7 @@ use crate::{ BlockRequest, NewBlockMessage, PeerRequest, PeerRequestSender, PeerResponse, PeerResponseResult, }, - peers::{PeerAction, PeersManager}, + peers::{PeerAction, PeerAddr, PeersManager}, FetchClient, }; use rand::seq::SliceRandom; @@ -20,7 +20,7 @@ use reth_eth_wire::{ use reth_network_api::PeerKind; use reth_network_peers::PeerId; use reth_primitives::{ForkId, B256}; -use reth_provider::BlockNumReader; +use reth_storage_api::BlockNumReader; use std::{ collections::{HashMap, VecDeque}, net::{IpAddr, SocketAddr}, @@ -274,13 +274,14 @@ where } /// Adds a peer and its address with the given kind to the peerset. - pub(crate) fn add_peer_kind(&mut self, peer_id: PeerId, kind: PeerKind, addr: SocketAddr) { + pub(crate) fn add_peer_kind(&mut self, peer_id: PeerId, kind: PeerKind, addr: PeerAddr) { self.peers_manager.add_peer_kind(peer_id, kind, addr, None) } - pub(crate) fn remove_peer(&mut self, peer_id: PeerId, kind: PeerKind) { + /// Removes a peer and its address with the given kind from the peerset. + pub(crate) fn remove_peer_kind(&mut self, peer_id: PeerId, kind: PeerKind) { match kind { - PeerKind::Basic => self.peers_manager.remove_peer(peer_id), + PeerKind::Basic | PeerKind::Static => self.peers_manager.remove_peer(peer_id), PeerKind::Trusted => self.peers_manager.remove_peer_from_trusted_set(peer_id), } } @@ -288,14 +289,10 @@ where /// Event hook for events received from the discovery service. 
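The `authenticate_stream` change above replaces the `.unwrap()` on the eth-satellite negotiation with an explicit `match` that surfaces a failure as a `Disconnected` event (in the real code, a `PendingSessionEvent` carrying a `PendingSessionHandshakeError::Eth`). A minimal sketch of the pattern with stand-in types:

```rust
// Simplified stand-ins: a failed handshake becomes an event instead of a panic.
#[derive(Debug)]
enum PendingEvent {
    Established { version: u8 },
    Disconnected { error: Option<String> },
}

fn finish_handshake(result: Result<u8, String>) -> PendingEvent {
    match result {
        Ok(version) => PendingEvent::Established { version },
        Err(err) => PendingEvent::Disconnected { error: Some(err) },
    }
}

fn main() {
    println!("{:?}", finish_handshake(Err("status mismatch".into())));
}
```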
fn on_discovery_event(&mut self, event: DiscoveryEvent) { match event { - DiscoveryEvent::NewNode(DiscoveredEvent::EventQueued { - peer_id, - socket_addr, - fork_id, - }) => { + DiscoveryEvent::NewNode(DiscoveredEvent::EventQueued { peer_id, addr, fork_id }) => { self.queued_messages.push_back(StateAction::DiscoveredNode { peer_id, - socket_addr, + addr, fork_id, }); } @@ -516,7 +513,7 @@ pub(crate) enum StateAction { fork_id: ForkId, }, /// A new node was found through the discovery, possibly with a `ForkId` - DiscoveredNode { peer_id: PeerId, socket_addr: SocketAddr, fork_id: Option }, + DiscoveredNode { peer_id: PeerId, addr: PeerAddr, fork_id: Option }, /// A peer was added PeerAdded(PeerId), /// A peer was dropped diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index cfc1f8417..faf39f839 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -13,7 +13,7 @@ use reth_eth_wire::{ EthVersion, Status, }; use reth_network_peers::PeerId; -use reth_provider::{BlockNumReader, BlockReader}; +use reth_storage_api::BlockNumReader; use std::{ io, net::SocketAddr, @@ -247,14 +247,14 @@ where } StateAction::PeerAdded(peer_id) => return Some(SwarmEvent::PeerAdded(peer_id)), StateAction::PeerRemoved(peer_id) => return Some(SwarmEvent::PeerRemoved(peer_id)), - StateAction::DiscoveredNode { peer_id, socket_addr, fork_id } => { + StateAction::DiscoveredNode { peer_id, addr, fork_id } => { // Don't try to connect to peer if node is shutting down if self.is_shutting_down() { return None } // Insert peer only if no fork id or a valid fork id if fork_id.map_or_else(|| true, |f| self.sessions.is_valid_fork_id(f)) { - self.state_mut().peers_mut().add_peer(peer_id, socket_addr, fork_id); + self.state_mut().peers_mut().add_peer(peer_id, addr, fork_id); } } StateAction::DiscoveredEnrForkId { peer_id, fork_id } => { @@ -287,7 +287,7 @@ where impl Stream for Swarm where - C: BlockReader + Unpin, + C: BlockNumReader + Unpin, { type Item = SwarmEvent; diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index c7642accd..5de45b229 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -16,9 +16,8 @@ use reth_chainspec::MAINNET; use reth_eth_wire::{protocol::Protocol, DisconnectReason, HelloMessageWithProtocols}; use reth_network_api::{NetworkInfo, Peers}; use reth_network_peers::PeerId; -use reth_provider::{ - test_utils::NoopProvider, BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory, -}; +use reth_provider::test_utils::NoopProvider; +use reth_storage_api::{BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory}; use reth_tasks::TokioTaskExecutor; use reth_tokio_util::EventStream; use reth_transaction_pool::{ diff --git a/crates/net/network/src/transactions/constants.rs b/crates/net/network/src/transactions/constants.rs index 59ec103cd..48fb8857c 100644 --- a/crates/net/network/src/transactions/constants.rs +++ b/crates/net/network/src/transactions/constants.rs @@ -57,9 +57,9 @@ pub mod tx_manager { /// Constants used by [`TransactionFetcher`](super::TransactionFetcher). 
pub mod tx_fetcher { - use crate::{ - peers::{DEFAULT_MAX_COUNT_PEERS_INBOUND, DEFAULT_MAX_COUNT_PEERS_OUTBOUND}, - transactions::fetcher::TransactionFetcherInfo, + use crate::transactions::fetcher::TransactionFetcherInfo; + use reth_network_types::peers::config::{ + DEFAULT_MAX_COUNT_PEERS_INBOUND, DEFAULT_MAX_COUNT_PEERS_OUTBOUND, }; use super::{ diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index a191443ed..b379a6704 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -3,9 +3,8 @@ use alloy_node_bindings::Geth; use alloy_provider::{ext::AdminApi, ProviderBuilder}; use futures::StreamExt; -use reth_chainspec::net::mainnet_nodes; use reth_discv4::Discv4Config; -use reth_eth_wire::DisconnectReason; +use reth_eth_wire::{DisconnectReason, HeadersDirection}; use reth_net_banlist::BanList; use reth_network::{ test_utils::{enr_to_peer_id, NetworkEventStream, PeerConfig, Testnet, GETH_TIMEOUT}, @@ -16,8 +15,7 @@ use reth_network_p2p::{ headers::client::{HeadersClient, HeadersRequest}, sync::{NetworkSyncUpdater, SyncState}, }; -use reth_network_peers::NodeRecord; -use reth_primitives::HeadersDirection; +use reth_network_peers::{mainnet_nodes, NodeRecord}; use reth_provider::test_utils::NoopProvider; use reth_transaction_pool::test_utils::testing_pool; use secp256k1::SecretKey; diff --git a/crates/net/network/tests/it/requests.rs b/crates/net/network/tests/it/requests.rs index 7873366cc..9bb391547 100644 --- a/crates/net/network/tests/it/requests.rs +++ b/crates/net/network/tests/it/requests.rs @@ -2,6 +2,7 @@ //! Tests for eth related requests use rand::Rng; +use reth_eth_wire::HeadersDirection; use reth_network::{ test_utils::{NetworkEventStream, Testnet}, NetworkEvents, @@ -12,8 +13,8 @@ use reth_network_p2p::{ headers::client::{HeadersClient, HeadersRequest}, }; use reth_primitives::{ - Block, BlockBody, Bytes, Header, HeadersDirection, Signature, Transaction, TransactionSigned, - TxEip2930, TxKind, U256, + Block, BlockBody, Bytes, Header, Signature, Transaction, TransactionSigned, TxEip2930, TxKind, + U256, }; use reth_provider::test_utils::MockEthProvider; use std::sync::Arc; diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 93af03d5b..724290ec3 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -4,12 +4,10 @@ use crate::{ error::PeerRequestResult, headers::client::{HeadersClient, SingleHeaderRequest}, }; -use futures::Stream; use reth_consensus::{Consensus, ConsensusError}; +use reth_eth_wire_types::HeadersDirection; use reth_network_peers::WithPeerId; -use reth_primitives::{ - BlockBody, GotExpected, Header, HeadersDirection, SealedBlock, SealedHeader, B256, -}; +use reth_primitives::{BlockBody, GotExpected, Header, SealedBlock, SealedHeader, B256}; use std::{ cmp::Reverse, collections::{HashMap, VecDeque}, @@ -635,69 +633,6 @@ where } } -/// A type that buffers the result of a range request so we can return it as a `Stream`. -struct FullBlockRangeStream -where - Client: BodiesClient + HeadersClient, -{ - /// The inner [`FetchFullBlockRangeFuture`] that is polled. - inner: FetchFullBlockRangeFuture, - /// The blocks that have been received so far. - /// - /// If this is `None` then the request is still in progress. If the vec is empty, then all of - /// the response values have been consumed. 
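The removed `FullBlockRangeStream` below buffered the range response and reversed it once, so the stream could `pop()` from the end of the `Vec` in O(1) while still yielding blocks in the descending order they were received in. A minimal sketch of that buffering trick:

```rust
fn main() {
    let mut blocks = vec![5u64, 4, 3, 2, 1]; // descending, as received
    blocks.reverse();                        // ascending, so pop() is cheap
    while let Some(block) = blocks.pop() {
        print!("{block} ");                  // prints: 5 4 3 2 1
    }
}
```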
- blocks: Option>, -} - -impl From> for FullBlockRangeStream -where - Client: BodiesClient + HeadersClient, -{ - fn from(inner: FetchFullBlockRangeFuture) -> Self { - Self { inner, blocks: None } - } -} - -impl Stream for FullBlockRangeStream -where - Client: BodiesClient + HeadersClient + Unpin + 'static, -{ - type Item = SealedBlock; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.get_mut(); - - // If all blocks have been consumed, then return `None`. - if let Some(blocks) = &mut this.blocks { - if blocks.is_empty() { - // Stream is finished - return Poll::Ready(None) - } - - // return the next block if it's ready - the vec should be in ascending order since it - // is reversed right after it is received from the future, so we can just pop() the - // elements to return them from the stream in descending order - return Poll::Ready(blocks.pop()) - } - - // poll the inner future if the blocks are not yet ready - let mut blocks = ready!(Pin::new(&mut this.inner).poll(cx)); - - // the blocks are returned in descending order, reverse the list so we can just pop() the - // vec to yield the next block in the stream - blocks.reverse(); - - // pop the first block from the vec as the first stream element and store the rest - let first_result = blocks.pop(); - - // if the inner future is ready, then we can return the blocks - this.blocks = Some(blocks); - - // return the first block - Poll::Ready(first_result) - } -} - /// A request for a range of full blocks. Polling this will poll the inner headers and bodies /// futures until they return responses. It will return either the header or body result, depending /// on which future successfully returned. @@ -743,7 +678,6 @@ enum RangeResponseResult { mod tests { use super::*; use crate::test_utils::TestFullBlockClient; - use futures::StreamExt; use std::ops::Range; #[tokio::test] @@ -809,43 +743,6 @@ mod tests { } } - #[tokio::test] - async fn download_full_block_range_stream() { - let client = TestFullBlockClient::default(); - let (header, body) = insert_headers_into_client(&client, 0..50); - let client = FullBlockClient::test_client(client); - - let future = client.get_full_block_range(header.hash(), 1); - let mut stream = FullBlockRangeStream::from(future); - - // ensure only block in the stream is the one we requested - let received = stream.next().await.expect("response should not be None"); - assert_eq!(received, SealedBlock::new(header.clone(), body.clone())); - - // stream should be done now - assert_eq!(stream.next().await, None); - - // there are 11 total blocks - let future = client.get_full_block_range(header.hash(), 11); - let mut stream = FullBlockRangeStream::from(future); - - // check first header - let received = stream.next().await.expect("response should not be None"); - let mut curr_number = received.number; - assert_eq!(received, SealedBlock::new(header.clone(), body.clone())); - - // check the rest of the headers - for _ in 0..10 { - let received = stream.next().await.expect("response should not be None"); - assert_eq!(received.number, curr_number - 1); - curr_number = received.number; - } - - // ensure stream is done - let received = stream.next().await; - assert!(received.is_none()); - } - #[tokio::test] async fn download_full_block_range_over_soft_limit() { // default soft limit is 20, so we will request 50 blocks diff --git a/crates/net/p2p/src/headers/client.rs b/crates/net/p2p/src/headers/client.rs index 5b70aa1e5..4a4b903a8 100644 --- a/crates/net/p2p/src/headers/client.rs +++ 
b/crates/net/p2p/src/headers/client.rs
@@ -1,7 +1,7 @@
 use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority};
 use futures::{Future, FutureExt};
-pub use reth_eth_wire_types::BlockHeaders;
-use reth_primitives::{BlockHashOrNumber, Header, HeadersDirection};
+pub use reth_eth_wire_types::{BlockHeaders, HeadersDirection};
+use reth_primitives::{BlockHashOrNumber, Header};
 use std::{
     fmt::Debug,
     pin::Pin,
diff --git a/crates/net/p2p/src/test_utils/full_block.rs b/crates/net/p2p/src/test_utils/full_block.rs
index cfba59dbf..731aa39e7 100644
--- a/crates/net/p2p/src/test_utils/full_block.rs
+++ b/crates/net/p2p/src/test_utils/full_block.rs
@@ -6,10 +6,10 @@ use crate::{
     priority::Priority,
 };
 use parking_lot::Mutex;
+use reth_eth_wire_types::HeadersDirection;
 use reth_network_peers::{PeerId, WithPeerId};
 use reth_primitives::{
-    BlockBody, BlockHashOrNumber, BlockNumHash, Header, HeadersDirection, SealedBlock,
-    SealedHeader, B256,
+    BlockBody, BlockHashOrNumber, BlockNumHash, Header, SealedBlock, SealedHeader, B256,
 };
 use std::{collections::HashMap, sync::Arc};
diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs
index 73dd04849..a47753539 100644
--- a/crates/net/p2p/src/test_utils/headers.rs
+++ b/crates/net/p2p/src/test_utils/headers.rs
@@ -12,8 +12,9 @@ use crate::{
 };
 use futures::{Future, FutureExt, Stream, StreamExt};
 use reth_consensus::{test_utils::TestConsensus, Consensus};
+use reth_eth_wire_types::HeadersDirection;
 use reth_network_peers::{PeerId, WithPeerId};
-use reth_primitives::{Header, HeadersDirection, SealedHeader};
+use reth_primitives::{Header, SealedHeader};
 use std::{
     fmt,
     pin::Pin,
diff --git a/crates/net/peers/Cargo.toml b/crates/net/peers/Cargo.toml
index bd6af706c..bf05fb68a 100644
--- a/crates/net/peers/Cargo.toml
+++ b/crates/net/peers/Cargo.toml
@@ -25,13 +25,15 @@ secp256k1 = { workspace = true, optional = true }
 serde_with.workspace = true
 thiserror.workspace = true
 url.workspace = true
-tokio = { workspace = true, features = ["full"] }
+tokio = { workspace = true, optional = true }

 [dev-dependencies]
 alloy-primitives = { workspace = true, features = ["rand"] }
 rand.workspace = true
 secp256k1 = { workspace = true, features = ["rand"] }
 serde_json.workspace = true
+tokio = { workspace = true, features = ["net", "macros", "rt"] }

 [features]
 secp256k1 = ["dep:secp256k1", "enr/secp256k1"]
+net = ["dep:tokio", "tokio?/net"]
diff --git a/crates/net/peers/src/bootnodes/bsc.rs b/crates/net/peers/src/bootnodes/bsc.rs
new file mode 100644
index 000000000..40dfa4de8
--- /dev/null
+++ b/crates/net/peers/src/bootnodes/bsc.rs
@@ -0,0 +1,19 @@
+//! BSC bootnodes
+
+/// Bsc mainnet boot nodes.
+pub static BSC_MAINNET_BOOTNODES: &[&str] = &[ + "enode://433c8bfdf53a3e2268ccb1b829e47f629793291cbddf0c76ae626da802f90532251fc558e2e0d10d6725e759088439bf1cd4714716b03a259a35d4b2e4acfa7f@52.69.102.73:30311", + "enode://571bee8fb902a625942f10a770ccf727ae2ba1bab2a2b64e121594a99c9437317f6166a395670a00b7d93647eacafe598b6bbcef15b40b6d1a10243865a3e80f@35.73.84.120:30311", + "enode://fac42fb0ba082b7d1eebded216db42161163d42e4f52c9e47716946d64468a62da4ba0b1cac0df5e8bf1e5284861d757339751c33d51dfef318be5168803d0b5@18.203.152.54:30311", + "enode://3063d1c9e1b824cfbb7c7b6abafa34faec6bb4e7e06941d218d760acdd7963b274278c5c3e63914bd6d1b58504c59ec5522c56f883baceb8538674b92da48a96@34.250.32.100:30311", + "enode://ad78c64a4ade83692488aa42e4c94084516e555d3f340d9802c2bf106a3df8868bc46eae083d2de4018f40e8d9a9952c32a0943cd68855a9bc9fd07aac982a6d@34.204.214.24:30311", + "enode://5db798deb67df75d073f8e2953dad283148133acb520625ea804c9c4ad09a35f13592a762d8f89056248f3889f6dcc33490c145774ea4ff2966982294909b37a@107.20.191.97:30311", +]; + +/// Bsc testnet boot nodes. +pub static BSC_TESTNET_BOOTNODES: &[&str] = &[ + "enode://0637d1e62026e0c8685b1db0ca1c767c78c95c3fab64abc468d1a64b12ca4b530b46b8f80c915aec96f74f7ffc5999e8ad6d1484476f420f0c10e3d42361914b@52.199.214.252:30311", + "enode://df1e8eb59e42cad3c4551b2a53e31a7e55a2fdde1287babd1e94b0836550b489ba16c40932e4dacb16cba346bd442c432265a299c4aca63ee7bb0f832b9f45eb@52.51.80.128:30311", + "enode://ecd664250ca19b1074dcfbfb48576a487cc18d052064222a363adacd2650f8e08fb3db9de7a7aecb48afa410eaeb3285e92e516ead01fb62598553aed91ee15e@3.209.122.123:30311", + "enode://665cf77ca26a8421cfe61a52ac312958308d4912e78ce8e0f61d6902e4494d4cc38f9b0dd1b23a427a7a5734e27e5d9729231426b06bb9c73b56a142f83f6b68@52.72.123.113:30311", +]; diff --git a/crates/net/peers/src/bootnodes/ethereum.rs b/crates/net/peers/src/bootnodes/ethereum.rs new file mode 100644 index 000000000..9cb6aac00 --- /dev/null +++ b/crates/net/peers/src/bootnodes/ethereum.rs @@ -0,0 +1,24 @@ +//! 
Ethereum bootnodes come from + +/// Ethereum Foundation Go Bootnodes +pub static MAINNET_BOOTNODES : [&str; 4] = [ + "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303", // bootnode-aws-ap-southeast-1-001 + "enode://22a8232c3abc76a16ae9d6c3b164f98775fe226f0917b0ca871128a74a8e9630b458460865bab457221f1d448dd9791d24c4e5d88786180ac185df813a68d4de@3.209.45.79:30303", // bootnode-aws-us-east-1-001 + "enode://2b252ab6a1d0f971d9722cb839a42cb81db019ba44c08754628ab4a823487071b5695317c8ccd085219c3a03af063495b2f1da8d18218da2d6a82981b45e6ffc@65.108.70.101:30303", // bootnode-hetzner-hel + "enode://4aeb4ab6c14b23e2c4cfdce879c04b0748a20d8e9b59e25ded2a08143e265c6c25936e74cbc8e641e3312ca288673d91f2f93f8e277de3cfa444ecdaaf982052@157.90.35.166:30303", // bootnode-hetzner-fsn +]; + +/// Ethereum Foundation Sepolia Bootnodes +pub static SEPOLIA_BOOTNODES : [&str; 5] = [ + "enode://4e5e92199ee224a01932a377160aa432f31d0b351f84ab413a8e0a42f4f36476f8fb1cbe914af0d9aef0d51665c214cf653c651c4bbd9d5550a934f241f1682b@138.197.51.181:30303", // sepolia-bootnode-1-nyc3 + "enode://143e11fb766781d22d92a2e33f8f104cddae4411a122295ed1fdb6638de96a6ce65f5b7c964ba3763bba27961738fef7d3ecc739268f3e5e771fb4c87b6234ba@146.190.1.103:30303", // sepolia-bootnode-1-sfo3 + "enode://8b61dc2d06c3f96fddcbebb0efb29d60d3598650275dc469c22229d3e5620369b0d3dedafd929835fe7f489618f19f456fe7c0df572bf2d914a9f4e006f783a9@170.64.250.88:30303", // sepolia-bootnode-1-syd1 + "enode://10d62eff032205fcef19497f35ca8477bea0eadfff6d769a147e895d8b2b8f8ae6341630c645c30f5df6e67547c03494ced3d9c5764e8622a26587b083b028e8@139.59.49.206:30303", // sepolia-bootnode-1-blr1 + "enode://9e9492e2e8836114cc75f5b929784f4f46c324ad01daf87d956f98b3b6c5fcba95524d6e5cf9861dc96a2c8a171ea7105bb554a197455058de185fa870970c7c@138.68.123.152:30303", // sepolia-bootnode-1-ams3 +]; + +/// Ethereum Foundation Holesky Bootnodes +pub static HOLESKY_BOOTNODES : [&str; 2] = [ + "enode://ac906289e4b7f12df423d654c5a962b6ebe5b3a74cc9e06292a85221f9a64a6f1cfdd6b714ed6dacef51578f92b34c60ee91e9ede9c7f8fadc4d347326d95e2b@146.190.13.128:30303", + "enode://a3435a0155a3e837c02f5e7f5662a2f1fbc25b48e4dc232016e1c51b544cb5b4510ef633ea3278c0e970fa8ad8141e2d4d0f9f95456c537ff05fdf9b31c15072@178.128.136.233:30303", +]; diff --git a/crates/net/peers/src/bootnodes/mod.rs b/crates/net/peers/src/bootnodes/mod.rs new file mode 100644 index 000000000..0e0d531e4 --- /dev/null +++ b/crates/net/peers/src/bootnodes/mod.rs @@ -0,0 +1,72 @@ +//! 
Bootnodes for the network
+
+use crate::NodeRecord;
+
+mod ethereum;
+pub use ethereum::*;
+
+mod optimism;
+pub use optimism::*;
+
+mod bsc;
+pub use bsc::*;
+
+/// Returns parsed mainnet nodes
+pub fn mainnet_nodes() -> Vec<NodeRecord> {
+    parse_nodes(&MAINNET_BOOTNODES[..])
+}
+
+/// Returns parsed sepolia nodes
+pub fn sepolia_nodes() -> Vec<NodeRecord> {
+    parse_nodes(&SEPOLIA_BOOTNODES[..])
+}
+
+/// Returns parsed holesky nodes
+pub fn holesky_nodes() -> Vec<NodeRecord> {
+    parse_nodes(&HOLESKY_BOOTNODES[..])
+}
+
+/// Returns parsed op-stack mainnet nodes
+pub fn op_nodes() -> Vec<NodeRecord> {
+    parse_nodes(OP_BOOTNODES)
+}
+
+/// Returns parsed op-stack testnet nodes
+pub fn op_testnet_nodes() -> Vec<NodeRecord> {
+    parse_nodes(OP_TESTNET_BOOTNODES)
+}
+
+/// Returns parsed op-stack base mainnet nodes
+pub fn base_nodes() -> Vec<NodeRecord> {
+    parse_nodes(OP_BOOTNODES)
+}
+
+/// Returns parsed op-stack base testnet nodes
+pub fn base_testnet_nodes() -> Vec<NodeRecord> {
+    parse_nodes(OP_TESTNET_BOOTNODES)
+}
+
+/// Returns parsed opbnb testnet nodes
+pub fn opbnb_testnet_nodes() -> Vec<NodeRecord> {
+    parse_nodes(OPBNB_TESTNET_BOOTNODES)
+}
+
+/// Returns parsed opbnb mainnet nodes
+pub fn opbnb_mainnet_nodes() -> Vec<NodeRecord> {
+    parse_nodes(OPBNB_MAINNET_BOOTNODES)
+}
+
+/// Returns parsed bsc mainnet nodes
+pub fn bsc_mainnet_nodes() -> Vec<NodeRecord> {
+    parse_nodes(BSC_MAINNET_BOOTNODES)
+}
+
+/// Returns parsed bsc testnet nodes
+pub fn bsc_testnet_nodes() -> Vec<NodeRecord> {
+    parse_nodes(BSC_TESTNET_BOOTNODES)
+}
+
+/// Parses all the nodes
+pub fn parse_nodes(nodes: impl IntoIterator<Item = impl AsRef<str>>) -> Vec<NodeRecord> {
+    nodes.into_iter().map(|s| s.as_ref().parse().unwrap()).collect()
+}
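Reviewer note on the helpers above: `parse_nodes` calls `.parse().unwrap()` on every entry, so the hardcoded lists must stay well-formed enode URLs. A minimal sketch of the intended consumption, reusing one of the mainnet entries above (crate path `reth_network_peers` as imported elsewhere in this diff):

use reth_network_peers::{mainnet_nodes, parse_nodes, NodeRecord};

fn main() {
    // Built-in presets parse the hardcoded lists into `NodeRecord`s.
    let nodes: Vec<NodeRecord> = mainnet_nodes();
    assert_eq!(nodes.len(), 4);

    // Ad-hoc lists go through the same parser; a malformed entry would panic here.
    let custom = parse_nodes([
        "enode://d860a01f9722d78051619d1e2351aba3f43f943f6f00718d1b9baa4101932a1f5011f16bb2b1bb35db20d6fe28fa0bf09636d26a87d31de9ec6203eeedb1f666@18.138.108.67:30303",
    ]);
    assert_eq!(custom[0].tcp_port, 30303);
}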
diff --git a/crates/net/peers/src/bootnodes/optimism.rs b/crates/net/peers/src/bootnodes/optimism.rs
new file mode 100644
index 000000000..c5f4eab97
--- /dev/null
+++ b/crates/net/peers/src/bootnodes/optimism.rs
@@ -0,0 +1,38 @@
+//! OP bootnodes come from
+
+/// OP stack mainnet boot nodes.
+pub static OP_BOOTNODES: &[&str] = &[
+    // OP Labs
+    "enode://ca2774c3c401325850b2477fd7d0f27911efbf79b1e8b335066516e2bd8c4c9e0ba9696a94b1cb030a88eac582305ff55e905e64fb77fe0edcd70a4e5296d3ec@34.65.175.185:30305",
+    "enode://dd751a9ef8912be1bfa7a5e34e2c3785cc5253110bd929f385e07ba7ac19929fb0e0c5d93f77827291f4da02b2232240fbc47ea7ce04c46e333e452f8656b667@34.65.107.0:30305",
+    "enode://c5d289b56a77b6a2342ca29956dfd07aadf45364dde8ab20d1dc4efd4d1bc6b4655d902501daea308f4d8950737a4e93a4dfedd17b49cd5760ffd127837ca965@34.65.202.239:30305",
+    // Base
+    "enode://87a32fd13bd596b2ffca97020e31aef4ddcc1bbd4b95bb633d16c1329f654f34049ed240a36b449fda5e5225d70fe40bc667f53c304b71f8e68fc9d448690b51@3.231.138.188:30301",
+    "enode://ca21ea8f176adb2e229ce2d700830c844af0ea941a1d8152a9513b966fe525e809c3a6c73a2c18a12b74ed6ec4380edf91662778fe0b79f6a591236e49e176f9@184.72.129.189:30301",
+    "enode://acf4507a211ba7c1e52cdf4eef62cdc3c32e7c9c47998954f7ba024026f9a6b2150cd3f0b734d9c78e507ab70d59ba61dfe5c45e1078c7ad0775fb251d7735a2@3.220.145.177:30301",
+    "enode://8a5a5006159bf079d06a04e5eceab2a1ce6e0f721875b2a9c96905336219dbe14203d38f70f3754686a6324f786c2f9852d8c0dd3adac2d080f4db35efc678c5@3.231.11.52:30301",
+    "enode://cdadbe835308ad3557f9a1de8db411da1a260a98f8421d62da90e71da66e55e98aaa8e90aa7ce01b408a54e4bd2253d701218081ded3dbe5efbbc7b41d7cef79@54.198.153.150:30301"
+];
+
+/// OP stack testnet boot nodes.
+pub static OP_TESTNET_BOOTNODES: &[&str] = &[
+    // OP Labs
+    "enode://2bd2e657bb3c8efffb8ff6db9071d9eb7be70d7c6d7d980ff80fc93b2629675c5f750bc0a5ef27cd788c2e491b8795a7e9a4a6e72178c14acc6753c0e5d77ae4@34.65.205.244:30305",
+    "enode://db8e1cab24624cc62fc35dbb9e481b88a9ef0116114cd6e41034c55b5b4f18755983819252333509bd8e25f6b12aadd6465710cd2e956558faf17672cce7551f@34.65.173.88:30305",
+    "enode://bfda2e0110cfd0f4c9f7aa5bf5ec66e6bd18f71a2db028d36b8bf8b0d6fdb03125c1606a6017b31311d96a36f5ef7e1ad11604d7a166745e6075a715dfa67f8a@34.65.229.245:30305",
+    // Base
+    "enode://548f715f3fc388a7c917ba644a2f16270f1ede48a5d88a4d14ea287cc916068363f3092e39936f1a3e7885198bef0e5af951f1d7b1041ce8ba4010917777e71f@18.210.176.114:30301",
+    "enode://6f10052847a966a725c9f4adf6716f9141155b99a0fb487fea3f51498f4c2a2cb8d534e680ee678f9447db85b93ff7c74562762c3714783a7233ac448603b25f@107.21.251.55:30301",
+];
+
+/// OPBNB mainnet boot nodes.
+pub static OPBNB_MAINNET_BOOTNODES: &[&str] = &[
+    "enode://db109c6cac5c8b6225edd3176fc3764c58e0720950fe94c122c80978e706a9c9e976629b718e48b6306ea0f9126e5394d3424c9716c5703549e2e7eba216353b@52.193.218.151:30304",
+    "enode://afe18782053bb31fb7ea41e1acf659ab9bd1eec181fb97331f0a6b61871a469b4f75138f903c977796be1cc2a3c985d33150a396e878d3cd6e4723b6040ff9c0@52.195.105.192:30304",
+];
+
+/// OPBNB testnet boot nodes.
+pub static OPBNB_TESTNET_BOOTNODES: &[&str] = &[
+    "enode://217cfe091047a1c3f490e96d51e2f3bd90517a9be77b8a6033b31833a193aa6c33b6d07088c4980f462162635ffbccaa413dc28cb14c4f2b96af0dd97292411f@13.112.117.88:30304",
+    "enode://38c8913f87d64179bac23514ddb56a17f5b28f7e253b3825a10a2c8b9553c5df7d3b6c83a96948ad0466f384bf63236fd5e6bed6d6402156749b6b0899c82d47@54.199.235.83:30304",
+];
diff --git a/crates/net/peers/src/lib.rs b/crates/net/peers/src/lib.rs
index f531f1eb8..e80331f90 100644
--- a/crates/net/peers/src/lib.rs
+++ b/crates/net/peers/src/lib.rs
@@ -39,6 +39,11 @@
 //! - [`TrustedPeer`]: A [`NodeRecord`] with an optional domain name, which can be resolved to a
 //!   [`NodeRecord`]. Useful for adding trusted peers at startup, whose IP address may not be
 //!   static.
+//!
+//!
+//! ## Feature Flags
+//!
+//! - `net`: Support for address lookups.
 
 #![doc(
     html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
@@ -63,6 +68,9 @@ pub use node_record::{NodeRecord, NodeRecordParseError};
 pub mod trusted_peer;
 pub use trusted_peer::TrustedPeer;
 
+mod bootnodes;
+pub use bootnodes::*;
+
 /// This tag should be set to indicate to libsecp256k1 that the following bytes denote an
 /// uncompressed pubkey.
 ///
diff --git a/crates/net/peers/src/node_record.rs b/crates/net/peers/src/node_record.rs
index 3b6c38170..5f268c253 100644
--- a/crates/net/peers/src/node_record.rs
+++ b/crates/net/peers/src/node_record.rs
@@ -92,6 +92,17 @@ impl NodeRecord {
         Self { address: addr.ip(), tcp_port: addr.port(), udp_port: addr.port(), id }
     }
 
+    /// Creates a new record from an ip address and ports.
+    pub fn new_with_ports(
+        ip_addr: IpAddr,
+        tcp_port: u16,
+        udp_port: Option<u16>,
+        id: PeerId,
+    ) -> Self {
+        let udp_port = udp_port.unwrap_or(tcp_port);
+        Self { address: ip_addr, tcp_port, udp_port, id }
+    }
+
     /// The TCP socket address of this node
     #[must_use]
     pub const fn tcp_addr(&self) -> SocketAddr {
diff --git a/crates/net/peers/src/trusted_peer.rs b/crates/net/peers/src/trusted_peer.rs
index 27096bcab..aa7e0a015 100644
--- a/crates/net/peers/src/trusted_peer.rs
+++ b/crates/net/peers/src/trusted_peer.rs
@@ -45,24 +45,42 @@ impl TrustedPeer {
         Self { host, tcp_port: port, udp_port: port, id }
     }
 
+    const fn to_node_record(&self, ip: IpAddr) -> NodeRecord {
+        NodeRecord { address: ip, id: self.id, tcp_port: self.tcp_port, udp_port: self.udp_port }
+    }
+
+    /// Tries to resolve directly to a [`NodeRecord`] if the host is an IP address.
+    fn try_node_record(&self) -> Result<NodeRecord, &str> {
+        match &self.host {
+            Host::Ipv4(ip) => Ok(self.to_node_record((*ip).into())),
+            Host::Ipv6(ip) => Ok(self.to_node_record((*ip).into())),
+            Host::Domain(domain) => Err(domain),
+        }
+    }
+
+    /// Resolves the host in a [`TrustedPeer`] to an IP address, returning a [`NodeRecord`].
+    ///
+    /// This uses [`ToSocketAddrs`](std::net::ToSocketAddrs) to resolve the host to an IP address.
+    pub fn resolve_blocking(&self) -> Result<NodeRecord, Error> {
+        let domain = match self.try_node_record() {
+            Ok(record) => return Ok(record),
+            Err(domain) => domain,
+        };
+        // Resolve the domain to an IP address
+        let mut ips = std::net::ToSocketAddrs::to_socket_addrs(&(domain, 0))?;
+        let ip = ips
+            .next()
+            .ok_or_else(|| Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found"))?;
+
+        Ok(self.to_node_record(ip.ip()))
+    }
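Both additions are small construction conveniences; a hedged sketch of their use (values illustrative, `url` crate in scope just as in the tests below):

use std::net::{IpAddr, Ipv4Addr};

use reth_network_peers::{NodeRecord, PeerId, TrustedPeer};

fn main() -> std::io::Result<()> {
    let id = PeerId::random();

    // `new_with_ports` falls back to the TCP port when no UDP port is given.
    let rec = NodeRecord::new_with_ports(IpAddr::V4(Ipv4Addr::LOCALHOST), 30303, None, id);
    assert_eq!(rec.udp_port, 30303);

    // `resolve_blocking` is the sync path used later for `--bootnodes`:
    // IP hosts short-circuit via `try_node_record`, domains go through `ToSocketAddrs`.
    let peer = TrustedPeer::new(url::Host::Domain("localhost".to_owned()), 30303, id);
    let resolved: NodeRecord = peer.resolve_blocking()?;
    assert!(resolved.address.is_loopback());
    Ok(())
}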
+    /// Resolves the host in a [`TrustedPeer`] to an IP address, returning a [`NodeRecord`].
+    #[cfg(any(test, feature = "net"))]
     pub async fn resolve(&self) -> Result<NodeRecord, Error> {
-        let domain = match self.host.to_owned() {
-            Host::Ipv4(ip) => {
-                let id = self.id;
-                let tcp_port = self.tcp_port;
-                let udp_port = self.udp_port;
-
-                return Ok(NodeRecord { address: ip.into(), id, tcp_port, udp_port })
-            }
-            Host::Ipv6(ip) => {
-                let id = self.id;
-                let tcp_port = self.tcp_port;
-                let udp_port = self.udp_port;
-
-                return Ok(NodeRecord { address: ip.into(), id, tcp_port, udp_port })
-            }
-            Host::Domain(domain) => domain,
+        let domain = match self.try_node_record() {
+            Ok(record) => return Ok(record),
+            Err(domain) => domain,
         };
 
         // Resolve the domain to an IP address
@@ -70,12 +88,8 @@ impl TrustedPeer {
         let ip = ips
             .next()
             .ok_or_else(|| Error::new(std::io::ErrorKind::AddrNotAvailable, "No IP found"))?;
-        Ok(NodeRecord {
-            address: ip.ip(),
-            id: self.id,
-            tcp_port: self.tcp_port,
-            udp_port: self.udp_port,
-        })
+
+        Ok(self.to_node_record(ip.ip()))
     }
 }
 
@@ -285,15 +299,16 @@ mod tests {
             TrustedPeer::new(url::Host::Domain(domain.to_owned()), 30300, PeerId::random());
 
         // Resolve domain and validate
-        let rec = rec.resolve().await.unwrap();
-        match rec.address {
-            std::net::IpAddr::V4(addr) => {
+        let ensure = |rec: NodeRecord| match rec.address {
+            IpAddr::V4(addr) => {
                 assert_eq!(addr, std::net::Ipv4Addr::new(127, 0, 0, 1))
             }
-            std::net::IpAddr::V6(addr) => {
-                assert_eq!(addr, std::net::Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))
+            IpAddr::V6(addr) => {
+                assert_eq!(addr, Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))
             }
-        }
+        };
+        ensure(rec.resolve().await.unwrap());
+        ensure(rec.resolve_blocking().unwrap());
     }
 }
diff --git a/crates/node-core/src/args/utils.rs b/crates/node-core/src/args/utils.rs
deleted file mode 100644
index 59e2f6e99..000000000
--- a/crates/node-core/src/args/utils.rs
+++ /dev/null
@@ -1,193 +0,0 @@
-//! Clap parser utilities
-
-use alloy_genesis::Genesis;
-use reth_chainspec::ChainSpec;
-use reth_fs_util as fs;
-use reth_primitives::{BlockHashOrNumber, B256};
-use std::{
-    net::{IpAddr, Ipv4Addr, SocketAddr, ToSocketAddrs},
-    path::PathBuf,
-    str::FromStr,
-    sync::Arc,
-    time::Duration,
-};
-
-use reth_chainspec::DEV;
-
-#[cfg(feature = "bsc")]
-use reth_primitives::{BSC_MAINNET, BSC_TESTNET};
-
-#[cfg(feature = "optimism")]
-use reth_chainspec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA};
-
-#[cfg(all(feature = "optimism", feature = "opbnb"))]
-use reth_primitives::{OPBNB_MAINNET, OPBNB_TESTNET};
-
-#[cfg(not(feature = "optimism"))]
-use reth_chainspec::{GOERLI, HOLESKY, MAINNET, SEPOLIA};
-
-#[cfg(feature = "bsc")]
-/// Chains supported by bsc. First value should be used as the default.
-pub const SUPPORTED_CHAINS: &[&str] = &["bsc", "bsc-testnet"];
-#[cfg(feature = "optimism")]
-/// Chains supported by op-reth. First value should be used as the default.
-pub const SUPPORTED_CHAINS: &[&str] =
-    &["optimism", "optimism-sepolia", "base", "base-sepolia", "opbnb-mainnet", "opbnb-testnet"];
-#[cfg(all(not(feature = "optimism"), not(feature = "bsc")))]
-/// Chains supported by reth. First value should be used as the default.
-pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "sepolia", "goerli", "holesky", "dev"];
-
-/// Helper to parse a [Duration] from seconds
-pub fn parse_duration_from_secs(arg: &str) -> eyre::Result<Duration> {
-    let seconds = arg.parse()?;
-    Ok(Duration::from_secs(seconds))
-}
-
-/// The help info for the --chain flag
-pub fn chain_help() -> String {
-    format!("The chain this node is running.\nPossible values are either a built-in chain or the path to a chain specification file.\n\nBuilt-in chains:\n    {}", SUPPORTED_CHAINS.join(", "))
-}
-
-/// Clap value parser for [`ChainSpec`]s.
-///
-/// The value parser matches either a known chain, the path
-/// to a json file, or a json formatted string in-memory. The json needs to be a Genesis struct.
-pub fn chain_value_parser(s: &str) -> eyre::Result<Arc<ChainSpec>, eyre::Error> {
-    Ok(match s {
-        #[cfg(not(feature = "optimism"))]
-        "mainnet" => MAINNET.clone(),
-        #[cfg(not(feature = "optimism"))]
-        "goerli" => GOERLI.clone(),
-        #[cfg(not(feature = "optimism"))]
-        "sepolia" => SEPOLIA.clone(),
-        #[cfg(not(feature = "optimism"))]
-        "holesky" => HOLESKY.clone(),
-        "dev" => DEV.clone(),
-        #[cfg(feature = "optimism")]
-        "optimism" => OP_MAINNET.clone(),
-        #[cfg(feature = "optimism")]
-        "optimism_sepolia" | "optimism-sepolia" => OP_SEPOLIA.clone(),
-        #[cfg(feature = "optimism")]
-        "base" => BASE_MAINNET.clone(),
-        #[cfg(feature = "optimism")]
-        "base_sepolia" | "base-sepolia" => BASE_SEPOLIA.clone(),
-        #[cfg(all(feature = "optimism", feature = "opbnb"))]
-        "opbnb_mainnet" | "opbnb-mainnet" => OPBNB_MAINNET.clone(),
-        #[cfg(all(feature = "optimism", feature = "opbnb"))]
-        "opbnb_testnet" | "opbnb-testnet" => OPBNB_TESTNET.clone(),
-        #[cfg(feature = "bsc")]
-        "bsc" | "bsc-mainnet" => BSC_MAINNET.clone(),
-        #[cfg(feature = "bsc")]
-        "bsc-testnet" => BSC_TESTNET.clone(),
-        _ => {
-            // try to read json from path first
-            let raw = match fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned())) {
-                Ok(raw) => raw,
-                Err(io_err) => {
-                    // valid json may start with "\n", but must contain "{"
-                    if s.contains('{') {
-                        s.to_string()
-                    } else {
-                        return Err(io_err.into()) // assume invalid path
-                    }
-                }
-            };
-
-            // both serialized Genesis and ChainSpec structs supported
-            let genesis: Genesis = serde_json::from_str(&raw)?;
-
-            Arc::new(genesis.into())
-        }
-    })
-}
-
-/// Parse [`BlockHashOrNumber`]
-pub fn hash_or_num_value_parser(value: &str) -> eyre::Result<BlockHashOrNumber> {
-    match B256::from_str(value) {
-        Ok(hash) => Ok(BlockHashOrNumber::Hash(hash)),
-        Err(_) => Ok(BlockHashOrNumber::Number(value.parse()?)),
-    }
-}
-
-/// Error thrown while parsing a socket address.
-#[derive(thiserror::Error, Debug)]
-pub enum SocketAddressParsingError {
-    /// Failed to convert the string into a socket addr
-    #[error("could not parse socket address: {0}")]
-    Io(#[from] std::io::Error),
-    /// Input must not be empty
-    #[error("cannot parse socket address from empty string")]
-    Empty,
-    /// Failed to parse the address
-    #[error("could not parse socket address from {0}")]
-    Parse(String),
-    /// Failed to parse port
-    #[error("could not parse port: {0}")]
-    Port(#[from] std::num::ParseIntError),
-}
-
-/// Parse a [`SocketAddr`] from a `str`.
-///
-/// The following formats are checked:
-///
-/// - If the value can be parsed as a `u16` or starts with `:` it is considered a port, and the
-///   hostname is set to `localhost`.
-/// - If the value contains `:` it is assumed to be the format `<host>:<port>`
-/// - Otherwise it is assumed to be a hostname
-///
-/// An error is returned if the value is empty.
-pub fn parse_socket_address(value: &str) -> eyre::Result<SocketAddr, SocketAddressParsingError> {
-    if value.is_empty() {
-        return Err(SocketAddressParsingError::Empty)
-    }
-
-    if let Some(port) = value.strip_prefix(':').or_else(|| value.strip_prefix("localhost:")) {
-        let port: u16 = port.parse()?;
-        return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port))
-    }
-    if let Ok(port) = value.parse::<u16>() {
-        return Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port))
-    }
-    value
-        .to_socket_addrs()?
-        .next()
-        .ok_or_else(|| SocketAddressParsingError::Parse(value.to_string()))
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use proptest::prelude::Rng;
-    use secp256k1::rand::thread_rng;
-
-    #[test]
-    fn parse_known_chain_spec() {
-        for chain in SUPPORTED_CHAINS {
-            chain_value_parser(chain).unwrap();
-        }
-    }
-
-    #[test]
-    fn parse_socket_addresses() {
-        for value in ["localhost:9000", ":9000", "9000"] {
-            let socket_addr = parse_socket_address(value)
-                .unwrap_or_else(|_| panic!("could not parse socket address: {value}"));
-
-            assert!(socket_addr.ip().is_loopback());
-            assert_eq!(socket_addr.port(), 9000);
-        }
-    }
-
-    #[test]
-    fn parse_socket_address_random() {
-        let port: u16 = thread_rng().gen();
-
-        for value in [format!("localhost:{port}"), format!(":{port}"), port.to_string()] {
-            let socket_addr = parse_socket_address(&value)
-                .unwrap_or_else(|_| panic!("could not parse socket address: {value}"));
-
-            assert!(socket_addr.ip().is_loopback());
-            assert_eq!(socket_addr.port(), port);
-        }
-    }
-}
diff --git a/crates/node-core/src/metrics/version_metrics.rs b/crates/node-core/src/metrics/version_metrics.rs
deleted file mode 100644
index f0b11c3b7..000000000
--- a/crates/node-core/src/metrics/version_metrics.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-//! This exposes reth's version information over prometheus.
-
-use crate::version::build_profile_name;
-use metrics::gauge;
-
-const LABELS: [(&str, &str); 6] = [
-    ("version", env!("CARGO_PKG_VERSION")),
-    ("build_timestamp", env!("VERGEN_BUILD_TIMESTAMP")),
-    ("cargo_features", env!("VERGEN_CARGO_FEATURES")),
-    ("git_sha", env!("VERGEN_GIT_SHA")),
-    ("target_triple", env!("VERGEN_CARGO_TARGET_TRIPLE")),
-    ("build_profile", build_profile_name()),
-];
-
-/// This exposes reth's version information over prometheus.
-pub fn register_version_metrics() { - let _gauge = gauge!("info", &LABELS); -} diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 6be142582..04bc54ccb 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -21,7 +21,7 @@ reth-db-common.workspace = true reth-exex.workspace = true reth-evm.workspace = true reth-provider.workspace = true -reth-db = { workspace = true, features = ["mdbx"] } +reth-db = { workspace = true, features = ["mdbx"], optional = true } reth-db-api.workspace = true reth-rpc-engine-api.workspace = true reth-rpc.workspace = true @@ -45,7 +45,10 @@ reth-node-events.workspace = true reth-consensus.workspace = true reth-consensus-debug-client.workspace = true reth-rpc-types.workspace = true -reth-bsc-consensus = { workspace = true, optional = true} +reth-engine-util.workspace = true +reth-cli-util.workspace = true +reth-bsc-consensus = { workspace = true, optional = true } +reth-bsc-engine = { workspace = true, optional = true } ## async futures.workspace = true @@ -57,9 +60,6 @@ tokio = { workspace = true, features = [ ] } tokio-stream.workspace = true -## ethereum -discv5.workspace = true - ## crypto secp256k1 = { workspace = true, features = [ "global-context", @@ -75,10 +75,16 @@ confy.workspace = true rayon.workspace = true backon.workspace = true +# tracing +tracing.workspace = true + [dev-dependencies] tempfile.workspace = true [features] +default = [] +test-utils = ["reth-db/test-utils"] bsc = [ - "reth-bsc-consensus" + "reth-bsc-engine/bsc", + "reth-beacon-consensus/bsc", ] \ No newline at end of file diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index c3255ceed..d46b73d76 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -9,13 +9,9 @@ use crate::{ rpc::{RethRpcServerHandles, RpcContext}, DefaultNodeLauncher, Node, NodeHandle, }; -use discv5::ListenConfig; use futures::Future; use reth_chainspec::ChainSpec; -use reth_db::{ - test_utils::{create_test_rw_db_with_path, tempdir_path, TempDatabase}, - DatabaseEnv, -}; +use reth_cli_util::get_secret_key; use reth_db_api::{ database::Database, database_metrics::{DatabaseMetadata, DatabaseMetrics}, @@ -26,23 +22,19 @@ use reth_network::{ }; use reth_node_api::{FullNodeTypes, FullNodeTypesAdapter, NodeTypes}; use reth_node_core::{ - args::{get_secret_key, DatadirArgs}, cli::config::{PayloadBuilderConfig, RethTransactionPoolConfig}, - dirs::{ChainPath, DataDirPath, MaybePlatformPath}, + dirs::{ChainPath, DataDirPath}, node_config::NodeConfig, - primitives::{kzg::KzgSettings, Head}, - utils::write_peers_to_file, + primitives::Head, }; -use reth_primitives::constants::eip4844::MAINNET_KZG_TRUSTED_SETUP; +use reth_primitives::revm_primitives::EnvKzgSettings; use reth_provider::{providers::BlockchainProvider, ChainSpecProvider}; use reth_tasks::TaskExecutor; use reth_transaction_pool::{PoolConfig, TransactionPool}; use secp256k1::SecretKey; pub use states::*; -use std::{ - net::{IpAddr, SocketAddr, SocketAddrV4, SocketAddrV6}, - sync::Arc, -}; +use std::sync::Arc; +use tracing::{info, trace, warn}; mod states; @@ -66,7 +58,7 @@ pub type RethFullAdapter = FullNodeTypesAdapter = FullNodeTypesAdapter = FullNodeTypesAdapter = FullNodeTypesAdapter NodeBuilder { } /// Creates an _ephemeral_ preconfigured node for testing purposes. 
+ #[cfg(feature = "test-utils")] pub fn testing_node( mut self, task_executor: TaskExecutor, - ) -> WithLaunchContext>>> { - let path = MaybePlatformPath::::from(tempdir_path()); - self.config = self - .config - .with_datadir_args(DatadirArgs { datadir: path.clone(), ..Default::default() }); + ) -> WithLaunchContext>>> + { + let path = reth_node_core::dirs::MaybePlatformPath::::from( + reth_db::test_utils::tempdir_path(), + ); + self.config = self.config.with_datadir_args(reth_node_core::args::DatadirArgs { + datadir: path.clone(), + ..Default::default() + }); let data_dir = path.unwrap_or_chain_default(self.config.chain.chain, self.config.datadir.clone()); - let db = create_test_rw_db_with_path(data_dir.db()); + let db = reth_db::test_utils::create_test_rw_db_with_path(data_dir.db()); WithLaunchContext { builder: self.with_database(db), task_executor } } @@ -469,9 +467,9 @@ impl BuilderContext { self.config().txpool.pool_config() } - /// Loads `MAINNET_KZG_TRUSTED_SETUP`. - pub fn kzg_settings(&self) -> eyre::Result> { - Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP)) + /// Loads `EnvKzgSettings::Default`. + pub const fn kzg_settings(&self) -> eyre::Result { + Ok(EnvKzgSettings::Default) } /// Returns the config for payload building. @@ -512,7 +510,18 @@ impl BuilderContext { "p2p network task", |shutdown| { network.run_until_graceful_shutdown(shutdown, |network| { - write_peers_to_file(&network, known_peers_file) + if let Some(peers_file) = known_peers_file { + let num_known_peers = network.num_known_peers(); + trace!(target: "reth::cli", peers_file=?peers_file, num_peers=%num_known_peers, "Saving current peers"); + match network.write_peers_to_file(peers_file.as_path()) { + Ok(_) => { + info!(target: "reth::cli", peers_file=?peers_file, "Wrote network peers to file"); + } + Err(err) => { + warn!(target: "reth::cli", %err, "Failed to write network peers to file"); + } + } + } }) }, ); @@ -530,12 +539,19 @@ impl BuilderContext { pub fn network_config_builder(&self) -> eyre::Result { let secret_key = self.network_secret(&self.config().datadir())?; let default_peers_path = self.config().datadir().known_peers(); - Ok(self.config().network.network_config( - self.reth_config(), - self.config().chain.clone(), - secret_key, - default_peers_path, - )) + let builder = self + .config() + .network + .network_config( + self.reth_config(), + self.config().chain.clone(), + secret_key, + default_peers_path, + ) + .with_task_executor(Box::new(self.executor.clone())) + .set_head(self.head); + + Ok(builder) } /// Get the network secret from the given data dir @@ -551,49 +567,7 @@ impl BuilderContext { &self, network_builder: NetworkConfigBuilder, ) -> NetworkConfig { - network_builder - .with_task_executor(Box::new(self.executor.clone())) - .set_head(self.head) - .listener_addr(SocketAddr::new( - self.config().network.addr, - // set discovery port based on instance number - self.config().network.port + self.config().instance - 1, - )) - .discovery_addr(SocketAddr::new( - self.config().network.discovery.addr, - // set discovery port based on instance number - self.config().network.discovery.port + self.config().instance - 1, - )) - .map_discv5_config_builder(|builder| { - // Use rlpx address if none given - let discv5_addr_ipv4 = self.config().network.discovery.discv5_addr.or( - match self.config().network.addr { - IpAddr::V4(ip) => Some(ip), - IpAddr::V6(_) => None, - }, - ); - let discv5_addr_ipv6 = self.config().network.discovery.discv5_addr_ipv6.or( - match self.config().network.addr { - IpAddr::V4(_) => 
-                        IpAddr::V6(ip) => Some(ip),
-                    },
-                );
-
-                let discv5_port_ipv4 =
-                    self.config().network.discovery.discv5_port + self.config().instance - 1;
-                let discv5_port_ipv6 =
-                    self.config().network.discovery.discv5_port_ipv6 + self.config().instance - 1;
-
-                builder.discv5_config(
-                    discv5::ConfigBuilder::new(ListenConfig::from_two_sockets(
-                        discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, discv5_port_ipv4)),
-                        discv5_addr_ipv6
-                            .map(|addr| SocketAddrV6::new(addr, discv5_port_ipv6, 0, 0)),
-                    ))
-                    .build(),
-                )
-            })
-            .build(self.provider.clone())
+        network_builder.build(self.provider.clone())
     }
 }
diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs
index 45c5fe01c..f57b3f010 100644
--- a/crates/node/builder/src/launch/common.rs
+++ b/crates/node/builder/src/launch/common.rs
@@ -1,18 +1,28 @@
 //! Helper types that can be used by launchers.
 
+use crate::{
+    components::{NodeComponents, NodeComponentsBuilder},
+    hooks::OnComponentInitializedHook,
+    BuilderContext, NodeAdapter,
+};
 use backon::{ConstantBuilder, Retryable};
 use eyre::Context;
 use rayon::ThreadPoolBuilder;
 use reth_auto_seal_consensus::MiningMode;
 use reth_beacon_consensus::EthBeaconConsensus;
-use reth_blockchain_tree::{noop::NoopBlockchainTree, BlockchainTreeConfig};
+use reth_blockchain_tree::{
+    noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree,
+    TreeExternals,
+};
 use reth_chainspec::{Chain, ChainSpec};
 use reth_config::{config::EtlConfig, PruneConfig};
+use reth_consensus::Consensus;
 use reth_db_api::{database::Database, database_metrics::DatabaseMetrics};
 use reth_db_common::init::{init_genesis, InitDatabaseError};
 use reth_downloaders::{bodies::noop::NoopBodiesDownloader, headers::noop::NoopHeaderDownloader};
 use reth_evm::noop::NoopBlockExecutorProvider;
 use reth_network_p2p::headers::client::HeadersClient;
+use reth_node_api::FullNodeTypes;
 use reth_node_core::{
     dirs::{ChainPath, DataDirPath},
     node_config::NodeConfig,
@@ -29,7 +39,7 @@ use reth_stages::{sets::DefaultStages, MetricEvent, Pipeline, PipelineTarget};
 use reth_static_file::StaticFileProducer;
 use reth_tasks::TaskExecutor;
 use reth_tracing::tracing::{debug, error, info, warn};
-use std::{sync::Arc, thread::available_parallelism};
+use std::{marker::PhantomData, sync::Arc, thread::available_parallelism};
 use tokio::sync::{
     mpsc::{unbounded_channel, Receiver, UnboundedSender},
     oneshot, watch,
@@ -61,11 +71,11 @@ impl LaunchContext {
     /// `config`.
     ///
     /// Attaches both the `NodeConfig` and the loaded `reth.toml` config to the launch context.
-    pub async fn with_loaded_toml_config(
+    pub fn with_loaded_toml_config(
         self,
         config: NodeConfig,
     ) -> eyre::Result<LaunchContextWith<WithConfigs>> {
-        let toml_config = self.load_toml_config(&config).await?;
+        let toml_config = self.load_toml_config(&config)?;
         Ok(self.with(WithConfigs { config, toml_config }))
     }
 
@@ -73,7 +83,7 @@ impl LaunchContext {
     /// `config`.
     ///
     /// This is async because the trusted peers may have to be resolved.
-    pub async fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result<reth_config::Config> {
+    pub fn load_toml_config(&self, config: &NodeConfig) -> eyre::Result<reth_config::Config> {
         let config_path = config.config.clone().unwrap_or_else(|| self.data_dir.config());
 
         let mut toml_config = confy::load_path::<reth_config::Config>(&config_path)
@@ -97,7 +107,7 @@ impl LaunchContext {
     ) -> eyre::Result<()> {
         if reth_config.prune.is_none() {
             if let Some(prune_config) = config.prune_config() {
-                reth_config.update_prune_confing(prune_config);
+                reth_config.update_prune_config(prune_config);
                 info!(target: "reth::cli", "Saving prune config to toml file");
                 reth_config.save(config_path.as_ref())?;
             }
@@ -199,15 +209,16 @@ impl LaunchContextWith<WithConfigs> {
             info!(target: "reth::cli", "Adding trusted nodes");
 
             // resolve trusted peers if they use a domain instead of dns
-            for peer in &self.attachment.config.network.trusted_peers {
+            let resolved = futures::future::try_join_all(
+                self.attachment.config.network.trusted_peers.iter().map(|peer| async move {
                 let backoff = ConstantBuilder::default()
                     .with_max_times(self.attachment.config.network.dns_retries);
-                let resolved = (move || { peer.resolve() })
-                    .retry(&backoff)
-                    .notify(|err, _| warn!(target: "reth::cli", "Error resolving peer domain: {err}. Retrying..."))
-                    .await?;
-                self.attachment.toml_config.peers.trusted_nodes.insert(resolved);
-            }
+                (move || { peer.resolve() })
+                    .retry(&backoff)
+                    .notify(|err, _| warn!(target: "reth::cli", "Error resolving peer domain: {err}. Retrying..."))
+                    .await
+            })).await?;
+            self.attachment.toml_config.peers.trusted_nodes.extend(resolved);
         }
         Ok(self)
     }
@@ -241,7 +252,7 @@ impl<DB> LaunchContextWith<Attached<WithConfigs, DB>> {
     /// - Making sure the ETL dir is set to the datadir
     /// - RPC settings are adjusted to the correct port
    pub fn with_adjusted_configs(self) -> Self {
-        self.ensure_etl_datadir().with_adjusted_rpc_instance_ports()
+        self.ensure_etl_datadir().with_adjusted_instance_ports()
     }
 
     /// Make sure ETL doesn't default to /tmp/, but to whatever datadir is set to
@@ -255,7 +266,7 @@ impl<DB> LaunchContextWith<Attached<WithConfigs, DB>> {
     }
 
     /// Change rpc port numbers based on the instance number.
-    pub fn with_adjusted_rpc_instance_ports(mut self) -> Self {
+    pub fn with_adjusted_instance_ports(mut self) -> Self {
         self.node_config_mut().adjust_instance_ports();
         self
     }
@@ -310,9 +321,9 @@ impl<DB> LaunchContextWith<Attached<WithConfigs, DB>> {
         self.toml_config().prune.clone().or_else(|| self.node_config().prune_config())
     }
 
-    /// Returns the configured [`PruneModes`]
-    pub fn prune_modes(&self) -> Option<PruneModes> {
-        self.prune_config().map(|config| config.segments)
+    /// Returns the configured [`PruneModes`], returning the default if no config was available.
+    pub fn prune_modes(&self) -> PruneModes {
+        self.prune_config().map(|config| config.segments).unwrap_or_default()
     }
 
     /// Returns an initialized [`PrunerBuilder`] based on the configured [`PruneConfig`]
@@ -354,6 +365,7 @@ where
             self.chain_spec(),
             StaticFileProvider::read_write(self.data_dir().static_files())?,
         )
+        .with_prune_modes(self.prune_modes())
        .with_static_files_metrics();
 
         let has_receipt_pruning =
@@ -385,14 +397,11 @@
                 NoopBodiesDownloader::default(),
                 NoopBlockExecutorProvider::default(),
                 self.toml_config().stages.clone(),
-                self.prune_modes().unwrap_or_default(),
+                self.prune_modes(),
             ))
             .build(
                 factory.clone(),
-                StaticFileProducer::new(
-                    factory.clone(),
-                    self.prune_modes().unwrap_or_default(),
-                ),
+                StaticFileProducer::new(factory.clone(), self.prune_modes()),
             );
 
         // Unwinds to block
@@ -509,9 +518,12 @@ where
     }
 
     /// Creates a `BlockchainProvider` and attaches it to the launch context.
-    pub async fn with_blockchain_db(
+    pub fn with_blockchain_db<T>(
         self,
-    ) -> eyre::Result<LaunchContextWith<Attached<WithConfigs, WithMeteredProviders<DB>>>> {
+    ) -> eyre::Result<LaunchContextWith<Attached<WithConfigs, WithMeteredProviders<DB, T>>>>
+    where
+        T: FullNodeTypes<Provider = BlockchainProvider<<T as FullNodeTypes>::DB>>,
+    {
         let tree_config = BlockchainTreeConfig::default();
 
         // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. This will be removed once the Blockchain provider no longer depends on an instance of the tree:
         let (canon_state_notification_sender, _receiver) =
             tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2);
 
         let blockchain_db = BlockchainProvider::new(
             self.provider_factory().clone(),
             Arc::new(NoopBlockchainTree::with_canon_state_notifications(
                 canon_state_notification_sender.clone(),
             )),
         )?;
 
         let metered_providers = WithMeteredProviders {
-            provider_factory: self.provider_factory().clone(),
+            db_provider_container: WithMeteredProvider {
+                provider_factory: self.provider_factory().clone(),
+                metrics_sender: self.sync_metrics_tx(),
+            },
             blockchain_db,
-            metrics_sender: self.sync_metrics_tx(),
             tree_config,
             canon_state_notification_sender,
+            // we store here a reference to T.
+            phantom_data: PhantomData,
         };
 
         let ctx = LaunchContextWith {
@@ -542,9 +558,10 @@
     }
 }
 
-impl<DB> LaunchContextWith<Attached<WithConfigs, WithMeteredProviders<DB>>>
+impl<DB, T> LaunchContextWith<Attached<WithConfigs, WithMeteredProviders<DB, T>>>
 where
     DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static,
+    T: FullNodeTypes<Provider = BlockchainProvider<DB>>,
 {
     /// Returns access to the underlying database.
     pub fn database(&self) -> &DB {
@@ -553,20 +570,122 @@ where
     /// Returns the configured `ProviderFactory`.
     pub const fn provider_factory(&self) -> &ProviderFactory<DB> {
-        &self.right().provider_factory
+        &self.right().db_provider_container.provider_factory
     }
 
-    /// Returns the static file provider to interact with the static files.
-    pub fn static_file_provider(&self) -> StaticFileProvider {
-        self.provider_factory().static_file_provider()
+    /// Fetches the head block from the database.
+    ///
+    /// If the database is empty, returns the genesis block.
+    pub fn lookup_head(&self) -> eyre::Result<Head> {
+        self.node_config()
+            .lookup_head(self.provider_factory().clone())
+            .wrap_err("the head block is missing")
     }
 
-    /// Creates a new [`StaticFileProducer`] with the attached database.
-    pub fn static_file_producer(&self) -> StaticFileProducer<DB> {
-        StaticFileProducer::new(
+    /// Returns the metrics sender.
+    pub fn sync_metrics_tx(&self) -> UnboundedSender<MetricEvent> {
+        self.right().db_provider_container.metrics_sender.clone()
+    }
+
+    /// Returns a reference to the `BlockchainProvider`.
+    pub const fn blockchain_db(&self) -> &BlockchainProvider<DB> {
+        &self.right().blockchain_db
+    }
+
+    /// Returns a reference to the `BlockchainTreeConfig`.
+    pub const fn tree_config(&self) -> &BlockchainTreeConfig {
+        &self.right().tree_config
+    }
+
+    /// Returns the `CanonStateNotificationSender`.
+    pub fn canon_state_notification_sender(&self) -> CanonStateNotificationSender {
+        self.right().canon_state_notification_sender.clone()
+    }
+
+    /// Creates a `NodeAdapter` and attaches it to the launch context.
+    pub async fn with_components<CB>(
+        self,
+        components_builder: CB,
+        on_component_initialized: Box<
+            dyn OnComponentInitializedHook<NodeAdapter<T, CB::Components>>,
+        >,
+    ) -> eyre::Result<LaunchContextWith<Attached<WithConfigs, WithComponents<DB, T, CB>>>>
+    where
+        CB: NodeComponentsBuilder<T>,
+    {
+        // fetch the head block from the database
+        let head = self.lookup_head()?;
+
+        let builder_ctx = BuilderContext::new(
+            head,
+            self.blockchain_db().clone(),
+            self.task_executor().clone(),
+            self.configs().clone(),
+        );
+
+        debug!(target: "reth::cli", "creating components");
+        let components = components_builder.build_components(&builder_ctx).await?;
+
+        let consensus: Arc<dyn Consensus> = Arc::new(components.consensus().clone());
+
+        let tree_externals = TreeExternals::new(
             self.provider_factory().clone(),
-            self.prune_modes().unwrap_or_default(),
-        )
+            consensus.clone(),
+            components.block_executor().clone(),
+        );
+        let tree = BlockchainTree::new(tree_externals, *self.tree_config(), self.prune_modes())?
+            .with_sync_metrics_tx(self.sync_metrics_tx())
+            // Note: This is required because we need to ensure that both the components and the
+            // tree are using the same channel for canon state notifications. This will be removed
+            // once the Blockchain provider no longer depends on an instance of the tree
+            .with_canon_state_notification_sender(self.canon_state_notification_sender());
+
+        let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree));
+
+        // Replace the tree component with the actual tree
+        let blockchain_db = self.blockchain_db().clone().with_tree(blockchain_tree);
+
+        debug!(target: "reth::cli", "configured blockchain tree");
+
+        let node_adapter = NodeAdapter {
+            components,
+            task_executor: self.task_executor().clone(),
+            provider: blockchain_db.clone(),
+        };
+
+        debug!(target: "reth::cli", "calling on_component_initialized hook");
+        on_component_initialized.on_event(node_adapter.clone())?;
+
+        let components_container = WithComponents {
+            db_provider_container: WithMeteredProvider {
+                provider_factory: self.provider_factory().clone(),
+                metrics_sender: self.sync_metrics_tx(),
+            },
+            blockchain_db,
+            tree_config: self.right().tree_config,
+            node_adapter,
+            head,
+            consensus,
+        };
+
+        let ctx = LaunchContextWith {
+            inner: self.inner,
+            attachment: self.attachment.map_right(|_| components_container),
+        };
+
+        Ok(ctx)
+    }
+}
+
+impl<DB, T, CB> LaunchContextWith<Attached<WithConfigs, WithComponents<DB, T, CB>>>
+where
+    DB: Database + DatabaseMetrics + Send + Sync + Clone + 'static,
+    T: FullNodeTypes<Provider = BlockchainProvider<DB>>,
+    CB: NodeComponentsBuilder<T>,
+{
+    /// Returns the configured `ProviderFactory`.
+    pub const fn provider_factory(&self) -> &ProviderFactory<DB> {
+        &self.right().db_provider_container.provider_factory
     }
 
     /// Returns the max block that the node should run to, looking it up from the network if
@@ -578,18 +697,24 @@ where
         self.node_config().max_block(client, self.provider_factory().clone()).await
     }
 
-    /// Fetches the head block from the database.
-    ///
-    /// If the database is empty, returns the genesis block.
-    pub fn lookup_head(&self) -> eyre::Result<Head> {
-        self.node_config()
-            .lookup_head(self.provider_factory().clone())
-            .wrap_err("the head block is missing")
+    /// Returns the static file provider to interact with the static files.
+    pub fn static_file_provider(&self) -> StaticFileProvider {
+        self.provider_factory().static_file_provider()
     }
 
-    /// Returns the metrics sender.
-    pub fn sync_metrics_tx(&self) -> UnboundedSender<MetricEvent> {
-        self.right().metrics_sender.clone()
+    /// Creates a new [`StaticFileProducer`] with the attached database.
+    pub fn static_file_producer(&self) -> StaticFileProducer<DB> {
+        StaticFileProducer::new(self.provider_factory().clone(), self.prune_modes())
+    }
+
+    /// Returns the current head block.
+    pub const fn head(&self) -> Head {
+        self.right().head
+    }
+
+    /// Returns the configured `NodeAdapter`.
+    pub const fn node_adapter(&self) -> &NodeAdapter<T, CB::Components> {
+        &self.right().node_adapter
     }
 
     /// Returns a reference to the `BlockchainProvider`.
@@ -597,14 +722,24 @@
         &self.right().blockchain_db
     }
 
+    /// Returns the configured `Consensus`.
+    pub fn consensus(&self) -> Arc<dyn Consensus> {
+        self.right().consensus.clone()
+    }
+
+    /// Returns the metrics sender.
+    pub fn sync_metrics_tx(&self) -> UnboundedSender<MetricEvent> {
+        self.right().db_provider_container.metrics_sender.clone()
+    }
+
     /// Returns a reference to the `BlockchainTreeConfig`.
     pub const fn tree_config(&self) -> &BlockchainTreeConfig {
         &self.right().tree_config
     }
 
-    /// Returns the `CanonStateNotificationSender`.
-    pub fn canon_state_notification_sender(&self) -> CanonStateNotificationSender {
-        self.right().canon_state_notification_sender.clone()
+    /// Returns the node adapter components.
+    pub const fn components(&self) -> &CB::Components {
+        &self.node_adapter().components
     }
 }
 
@@ -668,23 +803,40 @@ pub struct WithConfigs {
     pub toml_config: reth_config::Config,
 }
 
+/// Helper container type to bundle the [`ProviderFactory`] and the metrics
+/// sender.
+#[derive(Debug, Clone)]
+pub struct WithMeteredProvider<DB> {
+    provider_factory: ProviderFactory<DB>,
+    metrics_sender: UnboundedSender<MetricEvent>,
+}
+
 /// Helper container to bundle the [`ProviderFactory`], [`BlockchainProvider`]
 /// and a metrics sender.
 #[allow(missing_debug_implementations)]
-pub struct WithMeteredProviders<DB> {
-    provider_factory: ProviderFactory<DB>,
+pub struct WithMeteredProviders<DB, T> {
+    db_provider_container: WithMeteredProvider<DB>,
     blockchain_db: BlockchainProvider<DB>,
-    metrics_sender: UnboundedSender<MetricEvent>,
     canon_state_notification_sender: CanonStateNotificationSender,
     tree_config: BlockchainTreeConfig,
+    // this field is used to store a reference to the FullNodeTypes so that we
+    // can build the components in `with_components` method.
+    phantom_data: PhantomData<T>,
 }
 
-/// Helper container type to bundle athe [`ProviderFactory`] and the metrics
-/// sender.
-#[derive(Debug)]
-pub struct WithMeteredProvider<DB> {
-    provider_factory: ProviderFactory<DB>,
-    metrics_sender: UnboundedSender<MetricEvent>,
+/// Helper container to bundle the metered providers container and [`NodeAdapter`].
+#[allow(missing_debug_implementations)]
+pub struct WithComponents<DB, T, CB>
+where
+    T: FullNodeTypes<Provider = BlockchainProvider<DB>>,
+    CB: NodeComponentsBuilder<T>,
+{
+    db_provider_container: WithMeteredProvider<DB>,
+    tree_config: BlockchainTreeConfig,
+    blockchain_db: BlockchainProvider<DB>,
+    node_adapter: NodeAdapter<T, CB::Components>,
+    head: Head,
+    consensus: Arc<dyn Consensus>,
+}
 
 #[cfg(test)]
diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs
index d60d1a42e..3f82d5c93 100644
--- a/crates/node/builder/src/launch/mod.rs
+++ b/crates/node/builder/src/launch/mod.rs
@@ -5,33 +5,29 @@ use crate::{
     components::{NodeComponents, NodeComponentsBuilder},
     hooks::NodeHooks,
     node::FullNode,
-    BuilderContext, NodeBuilderWithComponents, NodeHandle,
+    NodeBuilderWithComponents, NodeHandle,
 };
 use futures::{future::Either, stream, stream_select, StreamExt};
 use reth_beacon_consensus::{
     hooks::{EngineHooks, PruneHook, StaticFileHook},
     BeaconConsensusEngine,
 };
-use reth_blockchain_tree::{BlockchainTree, ShareableBlockchainTree, TreeExternals};
-
 #[cfg(feature = "bsc")]
-use reth_bsc_consensus::ParliaEngineBuilder;
-use reth_consensus::Consensus;
+use reth_bsc_engine::ParliaEngineBuilder;
 use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider};
+use reth_engine_util::EngineMessageStreamExt;
 use reth_exex::ExExManagerHandle;
 use reth_network::NetworkEvents;
-use reth_node_api::{FullNodeComponents, FullNodeTypes};
+use reth_node_api::FullNodeTypes;
 use reth_node_core::{
     dirs::{ChainPath, DataDirPath},
-    engine::EngineMessageStreamExt,
     exit::NodeExitFuture,
     version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA},
 };
 use reth_node_events::{cl::ConsensusLayerHealthEvents, node};
+use reth_primitives::format_ether;
 #[cfg(feature = "bsc")]
 use reth_primitives::parlia::ParliaConfig;
-
-use reth_primitives::format_ether;
 use reth_provider::providers::BlockchainProvider;
 use reth_rpc_engine_api::EngineApi;
 use reth_rpc_types::engine::ClientVersionV1;
@@ -46,7 +42,6 @@ pub mod common;
 pub use common::LaunchContext;
 mod exex;
 pub use exex::ExExLauncher;
-
 /// A general purpose trait that launches a new node of any kind.
 ///
 /// Acts as a node factory.
@@ -106,12 +101,13 @@ where
             add_ons: NodeAddOns { hooks, rpc, exexs: installed_exex },
             config,
         } = target;
+        let NodeHooks { on_component_initialized, on_node_started, .. } = hooks;
 
         // setup the launch context
         let ctx = ctx
             .with_configured_globals()
             // load the toml config
-            .with_loaded_toml_config(config).await?
+            .with_loaded_toml_config(config)?
             // add resolved peers
             .with_resolved_peers().await?
             // attach the database
@@ -132,61 +128,23 @@ where
                 info!(target: "reth::cli", "\n{}", this.chain_spec().display_hardforks());
             })
             .with_metrics()
-            .with_blockchain_db().await?;
-
-        // fetch the head block from the database
-        let head = ctx.lookup_head()?;
-
-        let builder_ctx = BuilderContext::new(
-            head,
-            ctx.blockchain_db().clone(),
-            ctx.task_executor().clone(),
-            ctx.configs().clone(),
-        );
-
-        debug!(target: "reth::cli", "creating components");
-        let components = components_builder.build_components(&builder_ctx).await?;
-
-        let consensus: Arc<dyn Consensus> = Arc::new(components.consensus().clone());
-
-        let tree_externals = TreeExternals::new(
-            ctx.provider_factory().clone(),
-            consensus.clone(),
-            components.block_executor().clone(),
-        );
-        let tree = BlockchainTree::new(tree_externals, *ctx.tree_config(), ctx.prune_modes())?
-            .with_sync_metrics_tx(ctx.sync_metrics_tx())
-            // Note: This is required because we need to ensure that both the components and the
-            // tree are using the same channel for canon state notifications. This will be removed
-            // once the Blockchain provider no longer depends on an instance of the tree
-            .with_canon_state_notification_sender(ctx.canon_state_notification_sender());
-
-        let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree));
-
-        // Replace the tree component with the actual tree
-        let blockchain_db = ctx.blockchain_db().clone().with_tree(blockchain_tree);
-
-        debug!(target: "reth::cli", "configured blockchain tree");
-
-        let NodeHooks { on_component_initialized, on_node_started, .. } = hooks;
-
-        let node_adapter = NodeAdapter {
-            components,
-            task_executor: ctx.task_executor().clone(),
-            provider: blockchain_db.clone(),
-        };
-
-        debug!(target: "reth::cli", "calling on_component_initialized hook");
-        on_component_initialized.on_event(node_adapter.clone())?;
+            // passing FullNodeTypes as type parameter here so that we can build
+            // later the components.
+            .with_blockchain_db::<T>()?
+            .with_components(components_builder, on_component_initialized).await?;
 
         // spawn exexs
-        let exex_manager_handle =
-            ExExLauncher::new(head, node_adapter.clone(), installed_exex, ctx.configs().clone())
-                .launch()
-                .await;
+        let exex_manager_handle = ExExLauncher::new(
+            ctx.head(),
+            ctx.node_adapter().clone(),
+            installed_exex,
+            ctx.configs().clone(),
+        )
+        .launch()
+        .await;
 
         // create pipeline
-        let network_client = node_adapter.network().fetch_client().await?;
+        let network_client = ctx.components().network().fetch_client().await?;
         let (consensus_engine_tx, consensus_engine_rx) = unbounded_channel();
 
         let node_config = ctx.node_config();
@@ -221,33 +179,32 @@ where
 
         // install auto-seal
         let mining_mode =
-            ctx.dev_mining_mode(node_adapter.components.pool().pending_transactions_listener());
+            ctx.dev_mining_mode(ctx.components().pool().pending_transactions_listener());
         info!(target: "reth::cli", mode=%mining_mode, "configuring dev mining mode");
 
         let (_, client, mut task) = reth_auto_seal_consensus::AutoSealBuilder::new(
             ctx.chain_spec(),
-            blockchain_db.clone(),
-            node_adapter.components.pool().clone(),
+            ctx.blockchain_db().clone(),
+            ctx.components().pool().clone(),
             consensus_engine_tx.clone(),
             mining_mode,
-            node_adapter.components.block_executor().clone(),
+            ctx.components().block_executor().clone(),
         )
         .build();
 
         let pipeline = crate::setup::build_networked_pipeline(
             &ctx.toml_config().stages,
             client.clone(),
-            consensus.clone(),
+            ctx.consensus(),
             ctx.provider_factory().clone(),
             ctx.task_executor(),
             ctx.sync_metrics_tx(),
             ctx.prune_config(),
             max_block,
            static_file_producer,
-            node_adapter.components.block_executor().clone(),
+            ctx.components().block_executor().clone(),
             pipeline_exex_handle,
-        )
-        .await?;
+        )?;
 
         let pipeline_events = pipeline.events();
         task.set_pipeline_events(pipeline_events);
@@ -259,24 +216,24 @@ where
         let pipeline = crate::setup::build_networked_pipeline(
             &ctx.toml_config().stages,
             network_client.clone(),
-            consensus.clone(),
+            ctx.consensus(),
             ctx.provider_factory().clone(),
             ctx.task_executor(),
             ctx.sync_metrics_tx(),
             ctx.prune_config(),
             max_block,
             static_file_producer,
-            node_adapter.components.block_executor().clone(),
+            ctx.components().block_executor().clone(),
             pipeline_exex_handle,
-        )
-        .await?;
+        )?;
 
         #[cfg(feature = "bsc")]
         {
-            let engine_rx = node_adapter.components.network().get_to_engine_rx();
+            let engine_rx =
ctx.node_adapter().components.network().get_to_engine_rx(); let client = ParliaEngineBuilder::new( ctx.chain_spec(), ParliaConfig::default(), - blockchain_db.clone(), + ctx.blockchain_db().clone(), + ctx.blockchain_db().clone(), consensus_engine_tx.clone(), engine_rx, network_client.clone(), @@ -311,11 +268,11 @@ where let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( client, pipeline, - blockchain_db.clone(), + ctx.blockchain_db().clone(), Box::new(ctx.task_executor().clone()), - Box::new(node_adapter.components.network().clone()), + Box::new(ctx.components().network().clone()), max_block, - node_adapter.components.payload_builder().clone(), + ctx.components().payload_builder().clone(), initial_target, reth_beacon_consensus::MIN_BLOCKS_FOR_PIPELINE_RUN, consensus_engine_tx, @@ -325,12 +282,12 @@ where info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( - node_adapter.components.network().event_listener().map(Into::into), + ctx.components().network().event_listener().map(Into::into), beacon_engine_handle.event_listener().map(Into::into), pipeline_events.map(Into::into), if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { Either::Left( - ConsensusLayerHealthEvents::new(Box::new(blockchain_db.clone())) + ConsensusLayerHealthEvents::new(Box::new(ctx.blockchain_db().clone())) .map(Into::into), ) } else { @@ -342,8 +299,8 @@ where ctx.task_executor().spawn_critical( "events task", node::handle_events( - Some(node_adapter.components.network().clone()), - Some(head.number), + Some(Box::new(ctx.components().network().clone())), + Some(ctx.head().number), events, database.clone(), ), @@ -356,10 +313,10 @@ where commit: VERGEN_GIT_SHA.to_string(), }; let engine_api = EngineApi::new( - blockchain_db.clone(), + ctx.blockchain_db().clone(), ctx.chain_spec(), beacon_engine_handle, - node_adapter.components.payload_builder().clone().into(), + ctx.components().payload_builder().clone().into(), Box::new(ctx.task_executor().clone()), client, ); @@ -369,8 +326,8 @@ where let jwt_secret = ctx.auth_jwt_secret()?; // Start RPC servers - let (rpc_server_handles, mut rpc_registry) = crate::rpc::launch_rpc_servers( - node_adapter.clone(), + let (rpc_server_handles, rpc_registry) = crate::rpc::launch_rpc_servers( + ctx.node_adapter().clone(), engine_api, ctx.node_config(), jwt_secret, @@ -434,12 +391,12 @@ where } let full_node = FullNode { - evm_config: node_adapter.components.evm_config().clone(), - block_executor: node_adapter.components.block_executor().clone(), - pool: node_adapter.components.pool().clone(), - network: node_adapter.components.network().clone(), - provider: node_adapter.provider.clone(), - payload_builder: node_adapter.components.payload_builder().clone(), + evm_config: ctx.components().evm_config().clone(), + block_executor: ctx.components().block_executor().clone(), + pool: ctx.components().pool().clone(), + network: ctx.components().network().clone(), + provider: ctx.node_adapter().provider.clone(), + payload_builder: ctx.components().payload_builder().clone(), task_executor: ctx.task_executor().clone(), rpc_server_handles, rpc_registry, @@ -450,7 +407,10 @@ where on_node_started.on_event(full_node.clone())?; let handle = NodeHandle { - node_exit_future: NodeExitFuture::new(rx, full_node.config.debug.terminate), + node_exit_future: NodeExitFuture::new( + async { Ok(rx.await??) 
},
+                full_node.config.debug.terminate,
+            ),
             node: full_node,
         };
diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs
index 0b82b2e9e..fe8d99ed6 100644
--- a/crates/node/builder/src/node.rs
+++ b/crates/node/builder/src/node.rs
@@ -11,7 +11,7 @@ use reth_payload_builder::PayloadBuilderHandle;
 use reth_provider::ChainSpecProvider;
 use reth_rpc_builder::{auth::AuthServerHandle, RpcServerHandle};
 use reth_tasks::TaskExecutor;
-use std::sync::Arc;
+use std::{marker::PhantomData, sync::Arc};
 
 // re-export the node api types
 use crate::components::NodeComponentsBuilder;
@@ -28,10 +28,48 @@ pub trait Node<N: FullNodeTypes>: NodeTypes + Clone {
     fn components_builder(self) -> Self::ComponentsBuilder;
 }
 
+/// A [`Node`] type builder
+#[derive(Clone, Default, Debug)]
+pub struct AnyNode<N = (), C = ()>(PhantomData<N>, C);
+
+impl<N, C> AnyNode<N, C> {
+    /// Configures the types of the node.
+    pub fn types<T>(self) -> AnyNode<T, C> {
+        AnyNode::<T, C>(PhantomData::<T>, self.1)
+    }
+
+    /// Sets the node components builder.
+    pub fn components_builder<T>(self, value: T) -> AnyNode<N, T> {
+        AnyNode::<N, T>(PhantomData::<N>, value)
+    }
+}
+
+impl<N, C> NodeTypes for AnyNode<N, C>
+where
+    N: FullNodeTypes,
+    C: NodeComponentsBuilder<N> + Sync + Unpin + 'static,
+{
+    type Primitives = N::Primitives;
+
+    type Engine = N::Engine;
+}
+
+impl<N, C> Node<N> for AnyNode<N, C>
+where
+    N: FullNodeTypes + Clone,
+    C: NodeComponentsBuilder<N> + Clone + Sync + Unpin + 'static,
+{
+    type ComponentsBuilder = C;
+
+    fn components_builder(self) -> Self::ComponentsBuilder {
+        self.1
+    }
+}
+
 /// The launched node with all components including RPC handlers.
 ///
 /// This can be used to interact with the launched node.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct FullNode<Node: FullNodeComponents> {
     /// The evm configuration.
     pub evm_config: Node::Evm,
@@ -95,21 +133,3 @@ impl<Node: FullNodeComponents> FullNode<Node> {
         self.auth_server_handle().ipc_client().await
     }
 }
-
-impl<Node: FullNodeComponents> Clone for FullNode<Node> {
-    fn clone(&self) -> Self {
-        Self {
-            evm_config: self.evm_config.clone(),
-            block_executor: self.block_executor.clone(),
-            pool: self.pool.clone(),
-            network: self.network.clone(),
-            provider: self.provider.clone(),
-            payload_builder: self.payload_builder.clone(),
-            task_executor: self.task_executor.clone(),
-            rpc_server_handles: self.rpc_server_handles.clone(),
-            rpc_registry: self.rpc_registry.clone(),
-            config: self.config.clone(),
-            data_dir: self.data_dir.clone(),
-        }
-    }
-}
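`AnyNode` lets a caller assemble an ad-hoc `Node` implementation from just a types marker plus a components builder, without declaring a dedicated struct. A hedged sketch of the intended call shape (re-export paths assumed; any `FullNodeTypes` marker and matching builder pair works):

use reth_node_builder::AnyNode;

// Given a types marker N and a components builder C, chain the two setters;
// the result implements `Node<N>` via the impls added above.
fn make_any_node<N, C>(components: C) -> AnyNode<N, C> {
    AnyNode::default().types::<N>().components_builder(components)
}

Because `Clone` is now derived on `FullNode` (bounded by the `Clone` components), the hand-written `Clone` impl below it becomes redundant and is removed.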
diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs
index 9a6ada8f9..03ae899cb 100644
--- a/crates/node/builder/src/rpc.rs
+++ b/crates/node/builder/src/rpc.rs
@@ -1,22 +1,24 @@
 //! Builder support for rpc components.
 
+use std::{
+    fmt,
+    ops::{Deref, DerefMut},
+};
+
 use futures::TryFutureExt;
 use reth_network::NetworkHandle;
 use reth_node_api::FullNodeComponents;
 use reth_node_core::{node_config::NodeConfig, rpc::api::EngineApiServer};
 use reth_payload_builder::PayloadBuilderHandle;
+use reth_rpc::eth::EthApi;
 use reth_rpc_builder::{
     auth::{AuthRpcModule, AuthServerHandle},
     config::RethRpcServerConfig,
-    RethModuleRegistry, RpcModuleBuilder, RpcServerHandle, TransportRpcModules,
+    EthApiBuild, RpcModuleBuilder, RpcRegistryInner, RpcServerHandle, TransportRpcModules,
 };
 use reth_rpc_layer::JwtSecret;
 use reth_tasks::TaskExecutor;
 use reth_tracing::tracing::{debug, info};
-use std::{
-    fmt,
-    ops::{Deref, DerefMut},
-};
 
 /// Contains the handles to the spawned RPC servers.
 ///
@@ -145,27 +147,28 @@ impl ExtendRpcModules for () {
     }
 }
 
-/// Helper wrapper type to encapsulate the [`RethModuleRegistry`] over components trait.
+/// Helper wrapper type to encapsulate the [`RpcRegistryInner`] over components trait.
 #[derive(Debug)]
+#[allow(clippy::type_complexity)]
 pub struct RpcRegistry<Node: FullNodeComponents> {
-    pub(crate) registry: RethModuleRegistry<
+    pub(crate) registry: RpcRegistryInner<
         Node::Provider,
         Node::Pool,
         NetworkHandle,
         TaskExecutor,
         Node::Provider,
-        Node::Evm,
+        EthApi<Node::Provider, Node::Pool, NetworkHandle, Node::Evm>,
     >,
 }
 
 impl<Node: FullNodeComponents> Deref for RpcRegistry<Node> {
-    type Target = RethModuleRegistry<
+    type Target = RpcRegistryInner<
         Node::Provider,
         Node::Pool,
         NetworkHandle,
         TaskExecutor,
         Node::Provider,
-        Node::Evm,
+        EthApi<Node::Provider, Node::Pool, NetworkHandle, Node::Evm>,
     >;
 
     fn deref(&self) -> &Self::Target {
@@ -185,12 +188,13 @@ impl<Node: FullNodeComponents> Clone for RpcRegistry<Node> {
     }
 }
 
-/// Helper container to encapsulate [`RethModuleRegistry`], [`TransportRpcModules`] and
+/// Helper container to encapsulate [`RpcRegistryInner`], [`TransportRpcModules`] and
 /// [`AuthRpcModule`].
 ///
 /// This can be used to access installed modules, or create commonly used handlers like
-/// [`reth_rpc::EthApi`], and ultimately merge additional rpc handler into the configured transport
-/// modules [`TransportRpcModules`] as well as configured authenticated methods [`AuthRpcModule`].
+/// [`reth_rpc::eth::EthApi`], and ultimately merge additional rpc handler into the configured
+/// transport modules [`TransportRpcModules`] as well as configured authenticated methods
+/// [`AuthRpcModule`].
 #[allow(missing_debug_implementations)]
 pub struct RpcContext<'a, Node: FullNodeComponents> {
     /// The node components.
@@ -201,7 +205,7 @@ pub struct RpcContext<'a, Node: FullNodeComponents> {
 
     /// A Helper type the holds instances of the configured modules.
     ///
-    /// This provides easy access to rpc handlers, such as [`RethModuleRegistry::eth_api`].
+    /// This provides easy access to rpc handlers, such as [`RpcRegistryInner::eth_api`].
     pub registry: &'a mut RpcRegistry<Node>,
 
     /// Holds installed modules per transport type.
     ///
@@ -271,7 +275,7 @@ where
         .with_events(node.provider().clone())
         .with_executor(node.task_executor().clone())
         .with_evm_config(node.evm_config().clone())
-        .build_with_auth_server(module_config, engine_api);
+        .build_with_auth_server(module_config, engine_api, EthApiBuild::build);
 
     let mut registry = RpcRegistry { registry };
     let ctx = RpcContext {
@@ -285,7 +289,8 @@ where
     extend_rpc_modules.extend_rpc_modules(ctx)?;
 
     let server_config = config.rpc.rpc_server_config();
-    let launch_rpc = modules.clone().start_server(server_config).map_ok(|handle| {
+    let cloned_modules = modules.clone();
+    let launch_rpc = server_config.start(&cloned_modules).map_ok(|handle| {
         if let Some(path) = handle.ipc_endpoint() {
             info!(target: "reth::cli", %path, "RPC IPC server started");
         }
diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs
index cf4d090dc..294d7a8f6 100644
--- a/crates/node/builder/src/setup.rs
+++ b/crates/node/builder/src/setup.rs
@@ -24,7 +24,7 @@ use tokio::sync::watch;
 
 /// Constructs a [Pipeline] that's wired to the network
 #[allow(clippy::too_many_arguments)]
-pub async fn build_networked_pipeline<DB, Client, Executor>(
+pub fn build_networked_pipeline<DB, Client, Executor>(
     config: &StageConfig,
     client: Client,
     consensus: Arc<dyn Consensus>,
@@ -63,15 +63,14 @@ where
         static_file_producer,
         executor,
         exex_manager_handle,
-    )
-    .await?;
+    )?;
 
     Ok(pipeline)
 }
 
 /// Builds the [Pipeline] with the given [`ProviderFactory`] and downloaders.
 #[allow(clippy::too_many_arguments)]
-pub async fn build_pipeline<DB, H, B, Executor>(
+pub fn build_pipeline<DB, H, B, Executor>(
     provider_factory: ProviderFactory<DB>,
     stage_config: &StageConfig,
     header_downloader: H,
diff --git a/crates/node-core/Cargo.toml b/crates/node/core/Cargo.toml
similarity index 89%
rename from crates/node-core/Cargo.toml
rename to crates/node/core/Cargo.toml
index 7c3814153..e84ffbe8d 100644
--- a/crates/node-core/Cargo.toml
+++ b/crates/node/core/Cargo.toml
@@ -14,6 +14,7 @@ workspace = true
 # reth
 reth-chainspec.workspace = true
 reth-primitives.workspace = true
+reth-cli-util.workspace = true
 reth-fs-util.workspace = true
 reth-db = { workspace = true, features = ["mdbx"] }
 reth-db-api.workspace = true
@@ -21,11 +22,12 @@ reth-storage-errors.workspace = true
 reth-provider.workspace = true
 reth-network = { workspace = true, features = ["serde"] }
 reth-network-p2p.workspace = true
-reth-rpc.workspace = true
+reth-rpc-eth-types.workspace = true
 reth-rpc-server-types.workspace = true
 reth-rpc-types.workspace = true
 reth-rpc-types-compat.workspace = true
 reth-rpc-api = { workspace = true, features = ["client"] }
+reth-rpc-eth-api = { workspace = true, features = ["client"] }
 reth-transaction-pool.workspace = true
 reth-tracing.workspace = true
 reth-config.workspace = true
@@ -33,10 +35,8 @@ reth-discv4.workspace = true
 reth-discv5.workspace = true
 reth-net-nat.workspace = true
 reth-network-peers.workspace = true
-reth-engine-primitives.workspace = true
 reth-tasks.workspace = true
 reth-consensus-common.workspace = true
-reth-beacon-consensus.workspace = true
 reth-prune-types.workspace = true
 reth-stages-types.workspace = true
 
@@ -46,8 +46,6 @@ alloy-rpc-types-engine.workspace = true
 
 # async
 tokio.workspace = true
-tokio-util.workspace = true
-pin-project.workspace = true
 
 # metrics
 reth-metrics.workspace = true
@@ -60,8 +58,7 @@ metrics-util.workspace = true
 eyre.workspace = true
 clap = { workspace = true, features = ["derive"] }
 humantime.workspace = true
-thiserror.workspace = true
-const-str = "0.5.6"
+const_format.workspace = true
 rand.workspace = true
 derive_more.workspace = true
 once_cell.workspace = true
@@ -69,7 +66,6 @@ once_cell.workspace = true
 # io
 dirs-next = "2.0.0"
 shellexpand = "3.0.0"
-serde.workspace = true
 serde_json.workspace = true
 
 # http/rpc
@@ -103,10 +99,10 @@ proptest.workspace = true
 [features]
 optimism = [
     "reth-primitives/optimism",
-    "reth-rpc/optimism",
     "reth-provider/optimism",
     "reth-rpc-types-compat/optimism",
-    "reth-beacon-consensus/optimism",
+    "reth-rpc-eth-api/optimism",
+    "reth-rpc-eth-types/optimism",
 ]
 opbnb = [
     "reth-primitives/opbnb",
diff --git a/crates/node-core/build.rs b/crates/node/core/build.rs
similarity index 90%
rename from crates/node-core/build.rs
rename to crates/node/core/build.rs
index 043505cdf..1a78793a4 100644
--- a/crates/node-core/build.rs
+++ b/crates/node/core/build.rs
@@ -8,19 +8,20 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     EmitBuilder::builder()
         .git_describe(false, true, None)
         .git_dirty(true)
-        .git_sha(true)
+        .git_sha(false)
         .build_timestamp()
         .cargo_features()
         .cargo_target_triple()
         .emit_and_set()?;
 
     let sha = env::var("VERGEN_GIT_SHA")?;
+    let sha_short = &sha[0..7];
 
     let is_dirty = env::var("VERGEN_GIT_DIRTY")?
== "true"; // > git describe --always --tags // if not on a tag: v0.2.0-beta.3-82-g1939939b // if on a tag: v0.2.0-beta.3 - let not_on_tag = env::var("VERGEN_GIT_DESCRIBE")?.ends_with(&format!("-g{sha}")); + let not_on_tag = env::var("VERGEN_GIT_DESCRIBE")?.ends_with(&format!("-g{sha_short}")); let is_dev = is_dirty || not_on_tag; println!("cargo:rustc-env=RETH_VERSION_SUFFIX={}", if is_dev { "-dev" } else { "" }); Ok(()) diff --git a/crates/node-core/src/args/benchmark_args.rs b/crates/node/core/src/args/benchmark_args.rs similarity index 100% rename from crates/node-core/src/args/benchmark_args.rs rename to crates/node/core/src/args/benchmark_args.rs diff --git a/crates/node-core/src/args/database.rs b/crates/node/core/src/args/database.rs similarity index 100% rename from crates/node-core/src/args/database.rs rename to crates/node/core/src/args/database.rs diff --git a/crates/node-core/src/args/datadir_args.rs b/crates/node/core/src/args/datadir_args.rs similarity index 100% rename from crates/node-core/src/args/datadir_args.rs rename to crates/node/core/src/args/datadir_args.rs diff --git a/crates/node-core/src/args/debug.rs b/crates/node/core/src/args/debug.rs similarity index 100% rename from crates/node-core/src/args/debug.rs rename to crates/node/core/src/args/debug.rs diff --git a/crates/node-core/src/args/dev.rs b/crates/node/core/src/args/dev.rs similarity index 100% rename from crates/node-core/src/args/dev.rs rename to crates/node/core/src/args/dev.rs diff --git a/crates/node-core/src/args/gas_price_oracle.rs b/crates/node/core/src/args/gas_price_oracle.rs similarity index 98% rename from crates/node-core/src/args/gas_price_oracle.rs rename to crates/node/core/src/args/gas_price_oracle.rs index 5148fdca3..abdd8e142 100644 --- a/crates/node-core/src/args/gas_price_oracle.rs +++ b/crates/node/core/src/args/gas_price_oracle.rs @@ -1,6 +1,6 @@ use crate::primitives::U256; use clap::Args; -use reth_rpc::eth::gas_oracle::GasPriceOracleConfig; +use reth_rpc_eth_types::GasPriceOracleConfig; use reth_rpc_server_types::constants::gas_oracle::{ DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE, DEFAULT_MAX_GAS_PRICE, diff --git a/crates/node-core/src/args/log.rs b/crates/node/core/src/args/log.rs similarity index 100% rename from crates/node-core/src/args/log.rs rename to crates/node/core/src/args/log.rs diff --git a/crates/node-core/src/args/mod.rs b/crates/node/core/src/args/mod.rs similarity index 95% rename from crates/node-core/src/args/mod.rs rename to crates/node/core/src/args/mod.rs index 469ff72ea..7d1f61903 100644 --- a/crates/node-core/src/args/mod.rs +++ b/crates/node/core/src/args/mod.rs @@ -24,9 +24,6 @@ pub use database::DatabaseArgs; mod log; pub use log::{ColorMode, LogArgs}; -mod secret_key; -pub use secret_key::{get_secret_key, SecretKeyError}; - /// `PayloadBuilderArgs` struct for configuring the payload builder mod payload_builder; pub use payload_builder::PayloadBuilderArgs; diff --git a/crates/node-core/src/args/network.rs b/crates/node/core/src/args/network.rs similarity index 78% rename from crates/node-core/src/args/network.rs rename to crates/node/core/src/args/network.rs index a5763495c..39af9480d 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node/core/src/args/network.rs @@ -2,11 +2,11 @@ use crate::version::P2P_CLIENT_VERSION; use clap::Args; -use reth_chainspec::{net::mainnet_nodes, ChainSpec}; +use reth_chainspec::ChainSpec; use reth_config::Config; -use reth_discv4::{DEFAULT_DISCOVERY_ADDR, 
DEFAULT_DISCOVERY_PORT}; +use reth_discv4::{NodeRecord, DEFAULT_DISCOVERY_ADDR, DEFAULT_DISCOVERY_PORT}; use reth_discv5::{ - DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_PORT, + discv5::ListenConfig, DEFAULT_COUNT_BOOTSTRAP_LOOKUPS, DEFAULT_DISCOVERY_V5_PORT, DEFAULT_SECONDS_BOOTSTRAP_LOOKUP_INTERVAL, DEFAULT_SECONDS_LOOKUP_INTERVAL, }; use reth_net_nat::NatResolver; @@ -18,10 +18,10 @@ use reth_network::{ }, HelloMessageWithProtocols, NetworkConfigBuilder, SessionsConfig, }; -use reth_network_peers::TrustedPeer; +use reth_network_peers::{mainnet_nodes, TrustedPeer}; use secp256k1::SecretKey; use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, ops::Not, path::PathBuf, sync::Arc, @@ -123,6 +123,12 @@ impl NetworkArgs { /// /// The `default_peers_file` will be used as the default location to store the persistent peers /// file if `no_persist_peers` is false, and there is no provided `peers_file`. + /// + /// Configured Bootnodes are prioritized, if unset, the chain spec bootnodes are used + /// Priority order for bootnodes configuration: + /// 1. --bootnodes flag + /// 2. Network preset flags (e.g. --holesky) + /// 3. default to mainnet nodes pub fn network_config( &self, config: &Config, @@ -130,7 +136,16 @@ impl NetworkArgs { secret_key: SecretKey, default_peers_file: PathBuf, ) -> NetworkConfigBuilder { - let chain_bootnodes = chain_spec.bootnodes().unwrap_or_else(mainnet_nodes); + let chain_bootnodes = self + .bootnodes + .clone() + .map(|bootnodes| { + bootnodes + .into_iter() + .filter_map(|trusted_peer| trusted_peer.resolve_blocking().ok()) + .collect() + }) + .unwrap_or_else(|| chain_spec.bootnodes().unwrap_or_else(mainnet_nodes)); let peers_file = self.peers_file.clone().unwrap_or(default_peers_file); // Configure peer connections @@ -173,23 +188,17 @@ impl NetworkArgs { // apply discovery settings .apply(|builder| { let rlpx_socket = (self.addr, self.port).into(); - self.discovery.apply_to_builder(builder, rlpx_socket) - }) - // modify discv5 settings if enabled in previous step - .map_discv5_config_builder(|builder| { - let DiscoveryArgs { - discv5_lookup_interval, - discv5_bootstrap_lookup_interval, - discv5_bootstrap_lookup_countdown, - .. - } = self.discovery; - - builder - .add_unsigned_boot_nodes(chain_bootnodes) - .lookup_interval(discv5_lookup_interval) - .bootstrap_lookup_interval(discv5_bootstrap_lookup_interval) - .bootstrap_lookup_countdown(discv5_bootstrap_lookup_countdown) + self.discovery.apply_to_builder(builder, rlpx_socket, chain_bootnodes) }) + .listener_addr(SocketAddr::new( + self.addr, // set discovery port based on instance number + self.port, + )) + .discovery_addr(SocketAddr::new( + self.discovery.addr, + // set discovery port based on instance number + self.discovery.port, + )) } /// If `no_persist_peers` is false then this returns the path to the persistent peers file path. @@ -211,6 +220,25 @@ impl NetworkArgs { self.discovery = self.discovery.with_unused_discovery_port(); self } + + /// Change networking port numbers based on the instance number. + /// Ports are updated to `previous_value + instance - 1` + /// + /// # Panics + /// Warning: if `instance` is zero in debug mode, this will panic. 
+    pub fn adjust_instance_ports(&mut self, instance: u16) {
+        debug_assert_ne!(instance, 0, "instance must be non-zero");
+        self.port += instance - 1;
+        self.discovery.adjust_instance_ports(instance);
+    }
+
+    /// Resolve all trusted peers at once
+    pub async fn resolve_trusted_peers(&self) -> Result<Vec<NodeRecord>, std::io::Error> {
+        futures::future::try_join_all(
+            self.trusted_peers.iter().map(|peer| async move { peer.resolve().await }),
+        )
+        .await
+    }
 }
 
 impl Default for NetworkArgs {
@@ -309,6 +337,7 @@ impl DiscoveryArgs {
         &self,
         mut network_config_builder: NetworkConfigBuilder,
         rlpx_tcp_socket: SocketAddr,
+        boot_nodes: impl IntoIterator<Item = NodeRecord>,
     ) -> NetworkConfigBuilder {
         if self.disable_discovery || self.disable_dns_discovery {
             network_config_builder = network_config_builder.disable_dns_discovery();
@@ -319,19 +348,72 @@
         }
 
         if !self.disable_discovery && self.enable_discv5_discovery {
-            network_config_builder =
-                network_config_builder.discovery_v5(reth_discv5::Config::builder(rlpx_tcp_socket));
+            network_config_builder = network_config_builder
+                .discovery_v5(self.discovery_v5_builder(rlpx_tcp_socket, boot_nodes));
         }
 
         network_config_builder
     }
 
+    /// Creates a [`reth_discv5::ConfigBuilder`] filling it with the values from this struct.
+    pub fn discovery_v5_builder(
+        &self,
+        rlpx_tcp_socket: SocketAddr,
+        boot_nodes: impl IntoIterator<Item = NodeRecord>,
+    ) -> reth_discv5::ConfigBuilder {
+        let Self {
+            discv5_addr,
+            discv5_addr_ipv6,
+            discv5_port,
+            discv5_port_ipv6,
+            discv5_lookup_interval,
+            discv5_bootstrap_lookup_interval,
+            discv5_bootstrap_lookup_countdown,
+            ..
+        } = self;
+
+        // Use rlpx address if none given
+        let discv5_addr_ipv4 = discv5_addr.or(match rlpx_tcp_socket {
+            SocketAddr::V4(addr) => Some(*addr.ip()),
+            SocketAddr::V6(_) => None,
+        });
+        let discv5_addr_ipv6 = discv5_addr_ipv6.or(match rlpx_tcp_socket {
+            SocketAddr::V4(_) => None,
+            SocketAddr::V6(addr) => Some(*addr.ip()),
+        });
+
+        reth_discv5::Config::builder(rlpx_tcp_socket)
+            .discv5_config(
+                reth_discv5::discv5::ConfigBuilder::new(ListenConfig::from_two_sockets(
+                    discv5_addr_ipv4.map(|addr| SocketAddrV4::new(addr, *discv5_port)),
+                    discv5_addr_ipv6.map(|addr| SocketAddrV6::new(addr, *discv5_port_ipv6, 0, 0)),
+                ))
+                .build(),
+            )
+            .add_unsigned_boot_nodes(boot_nodes)
+            .lookup_interval(*discv5_lookup_interval)
+            .bootstrap_lookup_interval(*discv5_bootstrap_lookup_interval)
+            .bootstrap_lookup_countdown(*discv5_bootstrap_lookup_countdown)
+    }
+
     /// Set the discovery port to zero, to allow the OS to assign a random unused port when
     /// discovery binds to the socket.
     pub const fn with_unused_discovery_port(mut self) -> Self {
         self.port = 0;
         self
     }
+
+    /// Change networking port numbers based on the instance number.
+    /// Ports are updated to `previous_value + instance - 1`
+    ///
+    /// # Panics
+    /// Warning: if `instance` is zero in debug mode, this will panic.
+    pub fn adjust_instance_ports(&mut self, instance: u16) {
+        debug_assert_ne!(instance, 0, "instance must be non-zero");
+        self.port += instance - 1;
+        self.discv5_port += instance - 1;
+        self.discv5_port_ipv6 += instance - 1;
+    }
 }
 
 impl Default for DiscoveryArgs {
diff --git a/crates/node-core/src/args/payload_builder.rs b/crates/node/core/src/args/payload_builder.rs
similarity index 97%
rename from crates/node-core/src/args/payload_builder.rs
rename to crates/node/core/src/args/payload_builder.rs
index b6a937156..7d3b5f851 100644
--- a/crates/node-core/src/args/payload_builder.rs
+++ b/crates/node/core/src/args/payload_builder.rs
@@ -1,11 +1,9 @@
-use crate::{
-    args::utils::parse_duration_from_secs, cli::config::PayloadBuilderConfig,
-    version::default_extradata,
-};
+use crate::{cli::config::PayloadBuilderConfig, version::default_extradata};
 use clap::{
     builder::{RangedU64ValueParser, TypedValueParser},
     Arg, Args, Command,
 };
+use reth_cli_util::parse_duration_from_secs;
 use reth_primitives::constants::{
     ETHEREUM_BLOCK_GAS_LIMIT, MAXIMUM_EXTRA_DATA_SIZE, SLOT_DURATION,
 };
diff --git a/crates/node-core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs
similarity index 100%
rename from crates/node-core/src/args/pruning.rs
rename to crates/node/core/src/args/pruning.rs
diff --git a/crates/node-core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs
similarity index 93%
rename from crates/node-core/src/args/rpc_server.rs
rename to crates/node/core/src/args/rpc_server.rs
index 7ab2dd268..761f0c3f7 100644
--- a/crates/node-core/src/args/rpc_server.rs
+++ b/crates/node/core/src/args/rpc_server.rs
@@ -1,22 +1,22 @@
 //! clap [Args](clap::Args) for RPC related arguments.
 
-use crate::args::{
-    types::{MaxU32, ZeroAsNoneU64},
-    GasPriceOracleArgs, RpcStateCacheArgs,
+use std::{
+    ffi::OsStr,
+    net::{IpAddr, Ipv4Addr},
+    path::PathBuf,
 };
+
 use alloy_rpc_types_engine::JwtSecret;
 use clap::{
     builder::{PossibleValue, RangedU64ValueParser, TypedValueParser},
     Arg, Args, Command,
 };
 use rand::Rng;
-use reth_rpc::eth::RPC_DEFAULT_GAS_CAP;
-
 use reth_rpc_server_types::{constants, RethRpcModule, RpcModuleSelection};
-use std::{
-    ffi::OsStr,
-    net::{IpAddr, Ipv4Addr},
-    path::PathBuf,
+
+use crate::args::{
+    types::{MaxU32, ZeroAsNoneU64},
+    GasPriceOracleArgs, RpcStateCacheArgs,
 };
 
 /// Default max number of subscriptions per connection.
@@ -152,10 +152,24 @@ pub struct RpcServerArgs {
         alias = "rpc-gascap",
         value_name = "GAS_CAP",
         value_parser = RangedU64ValueParser::<u64>::new().range(1..),
-        default_value_t = RPC_DEFAULT_GAS_CAP.into()
+        default_value_t = constants::gas_oracle::RPC_DEFAULT_GAS_CAP
     )]
     pub rpc_gas_cap: u64,
 
+    /// The maximum proof window for historical proof generation.
+    /// This value allows for generating historical proofs up to
+    /// the configured number of blocks from the current tip (up to `tip - window`).
+    #[arg(
+        long = "rpc.eth-proof-window",
+        default_value_t = constants::DEFAULT_ETH_PROOF_WINDOW,
+        value_parser = RangedU64ValueParser::<u64>::new().range(..=constants::MAX_ETH_PROOF_WINDOW)
+    )]
+    pub rpc_eth_proof_window: u64,
+
+    /// Maximum number of concurrent getproof requests.
+    #[arg(long = "rpc.proof-permits", alias = "rpc-proof-permits", value_name = "COUNT", default_value_t = constants::DEFAULT_PROOF_PERMITS)]
+    pub rpc_proof_permits: usize,
+
+    /// State cache configuration.
#[command(flatten)] pub rpc_state_cache: RpcStateCacheArgs, @@ -285,9 +299,11 @@ impl Default for RpcServerArgs { rpc_max_tracing_requests: constants::default_max_tracing_requests(), rpc_max_blocks_per_filter: constants::DEFAULT_MAX_BLOCKS_PER_FILTER.into(), rpc_max_logs_per_response: (constants::DEFAULT_MAX_LOGS_PER_RESPONSE as u64).into(), - rpc_gas_cap: RPC_DEFAULT_GAS_CAP.into(), + rpc_gas_cap: constants::gas_oracle::RPC_DEFAULT_GAS_CAP, + rpc_eth_proof_window: constants::DEFAULT_ETH_PROOF_WINDOW, gas_price_oracle: GasPriceOracleArgs::default(), rpc_state_cache: RpcStateCacheArgs::default(), + rpc_proof_permits: constants::DEFAULT_PROOF_PERMITS, } } } diff --git a/crates/node-core/src/args/rpc_state_cache.rs b/crates/node/core/src/args/rpc_state_cache.rs similarity index 100% rename from crates/node-core/src/args/rpc_state_cache.rs rename to crates/node/core/src/args/rpc_state_cache.rs diff --git a/crates/node-core/src/args/stage.rs b/crates/node/core/src/args/stage.rs similarity index 100% rename from crates/node-core/src/args/stage.rs rename to crates/node/core/src/args/stage.rs diff --git a/crates/node-core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs similarity index 100% rename from crates/node-core/src/args/txpool.rs rename to crates/node/core/src/args/txpool.rs diff --git a/crates/node-core/src/args/types.rs b/crates/node/core/src/args/types.rs similarity index 100% rename from crates/node-core/src/args/types.rs rename to crates/node/core/src/args/types.rs diff --git a/crates/node/core/src/args/utils.rs b/crates/node/core/src/args/utils.rs new file mode 100644 index 000000000..f626f8ec3 --- /dev/null +++ b/crates/node/core/src/args/utils.rs @@ -0,0 +1,98 @@ +//! Clap parser utilities + +use alloy_genesis::Genesis; +use reth_chainspec::ChainSpec; +use reth_fs_util as fs; +use std::{path::PathBuf, sync::Arc}; + +use reth_chainspec::DEV; + +#[cfg(feature = "bsc")] +use reth_primitives::{BSC_MAINNET, BSC_TESTNET}; + +#[cfg(feature = "optimism")] +use reth_chainspec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}; + +#[cfg(all(feature = "optimism", feature = "opbnb"))] +use reth_chainspec::{OPBNB_MAINNET, OPBNB_TESTNET}; + +#[cfg(not(feature = "optimism"))] +use reth_chainspec::{HOLESKY, MAINNET, SEPOLIA}; + +#[cfg(feature = "bsc")] +/// Chains supported by bsc. First value should be used as the default. +pub const SUPPORTED_CHAINS: &[&str] = &["bsc", "bsc-testnet"]; +#[cfg(feature = "optimism")] +/// Chains supported by op-reth. First value should be used as the default. +pub const SUPPORTED_CHAINS: &[&str] = &["optimism", "optimism-sepolia", "base", "base-sepolia"]; +#[cfg(all(not(feature = "optimism"), not(feature = "bsc")))] +/// Chains supported by reth. First value should be used as the default. +pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "sepolia", "holesky", "dev"]; + +/// The help info for the --chain flag +pub fn chain_help() -> String { + format!("The chain this node is running.\nPossible values are either a built-in chain or the path to a chain specification file.\n\nBuilt-in chains:\n {}", SUPPORTED_CHAINS.join(", ")) +} + +/// Clap value parser for [`ChainSpec`]s. +/// +/// The value parser matches either a known chain, the path +/// to a json file, or a json formatted string in-memory. The json needs to be a Genesis struct. 
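As an aside before the parser body in the next hunk: a value parser with this signature plugs directly into a clap derive struct. A minimal sketch of that wiring follows (the `Cli` struct here is hypothetical and only illustrates the pattern; reth's real argument structs carry more attributes):

    use clap::Parser;
    use reth_chainspec::ChainSpec;
    use std::sync::Arc;

    /// Hypothetical CLI struct showing how `chain_value_parser` is wired into clap.
    #[derive(Parser)]
    struct Cli {
        /// Built-in chain name, path to a spec file, or inline genesis JSON.
        #[arg(long, value_parser = chain_value_parser, default_value = SUPPORTED_CHAINS[0])]
        chain: Arc<ChainSpec>,
    }

Because the parser returns `eyre::Result`, clap surfaces a readable error message when the argument is neither a known chain nor valid genesis JSON.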
+pub fn chain_value_parser(s: &str) -> eyre::Result<Arc<ChainSpec>, eyre::Error> {
+    Ok(match s {
+        #[cfg(not(feature = "optimism"))]
+        "mainnet" => MAINNET.clone(),
+        #[cfg(not(feature = "optimism"))]
+        "sepolia" => SEPOLIA.clone(),
+        #[cfg(not(feature = "optimism"))]
+        "holesky" => HOLESKY.clone(),
+        "dev" => DEV.clone(),
+        #[cfg(feature = "optimism")]
+        "optimism" => OP_MAINNET.clone(),
+        #[cfg(feature = "optimism")]
+        "optimism_sepolia" | "optimism-sepolia" => OP_SEPOLIA.clone(),
+        #[cfg(feature = "optimism")]
+        "base" => BASE_MAINNET.clone(),
+        #[cfg(feature = "optimism")]
+        "base_sepolia" | "base-sepolia" => BASE_SEPOLIA.clone(),
+        #[cfg(all(feature = "optimism", feature = "opbnb"))]
+        "opbnb_mainnet" | "opbnb-mainnet" => OPBNB_MAINNET.clone(),
+        #[cfg(all(feature = "optimism", feature = "opbnb"))]
+        "opbnb_testnet" | "opbnb-testnet" => OPBNB_TESTNET.clone(),
+        #[cfg(feature = "bsc")]
+        "bsc" | "bsc-mainnet" => BSC_MAINNET.clone(),
+        #[cfg(feature = "bsc")]
+        "bsc-testnet" => BSC_TESTNET.clone(),
+        _ => {
+            // try to read json from path first
+            let raw = match fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned())) {
+                Ok(raw) => raw,
+                Err(io_err) => {
+                    // valid json may start with "\n", but must contain "{"
+                    if s.contains('{') {
+                        s.to_string()
+                    } else {
+                        return Err(io_err.into()) // assume invalid path
+                    }
+                }
+            };
+
+            // both serialized Genesis and ChainSpec structs supported
+            let genesis: Genesis = serde_json::from_str(&raw)?;
+
+            Arc::new(genesis.into())
+        }
+    })
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn parse_known_chain_spec() {
+        for chain in SUPPORTED_CHAINS {
+            chain_value_parser(chain).unwrap();
+        }
+    }
+}
diff --git a/crates/node-core/src/cli/config.rs b/crates/node/core/src/cli/config.rs
similarity index 100%
rename from crates/node-core/src/cli/config.rs
rename to crates/node/core/src/cli/config.rs
diff --git a/crates/node-core/src/cli/mod.rs b/crates/node/core/src/cli/mod.rs
similarity index 100%
rename from crates/node-core/src/cli/mod.rs
rename to crates/node/core/src/cli/mod.rs
diff --git a/crates/node-core/src/dirs.rs b/crates/node/core/src/dirs.rs
similarity index 97%
rename from crates/node-core/src/dirs.rs
rename to crates/node/core/src/dirs.rs
index 08f93b472..a43350c28 100644
--- a/crates/node-core/src/dirs.rs
+++ b/crates/node/core/src/dirs.rs
@@ -259,10 +259,10 @@ impl From for MaybePlatformPath {
 /// Wrapper type around `PlatformPath` that includes a `Chain`, used for separating reth data for
 /// different networks.
 ///
-/// If the chain is either mainnet, goerli, or sepolia, then the path will be:
+/// If the chain is either mainnet, sepolia, or holesky, then the path will be:
 ///  * mainnet: `<DIR>/mainnet`
-///  * goerli: `<DIR>/goerli`
 ///  * sepolia: `<DIR>/sepolia`
+///  * holesky: `<DIR>/holesky`
 ///
 /// Otherwise, the path will be dependent on the chain ID:
 ///  * `<DIR>/<chain_id>`
@@ -383,10 +383,6 @@ mod tests {
 
     #[test]
     fn test_maybe_testnet_datadir_path() {
-        let path = MaybePlatformPath::<DataDirPath>::default();
-        let path = path.unwrap_or_chain_default(Chain::goerli(), DatadirArgs::default());
-        assert!(path.as_ref().ends_with("reth/goerli"), "{path:?}");
-
         let path = MaybePlatformPath::<DataDirPath>::default();
         let path = path.unwrap_or_chain_default(Chain::holesky(), DatadirArgs::default());
         assert!(path.as_ref().ends_with("reth/holesky"), "{path:?}");
diff --git a/crates/node-core/src/exit.rs b/crates/node/core/src/exit.rs
similarity index 53%
rename from crates/node-core/src/exit.rs
rename to crates/node/core/src/exit.rs
index 7957af185..5dc6e5638 100644
--- a/crates/node-core/src/exit.rs
+++ b/crates/node/core/src/exit.rs
@@ -1,32 +1,39 @@
 //! Helper types for waiting for the node to exit.
 
-use futures::FutureExt;
-use reth_beacon_consensus::BeaconConsensusEngineError;
+use futures::{future::BoxFuture, FutureExt};
 use std::{
+    fmt,
     future::Future,
     pin::Pin,
     task::{ready, Context, Poll},
 };
-use tokio::sync::oneshot;
 
 /// A Future which resolves when the node exits
-#[derive(Debug)]
 pub struct NodeExitFuture {
-    /// The receiver half of the channel for the consensus engine.
-    /// This can be used to wait for the consensus engine to exit.
-    consensus_engine_rx: Option<oneshot::Receiver<Result<(), BeaconConsensusEngineError>>>,
+    /// The consensus engine future.
+    /// This can be polled to wait for the consensus engine to exit.
+    consensus_engine_fut: Option<BoxFuture<'static, eyre::Result<()>>>,
 
     /// Flag indicating whether the node should be terminated after the pipeline sync.
     terminate: bool,
}

+impl fmt::Debug for NodeExitFuture {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("NodeExitFuture")
+            .field("consensus_engine_fut", &"...")
+            .field("terminate", &self.terminate)
+            .finish()
+    }
+}
+
 impl NodeExitFuture {
     /// Create a new `NodeExitFuture`.
-    pub const fn new(
-        consensus_engine_rx: oneshot::Receiver<Result<(), BeaconConsensusEngineError>>,
-        terminate: bool,
-    ) -> Self {
-        Self { consensus_engine_rx: Some(consensus_engine_rx), terminate }
+    pub fn new<F>(consensus_engine_fut: F, terminate: bool) -> Self
+    where
+        F: Future<Output = eyre::Result<()>> + 'static + Send,
+    {
+        Self { consensus_engine_fut: Some(Box::pin(consensus_engine_fut)), terminate }
     }
 }
 
@@ -35,18 +42,17 @@ impl Future for NodeExitFuture {
     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
         let this = self.get_mut();
-        if let Some(rx) = this.consensus_engine_rx.as_mut() {
+        if let Some(rx) = this.consensus_engine_fut.as_mut() {
             match ready!(rx.poll_unpin(cx)) {
-                Ok(res) => {
-                    this.consensus_engine_rx.take();
-                    res?;
+                Ok(_) => {
+                    this.consensus_engine_fut.take();
                     if this.terminate {
                         Poll::Ready(Ok(()))
                     } else {
                         Poll::Pending
                     }
                 }
-                Err(err) => Poll::Ready(Err(err.into())),
+                Err(err) => Poll::Ready(Err(err)),
             }
         } else {
             Poll::Pending
@@ -61,11 +67,9 @@ mod tests {
 
     #[tokio::test]
     async fn test_node_exit_future_terminate_true() {
-        let (tx, rx) = oneshot::channel::<Result<(), BeaconConsensusEngineError>>();
+        let fut = async { Ok(()) };
 
-        let _ = tx.send(Ok(()));
-
-        let node_exit_future = NodeExitFuture::new(rx, true);
+        let node_exit_future = NodeExitFuture::new(fut, true);
 
         let res = node_exit_future.await;
 
@@ -74,11 +78,9 @@
     #[tokio::test]
     async fn test_node_exit_future_terminate_false() {
-        let (tx, rx) = oneshot::channel::<Result<(), BeaconConsensusEngineError>>();
-
-        let _ = tx.send(Ok(()));
+        let fut = async { Ok(()) };
 
-        let mut node_exit_future = NodeExitFuture::new(rx, false);
+        let mut node_exit_future = NodeExitFuture::new(fut, false);
         poll_fn(|cx| {
             assert!(node_exit_future.poll_unpin(cx).is_pending());
             Poll::Ready(())
diff --git a/crates/node-core/src/lib.rs b/crates/node/core/src/lib.rs
similarity index 93%
rename from crates/node-core/src/lib.rs
rename to crates/node/core/src/lib.rs
index a8761110a..27a81cc26 100644
--- a/crates/node-core/src/lib.rs
+++ b/crates/node/core/src/lib.rs
@@ -11,7 +11,6 @@
 pub mod args;
 pub mod cli;
 pub mod dirs;
-pub mod engine;
 pub mod exit;
 pub mod metrics;
 pub mod node_config;
@@ -39,12 +38,12 @@ pub mod rpc {
     }
     /// Re-exported from `reth_rpc::eth`.
     pub mod eth {
-        pub use reth_rpc::eth::*;
+        pub use reth_rpc_eth_api::*;
     }
 
     /// Re-exported from `reth_rpc::rpc`.
     pub mod result {
-        pub use reth_rpc::result::*;
+        pub use reth_rpc_server_types::result::*;
     }
 
     /// Re-exported from `reth_rpc::eth`.
diff --git a/crates/node-core/src/metrics/mod.rs b/crates/node/core/src/metrics/mod.rs
similarity index 100%
rename from crates/node-core/src/metrics/mod.rs
rename to crates/node/core/src/metrics/mod.rs
diff --git a/crates/node-core/src/metrics/prometheus_exporter.rs b/crates/node/core/src/metrics/prometheus_exporter.rs
similarity index 98%
rename from crates/node-core/src/metrics/prometheus_exporter.rs
rename to crates/node/core/src/metrics/prometheus_exporter.rs
index b7a3ba701..5e049cc97 100644
--- a/crates/node-core/src/metrics/prometheus_exporter.rs
+++ b/crates/node/core/src/metrics/prometheus_exporter.rs
@@ -1,6 +1,6 @@
 //!
Prometheus exporter -use crate::metrics::version_metrics::register_version_metrics; +use crate::metrics::version_metrics::VersionInfo; use eyre::WrapErr; use futures::{future::FusedFuture, FutureExt}; use http::Response; @@ -151,7 +151,7 @@ where process.describe(); describe_memory_stats(); describe_io_stats(); - register_version_metrics(); + VersionInfo::default().register_version_metrics(); Ok(()) } diff --git a/crates/node/core/src/metrics/version_metrics.rs b/crates/node/core/src/metrics/version_metrics.rs new file mode 100644 index 000000000..03769d990 --- /dev/null +++ b/crates/node/core/src/metrics/version_metrics.rs @@ -0,0 +1,50 @@ +//! This exposes reth's version information over prometheus. + +use crate::version::{BUILD_PROFILE_NAME, VERGEN_GIT_SHA}; +use metrics::gauge; + +/// Contains version information for the application. +#[derive(Debug, Clone)] +pub struct VersionInfo { + /// The version of the application. + pub version: &'static str, + /// The build timestamp of the application. + pub build_timestamp: &'static str, + /// The cargo features enabled for the build. + pub cargo_features: &'static str, + /// The Git SHA of the build. + pub git_sha: &'static str, + /// The target triple for the build. + pub target_triple: &'static str, + /// The build profile (e.g., debug or release). + pub build_profile: &'static str, +} + +impl Default for VersionInfo { + fn default() -> Self { + Self { + version: env!("CARGO_PKG_VERSION"), + build_timestamp: env!("VERGEN_BUILD_TIMESTAMP"), + cargo_features: env!("VERGEN_CARGO_FEATURES"), + git_sha: VERGEN_GIT_SHA, + target_triple: env!("VERGEN_CARGO_TARGET_TRIPLE"), + build_profile: BUILD_PROFILE_NAME, + } + } +} + +impl VersionInfo { + /// This exposes reth's version information over prometheus. + pub fn register_version_metrics(&self) { + let labels: [(&str, &str); 6] = [ + ("version", self.version), + ("build_timestamp", self.build_timestamp), + ("cargo_features", self.cargo_features), + ("git_sha", self.git_sha), + ("target_triple", self.target_triple), + ("build_profile", self.build_profile), + ]; + + let _gauge = gauge!("info", &labels); + } +} diff --git a/crates/node-core/src/node_config.rs b/crates/node/core/src/node_config.rs similarity index 94% rename from crates/node-core/src/node_config.rs rename to crates/node/core/src/node_config.rs index 3a3b742ce..1f5bea21b 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -16,8 +16,7 @@ use reth_config::config::PruneConfig; use reth_db_api::{database::Database, database_metrics::DatabaseMetrics}; use reth_network_p2p::headers::client::HeadersClient; use reth_primitives::{ - constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, kzg::KzgSettings, BlockHashOrNumber, - BlockNumber, Head, SealedHeader, B256, + revm_primitives::EnvKzgSettings, BlockHashOrNumber, BlockNumber, Head, SealedHeader, B256, }; use reth_provider::{ providers::StaticFileProvider, BlockHashReader, HeaderProvider, ProviderFactory, @@ -156,12 +155,25 @@ impl NodeConfig { .with_unused_ports() } - /// Sets --dev mode for the node + /// Sets --dev mode for the node. + /// + /// In addition to setting the `--dev` flag, this also: + /// - disables discovery in [`NetworkArgs`]. pub const fn dev(mut self) -> Self { self.dev.dev = true; + self.network.discovery.disable_discovery = true; self } + /// Sets --dev mode for the node [`NodeConfig::dev`], if `dev` is true. 
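One behavioral note on the `dev()` change above, illustrated before the conditional setter defined next: enabling dev mode now also turns discovery off. A minimal sketch, assuming the `NodeConfig::test()` constructor from reth's test helpers (an assumption; any default config value works the same way):

    // Hypothetical usage: `set_dev(true)` forwards to `dev()`, which flips both flags.
    let config = NodeConfig::test().set_dev(true);
    assert!(config.dev.dev);
    assert!(config.network.discovery.disable_discovery);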
+    pub const fn set_dev(self, dev: bool) -> Self {
+        if dev {
+            self.dev()
+        } else {
+            self
+        }
+    }
+
     /// Set the data directory args for the node
     pub fn with_datadir_args(mut self, datadir_args: DatadirArgs) -> Self {
         self.datadir = datadir_args;
@@ -267,9 +279,9 @@ impl NodeConfig {
         Ok(max_block)
     }
 
-    /// Loads '`MAINNET_KZG_TRUSTED_SETUP`'
-    pub fn kzg_settings(&self) -> eyre::Result<Arc<KzgSettings>> {
-        Ok(Arc::clone(&MAINNET_KZG_TRUSTED_SETUP))
+    /// Loads '`EnvKzgSettings::Default`'
+    pub const fn kzg_settings(&self) -> eyre::Result<EnvKzgSettings> {
+        Ok(EnvKzgSettings::Default)
     }
 
     /// Installs the prometheus recorder.
@@ -391,6 +403,7 @@ impl NodeConfig {
     /// [`RpcServerArgs::adjust_instance_ports`] method.
     pub fn adjust_instance_ports(&mut self) {
         self.rpc.adjust_instance_ports(self.instance);
+        self.network.adjust_instance_ports(self.instance);
     }
 
     /// Sets networking and RPC ports to zero, causing the OS to choose random unused ports when
diff --git a/crates/node-core/src/utils.rs b/crates/node/core/src/utils.rs
similarity index 68%
rename from crates/node-core/src/utils.rs
rename to crates/node/core/src/utils.rs
index 8393b50be..28dc14f82 100644
--- a/crates/node-core/src/utils.rs
+++ b/crates/node/core/src/utils.rs
@@ -4,22 +4,19 @@
 use eyre::Result;
 use reth_chainspec::ChainSpec;
 use reth_consensus_common::validation::validate_block_pre_execution;
-use reth_fs_util as fs;
-use reth_network::NetworkManager;
 use reth_network_p2p::{
     bodies::client::BodiesClient,
-    headers::client::{HeadersClient, HeadersRequest},
+    headers::client::{HeadersClient, HeadersDirection, HeadersRequest},
     priority::Priority,
 };
-use reth_primitives::{BlockHashOrNumber, HeadersDirection, SealedBlock, SealedHeader};
-use reth_provider::BlockReader;
+use reth_primitives::{BlockHashOrNumber, SealedBlock, SealedHeader};
 use reth_rpc_types::engine::{JwtError, JwtSecret};
 use std::{
     env::VarError,
     path::{Path, PathBuf},
     sync::Arc,
 };
-use tracing::{debug, info, trace, warn};
+use tracing::{debug, info};
 
 /// Parses a user-specified path with support for environment variables and common shorthands (e.g.
 /// ~ for the user's home directory).
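The port-shifting rule documented in the hunks above (`previous_value + instance - 1` for both the network args and the RPC args) is worth seeing with concrete numbers. A self-contained arithmetic sketch, using the default discovery/rlpx port from this patch's Dockerfile as the base value:

    // Instance 1 keeps the defaults; each further instance shifts by one.
    fn shifted(base: u16, instance: u16) -> u16 {
        assert_ne!(instance, 0, "instance must be non-zero");
        base + instance - 1
    }

    fn main() {
        let (rlpx, discovery) = (30303u16, 30303u16);
        for instance in 1..=3 {
            println!(
                "instance {instance}: rlpx={} discovery={}",
                shifted(rlpx, instance),
                shifted(discovery, instance),
            );
        }
    }

Running three instances on one host therefore yields 30303, 30304, and 30305 without any manual port configuration.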
@@ -38,29 +35,6 @@ pub fn get_or_create_jwt_secret_from_path(path: &Path) -> Result<JwtSecret, JwtError>
-pub fn write_peers_to_file<C>(network: &NetworkManager<C>, persistent_peers_file: Option<PathBuf>)
-where
-    C: BlockReader + Unpin,
-{
-    if let Some(file_path) = persistent_peers_file {
-        let known_peers = network.all_peers().collect::<Vec<_>>();
-        if let Ok(known_peers) = serde_json::to_string_pretty(&known_peers) {
-            trace!(target: "reth::cli", peers_file =?file_path, num_peers=%known_peers.len(), "Saving current peers");
-            let parent_dir = file_path.parent().map(fs::create_dir_all).transpose();
-            match parent_dir.and_then(|_| fs::write(&file_path, known_peers)) {
-                Ok(_) => {
-                    info!(target: "reth::cli", peers_file=?file_path, "Wrote network peers to file");
-                }
-                Err(err) => {
-                    warn!(target: "reth::cli", %err, peers_file=?file_path, "Failed to write network peers to file");
-                }
-            }
-        }
-    }
-}
-
 /// Get a single header from network
 pub async fn get_single_header<Client>(
     client: Client,
@@ -120,6 +94,7 @@ where
         body: block.transactions,
         ommers: block.ommers,
         withdrawals: block.withdrawals,
+        sidecars: block.sidecars,
         requests: block.requests,
     };
diff --git a/crates/node-core/src/version.rs b/crates/node/core/src/version.rs
similarity index 78%
rename from crates/node-core/src/version.rs
rename to crates/node/core/src/version.rs
index db8bf09d1..adc922787 100644
--- a/crates/node-core/src/version.rs
+++ b/crates/node/core/src/version.rs
@@ -11,8 +11,11 @@ pub const NAME_CLIENT: &str = "Reth";
 /// The latest version from Cargo.toml.
 pub const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
 
-/// The short SHA of the latest commit.
-pub const VERGEN_GIT_SHA: &str = env!("VERGEN_GIT_SHA");
+/// The full SHA of the latest commit.
+pub const VERGEN_GIT_SHA_LONG: &str = env!("VERGEN_GIT_SHA");
+
+/// The 8 character short SHA of the latest commit.
+pub const VERGEN_GIT_SHA: &str = const_format::str_index!(VERGEN_GIT_SHA_LONG, ..8);
 
 /// The build timestamp.
 pub const VERGEN_BUILD_TIMESTAMP: &str = env!("VERGEN_BUILD_TIMESTAMP");
@@ -27,11 +30,11 @@
 /// ```text
 /// 0.1.0 (defa64b2)
 /// ```
-pub const SHORT_VERSION: &str = concat!(
+pub const SHORT_VERSION: &str = const_format::concatcp!(
     env!("CARGO_PKG_VERSION"),
     env!("RETH_VERSION_SUFFIX"),
     " (",
-    env!("VERGEN_GIT_SHA"),
+    VERGEN_GIT_SHA,
     ")"
 );
 
@@ -52,13 +55,13 @@
 /// Build Features: jemalloc
 /// Build Profile: maxperf
 /// ```
-pub const LONG_VERSION: &str = const_str::concat!(
+pub const LONG_VERSION: &str = const_format::concatcp!(
     "Version: ",
     env!("CARGO_PKG_VERSION"),
     env!("RETH_VERSION_SUFFIX"),
     "\n",
     "Commit SHA: ",
-    env!("VERGEN_GIT_SHA"),
+    VERGEN_GIT_SHA_LONG,
     "\n",
     "Build Timestamp: ",
     env!("VERGEN_BUILD_TIMESTAMP"),
@@ -67,9 +70,23 @@
     env!("VERGEN_CARGO_FEATURES"),
     "\n",
     "Build Profile: ",
-    build_profile_name()
+    BUILD_PROFILE_NAME
 );
 
+pub(crate) const BUILD_PROFILE_NAME: &str = {
+    // Derived from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime
+    // We split on the path separator of the *host* machine, which may be different from
+    // `std::path::MAIN_SEPARATOR_STR`.
+ const OUT_DIR: &str = env!("OUT_DIR"); + let unix_parts = const_format::str_split!(OUT_DIR, '/'); + if unix_parts.len() >= 4 { + unix_parts[unix_parts.len() - 4] + } else { + let win_parts = const_format::str_split!(OUT_DIR, '\\'); + win_parts[win_parts.len() - 4] + } +}; + /// The version information for reth formatted for P2P (devp2p). /// /// - The latest version from Cargo.toml @@ -81,11 +98,11 @@ pub const LONG_VERSION: &str = const_str::concat!( /// reth/v{major}.{minor}.{patch}-{sha1}/{target} /// ``` /// e.g.: `reth/v0.1.0-alpha.1-428a6dc2f/aarch64-apple-darwin` -pub(crate) const P2P_CLIENT_VERSION: &str = concat!( +pub(crate) const P2P_CLIENT_VERSION: &str = const_format::concatcp!( "reth/v", env!("CARGO_PKG_VERSION"), "-", - env!("VERGEN_GIT_SHA"), + VERGEN_GIT_SHA, "/", env!("VERGEN_CARGO_TARGET_TRIPLE") ); @@ -113,16 +130,6 @@ pub fn default_client_version() -> ClientVersion { } } -pub(crate) const fn build_profile_name() -> &'static str { - // Derived from https://stackoverflow.com/questions/73595435/how-to-get-profile-from-cargo-toml-in-build-rs-or-at-runtime - // We split on the path separator of the *host* machine, which may be different from - // `std::path::MAIN_SEPARATOR_STR`. - const OUT_DIR: &str = env!("OUT_DIR"); - const SEP: char = if const_str::contains!(OUT_DIR, "/") { '/' } else { '\\' }; - let parts = const_str::split!(OUT_DIR, SEP); - parts[parts.len() - 4] -} - #[cfg(test)] mod tests { use super::*; diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml index 1b14c74de..d592d25f3 100644 --- a/crates/node/events/Cargo.toml +++ b/crates/node/events/Cargo.toml @@ -20,6 +20,7 @@ reth-prune.workspace = true reth-static-file.workspace = true reth-db-api.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true # alloy alloy-rpc-types-engine.workspace = true diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index 8a25c3700..bc0cfb137 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -7,9 +7,10 @@ use reth_beacon_consensus::{ BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus, }; use reth_db_api::{database::Database, database_metrics::DatabaseMetadata}; -use reth_network::{NetworkEvent, NetworkHandle}; +use reth_network::NetworkEvent; use reth_network_api::PeersInfo; use reth_primitives::{constants, BlockNumber, B256}; +use reth_primitives_traits::{format_gas, format_gas_throughput}; use reth_prune::PrunerEvent; use reth_stages::{EntitiesCheckpoint, ExecOutput, PipelineEvent, StageCheckpoint, StageId}; use reth_static_file::StaticFileProducerEvent; @@ -35,8 +36,8 @@ struct NodeState { /// Used for freelist calculation reported in the "Status" log message. /// See [`EventHandler::poll`]. db: DB, - /// Connection to the network. - network: Option, + /// Information about connected peers. + peers_info: Option>, /// The stage currently being executed. current_stage: Option, /// The latest block reached by either pipeline or consensus engine. 
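The switch from `const-str` to `const_format` above keeps all version-string assembly at compile time. A small standalone sketch of the same composition, assuming a `const_format` version that exposes `concatcp!` and `str_index!` (the patch itself relies on both); the SHA literal is a stand-in, not a real commit:

    const FULL_SHA: &str = "defa64b2f8a1c3e5d7b9a0c2e4f6081a2c4e6f80";
    const SHORT_SHA: &str = const_format::str_index!(FULL_SHA, ..8);
    const SHORT_VERSION: &str = const_format::concatcp!("0.1.0", " (", SHORT_SHA, ")");

    fn main() {
        assert_eq!(SHORT_SHA, "defa64b2");
        println!("{SHORT_VERSION}"); // prints: 0.1.0 (defa64b2)
    }

Since everything is `const`, a malformed slice range or non-literal input fails the build rather than surfacing at runtime.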
@@ -54,12 +55,12 @@ struct NodeState { impl NodeState { const fn new( db: DB, - network: Option, + peers_info: Option>, latest_block: Option, ) -> Self { Self { db, - network, + peers_info, current_stage: None, latest_block, latest_block_time: None, @@ -70,7 +71,7 @@ impl NodeState { } fn num_connected_peers(&self) -> usize { - self.network.as_ref().map(|net| net.num_connected_peers()).unwrap_or_default() + self.peers_info.as_ref().map(|info| info.num_connected_peers()).unwrap_or_default() } /// Processes an event emitted by the pipeline @@ -279,8 +280,8 @@ impl NodeState { hash=?block.hash(), peers=self.num_connected_peers(), txs=block.body.len(), - mgas=%format!("{:.3}MGas", block.header.gas_used as f64 / constants::MGAS_TO_GAS as f64), - mgas_throughput=%format!("{:.3}MGas/s", block.header.gas_used as f64 / elapsed.as_secs_f64() / constants::MGAS_TO_GAS as f64), + gas=%format_gas(block.header.gas_used), + gas_throughput=%format_gas_throughput(block.header.gas_used, elapsed), full=%format!("{:.1}%", block.header.gas_used as f64 * 100.0 / block.header.gas_limit as f64), base_fee=%format!("{:.2}gwei", block.header.base_fee_per_gas.unwrap_or(0) as f64 / constants::GWEI_TO_WEI as f64), blobs=block.header.blob_gas_used.unwrap_or(0) / constants::eip4844::DATA_GAS_PER_BLOB, @@ -437,7 +438,7 @@ impl From for NodeEvent { /// Displays relevant information to the user from components of the node, and periodically /// displays the high-level status of the node. pub async fn handle_events( - network: Option, + peers_info: Option>, latest_block_number: Option, events: E, db: DB, @@ -445,7 +446,7 @@ pub async fn handle_events( E: Stream + Unpin, DB: DatabaseMetadata + Database + 'static, { - let state = NodeState::new(db, network, latest_block_number); + let state = NodeState::new(db, peers_info, latest_block_number); let start = tokio::time::Instant::now() + Duration::from_secs(3); let mut info_interval = tokio::time::interval_at(start, INFO_MESSAGE_INTERVAL); diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml new file mode 100644 index 000000000..b5cd12c33 --- /dev/null +++ b/crates/optimism/cli/Cargo.toml @@ -0,0 +1,56 @@ +[package] +name = "reth-optimism-cli" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-static-file-types = { workspace = true, features = ["clap"] } +clap = { workspace = true, features = ["derive", "env"] } +reth-cli-commands.workspace = true +reth-consensus.workspace = true +reth-db = { workspace = true, features = ["mdbx"] } +reth-db-api.workspace = true +reth-downloaders.workspace = true +reth-optimism-primitives.workspace = true +reth-provider.workspace = true +reth-prune.workspace = true +reth-stages.workspace = true +reth-static-file.workspace = true +reth-execution-types.workspace = true +reth-node-core.workspace = true +reth-primitives.workspace = true + + +reth-stages-types.workspace = true +reth-node-events.workspace = true +reth-network-p2p.workspace = true +reth-errors.workspace = true + +reth-config.workspace = true +alloy-primitives.workspace = true +futures-util.workspace = true +reth-evm-optimism.workspace = true + + + +tokio = { workspace = true, features = [ + "sync", + "macros", + "time", + "rt-multi-thread", +] } +tracing.workspace = true +eyre.workspace = true + +[features] + optimism = [ + "reth-primitives/optimism", + "reth-evm-optimism/optimism", + ] \ No newline 
at end of file diff --git a/crates/optimism/cli/src/commands/build_pipeline.rs b/crates/optimism/cli/src/commands/build_pipeline.rs new file mode 100644 index 000000000..29761d0f7 --- /dev/null +++ b/crates/optimism/cli/src/commands/build_pipeline.rs @@ -0,0 +1,96 @@ +use alloy_primitives::B256; +use futures_util::{Stream, StreamExt}; +use reth_config::Config; +use reth_consensus::Consensus; +use reth_db_api::database::Database; +use reth_downloaders::{ + bodies::bodies::BodiesDownloaderBuilder, file_client::FileClient, + headers::reverse_headers::ReverseHeadersDownloaderBuilder, +}; +use reth_errors::ProviderError; +use reth_evm_optimism::OpExecutorProvider; +use reth_network_p2p::{ + bodies::downloader::BodyDownloader, + headers::downloader::{HeaderDownloader, SyncTarget}, +}; +use reth_node_events::node::NodeEvent; +use reth_provider::{BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderFactory}; +use reth_prune::PruneModes; +use reth_stages::{sets::DefaultStages, Pipeline, StageSet}; +use reth_stages_types::StageId; +use reth_static_file::StaticFileProducer; +use std::sync::Arc; +use tokio::sync::watch; + +/// Builds import pipeline. +/// +/// If configured to execute, all stages will run. Otherwise, only stages that don't require state +/// will run. +pub async fn build_import_pipeline( + config: &Config, + provider_factory: ProviderFactory, + consensus: &Arc, + file_client: Arc, + static_file_producer: StaticFileProducer, + disable_exec: bool, +) -> eyre::Result<(Pipeline, impl Stream)> +where + DB: Database + Clone + Unpin + 'static, + C: Consensus + 'static, +{ + if !file_client.has_canonical_blocks() { + eyre::bail!("unable to import non canonical blocks"); + } + + // Retrieve latest header found in the database. + let last_block_number = provider_factory.last_block_number()?; + let local_head = provider_factory + .sealed_header(last_block_number)? + .ok_or(ProviderError::HeaderNotFound(last_block_number.into()))?; + + let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) + .build(file_client.clone(), consensus.clone()) + .into_task(); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. + header_downloader.update_local_head(local_head); + header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap())); + + let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) + .build(file_client.clone(), consensus.clone(), provider_factory.clone()) + .into_task(); + // TODO: The pipeline should correctly configure the downloader on its own. + // Find the possibility to remove unnecessary pre-configuration. 
+ body_downloader + .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap()) + .expect("failed to set download range"); + + let (tip_tx, tip_rx) = watch::channel(B256::ZERO); + let executor = OpExecutorProvider::optimism(provider_factory.chain_spec()); + + let max_block = file_client.max_block().unwrap_or(0); + + let pipeline = Pipeline::builder() + .with_tip_sender(tip_tx) + // we want to sync all blocks the file client provides or 0 if empty + .with_max_block(max_block) + .add_stages( + DefaultStages::new( + provider_factory.clone(), + tip_rx, + consensus.clone(), + header_downloader, + body_downloader, + executor, + config.stages.clone(), + PruneModes::default(), + ) + .builder() + .disable_all_if(&StageId::STATE_REQUIRED, || disable_exec), + ) + .build(provider_factory, static_file_producer); + + let events = pipeline.events().map(Into::into); + + Ok((pipeline, events)) +} diff --git a/bin/reth/src/commands/import_op.rs b/crates/optimism/cli/src/commands/import.rs similarity index 96% rename from bin/reth/src/commands/import_op.rs rename to crates/optimism/cli/src/commands/import.rs index 646cd4f97..d28ec658a 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/crates/optimism/cli/src/commands/import.rs @@ -1,28 +1,24 @@ //! Command that initializes the node by importing OP Mainnet chain segment below Bedrock, from a //! file. - -use crate::{ - commands::{ - common::{AccessRights, Environment, EnvironmentArgs}, - import::build_import_pipeline, - }, - version::SHORT_VERSION, -}; use clap::Parser; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_consensus::noop::NoopConsensus; use reth_db::tables; use reth_db_api::transaction::DbTx; use reth_downloaders::file_client::{ ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE, }; +use reth_node_core::version::SHORT_VERSION; use reth_optimism_primitives::bedrock_import::is_dup_tx; use reth_provider::StageCheckpointReader; -use reth_prune_types::PruneModes; +use reth_prune::PruneModes; use reth_stages::StageId; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; use tracing::{debug, error, info}; +use crate::commands::build_pipeline::build_import_pipeline; + /// Syncs RLP encoded blocks from a file. #[derive(Debug, Parser)] pub struct ImportOpCommand { diff --git a/bin/reth/src/commands/import_receipts_op.rs b/crates/optimism/cli/src/commands/import_receipts.rs similarity index 97% rename from bin/reth/src/commands/import_receipts_op.rs rename to crates/optimism/cli/src/commands/import_receipts.rs index d77332f86..042b7df6e 100644 --- a/bin/reth/src/commands/import_receipts_op.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -1,8 +1,8 @@ //! Command that imports OP mainnet receipts from Bedrock datadir, exported via //! . 
-use crate::commands::common::{AccessRights, Environment, EnvironmentArgs}; use clap::Parser; +use reth_cli_commands::common::{AccessRights, Environment, EnvironmentArgs}; use reth_db::tables; use reth_db_api::{database::Database, transaction::DbTx}; use reth_downloaders::{ @@ -13,12 +13,13 @@ use reth_downloaders::{ use reth_execution_types::ExecutionOutcome; use reth_node_core::version::SHORT_VERSION; use reth_optimism_primitives::bedrock_import::is_dup_tx; -use reth_primitives::{Receipts, StaticFileSegment}; +use reth_primitives::Receipts; use reth_provider::{ OriginalValuesKnown, ProviderFactory, StageCheckpointReader, StateWriter, StaticFileProviderFactory, StaticFileWriter, StatsReader, }; use reth_stages::StageId; +use reth_static_file_types::StaticFileSegment; use std::path::{Path, PathBuf}; use tracing::{debug, error, info, trace}; diff --git a/crates/optimism/cli/src/commands/mod.rs b/crates/optimism/cli/src/commands/mod.rs new file mode 100644 index 000000000..373e7802c --- /dev/null +++ b/crates/optimism/cli/src/commands/mod.rs @@ -0,0 +1,4 @@ +/// Helper function to build an import pipeline. +pub mod build_pipeline; +pub mod import; +pub mod import_receipts; diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs new file mode 100644 index 000000000..67d0ccd61 --- /dev/null +++ b/crates/optimism/cli/src/lib.rs @@ -0,0 +1,15 @@ +//! OP-Reth CLI implementation. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(all(not(test), feature = "optimism"), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. +#![cfg(feature = "optimism")] + +/// Optimism CLI commands. +pub mod commands; +pub use commands::{import::ImportOpCommand, import_receipts::ImportReceiptsOpCommand}; diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 0c5439bf7..61aa23bde 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -9,7 +9,7 @@ // The `optimism` feature must be enabled to use this crate. 
#![cfg(feature = "optimism")] -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthereumHardforks, OptimismHardforks}; use reth_consensus::{Consensus, ConsensusError, PostExecutionInput}; use reth_consensus_common::validation::{ validate_against_parent_4844, validate_against_parent_eip1559_base_fee, diff --git a/crates/optimism/consensus/src/validation.rs b/crates/optimism/consensus/src/validation.rs index 8aa00c53c..d7bb7681c 100644 --- a/crates/optimism/consensus/src/validation.rs +++ b/crates/optimism/consensus/src/validation.rs @@ -1,4 +1,4 @@ -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; use reth_primitives::{ gas_spent_by_transactions, proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom, diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 2a60670c1..aa365ab83 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # Reth reth-chainspec.workspace = true +reth-ethereum-forks.workspace = true reth-evm.workspace = true reth-primitives.workspace = true reth-revm.workspace = true diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs new file mode 100644 index 000000000..07491a2df --- /dev/null +++ b/crates/optimism/evm/src/config.rs @@ -0,0 +1,141 @@ +use reth_chainspec::{ChainSpec, OptimismHardfork}; +use reth_ethereum_forks::{EthereumHardfork, Head}; + +/// Returns the spec id at the given timestamp. +/// +/// Note: This is only intended to be used after the merge, when hardforks are activated by +/// timestamp. +pub fn revm_spec_by_timestamp_after_bedrock( + chain_spec: &ChainSpec, + timestamp: u64, +) -> revm_primitives::SpecId { + if chain_spec.fork(OptimismHardfork::Fjord).active_at_timestamp(timestamp) { + revm_primitives::FJORD + } else if chain_spec.fork(OptimismHardfork::Haber).active_at_timestamp(timestamp) { + revm_primitives::HABER + } else if chain_spec.fork(OptimismHardfork::Ecotone).active_at_timestamp(timestamp) { + revm_primitives::ECOTONE + } else if chain_spec.fork(OptimismHardfork::Canyon).active_at_timestamp(timestamp) { + revm_primitives::CANYON + } else if chain_spec.fork(OptimismHardfork::Fermat).active_at_timestamp(timestamp) { + revm_primitives::FERMAT + } else if chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(timestamp) { + revm_primitives::REGOLITH + } else { + revm_primitives::BEDROCK + } +} + +/// return `revm_spec` from spec configuration. 
+pub fn revm_spec(chain_spec: &ChainSpec, block: &Head) -> revm_primitives::SpecId { + if chain_spec.fork(OptimismHardfork::Fjord).active_at_head(block) { + revm_primitives::FJORD + } else if chain_spec.fork(OptimismHardfork::Haber).active_at_head(block) { + revm_primitives::HABER + } else if chain_spec.fork(OptimismHardfork::Ecotone).active_at_head(block) { + revm_primitives::ECOTONE + } else if chain_spec.fork(OptimismHardfork::Canyon).active_at_head(block) { + revm_primitives::CANYON + } else if chain_spec.fork(OptimismHardfork::Fermat).active_at_head(block) { + revm_primitives::FERMAT + } else if chain_spec.fork(OptimismHardfork::Regolith).active_at_head(block) { + revm_primitives::REGOLITH + } else if chain_spec.fork(OptimismHardfork::Bedrock).active_at_head(block) { + revm_primitives::BEDROCK + } else if chain_spec.fork(EthereumHardfork::Prague).active_at_head(block) { + revm_primitives::PRAGUE + } else if chain_spec.fork(EthereumHardfork::Cancun).active_at_head(block) { + revm_primitives::CANCUN + } else if chain_spec.fork(EthereumHardfork::Shanghai).active_at_head(block) { + revm_primitives::SHANGHAI + } else if chain_spec.fork(EthereumHardfork::Paris).active_at_head(block) { + revm_primitives::MERGE + } else if chain_spec.fork(EthereumHardfork::London).active_at_head(block) { + revm_primitives::LONDON + } else if chain_spec.fork(EthereumHardfork::Berlin).active_at_head(block) { + revm_primitives::BERLIN + } else if chain_spec.fork(EthereumHardfork::Istanbul).active_at_head(block) { + revm_primitives::ISTANBUL + } else if chain_spec.fork(EthereumHardfork::Petersburg).active_at_head(block) { + revm_primitives::PETERSBURG + } else if chain_spec.fork(EthereumHardfork::Byzantium).active_at_head(block) { + revm_primitives::BYZANTIUM + } else if chain_spec.fork(EthereumHardfork::SpuriousDragon).active_at_head(block) { + revm_primitives::SPURIOUS_DRAGON + } else if chain_spec.fork(EthereumHardfork::Tangerine).active_at_head(block) { + revm_primitives::TANGERINE + } else if chain_spec.fork(EthereumHardfork::Homestead).active_at_head(block) { + revm_primitives::HOMESTEAD + } else if chain_spec.fork(EthereumHardfork::Frontier).active_at_head(block) { + revm_primitives::FRONTIER + } else { + panic!( + "invalid hardfork chainspec: expected at least one hardfork, got {:?}", + chain_spec.hardforks + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_chainspec::ChainSpecBuilder; + + #[test] + fn test_revm_spec_by_timestamp_after_merge() { + #[inline(always)] + fn op_cs(f: impl FnOnce(ChainSpecBuilder) -> ChainSpecBuilder) -> ChainSpec { + let cs = ChainSpecBuilder::mainnet().chain(reth_chainspec::Chain::from_id(10)); + f(cs).build() + } + assert_eq!( + revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.fjord_activated()), 0), + revm_primitives::FJORD + ); + assert_eq!( + revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.ecotone_activated()), 0), + revm_primitives::ECOTONE + ); + assert_eq!( + revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.canyon_activated()), 0), + revm_primitives::CANYON + ); + assert_eq!( + revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.bedrock_activated()), 0), + revm_primitives::BEDROCK + ); + assert_eq!( + revm_spec_by_timestamp_after_bedrock(&op_cs(|cs| cs.regolith_activated()), 0), + revm_primitives::REGOLITH + ); + } + + #[test] + fn test_to_revm_spec() { + #[inline(always)] + fn op_cs(f: impl FnOnce(ChainSpecBuilder) -> ChainSpecBuilder) -> ChainSpec { + let cs = ChainSpecBuilder::mainnet().chain(reth_chainspec::Chain::from_id(10)); + 
f(cs).build() + } + assert_eq!( + revm_spec(&op_cs(|cs| cs.fjord_activated()), &Head::default()), + revm_primitives::FJORD + ); + assert_eq!( + revm_spec(&op_cs(|cs| cs.ecotone_activated()), &Head::default()), + revm_primitives::ECOTONE + ); + assert_eq!( + revm_spec(&op_cs(|cs| cs.canyon_activated()), &Head::default()), + revm_primitives::CANYON + ); + assert_eq!( + revm_spec(&op_cs(|cs| cs.bedrock_activated()), &Head::default()), + revm_primitives::BEDROCK + ); + assert_eq!( + revm_spec(&op_cs(|cs| cs.regolith_activated()), &Head::default()), + revm_primitives::REGOLITH + ); + } +} diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index bbefdbe67..3ff38e4de 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,30 +1,31 @@ //! Optimism block executor. use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; -use reth_chainspec::{ChainSpec, Hardfork}; +use reth_chainspec::{ChainSpec, EthereumHardforks, OptimismHardfork}; use reth_evm::{ execute::{ BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, BlockValidationError, Executor, ProviderError, }, + system_calls::apply_beacon_root_contract_call, ConfigureEvm, }; use reth_execution_types::ExecutionOutcome; use reth_optimism_consensus::validate_block_post_execution; use reth_primitives::{ - Address, BlockNumber, BlockWithSenders, Header, Receipt, Receipts, TxType, Withdrawals, U256, + Address, BlockNumber, BlockWithSenders, Header, Receipt, Receipts, TxType, U256, }; use reth_prune_types::PruneModes; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, db::states::bundle_state::BundleRetention, - state_change::{apply_beacon_root_contract_call, post_block_balance_increments}, + state_change::post_block_balance_increments, Evm, State, }; use revm::db::states::StorageSlot; use revm_primitives::{ db::{Database, DatabaseCommit}, - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, + BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, ResultAndState, }; use std::{collections::HashMap, str::FromStr, sync::Arc}; use tracing::trace; @@ -56,7 +57,7 @@ where { fn op_executor(&self, db: DB) -> OpBlockExecutor where - DB: Database, + DB: Database + std::fmt::Display>, { OpBlockExecutor::new( self.chain_spec.clone(), @@ -70,25 +71,26 @@ impl BlockExecutorProvider for OpExecutorProvider where EvmConfig: ConfigureEvm, { - type Executor> = OpBlockExecutor; - - type BatchExecutor> = OpBatchExecutor; + type Executor + std::fmt::Display>> = + OpBlockExecutor; + type BatchExecutor + std::fmt::Display>> = + OpBatchExecutor; fn executor(&self, db: DB) -> Self::Executor where - DB: Database, + DB: Database + std::fmt::Display>, { self.op_executor(db) } - fn batch_executor(&self, db: DB, prune_modes: PruneModes) -> Self::BatchExecutor + fn batch_executor(&self, db: DB) -> Self::BatchExecutor where - DB: Database, + DB: Database + std::fmt::Display>, { let executor = self.op_executor(db); OpBatchExecutor { executor, - batch_record: BlockBatchRecord::new(prune_modes), + batch_record: BlockBatchRecord::default(), stats: BlockExecutorStats::default(), } } @@ -120,10 +122,11 @@ where mut evm: Evm<'_, Ext, &mut State>, ) -> Result<(Vec, u64), BlockExecutionError> where - DB: Database, + DB: Database + std::fmt::Display>, { // apply pre execution changes apply_beacon_root_contract_call( + &self.evm_config, &self.chain_spec, block.timestamp, block.number, @@ -133,7 +136,7 @@ where 
// execute transactions let is_regolith = - self.chain_spec.fork(Hardfork::Regolith).active_at_timestamp(block.timestamp); + self.chain_spec.fork(OptimismHardfork::Regolith).active_at_timestamp(block.timestamp); // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism // blocks will always have at least a single transaction in them (the L1 info transaction), @@ -177,14 +180,21 @@ where .transpose() .map_err(|_| OptimismBlockExecutionError::AccountLoadFailed(*sender))?; - EvmConfig::fill_tx_env(evm.tx_mut(), transaction, *sender); + self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); // Execute transaction. let ResultAndState { result, state } = evm.transact().map_err(move |err| { + let new_err = match err { + EVMError::Transaction(e) => EVMError::Transaction(e), + EVMError::Header(e) => EVMError::Header(e), + EVMError::Database(e) => EVMError::Database(e.into()), + EVMError::Custom(e) => EVMError::Custom(e), + EVMError::Precompile(e) => EVMError::Precompile(e), + }; // Ensure hash is calculated for error log, if not already done BlockValidationError::EVM { hash: transaction.recalculate_hash(), - error: err.into(), + error: Box::new(new_err), } })?; @@ -213,7 +223,7 @@ where // this is only set for post-Canyon deposit transactions. deposit_receipt_version: (transaction.is_deposit() && self.chain_spec - .is_fork_active_at_timestamp(Hardfork::Canyon, block.timestamp)) + .is_fork_active_at_timestamp(OptimismHardfork::Canyon, block.timestamp)) .then_some(1), }); } @@ -257,7 +267,7 @@ impl OpBlockExecutor { impl OpBlockExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + std::fmt::Display>, { /// Configures a new evm configuration and block environment for the given block. /// @@ -265,7 +275,7 @@ where fn evm_env_for_block(&self, header: &Header, total_difficulty: U256) -> EnvWithHandlerCfg { let mut cfg = CfgEnvWithHandlerCfg::new(Default::default(), Default::default()); let mut block_env = BlockEnv::default(); - EvmConfig::fill_cfg_and_block_env( + self.executor.evm_config.fill_cfg_and_block_env( &mut cfg, &mut block_env, self.chain_spec(), @@ -317,19 +327,14 @@ where block: &BlockWithSenders, total_difficulty: U256, ) -> Result<(), BlockExecutionError> { - let balance_increments = post_block_balance_increments( - self.chain_spec(), - block.number, - block.difficulty, - block.beneficiary, - block.timestamp, - total_difficulty, - &block.ommers, - block.withdrawals.as_ref().map(Withdrawals::as_ref), - ); + let balance_increments = + post_block_balance_increments(self.chain_spec(), block, total_difficulty); #[cfg(all(feature = "optimism", feature = "opbnb"))] - if self.chain_spec().fork(Hardfork::PreContractForkBlock).transitions_at_block(block.number) + if self + .chain_spec() + .fork(OptimismHardfork::PreContractForkBlock) + .transitions_at_block(block.number) { // WBNBContract WBNB preDeploy contract address let w_bnb_contract_address = @@ -337,11 +342,12 @@ where // GovernanceToken contract address let governance_token_contract_address = Address::from_str("0x4200000000000000000000000000000000000042").unwrap(); - // touch in cache - let mut w_bnb_contract_account = - self.state.load_cache_account(w_bnb_contract_address).unwrap().clone(); - let mut governance_token_account = - self.state.load_cache_account(governance_token_contract_address).unwrap().clone(); + + let w_bnb_contract_account = self + .state + .load_cache_account(w_bnb_contract_address) + .map_err(|err| BlockExecutionError::Other(Box::new(err.into()))) + .unwrap(); // 
change the token symbol and token name let w_bnb_contract_change = w_bnb_contract_account.change( w_bnb_contract_account.account_info().unwrap(), HashMap::from([ @@ -357,18 +363,20 @@ where ), ]) ); + + let governance_token_account = self + .state + .load_cache_account(governance_token_contract_address) + .map_err(|err| BlockExecutionError::Other(Box::new(err.into()))) + .unwrap(); // destroy governance token contract let governance_token_change = governance_token_account.selfdestruct().unwrap(); - if let Some(s) = self.state.transition_state.as_mut() { - let transitions = vec![ - (w_bnb_contract_address, w_bnb_contract_change), - (governance_token_contract_address, governance_token_change), - ]; - s.add_transitions(transitions); - } + self.state.apply_transition(vec![ + (w_bnb_contract_address, w_bnb_contract_change), + (governance_token_contract_address, governance_token_change), + ]); } - // increment balances self.state .increment_balances(balance_increments) @@ -381,7 +389,7 @@ where impl Executor for OpBlockExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + std::fmt::Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; type Output = BlockExecutionOutput; @@ -439,7 +447,7 @@ impl OpBatchExecutor { impl BatchExecutor for OpBatchExecutor where EvmConfig: ConfigureEvm, - DB: Database, + DB: Database + std::fmt::Display>, { type Input<'a> = BlockExecutionInput<'a, BlockWithSenders>; type Output = ExecutionOutcome; @@ -481,6 +489,10 @@ where self.batch_record.set_tip(tip); } + fn set_prune_modes(&mut self, prune_modes: PruneModes) { + self.batch_record.set_prune_modes(prune_modes); + } + fn size_hint(&self) -> Option { Some(self.executor.state.bundle_state.size_hint()) } @@ -574,8 +586,7 @@ mod tests { ); let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); @@ -589,6 +600,7 @@ mod tests { body: vec![tx, tx_deposit], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }, senders: vec![addr, addr], @@ -656,8 +668,7 @@ mod tests { ); let provider = executor_provider(chain_spec); - let mut executor = - provider.batch_executor(StateProviderDatabase::new(&db), PruneModes::none()); + let mut executor = provider.batch_executor(StateProviderDatabase::new(&db)); executor.state_mut().load_cache_account(L1_BLOCK_CONTRACT).unwrap(); @@ -671,6 +682,7 @@ mod tests { body: vec![tx, tx_deposit], ommers: vec![], withdrawals: None, + sidecars: None, requests: None, }, senders: vec![addr, addr], diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index a750c8f4f..d581b4164 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,7 +1,7 @@ //! 
Optimism-specific implementation and utilities for the executor use crate::OptimismBlockExecutionError; -use reth_chainspec::{ChainSpec, Hardfork}; +use reth_chainspec::{ChainSpec, OptimismHardfork}; use reth_execution_errors::BlockExecutionError; use reth_primitives::{address, b256, hex, Address, Block, Bytes, B256, U256}; use revm::{ @@ -191,13 +191,14 @@ impl RethL1BlockInfo for L1BlockInfo { return Ok(U256::ZERO) } - let spec_id = if chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, timestamp) { + let spec_id = if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, timestamp) + { SpecId::FJORD - } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Ecotone, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Ecotone, timestamp) { SpecId::ECOTONE - } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) { SpecId::REGOLITH - } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Bedrock, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Bedrock, timestamp) { SpecId::BEDROCK } else { return Err(OptimismBlockExecutionError::L1BlockInfoError { @@ -214,11 +215,12 @@ impl RethL1BlockInfo for L1BlockInfo { timestamp: u64, input: &[u8], ) -> Result { - let spec_id = if chain_spec.is_fork_active_at_timestamp(Hardfork::Fjord, timestamp) { + let spec_id = if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Fjord, timestamp) + { SpecId::FJORD - } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Regolith, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, timestamp) { SpecId::REGOLITH - } else if chain_spec.is_fork_active_at_timestamp(Hardfork::Bedrock, timestamp) { + } else if chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Bedrock, timestamp) { SpecId::BEDROCK } else { return Err(OptimismBlockExecutionError::L1BlockInfoError { @@ -245,8 +247,9 @@ where // previous block timestamp (heuristically, block time is not perfectly constant at 2s), and the // chain is an optimism chain, then we need to force-deploy the create2 deployer contract. 
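// For example, with Canyon activating at timestamp T and ~2s block times: a block at timestamp T sees the fork active now but not at T - 2, so the force-deployment below runs exactly once, on the first post-Canyon block.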
if chain_spec.is_optimism() && - chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, timestamp) && - !chain_spec.is_fork_active_at_timestamp(Hardfork::Canyon, timestamp.saturating_sub(2)) + chain_spec.is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp) && + !chain_spec + .is_fork_active_at_timestamp(OptimismHardfork::Canyon, timestamp.saturating_sub(2)) { trace!(target: "evm", "Forcing create2 deployer contract deployment on Canyon transition"); @@ -285,6 +288,7 @@ mod tests { body: vec![l1_info_tx], ommers: Vec::default(), withdrawals: None, + sidecars: None, requests: None, }; @@ -307,6 +311,7 @@ mod tests { body: vec![l1_info_tx], ommers: Vec::default(), withdrawals: None, + sidecars: None, requests: None, }; diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index d13168bfe..8a56014c5 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -12,12 +12,14 @@ use reth_chainspec::ChainSpec; use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::{ - revm::{config::revm_spec, env::fill_op_tx_env}, revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, + transaction::FillTxEnv, Address, Head, Header, TransactionSigned, U256, }; use reth_revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; +mod config; +pub use config::{revm_spec, revm_spec_by_timestamp_after_bedrock}; mod execute; pub use execute::*; pub mod l1; @@ -25,6 +27,7 @@ pub use l1::*; mod error; pub use error::OptimismBlockExecutionError; +use revm_primitives::{Bytes, Env, OptimismFields, TxKind}; /// Optimism-related EVM configuration. #[derive(Debug, Default, Clone, Copy)] @@ -32,13 +35,57 @@ pub use error::OptimismBlockExecutionError; pub struct OptimismEvmConfig; impl ConfigureEvmEnv for OptimismEvmConfig { - fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { - let mut buf = Vec::with_capacity(transaction.length_without_header()); - transaction.encode_enveloped(&mut buf); - fill_op_tx_env(tx_env, transaction, sender, buf.into()); + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { + transaction.fill_tx_env(tx_env, sender); + } + + fn fill_tx_env_system_contract_call( + &self, + env: &mut Env, + caller: Address, + contract: Address, + data: Bytes, + ) { + env.tx = TxEnv { + caller, + transact_to: TxKind::Call(contract), + // Explicitly set nonce to None so revm does not do any nonce checks + nonce: None, + gas_limit: 30_000_000, + value: U256::ZERO, + data, + // Setting the gas price to zero enforces that no value is transferred as part of the + // call, and that the call will not count against the block's gas limit + gas_price: U256::ZERO, + // The chain ID check is not relevant here and is disabled if set to None + chain_id: None, + // Setting the gas priority fee to None ensures the effective gas price is derived from + // the `gas_price` field, which we need to be zero + gas_priority_fee: None, + access_list: Vec::new(), + // blob fields can be None for this tx + blob_hashes: Vec::new(), + max_fee_per_blob_gas: None, + authorization_list: None, + optimism: OptimismFields { + source_hash: None, + mint: None, + is_system_transaction: Some(false), + // The L1 fee is not charged for the EIP-4788 transaction, submit zero bytes for the + // enveloped tx size. 
+ enveloped_tx: Some(Bytes::default()), + }, + }; + + // ensure the block gas limit is >= the tx + env.block.gas_limit = U256::from(env.tx.gas_limit); + + // disable the base fee check for this call by setting the base fee to zero + env.block.basefee = U256::ZERO; } fn fill_cfg_env( + &self, cfg_env: &mut CfgEnvWithHandlerCfg, chain_spec: &ChainSpec, header: &Header, @@ -46,7 +93,7 @@ impl ConfigureEvmEnv for OptimismEvmConfig { ) { let spec_id = revm_spec( chain_spec, - Head { + &Head { number: header.number, timestamp: header.timestamp, difficulty: header.difficulty, @@ -99,7 +146,7 @@ mod tests { let chain_spec = ChainSpec::default(); let total_difficulty = U256::ZERO; - OptimismEvmConfig::fill_cfg_and_block_env( + OptimismEvmConfig::default().fill_cfg_and_block_env( &mut cfg_env, &mut block_env, &chain_spec, diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index dc33b657d..59bcc1b1e 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -35,6 +35,8 @@ reth-beacon-consensus.workspace = true reth-optimism-consensus.workspace = true revm-primitives.workspace = true reth-discv5.workspace = true +reth-rpc-eth-types.workspace = true +reth-rpc-eth-api.workspace = true # async async-trait.workspace = true @@ -44,17 +46,22 @@ tracing.workspace = true # misc clap.workspace = true serde.workspace = true -serde_json.workspace = true eyre.workspace = true parking_lot.workspace = true thiserror.workspace = true + +# rpc jsonrpsee.workspace = true +jsonrpsee-types.workspace = true +serde_json.workspace = true [dev-dependencies] reth.workspace = true reth-db.workspace = true -reth-revm = { workspace = true, features = ["test-utils"] } reth-e2e-test-utils.workspace = true +reth-node-builder = { workspace = true, features = ["test-utils"] } +reth-provider = { workspace = true, features = ["test-utils"] } +reth-revm = { workspace = true, features = ["test-utils"] } tokio.workspace = true alloy-primitives.workspace = true alloy-genesis.workspace = true @@ -71,9 +78,11 @@ optimism = [ "reth-beacon-consensus/optimism", "reth-revm/optimism", "reth-auto-seal-consensus/optimism", + "reth-rpc-eth-types/optimism" ] opbnb = [ "reth-primitives/opbnb", "reth-evm-optimism/opbnb", "reth-optimism-payload-builder/opbnb", -] \ No newline at end of file +] +test-utils = ["reth-node-builder/test-utils"] \ No newline at end of file diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 6507d5c92..d0fc9b53e 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -1,4 +1,4 @@ -use reth_chainspec::{ChainSpec, Hardfork}; +use reth_chainspec::{ChainSpec, OptimismHardfork}; use reth_node_api::{ payload::{ validate_parent_beacon_block_root_presence, EngineApiMessageVersion, @@ -69,7 +69,7 @@ pub fn validate_withdrawals_presence( timestamp: u64, has_withdrawals: bool, ) -> Result<(), EngineObjectValidationError> { - let is_shanghai = chain_spec.fork(Hardfork::Canyon).active_at_timestamp(timestamp); + let is_shanghai = chain_spec.fork(OptimismHardfork::Canyon).active_at_timestamp(timestamp); match version { EngineApiMessageVersion::V1 => { diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index d8628dc6b..2ea24da67 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -283,27 +283,17 @@ where // purposefully disable discv4 .disable_discv4_discovery() // apply discovery settings - .apply(|builder| { + .apply(|mut builder| { let 
rlpx_socket = (args.addr, args.port).into(); - let mut builder = args.discovery.apply_to_builder(builder, rlpx_socket); if !args.discovery.disable_discovery { - builder = builder.discovery_v5(reth_discv5::Config::builder(rlpx_socket)); + builder = builder.discovery_v5(args.discovery.discovery_v5_builder( + rlpx_socket, + ctx.chain_spec().bootnodes().unwrap_or_default(), + )); } builder - }) - // ensure we configure discv5 - .map_discv5_config_builder(|builder| { - builder - .add_unsigned_boot_nodes(ctx.chain_spec().bootnodes().unwrap_or_default()) - .lookup_interval(ctx.config().network.discovery.discv5_lookup_interval) - .bootstrap_lookup_interval( - ctx.config().network.discovery.discv5_bootstrap_lookup_interval, - ) - .bootstrap_lookup_countdown( - ctx.config().network.discovery.discv5_bootstrap_lookup_countdown, - ) }); let mut network_config = ctx.build_network_config(network_builder); diff --git a/crates/optimism/node/src/rpc.rs b/crates/optimism/node/src/rpc.rs index 5ae1ba7b2..d7c3f49ef 100644 --- a/crates/optimism/node/src/rpc.rs +++ b/crates/optimism/node/src/rpc.rs @@ -1,13 +1,12 @@ //! Helpers for optimism specific RPC implementations. -use jsonrpsee::types::ErrorObject; +use std::sync::{atomic::AtomicUsize, Arc}; + +use jsonrpsee_types::error::{ErrorObject, INTERNAL_ERROR_CODE}; use reqwest::Client; -use reth_rpc::eth::{ - error::{EthApiError, EthResult}, - traits::RawTransactionForwarder, -}; +use reth_rpc_eth_api::RawTransactionForwarder; +use reth_rpc_eth_types::error::{EthApiError, EthResult}; use reth_rpc_types::ToRpcError; -use std::sync::{atomic::AtomicUsize, Arc}; /// Error type when interacting with the Sequencer #[derive(Debug, thiserror::Error)] @@ -22,11 +21,7 @@ pub enum SequencerRpcError { impl ToRpcError for SequencerRpcError { fn to_rpc_error(&self) -> ErrorObject<'static> { - ErrorObject::owned( - jsonrpsee::types::error::INTERNAL_ERROR_CODE, - self.to_string(), - None::, - ) + ErrorObject::owned(INTERNAL_ERROR_CODE, self.to_string(), None::) } } diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index fa17982cb..92d0ce675 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -5,17 +5,14 @@ use crate::{ payload::{OptimismBuiltPayload, OptimismPayloadBuilderAttributes}, }; use reth_basic_payload_builder::*; -use reth_chainspec::ChainSpec; -use reth_evm::ConfigureEvm; +use reth_chainspec::{ChainSpec, EthereumHardforks, OptimismHardfork}; +use reth_evm::{system_calls::pre_block_beacon_root_contract_call, ConfigureEvm}; use reth_execution_types::ExecutionOutcome; use reth_payload_builder::error::PayloadBuilderError; use reth_primitives::{ constants::{BEACON_NONCE, EMPTY_RECEIPTS, EMPTY_TRANSACTIONS}, eip4844::calculate_excess_blob_gas, - proofs, - revm::env::tx_env_with_recovered, - Block, Hardfork, Header, IntoRecoveredTransaction, Receipt, TxType, EMPTY_OMMER_ROOT_HASH, - U256, + proofs, Block, Header, IntoRecoveredTransaction, Receipt, TxType, EMPTY_OMMER_ROOT_HASH, U256, }; use reth_provider::StateProviderFactory; use reth_revm::database::StateProviderDatabase; @@ -128,11 +125,13 @@ where // apply eip-4788 pre block contract call pre_block_beacon_root_contract_call( &mut db, + &self.evm_config, &chain_spec, - block_number, &initialized_cfg, &initialized_block_env, - &attributes, + block_number, + attributes.payload_attributes.timestamp, + attributes.payload_attributes.parent_beacon_block_root, ) .map_err(|err| { warn!(target: "payload_builder", @@ -140,7 +139,7 @@ where 
%err, "failed to apply beacon root contract call for empty payload" ); - err + PayloadBuilderError::Internal(err.into()) })?; let WithdrawalsOutcome { withdrawals_root, withdrawals } = commit_withdrawals( @@ -214,7 +213,14 @@ where requests_root: None, }; - let block = Block { header, body: vec![], ommers: vec![], withdrawals, requests: None }; + let block = Block { + header, + body: vec![], + ommers: vec![], + withdrawals, + sidecars: None, + requests: None, + }; let sealed_block = block.seal_slow(); Ok(OptimismBuiltPayload::new( @@ -281,18 +287,30 @@ where let block_number = initialized_block_env.number.to::(); - let is_regolith = chain_spec - .is_fork_active_at_timestamp(Hardfork::Regolith, attributes.payload_attributes.timestamp); + let is_regolith = chain_spec.is_fork_active_at_timestamp( + OptimismHardfork::Regolith, + attributes.payload_attributes.timestamp, + ); // apply eip-4788 pre block contract call pre_block_beacon_root_contract_call( &mut db, + &evm_config, &chain_spec, - block_number, &initialized_cfg, &initialized_block_env, - &attributes, - )?; + block_number, + attributes.payload_attributes.timestamp, + attributes.payload_attributes.parent_beacon_block_root, + ) + .map_err(|err| { + warn!(target: "payload_builder", + parent_hash=%parent_block.hash(), + %err, + "failed to apply beacon root contract call for empty payload" + ); + PayloadBuilderError::Internal(err.into()) + })?; // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism // blocks will always have at least a single transaction in them (the L1 info transaction), @@ -323,7 +341,7 @@ where } // Convert the transaction to a [TransactionSignedEcRecovered]. This is - // purely for the purposes of utilizing the [tx_env_with_recovered] function. + // purely for the purposes of utilizing the `evm_config.tx_env`` function. // Deposit transactions do not have signatures, so if the tx is a deposit, this // will just pull in its `from` address. let sequencer_tx = sequencer_tx.clone().try_into_ecrecovered().map_err(|_| { @@ -350,7 +368,7 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env( initialized_cfg.clone(), initialized_block_env.clone(), - tx_env_with_recovered(&sequencer_tx), + evm_config.tx_env(&sequencer_tx), ); let mut evm = evm_config.evm_with_env(&mut db, env); @@ -393,7 +411,7 @@ where // ensures this is only set for post-Canyon deposit transactions. deposit_receipt_version: chain_spec .is_fork_active_at_timestamp( - Hardfork::Canyon, + OptimismHardfork::Canyon, attributes.payload_attributes.timestamp, ) .then_some(1), @@ -429,7 +447,7 @@ where let env = EnvWithHandlerCfg::new_with_cfg_env( initialized_cfg.clone(), initialized_block_env.clone(), - tx_env_with_recovered(&tx), + evm_config.tx_env(&tx), ); // Configure the environment for the block. @@ -573,7 +591,14 @@ where }; // seal the block - let block = Block { header, body: executed_txs, ommers: vec![], withdrawals, requests: None }; + let block = Block { + header, + body: executed_txs, + ommers: vec![], + withdrawals, + sidecars: None, + requests: None, + }; let sealed_block = block.seal_slow(); debug!(target: "payload_builder", ?sealed_block, "sealed built block"); diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index ae6324655..47db0d571 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -3,11 +3,11 @@ //! 
Optimism builder support use alloy_rlp::Encodable; -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_evm_optimism::revm_spec_by_timestamp_after_bedrock; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_primitives::{ - revm::config::revm_spec_by_timestamp_after_merge, revm_primitives::{BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}, Address, BlobTransactionSidecar, Header, SealedBlock, TransactionSigned, Withdrawals, B256, U256, @@ -113,7 +113,7 @@ impl PayloadBuilderAttributes for OptimismPayloadBuilderAttributes { let cfg = CfgEnv::default().with_chain_id(chain_spec.chain().id()); // ensure we're not missing any timestamp based hardforks - let spec_id = revm_spec_by_timestamp_after_merge(chain_spec, self.timestamp()); + let spec_id = revm_spec_by_timestamp_after_bedrock(chain_spec, self.timestamp()); // if the parent block did not have excess blob gas (i.e. it was pre-cancun), but it is // cancun now, we need to set the excess blob gas to the default value diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml new file mode 100644 index 000000000..f7599b4a0 --- /dev/null +++ b/crates/optimism/rpc/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "reth-optimism-rpc" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +# reth +reth-errors.workspace = true +reth-evm.workspace = true +reth-rpc-eth-api.workspace = true +reth-rpc-eth-types.workspace = true +reth-rpc-types.workspace = true +reth-chainspec.workspace = true +reth-provider.workspace = true +reth-tasks = { workspace = true, features = ["rayon"] } +reth-transaction-pool.workspace = true + +# ethereum +alloy-primitives.workspace = true + +# async +parking_lot.workspace = true +tokio.workspace = true \ No newline at end of file diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs new file mode 100644 index 000000000..d580a30e9 --- /dev/null +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -0,0 +1,224 @@ +//! OP-Reth `eth_` endpoint implementation. + +use alloy_primitives::{Address, U64}; +use reth_chainspec::ChainInfo; +use reth_errors::RethResult; +use reth_evm::ConfigureEvm; +use reth_provider::{ + BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, HeaderProvider, StateProviderFactory, +}; +use reth_rpc_eth_api::{ + helpers::{ + Call, EthApiSpec, EthBlocks, EthCall, EthFees, EthSigner, EthState, EthTransactions, + LoadBlock, LoadFee, LoadPendingBlock, LoadReceipt, LoadState, LoadTransaction, + SpawnBlocking, Trace, + }, + RawTransactionForwarder, +}; +use reth_rpc_eth_types::{EthStateCache, PendingBlock}; +use reth_rpc_types::SyncStatus; +use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; +use reth_transaction_pool::TransactionPool; +use std::future::Future; +use tokio::sync::{AcquireError, Mutex, OwnedSemaphorePermit}; + +/// OP-Reth `Eth` API implementation. +/// +/// This type provides the functionality for handling `eth_` related requests. +/// +/// This wraps a default `Eth` implementation, and provides additional functionality where the +/// optimism spec deviates from the default (ethereum) spec, e.g. transaction forwarding to the +/// sequencer, receipts, additional RPC fields for transaction receipts. 
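+/// +/// For example, raw transaction submission can be forwarded to the sequencer through the +/// delegated [`RawTransactionForwarder`], while read-only queries are answered by the wrapped +/// (ethereum) implementation.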
+/// +/// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implementing +/// all the `Eth` helper traits and prerequisite traits. +#[derive(Debug, Clone)] +pub struct OpEthApi<Eth> { + inner: Eth, +} + +impl<Eth> OpEthApi<Eth> { + /// Creates a new `OpEthApi` from the provided `Eth` implementation. + pub const fn new(inner: Eth) -> Self { + Self { inner } + } +} + +impl<Eth: EthApiSpec> EthApiSpec for OpEthApi<Eth> { + fn protocol_version(&self) -> impl Future<Output = RethResult<U64>> + Send { + self.inner.protocol_version() + } + + fn chain_id(&self) -> U64 { + self.inner.chain_id() + } + + fn chain_info(&self) -> RethResult<ChainInfo> { + self.inner.chain_info() + } + + fn accounts(&self) -> Vec<Address>
{ + self.inner.accounts() + } + + fn is_syncing(&self) -> bool { + self.inner.is_syncing() + } + + fn sync_status(&self) -> RethResult { + self.inner.sync_status() + } +} + +impl LoadBlock for OpEthApi { + fn provider(&self) -> impl BlockReaderIdExt { + LoadBlock::provider(&self.inner) + } + + fn cache(&self) -> &reth_rpc_eth_types::EthStateCache { + self.inner.cache() + } +} + +impl LoadPendingBlock for OpEthApi { + fn provider( + &self, + ) -> impl BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory { + self.inner.provider() + } + + fn pool(&self) -> impl TransactionPool { + self.inner.pool() + } + + fn pending_block(&self) -> &Mutex> { + self.inner.pending_block() + } + + fn evm_config(&self) -> &impl ConfigureEvm { + self.inner.evm_config() + } +} + +impl SpawnBlocking for OpEthApi { + fn io_task_spawner(&self) -> impl TaskSpawner { + self.inner.io_task_spawner() + } + + fn tracing_task_pool(&self) -> &BlockingTaskPool { + self.inner.tracing_task_pool() + } + + fn acquire_owned( + &self, + ) -> impl Future> + Send { + self.inner.acquire_owned() + } + + fn acquire_many_owned( + &self, + n: u32, + ) -> impl Future> + Send { + self.inner.acquire_many_owned(n) + } +} + +impl LoadReceipt for OpEthApi { + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } +} + +impl LoadFee for OpEthApi { + fn provider(&self) -> impl reth_provider::BlockIdReader + HeaderProvider + ChainSpecProvider { + LoadFee::provider(&self.inner) + } + + fn cache(&self) -> &EthStateCache { + LoadFee::cache(&self.inner) + } + + fn gas_oracle(&self) -> &reth_rpc_eth_types::GasPriceOracle { + self.inner.gas_oracle() + } + + fn fee_history_cache(&self) -> &reth_rpc_eth_types::FeeHistoryCache { + self.inner.fee_history_cache() + } +} + +impl Call for OpEthApi { + fn call_gas_limit(&self) -> u64 { + self.inner.call_gas_limit() + } + + fn evm_config(&self) -> &impl ConfigureEvm { + self.inner.evm_config() + } +} + +impl LoadState for OpEthApi { + fn provider(&self) -> impl StateProviderFactory { + LoadState::provider(&self.inner) + } + + fn cache(&self) -> &EthStateCache { + LoadState::cache(&self.inner) + } + + fn pool(&self) -> impl TransactionPool { + LoadState::pool(&self.inner) + } +} + +impl LoadTransaction for OpEthApi { + type Pool = Eth::Pool; + + fn provider(&self) -> impl reth_provider::TransactionsProvider { + LoadTransaction::provider(&self.inner) + } + + fn cache(&self) -> &EthStateCache { + LoadTransaction::cache(&self.inner) + } + + fn pool(&self) -> &Self::Pool { + LoadTransaction::pool(&self.inner) + } +} + +impl EthTransactions for OpEthApi { + fn provider(&self) -> impl BlockReaderIdExt { + EthTransactions::provider(&self.inner) + } + + fn raw_tx_forwarder(&self) -> Option> { + self.inner.raw_tx_forwarder() + } + + fn signers(&self) -> &parking_lot::RwLock>> { + self.inner.signers() + } +} + +impl EthBlocks for OpEthApi { + fn provider(&self) -> impl HeaderProvider { + EthBlocks::provider(&self.inner) + } +} + +impl EthState for OpEthApi { + fn max_proof_window(&self) -> u64 { + self.inner.max_proof_window() + } +} + +impl EthCall for OpEthApi {} + +impl EthFees for OpEthApi {} + +impl Trace for OpEthApi { + fn evm_config(&self) -> &impl ConfigureEvm { + self.inner.evm_config() + } +} diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs new file mode 100644 index 000000000..cad90bc42 --- /dev/null +++ b/crates/optimism/rpc/src/lib.rs @@ -0,0 +1,11 @@ +//! OP-Reth RPC support. 
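+//!
+//! A minimal sketch of the delegation pattern used by [`eth::OpEthApi`], with stand-in
+//! trait and type names (`ChainId`, `OpWrapper`, `L1Eth`) rather than reth's actual
+//! helper traits:
+//!
+//! ```
+//! trait ChainId {
+//!     fn chain_id(&self) -> u64;
+//! }
+//!
+//! struct OpWrapper<Eth> {
+//!     inner: Eth,
+//! }
+//!
+//! impl<Eth: ChainId> ChainId for OpWrapper<Eth> {
+//!     // Default behavior: forward every call to the wrapped (ethereum) implementation;
+//!     // op-specific overrides can then be layered on per method.
+//!     fn chain_id(&self) -> u64 {
+//!         self.inner.chain_id()
+//!     }
+//! }
+//!
+//! struct L1Eth;
+//! impl ChainId for L1Eth {
+//!     fn chain_id(&self) -> u64 {
+//!         1
+//!     }
+//! }
+//!
+//! assert_eq!(OpWrapper { inner: L1Eth }.chain_id(), 1);
+//! ```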
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +pub mod eth; diff --git a/crates/payload/basic/src/lib.rs b/crates/payload/basic/src/lib.rs index 26dc06293..d5e3766bd 100644 --- a/crates/payload/basic/src/lib.rs +++ b/crates/payload/basic/src/lib.rs @@ -11,7 +11,7 @@ use crate::metrics::PayloadBuilderMetrics; use futures_core::ready; use futures_util::FutureExt; -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_payload_builder::{ database::CachedReads, error::PayloadBuilderError, KeepPayloadJobAlive, PayloadId, PayloadJob, PayloadJobGenerator, @@ -19,20 +19,17 @@ use reth_payload_builder::{ use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; use reth_primitives::{ constants::{EMPTY_WITHDRAWALS, RETH_CLIENT_VERSION, SLOT_DURATION}, - proofs, BlockNumberOrTag, Bytes, Request, SealedBlock, Withdrawals, B256, U256, + proofs, BlockNumberOrTag, Bytes, SealedBlock, Withdrawals, B256, U256, }; use reth_provider::{ BlockReaderIdExt, BlockSource, CanonStateNotification, ProviderError, StateProviderFactory, }; -use reth_revm::state_change::{ - apply_beacon_root_contract_call, apply_withdrawal_requests_contract_call, - post_block_withdrawals_balance_increments, -}; +use reth_revm::state_change::post_block_withdrawals_balance_increments; use reth_tasks::TaskSpawner; use reth_transaction_pool::TransactionPool; use revm::{ - primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg}, - Database, DatabaseCommit, Evm, State, + primitives::{BlockEnv, CfgEnvWithHandlerCfg}, + Database, State, }; use std::{ fmt, @@ -923,79 +920,6 @@ pub fn commit_withdrawals>( }) } -/// Apply the [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) pre block contract call. -/// -/// This constructs a new [Evm] with the given DB, and environment -/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the pre block contract call. -/// -/// The parent beacon block root used for the call is gathered from the given -/// [`PayloadBuilderAttributes`]. -/// -/// This uses [`apply_beacon_root_contract_call`] to ultimately apply the beacon root contract state -/// change. -pub fn pre_block_beacon_root_contract_call( - db: &mut DB, - chain_spec: &ChainSpec, - block_number: u64, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, - attributes: &Attributes, -) -> Result<(), PayloadBuilderError> -where - DB::Error: std::fmt::Display, - Attributes: PayloadBuilderAttributes, -{ - // apply pre-block EIP-4788 contract call - let mut evm_pre_block = Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build(); - - // initialize a block from the env, because the pre block call needs the block itself - apply_beacon_root_contract_call( - chain_spec, - attributes.timestamp(), - block_number, - attributes.parent_beacon_block_root(), - &mut evm_pre_block, - ) - .map_err(|err| PayloadBuilderError::Internal(err.into())) -} - -/// Apply the [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) post block contract call. 
-/// -/// This constructs a new [Evm] with the given DB, and environment -/// ([`CfgEnvWithHandlerCfg`] and [`BlockEnv`]) to execute the post block contract call. -/// -/// This uses [`apply_withdrawal_requests_contract_call`] to ultimately calculate the -/// [requests](Request). -pub fn post_block_withdrawal_requests_contract_call( - db: &mut DB, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, -) -> Result, PayloadBuilderError> -where - DB::Error: std::fmt::Display, -{ - // apply post-block EIP-7002 contract call - let mut evm_post_block = Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build(); - - // initialize a block from the env, because the post block call needs the block itself - apply_withdrawal_requests_contract_call(&mut evm_post_block) - .map_err(|err| PayloadBuilderError::Internal(err.into())) -} - /// Checks if the new payload is better than the current best. /// /// This compares the total fees of the blocks, higher is better. diff --git a/crates/payload/builder/src/database.rs b/crates/payload/builder/src/database.rs index 340a8510a..03ca50843 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/payload/builder/src/database.rs @@ -35,7 +35,7 @@ use std::{ pub struct CachedReads { accounts: HashMap, contracts: HashMap, - block_hashes: HashMap, + block_hashes: HashMap, } // === impl CachedReads === @@ -114,7 +114,7 @@ impl<'a, DB: DatabaseRef> Database for CachedReadsDbMut<'a, DB> { } } - fn block_hash(&mut self, number: U256) -> Result { + fn block_hash(&mut self, number: u64) -> Result { let code = match self.cached.block_hashes.entry(number) { Entry::Occupied(entry) => *entry.get(), Entry::Vacant(entry) => *entry.insert(self.db.block_hash_ref(number)?), @@ -148,7 +148,7 @@ impl<'a, DB: DatabaseRef> DatabaseRef for CachedReadsDBRef<'a, DB> { self.inner.borrow_mut().storage(address, index) } - fn block_hash_ref(&self, number: U256) -> Result { + fn block_hash_ref(&self, number: u64) -> Result { self.inner.borrow_mut().block_hash(number) } } diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 52029a3c4..996013017 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -20,8 +20,7 @@ pub use traits::{BuiltPayload, PayloadAttributes, PayloadBuilderAttributes}; mod payload; pub use payload::PayloadOrAttributes; -use reth_chainspec::ChainSpec; - +use reth_chainspec::{ChainSpec, EthereumHardforks}; /// The types that are used by the engine API. pub trait PayloadTypes: Send + Sync + Unpin + core::fmt::Debug + Clone { /// The built payload type. 
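The `CachedReads` change in `crates/payload/builder/src/database.rs` above follows revm's switch of the `Database::block_hash` key from `U256` to `u64`. A minimal, self-contained sketch of the same read-through caching idea, using plain std types (the names `cached_block_hash`, `fetch`, and `BlockHash` are illustrative, not reth's API):

```rust
use std::collections::{hash_map::Entry, HashMap};

type BlockHash = [u8; 32];

/// Read-through cache: return the memoized hash if present, otherwise fetch it
/// once from the underlying source and remember it. `fetch` stands in for the
/// wrapped database.
fn cached_block_hash(
    cache: &mut HashMap<u64, BlockHash>,
    number: u64,
    fetch: impl FnOnce(u64) -> BlockHash,
) -> BlockHash {
    match cache.entry(number) {
        Entry::Occupied(entry) => *entry.get(),
        Entry::Vacant(entry) => *entry.insert(fetch(number)),
    }
}

fn main() {
    let mut cache = HashMap::new();
    let first = cached_block_hash(&mut cache, 100, |_| [0xab; 32]); // miss: fetches
    let again = cached_block_hash(&mut cache, 100, |_| unreachable!()); // hit: served from cache
    assert_eq!(first, again);
}
```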
diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index afc190377..e71d2b58d 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -8,7 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_primitives::SealedBlock; use reth_rpc_types::{engine::MaybeCancunPayloadFields, ExecutionPayload, PayloadError}; use reth_rpc_types_compat::engine::payload::try_into_block; diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index 0076e15da..35a15c79d 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -40,6 +40,7 @@ proptest = { workspace = true, optional = true } proptest-arbitrary-interop = { workspace = true, optional = true } [dev-dependencies] +alloy-primitives = { workspace = true, features = ["rand", "rlp"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true proptest-arbitrary-interop.workspace = true diff --git a/crates/primitives/src/blob_sidecar.rs b/crates/primitives-traits/src/blob_sidecar.rs similarity index 55% rename from crates/primitives/src/blob_sidecar.rs rename to crates/primitives-traits/src/blob_sidecar.rs index c99ae5be3..c221ccede 100644 --- a/crates/primitives/src/blob_sidecar.rs +++ b/crates/primitives-traits/src/blob_sidecar.rs @@ -1,14 +1,15 @@ #![allow(missing_docs)] -use alloy_eips::eip4844::{Blob, BlobTransactionSidecar, Bytes48}; +use alloy_eips::eip4844::{Blob, BlobTransactionSidecar, Bytes48, BYTES_PER_BLOB}; use alloy_primitives::B256; use alloy_rlp::{Decodable, Encodable, RlpDecodableWrapper, RlpEncodableWrapper}; -use bytes::BufMut; -use reth_codecs::{derive_arbitrary, Compact}; +use bytes::{Buf, BufMut}; +use derive_more::{Deref, DerefMut, From, IntoIterator}; +use reth_codecs::{derive_arbitrary, main_codec, Compact}; use revm_primitives::U256; use serde::{Deserialize, Serialize}; -use std::ops::{Deref, DerefMut}; +#[main_codec(no_arbitrary)] #[derive_arbitrary] #[derive( Debug, @@ -16,8 +17,10 @@ use std::ops::{Deref, DerefMut}; PartialEq, Eq, Default, - Serialize, - Deserialize, + Deref, + DerefMut, + From, + IntoIterator, RlpEncodableWrapper, RlpDecodableWrapper, )] @@ -40,56 +43,6 @@ impl BlobSidecars { pub fn size(&self) -> usize { self.len() * std::mem::size_of::() } - - /// Get an iterator over the `BlobSidecars`. - pub fn iter(&self) -> std::slice::Iter<'_, BlobSidecar> { - self.0.iter() - } - - /// Get a mutable iterator over the `BlobSidecars`. - pub fn iter_mut(&mut self) -> std::slice::IterMut<'_, BlobSidecar> { - self.0.iter_mut() - } - - /// Convert [Self] into raw vec of `sidecars`. 
- pub fn into_inner(self) -> Vec { - self.0 - } -} - -impl IntoIterator for BlobSidecars { - type Item = BlobSidecar; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } -} - -impl AsRef<[BlobSidecar]> for BlobSidecars { - fn as_ref(&self) -> &[BlobSidecar] { - &self.0 - } -} - -impl Deref for BlobSidecars { - type Target = Vec; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for BlobSidecars { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl From> for BlobSidecars { - fn from(sidecars: Vec) -> Self { - Self(sidecars) - } } #[derive_arbitrary] @@ -175,31 +128,84 @@ impl Compact for BlobSidecar { where B: BufMut + AsMut<[u8]>, { - let mut size = 0; - size += self.blob_transaction_sidecar.blobs.to_compact(buf); - size += self.blob_transaction_sidecar.commitments.to_compact(buf); - size += self.blob_transaction_sidecar.proofs.to_compact(buf); - size += self.block_number.to_compact(buf); - size += self.block_hash.to_compact(buf); - size += self.tx_index.to_compact(buf); - size += self.tx_hash.to_compact(buf); - size - } + let mut len = 0; + + buf.put_u16(self.blob_transaction_sidecar.blobs.len() as u16); + len += 2; + for item in self.blob_transaction_sidecar.blobs { + len += item.to_compact(buf); + } + + buf.put_u16(self.blob_transaction_sidecar.commitments.len() as u16); + len += 2; + for item in self.blob_transaction_sidecar.commitments { + len += item.to_compact(buf); + } + + buf.put_u16(self.blob_transaction_sidecar.proofs.len() as u16); + len += 2; + for item in self.blob_transaction_sidecar.proofs { + len += item.to_compact(buf); + } + + buf.put_slice(self.block_number.as_le_slice()); + len += 32; - fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { - let (blobs, buf) = Vec::::from_compact(buf, len); - let (commitments, buf) = Vec::::from_compact(buf, len); - let (proofs, buf) = Vec::::from_compact(buf, len); + buf.put_slice(self.block_hash.as_slice()); + len += 32; - let blob_transaction_sidecar = BlobTransactionSidecar { blobs, commitments, proofs }; + buf.put_u64(self.tx_index); + len += 8; - let (block_number, buf) = U256::from_compact(buf, len); - let (block_hash, buf) = B256::from_compact(buf, len); - let (tx_index, buf) = u64::from_compact(buf, len); - let (tx_hash, buf) = B256::from_compact(buf, len); + buf.put_slice(self.tx_hash.as_slice()); + len += 32; - let blob_sidecar = - Self { blob_transaction_sidecar, block_number, block_hash, tx_index, tx_hash }; + len + } + + fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { + let blobs_len = buf.get_u16() as usize; + let mut blobs = Vec::with_capacity(blobs_len); + for _ in 0..blobs_len { + let (item, rest) = Blob::from_compact(buf, BYTES_PER_BLOB); + blobs.push(item); + buf = rest; + } + + let commitments_len = buf.get_u16() as usize; + let mut commitments = Vec::with_capacity(commitments_len); + for _ in 0..commitments_len { + let (item, rest) = Bytes48::from_compact(buf, 48); + commitments.push(item); + buf = rest; + } + + let proofs_len = buf.get_u16() as usize; + let mut proofs = Vec::with_capacity(proofs_len); + for _ in 0..proofs_len { + let (item, rest) = Bytes48::from_compact(buf, 48); + proofs.push(item); + buf = rest; + } + + let block_number = U256::from_le_slice(&buf[..32]); + buf = &buf[32..]; + + let block_hash = B256::from_slice(&buf[..32]); + buf = &buf[32..]; + + let tx_index = buf.get_u64(); + + let tx_hash = B256::from_slice(&buf[..32]); + buf = &buf[32..]; + + let blob_sidecar = Self { 
+ blob_transaction_sidecar: BlobTransactionSidecar { blobs, commitments, proofs }, + block_number, + block_hash, + tx_index, + tx_hash, + }; (blob_sidecar, buf) } @@ -208,16 +214,15 @@ impl Compact for BlobSidecar { #[cfg(test)] mod tests { use super::*; - use crate::U256; use alloy_rlp::Decodable; #[test] - fn rlp_encode_blob_sidecar() { + fn test_blob_sidecar_rlp() { let blob_sidecar = BlobSidecar { blob_transaction_sidecar: BlobTransactionSidecar { blobs: vec![], - commitments: vec![], - proofs: vec![], + commitments: vec![Default::default()], + proofs: vec![Default::default()], }, block_number: U256::from(rand::random::()), block_hash: B256::random(), @@ -231,4 +236,24 @@ mod tests { let decoded = BlobSidecar::decode(&mut encoded.as_slice()).unwrap(); assert_eq!(blob_sidecar, decoded); } + + #[test] + fn test_blob_sidecar_compact() { + let blob_sidecar = BlobSidecar { + blob_transaction_sidecar: BlobTransactionSidecar { + blobs: vec![], + commitments: vec![Default::default()], + proofs: vec![Default::default()], + }, + block_number: U256::from(rand::random::()), + block_hash: B256::random(), + tx_index: rand::random::(), + tx_hash: B256::random(), + }; + + let mut buf = vec![]; + let len = blob_sidecar.clone().to_compact(&mut buf); + let (decoded, _) = BlobSidecar::from_compact(&buf, len); + assert_eq!(blob_sidecar, decoded); + } } diff --git a/crates/primitives-traits/src/constants/gas_units.rs b/crates/primitives-traits/src/constants/gas_units.rs index 9916de864..0af0d2c24 100644 --- a/crates/primitives-traits/src/constants/gas_units.rs +++ b/crates/primitives-traits/src/constants/gas_units.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + /// Represents one Kilogas, or `1_000` gas. pub const KILOGAS: u64 = 1_000; @@ -6,3 +8,73 @@ pub const MEGAGAS: u64 = KILOGAS * 1_000; /// Represents one Gigagas, or `1_000_000_000` gas. pub const GIGAGAS: u64 = MEGAGAS * 1_000; + +/// Returns a formatted gas throughput log, showing either: +/// * "Kgas/s", or 1,000 gas per second +/// * "Mgas/s", or 1,000,000 gas per second +/// * "Ggas/s", or 1,000,000,000 gas per second +/// +/// Depending on the magnitude of the gas throughput. +pub fn format_gas_throughput(gas: u64, execution_duration: Duration) -> String { + let gas_per_second = gas as f64 / execution_duration.as_secs_f64(); + if gas_per_second < MEGAGAS as f64 { + format!("{:.2} Kgas/second", gas_per_second / KILOGAS as f64) + } else if gas_per_second < GIGAGAS as f64 { + format!("{:.2} Mgas/second", gas_per_second / MEGAGAS as f64) + } else { + format!("{:.2} Ggas/second", gas_per_second / GIGAGAS as f64) + } +} + +/// Returns a formatted gas log, showing either: +/// * "Kgas", or 1,000 gas +/// * "Mgas", or 1,000,000 gas +/// * "Ggas", or 1,000,000,000 gas +/// +/// Depending on the magnitude of gas. 
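+/// +/// For example, `format_gas(2_500_000)` renders as "2.50 Mgas", and +/// `format_gas(2_500_000_000)` as "2.50 Ggas".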
+pub fn format_gas(gas: u64) -> String { + let gas = gas as f64; + if gas < MEGAGAS as f64 { + format!("{:.2} Kgas", gas / KILOGAS as f64) + } else if gas < GIGAGAS as f64 { + format!("{:.2} Mgas", gas / MEGAGAS as f64) + } else { + format!("{:.2} Ggas", gas / GIGAGAS as f64) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_gas_fmt() { + let gas = 100_000; + let gas_unit = format_gas(gas); + assert_eq!(gas_unit, "100.00 Kgas"); + + let gas = 100_000_000; + let gas_unit = format_gas(gas); + assert_eq!(gas_unit, "100.00 Mgas"); + + let gas = 100_000_000_000; + let gas_unit = format_gas(gas); + assert_eq!(gas_unit, "100.00 Ggas"); + } + + #[test] + fn test_gas_throughput_fmt() { + let duration = Duration::from_secs(1); + let gas = 100_000; + let throughput = format_gas_throughput(gas, duration); + assert_eq!(throughput, "100.00 Kgas/second"); + + let gas = 100_000_000; + let throughput = format_gas_throughput(gas, duration); + assert_eq!(throughput, "100.00 Mgas/second"); + + let gas = 100_000_000_000; + let throughput = format_gas_throughput(gas, duration); + assert_eq!(throughput, "100.00 Ggas/second"); + } +} diff --git a/crates/primitives-traits/src/constants/mod.rs b/crates/primitives-traits/src/constants/mod.rs index f05d86a46..9d6ace2e5 100644 --- a/crates/primitives-traits/src/constants/mod.rs +++ b/crates/primitives-traits/src/constants/mod.rs @@ -3,8 +3,9 @@ use alloy_primitives::{address, b256, Address, B256, U256}; use core::time::Duration; -/// Gas units, for example [`GIGAGAS`](gas_units::GIGAGAS). +/// Gas units, for example [`GIGAGAS`]. pub mod gas_units; +pub use gas_units::{GIGAGAS, KILOGAS, MEGAGAS}; /// The client version: `reth/v{major}.{minor}.{patch}` pub const RETH_CLIENT_VERSION: &str = concat!("reth/v", env!("CARGO_PKG_VERSION")); @@ -108,25 +109,18 @@ pub const FINNEY_TO_WEI: u128 = (GWEI_TO_WEI as u128) * 1_000_000; /// Multiplier for converting ether to wei. pub const ETH_TO_WEI: u128 = FINNEY_TO_WEI * 1000; -/// Multiplier for converting mgas to gas. 
-pub const MGAS_TO_GAS: u64 = 1_000_000u64; - /// The Ethereum mainnet genesis hash: /// `0x0d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3` pub const MAINNET_GENESIS_HASH: B256 = b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); -/// Goerli genesis hash: `0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a` -pub const GOERLI_GENESIS_HASH: B256 = - b256!("bf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a"); - /// Sepolia genesis hash: `0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9` pub const SEPOLIA_GENESIS_HASH: B256 = b256!("25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9"); -/// Holesky genesis hash: `0xff9006519a8ce843ac9c28549d24211420b546e12ce2d170c77a8cca7964f23d` +/// Holesky genesis hash: `0xb5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4` pub const HOLESKY_GENESIS_HASH: B256 = - b256!("ff9006519a8ce843ac9c28549d24211420b546e12ce2d170c77a8cca7964f23d"); + b256!("b5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4"); /// Testnet genesis hash: `0x2f980576711e3617a5e4d83dd539548ec0f7792007d505a3d2e9674833af2d7c` pub const DEV_GENESIS_HASH: B256 = diff --git a/crates/primitives-traits/src/header/mod.rs b/crates/primitives-traits/src/header/mod.rs index fe4e4ba37..bcccc4b0d 100644 --- a/crates/primitives-traits/src/header/mod.rs +++ b/crates/primitives-traits/src/header/mod.rs @@ -494,7 +494,7 @@ impl Decodable for Header { } } -#[cfg(feature = "arbitrary")] +#[cfg(any(test, feature = "test-utils", feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for Header { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { // Generate an arbitrary header, passing it to the generate_valid_header function to make diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 22d4c86a0..bc5daa1cd 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -6,8 +6,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] @@ -16,6 +14,7 @@ mod alloy_compat; /// Common constants. 
pub mod constants; +pub use constants::gas_units::{format_gas, format_gas_throughput}; /// Minimal account pub mod account; @@ -41,6 +40,10 @@ pub use storage::StorageEntry; /// Common header types pub mod header; + +mod blob_sidecar; +pub use blob_sidecar::{BlobSidecar, BlobSidecars}; + #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] pub use header::test_utils; pub use header::{Header, HeaderError, SealedHeader}; diff --git a/crates/primitives-traits/src/withdrawal.rs b/crates/primitives-traits/src/withdrawal.rs index 679c80cab..49d4e5e31 100644 --- a/crates/primitives-traits/src/withdrawal.rs +++ b/crates/primitives-traits/src/withdrawal.rs @@ -65,6 +65,23 @@ impl Withdrawals { } } +impl<'a> IntoIterator for &'a Withdrawals { + type Item = &'a Withdrawal; + type IntoIter = core::slice::Iter<'a, Withdrawal>; + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a> IntoIterator for &'a mut Withdrawals { + type Item = &'a mut Withdrawal; + type IntoIter = core::slice::IterMut<'a, Withdrawal>; + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index f4f94d4a1..7fc3dc922 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -18,9 +18,9 @@ reth-codecs.workspace = true reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true reth-trie-common.workspace = true -reth-chainspec.workspace = true -revm.workspace = true +revm = { workspace = true, optional = true } revm-primitives = { workspace = true, features = ["serde"] } +reth-chainspec = { workspace = true, optional = true } # ethereum alloy-primitives = { workspace = true, features = ["rand", "rlp"] } @@ -63,6 +63,7 @@ thiserror = { workspace = true, optional = true } # eth reth-primitives-traits = { workspace = true, features = ["arbitrary"] } revm-primitives = { workspace = true, features = ["arbitrary"] } +reth-chainspec.workspace = true nybbles = { workspace = true, features = ["arbitrary"] } alloy-trie = { workspace = true, features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } @@ -94,7 +95,7 @@ asm-keccak = ["alloy-primitives/asm-keccak"] arbitrary = [ "reth-primitives-traits/arbitrary", "revm-primitives/arbitrary", - "reth-chainspec/arbitrary", + "reth-chainspec?/arbitrary", "reth-ethereum-forks/arbitrary", "nybbles/arbitrary", "alloy-trie/arbitrary", @@ -115,6 +116,7 @@ opbnb = [ "reth-chainspec/opbnb", "reth-ethereum-forks/opbnb", "revm/opbnb", + "dep:revm" ] bsc = [ "reth-primitives-traits/bsc", @@ -124,6 +126,7 @@ bsc = [ "dep:alloy-chains", "dep:include_dir", "dep:lazy_static", + "dep:revm", "dep:thiserror" ] diff --git a/crates/primitives/benches/validate_blob_tx.rs b/crates/primitives/benches/validate_blob_tx.rs index 40f8b9092..622168bb3 100644 --- a/crates/primitives/benches/validate_blob_tx.rs +++ b/crates/primitives/benches/validate_blob_tx.rs @@ -1,7 +1,7 @@ #![allow(missing_docs)] +use alloy_eips::eip4844::env_settings::EnvKzgSettings; use alloy_primitives::hex; -use c_kzg::KzgSettings; use criterion::{ criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, }; @@ -11,11 +11,8 @@ use proptest::{ test_runner::{RngAlgorithm, TestRng, TestRunner}, }; use proptest_arbitrary_interop::arb; -use reth_primitives::{ - constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, BlobTransactionSidecar, TxEip4844, -}; +use reth_primitives::{BlobTransactionSidecar, TxEip4844}; use 
revm_primitives::MAX_BLOB_NUMBER_PER_BLOCK; -use std::sync::Arc; // constant seed to use for the rng const SEED: [u8; 32] = hex!("1337133713371337133713371337133713371337133713371337133713371337"); @@ -23,11 +20,10 @@ const SEED: [u8; 32] = hex!("133713371337133713371337133713371337133713371337133 /// Benchmarks EIP-4844 blob validation. fn blob_validation(c: &mut Criterion) { let mut group = c.benchmark_group("Blob Transaction KZG validation"); - let kzg_settings = MAINNET_KZG_TRUSTED_SETUP.clone(); for num_blobs in 1..=MAX_BLOB_NUMBER_PER_BLOCK { println!("Benchmarking validation for tx with {num_blobs} blobs"); - validate_blob_tx(&mut group, "ValidateBlob", num_blobs, kzg_settings.clone()); + validate_blob_tx(&mut group, "ValidateBlob", num_blobs, EnvKzgSettings::Default); } } @@ -35,7 +31,7 @@ fn validate_blob_tx( group: &mut BenchmarkGroup<'_, WallTime>, description: &str, num_blobs: u64, - kzg_settings: Arc<KzgSettings>, + kzg_settings: EnvKzgSettings, ) { let setup = || { let config = ProptestConfig::default(); @@ -73,7 +69,9 @@ fn validate_blob_tx( // for now we just use the default SubPoolLimit group.bench_function(group_id, |b| { b.iter_with_setup(setup, |(tx, blob_sidecar)| { - if let Err(err) = std::hint::black_box(tx.validate_blob(&blob_sidecar, &kzg_settings)) { + if let Err(err) = + std::hint::black_box(tx.validate_blob(&blob_sidecar, kzg_settings.get())) + { println!("Validation failed: {err:?}"); } }); diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index e53be8844..1eaedad07 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -59,6 +59,7 @@ impl TryFrom<alloy_rpc_types::Block> for Block { withdrawals: block.withdrawals.map(Into::into), // todo(onbjerg): we don't know if this is added to rpc yet, so for now we leave it as // empty. + sidecars: None, requests: None, }) } @@ -168,7 +169,7 @@ impl TryFrom<alloy_rpc_types::Transaction> for Transaction { .gas .try_into() .map_err(|_| ConversionError::Eip2718Error(RlpError::Overflow.into()))?, - placeholder: tx.to.map(|_| ()), + placeholder: tx.to.map(drop), to: tx.to.unwrap_or_default(), value: tx.value, access_list: tx.access_list.ok_or(ConversionError::MissingAccessList)?, diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 7da0616ce..0e4d3e90e 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -45,6 +45,9 @@ pub struct Block { pub ommers: Vec<Header>
, /// Block withdrawals. pub withdrawals: Option, + // only for bsc + /// Tx sidecars for the block. + pub sidecars: Option, /// Block requests. pub requests: Option, } @@ -57,6 +60,7 @@ impl Block { body: self.body, ommers: self.ommers, withdrawals: self.withdrawals, + sidecars: self.sidecars, requests: self.requests, } } @@ -70,6 +74,7 @@ impl Block { body: self.body, ommers: self.ommers, withdrawals: self.withdrawals, + sidecars: self.sidecars, requests: self.requests, } } @@ -170,7 +175,7 @@ impl Block { } } -#[cfg(feature = "arbitrary")] +#[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for Block { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { // first generate up to 100 txs @@ -188,6 +193,7 @@ impl<'a> arbitrary::Arbitrary<'a> for Block { // for now just generate empty requests, see HACK above requests: u.arbitrary()?, withdrawals: u.arbitrary()?, + sidecars: None, }) } } @@ -288,6 +294,9 @@ pub struct SealedBlock { pub ommers: Vec
, /// Block withdrawals. pub withdrawals: Option, + // only for bsc + /// Tx sidecars for the block. + pub sidecars: Option, /// Block requests. pub requests: Option, } @@ -296,8 +305,8 @@ impl SealedBlock { /// Create a new sealed block instance using the sealed header and block body. #[inline] pub fn new(header: SealedHeader, body: BlockBody) -> Self { - let BlockBody { transactions, ommers, withdrawals, requests, .. } = body; - Self { header, body: transactions, ommers, withdrawals, requests } + let BlockBody { transactions, ommers, withdrawals, sidecars, requests } = body; + Self { header, body: transactions, ommers, withdrawals, sidecars, requests } } /// Header hash. @@ -321,7 +330,7 @@ impl SealedBlock { transactions: self.body, ommers: self.ommers, withdrawals: self.withdrawals, - sidecars: None, + sidecars: self.sidecars, requests: self.requests, }, ) @@ -415,6 +424,7 @@ impl SealedBlock { body: self.body, ommers: self.ommers, withdrawals: self.withdrawals, + sidecars: self.sidecars, requests: self.requests, } } @@ -567,6 +577,7 @@ impl BlockBody { body: self.transactions.clone(), ommers: self.ommers.clone(), withdrawals: self.withdrawals.clone(), + sidecars: self.sidecars.clone(), requests: self.requests.clone(), } } @@ -615,13 +626,13 @@ impl From for BlockBody { transactions: block.body, ommers: block.ommers, withdrawals: block.withdrawals, - sidecars: None, + sidecars: block.sidecars, requests: block.requests, } } } -#[cfg(feature = "arbitrary")] +#[cfg(any(test, feature = "arbitrary"))] impl<'a> arbitrary::Arbitrary<'a> for BlockBody { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { // first generate up to 100 txs diff --git a/crates/primitives/src/constants/eip4844.rs b/crates/primitives/src/constants/eip4844.rs index c1748edf7..48a3aebb3 100644 --- a/crates/primitives/src/constants/eip4844.rs +++ b/crates/primitives/src/constants/eip4844.rs @@ -11,19 +11,7 @@ pub use alloy_eips::eip4844::{ #[cfg(feature = "c-kzg")] mod trusted_setup { use crate::kzg::KzgSettings; - use once_cell::sync::Lazy; - use std::{io::Write, sync::Arc}; - - /// KZG trusted setup - pub static MAINNET_KZG_TRUSTED_SETUP: Lazy> = Lazy::new(|| { - Arc::new( - c_kzg::KzgSettings::load_trusted_setup( - &revm_primitives::kzg::G1_POINTS.0, - &revm_primitives::kzg::G2_POINTS.0, - ) - .expect("failed to load trusted setup"), - ) - }); + use std::io::Write; /// Loads the trusted setup parameters from the given bytes and returns the [`KzgSettings`]. 
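The hunks above retire the crate-local `MAINNET_KZG_TRUSTED_SETUP` in favor of `alloy_eips`' `EnvKzgSettings`. A minimal sketch of the new call shape, assuming only the `validate_blob` signature visible in the benchmark hunk (the helper name `blobs_are_valid` is hypothetical):

```rust
use alloy_eips::eip4844::env_settings::EnvKzgSettings;
use reth_primitives::{BlobTransactionSidecar, TxEip4844};

/// Returns true if the sidecar's blobs, commitments and proofs verify
/// against the transaction's versioned hashes.
fn blobs_are_valid(tx: &TxEip4844, sidecar: &BlobTransactionSidecar) -> bool {
    // `EnvKzgSettings::Default` lazily initializes the bundled mainnet
    // trusted setup; `get()` yields a `&c_kzg::KzgSettings` reference, so
    // the old per-call-site `Arc` clone is no longer needed.
    let settings = EnvKzgSettings::Default;
    tx.validate_blob(sidecar, settings.get()).is_ok()
}
```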
/// @@ -48,14 +36,4 @@ mod trusted_setup { #[error("KZG error: {0:?}")] KzgError(#[from] c_kzg::Error), } - - #[cfg(test)] - mod tests { - use super::*; - - #[test] - fn ensure_load_kzg_settings() { - let _settings = Arc::clone(&MAINNET_KZG_TRUSTED_SETUP); - } - } } diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index b148a6e7a..128659b5c 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -13,9 +13,7 @@ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] -//#![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] @@ -31,13 +29,9 @@ mod compression; pub mod constants; pub mod eip4844; pub mod genesis; -pub mod header; pub mod proofs; mod receipt; -/// Helpers for working with revm -pub mod revm; pub use reth_static_file_types as static_file; -mod blob_sidecar; pub mod parlia; pub mod system_contracts; pub mod transaction; @@ -51,17 +45,17 @@ pub use block::{ #[cfg(feature = "zstd-codec")] pub use compression::*; pub use constants::{ - DEV_GENESIS_HASH, EMPTY_OMMER_ROOT_HASH, GOERLI_GENESIS_HASH, HOLESKY_GENESIS_HASH, - KECCAK_EMPTY, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, + DEV_GENESIS_HASH, EMPTY_OMMER_ROOT_HASH, HOLESKY_GENESIS_HASH, KECCAK_EMPTY, + MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH, }; pub use genesis::{ChainConfig, Genesis, GenesisAccount}; -pub use header::{Header, HeadersDirection, SealedHeader}; pub use receipt::{ gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, }; pub use reth_primitives_traits::{ - logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Log, Request, Requests, - StorageEntry, Withdrawal, Withdrawals, + logs_bloom, Account, BlobSidecar, BlobSidecars, Bytecode, GotExpected, GotExpectedBoxed, + Header, HeaderError, Log, LogData, Request, Requests, SealedHeader, StorageEntry, Withdrawal, + Withdrawals, }; pub use static_file::StaticFileSegment; @@ -82,8 +76,6 @@ pub use transaction::{ LEGACY_TX_TYPE_ID, }; -pub use blob_sidecar::{BlobSidecar, BlobSidecars}; - // Re-exports pub use self::ruint::UintTryTo; pub use alloy_primitives::{ @@ -123,18 +115,9 @@ pub use c_kzg as kzg; #[cfg(feature = "optimism")] mod optimism { pub use crate::transaction::{TxDeposit, DEPOSIT_TX_TYPE_ID}; - pub use reth_chainspec::{ - net::{ - base_nodes, base_testnet_nodes, op_nodes, op_testnet_nodes, OP_BOOTNODES, - OP_TESTNET_BOOTNODES, - }, - BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA, - }; + pub use reth_chainspec::{BASE_MAINNET, BASE_SEPOLIA, OP_MAINNET, OP_SEPOLIA}; #[cfg(feature = "opbnb")] - pub use reth_chainspec::{ - net::{opbnb_mainnet_nodes, opbnb_testnet_nodes}, - OPBNB_MAINNET, OPBNB_TESTNET, - }; + pub use reth_chainspec::{OPBNB_MAINNET, OPBNB_TESTNET}; } #[cfg(feature = "optimism")] @@ -143,11 +126,12 @@ pub use optimism::*; /// Bsc specific re-exports #[cfg(feature = "bsc")] mod bsc { - pub use reth_chainspec::{ - net::{bsc_mainnet_nodes, bsc_testnet_nodes, BSC_MAINNET_BOOTNODES, BSC_TESTNET_BOOTNODES}, - BSC_MAINNET, BSC_TESTNET, - }; + pub use reth_chainspec::{BSC_MAINNET, BSC_TESTNET}; } #[cfg(feature = "bsc")] pub use bsc::*; + +// to make lint happy +#[cfg(any(feature = "bsc", 
feature = "opbnb"))] +use revm as _; diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 5521fedec..ab57be8ff 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -4,9 +4,8 @@ use crate::{ constants::EMPTY_OMMER_ROOT_HASH, keccak256, Header, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Request, TransactionSigned, Withdrawal, B256, }; -use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder}; - use alloy_eips::eip7685::Encodable7685; +use reth_trie_common::root::{ordered_trie_root, ordered_trie_root_with_encoder}; #[cfg(not(feature = "std"))] use alloc::vec::Vec; @@ -50,8 +49,9 @@ pub fn calculate_receipt_root_optimism( // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the // receipts before calculating the receipt root. This was corrected in the Canyon // hardfork. - if chain_spec.is_fork_active_at_timestamp(reth_chainspec::Hardfork::Regolith, timestamp) && - !chain_spec.is_fork_active_at_timestamp(reth_chainspec::Hardfork::Canyon, timestamp) + if chain_spec.is_fork_active_at_timestamp(reth_chainspec::OptimismHardfork::Regolith, timestamp) && + !chain_spec + .is_fork_active_at_timestamp(reth_chainspec::OptimismHardfork::Canyon, timestamp) { let receipts = receipts .iter() @@ -98,8 +98,9 @@ pub fn calculate_receipt_root_no_memo_optimism( // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the // receipts before calculating the receipt root. This was corrected in the Canyon // hardfork. - if chain_spec.is_fork_active_at_timestamp(reth_chainspec::Hardfork::Regolith, timestamp) && - !chain_spec.is_fork_active_at_timestamp(reth_chainspec::Hardfork::Canyon, timestamp) + if chain_spec.is_fork_active_at_timestamp(reth_chainspec::OptimismHardfork::Regolith, timestamp) && + !chain_spec + .is_fork_active_at_timestamp(reth_chainspec::OptimismHardfork::Canyon, timestamp) { let receipts = receipts .iter() @@ -139,7 +140,7 @@ mod tests { use alloy_genesis::GenesisAccount; use alloy_primitives::{b256, Address, LogData}; use alloy_rlp::Decodable; - use reth_chainspec::{GOERLI, HOLESKY, MAINNET, SEPOLIA}; + use reth_chainspec::{HOLESKY, MAINNET, SEPOLIA}; use reth_trie_common::root::{state_root_ref_unhashed, state_root_unhashed}; use std::collections::HashMap; @@ -534,14 +535,6 @@ mod tests { "mainnet state root mismatch" ); - let expected_goerli_state_root = - b256!("5d6cded585e73c4e322c30c2f782a336316f17dd85a4863b9d838d2d4b8b3008"); - let calculated_goerli_state_root = state_root_ref_unhashed(&GOERLI.genesis.alloc); - assert_eq!( - expected_goerli_state_root, calculated_goerli_state_root, - "goerli state root mismatch" - ); - let expected_sepolia_state_root = b256!("5eb6e371a698b8d68f665192350ffcecbbbf322916f4b51bd79bb6887da3f494"); let calculated_sepolia_state_root = state_root_ref_unhashed(&SEPOLIA.genesis.alloc); diff --git a/crates/primitives/src/revm/config.rs b/crates/primitives/src/revm/config.rs deleted file mode 100644 index 0824418c2..000000000 --- a/crates/primitives/src/revm/config.rs +++ /dev/null @@ -1,396 +0,0 @@ -use reth_chainspec::ChainSpec; -use reth_ethereum_forks::{Hardfork, Head}; - -/// Returns the spec id at the given timestamp. -/// -/// Note: This is only intended to be used after the merge, when hardforks are activated by -/// timestamp. 
-pub fn revm_spec_by_timestamp_after_merge( - chain_spec: &ChainSpec, - timestamp: u64, -) -> revm_primitives::SpecId { - #[cfg(feature = "optimism")] - if chain_spec.is_optimism() { - return if chain_spec.fork(Hardfork::Fjord).active_at_timestamp(timestamp) { - revm_primitives::FJORD - } else if chain_spec.fork(Hardfork::Haber).active_at_timestamp(timestamp) { - revm_primitives::HABER - } else if chain_spec.fork(Hardfork::Ecotone).active_at_timestamp(timestamp) { - revm_primitives::ECOTONE - } else if chain_spec.fork(Hardfork::Canyon).active_at_timestamp(timestamp) { - revm_primitives::CANYON - } else if chain_spec.fork(Hardfork::Fermat).active_at_timestamp(timestamp) { - revm_primitives::FERMAT - } else if chain_spec.fork(Hardfork::Regolith).active_at_timestamp(timestamp) { - revm_primitives::REGOLITH - } else { - revm_primitives::BEDROCK - } - } - - #[cfg(feature = "bsc")] - if chain_spec.is_bsc() { - if chain_spec.is_haber_fix_active_at_timestamp(timestamp) { - return revm_primitives::HABER_FIX - } else if chain_spec.is_haber_active_at_timestamp(timestamp) { - return revm_primitives::HABER - } else if chain_spec.is_cancun_active_at_timestamp(timestamp) { - return revm_primitives::CANCUN - } else if chain_spec.is_feynman_fix_active_at_timestamp(timestamp) { - return revm_primitives::FEYNMAN_FIX - } else if chain_spec.is_feynman_active_at_timestamp(timestamp) { - return revm_primitives::FEYNMAN - } else if chain_spec.is_kepler_active_at_timestamp(timestamp) { - return revm_primitives::KEPLER - } else { - return revm_primitives::SHANGHAI - } - } - - if chain_spec.is_prague_active_at_timestamp(timestamp) { - revm_primitives::PRAGUE - } else if chain_spec.is_cancun_active_at_timestamp(timestamp) { - revm_primitives::CANCUN - } else if chain_spec.is_shanghai_active_at_timestamp(timestamp) { - revm_primitives::SHANGHAI - } else { - revm_primitives::MERGE - } -} - -/// return `revm_spec` from spec configuration. 
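The two resolvers removed here differ in their inputs: one is valid strictly post-merge (timestamp-activated forks only), the other consults a full `Head`. A hypothetical pair of call sites, compiling only against the removed module, with `Head` fields taken from the tests further down:

```rust
use reth_chainspec::ChainSpec;
use reth_ethereum_forks::Head;
use reth_primitives::Header;
use revm_primitives::SpecId;

/// Resolves the revm `SpecId` twice: by timestamp only (post-merge path),
/// and from a `Head`, which also consults block number and total difficulty.
fn resolve_both(chain_spec: &ChainSpec, header: &Header) -> (SpecId, SpecId) {
    let post_merge = revm_spec_by_timestamp_after_merge(chain_spec, header.timestamp);
    let general = revm_spec(
        chain_spec,
        Head { number: header.number, timestamp: header.timestamp, ..Default::default() },
    );
    (post_merge, general)
}
```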
-pub fn revm_spec(chain_spec: &ChainSpec, block: Head) -> revm_primitives::SpecId { - #[cfg(feature = "optimism")] - if chain_spec.is_optimism() { - if chain_spec.fork(Hardfork::Fjord).active_at_head(&block) { - return revm_primitives::FJORD - } else if chain_spec.fork(Hardfork::Haber).active_at_head(&block) { - return revm_primitives::HABER - } else if chain_spec.fork(Hardfork::Ecotone).active_at_head(&block) { - return revm_primitives::ECOTONE - } else if chain_spec.fork(Hardfork::Canyon).active_at_head(&block) { - return revm_primitives::CANYON - } else if chain_spec.fork(Hardfork::Fermat).active_at_head(&block) { - return revm_primitives::FERMAT - } else if chain_spec.fork(Hardfork::Regolith).active_at_head(&block) { - return revm_primitives::REGOLITH - } else if chain_spec.fork(Hardfork::Bedrock).active_at_head(&block) { - return revm_primitives::BEDROCK - } - } - - #[cfg(feature = "bsc")] - if chain_spec.is_bsc() { - if chain_spec.fork(Hardfork::HaberFix).active_at_head(&block) { - return revm_primitives::HABER_FIX - } else if chain_spec.fork(Hardfork::Haber).active_at_head(&block) { - return revm_primitives::HABER - } else if chain_spec.fork(Hardfork::Cancun).active_at_head(&block) { - return revm_primitives::CANCUN - } else if chain_spec.fork(Hardfork::FeynmanFix).active_at_head(&block) { - return revm_primitives::FEYNMAN_FIX - } else if chain_spec.fork(Hardfork::Feynman).active_at_head(&block) { - return revm_primitives::FEYNMAN - } else if chain_spec.fork(Hardfork::Kepler).active_at_head(&block) { - return revm_primitives::KEPLER - } else if chain_spec.fork(Hardfork::Shanghai).active_at_head(&block) { - return revm_primitives::SHANGHAI - } else if chain_spec.fork(Hardfork::HertzFix).active_at_head(&block) { - return revm_primitives::HERTZ_FIX - } else if chain_spec.fork(Hardfork::Hertz).active_at_head(&block) { - return revm_primitives::HERTZ - } else if chain_spec.fork(Hardfork::London).active_at_head(&block) { - return revm_primitives::LONDON - } else if chain_spec.fork(Hardfork::Berlin).active_at_head(&block) { - return revm_primitives::BERLIN - } else if chain_spec.fork(Hardfork::Plato).active_at_head(&block) { - return revm_primitives::PLATO - } else if chain_spec.fork(Hardfork::Luban).active_at_head(&block) { - return revm_primitives::LUBAN - } else if chain_spec.fork(Hardfork::Planck).active_at_head(&block) { - return revm_primitives::PLANCK - } else if chain_spec.fork(Hardfork::Gibbs).active_at_head(&block) { - // bsc mainnet and testnet have different order for Moran, Nano and Gibbs - return if chain_spec.fork(Hardfork::Moran).active_at_head(&block) { - revm_primitives::GIBBS - } else if chain_spec.fork(Hardfork::Nano).active_at_head(&block) { - revm_primitives::NANO - } else { - revm_primitives::EULER - } - } else if chain_spec.fork(Hardfork::Euler).active_at_head(&block) { - return revm_primitives::EULER - } else if chain_spec.fork(Hardfork::Bruno).active_at_head(&block) { - return revm_primitives::BRUNO - } else if chain_spec.fork(Hardfork::MirrorSync).active_at_head(&block) { - return revm_primitives::MIRROR_SYNC - } else if chain_spec.fork(Hardfork::Niels).active_at_head(&block) { - return revm_primitives::NIELS - } else if chain_spec.fork(Hardfork::Ramanujan).active_at_head(&block) { - return revm_primitives::RAMANUJAN - } else if chain_spec.fork(Hardfork::MuirGlacier).active_at_head(&block) { - return revm_primitives::MUIR_GLACIER - } else if chain_spec.fork(Hardfork::Istanbul).active_at_head(&block) { - return revm_primitives::ISTANBUL - } else if 
chain_spec.fork(Hardfork::Petersburg).active_at_head(&block) { - return revm_primitives::PETERSBURG - } else if chain_spec.fork(Hardfork::Constantinople).active_at_head(&block) { - return revm_primitives::CONSTANTINOPLE - } else if chain_spec.fork(Hardfork::Byzantium).active_at_head(&block) { - return revm_primitives::BYZANTIUM - } else if chain_spec.fork(Hardfork::Homestead).active_at_head(&block) { - return revm_primitives::HOMESTEAD - } else if chain_spec.fork(Hardfork::Frontier).active_at_head(&block) { - return revm_primitives::FRONTIER - } - } - - if chain_spec.fork(Hardfork::Prague).active_at_head(&block) { - revm_primitives::PRAGUE - } else if chain_spec.fork(Hardfork::Cancun).active_at_head(&block) { - revm_primitives::CANCUN - } else if chain_spec.fork(Hardfork::Shanghai).active_at_head(&block) { - revm_primitives::SHANGHAI - } else if chain_spec.fork(Hardfork::Paris).active_at_head(&block) { - revm_primitives::MERGE - } else if chain_spec.fork(Hardfork::London).active_at_head(&block) { - revm_primitives::LONDON - } else if chain_spec.fork(Hardfork::Berlin).active_at_head(&block) { - revm_primitives::BERLIN - } else if chain_spec.fork(Hardfork::Istanbul).active_at_head(&block) { - revm_primitives::ISTANBUL - } else if chain_spec.fork(Hardfork::Petersburg).active_at_head(&block) { - revm_primitives::PETERSBURG - } else if chain_spec.fork(Hardfork::Byzantium).active_at_head(&block) { - revm_primitives::BYZANTIUM - } else if chain_spec.fork(Hardfork::SpuriousDragon).active_at_head(&block) { - revm_primitives::SPURIOUS_DRAGON - } else if chain_spec.fork(Hardfork::Tangerine).active_at_head(&block) { - revm_primitives::TANGERINE - } else if chain_spec.fork(Hardfork::Homestead).active_at_head(&block) { - revm_primitives::HOMESTEAD - } else if chain_spec.fork(Hardfork::Frontier).active_at_head(&block) { - revm_primitives::FRONTIER - } else { - panic!( - "invalid hardfork chainspec: expected at least one hardfork, got {:?}", - chain_spec.hardforks - ) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::U256; - use reth_chainspec::{ChainSpecBuilder, MAINNET}; - - #[test] - fn test_revm_spec_by_timestamp_after_merge() { - assert_eq!( - revm_spec_by_timestamp_after_merge( - &ChainSpecBuilder::mainnet().cancun_activated().build(), - 0 - ), - revm_primitives::CANCUN - ); - assert_eq!( - revm_spec_by_timestamp_after_merge( - &ChainSpecBuilder::mainnet().shanghai_activated().build(), - 0 - ), - revm_primitives::SHANGHAI - ); - assert_eq!( - revm_spec_by_timestamp_after_merge(&ChainSpecBuilder::mainnet().build(), 0), - revm_primitives::MERGE - ); - #[cfg(feature = "optimism")] - { - #[inline(always)] - fn op_cs(f: impl FnOnce(ChainSpecBuilder) -> ChainSpecBuilder) -> ChainSpec { - let cs = ChainSpecBuilder::mainnet().chain(reth_chainspec::Chain::from_id(10)); - f(cs).build() - } - assert_eq!( - revm_spec_by_timestamp_after_merge(&op_cs(|cs| cs.fjord_activated()), 0), - revm_primitives::FJORD - ); - assert_eq!( - revm_spec_by_timestamp_after_merge(&op_cs(|cs| cs.ecotone_activated()), 0), - revm_primitives::ECOTONE - ); - assert_eq!( - revm_spec_by_timestamp_after_merge(&op_cs(|cs| cs.canyon_activated()), 0), - revm_primitives::CANYON - ); - assert_eq!( - revm_spec_by_timestamp_after_merge(&op_cs(|cs| cs.bedrock_activated()), 0), - revm_primitives::BEDROCK - ); - assert_eq!( - revm_spec_by_timestamp_after_merge(&op_cs(|cs| cs.regolith_activated()), 0), - revm_primitives::REGOLITH - ); - } - } - - #[test] - fn test_to_revm_spec() { - assert_eq!( - 
revm_spec(&ChainSpecBuilder::mainnet().cancun_activated().build(), Head::default()), - revm_primitives::CANCUN - ); - assert_eq!( - revm_spec(&ChainSpecBuilder::mainnet().shanghai_activated().build(), Head::default()), - revm_primitives::SHANGHAI - ); - assert_eq!( - revm_spec(&ChainSpecBuilder::mainnet().paris_activated().build(), Head::default()), - revm_primitives::MERGE - ); - assert_eq!( - revm_spec(&ChainSpecBuilder::mainnet().london_activated().build(), Head::default()), - revm_primitives::LONDON - ); - assert_eq!( - revm_spec(&ChainSpecBuilder::mainnet().berlin_activated().build(), Head::default()), - revm_primitives::BERLIN - ); - assert_eq!( - revm_spec(&ChainSpecBuilder::mainnet().istanbul_activated().build(), Head::default()), - revm_primitives::ISTANBUL - ); - assert_eq!( - revm_spec(&ChainSpecBuilder::mainnet().petersburg_activated().build(), Head::default()), - revm_primitives::PETERSBURG - ); - assert_eq!( - revm_spec(&ChainSpecBuilder::mainnet().byzantium_activated().build(), Head::default()), - revm_primitives::BYZANTIUM - ); - assert_eq!( - revm_spec( - &ChainSpecBuilder::mainnet().spurious_dragon_activated().build(), - Head::default() - ), - revm_primitives::SPURIOUS_DRAGON - ); - assert_eq!( - revm_spec( - &ChainSpecBuilder::mainnet().tangerine_whistle_activated().build(), - Head::default() - ), - revm_primitives::TANGERINE - ); - assert_eq!( - revm_spec(&ChainSpecBuilder::mainnet().homestead_activated().build(), Head::default()), - revm_primitives::HOMESTEAD - ); - assert_eq!( - revm_spec(&ChainSpecBuilder::mainnet().frontier_activated().build(), Head::default()), - revm_primitives::FRONTIER - ); - #[cfg(feature = "optimism")] - { - #[inline(always)] - fn op_cs(f: impl FnOnce(ChainSpecBuilder) -> ChainSpecBuilder) -> ChainSpec { - let cs = ChainSpecBuilder::mainnet().chain(reth_chainspec::Chain::from_id(10)); - f(cs).build() - } - assert_eq!( - revm_spec(&op_cs(|cs| cs.fjord_activated()), Head::default()), - revm_primitives::FJORD - ); - assert_eq!( - revm_spec(&op_cs(|cs| cs.ecotone_activated()), Head::default()), - revm_primitives::ECOTONE - ); - assert_eq!( - revm_spec(&op_cs(|cs| cs.canyon_activated()), Head::default()), - revm_primitives::CANYON - ); - assert_eq!( - revm_spec(&op_cs(|cs| cs.bedrock_activated()), Head::default()), - revm_primitives::BEDROCK - ); - assert_eq!( - revm_spec(&op_cs(|cs| cs.regolith_activated()), Head::default()), - revm_primitives::REGOLITH - ); - } - } - - #[test] - fn test_eth_spec() { - assert_eq!( - revm_spec(&MAINNET, Head { timestamp: 1710338135, ..Default::default() }), - revm_primitives::CANCUN - ); - assert_eq!( - revm_spec(&MAINNET, Head { timestamp: 1681338455, ..Default::default() }), - revm_primitives::SHANGHAI - ); - - assert_eq!( - revm_spec( - &MAINNET, - Head { - total_difficulty: U256::from(58_750_000_000_000_000_000_010_u128), - difficulty: U256::from(10_u128), - ..Default::default() - } - ), - revm_primitives::MERGE - ); - // TTD trumps the block number - assert_eq!( - revm_spec( - &MAINNET, - Head { - number: 15537394 - 10, - total_difficulty: U256::from(58_750_000_000_000_000_000_010_u128), - difficulty: U256::from(10_u128), - ..Default::default() - } - ), - revm_primitives::MERGE - ); - assert_eq!( - revm_spec(&MAINNET, Head { number: 15537394 - 10, ..Default::default() }), - revm_primitives::LONDON - ); - assert_eq!( - revm_spec(&MAINNET, Head { number: 12244000 + 10, ..Default::default() }), - revm_primitives::BERLIN - ); - assert_eq!( - revm_spec(&MAINNET, Head { number: 12244000 - 10, ..Default::default() 
}), - revm_primitives::ISTANBUL - ); - assert_eq!( - revm_spec(&MAINNET, Head { number: 7280000 + 10, ..Default::default() }), - revm_primitives::PETERSBURG - ); - assert_eq!( - revm_spec(&MAINNET, Head { number: 7280000 - 10, ..Default::default() }), - revm_primitives::BYZANTIUM - ); - assert_eq!( - revm_spec(&MAINNET, Head { number: 2675000 + 10, ..Default::default() }), - revm_primitives::SPURIOUS_DRAGON - ); - assert_eq!( - revm_spec(&MAINNET, Head { number: 2675000 - 10, ..Default::default() }), - revm_primitives::TANGERINE - ); - assert_eq!( - revm_spec(&MAINNET, Head { number: 1150000 + 10, ..Default::default() }), - revm_primitives::HOMESTEAD - ); - assert_eq!( - revm_spec(&MAINNET, Head { number: 1150000 - 10, ..Default::default() }), - revm_primitives::FRONTIER - ); - } -} diff --git a/crates/primitives/src/revm/env.rs b/crates/primitives/src/revm/env.rs deleted file mode 100644 index b2f30ec5b..000000000 --- a/crates/primitives/src/revm/env.rs +++ /dev/null @@ -1,389 +0,0 @@ -use crate::{ - recover_signer_unchecked, - revm_primitives::{BlockEnv, Env, TxEnv}, - Address, Bytes, Header, Transaction, TransactionSignedEcRecovered, TxKind, B256, U256, -}; -use reth_chainspec::{Chain, ChainSpec}; - -use alloy_eips::{eip4788::BEACON_ROOTS_ADDRESS, eip7002::WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS}; -#[cfg(feature = "optimism")] -use revm_primitives::OptimismFields; - -#[cfg(feature = "bsc")] -use revm_primitives::env::BscFields; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - -/// Fill block environment from Block. -pub fn fill_block_env( - block_env: &mut BlockEnv, - chain_spec: &ChainSpec, - header: &Header, - after_merge: bool, -) { - let coinbase = block_coinbase(chain_spec, header, after_merge); - fill_block_env_with_coinbase(block_env, header, after_merge, coinbase); -} - -/// Fill block environment with coinbase. -#[inline] -pub fn fill_block_env_with_coinbase( - block_env: &mut BlockEnv, - header: &Header, - after_merge: bool, - coinbase: Address, -) { - block_env.number = U256::from(header.number); - block_env.coinbase = coinbase; - block_env.timestamp = U256::from(header.timestamp); - if after_merge { - block_env.prevrandao = Some(header.mix_hash); - block_env.difficulty = U256::ZERO; - } else { - block_env.difficulty = header.difficulty; - block_env.prevrandao = None; - } - block_env.basefee = U256::from(header.base_fee_per_gas.unwrap_or_default()); - block_env.gas_limit = U256::from(header.gas_limit); - - // EIP-4844 excess blob gas of this block, introduced in Cancun - if let Some(excess_blob_gas) = header.excess_blob_gas { - block_env.set_blob_excess_gas_and_price(excess_blob_gas); - } -} - -/// Return the coinbase address for the given header and chain spec. -pub fn block_coinbase(chain_spec: &ChainSpec, header: &Header, after_merge: bool) -> Address { - // Clique consensus fills the EXTRA_SEAL (last 65 bytes) of the extra data with the - // signer's signature. - // - // On the genesis block, the extra data is filled with zeros, so we should not attempt to - // recover the signer on the genesis block. - // - // From EIP-225: - // - // * `EXTRA_SEAL`: Fixed number of extra-data suffix bytes reserved for signer seal. - // * 65 bytes fixed as signatures are based on the standard `secp256k1` curve. - // * Filled with zeros on genesis block. 
- if chain_spec.chain == Chain::goerli() && !after_merge && header.number > 0 { - recover_header_signer(header).unwrap_or_else(|err| { - panic!( - "Failed to recover goerli Clique Consensus signer from header ({}, {}) using extradata {}: {:?}", - header.number, header.hash_slow(), header.extra_data, err - ) - }) - } else { - header.beneficiary - } -} - -/// Error type for recovering Clique signer from a header. -#[derive(Debug, thiserror_no_std::Error)] -pub enum CliqueSignerRecoveryError { - /// Header extradata is too short. - #[error("Invalid extra data length")] - InvalidExtraData, - /// Recovery failed. - #[error("Invalid signature: {0}")] - InvalidSignature(#[from] secp256k1::Error), -} - -/// Recover the account from signed header per clique consensus rules. -pub fn recover_header_signer(header: &Header) -> Result { - let extra_data_len = header.extra_data.len(); - // Fixed number of extra-data suffix bytes reserved for signer signature. - // 65 bytes fixed as signatures are based on the standard secp256k1 curve. - // Filled with zeros on genesis block. - let signature_start_byte = extra_data_len - 65; - let signature: [u8; 65] = header.extra_data[signature_start_byte..] - .try_into() - .map_err(|_| CliqueSignerRecoveryError::InvalidExtraData)?; - let seal_hash = { - let mut header_to_seal = header.clone(); - header_to_seal.extra_data = Bytes::from(header.extra_data[..signature_start_byte].to_vec()); - header_to_seal.hash_slow() - }; - - // TODO: this is currently unchecked recovery, does this need to be checked w.r.t EIP-2? - recover_signer_unchecked(&signature, &seal_hash.0) - .map_err(CliqueSignerRecoveryError::InvalidSignature) -} - -/// Returns a new [`TxEnv`] filled with the transaction's data. -pub fn tx_env_with_recovered(transaction: &TransactionSignedEcRecovered) -> TxEnv { - let mut tx_env = TxEnv::default(); - - #[cfg(not(feature = "optimism"))] - fill_tx_env(&mut tx_env, transaction.as_ref(), transaction.signer()); - - #[cfg(feature = "optimism")] - { - let mut envelope_buf = Vec::with_capacity(transaction.length_without_header()); - transaction.encode_enveloped(&mut envelope_buf); - fill_op_tx_env( - &mut tx_env, - transaction.as_ref(), - transaction.signer(), - envelope_buf.into(), - ); - } - - tx_env -} - -/// Fill transaction environment with the EIP-4788 system contract message data. -/// -/// This requirements for the beacon root contract call defined by -/// [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) are: -/// -/// At the start of processing any execution block where `block.timestamp >= FORK_TIMESTAMP` (i.e. -/// before processing any transactions), call [`BEACON_ROOTS_ADDRESS`] as -/// [`SYSTEM_ADDRESS`](alloy_eips::eip4788::SYSTEM_ADDRESS) with the 32-byte input of -/// `header.parent_beacon_block_root`. This will trigger the `set()` routine of the beacon roots -/// contract. -pub fn fill_tx_env_with_beacon_root_contract_call(env: &mut Env, parent_beacon_block_root: B256) { - fill_tx_env_with_system_contract_call( - env, - alloy_eips::eip4788::SYSTEM_ADDRESS, - BEACON_ROOTS_ADDRESS, - parent_beacon_block_root.0.into(), - ); -} - -/// Fill transaction environment with the EIP-7002 withdrawal requests contract message data. -// -/// This requirement for the withdrawal requests contract call defined by -/// [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002) is: -// -/// At the end of processing any execution block where `block.timestamp >= FORK_TIMESTAMP` (i.e. 
-/// after processing all transactions and after performing the block body withdrawal requests -/// validations), call the contract as `SYSTEM_ADDRESS`. -pub fn fill_tx_env_with_withdrawal_requests_contract_call(env: &mut Env) { - fill_tx_env_with_system_contract_call( - env, - alloy_eips::eip7002::SYSTEM_ADDRESS, - WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS, - Bytes::new(), - ); -} - -/// Fill transaction environment with the system caller and the system contract address and message -/// data. -/// -/// This is a system operation and therefore: -/// * the call must execute to completion -/// * the call does not count against the block’s gas limit -/// * the call does not follow the EIP-1559 burn semantics - no value should be transferred as part -/// of the call -/// * if no code exists at the provided address, the call will fail silently -fn fill_tx_env_with_system_contract_call( - env: &mut Env, - caller: Address, - contract: Address, - data: Bytes, -) { - env.tx = TxEnv { - caller, - transact_to: TxKind::Call(contract), - // Explicitly set nonce to None so revm does not do any nonce checks - nonce: None, - gas_limit: 30_000_000, - value: U256::ZERO, - data, - // Setting the gas price to zero enforces that no value is transferred as part of the call, - // and that the call will not count against the block's gas limit - gas_price: U256::ZERO, - // The chain ID check is not relevant here and is disabled if set to None - chain_id: None, - // Setting the gas priority fee to None ensures the effective gas price is derived from the - // `gas_price` field, which we need to be zero - gas_priority_fee: None, - access_list: Vec::new(), - // blob fields can be None for this tx - blob_hashes: Vec::new(), - max_fee_per_blob_gas: None, - #[cfg(feature = "optimism")] - optimism: OptimismFields { - source_hash: None, - mint: None, - is_system_transaction: Some(false), - // The L1 fee is not charged for the EIP-4788 transaction, submit zero bytes for the - // enveloped tx size. - enveloped_tx: Some(Bytes::default()), - }, - #[cfg(feature = "bsc")] - bsc: BscFields { is_system_transaction: Some(false) }, - }; - - // ensure the block gas limit is >= the tx - env.block.gas_limit = U256::from(env.tx.gas_limit); - - // disable the base fee check for this call by setting the base fee to zero - env.block.basefee = U256::ZERO; -} - -/// Fill transaction environment from [`TransactionSignedEcRecovered`]. -#[cfg(not(feature = "optimism"))] -pub fn fill_tx_env_with_recovered(tx_env: &mut TxEnv, transaction: &TransactionSignedEcRecovered) { - fill_tx_env(tx_env, transaction.as_ref(), transaction.signer()); -} - -/// Fill transaction environment from [`TransactionSignedEcRecovered`] and the given envelope. -#[cfg(feature = "optimism")] -pub fn fill_tx_env_with_recovered( - tx_env: &mut TxEnv, - transaction: &TransactionSignedEcRecovered, - envelope: Bytes, -) { - fill_op_tx_env(tx_env, transaction.as_ref(), transaction.signer(), envelope); -} - -/// Fill transaction environment from a [Transaction] and the given sender address. 
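Migration note for the free functions being deleted here: their per-variant logic reappears later in this diff as the `FillTxEnv` trait in crates/primitives/src/transaction/compat.rs. A hypothetical before/after call site, assuming the crate-root re-export of `FillTxEnv` and the `Deref` from `TransactionSignedEcRecovered` to `TransactionSigned`:

```rust
use reth_primitives::{FillTxEnv, TransactionSignedEcRecovered};
use revm_primitives::TxEnv;

/// Builds a `TxEnv` from a recovered transaction using the new trait.
fn to_tx_env(tx: &TransactionSignedEcRecovered) -> TxEnv {
    let mut env = TxEnv::default();
    // Previously: fill_tx_env(&mut env, tx.as_ref(), tx.signer());
    // `TransactionSignedEcRecovered` derefs to `TransactionSigned`, which
    // implements `FillTxEnv` in the new compat module.
    tx.fill_tx_env(&mut env, tx.signer());
    env
}
```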
-pub fn fill_tx_env(tx_env: &mut TxEnv, transaction: T, sender: Address) -where - T: AsRef, -{ - tx_env.caller = sender; - match transaction.as_ref() { - Transaction::Legacy(tx) => { - tx_env.gas_limit = tx.gas_limit; - tx_env.gas_price = U256::from(tx.gas_price); - tx_env.gas_priority_fee = None; - tx_env.transact_to = match tx.to { - TxKind::Call(to) => TxKind::Call(to), - TxKind::Create => TxKind::Create, - }; - tx_env.value = tx.value; - tx_env.data = tx.input.clone(); - tx_env.chain_id = tx.chain_id; - tx_env.nonce = Some(tx.nonce); - tx_env.access_list.clear(); - tx_env.blob_hashes.clear(); - tx_env.max_fee_per_blob_gas.take(); - } - Transaction::Eip2930(tx) => { - tx_env.gas_limit = tx.gas_limit; - tx_env.gas_price = U256::from(tx.gas_price); - tx_env.gas_priority_fee = None; - tx_env.transact_to = match tx.to { - TxKind::Call(to) => TxKind::Call(to), - TxKind::Create => TxKind::Create, - }; - tx_env.value = tx.value; - tx_env.data = tx.input.clone(); - tx_env.chain_id = Some(tx.chain_id); - tx_env.nonce = Some(tx.nonce); - tx_env.access_list = tx - .access_list - .0 - .iter() - .map(|l| { - (l.address, l.storage_keys.iter().map(|k| U256::from_be_bytes(k.0)).collect()) - }) - .collect(); - tx_env.blob_hashes.clear(); - tx_env.max_fee_per_blob_gas.take(); - } - Transaction::Eip1559(tx) => { - tx_env.gas_limit = tx.gas_limit; - tx_env.gas_price = U256::from(tx.max_fee_per_gas); - tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); - tx_env.transact_to = match tx.to { - TxKind::Call(to) => TxKind::Call(to), - TxKind::Create => TxKind::Create, - }; - tx_env.value = tx.value; - tx_env.data = tx.input.clone(); - tx_env.chain_id = Some(tx.chain_id); - tx_env.nonce = Some(tx.nonce); - tx_env.access_list = tx - .access_list - .0 - .iter() - .map(|l| { - (l.address, l.storage_keys.iter().map(|k| U256::from_be_bytes(k.0)).collect()) - }) - .collect(); - tx_env.blob_hashes.clear(); - tx_env.max_fee_per_blob_gas.take(); - } - Transaction::Eip4844(tx) => { - tx_env.gas_limit = tx.gas_limit; - tx_env.gas_price = U256::from(tx.max_fee_per_gas); - tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); - tx_env.transact_to = TxKind::Call(tx.to); - tx_env.value = tx.value; - tx_env.data = tx.input.clone(); - tx_env.chain_id = Some(tx.chain_id); - tx_env.nonce = Some(tx.nonce); - tx_env.access_list = tx - .access_list - .0 - .iter() - .map(|l| { - (l.address, l.storage_keys.iter().map(|k| U256::from_be_bytes(k.0)).collect()) - }) - .collect(); - tx_env.blob_hashes.clone_from(&tx.blob_versioned_hashes); - tx_env.max_fee_per_blob_gas = Some(U256::from(tx.max_fee_per_blob_gas)); - } - #[cfg(feature = "optimism")] - Transaction::Deposit(tx) => { - tx_env.access_list.clear(); - tx_env.gas_limit = tx.gas_limit; - tx_env.gas_price = U256::ZERO; - tx_env.gas_priority_fee = None; - tx_env.transact_to = tx.to; - tx_env.value = tx.value; - tx_env.data = tx.input.clone(); - tx_env.chain_id = None; - tx_env.nonce = None; - } - } -} - -/// Fill transaction environment from a [Transaction], envelope, and the given sender address. 
-#[cfg(feature = "optimism")] -#[inline(always)] -pub fn fill_op_tx_env>( - tx_env: &mut TxEnv, - transaction: T, - sender: Address, - envelope: Bytes, -) { - fill_tx_env(tx_env, &transaction, sender); - match transaction.as_ref() { - Transaction::Deposit(tx) => { - tx_env.optimism = OptimismFields { - source_hash: Some(tx.source_hash), - mint: tx.mint, - is_system_transaction: Some(tx.is_system_transaction), - enveloped_tx: Some(envelope), - }; - } - _ => { - tx_env.optimism = OptimismFields { - source_hash: None, - mint: None, - is_system_transaction: Some(false), - enveloped_tx: Some(envelope), - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_chainspec::GOERLI; - - #[test] - fn test_recover_genesis_goerli_signer() { - // just ensures that `block_coinbase` does not panic on the genesis block - let chain_spec = GOERLI.clone(); - let header = chain_spec.genesis_header(); - let block_coinbase = block_coinbase(&chain_spec, &header, false); - assert_eq!(block_coinbase, header.beneficiary); - } -} diff --git a/crates/primitives/src/revm/mod.rs b/crates/primitives/src/revm/mod.rs deleted file mode 100644 index 9937a209b..000000000 --- a/crates/primitives/src/revm/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -//! Helpers for working with revm. - -/// Reth block execution/validation configuration and constants -pub mod config; - -/// The `env` module provides utility methods for filling revm transaction and block environments. -/// -/// It includes functions to fill transaction and block environments with relevant data, prepare -/// the block and transaction environments for system contract calls, and recover the signer from -/// Clique-formatted extra data in ethereum headers. -pub mod env; diff --git a/crates/primitives/src/system_contracts/mod.rs b/crates/primitives/src/system_contracts/mod.rs index 782032ea8..10ac4877e 100644 --- a/crates/primitives/src/system_contracts/mod.rs +++ b/crates/primitives/src/system_contracts/mod.rs @@ -1,14 +1,13 @@ #![cfg(feature = "bsc")] #![allow(missing_docs)] +use crate::{hex, Address, BlockNumber}; use alloy_chains::Chain; -use alloy_primitives::BlockNumber; use include_dir::{include_dir, Dir}; use lazy_static::lazy_static; use reth_chainspec::{ChainSpec, BSC_MAINNET, BSC_TESTNET}; -use reth_ethereum_forks::Hardfork; -use revm::primitives::{Address, Bytecode}; -use revm_primitives::hex; +use reth_ethereum_forks::BscHardfork; +use revm_primitives::Bytecode; use std::collections::HashMap; use thiserror::Error; @@ -52,15 +51,15 @@ lazy_static! { ]; /// mainnet system contracts: hardfork -> address -> Bytecode - pub(crate) static ref BSC_MAINNET_CONTRACTS: HashMap>> = + pub(crate) static ref BSC_MAINNET_CONTRACTS: HashMap>> = read_all_system_contracts(BSC_MAINNET.as_ref()); /// testnet system contracts: hardfork -> address -> Bytecode - pub(crate) static ref BSC_TESTNET_CONTRACTS: HashMap>> = + pub(crate) static ref BSC_TESTNET_CONTRACTS: HashMap>> = read_all_system_contracts(BSC_TESTNET.as_ref()); /// qa system contracts: hardfork -> address -> Bytecode - pub(crate) static ref BSC_QA_CONTRACTS: HashMap>> = + pub(crate) static ref BSC_QA_CONTRACTS: HashMap>> = read_all_system_contracts(BSC_TESTNET.as_ref()); } @@ -138,42 +137,42 @@ pub enum SystemContractError { } /// Return hardforks which contain upgrades of system contracts. 
-fn hardforks_with_system_contracts() -> Vec { +fn hardforks_with_system_contracts() -> Vec { vec![ - Hardfork::Bruno, - Hardfork::Euler, - Hardfork::Feynman, - Hardfork::FeynmanFix, - Hardfork::Gibbs, - Hardfork::Kepler, - Hardfork::Luban, - Hardfork::MirrorSync, - Hardfork::Moran, - Hardfork::Niels, - Hardfork::Planck, - Hardfork::Plato, - Hardfork::Ramanujan, - Hardfork::HaberFix, + BscHardfork::Bruno, + BscHardfork::Euler, + BscHardfork::Feynman, + BscHardfork::FeynmanFix, + BscHardfork::Gibbs, + BscHardfork::Kepler, + BscHardfork::Luban, + BscHardfork::MirrorSync, + BscHardfork::Moran, + BscHardfork::Niels, + BscHardfork::Planck, + BscHardfork::Plato, + BscHardfork::Ramanujan, + BscHardfork::HaberFix, ] } /// Load the folder names which stores the codes of system contracts. -fn hardfork_to_dir_name(hardfork: &Hardfork) -> Result { +fn hardfork_to_dir_name(hardfork: &BscHardfork) -> Result { let name = match hardfork { - Hardfork::Bruno => "bruno", - Hardfork::Euler => "euler", - Hardfork::Feynman => "feynman", - Hardfork::FeynmanFix => "feynman_fix", - Hardfork::Gibbs => "gibbs", - Hardfork::Kepler => "kepler", - Hardfork::Luban => "luban", - Hardfork::MirrorSync => "mirror_sync", - Hardfork::Moran => "moran", - Hardfork::Niels => "niels", - Hardfork::Planck => "planck", - Hardfork::Plato => "plato", - Hardfork::Ramanujan => "ramanujan", - Hardfork::HaberFix => "haber_fix", + BscHardfork::Bruno => "bruno", + BscHardfork::Euler => "euler", + BscHardfork::Feynman => "feynman", + BscHardfork::FeynmanFix => "feynman_fix", + BscHardfork::Gibbs => "gibbs", + BscHardfork::Kepler => "kepler", + BscHardfork::Luban => "luban", + BscHardfork::MirrorSync => "mirror_sync", + BscHardfork::Moran => "moran", + BscHardfork::Niels => "niels", + BscHardfork::Planck => "planck", + BscHardfork::Plato => "plato", + BscHardfork::Ramanujan => "ramanujan", + BscHardfork::HaberFix => "haber_fix", _ => { return Err(SystemContractError::InvalidHardfork); } @@ -184,7 +183,7 @@ fn hardfork_to_dir_name(hardfork: &Hardfork) -> Result HashMap>> { +) -> HashMap>> { let dir: String; if spec.chain.eq(&Chain::bsc_mainnet()) { dir = "mainnet".to_string(); @@ -220,7 +219,7 @@ fn read_all_system_contracts( let bytes = hex::decode(body).unwrap(); inner_map.insert(c.address.to_string(), Some(Bytecode::new_raw(bytes.into()))); } - outer_map.insert(hardfork, inner_map); + outer_map.insert(hardfork.name().to_string(), inner_map); } outer_map @@ -229,7 +228,7 @@ fn read_all_system_contracts( /// Get byte codes for a specific hardfork. 
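Usage sketch for the fork-name keying introduced here: `get_upgrade_system_contracts` (shown just below) now resolves bundled bytecode via `fork.name()` strings rather than `Hardfork` values. A hedged example of collecting upgrades at a block boundary; the parameter order (spec, block number, block time, parent block time) is assumed from the hunk below, and the state write is elided:

```rust
use reth_chainspec::BSC_MAINNET;

/// Collects the system-contract upgrades that activate at this block.
fn upgrades_at(block_number: u64, block_time: u64, parent_block_time: u64) {
    let contracts = get_upgrade_system_contracts(
        BSC_MAINNET.as_ref(),
        block_number,
        block_time,
        parent_block_time,
    )
    .expect("bundled fork directories are complete");
    for (address, code) in contracts {
        if let Some(code) = code {
            // A real caller would write `code` into `address`'s account here.
            let _ = (address, code);
        }
    }
}
```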
fn get_system_contract_codes( spec: &ChainSpec, - hardfork: &Hardfork, + hardfork: &str, ) -> Result>, SystemContractError> { return if spec.chain.eq(&Chain::bsc_mainnet()) { if let Some(m) = BSC_MAINNET_CONTRACTS.get(hardfork) { @@ -262,11 +261,11 @@ pub fn get_upgrade_system_contracts( parent_block_time: u64, ) -> Result>, SystemContractError> { let mut m = HashMap::new(); - for (name, condition) in &spec.hardforks { + for (fork, condition) in spec.hardforks.forks_iter() { if condition.transitions_at_block(block_number) || condition.transitions_at_timestamp(block_time, parent_block_time) { - if let Ok(contracts) = get_system_contract_codes(spec, name) { + if let Ok(contracts) = get_system_contract_codes(spec, fork.name()) { contracts.iter().for_each(|(k, v)| { let address = Address::parse_checksummed(k.clone(), None).unwrap(); m.insert(address, v.clone()); @@ -287,7 +286,7 @@ mod tests { #[test] fn test_get_system_contract_code() { - let res = get_system_contract_codes(&BSC_MAINNET, &Hardfork::Feynman).unwrap(); + let res = get_system_contract_codes(&BSC_MAINNET, BscHardfork::Feynman.name()).unwrap(); assert!(!res.is_empty()); let bytes = res.get(STAKE_HUB_CONTRACT).unwrap(); diff --git a/crates/primitives/src/transaction/compat.rs b/crates/primitives/src/transaction/compat.rs new file mode 100644 index 000000000..1f9c72be4 --- /dev/null +++ b/crates/primitives/src/transaction/compat.rs @@ -0,0 +1,104 @@ +use crate::{Address, Transaction, TransactionSigned, TxKind, U256}; +use revm_primitives::TxEnv; + +/// Implements behaviour to fill a [`TxEnv`] from another transaction. +pub trait FillTxEnv { + /// Fills [`TxEnv`] with an [`Address`] and transaction. + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address); +} + +impl FillTxEnv for TransactionSigned { + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { + #[cfg(feature = "optimism")] + let envelope = { + let mut envelope = Vec::with_capacity(self.length_without_header()); + self.encode_enveloped(&mut envelope); + envelope + }; + + tx_env.caller = sender; + match self.as_ref() { + Transaction::Legacy(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = tx.chain_id; + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clear(); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + } + Transaction::Eip2930(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + } + Transaction::Eip1559(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + } + Transaction::Eip4844(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = 
Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = TxKind::Call(tx.to); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clone_from(&tx.blob_versioned_hashes); + tx_env.max_fee_per_blob_gas = Some(U256::from(tx.max_fee_per_blob_gas)); + } + #[cfg(feature = "optimism")] + Transaction::Deposit(tx) => { + tx_env.access_list.clear(); + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::ZERO; + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = None; + tx_env.nonce = None; + tx_env.optimism = revm_primitives::OptimismFields { + source_hash: Some(tx.source_hash), + mint: tx.mint, + is_system_transaction: Some(tx.is_system_transaction), + enveloped_tx: Some(envelope.into()), + }; + return; + } + } + + #[cfg(feature = "optimism")] + if !self.is_deposit() { + tx_env.optimism = revm_primitives::OptimismFields { + source_hash: None, + mint: None, + is_system_transaction: Some(false), + enveloped_tx: Some(envelope.into()), + } + } + } +} diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index c23d454f8..b6f8ba72c 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -32,6 +32,7 @@ pub use sidecar::generate_blob_sidecar; pub use sidecar::BlobTransactionValidationError; pub use sidecar::{BlobTransaction, BlobTransactionSidecar}; +pub use compat::FillTxEnv; pub use signature::{extract_chain_id, Signature}; pub use tx_type::{ TxType, EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, @@ -39,6 +40,7 @@ pub use tx_type::{ pub use variant::TransactionSignedVariant; mod access_list; +mod compat; mod eip1559; mod eip2930; mod eip4844; @@ -75,18 +77,6 @@ pub(crate) static PARALLEL_SENDER_RECOVERY_THRESHOLD: Lazy<usize> = _ => 5, }); -/// Minimum length of a rlp-encoded legacy transaction. -pub const MIN_LENGTH_LEGACY_TX_ENCODED: usize = 10; -/// Minimum length of a rlp-encoded eip2930 transaction. -pub const MIN_LENGTH_EIP2930_TX_ENCODED: usize = 14; -/// Minimum length of a rlp-encoded eip1559 transaction. -pub const MIN_LENGTH_EIP1559_TX_ENCODED: usize = 15; -/// Minimum length of a rlp-encoded eip4844 transaction. -pub const MIN_LENGTH_EIP4844_TX_ENCODED: usize = 37; -/// Minimum length of a rlp-encoded deposit transaction. -#[cfg(feature = "optimism")] -pub const MIN_LENGTH_DEPOSIT_TX_ENCODED: usize = 65; - /// A raw transaction. /// /// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718). @@ -1478,6 +1468,16 @@ impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { if tx_eip_4844.to != Address::default() { Some(()) } else { None }; } + #[cfg(feature = "optimism")] + // Both `Some(0)` and `None` values are encoded as an empty string byte. This introduces + // ambiguity in roundtrip tests. Patch the mint value of the deposit transaction here, so that + // it's `None` if zero. 
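The ambiguity the comment above describes (addressed by the patch just below) is easy to demonstrate: in RLP, zero and an absent optional both serialize to the single empty-string byte `0x80`, so `Some(0)` cannot survive an encode/decode round trip. A quick self-contained check using `alloy_rlp`, as elsewhere in the crate:

```rust
fn main() {
    // 0u64 is RLP-encoded as the empty byte string, whose header is 0x80,
    // the very same byte an omitted optional field is written as.
    assert_eq!(alloy_rlp::encode(0u64), vec![0x80]);
}
```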
+ if let Transaction::Deposit(ref mut tx_deposit) = transaction { + if tx_deposit.mint == Some(0) { + tx_deposit.mint = None; + } + } + let signature = Signature::arbitrary(u)?; #[cfg(feature = "optimism")] @@ -1617,12 +1617,10 @@ mod tests { hex, sign_message, transaction::{ from_compact_zstd_unaware, signature::Signature, to_compact_ztd_unaware, TxEip1559, - TxKind, TxLegacy, MIN_LENGTH_EIP1559_TX_ENCODED, MIN_LENGTH_EIP2930_TX_ENCODED, - MIN_LENGTH_EIP4844_TX_ENCODED, MIN_LENGTH_LEGACY_TX_ENCODED, - PARALLEL_SENDER_RECOVERY_THRESHOLD, + TxKind, TxLegacy, PARALLEL_SENDER_RECOVERY_THRESHOLD, }, Address, Bytes, Transaction, TransactionSigned, TransactionSignedEcRecovered, - TransactionSignedNoHash, TxEip2930, TxEip4844, B256, U256, + TransactionSignedNoHash, B256, U256, }; use alloy_primitives::{address, b256, bytes}; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; @@ -1963,106 +1961,6 @@ mod tests { assert_eq!(sender, address!("7e9e359edf0dbacf96a9952fa63092d919b0842b")); } - #[test] - fn min_length_encoded_legacy_transaction() { - let transaction = TxLegacy::default(); - let signature = Signature::default(); - - let signed_tx = TransactionSigned::from_transaction_and_signature( - Transaction::Legacy(transaction), - signature, - ); - - let encoded = &alloy_rlp::encode(signed_tx); - assert_eq!( - if cfg!(feature = "optimism") { - hex!("c9808080808080808080") - } else { - hex!("c98080808080801b8080") - }, - &encoded[..] - ); - assert_eq!(MIN_LENGTH_LEGACY_TX_ENCODED, encoded.len()); - - TransactionSigned::decode(&mut &encoded[..]).unwrap(); - } - - #[test] - fn min_length_encoded_eip2930_transaction() { - let transaction = TxEip2930::default(); - let signature = Signature::default(); - - let signed_tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip2930(transaction), - signature, - ); - - let encoded = &alloy_rlp::encode(signed_tx); - assert_eq!(hex!("8d01cb80808080808080c0808080"), encoded[..]); - assert_eq!(MIN_LENGTH_EIP2930_TX_ENCODED, encoded.len()); - - TransactionSigned::decode(&mut &encoded[..]).unwrap(); - } - - #[test] - fn min_length_encoded_eip1559_transaction() { - let transaction = TxEip1559::default(); - let signature = Signature::default(); - - let signed_tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip1559(transaction), - signature, - ); - - let encoded = &alloy_rlp::encode(signed_tx); - assert_eq!(hex!("8e02cc8080808080808080c0808080"), encoded[..]); - assert_eq!(MIN_LENGTH_EIP1559_TX_ENCODED, encoded.len()); - - TransactionSigned::decode(&mut &encoded[..]).unwrap(); - } - - #[test] - fn min_length_encoded_eip4844_transaction() { - let transaction = TxEip4844::default(); - let signature = Signature::default(); - - let signed_tx = TransactionSigned::from_transaction_and_signature( - Transaction::Eip4844(transaction), - signature, - ); - - let encoded = alloy_rlp::encode(signed_tx); - assert_eq!( - hex!("a403e280808080809400000000000000000000000000000000000000008080c080c0808080"), - encoded[..] 
- ); - assert_eq!(MIN_LENGTH_EIP4844_TX_ENCODED, encoded.len()); - - TransactionSigned::decode(&mut &encoded[..]).unwrap(); - } - - #[cfg(feature = "optimism")] - #[test] - fn min_length_encoded_deposit_transaction() { - use super::MIN_LENGTH_DEPOSIT_TX_ENCODED; - use crate::TxDeposit; - - let transaction = TxDeposit::default(); - let signature = Signature::default(); - - let signed_tx = TransactionSigned::from_transaction_and_signature( - Transaction::Deposit(transaction), - signature, - ); - - let encoded = &alloy_rlp::encode(signed_tx); - - assert_eq!(b"\xb8?~\xf8<\xa0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x94\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x80\x80\x80\x80\x80\x80", &encoded[..]); - assert_eq!(MIN_LENGTH_DEPOSIT_TX_ENCODED, encoded.len()); - - TransactionSigned::decode(&mut &encoded[..]).unwrap(); - } - #[test] fn transaction_signed_no_hash_zstd_codec() { // will use same signature everywhere. diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index c45683ce7..2c6f4598a 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -281,14 +281,16 @@ impl BlobTransaction { /// Generates a [`BlobTransactionSidecar`] structure containing blobs, commitments, and proofs. #[cfg(all(feature = "c-kzg", any(test, feature = "arbitrary")))] pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar { - use crate::constants::eip4844::MAINNET_KZG_TRUSTED_SETUP; + use alloy_eips::eip4844::env_settings::EnvKzgSettings; use c_kzg::{KzgCommitment, KzgProof}; - let kzg_settings = MAINNET_KZG_TRUSTED_SETUP.clone(); + let kzg_settings = EnvKzgSettings::Default; let commitments: Vec = blobs .iter() - .map(|blob| KzgCommitment::blob_to_kzg_commitment(&blob.clone(), &kzg_settings).unwrap()) + .map(|blob| { + KzgCommitment::blob_to_kzg_commitment(&blob.clone(), kzg_settings.get()).unwrap() + }) .map(|commitment| commitment.to_bytes()) .collect(); @@ -296,7 +298,7 @@ pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar .iter() .zip(commitments.iter()) .map(|(blob, commitment)| { - KzgProof::compute_blob_kzg_proof(blob, commitment, &kzg_settings).unwrap() + KzgProof::compute_blob_kzg_proof(blob, commitment, kzg_settings.get()).unwrap() }) .map(|proof| proof.to_bytes()) .collect(); diff --git a/crates/prune/prune/Cargo.toml b/crates/prune/prune/Cargo.toml index b5d9059c9..2f2a37d5b 100644 --- a/crates/prune/prune/Cargo.toml +++ b/crates/prune/prune/Cargo.toml @@ -35,6 +35,7 @@ thiserror.workspace = true itertools.workspace = true rayon.workspace = true tokio.workspace = true +rustc-hash.workspace = true [dev-dependencies] # reth diff --git a/crates/prune/prune/src/event.rs b/crates/prune/prune/src/event.rs index 7007e3f47..95a90d762 100644 --- a/crates/prune/prune/src/event.rs +++ b/crates/prune/prune/src/event.rs @@ -1,6 +1,6 @@ use alloy_primitives::BlockNumber; use reth_prune_types::{PruneProgress, PruneSegment}; -use std::{collections::BTreeMap, time::Duration}; +use std::time::Duration; /// An event emitted by a [Pruner][crate::Pruner]. 
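Downstream of the `stats` reshape in the `Finished` variant below: listeners now receive an ordered `Vec` of `(segment, entries pruned, progress)` tuples instead of a `BTreeMap`. A hedged sketch of a consumer, assuming `PrunerEvent` in scope and the `EventStream` listener accessor shown in pruner.rs (the function name is hypothetical):

```rust
use futures_util::{Stream, StreamExt};

/// Logs every finished prune run from the pruner's event stream.
async fn log_prune_runs(mut events: impl Stream<Item = PrunerEvent> + Unpin) {
    while let Some(event) = events.next().await {
        if let PrunerEvent::Finished { tip_block_number, elapsed, stats } = event {
            for (segment, pruned, progress) in stats {
                println!("{segment:?}: pruned {pruned} entries ({progress:?})");
            }
            println!("finished up to block {tip_block_number} in {elapsed:?}");
        }
    }
}
```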
#[derive(Debug, PartialEq, Eq, Clone)] @@ -11,6 +11,6 @@ pub enum PrunerEvent { Finished { tip_block_number: BlockNumber, elapsed: Duration, - stats: BTreeMap<PruneSegment, (PruneProgress, usize)>, + stats: Vec<(PruneSegment, usize, PruneProgress)>, }, } diff --git a/crates/prune/prune/src/pruner.rs b/crates/prune/prune/src/pruner.rs index 656aa69ad..60ff1ee80 100644 --- a/crates/prune/prune/src/pruner.rs +++ b/crates/prune/prune/src/pruner.rs @@ -14,10 +14,7 @@ use reth_provider::{ use reth_prune_types::{PruneLimiter, PruneMode, PruneProgress, PrunePurpose, PruneSegment}; use reth_static_file_types::StaticFileSegment; use reth_tokio_util::{EventSender, EventStream}; -use std::{ - collections::BTreeMap, - time::{Duration, Instant}, -}; +use std::time::{Duration, Instant}; use tokio::sync::watch; use tracing::debug; @@ -27,7 +24,7 @@ pub type PrunerResult = Result<PruneProgress, PrunerError>; /// The pruner type itself with the result of [`Pruner::run`] pub type PrunerWithResult<DB> = (Pruner<DB>, PrunerResult); -type PrunerStats = BTreeMap<PruneSegment, (PruneProgress, usize)>; +type PrunerStats = Vec<(PruneSegment, usize, PruneProgress)>; /// Pruning routine. Main pruning logic happens in [`Pruner::run`]. #[derive(Debug)] @@ -85,7 +82,11 @@ impl Pruner { self.event_sender.new_listener() } - /// Run the pruner + /// Run the pruner. This will only prune data up to the highest finished `ExEx` height; if + /// there are no `ExEx`s, the tip block number is used as-is. + /// + /// Returns a [`PruneProgress`], indicating whether pruning is finished, or there is more data + /// to prune. pub fn run(&mut self, tip_block_number: BlockNumber) -> PrunerResult { let Some(tip_block_number) = self.adjust_tip_block_number_to_finished_exex_height(tip_block_number) @@ -237,7 +238,7 @@ impl Pruner { if output.pruned > 0 { limiter.increment_deleted_entries_count_by(output.pruned); pruned += output.pruned; - stats.insert(segment.segment(), (output.progress, output.pruned)); + stats.push((segment.segment(), output.pruned, output.progress)); } } else { debug!(target: "pruner", segment = ?segment.segment(), ?purpose, "Nothing to prune for the segment"); @@ -306,8 +307,8 @@ impl Pruner { /// Adjusts the tip block number to the finished `ExEx` height. This is needed to not prune more /// data than `ExExs` have processed. Depending on the height: - /// - [`FinishedExExHeight::NoExExs`] returns the tip block number as is as no adjustment for - /// `ExExs` is needed. + /// - [`FinishedExExHeight::NoExExs`] returns the tip block number as no adjustment for `ExExs` + /// is needed. /// - [`FinishedExExHeight::NotReady`] returns `None` as not all `ExExs` have emitted a /// `FinishedHeight` event yet. /// - [`FinishedExExHeight::Height`] returns the finished `ExEx` height. diff --git a/crates/prune/prune/src/segments/account_history.rs b/crates/prune/prune/src/segments/account_history.rs index ab2800a31..28e448560 100644 --- a/crates/prune/prune/src/segments/account_history.rs +++ b/crates/prune/prune/src/segments/account_history.rs @@ -4,10 +4,12 @@ use crate::{ }, PrunerError, }; +use itertools::Itertools; use reth_db::tables; use reth_db_api::{database::Database, models::ShardedKey}; use reth_provider::DatabaseProviderRW; use reth_prune_types::{PruneInterruptReason, PruneMode, PruneProgress, PruneSegment}; +use rustc_hash::FxHashMap; use tracing::{instrument, trace}; /// Number of account history tables to prune in one step. @@ -64,34 +66,53 @@ impl Segment for AccountHistory { } let mut last_changeset_pruned_block = None; + // Deleted account changeset keys (account addresses) with the highest block number deleted + // for that key. 
diff --git a/crates/prune/prune/src/segments/account_history.rs b/crates/prune/prune/src/segments/account_history.rs index ab2800a31..28e448560 100644 --- a/crates/prune/prune/src/segments/account_history.rs +++ b/crates/prune/prune/src/segments/account_history.rs @@ -4,10 +4,12 @@ use crate::{ }, PrunerError, }; +use itertools::Itertools; use reth_db::tables; use reth_db_api::{database::Database, models::ShardedKey}; use reth_provider::DatabaseProviderRW; use reth_prune_types::{PruneInterruptReason, PruneMode, PruneProgress, PruneSegment}; +use rustc_hash::FxHashMap; use tracing::{instrument, trace}; /// Number of account history tables to prune in one step. @@ -64,34 +66,53 @@ impl Segment for AccountHistory { } let mut last_changeset_pruned_block = None; + // Deleted account changeset keys (account addresses) with the highest block number deleted + // for that key. + // + // The size of this map is limited by `prune_delete_limit * blocks_since_last_run / + // ACCOUNT_HISTORY_TABLES_TO_PRUNE`, and with the current defaults it's usually `3500 * 5 + // / 2`, so 8750 entries. Each entry is `160 bit + 64 bit`, so the total + // size should be up to 0.25MB + some hashmap overhead. `blocks_since_last_run` is + // additionally limited by the `max_reorg_depth`, so no OOM is expected here. + let mut highest_deleted_accounts = FxHashMap::default(); let (pruned_changesets, done) = provider .prune_table_with_range::( range, &mut limiter, |_| false, - |row| last_changeset_pruned_block = Some(row.0), + |(block_number, account)| { + highest_deleted_accounts.insert(account.address, block_number); + last_changeset_pruned_block = Some(block_number); + }, )?; trace!(target: "pruner", pruned = %pruned_changesets, %done, "Pruned account history (changesets)"); let last_changeset_pruned_block = last_changeset_pruned_block - // If there's more account account changesets to prune, set the checkpoint block number - // to previous, so we could finish pruning its account changesets on the next run. + // If there's more account changesets to prune, set the checkpoint block number to + // previous, so we could finish pruning its account changesets on the next run. .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) .unwrap_or(range_end); - let (processed, pruned_indices) = prune_history_indices::( + // Sort the highest deleted block numbers by account address and turn them into sharded keys. + // We did not use a `BTreeMap` from the beginning because it's inefficient for hash-like keys. + let highest_sharded_keys = highest_deleted_accounts + .into_iter() + .sorted_unstable() // Unstable is fine because no equal keys exist in the map + .map(|(address, block_number)| { + ShardedKey::new(address, block_number.min(last_changeset_pruned_block)) + }); + let outcomes = prune_history_indices::( provider, - last_changeset_pruned_block, + highest_sharded_keys, |a, b| a.key == b.key, - |key| ShardedKey::last(key.key), )?; - trace!(target: "pruner", %processed, pruned = %pruned_indices, %done, "Pruned account history (history)"); + trace!(target: "pruner", ?outcomes, %done, "Pruned account history (indices)"); let progress = PruneProgress::new(done, &limiter); Ok(PruneOutput { progress, - pruned: pruned_changesets + pruned_indices, + pruned: pruned_changesets + outcomes.deleted, checkpoint: Some(PruneOutputCheckpoint { block_number: Some(last_changeset_pruned_block), tx_number: None,
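The collect-then-sort pattern in the account-history hunk above (and reused by the rewritten `prune_history_indices` in `history.rs` below) is: accumulate the highest deleted block per key in a hash map during the linear changeset walk, then sort once before touching the index tables. A self-contained sketch with plain `std` types; reth uses `FxHashMap` plus `itertools`, and `Address` is shortened to a `[u8; 4]` stand-in here:

    use std::collections::HashMap;

    type Address = [u8; 4]; // stand-in for the real 20-byte account address
    type BlockNumber = u64;

    fn main() {
        // Pretend these are (block_number, address) rows walked from the
        // changeset table in ascending block order.
        let deleted_rows: &[(BlockNumber, Address)] =
            &[(10, [0xAA; 4]), (11, [0xCC; 4]), (12, [0xBB; 4]), (15, [0xAA; 4])];

        // O(1) inserts during the walk; because rows arrive in ascending block
        // order, the last insert for an address wins with its highest block.
        let mut highest_deleted: HashMap<Address, BlockNumber> = HashMap::new();
        for &(block_number, address) in deleted_rows {
            highest_deleted.insert(address, block_number);
        }

        // One unstable sort at the end; keys are unique, so instability is harmless.
        let mut sharded_keys: Vec<(Address, BlockNumber)> = highest_deleted.into_iter().collect();
        sharded_keys.sort_unstable();

        // Prints the (address, highest_deleted_block) pairs in address order:
        // [([170, 170, 170, 170], 15), ([187, 187, 187, 187], 12), ([204, 204, 204, 204], 11)]
        println!("{sharded_keys:?}");
    }
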
diff --git a/crates/prune/prune/src/segments/history.rs b/crates/prune/prune/src/segments/history.rs index ee841ef89..ff477a39f 100644 --- a/crates/prune/prune/src/segments/history.rs +++ b/crates/prune/prune/src/segments/history.rs @@ -1,5 +1,5 @@ use alloy_primitives::BlockNumber; -use reth_db::BlockNumberList; +use reth_db::{BlockNumberList, RawKey, RawTable, RawValue}; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, database::Database, @@ -10,103 +10,151 @@ use reth_db_api::{ }; use reth_provider::DatabaseProviderRW; -/// Prune history indices up to the provided block, inclusive. +enum PruneShardOutcome { + Deleted, + Updated, + Unchanged, +} + +#[derive(Debug, Default)] +pub(crate) struct PrunedIndices { + pub(crate) deleted: usize, + pub(crate) updated: usize, + pub(crate) unchanged: usize, +} + +/// Prune history indices according to the provided list of highest sharded keys. /// -/// Returns total number of processed (walked) and deleted entities. +/// Returns the total number of deleted, updated, and unchanged entities. pub(crate) fn prune_history_indices( provider: &DatabaseProviderRW, - to_block: BlockNumber, + highest_sharded_keys: impl IntoIterator, key_matches: impl Fn(&T::Key, &T::Key) -> bool, - last_key: impl Fn(&T::Key) -> T::Key, -) -> Result<(usize, usize), DatabaseError> +) -> Result where DB: Database, T: Table, T::Key: AsRef>, { - let mut processed = 0; - let mut deleted = 0; - let mut cursor = provider.tx_ref().cursor_write::()?; + let mut outcomes = PrunedIndices::default(); + let mut cursor = provider.tx_ref().cursor_write::>()?; + + for sharded_key in highest_sharded_keys { + // Seek to the first shard whose key is >= the given sharded key + // TODO: optimize + let mut shard = cursor.seek(RawKey::new(sharded_key.clone()))?; - // Prune history table: - // 1. If the shard has `highest_block_number` less than or equal to the target block number - // for pruning, delete the shard completely. - // 2. If the shard has `highest_block_number` greater than the target block number for - // pruning, filter block numbers inside the shard which are less than the target - // block number for pruning. - while let Some(result) = cursor.next()? { - let (key, blocks): (T::Key, BlockNumberList) = result; + // Get the highest block number that needs to be deleted for this sharded key + let to_block = sharded_key.as_ref().highest_block_number; - // If shard consists only of block numbers less than the target one, delete shard - // completely. - if key.as_ref().highest_block_number <= to_block { - cursor.delete_current()?; - deleted += 1; - if key.as_ref().highest_block_number == to_block { - // Shard contains only block numbers up to the target one, so we can skip to - // the last shard for this key. It is guaranteed that further shards for this - // sharded key will not contain the target block number, as it's in this shard. - cursor.seek_exact(last_key(&key))?; + 'shard: loop { + let Some((key, block_nums)) = + shard.map(|(k, v)| Result::<_, DatabaseError>::Ok((k.key()?, v))).transpose()? + else { + break + }; + + if key_matches(&key, &sharded_key) { + match prune_shard(&mut cursor, key, block_nums, to_block, &key_matches)? { + PruneShardOutcome::Deleted => outcomes.deleted += 1, + PruneShardOutcome::Updated => outcomes.updated += 1, + PruneShardOutcome::Unchanged => outcomes.unchanged += 1, + } + } else { + // If no such shard exists, skip to the next sharded key + break 'shard } + + shard = cursor.next()?; } - // Shard contains block numbers that are higher than the target one, so we need to - // filter it. It is guaranteed that further shards for this sharded key will not - // contain the target block number, as it's in this shard. - else { - let higher_blocks = - blocks.iter().skip_while(|block| *block <= to_block).collect::>(); + } - // If there were blocks less than or equal to the target one - // (so the shard has changed), update the shard. - if blocks.len() as usize != higher_blocks.len() { - // If there will be no more blocks in the shard after pruning blocks below target - // block, we need to remove it, as empty shards are not allowed. - if higher_blocks.is_empty() { - if key.as_ref().highest_block_number == u64::MAX { - let prev_row = cursor.prev()?; - match prev_row { - // If current shard is the last shard for the sharded key that - // has previous shards, replace it with the previous shard.
- Some((prev_key, prev_value)) if key_matches(&prev_key, &key) => { - cursor.delete_current()?; - deleted += 1; - // Upsert will replace the last shard for this sharded key with - // the previous value. - cursor.upsert(key.clone(), prev_value)?; - } - // If there's no previous shard for this sharded key, - // just delete last shard completely. - _ => { - // If we successfully moved the cursor to a previous row, - // jump to the original last shard. - if prev_row.is_some() { - cursor.next()?; - } - // Delete shard. - cursor.delete_current()?; - deleted += 1; + Ok(outcomes) +} + +/// Prunes one shard of a history table. +/// +/// 1. If the shard has `highest_block_number` less than or equal to the target block number for +/// pruning, delete the shard completely. +/// 2. If the shard has `highest_block_number` greater than the target block number for pruning, +/// filter block numbers inside the shard which are less than the target block number for +/// pruning. +fn prune_shard( + cursor: &mut C, + key: T::Key, + raw_blocks: RawValue, + to_block: BlockNumber, + key_matches: impl Fn(&T::Key, &T::Key) -> bool, +) -> Result +where + C: DbCursorRO> + DbCursorRW>, + T: Table, + T::Key: AsRef>, +{ + // If shard consists only of block numbers less than the target one, delete shard + // completely. + if key.as_ref().highest_block_number <= to_block { + cursor.delete_current()?; + Ok(PruneShardOutcome::Deleted) + } + // Shard contains block numbers that are higher than the target one, so we need to + // filter it. It is guaranteed that further shards for this sharded key will not + // contain the target block number, as it's in this shard. + else { + let blocks = raw_blocks.value()?; + let higher_blocks = + blocks.iter().skip_while(|block| *block <= to_block).collect::>(); + + // If there were blocks less than or equal to the target one + // (so the shard has changed), update the shard. + if blocks.len() as usize != higher_blocks.len() { + // If there will be no more blocks in the shard after pruning blocks below target + // block, we need to remove it, as empty shards are not allowed. + if higher_blocks.is_empty() { + if key.as_ref().highest_block_number == u64::MAX { + let prev_row = cursor + .prev()? + .map(|(k, v)| Result::<_, DatabaseError>::Ok((k.key()?, v))) + .transpose()?; + match prev_row { + // If current shard is the last shard for the sharded key that + // has previous shards, replace it with the previous shard. + Some((prev_key, prev_value)) if key_matches(&prev_key, &key) => { + cursor.delete_current()?; + // Upsert will replace the last shard for this sharded key with + // the previous value. + cursor.upsert(RawKey::new(key), prev_value)?; + Ok(PruneShardOutcome::Updated) + } + // If there's no previous shard for this sharded key, + // just delete last shard completely. + _ => { + // If we successfully moved the cursor to a previous row, + // jump to the original last shard. + if prev_row.is_some() { + cursor.next()?; } + // Delete shard. + cursor.delete_current()?; + Ok(PruneShardOutcome::Deleted) } } - // If current shard is not the last shard for this sharded key, - // just delete it. - else { - cursor.delete_current()?; - deleted += 1; - } - } else { - cursor.upsert(key.clone(), BlockNumberList::new_pre_sorted(higher_blocks))?; } + // If current shard is not the last shard for this sharded key, + // just delete it. 
+ else { + cursor.delete_current()?; + Ok(PruneShardOutcome::Deleted) + } + } else { + cursor.upsert( + RawKey::new(key), + RawValue::new(BlockNumberList::new_pre_sorted(higher_blocks)), + )?; + Ok(PruneShardOutcome::Updated) } - - // Jump to the last shard for this key, if current key isn't already the last shard. - if key.as_ref().highest_block_number != u64::MAX { - cursor.seek_exact(last_key(&key))?; - } + } else { + Ok(PruneShardOutcome::Unchanged) } - - processed += 1; } - - Ok((processed, deleted)) } diff --git a/crates/prune/prune/src/segments/storage_history.rs b/crates/prune/prune/src/segments/storage_history.rs index 3e7ad86a7..95e9afa0a 100644 --- a/crates/prune/prune/src/segments/storage_history.rs +++ b/crates/prune/prune/src/segments/storage_history.rs @@ -4,6 +4,7 @@ use crate::{ }, PrunerError, }; +use itertools::Itertools; use reth_db::tables; use reth_db_api::{ database::Database, @@ -11,6 +12,7 @@ use reth_db_api::{ }; use reth_provider::DatabaseProviderRW; use reth_prune_types::{PruneInterruptReason, PruneMode, PruneProgress, PruneSegment}; +use rustc_hash::FxHashMap; use tracing::{instrument, trace}; /// Number of storage history tables to prune in one step @@ -67,34 +69,58 @@ impl Segment for StorageHistory { } let mut last_changeset_pruned_block = None; + // Deleted storage changeset keys (account addresses and storage slots) with the highest + // block number deleted for that key. + // + // The size of this map is limited by `prune_delete_limit * blocks_since_last_run / + // STORAGE_HISTORY_TABLES_TO_PRUNE`, and with the current defaults it's usually `3500 * 5 + // / 2`, so 8750 entries. Each entry is `160 bit + 256 bit + 64 bit`, so the total + // size should be up to 0.5MB + some hashmap overhead. `blocks_since_last_run` is + // additionally limited by the `max_reorg_depth`, so no OOM is expected here. + let mut highest_deleted_storages = FxHashMap::default(); let (pruned_changesets, done) = provider .prune_table_with_range::( BlockNumberAddress::range(range), &mut limiter, |_| false, - |row| last_changeset_pruned_block = Some(row.0.block_number()), + |(BlockNumberAddress((block_number, address)), entry)| { + highest_deleted_storages.insert((address, entry.key), block_number); + last_changeset_pruned_block = Some(block_number); + }, )?; trace!(target: "pruner", deleted = %pruned_changesets, %done, "Pruned storage history (changesets)"); let last_changeset_pruned_block = last_changeset_pruned_block - // If there's more storage storage changesets to prune, set the checkpoint block number - // to previous, so we could finish pruning its storage changesets on the next run. + // If there's more storage changesets to prune, set the checkpoint block number to + // previous, so we could finish pruning its storage changesets on the next run. .map(|block_number| if done { block_number } else { block_number.saturating_sub(1) }) .unwrap_or(range_end); - let (processed, pruned_indices) = prune_history_indices::( + // Sort highest deleted block numbers by account address and storage key and turn them into + // sharded keys. + // We did not use a `BTreeMap` from the beginning because it's inefficient for hash-like keys.
+ let highest_sharded_keys = highest_deleted_storages + .into_iter() + .sorted_unstable() // Unstable is fine because no equal keys exist in the map + .map(|((address, storage_key), block_number)| { + StorageShardedKey::new( + address, + storage_key, + block_number.min(last_changeset_pruned_block), + ) + }); + let outcomes = prune_history_indices::( provider, - last_changeset_pruned_block, + highest_sharded_keys, |a, b| a.address == b.address && a.sharded_key.key == b.sharded_key.key, - |key| StorageShardedKey::last(key.address, key.sharded_key.key), )?; - trace!(target: "pruner", %processed, deleted = %pruned_indices, %done, "Pruned storage history (history)"); + trace!(target: "pruner", ?outcomes, %done, "Pruned storage history (indices)"); let progress = PruneProgress::new(done, &limiter); Ok(PruneOutput { progress, - pruned: pruned_changesets + pruned_indices, + pruned: pruned_changesets + outcomes.deleted, checkpoint: Some(PruneOutputCheckpoint { block_number: Some(last_changeset_pruned_block), tx_number: None, diff --git a/crates/prune/types/src/lib.rs b/crates/prune/types/src/lib.rs index 34d74614f..82563010f 100644 --- a/crates/prune/types/src/lib.rs +++ b/crates/prune/types/src/lib.rs @@ -6,8 +6,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod checkpoint; diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 4d7a7f684..bbb60b293 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -27,7 +27,6 @@ revm.workspace = true # alloy alloy-eips.workspace = true -alloy-rlp.workspace = true # common tracing.workspace = true diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index f2903a4f4..02ffba017 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -210,3 +210,189 @@ impl BlockExecutorStats { ); } } + +#[cfg(test)] +mod tests { + use super::*; + use reth_primitives::{Address, Log, Receipt}; + use reth_prune_types::{PruneMode, ReceiptsLogPruneConfig}; + use std::collections::BTreeMap; + + #[test] + fn test_save_receipts_empty() { + let mut recorder = BlockBatchRecord::default(); + // Create an empty vector of receipts + let receipts = vec![]; + + // Verify that saving receipts completes without error + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that the saved receipts are equal to a nested empty vector + assert_eq!(*recorder.receipts(), vec![vec![]].into()); + } + + #[test] + fn test_save_receipts_non_empty_no_pruning() { + let mut recorder = BlockBatchRecord::default(); + let receipts = vec![Receipt::default()]; + + // Verify that saving receipts completes without error + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that there is one block of receipts + assert_eq!(recorder.receipts().len(), 1); + // Verify that the first block contains one receipt + assert_eq!(recorder.receipts()[0].len(), 1); + // Verify that the saved receipt is the default receipt + assert_eq!(recorder.receipts()[0][0], Some(Receipt::default())); + } + + #[test] + fn test_save_receipts_with_pruning_no_prunable_receipts() { + let mut recorder = BlockBatchRecord::default(); + + // Set the first block number + recorder.set_first_block(1); + // Set the tip (highest known block) + recorder.set_tip(130); + + // Create a vector of receipts with a default 
receipt + let receipts = vec![Receipt::default()]; + + // Verify that saving receipts completes without error + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that there is one block of receipts + assert_eq!(recorder.receipts().len(), 1); + // Verify that the first block contains one receipt + assert_eq!(recorder.receipts()[0].len(), 1); + // Verify that the saved receipt is the default receipt + assert_eq!(recorder.receipts()[0][0], Some(Receipt::default())); + } + + #[test] + fn test_save_receipts_with_pruning_no_tip() { + // Create a PruneModes with receipts set to PruneMode::Full + let prune_modes = PruneModes { receipts: Some(PruneMode::Full), ..Default::default() }; + + let mut recorder = BlockBatchRecord::new(prune_modes); + + // Set the first block number + recorder.set_first_block(1); + // Create a vector of receipts with a default receipt + let receipts = vec![Receipt::default()]; + + // Verify that saving receipts completes without error + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that there is one block of receipts + assert_eq!(recorder.receipts().len(), 1); + // Verify that the first block contains one receipt + assert_eq!(recorder.receipts()[0].len(), 1); + // Verify that the saved receipt is the default receipt + assert_eq!(recorder.receipts()[0][0], Some(Receipt::default())); + } + + #[test] + fn test_save_receipts_with_pruning_no_block_number() { + // Create a PruneModes with receipts set to PruneMode::Full + let prune_modes = PruneModes { receipts: Some(PruneMode::Full), ..Default::default() }; + + // Create a BlockBatchRecord with the prune_modes + let mut recorder = BlockBatchRecord::new(prune_modes); + + // Set the tip (highest known block) + recorder.set_tip(130); + + // Create a vector of receipts with a default receipt + let receipts = vec![Receipt::default()]; + + // Verify that saving receipts completes without error + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that there is one block of receipts + assert_eq!(recorder.receipts().len(), 1); + // Verify that the first block contains one receipt + assert_eq!(recorder.receipts()[0].len(), 1); + // Verify that the saved receipt is the default receipt + assert_eq!(recorder.receipts()[0][0], Some(Receipt::default())); + } + + // Test saving receipts with pruning configuration and receipts should be pruned + #[test] + fn test_save_receipts_with_pruning_should_prune() { + // Create a PruneModes with receipts set to PruneMode::Full + let prune_modes = PruneModes { receipts: Some(PruneMode::Full), ..Default::default() }; + + // Create a BlockBatchRecord with the prune_modes + let mut recorder = BlockBatchRecord::new(prune_modes); + + // Set the first block number + recorder.set_first_block(1); + // Set the tip (highest known block) + recorder.set_tip(130); + + // Create a vector of receipts with a default receipt + let receipts = vec![Receipt::default()]; + + // Verify that saving receipts completes without error + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that there is one block of receipts + assert_eq!(recorder.receipts().len(), 1); + // Verify that the receipts are pruned (empty) + assert!(recorder.receipts()[0].is_empty()); + } + + // Test saving receipts with address filter pruning + #[test] + fn test_save_receipts_with_address_filter_pruning() { + // Create a PruneModes with receipts_log_filter configuration + let prune_modes = PruneModes { + receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([ + (Address::with_last_byte(1), 
PruneMode::Before(1300001)), + (Address::with_last_byte(2), PruneMode::Before(1300002)), + (Address::with_last_byte(3), PruneMode::Distance(1300003)), + ])), + ..Default::default() + }; + + // Create a BlockBatchRecord with the prune_modes + let mut recorder = BlockBatchRecord::new(prune_modes); + + // Set the first block number + recorder.set_first_block(1); + // Set the tip (highest known block) + recorder.set_tip(1300000); + + // With a receipt that should be pruned (address 4 not in the log filter) + let mut receipt = Receipt::default(); + receipt.logs.push(Log { address: Address::with_last_byte(4), ..Default::default() }); + let receipts = vec![receipt.clone()]; + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that the receipts are pruned (empty) + assert_eq!(recorder.receipts().len(), 1); + assert_eq!(recorder.receipts()[0], vec![None]); + + // With a receipt that should not be pruned (address 1 in the log filter) + let mut receipt1 = Receipt::default(); + receipt1.logs.push(Log { address: Address::with_last_byte(1), ..Default::default() }); + let receipts = vec![receipt1.clone()]; + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that the second block of receipts contains the receipt + assert_eq!(recorder.receipts().len(), 2); + assert_eq!(recorder.receipts()[1][0], Some(receipt1)); + + // With a receipt that should not be pruned (address 2 in the log filter) + let mut receipt2 = Receipt::default(); + receipt2.logs.push(Log { address: Address::with_last_byte(2), ..Default::default() }); + let receipts = vec![receipt2.clone()]; + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that the third block of receipts contains the receipt + assert_eq!(recorder.receipts().len(), 3); + assert_eq!(recorder.receipts()[2][0], Some(receipt2)); + + // With a receipt that should not be pruned (address 3 in the log filter) + let mut receipt3 = Receipt::default(); + receipt3.logs.push(Log { address: Address::with_last_byte(3), ..Default::default() }); + let receipts = vec![receipt3.clone()]; + assert!(recorder.save_receipts(receipts).is_ok()); + // Verify that the fourth block of receipts contains the receipt + assert_eq!(recorder.receipts().len(), 4); + assert_eq!(recorder.receipts()[3][0], Some(receipt3)); + } +} diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 3b31788db..5edd76bea 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -1,6 +1,6 @@ use crate::primitives::alloy_primitives::{BlockNumber, StorageKey, StorageValue}; use core::ops::{Deref, DerefMut}; -use reth_primitives::{Account, Address, B256, KECCAK_EMPTY, U256}; +use reth_primitives::{Account, Address, B256, U256}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use revm::{ db::DatabaseRef, @@ -121,7 +121,7 @@ impl Database for StateProviderDatabase { /// /// Returns `Ok` with the block hash if found, or the default hash otherwise. /// Note: It safely casts the `number` to `u64`. - fn block_hash(&mut self, number: U256) -> Result { + fn block_hash(&mut self, number: u64) -> Result { DatabaseRef::block_hash_ref(self, number) } } @@ -134,12 +134,7 @@ impl DatabaseRef for StateProviderDatabase { /// Returns `Ok` with `Some(AccountInfo)` if the account exists, /// `None` if it doesn't, or an error if encountered. 
fn basic_ref(&self, address: Address) -> Result, Self::Error> { - Ok(self.basic_account(address)?.map(|account| AccountInfo { - balance: account.balance, - nonce: account.nonce, - code_hash: account.bytecode_hash.unwrap_or(KECCAK_EMPTY), - code: None, - })) + Ok(self.basic_account(address)?.map(Into::into)) } /// Retrieves the bytecode associated with a given code hash. @@ -159,14 +154,8 @@ impl DatabaseRef for StateProviderDatabase { /// Retrieves the block hash for a given block number. /// /// Returns `Ok` with the block hash if found, or the default hash otherwise. - fn block_hash_ref(&self, number: U256) -> Result { - // Attempt to convert U256 to u64 - let block_number = match number.try_into() { - Ok(value) => value, - Err(_) => return Err(Self::Error::BlockNumberOverflow(number)), - }; - - // Get the block hash or default hash - Ok(self.0.block_hash(block_number)?.unwrap_or_default()) + fn block_hash_ref(&self, number: u64) -> Result { + // Get the block hash or the default hash; the number is already a `u64`, so no conversion is needed + Ok(self.0.block_hash(number)?.unwrap_or_default()) } } diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs index b9fba6b25..519f9704d 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/revm/src/state_change.rs @@ -1,26 +1,12 @@ -use alloy_eips::{ - eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}, - eip7002::WithdrawalRequest, -}; -use alloy_rlp::Buf; -use reth_chainspec::ChainSpec; +use alloy_eips::eip2935::{HISTORY_STORAGE_ADDRESS, HISTORY_STORAGE_CODE}; +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus_common::calc; use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{ - revm::env::{ - fill_tx_env_with_beacon_root_contract_call, - fill_tx_env_with_withdrawal_requests_contract_call, - }, - Address, Header, Request, Withdrawal, B256, U256, -}; +use reth_primitives::{Address, Block, Withdrawal, Withdrawals, B256, U256}; use reth_storage_errors::provider::ProviderError; use revm::{ - interpreter::Host, - primitives::{ - Account, AccountInfo, Bytecode, EvmStorageSlot, ExecutionResult, FixedBytes, - ResultAndState, BLOCKHASH_SERVE_WINDOW, - }, - Database, DatabaseCommit, Evm, + primitives::{Account, AccountInfo, Bytecode, EvmStorageSlot, BLOCKHASH_SERVE_WINDOW}, + Database, DatabaseCommit, }; // reuse revm's hashbrown implementation for no-std @@ -36,40 +22,34 @@ use std::collections::HashMap; /// /// Balance changes might include the block reward, uncle rewards, withdrawals, or irregular /// state changes (DAO fork). -#[allow(clippy::too_many_arguments)] #[inline] pub fn post_block_balance_increments( chain_spec: &ChainSpec, - block_number: u64, - block_difficulty: U256, - beneficiary: Address, - block_timestamp: u64, + block: &Block, total_difficulty: U256, - ommers: &[Header], - withdrawals: Option<&[Withdrawal]>, ) -> HashMap { let mut balance_increments = HashMap::new(); // Add block rewards if they are enabled.
if let Some(base_block_reward) = - calc::base_block_reward(chain_spec, block_number, block_difficulty, total_difficulty) + calc::base_block_reward(chain_spec, block.number, block.difficulty, total_difficulty) { // Ommer rewards - for ommer in ommers { + for ommer in &block.ommers { *balance_increments.entry(ommer.beneficiary).or_default() += - calc::ommer_reward(base_block_reward, block_number, ommer.number); + calc::ommer_reward(base_block_reward, block.number, ommer.number); } // Full block reward - *balance_increments.entry(beneficiary).or_default() += - calc::block_reward(base_block_reward, ommers.len()); + *balance_increments.entry(block.beneficiary).or_default() += + calc::block_reward(base_block_reward, block.ommers.len()); } // process withdrawals insert_post_block_withdrawals_balance_increments( chain_spec, - block_timestamp, - withdrawals, + block.timestamp, + block.withdrawals.as_ref().map(Withdrawals::as_ref), &mut balance_increments, ); @@ -86,7 +66,7 @@ pub fn post_block_balance_increments( /// /// [EIP-2935]: https://eips.ethereum.org/EIPS/eip-2935 #[inline] -pub fn apply_blockhashes_update + DatabaseCommit>( +pub fn apply_blockhashes_update> + DatabaseCommit>( db: &mut DB, chain_spec: &ChainSpec, block_timestamp: u64, @@ -108,7 +88,7 @@ where // nonce of 1, so it does not get deleted. let mut account: Account = db .basic(HISTORY_STORAGE_ADDRESS) - .map_err(BlockValidationError::BlockHashAccountLoadingFailed)? + .map_err(|err| BlockValidationError::BlockHashAccountLoadingFailed(err.into()))? .unwrap_or_else(|| AccountInfo { nonce: 1, code: Some(Bytecode::new_raw(HISTORY_STORAGE_CODE.clone())), @@ -132,7 +112,7 @@ where /// /// This calculates the correct storage slot in the `BLOCKHASH` history storage address, fetches the /// blockhash and creates a [`EvmStorageSlot`] with appropriate previous and new values. -fn eip2935_block_hash_slot>( +fn eip2935_block_hash_slot>>( db: &mut DB, block_number: u64, block_hash: B256, @@ -140,77 +120,11 @@ fn eip2935_block_hash_slot>( let slot = U256::from(block_number % BLOCKHASH_SERVE_WINDOW as u64); let current_hash = db .storage(HISTORY_STORAGE_ADDRESS, slot) - .map_err(BlockValidationError::BlockHashAccountLoadingFailed)?; + .map_err(|err| BlockValidationError::BlockHashAccountLoadingFailed(err.into()))?; Ok((slot, EvmStorageSlot::new_changed(current_hash, block_hash.into()))) } -/// Applies the pre-block call to the [EIP-4788] beacon block root contract, using the given block, -/// [`ChainSpec`], EVM. -/// -/// If Cancun is not activated or the block is the genesis block, then this is a no-op, and no -/// state changes are made. 
-/// -/// [EIP-4788]: https://eips.ethereum.org/EIPS/eip-4788 -#[inline] -pub fn apply_beacon_root_contract_call( - chain_spec: &ChainSpec, - block_timestamp: u64, - block_number: u64, - parent_beacon_block_root: Option, - evm: &mut Evm<'_, EXT, DB>, -) -> Result<(), BlockExecutionError> -where - DB::Error: core::fmt::Display, -{ - if !chain_spec.is_cancun_active_at_timestamp(block_timestamp) { - return Ok(()) - } - - let parent_beacon_block_root = - parent_beacon_block_root.ok_or(BlockValidationError::MissingParentBeaconBlockRoot)?; - - // if the block number is zero (genesis block) then the parent beacon block root must - // be 0x0 and no system transaction may occur as per EIP-4788 - if block_number == 0 { - if parent_beacon_block_root != B256::ZERO { - return Err(BlockValidationError::CancunGenesisParentBeaconBlockRootNotZero { - parent_beacon_block_root, - } - .into()) - } - return Ok(()) - } - - // get previous env - let previous_env = Box::new(evm.context.env().clone()); - - // modify env for pre block call - fill_tx_env_with_beacon_root_contract_call(&mut evm.context.evm.env, parent_beacon_block_root); - - let mut state = match evm.transact() { - Ok(res) => res.state, - Err(e) => { - evm.context.evm.env = previous_env; - return Err(BlockValidationError::BeaconRootContractCall { - parent_beacon_block_root: Box::new(parent_beacon_block_root), - message: e.to_string(), - } - .into()) - } - }; - - state.remove(&alloy_eips::eip4788::SYSTEM_ADDRESS); - state.remove(&evm.block().coinbase); - - evm.context.evm.db.commit(state); - - // re-set the previous env - evm.context.evm.env = previous_env; - - Ok(()) -} - /// Returns a map of addresses to their balance increments if the Shanghai hardfork is active at the /// given timestamp. /// @@ -254,89 +168,3 @@ pub fn insert_post_block_withdrawals_balance_increments( } } } - -/// Applies the post-block call to the EIP-7002 withdrawal requests contract. -/// -/// If Prague is not active at the given timestamp, then this is a no-op, and an empty vector is -/// returned. Otherwise, the withdrawal requests are returned. -#[inline] -pub fn apply_withdrawal_requests_contract_call( - evm: &mut Evm<'_, EXT, DB>, -) -> Result, BlockExecutionError> -where - DB::Error: core::fmt::Display, -{ - // get previous env - let previous_env = Box::new(evm.context.env().clone()); - - // modify env for pre block call - fill_tx_env_with_withdrawal_requests_contract_call(&mut evm.context.evm.env); - - let ResultAndState { result, mut state } = match evm.transact() { - Ok(res) => res, - Err(e) => { - evm.context.evm.env = previous_env; - return Err(BlockValidationError::WithdrawalRequestsContractCall { - message: format!("execution failed: {e}"), - } - .into()) - } - }; - - // cleanup the state - state.remove(&alloy_eips::eip7002::SYSTEM_ADDRESS); - state.remove(&evm.block().coinbase); - evm.context.evm.db.commit(state); - - // re-set the previous env - evm.context.evm.env = previous_env; - - let mut data = match result { - ExecutionResult::Success { output, .. } => Ok(output.into_data()), - ExecutionResult::Revert { output, .. } => { - Err(BlockValidationError::WithdrawalRequestsContractCall { - message: format!("execution reverted: {output}"), - }) - } - ExecutionResult::Halt { reason, .. 
} => { - Err(BlockValidationError::WithdrawalRequestsContractCall { - message: format!("execution halted: {reason:?}"), - }) - } - }?; - - // Withdrawals are encoded as a series of withdrawal requests, each with the following - // format: - // - // +------+--------+--------+ - // | addr | pubkey | amount | - // +------+--------+--------+ - // 20 48 8 - - const WITHDRAWAL_REQUEST_SIZE: usize = 20 + 48 + 8; - let mut withdrawal_requests = Vec::with_capacity(data.len() / WITHDRAWAL_REQUEST_SIZE); - while data.has_remaining() { - if data.remaining() < WITHDRAWAL_REQUEST_SIZE { - return Err(BlockValidationError::WithdrawalRequestsContractCall { - message: "invalid withdrawal request length".to_string(), - } - .into()) - } - - let mut source_address = Address::ZERO; - data.copy_to_slice(source_address.as_mut_slice()); - - let mut validator_pubkey = FixedBytes::<48>::ZERO; - data.copy_to_slice(validator_pubkey.as_mut_slice()); - - let amount = data.get_u64(); - - withdrawal_requests.push(Request::WithdrawalRequest(WithdrawalRequest { - source_address, - validator_pubkey, - amount, - })); - } - - Ok(withdrawal_requests) -} diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 90ac4ea04..0459cf679 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -1,7 +1,9 @@ use reth_primitives::{ keccak256, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, B256, U256, }; -use reth_storage_api::{AccountReader, BlockHashReader, StateProvider, StateRootProvider}; +use reth_storage_api::{ + AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, +}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, AccountProof}; use revm::db::BundleState; @@ -76,6 +78,17 @@ impl StateRootProvider for StateProviderTest { } } +impl StateProofProvider for StateProviderTest { + fn proof( + &self, + _state: &BundleState, + _address: Address, + _slots: &[B256], + ) -> ProviderResult { + unimplemented!("proof generation is not supported") + } +} + impl StateProvider for StateProviderTest { fn storage( &self, @@ -88,8 +101,4 @@ impl StateProvider for StateProviderTest { fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { Ok(self.contracts.get(&code_hash).cloned()) } - - fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult { - unimplemented!("proof generation is not supported") - } } diff --git a/crates/rpc/rpc-api/Cargo.toml b/crates/rpc/rpc-api/Cargo.toml index 5374c46e4..59ae5d4cf 100644 --- a/crates/rpc/rpc-api/Cargo.toml +++ b/crates/rpc/rpc-api/Cargo.toml @@ -15,11 +15,11 @@ workspace = true # reth reth-primitives.workspace = true reth-rpc-types.workspace = true +reth-rpc-eth-api.workspace = true reth-engine-primitives.workspace = true reth-network-peers.workspace = true # misc -alloy-dyn-abi = { workspace = true, features = ["eip712"] } jsonrpsee = { workspace = true, features = ["server", "macros"] } serde = { workspace = true, features = ["derive"] } @@ -27,4 +27,9 @@ serde = { workspace = true, features = ["derive"] } serde_json.workspace = true [features] -client = ["jsonrpsee/client", "jsonrpsee/async-client"] +client = [ + "jsonrpsee/client", + "jsonrpsee/async-client", + "reth-rpc-eth-api/client" +] +optimism = ["reth-rpc-eth-api/optimism"] \ No newline at end of file diff --git a/crates/rpc/rpc-api/src/admin.rs b/crates/rpc/rpc-api/src/admin.rs index 173cd8ef7..66f8918a3 100644 --- a/crates/rpc/rpc-api/src/admin.rs +++ b/crates/rpc/rpc-api/src/admin.rs @@ -1,6 
+1,6 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_network_peers::{AnyNode, NodeRecord}; -use reth_rpc_types::{admin::NodeInfo, PeerInfo}; +use reth_rpc_types::admin::{NodeInfo, PeerInfo}; /// Admin namespace rpc interface that gives access to several non-standard RPC methods. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "admin"))] diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index ccee09cc2..580245b10 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -26,7 +26,7 @@ pub trait DebugApi { #[method(name = "getRawTransaction")] async fn raw_transaction(&self, hash: B256) -> RpcResult>; - /// Returns an array of EIP-2718 binary-encoded transactions for the given [BlockId]. + /// Returns an array of EIP-2718 binary-encoded transactions for the given [`BlockId`]. #[method(name = "getRawTransactions")] async fn raw_transactions(&self, block_id: BlockId) -> RpcResult>; diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index e858f62df..986dd76b1 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -8,14 +8,13 @@ use reth_engine_primitives::EngineTypes; use reth_primitives::{Address, BlockHash, BlockId, BlockNumberOrTag, Bytes, B256, U256, U64}; use reth_rpc_types::{ engine::{ - ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, - ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, - PayloadStatus, TransitionConfiguration, + ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadBodiesV2, + ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, + ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, }, state::StateOverride, BlockOverrides, Filter, Log, RichBlock, SyncStatus, TransactionRequest, }; - // NOTE: We can't use associated types in the `EngineApi` trait because of jsonrpsee, so we use a // generic here. It would be nice if the rpc macro would understand which types need to have serde. // By default, if the trait has a generic, the rpc macro will add e.g. 
`Engine: DeserializeOwned` to @@ -144,6 +143,13 @@ pub trait EngineApi { block_hashes: Vec, ) -> RpcResult; + /// See also + #[method(name = "getPayloadBodiesByHashV2")] + async fn get_payload_bodies_by_hash_v2( + &self, + block_hashes: Vec, + ) -> RpcResult; + /// See also /// /// Returns the execution payload bodies by the range starting at `start`, containing `count` @@ -163,6 +169,16 @@ pub trait EngineApi { count: U64, ) -> RpcResult; + /// See also + /// + /// Similar to `getPayloadBodiesByRangeV1`, but returns [`ExecutionPayloadBodiesV2`] + #[method(name = "getPayloadBodiesByRangeV2")] + async fn get_payload_bodies_by_range_v2( + &self, + start: U64, + count: U64, + ) -> RpcResult; + /// See also /// /// Note: This method will be deprecated after the cancun hardfork: diff --git a/crates/rpc/rpc-api/src/eth.rs b/crates/rpc/rpc-api/src/eth.rs deleted file mode 100644 index eb11fde82..000000000 --- a/crates/rpc/rpc-api/src/eth.rs +++ /dev/null @@ -1,311 +0,0 @@ -use alloy_dyn_abi::TypedData; -use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; -use reth_rpc_types::{ - serde_helpers::JsonStorageKey, state::StateOverride, AccessListWithGasUsed, - AnyTransactionReceipt, BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, - FeeHistory, Header, Index, RichBlock, StateContext, SyncStatus, Transaction, - TransactionRequest, Work, -}; - -/// Eth rpc interface: -#[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] -#[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] -pub trait EthApi { - /// Returns the protocol version encoded as a string. - #[method(name = "protocolVersion")] - async fn protocol_version(&self) -> RpcResult; - - /// Returns an object with data about the sync status or false. - #[method(name = "syncing")] - fn syncing(&self) -> RpcResult; - - /// Returns the client coinbase address. - #[method(name = "coinbase")] - async fn author(&self) -> RpcResult
; - - /// Returns a list of addresses owned by client. - #[method(name = "accounts")] - fn accounts(&self) -> RpcResult>; - - /// Returns the number of most recent block. - #[method(name = "blockNumber")] - fn block_number(&self) -> RpcResult; - - /// Returns the chain ID of the current network. - #[method(name = "chainId")] - async fn chain_id(&self) -> RpcResult>; - - /// Returns information about a block by hash. - #[method(name = "getBlockByHash")] - async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult>; - - /// Returns information about a block by number. - #[method(name = "getBlockByNumber")] - async fn block_by_number( - &self, - number: BlockNumberOrTag, - full: bool, - ) -> RpcResult>; - - /// Returns the number of transactions in a block from a block matching the given block hash. - #[method(name = "getBlockTransactionCountByHash")] - async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult>; - - /// Returns the number of transactions in a block matching the given block number. - #[method(name = "getBlockTransactionCountByNumber")] - async fn block_transaction_count_by_number( - &self, - number: BlockNumberOrTag, - ) -> RpcResult>; - - /// Returns the number of uncles in a block from a block matching the given block hash. - #[method(name = "getUncleCountByBlockHash")] - async fn block_uncles_count_by_hash(&self, hash: B256) -> RpcResult>; - - /// Returns the number of uncles in a block with given block number. - #[method(name = "getUncleCountByBlockNumber")] - async fn block_uncles_count_by_number( - &self, - number: BlockNumberOrTag, - ) -> RpcResult>; - - /// Returns all transaction receipts for a given block. - #[method(name = "getBlockReceipts")] - async fn block_receipts( - &self, - block_id: BlockId, - ) -> RpcResult>>; - - /// Returns an uncle block of the given block and index. - #[method(name = "getUncleByBlockHashAndIndex")] - async fn uncle_by_block_hash_and_index( - &self, - hash: B256, - index: Index, - ) -> RpcResult>; - - /// Returns an uncle block of the given block and index. - #[method(name = "getUncleByBlockNumberAndIndex")] - async fn uncle_by_block_number_and_index( - &self, - number: BlockNumberOrTag, - index: Index, - ) -> RpcResult>; - - /// Returns the EIP-2718 encoded transaction if it exists. - /// - /// If this is a EIP-4844 transaction that is in the pool it will include the sidecar. - #[method(name = "getRawTransactionByHash")] - async fn raw_transaction_by_hash(&self, hash: B256) -> RpcResult>; - - /// Returns the information about a transaction requested by transaction hash. - #[method(name = "getTransactionByHash")] - async fn transaction_by_hash(&self, hash: B256) -> RpcResult>; - - /// Returns information about a raw transaction by block hash and transaction index position. - #[method(name = "getRawTransactionByBlockHashAndIndex")] - async fn raw_transaction_by_block_hash_and_index( - &self, - hash: B256, - index: Index, - ) -> RpcResult>; - - /// Returns information about a transaction by block hash and transaction index position. - #[method(name = "getTransactionByBlockHashAndIndex")] - async fn transaction_by_block_hash_and_index( - &self, - hash: B256, - index: Index, - ) -> RpcResult>; - - /// Returns information about a raw transaction by block number and transaction index - /// position. 
- #[method(name = "getRawTransactionByBlockNumberAndIndex")] - async fn raw_transaction_by_block_number_and_index( - &self, - number: BlockNumberOrTag, - index: Index, - ) -> RpcResult>; - - /// Returns information about a transaction by block number and transaction index position. - #[method(name = "getTransactionByBlockNumberAndIndex")] - async fn transaction_by_block_number_and_index( - &self, - number: BlockNumberOrTag, - index: Index, - ) -> RpcResult>; - - /// Returns the receipt of a transaction by transaction hash. - #[method(name = "getTransactionReceipt")] - async fn transaction_receipt(&self, hash: B256) -> RpcResult>; - - /// Returns the balance of the account of given address. - #[method(name = "getBalance")] - async fn balance(&self, address: Address, block_number: Option) -> RpcResult; - - /// Returns the value from a storage position at a given address - #[method(name = "getStorageAt")] - async fn storage_at( - &self, - address: Address, - index: JsonStorageKey, - block_number: Option, - ) -> RpcResult; - - /// Returns the number of transactions sent from an address at given block number. - #[method(name = "getTransactionCount")] - async fn transaction_count( - &self, - address: Address, - block_number: Option, - ) -> RpcResult; - - /// Returns code at a given address at given block number. - #[method(name = "getCode")] - async fn get_code(&self, address: Address, block_number: Option) -> RpcResult; - - /// Returns the block's header at given number. - #[method(name = "getHeaderByNumber")] - async fn header_by_number(&self, hash: BlockNumberOrTag) -> RpcResult>; - - /// Returns the block's header at given hash. - #[method(name = "getHeaderByHash")] - async fn header_by_hash(&self, hash: B256) -> RpcResult>; - - /// Executes a new message call immediately without creating a transaction on the block chain. - #[method(name = "call")] - async fn call( - &self, - request: TransactionRequest, - block_number: Option, - state_overrides: Option, - block_overrides: Option>, - ) -> RpcResult; - - /// Simulate arbitrary number of transactions at an arbitrary blockchain index, with the - /// optionality of state overrides - #[method(name = "callMany")] - async fn call_many( - &self, - bundle: Bundle, - state_context: Option, - state_override: Option, - ) -> RpcResult>; - - /// Generates an access list for a transaction. - /// - /// This method creates an [EIP2930](https://eips.ethereum.org/EIPS/eip-2930) type accessList based on a given Transaction. - /// - /// An access list contains all storage slots and addresses touched by the transaction, except - /// for the sender account and the chain's precompiles. - /// - /// It returns list of addresses and storage keys used by the transaction, plus the gas - /// consumed when the access list is added. That is, it gives you the list of addresses and - /// storage keys that will be used by that transaction, plus the gas consumed if the access - /// list is included. Like eth_estimateGas, this is an estimation; the list could change - /// when the transaction is actually mined. Adding an accessList to your transaction does - /// not necessary result in lower gas usage compared to a transaction without an access - /// list. - #[method(name = "createAccessList")] - async fn create_access_list( - &self, - request: TransactionRequest, - block_number: Option, - ) -> RpcResult; - - /// Generates and returns an estimate of how much gas is necessary to allow the transaction to - /// complete. 
- #[method(name = "estimateGas")] - async fn estimate_gas( - &self, - request: TransactionRequest, - block_number: Option, - state_override: Option, - ) -> RpcResult; - - /// Returns the current price per gas in wei. - #[method(name = "gasPrice")] - async fn gas_price(&self) -> RpcResult; - - /// Introduced in EIP-1559, returns suggestion for the priority for dynamic fee transactions. - #[method(name = "maxPriorityFeePerGas")] - async fn max_priority_fee_per_gas(&self) -> RpcResult; - - /// Introduced in EIP-4844, returns the current blob base fee in wei. - #[method(name = "blobBaseFee")] - async fn blob_base_fee(&self) -> RpcResult; - - /// Returns the Transaction fee history - /// - /// Introduced in EIP-1559 for getting information on the appropriate priority fee to use. - /// - /// Returns transaction base fee per gas and effective priority fee per gas for the - /// requested/supported block range. The returned Fee history for the returned block range - /// can be a subsection of the requested range if not all blocks are available. - #[method(name = "feeHistory")] - async fn fee_history( - &self, - block_count: U64, - newest_block: BlockNumberOrTag, - reward_percentiles: Option>, - ) -> RpcResult; - - /// Returns whether the client is actively mining new blocks. - #[method(name = "mining")] - async fn is_mining(&self) -> RpcResult; - - /// Returns the number of hashes per second that the node is mining with. - #[method(name = "hashrate")] - async fn hashrate(&self) -> RpcResult; - - /// Returns the hash of the current block, the seedHash, and the boundary condition to be met - /// (“target”) - #[method(name = "getWork")] - async fn get_work(&self) -> RpcResult; - - /// Used for submitting mining hashrate. - /// - /// Can be used for remote miners to submit their hash rate. - /// It accepts the miner hash rate and an identifier which must be unique between nodes. - /// Returns `true` if the block was successfully submitted, `false` otherwise. - #[method(name = "submitHashrate")] - async fn submit_hashrate(&self, hashrate: U256, id: B256) -> RpcResult; - - /// Used for submitting a proof-of-work solution. - #[method(name = "submitWork")] - async fn submit_work(&self, nonce: B64, pow_hash: B256, mix_digest: B256) -> RpcResult; - - /// Sends transaction; will block waiting for signer to return the - /// transaction hash. - #[method(name = "sendTransaction")] - async fn send_transaction(&self, request: TransactionRequest) -> RpcResult; - - /// Sends signed transaction, returning its hash. - #[method(name = "sendRawTransaction")] - async fn send_raw_transaction(&self, bytes: Bytes) -> RpcResult; - - /// Returns an Ethereum specific signature with: sign(keccak256("\x19Ethereum Signed Message:\n" - /// + len(message) + message))). - #[method(name = "sign")] - async fn sign(&self, address: Address, message: Bytes) -> RpcResult; - - /// Signs a transaction that can be submitted to the network at a later time using with - /// `sendRawTransaction.` - #[method(name = "signTransaction")] - async fn sign_transaction(&self, transaction: TransactionRequest) -> RpcResult; - - /// Signs data via [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md). - #[method(name = "signTypedData")] - async fn sign_typed_data(&self, address: Address, data: TypedData) -> RpcResult; - - /// Returns the account and storage values of the specified account including the Merkle-proof. - /// This call can be used to verify that the data you are pulling from is not tampered with. 
- #[method(name = "getProof")] - async fn get_proof( - &self, - address: Address, - keys: Vec, - block_number: Option, - ) -> RpcResult; -} diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 82af34a86..cb84f8388 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -16,12 +16,8 @@ mod admin; mod anvil; -mod bundle; mod debug; mod engine; -mod eth; -mod eth_filter; -mod eth_pubsub; mod ganache; mod hardhat; mod mev; @@ -42,12 +38,8 @@ pub use servers::*; pub mod servers { pub use crate::{ admin::AdminApiServer, - bundle::{EthBundleApiServer, EthCallBundleApiServer}, debug::DebugApiServer, engine::{EngineApiServer, EngineEthApiServer}, - eth::EthApiServer, - eth_filter::EthFilterApiServer, - eth_pubsub::EthPubSubApiServer, mev::MevApiServer, net::NetApiServer, otterscan::OtterscanServer, @@ -58,6 +50,10 @@ pub mod servers { validation::BlockSubmissionValidationApiServer, web3::Web3ApiServer, }; + pub use reth_rpc_eth_api::{ + self as eth, EthApiServer, EthBundleApiServer, EthCallBundleApiServer, EthFilterApiServer, + EthPubSubApiServer, + }; } /// re-export of all client traits @@ -70,11 +66,8 @@ pub mod clients { pub use crate::{ admin::AdminApiClient, anvil::AnvilApiClient, - bundle::{EthBundleApiClient, EthCallBundleApiClient}, debug::DebugApiClient, engine::{EngineApiClient, EngineEthApiClient}, - eth::EthApiClient, - eth_filter::EthFilterApiClient, ganache::GanacheApiClient, hardhat::HardhatApiClient, mev::MevApiClient, @@ -86,4 +79,7 @@ pub mod clients { validation::BlockSubmissionValidationApiClient, web3::Web3ApiClient, }; + pub use reth_rpc_eth_api::{ + EthApiClient, EthBundleApiClient, EthCallBundleApiClient, EthFilterApiClient, + }; } diff --git a/crates/rpc/rpc-api/src/mev.rs b/crates/rpc/rpc-api/src/mev.rs index 008535276..ebe6f5ee8 100644 --- a/crates/rpc/rpc-api/src/mev.rs +++ b/crates/rpc/rpc-api/src/mev.rs @@ -1,5 +1,5 @@ use jsonrpsee::proc_macros::rpc; -use reth_rpc_types::{ +use reth_rpc_types::mev::{ SendBundleRequest, SendBundleResponse, SimBundleOverrides, SimBundleResponse, }; diff --git a/crates/rpc/rpc-api/src/otterscan.rs b/crates/rpc/rpc-api/src/otterscan.rs index 2156765bb..a06fa1a4d 100644 --- a/crates/rpc/rpc-api/src/otterscan.rs +++ b/crates/rpc/rpc-api/src/otterscan.rs @@ -1,17 +1,27 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, TxHash, B256}; +use reth_primitives::{Address, BlockId, Bytes, TxHash, B256}; use reth_rpc_types::{ trace::otterscan::{ BlockDetails, ContractCreator, InternalOperation, OtsBlockTransactions, TraceEntry, TransactionsWithReceipts, }, - Transaction, + Header, }; /// Otterscan rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "ots"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "ots"))] pub trait Otterscan { + /// Get the block header by block number. Otterscan currently requires this endpoint and uses + /// it to: + /// + /// 1. check whether the node is Erigon or not + /// 2. fetch the block header instead of the full block + /// + /// Ref: + #[method(name = "getHeaderByNumber", aliases = ["erigon_getHeaderByNumber"])] + async fn get_header_by_number(&self, block_number: u64) -> RpcResult>; + /// Check if a certain address contains deployed code.
#[method(name = "hasCode")] async fn has_code(&self, address: Address, block_number: Option) -> RpcResult; @@ -33,15 +43,12 @@ pub trait Otterscan { /// Extract all variations of calls, contract creation and self-destructs and returns a call /// tree. #[method(name = "traceTransaction")] - async fn trace_transaction(&self, tx_hash: TxHash) -> RpcResult; + async fn trace_transaction(&self, tx_hash: TxHash) -> RpcResult>>; /// Tailor-made and expanded version of eth_getBlockByNumber for block details page in /// Otterscan. #[method(name = "getBlockDetails")] - async fn get_block_details( - &self, - block_number: BlockNumberOrTag, - ) -> RpcResult>; + async fn get_block_details(&self, block_number: u64) -> RpcResult>; /// Tailor-made and expanded version of eth_getBlockByHash for block details page in Otterscan. #[method(name = "getBlockDetailsByHash")] @@ -51,7 +58,7 @@ pub trait Otterscan { #[method(name = "getBlockTransactions")] async fn get_block_transactions( &self, - block_number: BlockNumberOrTag, + block_number: u64, page_number: usize, page_size: usize, ) -> RpcResult; @@ -61,7 +68,7 @@ pub trait Otterscan { async fn search_transactions_before( &self, address: Address, - block_number: BlockNumberOrTag, + block_number: u64, page_size: usize, ) -> RpcResult; @@ -70,7 +77,7 @@ pub trait Otterscan { async fn search_transactions_after( &self, address: Address, - block_number: BlockNumberOrTag, + block_number: u64, page_size: usize, ) -> RpcResult; @@ -80,7 +87,7 @@ pub trait Otterscan { &self, sender: Address, nonce: u64, - ) -> RpcResult>; + ) -> RpcResult>; /// Gets the transaction hash and the address who created a contract. #[method(name = "getContractCreator")] diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 46105030c..d97b23b51 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -19,7 +19,9 @@ reth-node-core.workspace = true reth-provider.workspace = true reth-rpc.workspace = true reth-rpc-api.workspace = true +reth-rpc-eth-api.workspace = true reth-rpc-layer.workspace = true +reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true @@ -46,6 +48,7 @@ tracing.workspace = true reth-chainspec.workspace = true reth-beacon-consensus.workspace = true reth-network-api.workspace = true +reth-network-peers.workspace = true reth-evm-ethereum.workspace = true reth-ethereum-engine-primitives.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index 1e8ef8f56..be904f6ef 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -7,8 +7,8 @@ use jsonrpsee::{ Methods, }; use reth_engine_primitives::EngineTypes; -use reth_rpc::EthSubscriptionIdProvider; use reth_rpc_api::servers::*; +use reth_rpc_eth_types::EthSubscriptionIdProvider; use reth_rpc_layer::{ secret_to_bearer_header, AuthClientLayer, AuthClientService, AuthLayer, JwtAuthValidator, JwtSecret, diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index 45cad81cd..1f61f5791 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -4,7 +4,7 @@ use crate::{ }; use jsonrpsee::server::ServerBuilder; use reth_node_core::{args::RpcServerArgs, utils::get_or_create_jwt_secret_from_path}; -use 
reth_rpc::eth::{cache::EthStateCacheConfig, gas_oracle::GasPriceOracleConfig}; +use reth_rpc_eth_types::{EthStateCacheConfig, GasPriceOracleConfig}; use reth_rpc_layer::{JwtError, JwtSecret}; use reth_rpc_server_types::RpcModuleSelection; use std::{net::SocketAddr, path::PathBuf}; @@ -91,9 +91,11 @@ impl RethRpcServerConfig for RpcServerArgs { .max_tracing_requests(self.rpc_max_tracing_requests) .max_blocks_per_filter(self.rpc_max_blocks_per_filter.unwrap_or_max()) .max_logs_per_response(self.rpc_max_logs_per_response.unwrap_or_max() as usize) + .eth_proof_window(self.rpc_eth_proof_window) .rpc_gas_cap(self.rpc_gas_cap) .state_cache(self.state_cache_config()) .gpo_config(self.gas_price_oracle_config()) + .proof_permits(self.rpc_proof_permits) } fn state_cache_config(&self) -> EthStateCacheConfig { @@ -216,7 +218,7 @@ impl RethRpcServerConfig for RpcServerArgs { mod tests { use clap::{Args, Parser}; use reth_node_core::args::RpcServerArgs; - use reth_rpc::eth::RPC_DEFAULT_GAS_CAP; + use reth_rpc_eth_types::RPC_DEFAULT_GAS_CAP; use reth_rpc_server_types::{constants, RethRpcModule, RpcModuleSelection}; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; diff --git a/crates/rpc/rpc-builder/src/eth.rs b/crates/rpc/rpc-builder/src/eth.rs index 224301966..b9b2d63ef 100644 --- a/crates/rpc/rpc-builder/src/eth.rs +++ b/crates/rpc/rpc-builder/src/eth.rs @@ -1,233 +1,137 @@ -use crate::RpcModuleConfig; +use std::{fmt::Debug, time::Duration}; + use reth_evm::ConfigureEvm; -use reth_network_api::{NetworkInfo, Peers}; +use reth_network_api::NetworkInfo; use reth_provider::{ - AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - EvmEnvProvider, StateProviderFactory, + BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, EvmEnvProvider, + FullRpcProvider, StateProviderFactory, }; -use reth_rpc::{ - eth::{ - cache::{cache_new_blocks_task, EthStateCache, EthStateCacheConfig}, - fee_history_cache_new_blocks_task, - gas_oracle::{GasPriceOracle, GasPriceOracleConfig}, - traits::RawTransactionForwarder, - EthFilterConfig, FeeHistoryCache, FeeHistoryCacheConfig, RPC_DEFAULT_GAS_CAP, - }, - EthApi, EthFilter, EthPubSub, +use reth_rpc::{eth::EthFilterConfig, EthApi, EthFilter, EthPubSub}; +use reth_rpc_eth_types::{ + cache::cache_new_blocks_task, fee_history::fee_history_cache_new_blocks_task, EthStateCache, + EthStateCacheConfig, FeeHistoryCache, FeeHistoryCacheConfig, GasPriceOracle, + GasPriceOracleConfig, RPC_DEFAULT_GAS_CAP, }; use reth_rpc_server_types::constants::{ - default_max_tracing_requests, DEFAULT_MAX_BLOCKS_PER_FILTER, DEFAULT_MAX_LOGS_PER_RESPONSE, + default_max_tracing_requests, DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_BLOCKS_PER_FILTER, + DEFAULT_MAX_LOGS_PER_RESPONSE, DEFAULT_PROOF_PERMITS, }; use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; use reth_transaction_pool::TransactionPool; use serde::{Deserialize, Serialize}; -use std::sync::Arc; -/// All handlers for the `eth` namespace +/// Default value for stale filter ttl +const DEFAULT_STALE_FILTER_TTL: Duration = Duration::from_secs(5 * 60); + +/// Alias for function that builds the core `eth` namespace API. +pub type EthApiBuilder = + Box) -> EthApi>; + +/// Handlers for core, filter and pubsub `eth` namespace APIs. 
#[derive(Debug, Clone)] -pub struct EthHandlers { +pub struct EthHandlers { /// Main `eth_` request handler - pub api: EthApi, + pub api: EthApi, /// The async caching layer used by the eth handlers pub cache: EthStateCache, /// Polling based filter handler available on all transports pub filter: EthFilter, /// Handler for subscriptions only available for transports that support it (ws, ipc) pub pubsub: EthPubSub, - /// The configured tracing call pool - pub blocking_task_pool: BlockingTaskPool, } -/// Configuration for `EthHandlersBuilder` -#[derive(Clone, Debug)] -pub(crate) struct EthHandlersConfig { - /// The provider for blockchain data, responsible for reading blocks, accounts, state, etc. - pub(crate) provider: Provider, - /// The transaction pool for managing pending transactions. - pub(crate) pool: Pool, - /// The network information, handling peer connections and network state. - pub(crate) network: Network, - /// The task executor for spawning asynchronous tasks. - pub(crate) executor: Tasks, - /// The event subscriptions for canonical state changes. - pub(crate) events: Events, - /// The EVM configuration for Ethereum Virtual Machine settings. - pub(crate) evm_config: EvmConfig, - /// An optional forwarder for raw transactions. - pub(crate) eth_raw_transaction_forwarder: Option>, +impl EthHandlers { + /// Returns a new [`EthHandlers`] builder. + #[allow(clippy::too_many_arguments)] + pub fn builder( + provider: Provider, + pool: Pool, + network: Network, + evm_config: EvmConfig, + config: EthConfig, + executor: Tasks, + events: Events, + eth_api_builder: EthApiB, + ) -> EthHandlersBuilder + where + EthApiB: FnOnce(&EthApiBuilderCtx) -> EthApi + + 'static, + { + EthHandlersBuilder { + provider, + pool, + network, + evm_config, + config, + executor, + events, + eth_api_builder: Box::new(eth_api_builder), + } + } } -/// Represents the builder for the `EthHandlers` struct, used to configure and create instances of -/// `EthHandlers`. -#[derive(Debug, Clone)] -pub(crate) struct EthHandlersBuilder { - eth_handlers_config: EthHandlersConfig, - /// Configuration for the RPC module - rpc_config: RpcModuleConfig, +/// Builds [`EthHandlers`] for core, filter, and pubsub `eth_` apis. +#[allow(missing_debug_implementations)] +pub struct EthHandlersBuilder { + provider: Provider, + pool: Pool, + network: Network, + evm_config: EvmConfig, + config: EthConfig, + executor: Tasks, + events: Events, + eth_api_builder: EthApiBuilder, } -impl - EthHandlersBuilder +impl + EthHandlersBuilder where - Provider: BlockReaderIdExt - + AccountReader - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + ChangeSetReader - + Clone - + Unpin - + 'static, - Pool: TransactionPool + Clone + 'static, - Network: NetworkInfo + Peers + Clone + 'static, + Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Pool: Send + Sync + Clone + 'static, + EvmConfig: ConfigureEvm, + Network: Clone, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm + 'static, + Events: CanonStateSubscriptions + Clone, { - /// Creates a new `EthHandlersBuilder` with the provided components. - pub(crate) const fn new( - eth_handlers_config: EthHandlersConfig, - rpc_config: RpcModuleConfig, - ) -> Self { - Self { eth_handlers_config, rpc_config } - } - - /// Builds and returns an `EthHandlers` instance. 
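`EthHandlers` construction was previously buried inside the registry; the new `builder` entry point takes every dependency explicitly, plus the `eth_api_builder` closure. A sketch of wiring it up with the same noop/test types the crate's doc examples use; that those types satisfy every bound here is an assumption:

```rust
use reth_evm_ethereum::EthEvmConfig;
use reth_network_api::noop::NoopNetwork;
use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions};
use reth_rpc_builder::{EthApiBuild, EthConfig, EthHandlers};
use reth_tasks::TokioTaskExecutor;
use reth_transaction_pool::noop::NoopTransactionPool;

#[tokio::main]
async fn main() {
    // The last argument is the `eth_api_builder` closure; `EthApiBuild::build`
    // is the stock implementation added by this PR.
    let handlers = EthHandlers::builder(
        NoopProvider::default(),
        NoopTransactionPool::default(),
        NoopNetwork::default(),
        EthEvmConfig::default(),
        EthConfig::default(),
        TokioTaskExecutor::default(),
        TestCanonStateSubscriptions::default(),
        EthApiBuild::build,
    )
    .build();

    // `api`, `cache`, `filter` and `pubsub` are now plain public fields.
    let _cache = handlers.cache.clone();
}
```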
- pub(crate) fn build(self) -> EthHandlers { - // Initialize the cache - let cache = self.init_cache(); - - // Initialize the fee history cache - let fee_history_cache = self.init_fee_history_cache(&cache); - - // Spawn background tasks for cache - self.spawn_cache_tasks(&cache, &fee_history_cache); - - // Initialize the gas oracle - let gas_oracle = self.init_gas_oracle(&cache); - - // Initialize the blocking task pool - let blocking_task_pool = self.init_blocking_task_pool(); - - // Initialize the Eth API - let api = self.init_api(&cache, gas_oracle, &fee_history_cache, &blocking_task_pool); - - // Initialize the filter - let filter = self.init_filter(&cache); - - // Initialize the pubsub - let pubsub = self.init_pubsub(); - - EthHandlers { api, cache, filter, pubsub, blocking_task_pool } - } - - /// Initializes the `EthStateCache`. - fn init_cache(&self) -> EthStateCache { - EthStateCache::spawn_with( - self.eth_handlers_config.provider.clone(), - self.rpc_config.eth.cache.clone(), - self.eth_handlers_config.executor.clone(), - self.eth_handlers_config.evm_config.clone(), - ) - } - - /// Initializes the `FeeHistoryCache`. - fn init_fee_history_cache(&self, cache: &EthStateCache) -> FeeHistoryCache { - FeeHistoryCache::new(cache.clone(), self.rpc_config.eth.fee_history_cache.clone()) - } - - /// Spawns background tasks for updating caches. - fn spawn_cache_tasks(&self, cache: &EthStateCache, fee_history_cache: &FeeHistoryCache) { - // Get the stream of new canonical blocks - let new_canonical_blocks = self.eth_handlers_config.events.canonical_state_stream(); - - // Clone the cache for the task - let cache_clone = cache.clone(); + /// Returns a new instance with handlers for `eth` namespace. + pub fn build(self) -> EthHandlers { + let Self { provider, pool, network, evm_config, config, executor, events, eth_api_builder } = + self; + + let cache = EthStateCache::spawn_with( + provider.clone(), + config.cache, + executor.clone(), + evm_config.clone(), + ); - // Spawn a critical task to update the cache with new blocks - self.eth_handlers_config.executor.spawn_critical( + let new_canonical_blocks = events.canonical_state_stream(); + let c = cache.clone(); + executor.spawn_critical( "cache canonical blocks task", Box::pin(async move { - cache_new_blocks_task(cache_clone, new_canonical_blocks).await; + cache_new_blocks_task(c, new_canonical_blocks).await; }), ); - // Get another stream of new canonical blocks - let new_canonical_blocks = self.eth_handlers_config.events.canonical_state_stream(); - - // Clone the fee history cache for the task - let fhc_clone = fee_history_cache.clone(); - - // Clone the provider for the task - let provider_clone = self.eth_handlers_config.provider.clone(); - - // Spawn a critical task to update the fee history cache with new blocks - self.eth_handlers_config.executor.spawn_critical( - "cache canonical blocks for fee history task", - Box::pin(async move { - fee_history_cache_new_blocks_task(fhc_clone, new_canonical_blocks, provider_clone) - .await; - }), - ); - } + let ctx = EthApiBuilderCtx { + provider, + pool, + network, + evm_config, + config, + executor, + events, + cache, + }; - /// Initializes the `GasPriceOracle`. - fn init_gas_oracle(&self, cache: &EthStateCache) -> GasPriceOracle { - GasPriceOracle::new( - self.eth_handlers_config.provider.clone(), - self.rpc_config.eth.gas_oracle.clone(), - cache.clone(), - ) - } + let api = eth_api_builder(&ctx); - /// Initializes the `BlockingTaskPool`. 
- fn init_blocking_task_pool(&self) -> BlockingTaskPool { - BlockingTaskPool::build().expect("failed to build tracing pool") - } + let filter = EthFilterApiBuilder::build(&ctx); - /// Initializes the `EthApi`. - fn init_api( - &self, - cache: &EthStateCache, - gas_oracle: GasPriceOracle, - fee_history_cache: &FeeHistoryCache, - blocking_task_pool: &BlockingTaskPool, - ) -> EthApi { - EthApi::with_spawner( - self.eth_handlers_config.provider.clone(), - self.eth_handlers_config.pool.clone(), - self.eth_handlers_config.network.clone(), - cache.clone(), - gas_oracle, - self.rpc_config.eth.rpc_gas_cap, - Box::new(self.eth_handlers_config.executor.clone()), - blocking_task_pool.clone(), - fee_history_cache.clone(), - self.eth_handlers_config.evm_config.clone(), - self.eth_handlers_config.eth_raw_transaction_forwarder.clone(), - ) - } + let pubsub = EthPubSubApiBuilder::build(&ctx); - /// Initializes the `EthFilter`. - fn init_filter(&self, cache: &EthStateCache) -> EthFilter { - EthFilter::new( - self.eth_handlers_config.provider.clone(), - self.eth_handlers_config.pool.clone(), - cache.clone(), - self.rpc_config.eth.filter_config(), - Box::new(self.eth_handlers_config.executor.clone()), - ) - } - - /// Initializes the `EthPubSub`. - fn init_pubsub(&self) -> EthPubSub { - EthPubSub::with_spawner( - self.eth_handlers_config.provider.clone(), - self.eth_handlers_config.pool.clone(), - self.eth_handlers_config.events.clone(), - self.eth_handlers_config.network.clone(), - Box::new(self.eth_handlers_config.executor.clone()), - ) + EthHandlers { api, cache: ctx.cache, filter, pubsub } } } @@ -238,6 +142,8 @@ pub struct EthConfig { pub cache: EthStateCacheConfig, /// Settings for the gas price oracle pub gas_oracle: GasPriceOracleConfig, + /// The maximum number of blocks into the past for generating state proofs. + pub eth_proof_window: u64, /// The maximum number of tracing calls that can be executed in concurrently. pub max_tracing_requests: usize, /// Maximum number of blocks that could be scanned per filter request in `eth_getLogs` calls. @@ -250,9 +156,11 @@ pub struct EthConfig { pub rpc_gas_cap: u64, /// /// Sets TTL for stale filters - pub stale_filter_ttl: std::time::Duration, + pub stale_filter_ttl: Duration, /// Settings for the fee history cache pub fee_history_cache: FeeHistoryCacheConfig, + /// The maximum number of getproof calls that can be executed concurrently. + pub proof_permits: usize, } impl EthConfig { @@ -265,20 +173,19 @@ impl EthConfig { } } -/// Default value for stale filter ttl -const DEFAULT_STALE_FILTER_TTL: std::time::Duration = std::time::Duration::from_secs(5 * 60); - impl Default for EthConfig { fn default() -> Self { Self { cache: EthStateCacheConfig::default(), gas_oracle: GasPriceOracleConfig::default(), + eth_proof_window: DEFAULT_ETH_PROOF_WINDOW, max_tracing_requests: default_max_tracing_requests(), max_blocks_per_filter: DEFAULT_MAX_BLOCKS_PER_FILTER, max_logs_per_response: DEFAULT_MAX_LOGS_PER_RESPONSE, rpc_gas_cap: RPC_DEFAULT_GAS_CAP.into(), stale_filter_ttl: DEFAULT_STALE_FILTER_TTL, fee_history_cache: FeeHistoryCacheConfig::default(), + proof_permits: DEFAULT_PROOF_PERMITS, } } } @@ -319,4 +226,172 @@ impl EthConfig { self.rpc_gas_cap = rpc_gas_cap; self } + + /// Configures the maximum proof window for historical proof generation. 
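Both new `EthConfig` fields are public, so they can be set directly (the `const` builder methods defined just below do the same). An illustrative override; the values are arbitrary:

```rust
use reth_rpc_builder::EthConfig;

fn main() {
    // Serve eth_getProof only near the tip, and bound concurrent proof work.
    let config = EthConfig {
        eth_proof_window: 128, // blocks back from the tip; default is DEFAULT_ETH_PROOF_WINDOW
        proof_permits: 16,     // concurrent eth_getProof calls; default is DEFAULT_PROOF_PERMITS
        ..Default::default()
    };
    assert_eq!(config.eth_proof_window, 128);
}
```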
+ pub const fn eth_proof_window(mut self, window: u64) -> Self { + self.eth_proof_window = window; + self + } + + /// Configures the number of getproof requests + pub const fn proof_permits(mut self, permits: usize) -> Self { + self.proof_permits = permits; + self + } +} + +/// Context for building the `eth` namespace API. +#[derive(Debug, Clone)] +pub struct EthApiBuilderCtx { + /// Database handle. + pub provider: Provider, + /// Mempool handle. + pub pool: Pool, + /// Network handle. + pub network: Network, + /// EVM configuration. + pub evm_config: EvmConfig, + /// RPC config for `eth` namespace. + pub config: EthConfig, + /// Runtime handle. + pub executor: Tasks, + /// Events handle. + pub events: Events, + /// RPC cache handle. + pub cache: EthStateCache, +} + +/// Ethereum layer one `eth` RPC server builder. +#[derive(Default, Debug, Clone, Copy)] +pub struct EthApiBuild; + +impl EthApiBuild { + /// Builds the [`EthApiServer`](reth_rpc_eth_api::EthApiServer), for given context. + pub fn build( + ctx: &EthApiBuilderCtx, + ) -> EthApi + where + Provider: FullRpcProvider, + Pool: TransactionPool, + Network: NetworkInfo + Clone, + Tasks: TaskSpawner + Clone + 'static, + Events: CanonStateSubscriptions, + EvmConfig: ConfigureEvm, + { + let gas_oracle = GasPriceOracleBuilder::build(ctx); + let fee_history_cache = FeeHistoryCacheBuilder::build(ctx); + + EthApi::with_spawner( + ctx.provider.clone(), + ctx.pool.clone(), + ctx.network.clone(), + ctx.cache.clone(), + gas_oracle, + ctx.config.rpc_gas_cap, + ctx.config.eth_proof_window, + Box::new(ctx.executor.clone()), + BlockingTaskPool::build().expect("failed to build blocking task pool"), + fee_history_cache, + ctx.evm_config.clone(), + None, + ctx.config.proof_permits, + ) + } +} + +/// Builds the `eth_` namespace API [`EthFilterApiServer`](reth_rpc_eth_api::EthFilterApiServer). +#[derive(Debug)] +pub struct EthFilterApiBuilder; + +impl EthFilterApiBuilder { + /// Builds the [`EthFilterApiServer`](reth_rpc_eth_api::EthFilterApiServer), for given context. + pub fn build( + ctx: &EthApiBuilderCtx, + ) -> EthFilter + where + Provider: Send + Sync + Clone + 'static, + Pool: Send + Sync + Clone + 'static, + Tasks: TaskSpawner + Clone + 'static, + { + EthFilter::new( + ctx.provider.clone(), + ctx.pool.clone(), + ctx.cache.clone(), + ctx.config.filter_config(), + Box::new(ctx.executor.clone()), + ) + } +} + +/// Builds the `eth_` namespace API [`EthPubSubApiServer`](reth_rpc_eth_api::EthFilterApiServer). +#[derive(Debug)] +pub struct EthPubSubApiBuilder; + +impl EthPubSubApiBuilder { + /// Builds the [`EthPubSubApiServer`](reth_rpc_eth_api::EthPubSubApiServer), for given context. + pub fn build( + ctx: &EthApiBuilderCtx, + ) -> EthPubSub + where + Provider: Clone, + Pool: Clone, + Events: Clone, + Network: Clone, + Tasks: TaskSpawner + Clone + 'static, + { + EthPubSub::with_spawner( + ctx.provider.clone(), + ctx.pool.clone(), + ctx.events.clone(), + ctx.network.clone(), + Box::new(ctx.executor.clone()), + ) + } +} + +/// Builds `eth_` core api component [`GasPriceOracle`], for given context. +#[derive(Debug)] +pub struct GasPriceOracleBuilder; + +impl GasPriceOracleBuilder { + /// Builds a [`GasPriceOracle`], for given context. + pub fn build( + ctx: &EthApiBuilderCtx, + ) -> GasPriceOracle + where + Provider: BlockReaderIdExt + Clone, + { + GasPriceOracle::new(ctx.provider.clone(), ctx.config.gas_oracle, ctx.cache.clone()) + } +} + +/// Builds `eth_` core api component [`FeeHistoryCache`], for given context. 
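Since `build`, `launch`, and friends now accept any closure over `EthApiBuilderCtx`, the stock `EthApiBuild::build` can be wrapped rather than re-implemented. A sketch: the where-clause is copied from `EthApiBuild::build` above, while the context's generic parameter order (elided in this patch view) is an assumption:

```rust
use reth_evm::ConfigureEvm;
use reth_network_api::NetworkInfo;
use reth_provider::{CanonStateSubscriptions, FullRpcProvider};
use reth_rpc::EthApi;
use reth_rpc_builder::{EthApiBuild, EthApiBuilderCtx};
use reth_tasks::TaskSpawner;
use reth_transaction_pool::TransactionPool;

/// Builds the stock eth API, leaving a hook for customization before the
/// instance is handed to the registry.
fn my_eth_api<Provider, Pool, EvmConfig, Network, Tasks, Events>(
    ctx: &EthApiBuilderCtx<Provider, Pool, EvmConfig, Network, Tasks, Events>,
) -> EthApi<Provider, Pool, Network, EvmConfig>
where
    Provider: FullRpcProvider,
    Pool: TransactionPool,
    Network: NetworkInfo + Clone,
    Tasks: TaskSpawner + Clone + 'static,
    Events: CanonStateSubscriptions,
    EvmConfig: ConfigureEvm,
{
    let api = EthApiBuild::build(ctx);
    // ...e.g. install a raw transaction forwarder here...
    api
}
```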
+#[derive(Debug)] +pub struct FeeHistoryCacheBuilder; + +impl FeeHistoryCacheBuilder { + /// Builds a [`FeeHistoryCache`], for given context. + pub fn build( + ctx: &EthApiBuilderCtx, + ) -> FeeHistoryCache + where + Provider: ChainSpecProvider + BlockReaderIdExt + Clone + 'static, + Tasks: TaskSpawner, + Events: CanonStateSubscriptions, + { + let fee_history_cache = + FeeHistoryCache::new(ctx.cache.clone(), ctx.config.fee_history_cache); + + let new_canonical_blocks = ctx.events.canonical_state_stream(); + let fhc = fee_history_cache.clone(); + let provider = ctx.provider.clone(); + ctx.executor.spawn_critical( + "cache canonical blocks for fee history task", + Box::pin(async move { + fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, provider).await; + }), + ); + + fee_history_cache + } } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 7257b3be3..8a6dce5ae 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -8,9 +8,8 @@ //! transaction pool. [`RpcModuleBuilder::build`] returns a [`TransportRpcModules`] which contains //! the transport specific config (what APIs are available via this transport). //! -//! The [`RpcServerConfig`] is used to configure the [`RpcServer`] type which contains all transport -//! implementations (http server, ws server, ipc server). [`RpcServer::start`] requires the -//! [`TransportRpcModules`] so it can start the servers with the configured modules. +//! The [`RpcServerConfig`] is used to assemble and start the http server, ws server, ipc servers, +//! it requires the [`TransportRpcModules`] so it can start the servers with the configured modules. //! //! # Examples //! @@ -19,13 +18,12 @@ //! ``` //! use reth_evm::ConfigureEvm; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_provider::{ -//! AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, -//! ChangeSetReader, EvmEnvProvider, StateProviderFactory, -//! }; +//! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc_builder::{ -//! RethRpcModule, RpcModuleBuilder, RpcServerConfig, ServerBuilder, TransportRpcModuleConfig, +//! EthApiBuild, RethRpcModule, RpcModuleBuilder, RpcServerConfig, ServerBuilder, +//! TransportRpcModuleConfig, //! }; +//! //! use reth_tasks::TokioTaskExecutor; //! use reth_transaction_pool::TransactionPool; //! pub async fn launch( @@ -35,19 +33,11 @@ //! events: Events, //! evm_config: EvmConfig, //! ) where -//! Provider: AccountReader -//! + BlockReaderIdExt -//! + ChainSpecProvider -//! + ChangeSetReader -//! + StateProviderFactory -//! + EvmEnvProvider -//! + Clone -//! + Unpin -//! + 'static, -//! Pool: TransactionPool + Clone + 'static, +//! Provider: FullRpcProvider + AccountReader + ChangeSetReader, +//! Pool: TransactionPool + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, -//! EvmConfig: ConfigureEvm + 'static, +//! EvmConfig: ConfigureEvm, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -64,12 +54,11 @@ //! events, //! evm_config, //! ) -//! .build(transports); +//! .build(transports, EthApiBuild::build); //! let handle = RpcServerConfig::default() //! .with_http(ServerBuilder::default()) -//! .start(transport_modules) -//! .await -//! .unwrap(); +//! .start(&transport_modules) +//! .await; //! } //! ``` //! @@ -80,13 +69,10 @@ //! 
use reth_engine_primitives::EngineTypes; //! use reth_evm::ConfigureEvm; //! use reth_network_api::{NetworkInfo, Peers}; -//! use reth_provider::{ -//! AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, -//! ChangeSetReader, EvmEnvProvider, StateProviderFactory, -//! }; +//! use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider}; //! use reth_rpc_api::EngineApiServer; //! use reth_rpc_builder::{ -//! auth::AuthServerConfig, RethRpcModule, RpcModuleBuilder, RpcServerConfig, +//! auth::AuthServerConfig, EthApiBuild, RethRpcModule, RpcModuleBuilder, RpcServerConfig, //! TransportRpcModuleConfig, //! }; //! use reth_rpc_layer::JwtSecret; @@ -101,21 +87,13 @@ //! engine_api: EngineApi, //! evm_config: EvmConfig, //! ) where -//! Provider: AccountReader -//! + BlockReaderIdExt -//! + ChainSpecProvider -//! + ChangeSetReader -//! + StateProviderFactory -//! + EvmEnvProvider -//! + Clone -//! + Unpin -//! + 'static, -//! Pool: TransactionPool + Clone + 'static, +//! Provider: FullRpcProvider + AccountReader + ChangeSetReader, +//! Pool: TransactionPool + 'static, //! Network: NetworkInfo + Peers + Clone + 'static, //! Events: CanonStateSubscriptions + Clone + 'static, //! EngineApi: EngineApiServer, //! EngineT: EngineTypes + 'static, -//! EvmConfig: ConfigureEvm + 'static, +//! EvmConfig: ConfigureEvm, //! { //! // configure the rpc module per transport //! let transports = TransportRpcModuleConfig::default().with_http(vec![ @@ -135,15 +113,14 @@ //! //! // configure the server modules //! let (modules, auth_module, _registry) = -//! builder.build_with_auth_server(transports, engine_api); +//! builder.build_with_auth_server(transports, engine_api, EthApiBuild::build); //! //! // start the servers //! let auth_config = AuthServerConfig::builder(JwtSecret::random()).build(); //! let config = RpcServerConfig::default(); //! //! let (_rpc_handle, _auth_handle) = -//! try_join!(modules.start_server(config), auth_module.start_server(auth_config),) -//! .unwrap(); +//! try_join!(config.start(&modules), auth_module.start_server(auth_config),).unwrap(); //! } //! 
``` @@ -155,47 +132,49 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use crate::{ - auth::AuthRpcModule, - cors::CorsDomainError, - error::WsHttpSamePortError, - eth::{EthHandlersBuilder, EthHandlersConfig}, - metrics::RpcRequestMetrics, +use std::{ + collections::HashMap, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, }; + use error::{ConflictingModules, RpcError, ServerKind}; use http::{header::AUTHORIZATION, HeaderMap}; use jsonrpsee::{ core::RegisterMethodError, - server::{AlreadyStoppedError, IdProvider, RpcServiceBuilder, Server, ServerHandle}, + server::{AlreadyStoppedError, IdProvider, RpcServiceBuilder, ServerHandle}, Methods, RpcModule, }; use reth_engine_primitives::EngineTypes; use reth_evm::ConfigureEvm; -use reth_ipc::server::IpcServer; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_provider::{ - AccountReader, BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, - ChangeSetReader, EvmEnvProvider, StateProviderFactory, + AccountReader, BlockReader, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, + EvmEnvProvider, FullRpcProvider, StateProviderFactory, }; use reth_rpc::{ - eth::{cache::EthStateCache, traits::RawTransactionForwarder, EthBundle}, - AdminApi, DebugApi, EngineEthApi, EthApi, EthSubscriptionIdProvider, NetApi, OtterscanApi, - RPCApi, RethApi, TraceApi, TxPoolApi, Web3Api, + AdminApi, DebugApi, EngineEthApi, EthBundle, NetApi, OtterscanApi, RPCApi, RethApi, TraceApi, + TxPoolApi, Web3Api, }; use reth_rpc_api::servers::*; +use reth_rpc_eth_api::{ + helpers::{ + Call, EthApiSpec, EthTransactions, LoadPendingBlock, TraceExt, UpdateRawTxForwarder, + }, + EthApiServer, FullEthApiServer, RawTransactionForwarder, +}; +use reth_rpc_eth_types::{EthStateCache, EthSubscriptionIdProvider}; use reth_rpc_layer::{AuthLayer, Claims, JwtAuthValidator, JwtSecret}; use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner, TokioTaskExecutor}; use reth_transaction_pool::{noop::NoopTransactionPool, TransactionPool}; use serde::{Deserialize, Serialize}; -use std::{ - collections::HashMap, - fmt, - net::{Ipv4Addr, SocketAddr, SocketAddrV4}, - sync::Arc, - time::{Duration, SystemTime, UNIX_EPOCH}, -}; use tower_http::cors::CorsLayer; -use tracing::{instrument, trace}; + +use crate::{ + auth::AuthRpcModule, cors::CorsDomainError, error::WsHttpSamePortError, + metrics::RpcRequestMetrics, +}; // re-export for convenience pub use jsonrpsee::server::ServerBuilder; @@ -218,15 +197,18 @@ mod cors; pub mod error; /// Eth utils -mod eth; -pub use eth::{EthConfig, EthHandlers}; +pub mod eth; +pub use eth::{ + EthApiBuild, EthApiBuilderCtx, EthConfig, EthHandlers, FeeHistoryCacheBuilder, + GasPriceOracleBuilder, +}; // Rpc server metrics mod metrics; /// Convenience function for starting a server in one step. 
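A sketch of calling the reworked one-step `launch` defined just below; aside from the new trailing `eth` closure the call mirrors the module docs, and the noop/test types are assumed to satisfy the tightened bounds:

```rust
use reth_evm_ethereum::EthEvmConfig;
use reth_network_api::noop::NoopNetwork;
use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions};
use reth_rpc_builder::{
    launch, EthApiBuild, RethRpcModule, RpcServerConfig, ServerBuilder, TransportRpcModuleConfig,
};
use reth_tasks::TokioTaskExecutor;
use reth_transaction_pool::noop::NoopTransactionPool;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let handle = launch(
        NoopProvider::default(),
        NoopTransactionPool::default(),
        NoopNetwork::default(),
        TransportRpcModuleConfig::set_http(vec![RethRpcModule::Eth]),
        RpcServerConfig::http(ServerBuilder::default()),
        TokioTaskExecutor::default(),
        TestCanonStateSubscriptions::default(),
        EthEvmConfig::default(),
        EthApiBuild::build, // the new `eth` argument
    )
    .await?;
    println!("http rpc at {:?}", handle.http_local_addr());
    Ok(())
}
```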
#[allow(clippy::too_many_arguments)] -pub async fn launch( +pub async fn launch( provider: Provider, pool: Pool, network: Network, @@ -235,28 +217,26 @@ pub async fn launch( executor: Tasks, events: Events, evm_config: EvmConfig, + eth: EthApiB, ) -> Result where - Provider: BlockReaderIdExt - + AccountReader - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + ChangeSetReader - + Clone - + Unpin - + 'static, - Pool: TransactionPool + Clone + 'static, + Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm + 'static, + EvmConfig: ConfigureEvm, + EthApiB: FnOnce(&EthApiBuilderCtx) -> EthApi + + 'static, + EthApi: FullEthApiServer, { let module_config = module_config.into(); - let server_config = server_config.into(); - RpcModuleBuilder::new(provider, pool, network, executor, events, evm_config) - .build(module_config) - .start_server(server_config) + server_config + .into() + .start( + &RpcModuleBuilder::new(provider, pool, network, executor, events, evm_config) + .build(module_config, eth), + ) .await } @@ -323,8 +303,8 @@ impl /// Configure a [`NoopTransactionPool`] instance. /// /// Caution: This will configure a pool API that does absolutely nothing. - /// This is only intended for allow easier setup of namespaces that depend on the [`EthApi`] - /// which requires a [`TransactionPool`] implementation. + /// This is only intended for allow easier setup of namespaces that depend on the + /// [`EthApi`](reth_rpc::eth::EthApi) which requires a [`TransactionPool`] implementation. pub fn with_noop_pool( self, ) -> RpcModuleBuilder { @@ -354,8 +334,8 @@ impl /// Configure a [`NoopNetwork`] instance. /// /// Caution: This will configure a network API that does absolutely nothing. - /// This is only intended for allow easier setup of namespaces that depend on the [`EthApi`] - /// which requires a [`NetworkInfo`] implementation. + /// This is only intended for allow easier setup of namespaces that depend on the + /// [`EthApi`](reth_rpc::eth::EthApi) which requires a [`NetworkInfo`] implementation. pub fn with_noop_network( self, ) -> RpcModuleBuilder { @@ -428,20 +408,12 @@ impl impl RpcModuleBuilder where - Provider: BlockReaderIdExt - + AccountReader - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + ChangeSetReader - + Clone - + Unpin - + 'static, - Pool: TransactionPool + Clone + 'static, + Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm + 'static, + EvmConfig: ConfigureEvm, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). @@ -449,25 +421,31 @@ where /// This behaves exactly as [`RpcModuleBuilder::build`] for the [`TransportRpcModules`], but /// also configures the auth (engine api) server, which exposes a subset of the `eth_` /// namespace. 
- pub fn build_with_auth_server( + #[allow(clippy::type_complexity)] + pub fn build_with_auth_server( self, module_config: TransportRpcModuleConfig, engine: EngineApi, + eth: EthApiB, ) -> ( TransportRpcModules, AuthRpcModule, - RethModuleRegistry, + RpcRegistryInner, ) where EngineT: EngineTypes + 'static, EngineApi: EngineApiServer, + EthApiB: FnOnce(&EthApiBuilderCtx) -> EthApi + + 'static, + EthApi: FullEthApiServer, { let Self { provider, pool, network, executor, events, evm_config } = self; let config = module_config.config.clone().unwrap_or_default(); - let mut registry = - RethModuleRegistry::new(provider, pool, network, executor, events, config, evm_config); + let mut registry = RpcRegistryInner::new( + provider, pool, network, executor, events, config, evm_config, eth, + ); let modules = registry.create_transport_rpc_modules(module_config); @@ -476,7 +454,7 @@ where (modules, auth_module, registry) } - /// Converts the builder into a [`RethModuleRegistry`] which can be used to create all + /// Converts the builder into a [`RpcRegistryInner`] which can be used to create all /// components. /// /// This is useful for getting access to API handlers directly: @@ -487,7 +465,7 @@ where /// use reth_evm::ConfigureEvm; /// use reth_network_api::noop::NoopNetwork; /// use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; - /// use reth_rpc_builder::RpcModuleBuilder; + /// use reth_rpc_builder::{EthApiBuild, RpcModuleBuilder}; /// use reth_tasks::TokioTaskExecutor; /// use reth_transaction_pool::noop::NoopTransactionPool; /// @@ -499,24 +477,36 @@ where /// .with_executor(TokioTaskExecutor::default()) /// .with_events(TestCanonStateSubscriptions::default()) /// .with_evm_config(evm) - /// .into_registry(Default::default()); + /// .into_registry(Default::default(), EthApiBuild::build); /// /// let eth_api = registry.eth_api(); /// } /// ``` - pub fn into_registry( + pub fn into_registry( self, config: RpcModuleConfig, - ) -> RethModuleRegistry { + eth: EthApiB, + ) -> RpcRegistryInner + where + EthApiB: FnOnce(&EthApiBuilderCtx) -> EthApi + + 'static, + { let Self { provider, pool, network, executor, events, evm_config } = self; - RethModuleRegistry::new(provider, pool, network, executor, events, config, evm_config) + RpcRegistryInner::new(provider, pool, network, executor, events, config, evm_config, eth) } /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). - /// - /// See also [`RpcServer::start`] - pub fn build(self, module_config: TransportRpcModuleConfig) -> TransportRpcModules<()> { + pub fn build( + self, + module_config: TransportRpcModuleConfig, + eth: EthApiB, + ) -> TransportRpcModules<()> + where + EthApiB: FnOnce(&EthApiBuilderCtx) -> EthApi + + 'static, + EthApi: FullEthApiServer, + { let mut modules = TransportRpcModules::default(); let Self { provider, pool, network, executor, events, evm_config } = self; @@ -524,7 +514,7 @@ where if !module_config.is_empty() { let TransportRpcModuleConfig { http, ws, ipc, config } = module_config.clone(); - let mut registry = RethModuleRegistry::new( + let mut registry = RpcRegistryInner::new( provider, pool, network, @@ -532,6 +522,7 @@ where events, config.unwrap_or_default(), evm_config, + eth, ); modules.config = module_config; @@ -620,34 +611,34 @@ impl RpcModuleConfigBuilder { /// A Helper type the holds instances of the configured modules. 
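Because the renamed `RpcRegistryInner` builds the eth handlers eagerly in its constructor, the eth accessors become cheap `&self` getters instead of lazy `&mut self` spawns. Building on the `into_registry` doc example above (test types again assumed to satisfy the bounds):

```rust
use reth_evm_ethereum::EthEvmConfig;
use reth_network_api::noop::NoopNetwork;
use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions};
use reth_rpc_builder::{EthApiBuild, RpcModuleBuilder};
use reth_tasks::TokioTaskExecutor;
use reth_transaction_pool::noop::NoopTransactionPool;

#[tokio::main]
async fn main() {
    let registry = RpcModuleBuilder::default()
        .with_provider(NoopProvider::default())
        .with_pool(NoopTransactionPool::default())
        .with_network(NoopNetwork::default())
        .with_executor(TokioTaskExecutor::default())
        .with_events(TestCanonStateSubscriptions::default())
        .with_evm_config(EthEvmConfig::default())
        .into_registry(Default::default(), EthApiBuild::build);

    // Before this PR these were lazy, spawning services on first use; they
    // are now simple getters over the pre-built handlers.
    let _api = registry.eth_api().clone();
    let _cache = registry.eth_cache().clone();
    let _filter = registry.eth_handlers().filter.clone();
}
```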
#[derive(Debug, Clone)] -pub struct RethModuleRegistry { +pub struct RpcRegistryInner { provider: Provider, pool: Pool, network: Network, executor: Tasks, events: Events, - /// Defines how to configure the EVM before execution. - evm_config: EvmConfig, - /// Additional settings for handlers. - config: RpcModuleConfig, - /// Holds a clone of all the eth namespace handlers - eth: Option>, + /// Holds a all `eth_` namespace handlers + eth: EthHandlers, /// to put trace calls behind semaphore blocking_pool_guard: BlockingTaskGuard, /// Contains the [Methods] of a module modules: HashMap, - /// Optional forwarder for `eth_sendRawTransaction` - // TODO(mattsse): find a more ergonomic way to configure eth/rpc customizations - eth_raw_transaction_forwarder: Option>, } -// === impl RethModuleRegistry === +// === impl RpcRegistryInner === -impl - RethModuleRegistry +impl + RpcRegistryInner +where + Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, + Pool: Send + Sync + Clone + 'static, + Network: Clone, + Events: CanonStateSubscriptions + Clone, + Tasks: TaskSpawner + Clone + 'static, { /// Creates a new, empty instance. - pub fn new( + #[allow(clippy::too_many_arguments)] + pub fn new( provider: Provider, pool: Pool, network: Network, @@ -655,34 +646,59 @@ impl events: Events, config: RpcModuleConfig, evm_config: EvmConfig, - ) -> Self { + eth_api_builder: EthApiB, + ) -> Self + where + EvmConfig: ConfigureEvm, + EthApiB: FnOnce(&EthApiBuilderCtx) -> EthApi + + 'static, + { + let blocking_pool_guard = BlockingTaskGuard::new(config.eth.max_tracing_requests); + + let eth = EthHandlers::builder( + provider.clone(), + pool.clone(), + network.clone(), + evm_config, + config.eth, + executor.clone(), + events.clone(), + eth_api_builder, + ) + .build(); + Self { provider, pool, network, - evm_config, - eth: None, + eth, executor, modules: Default::default(), - blocking_pool_guard: BlockingTaskGuard::new(config.eth.max_tracing_requests), - config, + blocking_pool_guard, events, - eth_raw_transaction_forwarder: None, } } +} - /// Sets a forwarder for `eth_sendRawTransaction` +impl + RpcRegistryInner +{ + /// Returns a reference to the installed [`EthApi`](reth_rpc::eth::EthApi). + pub const fn eth_api(&self) -> &EthApi { + &self.eth.api + } + + /// Returns a reference to the installed [`EthHandlers`]. + pub const fn eth_handlers(&self) -> &EthHandlers { + &self.eth + } + + /// Returns the [`EthStateCache`] frontend /// - /// Note: this might be removed in the future in favor of a more generic approach. - pub fn set_eth_raw_transaction_forwarder( - &mut self, - forwarder: Arc, - ) { - if let Some(eth) = self.eth.as_ref() { - // in case the eth api has been created before the forwarder was set: - eth.api.set_eth_raw_transaction_forwarder(forwarder.clone()); - } - self.eth_raw_transaction_forwarder = Some(forwarder); + /// This will spawn exactly one [`EthStateCache`] service if this is the first time the cache is + /// requested. + pub const fn eth_cache(&self) -> &EthStateCache { + &self.eth.cache } /// Returns a reference to the pool @@ -720,13 +736,30 @@ impl } } -impl - RethModuleRegistry +impl + RpcRegistryInner where - Network: NetworkInfo + Peers + Clone + 'static, + EthApi: UpdateRawTxForwarder, +{ + /// Sets a forwarder for `eth_sendRawTransaction` + /// + /// Note: this might be removed in the future in favor of a more generic approach. 
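The forwarder hook survives the refactor but moves behind an `EthApi: UpdateRawTxForwarder` bound, as shown next. A sketch of a forwarder implementation; the trait is only named in this patch, so its exact signature and the `EthResult` path are assumptions:

```rust
use jsonrpsee::core::async_trait;
use reth_rpc_eth_api::RawTransactionForwarder;

/// Hypothetical forwarder that would mirror raw transactions to an upstream
/// sequencer endpoint.
#[derive(Debug)]
struct SequencerForwarder;

#[async_trait]
impl RawTransactionForwarder for SequencerForwarder {
    // Assumed signature; the diff only shows where the trait is re-exported.
    async fn forward_raw_transaction(&self, raw: &[u8]) -> reth_rpc_eth_types::EthResult<()> {
        let _ = raw; // e.g. POST the bytes to a sequencer
        Ok(())
    }
}

// Installation no longer needs `&mut` access to the registry:
// registry.set_eth_raw_transaction_forwarder(std::sync::Arc::new(SequencerForwarder));
```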
+ pub fn set_eth_raw_transaction_forwarder(&self, forwarder: Arc) { + // in case the eth api has been created before the forwarder was set: + self.eth.api.set_eth_raw_transaction_forwarder(forwarder.clone()); + } +} + +impl + RpcRegistryInner +where + Network: NetworkInfo + Clone + 'static, { /// Instantiates `AdminApi` - pub fn admin_api(&self) -> AdminApi { + pub fn admin_api(&self) -> AdminApi + where + Network: Peers, + { AdminApi::new(self.network.clone(), self.provider.chain_spec()) } @@ -736,7 +769,10 @@ where } /// Register Admin Namespace - pub fn register_admin(&mut self) -> &mut Self { + pub fn register_admin(&mut self) -> &mut Self + where + Network: Peers, + { let adminapi = self.admin_api(); self.modules.insert(RethRpcModule::Admin, adminapi.into_rpc().into()); self @@ -750,31 +786,24 @@ where } } -impl - RethModuleRegistry +impl + RpcRegistryInner where - Provider: BlockReaderIdExt - + AccountReader - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + ChangeSetReader - + Clone - + Unpin - + 'static, - Pool: TransactionPool + Clone + 'static, + Provider: FullRpcProvider + AccountReader + ChangeSetReader, Network: NetworkInfo + Peers + Clone + 'static, Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm + 'static, + EthApi: Clone, { /// Register Eth Namespace /// /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn register_eth(&mut self) -> &mut Self { - let eth_api = self.eth_api(); + pub fn register_eth(&mut self) -> &mut Self + where + EthApi: EthApiServer, + { + let eth_api = self.eth_api().clone(); self.modules.insert(RethRpcModule::Eth, eth_api.into_rpc().into()); self } @@ -784,7 +813,10 @@ where /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn register_ots(&mut self) -> &mut Self { + pub fn register_ots(&mut self) -> &mut Self + where + EthApi: EthApiServer + TraceExt, + { let otterscan_api = self.otterscan_api(); self.modules.insert(RethRpcModule::Ots, otterscan_api.into_rpc().into()); self @@ -795,7 +827,10 @@ where /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn register_debug(&mut self) -> &mut Self { + pub fn register_debug(&mut self) -> &mut Self + where + EthApi: EthApiSpec + EthTransactions + TraceExt, + { let debug_api = self.debug_api(); self.modules.insert(RethRpcModule::Debug, debug_api.into_rpc().into()); self @@ -806,34 +841,15 @@ where /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn register_trace(&mut self) -> &mut Self { + pub fn register_trace(&mut self) -> &mut Self + where + EthApi: TraceExt, + { let trace_api = self.trace_api(); self.modules.insert(RethRpcModule::Trace, trace_api.into_rpc().into()); self } - /// Configures the auth module that includes the - /// * `engine_` namespace - /// * `api_` namespace - /// - /// Note: This does _not_ register the `engine_` in this registry. 
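Namespace registration is now gated per method: each `register_*` call states only the `EthApi` capabilities it needs. A sketch of a helper whose bounds are copied from the impl blocks above; the registry's generic parameter order is assumed, since this patch view elides it:

```rust
use reth_network_api::{NetworkInfo, Peers};
use reth_provider::{AccountReader, ChangeSetReader, FullRpcProvider};
use reth_rpc_builder::RpcRegistryInner;
use reth_rpc_eth_api::helpers::{EthApiSpec, EthTransactions, TraceExt};
use reth_tasks::TaskSpawner;

/// Registers the tracing-flavoured namespaces; this compiles only for an
/// `EthApi` that actually implements the tracing helpers.
fn register_tracing_namespaces<Provider, Pool, Network, Tasks, Events, EthApi>(
    registry: &mut RpcRegistryInner<Provider, Pool, Network, Tasks, Events, EthApi>,
) where
    Provider: FullRpcProvider + AccountReader + ChangeSetReader,
    Network: NetworkInfo + Peers + Clone + 'static,
    Tasks: TaskSpawner + Clone + 'static,
    EthApi: EthApiSpec + EthTransactions + TraceExt + Clone,
{
    registry.register_debug().register_trace();
}
```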
- pub fn create_auth_module(&mut self, engine_api: EngineApi) -> AuthRpcModule - where - EngineT: EngineTypes + 'static, - EngineApi: EngineApiServer, - { - let eth_handlers = self.eth_handlers(); - let mut module = RpcModule::new(()); - - module.merge(engine_api.into_rpc()).expect("No conflicting methods"); - - // also merge a subset of `eth_` handlers - let engine_eth = EngineEthApi::new(eth_handlers.api.clone(), eth_handlers.filter); - module.merge(engine_eth.into_rpc()).expect("No conflicting methods"); - - AuthRpcModule { inner: module } - } - /// Register Net Namespace /// /// See also [`Self::eth_api`] @@ -841,7 +857,10 @@ where /// # Panics /// /// If called outside of the tokio runtime. - pub fn register_net(&mut self) -> &mut Self { + pub fn register_net(&mut self) -> &mut Self + where + EthApi: EthApiSpec + 'static, + { let netapi = self.net_api(); self.modules.insert(RethRpcModule::Net, netapi.into_rpc().into()); self @@ -860,6 +879,113 @@ where self } + /// Instantiates `TraceApi` + /// + /// # Panics + /// + /// If called outside of the tokio runtime. See also [`Self::eth_api`] + pub fn trace_api(&self) -> TraceApi + where + EthApi: TraceExt, + { + TraceApi::new( + self.provider.clone(), + self.eth_api().clone(), + self.blocking_pool_guard.clone(), + ) + } + + /// Instantiates [`EthBundle`] Api + /// + /// # Panics + /// + /// If called outside of the tokio runtime. See also [`Self::eth_api`] + pub fn bundle_api(&self) -> EthBundle + where + EthApi: EthTransactions + LoadPendingBlock + Call, + { + let eth_api = self.eth_api().clone(); + EthBundle::new(eth_api, self.blocking_pool_guard.clone()) + } + + /// Instantiates `OtterscanApi` + /// + /// # Panics + /// + /// If called outside of the tokio runtime. See also [`Self::eth_api`] + pub fn otterscan_api(&self) -> OtterscanApi + where + EthApi: EthApiServer, + { + let eth_api = self.eth_api().clone(); + OtterscanApi::new(eth_api) + } + + /// Instantiates `DebugApi` + /// + /// # Panics + /// + /// If called outside of the tokio runtime. See also [`Self::eth_api`] + pub fn debug_api(&self) -> DebugApi + where + EthApi: EthApiSpec + EthTransactions + TraceExt, + { + let eth_api = self.eth_api().clone(); + DebugApi::new(self.provider.clone(), eth_api, self.blocking_pool_guard.clone()) + } + + /// Instantiates `NetApi` + /// + /// # Panics + /// + /// If called outside of the tokio runtime. See also [`Self::eth_api`] + pub fn net_api(&self) -> NetApi + where + EthApi: EthApiSpec + 'static, + { + let eth_api = self.eth_api().clone(); + NetApi::new(self.network.clone(), eth_api) + } + + /// Instantiates `RethApi` + pub fn reth_api(&self) -> RethApi { + RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) + } +} + +impl + RpcRegistryInner +where + Provider: FullRpcProvider + AccountReader + ChangeSetReader, + Pool: TransactionPool + 'static, + Network: NetworkInfo + Peers + Clone + 'static, + Tasks: TaskSpawner + Clone + 'static, + Events: CanonStateSubscriptions + Clone + 'static, + EthApi: FullEthApiServer, +{ + /// Configures the auth module that includes the + /// * `engine_` namespace + /// * `api_` namespace + /// + /// Note: This does _not_ register the `engine_` in this registry. 
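`create_auth_module` now borrows the registry immutably, so the engine server can be assembled after (or alongside) the transport modules. A sketch with bounds copied from the impl block above; the generic order on `RpcRegistryInner` and the `AuthServerHandle` path are assumptions:

```rust
use reth_engine_primitives::EngineTypes;
use reth_network_api::{NetworkInfo, Peers};
use reth_provider::{AccountReader, CanonStateSubscriptions, ChangeSetReader, FullRpcProvider};
use reth_rpc_api::EngineApiServer;
use reth_rpc_builder::{
    auth::{AuthServerConfig, AuthServerHandle},
    error::RpcError,
    RpcRegistryInner,
};
use reth_rpc_eth_api::FullEthApiServer;
use reth_rpc_layer::JwtSecret;
use reth_tasks::TaskSpawner;
use reth_transaction_pool::TransactionPool;

/// Builds the `engine_` plus `eth_` subset module and starts the auth server.
async fn start_auth<Provider, Pool, Network, Tasks, Events, EthApi, EngineT, EngineApi>(
    registry: &RpcRegistryInner<Provider, Pool, Network, Tasks, Events, EthApi>,
    engine_api: EngineApi,
) -> Result<AuthServerHandle, RpcError>
where
    Provider: FullRpcProvider + AccountReader + ChangeSetReader,
    Pool: TransactionPool + 'static,
    Network: NetworkInfo + Peers + Clone + 'static,
    Tasks: TaskSpawner + Clone + 'static,
    Events: CanonStateSubscriptions + Clone + 'static,
    EthApi: FullEthApiServer,
    EngineT: EngineTypes + 'static,
    EngineApi: EngineApiServer<EngineT>,
{
    let auth_module = registry.create_auth_module(engine_api);
    auth_module.start_server(AuthServerConfig::builder(JwtSecret::random()).build()).await
}
```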
+ pub fn create_auth_module(&self, engine_api: EngineApi) -> AuthRpcModule + where + EngineT: EngineTypes + 'static, + EngineApi: EngineApiServer, + { + let mut module = RpcModule::new(()); + + module.merge(engine_api.into_rpc()).expect("No conflicting methods"); + + // also merge a subset of `eth_` handlers + let eth_handlers = self.eth_handlers(); + let engine_eth = EngineEthApi::new(eth_handlers.api.clone(), eth_handlers.filter.clone()); + + module.merge(engine_eth.into_rpc()).expect("No conflicting methods"); + + AuthRpcModule { inner: module } + } + /// Helper function to create a [`RpcModule`] if it's not `None` fn maybe_module(&mut self, config: Option<&RpcModuleSelection>) -> Option> { config.map(|config| self.module_for(config)) @@ -907,13 +1033,8 @@ where &mut self, namespaces: impl Iterator, ) -> Vec { - let EthHandlers { - api: eth_api, - filter: eth_filter, - pubsub: eth_pubsub, - cache: _, - blocking_task_pool: _, - } = self.with_eth(|eth| eth.clone()); + let EthHandlers { api: eth_api, filter: eth_filter, pubsub: eth_pubsub, .. } = + self.eth_handlers().clone(); // Create a copy, so we can list out all the methods for rpc_ api let namespaces: Vec<_> = namespaces.collect(); @@ -982,120 +1103,6 @@ where }) .collect::>() } - - /// Returns the [`EthStateCache`] frontend - /// - /// This will spawn exactly one [`EthStateCache`] service if this is the first time the cache is - /// requested. - pub fn eth_cache(&mut self) -> EthStateCache { - self.with_eth(|handlers| handlers.cache.clone()) - } - - /// Creates the [`EthHandlers`] type the first time this is called. - /// - /// This will spawn the required service tasks for [`EthApi`] for: - /// - [`EthStateCache`] - /// - [`reth_rpc::eth::FeeHistoryCache`] - fn with_eth(&mut self, f: F) -> R - where - F: FnOnce(&EthHandlers) -> R, - { - f(match &self.eth { - Some(eth) => eth, - None => self.eth.insert(self.init_eth()), - }) - } - - fn init_eth(&self) -> EthHandlers { - EthHandlersBuilder::new( - EthHandlersConfig { - provider: self.provider.clone(), - pool: self.pool.clone(), - network: self.network.clone(), - executor: self.executor.clone(), - events: self.events.clone(), - evm_config: self.evm_config.clone(), - eth_raw_transaction_forwarder: self.eth_raw_transaction_forwarder.clone(), - }, - self.config.clone(), - ) - .build() - } - - /// Returns the configured [`EthHandlers`] or creates it if it does not exist yet - /// - /// # Panics - /// - /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn eth_handlers(&mut self) -> EthHandlers { - self.with_eth(|handlers| handlers.clone()) - } - - /// Returns the configured [`EthApi`] or creates it if it does not exist yet - /// - /// Caution: This will spawn the necessary tasks required by the [`EthApi`]: [`EthStateCache`]. - /// - /// # Panics - /// - /// If called outside of the tokio runtime. - pub fn eth_api(&mut self) -> EthApi { - self.with_eth(|handlers| handlers.api.clone()) - } - - /// Instantiates `TraceApi` - /// - /// # Panics - /// - /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn trace_api(&mut self) -> TraceApi> { - let eth = self.eth_handlers(); - TraceApi::new(self.provider.clone(), eth.api, self.blocking_pool_guard.clone()) - } - - /// Instantiates [`EthBundle`] Api - /// - /// # Panics - /// - /// If called outside of the tokio runtime. 
See also [`Self::eth_api`] - pub fn bundle_api(&mut self) -> EthBundle> { - let eth_api = self.eth_api(); - EthBundle::new(eth_api, self.blocking_pool_guard.clone()) - } - - /// Instantiates `OtterscanApi` - /// - /// # Panics - /// - /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn otterscan_api(&mut self) -> OtterscanApi> { - let eth_api = self.eth_api(); - OtterscanApi::new(eth_api) - } - - /// Instantiates `DebugApi` - /// - /// # Panics - /// - /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn debug_api(&mut self) -> DebugApi> { - let eth_api = self.eth_api(); - DebugApi::new(self.provider.clone(), eth_api, self.blocking_pool_guard.clone()) - } - - /// Instantiates `NetApi` - /// - /// # Panics - /// - /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn net_api(&mut self) -> NetApi> { - let eth_api = self.eth_api(); - NetApi::new(self.network.clone(), eth_api) - } - - /// Instantiates `RethApi` - pub fn reth_api(&self) -> RethApi { - RethApi::new(self.provider.clone(), Box::new(self.executor.clone())) - } } /// A builder type for configuring and launching the servers that will handle RPC requests. @@ -1270,28 +1277,26 @@ impl RpcServerConfig { self.ipc_endpoint.clone() } - /// Convenience function to do [`RpcServerConfig::build`] and [`RpcServer::start`] in one step - pub async fn start(self, modules: TransportRpcModules) -> Result { - self.build(&modules).await?.start(modules).await - } - /// Creates the [`CorsLayer`] if any fn maybe_cors_layer(cors: Option) -> Result, CorsDomainError> { cors.as_deref().map(cors::create_cors_layer).transpose() } /// Creates the [`AuthLayer`] if any - fn maybe_jwt_layer(&self) -> Option> { - self.jwt_secret.map(|secret| AuthLayer::new(JwtAuthValidator::new(secret))) + fn maybe_jwt_layer(jwt_secret: Option) -> Option> { + jwt_secret.map(|secret| AuthLayer::new(JwtAuthValidator::new(secret))) } - /// Builds the ws and http server(s). + /// Builds and starts the configured server(s): http, ws, ipc. /// - /// If both are on the same port, they are combined into one server. - async fn build_ws_http( - &mut self, - modules: &TransportRpcModules, - ) -> Result { + /// If both http and ws are on the same port, they are combined into one server. + /// + /// Returns the [`RpcServerHandle`] with the handle to the started servers. + pub async fn start(self, modules: &TransportRpcModules) -> Result { + let mut http_handle = None; + let mut ws_handle = None; + let mut ipc_handle = None; + let http_socket_addr = self.http_addr.unwrap_or(SocketAddr::V4(SocketAddrV4::new( Ipv4Addr::LOCALHOST, constants::DEFAULT_HTTP_RPC_PORT, @@ -1302,6 +1307,17 @@ impl RpcServerConfig { constants::DEFAULT_WS_RPC_PORT, ))); + let metrics = modules.ipc.as_ref().map(RpcRequestMetrics::ipc).unwrap_or_default(); + let ipc_path = + self.ipc_endpoint.clone().unwrap_or_else(|| constants::DEFAULT_IPC_ENDPOINT.into()); + + if let Some(builder) = self.ipc_server_config { + let ipc = builder + .set_rpc_middleware(IpcRpcServiceBuilder::new().layer(metrics)) + .build(ipc_path); + ipc_handle = Some(ipc.start(modules.ipc.clone().expect("ipc server error")).await?); + } + // If both are configured on the same port, we combine them into one server. 
if self.http_addr == self.ws_addr && self.http_server_config.is_some() && @@ -1314,7 +1330,7 @@ impl RpcServerConfig { http_cors_domains: Some(http_cors.clone()), ws_cors_domains: Some(ws_cors.clone()), } - .into()) + .into()); } Some(ws_cors) } @@ -1323,53 +1339,62 @@ impl RpcServerConfig { .cloned(); // we merge this into one server using the http setup - self.ws_server_config.take(); - modules.config.ensure_ws_http_identical()?; - let builder = self.http_server_config.take().expect("http_server_config is Some"); - let server = builder - .set_http_middleware( - tower::ServiceBuilder::new() - .option_layer(Self::maybe_cors_layer(cors)?) - .option_layer(self.maybe_jwt_layer()), - ) - .set_rpc_middleware( - RpcServiceBuilder::new().layer( - modules - .http - .as_ref() - .or(modules.ws.as_ref()) - .map(RpcRequestMetrics::same_port) - .unwrap_or_default(), - ), - ) - .build(http_socket_addr) - .await - .map_err(|err| RpcError::server_error(err, ServerKind::WsHttp(http_socket_addr)))?; - let addr = server - .local_addr() - .map_err(|err| RpcError::server_error(err, ServerKind::WsHttp(http_socket_addr)))?; - return Ok(WsHttpServer { - http_local_addr: Some(addr), - ws_local_addr: Some(addr), - server: WsHttpServers::SamePort(server), - jwt_secret: self.jwt_secret, - }) + if let Some(builder) = self.http_server_config { + let server = builder + .set_http_middleware( + tower::ServiceBuilder::new() + .option_layer(Self::maybe_cors_layer(cors)?) + .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), + ) + .set_rpc_middleware( + RpcServiceBuilder::new().layer( + modules + .http + .as_ref() + .or(modules.ws.as_ref()) + .map(RpcRequestMetrics::same_port) + .unwrap_or_default(), + ), + ) + .build(http_socket_addr) + .await + .map_err(|err| { + RpcError::server_error(err, ServerKind::WsHttp(http_socket_addr)) + })?; + let addr = server.local_addr().map_err(|err| { + RpcError::server_error(err, ServerKind::WsHttp(http_socket_addr)) + })?; + if let Some(module) = modules.http.as_ref().or(modules.ws.as_ref()) { + let handle = server.start(module.clone()); + http_handle = Some(handle.clone()); + ws_handle = Some(handle); + } + return Ok(RpcServerHandle { + http_local_addr: Some(addr), + ws_local_addr: Some(addr), + http: http_handle, + ws: ws_handle, + ipc_endpoint: self.ipc_endpoint.clone(), + ipc: ipc_handle, + jwt_secret: self.jwt_secret, + }); + } } + let mut ws_local_addr = None; + let mut ws_server = None; let mut http_local_addr = None; let mut http_server = None; - let mut ws_local_addr = None; - let mut ws_server = None; - if let Some(builder) = self.ws_server_config.take() { + if let Some(builder) = self.ws_server_config { let server = builder .ws_only() .set_http_middleware( tower::ServiceBuilder::new() .option_layer(Self::maybe_cors_layer(self.ws_cors_domains.clone())?) 
- .option_layer(self.maybe_jwt_layer()), + .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), ) .set_rpc_middleware( RpcServiceBuilder::new() @@ -1378,6 +1403,7 @@ impl RpcServerConfig { .build(ws_socket_addr) .await .map_err(|err| RpcError::server_error(err, ServerKind::WS(ws_socket_addr)))?; + let addr = server .local_addr() .map_err(|err| RpcError::server_error(err, ServerKind::WS(ws_socket_addr)))?; @@ -1386,13 +1412,13 @@ impl RpcServerConfig { ws_server = Some(server); } - if let Some(builder) = self.http_server_config.take() { + if let Some(builder) = self.http_server_config { let server = builder .http_only() .set_http_middleware( tower::ServiceBuilder::new() .option_layer(Self::maybe_cors_layer(self.http_cors_domains.clone())?) - .option_layer(self.maybe_jwt_layer()), + .option_layer(Self::maybe_jwt_layer(self.jwt_secret)), ) .set_rpc_middleware( RpcServiceBuilder::new().layer( @@ -1409,36 +1435,20 @@ impl RpcServerConfig { http_server = Some(server); } - Ok(WsHttpServer { + http_handle = http_server + .map(|http_server| http_server.start(modules.http.clone().expect("http server error"))); + ws_handle = ws_server + .map(|ws_server| ws_server.start(modules.ws.clone().expect("ws server error"))); + Ok(RpcServerHandle { http_local_addr, ws_local_addr, - server: WsHttpServers::DifferentPort { http: http_server, ws: ws_server }, + http: http_handle, + ws: ws_handle, + ipc_endpoint: self.ipc_endpoint.clone(), + ipc: ipc_handle, jwt_secret: self.jwt_secret, }) } - - /// Finalize the configuration of the server(s). - /// - /// This consumes the builder and returns a server. - /// - /// Note: The server is not started and does nothing unless polled, See also - /// [`RpcServer::start`] - pub async fn build(mut self, modules: &TransportRpcModules) -> Result { - let mut server = RpcServer::empty(); - server.ws_http = self.build_ws_http(modules).await?; - - if let Some(builder) = self.ipc_server_config { - let metrics = modules.ipc.as_ref().map(RpcRequestMetrics::ipc).unwrap_or_default(); - let ipc_path = - self.ipc_endpoint.unwrap_or_else(|| constants::DEFAULT_IPC_ENDPOINT.into()); - let ipc = builder - .set_rpc_middleware(IpcRpcServiceBuilder::new().layer(metrics)) - .build(ipc_path); - server.ipc = Some(ipc); - } - - Ok(server) - } } /// Holds modules to be installed per transport type @@ -1645,167 +1655,6 @@ impl TransportRpcModules { self.merge_ipc(other)?; Ok(()) } - - /// Convenience function for starting a server - pub async fn start_server(self, builder: RpcServerConfig) -> Result { - builder.start(self).await - } -} - -/// Container type for ws and http servers in all possible combinations. -#[derive(Default)] -struct WsHttpServer { - /// The address of the http server - http_local_addr: Option, - /// The address of the ws server - ws_local_addr: Option, - /// Configured ws,http servers - server: WsHttpServers, - /// The jwt secret. - jwt_secret: Option, -} - -// Define the type alias with detailed type complexity -type WsHttpServerKind = Server< - Stack< - tower::util::Either, Identity>, - Stack, Identity>, - >, - Stack, ->; - -/// Enum for holding the http and ws servers in all possible combinations. 
-enum WsHttpServers { - /// Both servers are on the same port - SamePort(WsHttpServerKind), - /// Servers are on different ports - DifferentPort { http: Option, ws: Option }, -} - -// === impl WsHttpServers === - -impl WsHttpServers { - /// Starts the servers and returns the handles (http, ws) - async fn start( - self, - http_module: Option>, - ws_module: Option>, - config: &TransportRpcModuleConfig, - ) -> Result<(Option, Option), RpcError> { - let mut http_handle = None; - let mut ws_handle = None; - match self { - Self::SamePort(server) => { - // Make sure http and ws modules are identical, since we currently can't run - // different modules on same server - config.ensure_ws_http_identical()?; - - if let Some(module) = http_module.or(ws_module) { - let handle = server.start(module); - http_handle = Some(handle.clone()); - ws_handle = Some(handle); - } - } - Self::DifferentPort { http, ws } => { - if let Some((server, module)) = - http.and_then(|server| http_module.map(|module| (server, module))) - { - http_handle = Some(server.start(module)); - } - if let Some((server, module)) = - ws.and_then(|server| ws_module.map(|module| (server, module))) - { - ws_handle = Some(server.start(module)); - } - } - } - - Ok((http_handle, ws_handle)) - } -} - -impl Default for WsHttpServers { - fn default() -> Self { - Self::DifferentPort { http: None, ws: None } - } -} - -/// Container type for each transport ie. http, ws, and ipc server -pub struct RpcServer { - /// Configured ws,http servers - ws_http: WsHttpServer, - /// ipc server - ipc: Option>>, -} - -// === impl RpcServer === - -impl RpcServer { - fn empty() -> Self { - Self { ws_http: Default::default(), ipc: None } - } - - /// Returns the [`SocketAddr`] of the http server if started. - pub const fn http_local_addr(&self) -> Option { - self.ws_http.http_local_addr - } - /// Return the `JwtSecret` of the server - pub const fn jwt(&self) -> Option { - self.ws_http.jwt_secret - } - - /// Returns the [`SocketAddr`] of the ws server if started. - pub const fn ws_local_addr(&self) -> Option { - self.ws_http.ws_local_addr - } - - /// Returns the endpoint of the ipc server if started. - pub fn ipc_endpoint(&self) -> Option { - self.ipc.as_ref().map(|ipc| ipc.endpoint()) - } - - /// Starts the configured server by spawning the servers on the tokio runtime. - /// - /// This returns an [RpcServerHandle] that's connected to the server task(s) until the server is - /// stopped or the [RpcServerHandle] is dropped. 
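With `RpcServer` and `WsHttpServers` deleted, there is no build-then-start pair anymore; `RpcServerConfig::start` consumes the config and returns the live `RpcServerHandle` directly. A minimal sketch of the new flow (module assembly elided; see the crate docs above):

```rust
use reth_rpc_builder::{
    error::RpcError, RpcServerConfig, RpcServerHandle, ServerBuilder, TransportRpcModules,
};

/// One-step start: previously this needed `config.build(&modules).await?`
/// followed by `server.start(modules).await?`.
async fn start_http(modules: &TransportRpcModules) -> Result<RpcServerHandle, RpcError> {
    let handle = RpcServerConfig::http(ServerBuilder::default())
        .with_http_address("127.0.0.1:0".parse().expect("valid socket addr"))
        .start(modules)
        .await?;
    println!("http rpc listening on {:?}", handle.http_local_addr());
    Ok(handle)
}
```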
- #[instrument(name = "start", skip_all, fields(http = ?self.http_local_addr(), ws = ?self.ws_local_addr(), ipc = ?self.ipc_endpoint()), target = "rpc", level = "TRACE")] - pub async fn start(self, modules: TransportRpcModules) -> Result { - trace!(target: "rpc", "staring RPC server"); - let Self { ws_http, ipc: ipc_server } = self; - let TransportRpcModules { config, http, ws, ipc } = modules; - let mut handle = RpcServerHandle { - http_local_addr: ws_http.http_local_addr, - ws_local_addr: ws_http.ws_local_addr, - http: None, - ws: None, - ipc_endpoint: None, - ipc: None, - jwt_secret: None, - }; - - let (http, ws) = ws_http.server.start(http, ws, &config).await?; - handle.http = http; - handle.ws = ws; - - if let Some((server, module)) = - ipc_server.and_then(|server| ipc.map(|module| (server, module))) - { - handle.ipc_endpoint = Some(server.endpoint()); - handle.ipc = Some(server.start(module).await?); - } - - Ok(handle) - } -} - -impl fmt::Debug for RpcServer { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RpcServer") - .field("http", &self.ws_http.http_local_addr.is_some()) - .field("ws", &self.ws_http.ws_local_addr.is_some()) - .field("ipc", &self.ipc.is_some()) - .finish() - } } /// A handle to the spawned servers. diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index caf16ebf6..14143d229 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -11,7 +11,7 @@ use jsonrpsee::{ rpc_params, types::error::ErrorCode, }; -use reth_chainspec::net::NodeRecord; +use reth_network_peers::NodeRecord; use reth_primitives::{ hex_literal::hex, Address, BlockId, BlockNumberOrTag, Bytes, TxHash, B256, B64, U256, U64, }; @@ -234,7 +234,7 @@ where let block_id = BlockId::Number(BlockNumberOrTag::default()); DebugApiClient::raw_header(client, block_id).await.unwrap(); - DebugApiClient::raw_block(client, block_id).await.unwrap(); + DebugApiClient::raw_block(client, block_id).await.unwrap_err(); DebugApiClient::raw_transaction(client, B256::default()).await.unwrap(); DebugApiClient::raw_receipts(client, block_id).await.unwrap(); assert!(is_unimplemented(DebugApiClient::bad_blocks(client).await.err().unwrap())); @@ -295,12 +295,14 @@ where let address = Address::default(); let sender = Address::default(); let tx_hash = TxHash::default(); - let block_number = BlockNumberOrTag::default(); + let block_number = 1; let page_number = 1; let page_size = 10; let nonce = 1; let block_hash = B256::default(); + OtterscanClient::get_header_by_number(client, block_number).await.unwrap(); + OtterscanClient::has_code(client, address, None).await.unwrap(); OtterscanClient::get_api_level(client).await.unwrap(); @@ -309,9 +311,7 @@ where OtterscanClient::get_transaction_error(client, tx_hash).await.unwrap(); - assert!(is_unimplemented( - OtterscanClient::trace_transaction(client, tx_hash).await.err().unwrap() - )); + OtterscanClient::trace_transaction(client, tx_hash).await.unwrap(); OtterscanClient::get_block_details(client, block_number).await.unwrap(); @@ -334,15 +334,11 @@ where .err() .unwrap() )); - assert!(is_unimplemented( - OtterscanClient::get_transaction_by_sender_and_nonce(client, sender, nonce,) - .await - .err() - .unwrap() - )); - assert!(is_unimplemented( - OtterscanClient::get_contract_creator(client, address).await.err().unwrap() - )); + assert!(OtterscanClient::get_transaction_by_sender_and_nonce(client, sender, nonce) + .await + .err() + .is_none()); + 
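A minimal sketch of the startup flow after the refactor above, using only helpers that appear in this PR: `RpcServerConfig::start` now takes the built `TransportRpcModules` by reference and returns an `RpcServerHandle` directly, so the removed `RpcServer`/`start_server` indirection is no longer needed. Illustrative only, not part of the diff:

    let builder = test_rpc_builder();
    let server = builder
        .build(TransportRpcModuleConfig::set_http(vec![RethRpcModule::Eth]), EthApiBuild::build);
    let handle = RpcServerConfig::http(Default::default())
        .with_http_address(test_address())
        .start(&server)
        .await
        .unwrap();
    assert!(handle.http_local_addr().is_some());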
assert!(OtterscanClient::get_contract_creator(client, address).await.unwrap().is_none()); } #[tokio::test(flavor = "multi_thread")] @@ -552,7 +548,7 @@ async fn test_eth_logs_args() { let client = handle.http_client().unwrap(); let mut params = ArrayParams::default(); - params.insert( serde_json::json!({"blockHash":"0x58dc57ab582b282c143424bd01e8d923cddfdcda9455bad02a29522f6274a948"})).unwrap(); + params.insert(serde_json::json!({"blockHash":"0x58dc57ab582b282c143424bd01e8d923cddfdcda9455bad02a29522f6274a948"})).unwrap(); let resp = client.request::, _>("eth_getLogs", params).await; // block does not exist diff --git a/crates/rpc/rpc-builder/tests/it/startup.rs b/crates/rpc/rpc-builder/tests/it/startup.rs index 91800166f..5680d03a5 100644 --- a/crates/rpc/rpc-builder/tests/it/startup.rs +++ b/crates/rpc/rpc-builder/tests/it/startup.rs @@ -1,14 +1,16 @@ //! Startup tests -use crate::utils::{ - launch_http, launch_http_ws_same_port, launch_ws, test_address, test_rpc_builder, -}; +use std::io; + use reth_rpc_builder::{ error::{RpcError, ServerKind, WsHttpSamePortError}, - RpcServerConfig, TransportRpcModuleConfig, + EthApiBuild, RpcServerConfig, TransportRpcModuleConfig, }; use reth_rpc_server_types::RethRpcModule; -use std::io; + +use crate::utils::{ + launch_http, launch_http_ws_same_port, launch_ws, test_address, test_rpc_builder, +}; fn is_addr_in_use_kind(err: &RpcError, kind: ServerKind) -> bool { match err { @@ -24,10 +26,10 @@ async fn test_http_addr_in_use() { let handle = launch_http(vec![RethRpcModule::Admin]).await; let addr = handle.http_local_addr().unwrap(); let builder = test_rpc_builder(); - let server = builder.build(TransportRpcModuleConfig::set_http(vec![RethRpcModule::Admin])); - let result = server - .start_server(RpcServerConfig::http(Default::default()).with_http_address(addr)) - .await; + let server = builder + .build(TransportRpcModuleConfig::set_http(vec![RethRpcModule::Admin]), EthApiBuild::build); + let result = + RpcServerConfig::http(Default::default()).with_http_address(addr).start(&server).await; let err = result.unwrap_err(); assert!(is_addr_in_use_kind(&err, ServerKind::Http(addr)), "{err}"); } @@ -37,9 +39,9 @@ async fn test_ws_addr_in_use() { let handle = launch_ws(vec![RethRpcModule::Admin]).await; let addr = handle.ws_local_addr().unwrap(); let builder = test_rpc_builder(); - let server = builder.build(TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin])); - let result = - server.start_server(RpcServerConfig::ws(Default::default()).with_ws_address(addr)).await; + let server = builder + .build(TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]), EthApiBuild::build); + let result = RpcServerConfig::ws(Default::default()).with_ws_address(addr).start(&server).await; let err = result.unwrap_err(); assert!(is_addr_in_use_kind(&err, ServerKind::WS(addr)), "{err}"); } @@ -58,15 +60,14 @@ async fn test_launch_same_port_different_modules() { let server = builder.build( TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Admin]) .with_http(vec![RethRpcModule::Eth]), + EthApiBuild::build, ); let addr = test_address(); - let res = server - .start_server( - RpcServerConfig::ws(Default::default()) - .with_ws_address(addr) - .with_http(Default::default()) - .with_http_address(addr), - ) + let res = RpcServerConfig::ws(Default::default()) + .with_ws_address(addr) + .with_http(Default::default()) + .with_http_address(addr) + .start(&server) .await; let err = res.unwrap_err(); assert!(matches!( @@ -81,17 +82,16 @@ async fn 
test_launch_same_port_same_cors() { let server = builder.build( TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth]) .with_http(vec![RethRpcModule::Eth]), + EthApiBuild::build, ); let addr = test_address(); - let res = server - .start_server( - RpcServerConfig::ws(Default::default()) - .with_ws_address(addr) - .with_http(Default::default()) - .with_cors(Some("*".to_string())) - .with_http_cors(Some("*".to_string())) - .with_http_address(addr), - ) + let res = RpcServerConfig::ws(Default::default()) + .with_ws_address(addr) + .with_http(Default::default()) + .with_cors(Some("*".to_string())) + .with_http_cors(Some("*".to_string())) + .with_http_address(addr) + .start(&server) .await; assert!(res.is_ok()); } @@ -102,17 +102,16 @@ async fn test_launch_same_port_different_cors() { let server = builder.build( TransportRpcModuleConfig::set_ws(vec![RethRpcModule::Eth]) .with_http(vec![RethRpcModule::Eth]), + EthApiBuild::build, ); let addr = test_address(); - let res = server - .start_server( - RpcServerConfig::ws(Default::default()) - .with_ws_address(addr) - .with_http(Default::default()) - .with_cors(Some("*".to_string())) - .with_http_cors(Some("example".to_string())) - .with_http_address(addr), - ) + let res = RpcServerConfig::ws(Default::default()) + .with_ws_address(addr) + .with_http(Default::default()) + .with_cors(Some("*".to_string())) + .with_http_cors(Some("example".to_string())) + .with_http_address(addr) + .start(&server) .await; let err = res.unwrap_err(); assert!(matches!( diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index d751b2d33..ea9954f23 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -1,3 +1,5 @@ +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; + use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_chainspec::MAINNET; use reth_ethereum_engine_primitives::EthEngineTypes; @@ -7,7 +9,7 @@ use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_provider::test_utils::{NoopProvider, TestCanonStateSubscriptions}; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerConfig, AuthServerHandle}, - RpcModuleBuilder, RpcServerConfig, RpcServerHandle, TransportRpcModuleConfig, + EthApiBuild, RpcModuleBuilder, RpcServerConfig, RpcServerHandle, TransportRpcModuleConfig, }; use reth_rpc_engine_api::EngineApi; use reth_rpc_layer::JwtSecret; @@ -15,7 +17,6 @@ use reth_rpc_server_types::RpcModuleSelection; use reth_rpc_types::engine::{ClientCode, ClientVersionV1}; use reth_tasks::TokioTaskExecutor; use reth_transaction_pool::test_utils::{TestPool, TestPoolBuilder}; -use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use tokio::sync::mpsc::unbounded_channel; /// Localhost with port 0 so a free port is used. 
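For reference, the "port 0" convention mentioned above relies on the OS assigning a free port at bind time; a plain-std illustration of that behavior (not reth code):

    let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
    let addr = listener.local_addr().unwrap(); // the OS picked a free port
    assert_ne!(addr.port(), 0);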
@@ -51,9 +52,10 @@ pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle { /// Launches a new server with http only with the given modules pub async fn launch_http(modules: impl Into<RpcModuleSelection>) -> RpcServerHandle { let builder = test_rpc_builder(); - let server = builder.build(TransportRpcModuleConfig::set_http(modules)); - server - .start_server(RpcServerConfig::http(Default::default()).with_http_address(test_address())) + let server = builder.build(TransportRpcModuleConfig::set_http(modules), EthApiBuild::build); + RpcServerConfig::http(Default::default()) + .with_http_address(test_address()) + .start(&server) .await .unwrap() } @@ -61,9 +63,10 @@ pub async fn launch_http(modules: impl Into<RpcModuleSelection>) -> RpcServerHan /// Launches a new server with ws only with the given modules pub async fn launch_ws(modules: impl Into<RpcModuleSelection>) -> RpcServerHandle { let builder = test_rpc_builder(); - let server = builder.build(TransportRpcModuleConfig::set_ws(modules)); - server - .start_server(RpcServerConfig::ws(Default::default()).with_ws_address(test_address())) + let server = builder.build(TransportRpcModuleConfig::set_ws(modules), EthApiBuild::build); + RpcServerConfig::ws(Default::default()) + .with_ws_address(test_address()) + .start(&server) .await .unwrap() } @@ -72,15 +75,15 @@ pub async fn launch_ws(modules: impl Into<RpcModuleSelection>) -> RpcServerHandl pub async fn launch_http_ws(modules: impl Into<RpcModuleSelection>) -> RpcServerHandle { let builder = test_rpc_builder(); let modules = modules.into(); - let server = - builder.build(TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules)); - server - .start_server( - RpcServerConfig::ws(Default::default()) - .with_ws_address(test_address()) - .with_http(Default::default()) - .with_http_address(test_address()), - ) + let server = builder.build( + TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), + EthApiBuild::build, + ); + RpcServerConfig::ws(Default::default()) + .with_ws_address(test_address()) + .with_http(Default::default()) + .with_http_address(test_address()) + .start(&server) .await .unwrap() } @@ -89,16 +92,16 @@ pub async fn launch_http_ws(modules: impl Into<RpcModuleSelection>) -> RpcServer pub async fn launch_http_ws_same_port(modules: impl Into<RpcModuleSelection>) -> RpcServerHandle { let builder = test_rpc_builder(); let modules = modules.into(); - let server = - builder.build(TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules)); + let server = builder.build( + TransportRpcModuleConfig::set_ws(modules.clone()).with_http(modules), + EthApiBuild::build, + ); let addr = test_address(); - server - .start_server( - RpcServerConfig::ws(Default::default()) - .with_ws_address(addr) - .with_http(Default::default()) - .with_http_address(addr), - ) + RpcServerConfig::ws(Default::default()) + .with_ws_address(addr) + .with_http(Default::default()) + .with_http_address(addr) + .start(&server) .await .unwrap() } diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 8185bbe8c..b64b9fa20 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -10,16 +10,18 @@ use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, Hardfork, B256, U64}; +use reth_primitives::{ + Block, BlockHash, BlockHashOrNumber, BlockNumber, EthereumHardfork, B256, U64, +}; use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{
CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, - ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, - ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, - CAPABILITIES, + ExecutionPayloadBodiesV2, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, + ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, + TransitionConfiguration, CAPABILITIES, }; use reth_rpc_types_compat::engine::payload::{ - convert_payload_input_v2_to_payload, convert_to_payload_body_v1, + convert_payload_input_v2_to_payload, convert_to_payload_body_v1, convert_to_payload_body_v2, }; use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory}; use reth_tasks::TaskSpawner; @@ -83,7 +85,7 @@ where } /// Fetches the client version. - async fn get_client_version_v1( + fn get_client_version_v1( &self, _client: ClientVersionV1, ) -> EngineApiResult> { @@ -359,21 +361,18 @@ where }) } - /// Returns the execution payload bodies by the range starting at `start`, containing `count` - /// blocks. - /// - /// WARNING: This method is associated with the `BeaconBlocksByRange` message in the consensus - /// layer p2p specification, meaning the input should be treated as untrusted or potentially - /// adversarial. - /// - /// Implementers should take care when acting on the input to this method, specifically - /// ensuring that the range is limited properly, and that the range boundaries are computed - /// correctly and without panics. - pub async fn get_payload_bodies_by_range( + /// Fetches all the blocks for the provided range starting at `start`, containing `count` + /// blocks and returns the mapped payload bodies. + async fn get_payload_bodies_by_range_with( &self, start: BlockNumber, count: u64, - ) -> EngineApiResult { + f: F, + ) -> EngineApiResult>> + where + F: Fn(Block) -> R + Send + 'static, + R: Send + 'static, + { let (tx, rx) = oneshot::channel(); let inner = self.inner.clone(); @@ -405,7 +404,7 @@ where let block_result = inner.provider.block(BlockHashOrNumber::Number(num)); match block_result { Ok(block) => { - result.push(block.map(convert_to_payload_body_v1)); + result.push(block.map(&f)); } Err(err) => { tx.send(Err(EngineApiError::Internal(Box::new(err)))).ok(); @@ -419,11 +418,45 @@ where rx.await.map_err(|err| EngineApiError::Internal(Box::new(err)))? } + /// Returns the execution payload bodies by the range starting at `start`, containing `count` + /// blocks. + /// + /// WARNING: This method is associated with the `BeaconBlocksByRange` message in the consensus + /// layer p2p specification, meaning the input should be treated as untrusted or potentially + /// adversarial. + /// + /// Implementers should take care when acting on the input to this method, specifically + /// ensuring that the range is limited properly, and that the range boundaries are computed + /// correctly and without panics. + pub async fn get_payload_bodies_by_range_v1( + &self, + start: BlockNumber, + count: u64, + ) -> EngineApiResult { + self.get_payload_bodies_by_range_with(start, count, convert_to_payload_body_v1).await + } + + /// Returns the execution payload bodies by the range starting at `start`, containing `count` + /// blocks. + /// + /// Same as [`Self::get_payload_bodies_by_range_v1`] but as [`ExecutionPayloadBodiesV2`]. 
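The v1/v2 methods above share one generic fetch path and differ only in the body converter passed in. A toy reduction of that `_with` pattern (illustrative only, not code from this diff):

    use reth_primitives::Block;

    /// One generic routine, specialized per API version by a mapping closure,
    /// mirroring the `block.map(&f)` call in `get_payload_bodies_by_range_with`.
    fn map_bodies<F, R>(blocks: Vec<Option<Block>>, f: F) -> Vec<Option<R>>
    where
        F: Fn(Block) -> R,
    {
        blocks.into_iter().map(|b| b.map(&f)).collect()
    }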
+ pub async fn get_payload_bodies_by_range_v2( + &self, + start: BlockNumber, + count: u64, + ) -> EngineApiResult { + self.get_payload_bodies_by_range_with(start, count, convert_to_payload_body_v2).await + } + /// Called to retrieve execution payload bodies by hashes. - pub fn get_payload_bodies_by_hash( + fn get_payload_bodies_by_hash_with( &self, hashes: Vec, - ) -> EngineApiResult { + f: F, + ) -> EngineApiResult>> + where + F: Fn(Block) -> R, + { let len = hashes.len() as u64; if len > MAX_PAYLOAD_BODIES_LIMIT { return Err(EngineApiError::PayloadRequestTooLarge { len }) @@ -436,15 +469,33 @@ where .provider .block(BlockHashOrNumber::Hash(hash)) .map_err(|err| EngineApiError::Internal(Box::new(err)))?; - result.push(block.map(convert_to_payload_body_v1)); + result.push(block.map(&f)); } Ok(result) } + /// Called to retrieve execution payload bodies by hashes. + pub fn get_payload_bodies_by_hash_v1( + &self, + hashes: Vec, + ) -> EngineApiResult { + self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v1) + } + + /// Called to retrieve execution payload bodies by hashes. + /// + /// Same as [`Self::get_payload_bodies_by_hash_v1`] but as [`ExecutionPayloadBodiesV2`]. + pub fn get_payload_bodies_by_hash_v2( + &self, + hashes: Vec, + ) -> EngineApiResult { + self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v2) + } + /// Called to verify network configuration parameters and ensure that Consensus and Execution /// layers are using the latest configuration. - pub async fn exchange_transition_configuration( + pub fn exchange_transition_configuration( &self, config: TransitionConfiguration, ) -> EngineApiResult { @@ -457,7 +508,7 @@ where let merge_terminal_td = self .inner .chain_spec - .fork(Hardfork::Paris) + .fork(EthereumHardfork::Paris) .ttd() .expect("the engine API should not be running for chains w/o paris"); @@ -469,7 +520,7 @@ where }) } - self.inner.beacon_consensus.transition_configuration_exchanged().await; + self.inner.beacon_consensus.transition_configuration_exchanged(); // Short circuit if communicated block hash is zero if terminal_block_hash.is_zero() { @@ -760,11 +811,22 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByHashV1"); let start = Instant::now(); - let res = Self::get_payload_bodies_by_hash(self, block_hashes); + let res = Self::get_payload_bodies_by_hash_v1(self, block_hashes); self.inner.metrics.latency.get_payload_bodies_by_hash_v1.record(start.elapsed()); Ok(res?) } + async fn get_payload_bodies_by_hash_v2( + &self, + block_hashes: Vec, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByHashV2"); + let start = Instant::now(); + let res = Self::get_payload_bodies_by_hash_v2(self, block_hashes); + self.inner.metrics.latency.get_payload_bodies_by_hash_v2.record(start.elapsed()); + Ok(res?) + } + /// Handler for `engine_getPayloadBodiesByRangeV1` /// /// See also @@ -788,11 +850,23 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByRangeV1"); let start_time = Instant::now(); - let res = Self::get_payload_bodies_by_range(self, start.to(), count.to()).await; + let res = Self::get_payload_bodies_by_range_v1(self, start.to(), count.to()).await; self.inner.metrics.latency.get_payload_bodies_by_range_v1.record(start_time.elapsed()); Ok(res?) 
} + async fn get_payload_bodies_by_range_v2( + &self, + start: U64, + count: U64, + ) -> RpcResult { + trace!(target: "rpc::engine", "Serving engine_getPayloadBodiesByRangeV2"); + let start_time = Instant::now(); + let res = Self::get_payload_bodies_by_range_v2(self, start.to(), count.to()).await; + self.inner.metrics.latency.get_payload_bodies_by_range_v2.record(start_time.elapsed()); + Ok(res?) + } + /// Handler for `engine_exchangeTransitionConfigurationV1` /// See also async fn exchange_transition_configuration( @@ -801,7 +875,7 @@ where ) -> RpcResult { trace!(target: "rpc::engine", "Serving engine_exchangeTransitionConfigurationV1"); let start = Instant::now(); - let res = Self::exchange_transition_configuration(self, config).await; + let res = Self::exchange_transition_configuration(self, config); self.inner.metrics.latency.exchange_transition_configuration.record(start.elapsed()); Ok(res?) } @@ -814,7 +888,7 @@ where client: ClientVersionV1, ) -> RpcResult> { trace!(target: "rpc::engine", "Serving engine_getClientVersionV1"); - let res = Self::get_client_version_v1(self, client).await; + let res = Self::get_client_version_v1(self, client); Ok(res?) } @@ -889,7 +963,7 @@ mod tests { commit: "defa64b2".to_string(), }; let (_, api) = setup_engine_api(); - let res = api.get_client_version_v1(client.clone()).await; + let res = api.get_client_version_v1(client.clone()); assert_eq!(res.unwrap(), vec![client]); } @@ -929,7 +1003,7 @@ mod tests { // test [EngineApiMessage::GetPayloadBodiesByRange] for (start, count) in by_range_tests { - let res = api.get_payload_bodies_by_range(start, count).await; + let res = api.get_payload_bodies_by_range_v1(start, count).await; assert_matches!(res, Err(EngineApiError::InvalidBodiesRange { .. })); } } @@ -939,7 +1013,7 @@ mod tests { let (_, api) = setup_engine_api(); let request_count = MAX_PAYLOAD_BODIES_LIMIT + 1; - let res = api.get_payload_bodies_by_range(0, request_count).await; + let res = api.get_payload_bodies_by_range_v1(0, request_count).await; assert_matches!(res, Err(EngineApiError::PayloadRequestTooLarge { .. 
})); } @@ -959,7 +1033,7 @@ mod tests { .map(|b| Some(convert_to_payload_body_v1(b.unseal()))) .collect::>(); - let res = api.get_payload_bodies_by_range(start, count).await.unwrap(); + let res = api.get_payload_bodies_by_range_v1(start, count).await.unwrap(); assert_eq!(res, expected); } @@ -1000,7 +1074,7 @@ mod tests { }) .collect::>(); - let res = api.get_payload_bodies_by_range(start, count).await.unwrap(); + let res = api.get_payload_bodies_by_range_v1(start, count).await.unwrap(); assert_eq!(res, expected); let expected = blocks @@ -1020,7 +1094,7 @@ mod tests { .collect::>(); let hashes = blocks.iter().map(|b| b.hash()).collect(); - let res = api.get_payload_bodies_by_hash(hashes).unwrap(); + let res = api.get_payload_bodies_by_hash_v1(hashes).unwrap(); assert_eq!(res, expected); } } @@ -1036,17 +1110,21 @@ mod tests { let (handle, api) = setup_engine_api(); let transition_config = TransitionConfiguration { - terminal_total_difficulty: handle.chain_spec.fork(Hardfork::Paris).ttd().unwrap() + + terminal_total_difficulty: handle + .chain_spec + .fork(EthereumHardfork::Paris) + .ttd() + .unwrap() + U256::from(1), ..Default::default() }; - let res = api.exchange_transition_configuration(transition_config).await; + let res = api.exchange_transition_configuration(transition_config); assert_matches!( res, Err(EngineApiError::TerminalTD { execution, consensus }) - if execution == handle.chain_spec.fork(Hardfork::Paris).ttd().unwrap() && consensus == U256::from(transition_config.terminal_total_difficulty) + if execution == handle.chain_spec.fork(EthereumHardfork::Paris).ttd().unwrap() && consensus == U256::from(transition_config.terminal_total_difficulty) ); } @@ -1063,13 +1141,17 @@ mod tests { random_block(&mut rng, terminal_block_number, None, None, None); let transition_config = TransitionConfiguration { - terminal_total_difficulty: handle.chain_spec.fork(Hardfork::Paris).ttd().unwrap(), + terminal_total_difficulty: handle + .chain_spec + .fork(EthereumHardfork::Paris) + .ttd() + .unwrap(), terminal_block_hash: consensus_terminal_block.hash(), terminal_block_number: U64::from(terminal_block_number), }; // Unknown block number - let res = api.exchange_transition_configuration(transition_config).await; + let res = api.exchange_transition_configuration(transition_config); assert_matches!( res, @@ -1083,7 +1165,7 @@ mod tests { execution_terminal_block.clone().unseal(), ); - let res = api.exchange_transition_configuration(transition_config).await; + let res = api.exchange_transition_configuration(transition_config); assert_matches!( res, @@ -1101,14 +1183,18 @@ mod tests { random_block(&mut generators::rng(), terminal_block_number, None, None, None); let transition_config = TransitionConfiguration { - terminal_total_difficulty: handle.chain_spec.fork(Hardfork::Paris).ttd().unwrap(), + terminal_total_difficulty: handle + .chain_spec + .fork(EthereumHardfork::Paris) + .ttd() + .unwrap(), terminal_block_hash: terminal_block.hash(), terminal_block_number: U64::from(terminal_block_number), }; handle.provider.add_block(terminal_block.hash(), terminal_block.unseal()); - let config = api.exchange_transition_configuration(transition_config).await.unwrap(); + let config = api.exchange_transition_configuration(transition_config).unwrap(); assert_eq!(config, transition_config); } } diff --git a/crates/rpc/rpc-engine-api/src/metrics.rs b/crates/rpc/rpc-engine-api/src/metrics.rs index 73489b755..0ae97768b 100644 --- a/crates/rpc/rpc-engine-api/src/metrics.rs +++ b/crates/rpc/rpc-engine-api/src/metrics.rs 
@@ -44,8 +44,12 @@ pub(crate) struct EngineApiLatencyMetrics { pub(crate) get_payload_v4: Histogram, /// Latency for `engine_getPayloadBodiesByRangeV1` pub(crate) get_payload_bodies_by_range_v1: Histogram, + /// Latency for `engine_getPayloadBodiesByRangeV2` + pub(crate) get_payload_bodies_by_range_v2: Histogram, /// Latency for `engine_getPayloadBodiesByHashV1` pub(crate) get_payload_bodies_by_hash_v1: Histogram, + /// Latency for `engine_getPayloadBodiesByHashV2` + pub(crate) get_payload_bodies_by_hash_v2: Histogram, /// Latency for `engine_exchangeTransitionConfigurationV1` pub(crate) exchange_transition_configuration: Histogram, } diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 0f2853f1f..afc66ddb0 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -25,6 +25,7 @@ fn transform_block Block>(src: SealedBlock, f: F) -> Executi body: transformed.body, ommers: transformed.ommers, withdrawals: transformed.withdrawals, + sidecars: transformed.sidecars, requests: transformed.requests, }) .0 diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml new file mode 100644 index 000000000..fb8d84b3f --- /dev/null +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "reth-rpc-eth-api" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Reth RPC 'eth' namespace API" + +[lints] +workspace = true + +[dependencies] +# reth +revm.workspace = true +revm-inspectors = { workspace = true, features = ["js-tracer"] } +revm-primitives = { workspace = true, features = ["dev"] } +reth-errors.workspace = true +reth-evm.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-revm.workspace = true +reth-rpc-types.workspace = true +reth-rpc-types-compat.workspace = true +reth-tasks = { workspace = true, features = ["rayon"] } +reth-transaction-pool.workspace = true +reth-chainspec.workspace = true +reth-execution-types.workspace = true +reth-rpc-eth-types.workspace = true +reth-rpc-server-types.workspace = true + +# ethereum +alloy-dyn-abi = { workspace = true, features = ["eip712"] } + +# rpc +jsonrpsee = { workspace = true, features = ["server", "macros"] } + +# async +async-trait.workspace = true +futures.workspace = true +parking_lot.workspace = true +tokio.workspace = true + +# misc +auto_impl.workspace = true +dyn-clone.workspace = true +tracing.workspace = true + +[features] +client = ["jsonrpsee/client", "jsonrpsee/async-client"] +optimism = [ + "reth-primitives/optimism", + "revm/optimism", + "reth-provider/optimism", + "reth-rpc-eth-types/optimism" +] \ No newline at end of file diff --git a/crates/rpc/rpc-api/src/bundle.rs b/crates/rpc/rpc-eth-api/src/bundle.rs similarity index 98% rename from crates/rpc/rpc-api/src/bundle.rs rename to crates/rpc/rpc-eth-api/src/bundle.rs index 429f6948f..bf3a623df 100644 --- a/crates/rpc/rpc-api/src/bundle.rs +++ b/crates/rpc/rpc-eth-api/src/bundle.rs @@ -1,10 +1,10 @@ -//! Additional `eth_` functions for bundles +//! Additional `eth_` RPC API for bundles. //! //! 
See also use jsonrpsee::proc_macros::rpc; use reth_primitives::{Bytes, B256}; -use reth_rpc_types::{ +use reth_rpc_types::mev::{ CancelBundleRequest, CancelPrivateTransactionRequest, EthBundleHash, EthCallBundle, EthCallBundleResponse, EthSendBundle, PrivateTransactionRequest, }; diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs new file mode 100644 index 000000000..83740b53b --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -0,0 +1,746 @@ +//! Implementation of the [`jsonrpsee`] generated [`EthApiServer`] trait. Handles RPC requests for +//! the `eth_` namespace. + +use alloy_dyn_abi::TypedData; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; +use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; +use reth_rpc_types::{ + serde_helpers::JsonStorageKey, + state::{EvmOverrides, StateOverride}, + AccessListWithGasUsed, AnyTransactionReceipt, BlockOverrides, BlockSidecar, Bundle, + EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Header, Index, RichBlock, + StateContext, SyncStatus, Transaction, TransactionRequest, Work, +}; +use tracing::trace; + +use crate::helpers::{ + transaction::UpdateRawTxForwarder, EthApiSpec, EthBlocks, EthCall, EthFees, EthState, + EthTransactions, FullEthApi, +}; + +/// Helper trait, unifies functionality that must be supported to implement all RPC methods for +/// server. +pub trait FullEthApiServer: EthApiServer + FullEthApi + UpdateRawTxForwarder + Clone {} + +impl FullEthApiServer for T where T: EthApiServer + FullEthApi + UpdateRawTxForwarder + Clone {} + +/// Eth rpc interface: +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] +pub trait EthApi { + /// Returns the protocol version encoded as a string. + #[method(name = "protocolVersion")] + async fn protocol_version(&self) -> RpcResult; + + /// Returns an object with data about the sync status or false. + #[method(name = "syncing")] + fn syncing(&self) -> RpcResult; + + /// Returns the client coinbase address. + #[method(name = "coinbase")] + async fn author(&self) -> RpcResult
<Address>; + + /// Returns a list of addresses owned by the client. + #[method(name = "accounts")] + fn accounts(&self) -> RpcResult<Vec<Address>>; + + /// Returns the number of the most recent block. + #[method(name = "blockNumber")] + fn block_number(&self) -> RpcResult<U256>; + + /// Returns the chain ID of the current network. + #[method(name = "chainId")] + async fn chain_id(&self) -> RpcResult<Option<U64>>; + + /// Returns information about a block by hash. + #[method(name = "getBlockByHash")] + async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult<Option<RichBlock>>; + + /// Returns information about a block by number. + #[method(name = "getBlockByNumber")] + async fn block_by_number( + &self, + number: BlockNumberOrTag, + full: bool, + ) -> RpcResult<Option<RichBlock>>; + + /// Returns the number of transactions in a block matching the given block hash. + #[method(name = "getBlockTransactionCountByHash")] + async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult<Option<U256>>; + + /// Returns the number of transactions in a block matching the given block number. + #[method(name = "getBlockTransactionCountByNumber")] + async fn block_transaction_count_by_number( + &self, + number: BlockNumberOrTag, + ) -> RpcResult<Option<U256>>; + + /// Returns the number of uncles in a block matching the given block hash. + #[method(name = "getUncleCountByBlockHash")] + async fn block_uncles_count_by_hash(&self, hash: B256) -> RpcResult<Option<U256>>; + + /// Returns the number of uncles in a block with the given block number. + #[method(name = "getUncleCountByBlockNumber")] + async fn block_uncles_count_by_number( + &self, + number: BlockNumberOrTag, + ) -> RpcResult<Option<U256>>; + + /// Returns all transaction receipts for a given block. + #[method(name = "getBlockReceipts")] + async fn block_receipts( + &self, + block_id: BlockId, + ) -> RpcResult<Option<Vec<AnyTransactionReceipt>>>; + + /// Returns an uncle block of the given block and index. + #[method(name = "getUncleByBlockHashAndIndex")] + async fn uncle_by_block_hash_and_index( + &self, + hash: B256, + index: Index, + ) -> RpcResult<Option<RichBlock>>; + + /// Returns an uncle block of the given block and index. + #[method(name = "getUncleByBlockNumberAndIndex")] + async fn uncle_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult<Option<RichBlock>>; + + /// Returns the EIP-2718 encoded transaction if it exists. + /// + /// If this is an EIP-4844 transaction that is in the pool it will include the sidecar. + #[method(name = "getRawTransactionByHash")] + async fn raw_transaction_by_hash(&self, hash: B256) -> RpcResult<Option<Bytes>>; + + /// Returns the information about a transaction requested by transaction hash. + #[method(name = "getTransactionByHash")] + async fn transaction_by_hash(&self, hash: B256) -> RpcResult<Option<Transaction>>; + + /// Returns information about a raw transaction by block hash and transaction index position. + #[method(name = "getRawTransactionByBlockHashAndIndex")] + async fn raw_transaction_by_block_hash_and_index( + &self, + hash: B256, + index: Index, + ) -> RpcResult<Option<Bytes>>; + + /// Returns information about a transaction by block hash and transaction index position. + #[method(name = "getTransactionByBlockHashAndIndex")] + async fn transaction_by_block_hash_and_index( + &self, + hash: B256, + index: Index, + ) -> RpcResult<Option<Transaction>>; + + /// Returns information about a raw transaction by block number and transaction index + /// position.
+ #[method(name = "getRawTransactionByBlockNumberAndIndex")] + async fn raw_transaction_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult>; + + /// Returns information about a transaction by block number and transaction index position. + #[method(name = "getTransactionByBlockNumberAndIndex")] + async fn transaction_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult>; + + /// Returns the receipt of a transaction by transaction hash. + #[method(name = "getTransactionReceipt")] + async fn transaction_receipt(&self, hash: B256) -> RpcResult>; + + /// Returns the balance of the account of given address. + #[method(name = "getBalance")] + async fn balance(&self, address: Address, block_number: Option) -> RpcResult; + + /// Returns the value from a storage position at a given address + #[method(name = "getStorageAt")] + async fn storage_at( + &self, + address: Address, + index: JsonStorageKey, + block_number: Option, + ) -> RpcResult; + + /// Returns the number of transactions sent from an address at given block number. + #[method(name = "getTransactionCount")] + async fn transaction_count( + &self, + address: Address, + block_number: Option, + ) -> RpcResult; + + /// Returns code at a given address at given block number. + #[method(name = "getCode")] + async fn get_code(&self, address: Address, block_number: Option) -> RpcResult; + + /// Returns the block's header at given number. + #[method(name = "getHeaderByNumber")] + async fn header_by_number(&self, hash: BlockNumberOrTag) -> RpcResult>; + + /// Returns the block's header at given hash. + #[method(name = "getHeaderByHash")] + async fn header_by_hash(&self, hash: B256) -> RpcResult>; + + /// Executes a new message call immediately without creating a transaction on the block chain. + #[method(name = "call")] + async fn call( + &self, + request: TransactionRequest, + block_number: Option, + state_overrides: Option, + block_overrides: Option>, + ) -> RpcResult; + + /// Simulate arbitrary number of transactions at an arbitrary blockchain index, with the + /// optionality of state overrides + #[method(name = "callMany")] + async fn call_many( + &self, + bundle: Bundle, + state_context: Option, + state_override: Option, + ) -> RpcResult>; + + /// Generates an access list for a transaction. + /// + /// This method creates an [EIP2930](https://eips.ethereum.org/EIPS/eip-2930) type accessList based on a given Transaction. + /// + /// An access list contains all storage slots and addresses touched by the transaction, except + /// for the sender account and the chain's precompiles. + /// + /// It returns list of addresses and storage keys used by the transaction, plus the gas + /// consumed when the access list is added. That is, it gives you the list of addresses and + /// storage keys that will be used by that transaction, plus the gas consumed if the access + /// list is included. Like eth_estimateGas, this is an estimation; the list could change + /// when the transaction is actually mined. Adding an accessList to your transaction does + /// not necessary result in lower gas usage compared to a transaction without an access + /// list. + #[method(name = "createAccessList")] + async fn create_access_list( + &self, + request: TransactionRequest, + block_number: Option, + ) -> RpcResult; + + /// Generates and returns an estimate of how much gas is necessary to allow the transaction to + /// complete. 
+ #[method(name = "estimateGas")] + async fn estimate_gas( + &self, + request: TransactionRequest, + block_number: Option, + state_override: Option, + ) -> RpcResult; + + /// Returns the current price per gas in wei. + #[method(name = "gasPrice")] + async fn gas_price(&self) -> RpcResult; + + /// Introduced in EIP-1559, returns suggestion for the priority for dynamic fee transactions. + #[method(name = "maxPriorityFeePerGas")] + async fn max_priority_fee_per_gas(&self) -> RpcResult; + + /// Introduced in EIP-4844, returns the current blob base fee in wei. + #[method(name = "blobBaseFee")] + async fn blob_base_fee(&self) -> RpcResult; + + /// Returns the Transaction fee history + /// + /// Introduced in EIP-1559 for getting information on the appropriate priority fee to use. + /// + /// Returns transaction base fee per gas and effective priority fee per gas for the + /// requested/supported block range. The returned Fee history for the returned block range + /// can be a subsection of the requested range if not all blocks are available. + #[method(name = "feeHistory")] + async fn fee_history( + &self, + block_count: U64, + newest_block: BlockNumberOrTag, + reward_percentiles: Option>, + ) -> RpcResult; + + /// Returns whether the client is actively mining new blocks. + #[method(name = "mining")] + async fn is_mining(&self) -> RpcResult; + + /// Returns the number of hashes per second that the node is mining with. + #[method(name = "hashrate")] + async fn hashrate(&self) -> RpcResult; + + /// Returns the hash of the current block, the seedHash, and the boundary condition to be met + /// (“target”) + #[method(name = "getWork")] + async fn get_work(&self) -> RpcResult; + + /// Used for submitting mining hashrate. + /// + /// Can be used for remote miners to submit their hash rate. + /// It accepts the miner hash rate and an identifier which must be unique between nodes. + /// Returns `true` if the block was successfully submitted, `false` otherwise. + #[method(name = "submitHashrate")] + async fn submit_hashrate(&self, hashrate: U256, id: B256) -> RpcResult; + + /// Used for submitting a proof-of-work solution. + #[method(name = "submitWork")] + async fn submit_work(&self, nonce: B64, pow_hash: B256, mix_digest: B256) -> RpcResult; + + /// Sends transaction; will block waiting for signer to return the + /// transaction hash. + #[method(name = "sendTransaction")] + async fn send_transaction(&self, request: TransactionRequest) -> RpcResult; + + /// Sends signed transaction, returning its hash. + #[method(name = "sendRawTransaction")] + async fn send_raw_transaction(&self, bytes: Bytes) -> RpcResult; + + /// Returns an Ethereum specific signature with: sign(keccak256("\x19Ethereum Signed Message:\n" + /// + len(message) + message))). + #[method(name = "sign")] + async fn sign(&self, address: Address, message: Bytes) -> RpcResult; + + /// Signs a transaction that can be submitted to the network at a later time using with + /// `sendRawTransaction.` + #[method(name = "signTransaction")] + async fn sign_transaction(&self, transaction: TransactionRequest) -> RpcResult; + + /// Signs data via [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md). + #[method(name = "signTypedData")] + async fn sign_typed_data(&self, address: Address, data: TypedData) -> RpcResult; + + /// Returns the account and storage values of the specified account including the Merkle-proof. + /// This call can be used to verify that the data you are pulling from is not tampered with. 
+ #[method(name = "getProof")] + async fn get_proof( + &self, + address: Address, + keys: Vec, + block_number: Option, + ) -> RpcResult; + + /// Returns the Sidecars of a given block number or hash. + #[method(name = "getBlobSidecars")] + async fn get_blob_sidecars(&self, block_id: BlockId) -> RpcResult>>; + + /// Returns a sidecar of a given blob transaction + #[method(name = "getBlockSidecarByTxHash")] + async fn get_block_sidecar_by_tx_hash(&self, hash: B256) -> RpcResult>; +} + +#[async_trait::async_trait] +impl EthApiServer for T +where + Self: FullEthApi, +{ + /// Handler for: `eth_protocolVersion` + async fn protocol_version(&self) -> RpcResult { + trace!(target: "rpc::eth", "Serving eth_protocolVersion"); + EthApiSpec::protocol_version(self).await.to_rpc_result() + } + + /// Handler for: `eth_syncing` + fn syncing(&self) -> RpcResult { + trace!(target: "rpc::eth", "Serving eth_syncing"); + EthApiSpec::sync_status(self).to_rpc_result() + } + + /// Handler for: `eth_coinbase` + async fn author(&self) -> RpcResult
{ + Err(internal_rpc_err("unimplemented")) + } + + /// Handler for: `eth_accounts` + fn accounts(&self) -> RpcResult> { + trace!(target: "rpc::eth", "Serving eth_accounts"); + Ok(EthApiSpec::accounts(self)) + } + + /// Handler for: `eth_blockNumber` + fn block_number(&self) -> RpcResult { + trace!(target: "rpc::eth", "Serving eth_blockNumber"); + Ok(U256::from( + EthApiSpec::chain_info(self).with_message("failed to read chain info")?.best_number, + )) + } + + /// Handler for: `eth_chainId` + async fn chain_id(&self) -> RpcResult> { + trace!(target: "rpc::eth", "Serving eth_chainId"); + Ok(Some(EthApiSpec::chain_id(self))) + } + + /// Handler for: `eth_getBlockByHash` + async fn block_by_hash(&self, hash: B256, full: bool) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, ?full, "Serving eth_getBlockByHash"); + Ok(EthBlocks::rpc_block(self, hash.into(), full).await?) + } + + /// Handler for: `eth_getBlockByNumber` + async fn block_by_number( + &self, + number: BlockNumberOrTag, + full: bool, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, ?full, "Serving eth_getBlockByNumber"); + Ok(EthBlocks::rpc_block(self, number.into(), full).await?) + } + + /// Handler for: `eth_getBlockTransactionCountByHash` + async fn block_transaction_count_by_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getBlockTransactionCountByHash"); + Ok(EthBlocks::block_transaction_count(self, hash.into()).await?.map(U256::from)) + } + + /// Handler for: `eth_getBlockTransactionCountByNumber` + async fn block_transaction_count_by_number( + &self, + number: BlockNumberOrTag, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, "Serving eth_getBlockTransactionCountByNumber"); + Ok(EthBlocks::block_transaction_count(self, number.into()).await?.map(U256::from)) + } + + /// Handler for: `eth_getUncleCountByBlockHash` + async fn block_uncles_count_by_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getUncleCountByBlockHash"); + Ok(EthBlocks::ommers(self, hash.into())?.map(|ommers| U256::from(ommers.len()))) + } + + /// Handler for: `eth_getUncleCountByBlockNumber` + async fn block_uncles_count_by_number( + &self, + number: BlockNumberOrTag, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, "Serving eth_getUncleCountByBlockNumber"); + Ok(EthBlocks::ommers(self, number.into())?.map(|ommers| U256::from(ommers.len()))) + } + + /// Handler for: `eth_getBlockReceipts` + async fn block_receipts( + &self, + block_id: BlockId, + ) -> RpcResult>> { + trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlockReceipts"); + Ok(EthBlocks::block_receipts(self, block_id).await?) + } + + /// Handler for: `eth_getUncleByBlockHashAndIndex` + async fn uncle_by_block_hash_and_index( + &self, + hash: B256, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getUncleByBlockHashAndIndex"); + Ok(EthBlocks::ommer_by_block_and_index(self, hash.into(), index).await?) + } + + /// Handler for: `eth_getUncleByBlockNumberAndIndex` + async fn uncle_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getUncleByBlockNumberAndIndex"); + Ok(EthBlocks::ommer_by_block_and_index(self, number.into(), index).await?) 
+ } + + /// Handler for: `eth_getRawTransactionByHash` + async fn raw_transaction_by_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getRawTransactionByHash"); + Ok(EthTransactions::raw_transaction_by_hash(self, hash).await?) + } + + /// Handler for: `eth_getTransactionByHash` + async fn transaction_by_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionByHash"); + Ok(EthTransactions::transaction_by_hash(self, hash).await?.map(Into::into)) + } + + /// Handler for: `eth_getRawTransactionByBlockHashAndIndex` + async fn raw_transaction_by_block_hash_and_index( + &self, + hash: B256, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getRawTransactionByBlockHashAndIndex"); + Ok(EthTransactions::raw_transaction_by_block_and_tx_index(self, hash.into(), index.into()) + .await?) + } + + /// Handler for: `eth_getTransactionByBlockHashAndIndex` + async fn transaction_by_block_hash_and_index( + &self, + hash: B256, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getTransactionByBlockHashAndIndex"); + Ok(EthTransactions::transaction_by_block_and_tx_index(self, hash.into(), index.into()) + .await?) + } + + /// Handler for: `eth_getRawTransactionByBlockNumberAndIndex` + async fn raw_transaction_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getRawTransactionByBlockNumberAndIndex"); + Ok(EthTransactions::raw_transaction_by_block_and_tx_index( + self, + number.into(), + index.into(), + ) + .await?) + } + + /// Handler for: `eth_getTransactionByBlockNumberAndIndex` + async fn transaction_by_block_number_and_index( + &self, + number: BlockNumberOrTag, + index: Index, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getTransactionByBlockNumberAndIndex"); + Ok(EthTransactions::transaction_by_block_and_tx_index(self, number.into(), index.into()) + .await?) + } + + /// Handler for: `eth_getTransactionReceipt` + async fn transaction_receipt(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionReceipt"); + Ok(EthTransactions::transaction_receipt(self, hash).await?) + } + + /// Handler for: `eth_getBalance` + async fn balance(&self, address: Address, block_number: Option) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getBalance"); + Ok(EthState::balance(self, address, block_number).await?) + } + + /// Handler for: `eth_getStorageAt` + async fn storage_at( + &self, + address: Address, + index: JsonStorageKey, + block_number: Option, + ) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getStorageAt"); + let res: B256 = EthState::storage_at(self, address, index, block_number).await?; + Ok(res) + } + + /// Handler for: `eth_getTransactionCount` + async fn transaction_count( + &self, + address: Address, + block_number: Option, + ) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getTransactionCount"); + Ok(EthState::transaction_count(self, address, block_number).await?) + } + + /// Handler for: `eth_getCode` + async fn get_code(&self, address: Address, block_number: Option) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getCode"); + Ok(EthState::get_code(self, address, block_number).await?) 
+ } + + /// Handler for: `eth_getHeaderByNumber` + async fn header_by_number(&self, block_number: BlockNumberOrTag) -> RpcResult> { + trace!(target: "rpc::eth", ?block_number, "Serving eth_getHeaderByNumber"); + Ok(EthBlocks::rpc_block_header(self, block_number.into()).await?) + } + + /// Handler for: `eth_getHeaderByHash` + async fn header_by_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getHeaderByHash"); + Ok(EthBlocks::rpc_block_header(self, hash.into()).await?) + } + + /// Handler for: `eth_call` + async fn call( + &self, + request: TransactionRequest, + block_number: Option, + state_overrides: Option, + block_overrides: Option>, + ) -> RpcResult { + trace!(target: "rpc::eth", ?request, ?block_number, ?state_overrides, ?block_overrides, "Serving eth_call"); + Ok(EthCall::call( + self, + request, + block_number, + EvmOverrides::new(state_overrides, block_overrides), + ) + .await?) + } + + /// Handler for: `eth_callMany` + async fn call_many( + &self, + bundle: Bundle, + state_context: Option, + state_override: Option, + ) -> RpcResult> { + trace!(target: "rpc::eth", ?bundle, ?state_context, ?state_override, "Serving eth_callMany"); + Ok(EthCall::call_many(self, bundle, state_context, state_override).await?) + } + + /// Handler for: `eth_createAccessList` + async fn create_access_list( + &self, + request: TransactionRequest, + block_number: Option, + ) -> RpcResult { + trace!(target: "rpc::eth", ?request, ?block_number, "Serving eth_createAccessList"); + let access_list_with_gas_used = + EthCall::create_access_list_at(self, request, block_number).await?; + + Ok(access_list_with_gas_used) + } + + /// Handler for: `eth_estimateGas` + async fn estimate_gas( + &self, + request: TransactionRequest, + block_number: Option, + state_override: Option, + ) -> RpcResult { + trace!(target: "rpc::eth", ?request, ?block_number, "Serving eth_estimateGas"); + Ok(EthCall::estimate_gas_at( + self, + request, + block_number.unwrap_or_default(), + state_override, + ) + .await?) + } + + /// Handler for: `eth_gasPrice` + async fn gas_price(&self) -> RpcResult { + trace!(target: "rpc::eth", "Serving eth_gasPrice"); + return Ok(EthFees::gas_price(self).await?) + } + + /// Handler for: `eth_maxPriorityFeePerGas` + async fn max_priority_fee_per_gas(&self) -> RpcResult { + trace!(target: "rpc::eth", "Serving eth_maxPriorityFeePerGas"); + return Ok(EthFees::suggested_priority_fee(self).await?) + } + + /// Handler for: `eth_blobBaseFee` + async fn blob_base_fee(&self) -> RpcResult { + trace!(target: "rpc::eth", "Serving eth_blobBaseFee"); + return Ok(EthFees::blob_base_fee(self).await?) + } + + // FeeHistory is calculated based on lazy evaluation of fees for historical blocks, and further + // caching of it in the LRU cache. + // When new RPC call is executed, the cache gets locked, we check it for the historical fees + // according to the requested block range, and fill any cache misses (in both RPC response + // and cache itself) with the actual data queried from the database. + // To minimize the number of database seeks required to query the missing data, we calculate the + // first non-cached block number and last non-cached block number. After that, we query this + // range of consecutive blocks from the database. 
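A sketch of the "first/last non-cached block" computation the comment above describes; the map type and names are hypothetical, not the actual reth implementation:

    use std::collections::BTreeMap;

    /// Smallest and largest block numbers in `start..=end` missing from the
    /// cache; everything in between is then fetched from the database as one
    /// consecutive range to minimize seeks.
    fn missing_range<V>(cache: &BTreeMap<u64, V>, start: u64, end: u64) -> Option<(u64, u64)> {
        let first = (start..=end).find(|n| !cache.contains_key(n))?;
        let last = (start..=end).rev().find(|n| !cache.contains_key(n))?;
        Some((first, last))
    }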
+ /// Handler for: `eth_feeHistory` + async fn fee_history( + &self, + block_count: U64, + newest_block: BlockNumberOrTag, + reward_percentiles: Option>, + ) -> RpcResult { + trace!(target: "rpc::eth", ?block_count, ?newest_block, ?reward_percentiles, "Serving eth_feeHistory"); + return Ok( + EthFees::fee_history(self, block_count.to(), newest_block, reward_percentiles).await? + ) + } + + /// Handler for: `eth_mining` + async fn is_mining(&self) -> RpcResult { + Err(internal_rpc_err("unimplemented")) + } + + /// Handler for: `eth_hashrate` + async fn hashrate(&self) -> RpcResult { + Ok(U256::ZERO) + } + + /// Handler for: `eth_getWork` + async fn get_work(&self) -> RpcResult { + Err(internal_rpc_err("unimplemented")) + } + + /// Handler for: `eth_submitHashrate` + async fn submit_hashrate(&self, _hashrate: U256, _id: B256) -> RpcResult { + Ok(false) + } + + /// Handler for: `eth_submitWork` + async fn submit_work( + &self, + _nonce: B64, + _pow_hash: B256, + _mix_digest: B256, + ) -> RpcResult { + Err(internal_rpc_err("unimplemented")) + } + + /// Handler for: `eth_sendTransaction` + async fn send_transaction(&self, request: TransactionRequest) -> RpcResult { + trace!(target: "rpc::eth", ?request, "Serving eth_sendTransaction"); + Ok(EthTransactions::send_transaction(self, request).await?) + } + + /// Handler for: `eth_sendRawTransaction` + async fn send_raw_transaction(&self, tx: Bytes) -> RpcResult { + trace!(target: "rpc::eth", ?tx, "Serving eth_sendRawTransaction"); + Ok(EthTransactions::send_raw_transaction(self, tx).await?) + } + + /// Handler for: `eth_sign` + async fn sign(&self, address: Address, message: Bytes) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?message, "Serving eth_sign"); + Ok(EthTransactions::sign(self, address, message).await?) + } + + /// Handler for: `eth_signTransaction` + async fn sign_transaction(&self, _transaction: TransactionRequest) -> RpcResult { + Err(internal_rpc_err("unimplemented")) + } + + /// Handler for: `eth_signTypedData` + async fn sign_typed_data(&self, address: Address, data: TypedData) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?data, "Serving eth_signTypedData"); + Ok(EthTransactions::sign_typed_data(self, &data, address)?) + } + + /// Handler for: `eth_getProof` + async fn get_proof( + &self, + address: Address, + keys: Vec, + block_number: Option, + ) -> RpcResult { + trace!(target: "rpc::eth", ?address, ?keys, ?block_number, "Serving eth_getProof"); + Ok(EthState::get_proof(self, address, keys, block_number)?.await?) + } + + /// Handler for: `eth_getBlobSidecars` + async fn get_blob_sidecars(&self, block_id: BlockId) -> RpcResult>> { + trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlobSidecars"); + Ok(EthBlocks::rpc_block_sidecars(self, block_id).await?) + } + + /// Handler for: `eth_getBlockSidecarByTxHash` + async fn get_block_sidecar_by_tx_hash(&self, hash: B256) -> RpcResult> { + trace!(target: "rpc::eth", ?hash, "Serving eth_getBlockSidecarByTxHash"); + Ok(EthTransactions::rpc_transaction_sidecar(self, hash).await?) + } +} diff --git a/crates/rpc/rpc-api/src/eth_filter.rs b/crates/rpc/rpc-eth-api/src/filter.rs similarity index 97% rename from crates/rpc/rpc-api/src/eth_filter.rs rename to crates/rpc/rpc-eth-api/src/filter.rs index 2e395d5ba..da53b577e 100644 --- a/crates/rpc/rpc-api/src/eth_filter.rs +++ b/crates/rpc/rpc-eth-api/src/filter.rs @@ -1,5 +1,8 @@ +//! `eth_` RPC API for filtering. 
+ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use reth_rpc_types::{Filter, FilterChanges, FilterId, Log, PendingTransactionFilterKind}; + /// Rpc Interface for the poll-based Ethereum filter API. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs new file mode 100644 index 000000000..e468ccf95 --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -0,0 +1,269 @@ +//! Database access for `eth_` block RPC methods. Loads block and receipt data w.r.t. network. + +use std::sync::Arc; + +use futures::Future; +use reth_primitives::{BlockId, Receipt, SealedBlock, SealedBlockWithSenders, TransactionMeta}; +use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; +use reth_rpc_eth_types::{EthApiError, EthResult, EthStateCache, ReceiptBuilder}; +use reth_rpc_types::{AnyTransactionReceipt, BlockSidecar, Header, Index, RichBlock}; +use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; + +use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; + +/// Block-related functions for the [`EthApiServer`](crate::EthApiServer) trait in the +/// `eth_` namespace. +pub trait EthBlocks: LoadBlock { + /// Returns a handle for reading data from disk. + /// + /// Data access in default (L1) trait method implementations. + fn provider(&self) -> impl HeaderProvider; + + /// Returns the block header for the given block id. + fn rpc_block_header( + &self, + block_id: BlockId, + ) -> impl Future<Output = EthResult<Option<Header>>> + Send + where + Self: LoadPendingBlock + SpawnBlocking, + { + async move { Ok(self.rpc_block(block_id, false).await?.map(|block| block.inner.header)) } + } + + /// Returns the populated rpc block object for the given block id. + /// + /// If `full` is true, the block object will contain all transaction objects, otherwise it will + /// only contain the transaction hashes. + fn rpc_block( + &self, + block_id: BlockId, + full: bool, + ) -> impl Future<Output = EthResult<Option<RichBlock>>> + Send + where + Self: LoadPendingBlock + SpawnBlocking, + { + async move { + let block = match self.block_with_senders(block_id).await? { + Some(block) => block, + None => return Ok(None), + }; + let block_hash = block.hash(); + let total_difficulty = EthBlocks::provider(self) + .header_td_by_number(block.number)? + .ok_or(EthApiError::UnknownBlockNumber)?; + let block = + from_block(block.unseal(), total_difficulty, full.into(), Some(block_hash))?; + Ok(Some(block.into())) + } + } + + /// Returns the number of transactions in the given block. + /// + /// Returns `None` if the block does not exist. + fn block_transaction_count( + &self, + block_id: BlockId, + ) -> impl Future<Output = EthResult<Option<usize>>> + Send { + async move { + if block_id.is_pending() { + // Pending block can be fetched directly without need for caching + return Ok(LoadBlock::provider(self).pending_block()?.map(|block| block.body.len())) + } + + let block_hash = match LoadBlock::provider(self).block_hash_for_id(block_id)? { + Some(block_hash) => block_hash, + None => return Ok(None), + }; + + Ok(self.cache().get_block_transactions(block_hash).await?.map(|txs| txs.len())) + } + } + + /// Helper function for `eth_getBlockReceipts`. + /// + /// Returns all transaction receipts in the block, or `None` if the block wasn't found.
+    fn block_receipts(
+        &self,
+        block_id: BlockId,
+    ) -> impl Future<Output = EthResult<Option<Vec<AnyTransactionReceipt>>>> + Send
+    where
+        Self: LoadReceipt,
+    {
+        async move {
+            if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? {
+                let block_number = block.number;
+                let base_fee = block.base_fee_per_gas;
+                let block_hash = block.hash();
+                let excess_blob_gas = block.excess_blob_gas;
+                let timestamp = block.timestamp;
+                let block = block.unseal();
+
+                let receipts = block
+                    .body
+                    .into_iter()
+                    .zip(receipts.iter())
+                    .enumerate()
+                    .map(|(idx, (tx, receipt))| {
+                        let meta = TransactionMeta {
+                            tx_hash: tx.hash,
+                            index: idx as u64,
+                            block_hash,
+                            block_number,
+                            base_fee,
+                            excess_blob_gas,
+                            timestamp,
+                        };
+
+                        ReceiptBuilder::new(&tx, meta, receipt, &receipts)
+                            .map(|builder| builder.build())
+                    })
+                    .collect::<EthResult<Vec<_>>>();
+                return receipts.map(Some)
+            }
+
+            Ok(None)
+        }
+    }
+
+    /// Helper method that loads a block and all its receipts.
+    fn load_block_and_receipts(
+        &self,
+        block_id: BlockId,
+    ) -> impl Future<Output = EthResult<Option<(SealedBlock, Arc<Vec<Receipt>>)>>> + Send
+    where
+        Self: LoadReceipt,
+    {
+        async move {
+            if block_id.is_pending() {
+                return Ok(LoadBlock::provider(self)
+                    .pending_block_and_receipts()?
+                    .map(|(sb, receipts)| (sb, Arc::new(receipts))))
+            }
+
+            if let Some(block_hash) = LoadBlock::provider(self).block_hash_for_id(block_id)? {
+                return Ok(LoadReceipt::cache(self).get_block_and_receipts(block_hash).await?)
+            }
+
+            Ok(None)
+        }
+    }
+
+    /// Returns uncle headers of given block.
+    ///
+    /// Returns an empty vec if there are none.
+    fn ommers(&self, block_id: BlockId) -> EthResult<Option<Vec<Header>>> {
+        Ok(LoadBlock::provider(self).ommers_by_id(block_id)?)
+    }
+
+    /// Returns uncle block at given index in given block.
+    ///
+    /// Returns `None` if index out of range.
+    fn ommer_by_block_and_index(
+        &self,
+        block_id: BlockId,
+        index: Index,
+    ) -> impl Future<Output = EthResult<Option<RichBlock>>> + Send {
+        async move {
+            let uncles = if block_id.is_pending() {
+                // Pending block can be fetched directly without need for caching
+                LoadBlock::provider(self).pending_block()?.map(|block| block.ommers)
+            } else {
+                LoadBlock::provider(self).ommers_by_id(block_id)?
+            }
+            .unwrap_or_default();
+
+            let index = usize::from(index);
+            let uncle =
+                uncles.into_iter().nth(index).map(|header| uncle_block_from_header(header).into());
+            Ok(uncle)
+        }
+    }
+
+    /// Returns the sidecars for the given block id.
+    fn rpc_block_sidecars(
+        &self,
+        block_id: BlockId,
+    ) -> impl Future<Output = EthResult<Option<Vec<BlockSidecar>>>> + Send
+    where
+        Self: LoadPendingBlock + SpawnBlocking,
+    {
+        async move {
+            if block_id.is_pending() {
+                return Ok(None);
+            }
+
+            let sidecars =
+                if let Some(block_hash) = LoadBlock::provider(self).block_hash_for_id(block_id)? {
+                    self.cache().get_sidecars(block_hash).await?
+                } else {
+                    None
+                };
+
+            Ok(sidecars.map(|sidecars| {
+                sidecars
+                    .into_iter()
+                    .map(|item| BlockSidecar {
+                        blob_sidecar: item.blob_transaction_sidecar,
+                        block_number: item.block_number.to(),
+                        block_hash: item.block_hash,
+                        tx_index: item.tx_index,
+                        tx_hash: item.tx_hash,
+                    })
+                    .collect()
+            }))
+        }
+    }
+}
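`block_receipts` above pairs every transaction in the block with its receipt and per-transaction metadata via `zip` + `enumerate`. A self-contained toy version of that pattern, with stand-in types rather than reth's:

```rust
// Toy types standing in for reth's transactions and receipts.
struct Tx { hash: u64 }
struct Receipt { gas_used: u64 }
struct Meta { tx_hash: u64, index: u64, gas_used: u64 }

// Pair each transaction with its receipt and position in the block, as
// `block_receipts` does above.
fn build_metas(txs: Vec<Tx>, receipts: &[Receipt]) -> Vec<Meta> {
    txs.into_iter()
        .zip(receipts)
        .enumerate()
        .map(|(idx, (tx, receipt))| Meta {
            tx_hash: tx.hash,
            index: idx as u64,
            gas_used: receipt.gas_used,
        })
        .collect()
}

fn main() {
    let metas = build_metas(
        vec![Tx { hash: 0xaa }, Tx { hash: 0xbb }],
        &[Receipt { gas_used: 21_000 }, Receipt { gas_used: 42_000 }],
    );
    assert_eq!(metas[1].index, 1);
}
```

+
+/// Loads a block from database.
+///
+/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods.
+pub trait LoadBlock: LoadPendingBlock + SpawnBlocking {
+    /// Returns a handle for reading data from disk.
+    ///
+    /// Data access in default (L1) trait method implementations.
+    fn provider(&self) -> impl BlockReaderIdExt;
+
+    /// Returns a handle for reading data from memory.
+    ///
+    /// Data access in default (L1) trait method implementations.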
+ fn cache(&self) -> &EthStateCache; + + /// Returns the block object for the given block id. + fn block( + &self, + block_id: BlockId, + ) -> impl Future>> + Send { + async move { + self.block_with_senders(block_id) + .await + .map(|maybe_block| maybe_block.map(|block| block.block)) + } + } + + /// Returns the block object for the given block id. + fn block_with_senders( + &self, + block_id: BlockId, + ) -> impl Future>> + Send { + async move { + if block_id.is_pending() { + // Pending block can be fetched directly without need for caching + let maybe_pending = + LoadPendingBlock::provider(self).pending_block_with_senders()?; + return if maybe_pending.is_some() { + Ok(maybe_pending) + } else { + self.local_pending_block().await + } + } + + let block_hash = match LoadPendingBlock::provider(self).block_hash_for_id(block_id)? { + Some(block_hash) => block_hash, + None => return Ok(None), + }; + + Ok(self.cache().get_sealed_block_with_senders(block_hash).await?) + } + } +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs b/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs new file mode 100644 index 000000000..4a2c81b0f --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/blocking_task.rs @@ -0,0 +1,65 @@ +//! Spawns a blocking task. CPU heavy tasks are executed with the `rayon` library. IO heavy tasks +//! are executed on the `tokio` runtime. + +use futures::Future; +use reth_rpc_eth_types::{EthApiError, EthResult}; +use reth_tasks::{pool::BlockingTaskPool, TaskSpawner}; +use tokio::sync::{oneshot, AcquireError, OwnedSemaphorePermit}; + +/// Executes code on a blocking thread. +pub trait SpawnBlocking: Clone + Send + Sync + 'static { + /// Returns a handle for spawning IO heavy blocking tasks. + /// + /// Runtime access in default trait method implementations. + fn io_task_spawner(&self) -> impl TaskSpawner; + + /// Returns a handle for spawning CPU heavy blocking tasks. + /// + /// Thread pool access in default trait method implementations. + fn tracing_task_pool(&self) -> &BlockingTaskPool; + + /// See also [`Semaphore::acquire_owned`](`tokio::sync::Semaphore::acquire_owned`). + fn acquire_owned( + &self, + ) -> impl Future> + Send; + + /// See also [`Semaphore::acquire_many_owned`](`tokio::sync::Semaphore::acquire_many_owned`). + fn acquire_many_owned( + &self, + n: u32, + ) -> impl Future> + Send; + + /// Executes the future on a new blocking task. + /// + /// Note: This is expected for futures that are dominated by blocking IO operations, for tracing + /// or CPU bound operations in general use [`spawn_tracing`](Self::spawn_tracing). + fn spawn_blocking_io(&self, f: F) -> impl Future> + Send + where + F: FnOnce(Self) -> EthResult + Send + 'static, + R: Send + 'static, + { + let (tx, rx) = oneshot::channel(); + let this = self.clone(); + self.io_task_spawner().spawn_blocking(Box::pin(async move { + let res = async move { f(this) }.await; + let _ = tx.send(res); + })); + + async move { rx.await.map_err(|_| EthApiError::InternalEthError)? } + } + + /// Executes a blocking task on the tracing pool. + /// + /// Note: This is expected for futures that are predominantly CPU bound, as it uses `rayon` + /// under the hood, for blocking IO futures use [`spawn_blocking`](Self::spawn_blocking_io). See + /// . 
+    fn spawn_tracing<F, R>(&self, f: F) -> impl Future<Output = EthResult<R>> + Send
+    where
+        F: FnOnce(Self) -> EthResult<R> + Send + 'static,
+        R: Send + 'static,
+    {
+        let this = self.clone();
+        let fut = self.tracing_task_pool().spawn(move || f(this));
+        async move { fut.await.map_err(|_| EthApiError::InternalBlockingTaskError)? }
+    }
+}
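`spawn_blocking_io` above is the classic oneshot-channel-plus-blocking-spawn shape. A minimal sketch of the same pattern in plain tokio, with an illustrative error type:

```rust
use tokio::sync::oneshot;

// Run a blocking closure off the async runtime and await its result, the
// same shape as `spawn_blocking_io`: a oneshot channel carries the value back.
async fn run_blocking<R, F>(f: F) -> Result<R, String>
where
    F: FnOnce() -> R + Send + 'static,
    R: Send + 'static,
{
    let (tx, rx) = oneshot::channel();
    tokio::task::spawn_blocking(move || {
        // Sending only fails if the receiver was dropped; safe to ignore.
        let _ = tx.send(f());
    });
    rx.await.map_err(|_| "blocking task panicked or was dropped".to_string())
}

#[tokio::main]
async fn main() {
    let sum = run_blocking(|| (0u64..1_000_000).sum::<u64>()).await.unwrap();
    println!("{sum}");
}
```

diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs
new file mode 100644
index 000000000..037929289
--- /dev/null
+++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs
@@ -0,0 +1,779 @@
+//! Loads a pending block from database. Helper trait for `eth_` transaction, call and trace RPC
+//! methods.
+
+use futures::Future;
+use reth_evm::{ConfigureEvm, ConfigureEvmEnv};
+use reth_primitives::{
+    revm_primitives::{
+        BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, HaltReason,
+        ResultAndState, TransactTo,
+    },
+    Bytes, TransactionSignedEcRecovered, TxKind, B256, U256,
+};
+use reth_provider::StateProvider;
+use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef};
+use reth_rpc_eth_types::{
+    cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper},
+    error::ensure_success,
+    revm_utils::{
+        apply_state_overrides, build_call_evm_env, caller_gas_allowance,
+        cap_tx_gas_limit_with_caller_allowance, get_precompiles, prepare_call_env,
+    },
+    EthApiError, EthResult, RevertError, RpcInvalidTransactionError, StateCacheDb,
+};
+use reth_rpc_server_types::constants::gas_oracle::{ESTIMATE_GAS_ERROR_RATIO, MIN_TRANSACTION_GAS};
+use reth_rpc_types::{
+    state::{EvmOverrides, StateOverride},
+    AccessListWithGasUsed, BlockId, Bundle, EthCallResponse, StateContext, TransactionInfo,
+    TransactionRequest,
+};
+use revm::{Database, DatabaseCommit};
+use revm_inspectors::access_list::AccessListInspector;
+use tracing::trace;
+
+use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace};
+
+/// Execution related functions for the [`EthApiServer`](crate::EthApiServer) trait in
+/// the `eth_` namespace.
+pub trait EthCall: Call + LoadPendingBlock {
+    /// Estimate gas needed for execution of the `request` at the [`BlockId`].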
+ fn estimate_gas_at( + &self, + request: TransactionRequest, + at: BlockId, + state_override: Option, + ) -> impl Future> + Send { + Call::estimate_gas_at(self, request, at, state_override) + } + + /// Executes the call request (`eth_call`) and returns the output + fn call( + &self, + request: TransactionRequest, + block_number: Option, + overrides: EvmOverrides, + ) -> impl Future> + Send { + async move { + let (res, _env) = + self.transact_call_at(request, block_number.unwrap_or_default(), overrides).await?; + + ensure_success(res.result) + } + } + + /// Simulate arbitrary number of transactions at an arbitrary blockchain index, with the + /// optionality of state overrides + fn call_many( + &self, + bundle: Bundle, + state_context: Option, + mut state_override: Option, + ) -> impl Future>> + Send + where + Self: LoadBlock, + { + async move { + let Bundle { transactions, block_override } = bundle; + if transactions.is_empty() { + return Err(EthApiError::InvalidParams(String::from("transactions are empty."))) + } + + let StateContext { transaction_index, block_number } = + state_context.unwrap_or_default(); + let transaction_index = transaction_index.unwrap_or_default(); + + let target_block = block_number.unwrap_or_default(); + let is_block_target_pending = target_block.is_pending(); + + let ((cfg, block_env, _), block) = futures::try_join!( + self.evm_env_at(target_block), + self.block_with_senders(target_block) + )?; + + let Some(block) = block else { return Err(EthApiError::UnknownBlockNumber) }; + let gas_limit = self.call_gas_limit(); + + // we're essentially replaying the transactions in the block here, hence we need the + // state that points to the beginning of the block, which is the state at + // the parent block + let mut at = block.parent_hash; + let mut replay_block_txs = true; + + let num_txs = transaction_index.index().unwrap_or(block.body.len()); + // but if all transactions are to be replayed, we can use the state at the block itself, + // however only if we're not targeting the pending block, because for pending we can't + // rely on the block's state being available + if !is_block_target_pending && num_txs == block.body.len() { + at = block.hash(); + replay_block_txs = false; + } + + let this = self.clone(); + self.spawn_with_state_at_block(at.into(), move |state| { + let mut results = Vec::with_capacity(transactions.len()); + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + if replay_block_txs { + // only need to replay the transactions in the block if not all transactions are + // to be replayed + let transactions = block.into_transactions_ecrecovered().take(num_txs); + for tx in transactions { + let env = EnvWithHandlerCfg::new_with_cfg_env( + cfg.clone(), + block_env.clone(), + Call::evm_config(&this).tx_env(&tx), + ); + let (res, _) = this.transact(&mut db, env)?; + db.commit(res.state); + } + } + + let block_overrides = block_override.map(Box::new); + + let mut transactions = transactions.into_iter().peekable(); + while let Some(tx) = transactions.next() { + // apply state overrides only once, before the first transaction + let state_overrides = state_override.take(); + let overrides = EvmOverrides::new(state_overrides, block_overrides.clone()); + + let env = prepare_call_env( + cfg.clone(), + block_env.clone(), + tx, + gas_limit, + &mut db, + overrides, + )?; + let (res, _) = this.transact(&mut db, env)?; + + match ensure_success(res.result) { + Ok(output) => { + results.push(EthCallResponse { value: Some(output), error: None }); + } + Err(err) 
=> { + results.push(EthCallResponse { + value: None, + error: Some(err.to_string()), + }); + } + } + + if transactions.peek().is_some() { + // need to apply the state changes of this call before executing the next + // call + db.commit(res.state); + } + } + + Ok(results) + }) + .await + } + } + + /// Creates [`AccessListWithGasUsed`] for the [`TransactionRequest`] at the given + /// [`BlockId`], or latest block. + fn create_access_list_at( + &self, + request: TransactionRequest, + block_number: Option, + ) -> impl Future> + Send + where + Self: Trace, + { + async move { + let block_id = block_number.unwrap_or_default(); + let (cfg, block, at) = self.evm_env_at(block_id).await?; + + self.spawn_blocking_io(move |this| { + this.create_access_list_with(cfg, block, at, request) + }) + .await + } + } + + /// Creates [`AccessListWithGasUsed`] for the [`TransactionRequest`] at the given + /// [`BlockId`]. + fn create_access_list_with( + &self, + cfg: CfgEnvWithHandlerCfg, + block: BlockEnv, + at: BlockId, + mut request: TransactionRequest, + ) -> EthResult + where + Self: Trace, + { + let state = self.state_at_block_id(at)?; + + let mut env = build_call_evm_env(cfg, block, request.clone())?; + + // we want to disable this in eth_createAccessList, since this is common practice used by + // other node impls and providers + env.cfg.disable_block_gas_limit = true; + + // The basefee should be ignored for eth_createAccessList + // See: + // + env.cfg.disable_base_fee = true; + + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + if request.gas.is_none() && env.tx.gas_price > U256::ZERO { + // no gas limit was provided in the request, so we need to cap the request's gas limit + cap_tx_gas_limit_with_caller_allowance(&mut db, &mut env.tx)?; + } + + let from = request.from.unwrap_or_default(); + let to = if let Some(TxKind::Call(to)) = request.to { + to + } else { + let nonce = db.basic_ref(from)?.unwrap_or_default().nonce; + from.create(nonce) + }; + + // can consume the list since we're not using the request anymore + let initial = request.access_list.take().unwrap_or_default(); + + let precompiles = get_precompiles(env.handler_cfg.spec_id); + let mut inspector = AccessListInspector::new(initial, from, to, precompiles); + let (result, env) = self.inspect(&mut db, env, &mut inspector)?; + + match result.result { + ExecutionResult::Halt { reason, .. } => Err(match reason { + HaltReason::NonceOverflow => RpcInvalidTransactionError::NonceMaxValue, + halt => RpcInvalidTransactionError::EvmHalt(halt), + }), + ExecutionResult::Revert { output, .. } => { + Err(RpcInvalidTransactionError::Revert(RevertError::new(output))) + } + ExecutionResult::Success { .. } => Ok(()), + }?; + + let access_list = inspector.into_access_list(); + + let cfg_with_spec_id = + CfgEnvWithHandlerCfg { cfg_env: env.cfg.clone(), handler_cfg: env.handler_cfg }; + + // calculate the gas used using the access list + request.access_list = Some(access_list.clone()); + let gas_used = + self.estimate_gas_with(cfg_with_spec_id, env.block.clone(), request, &*db.db, None)?; + + Ok(AccessListWithGasUsed { access_list, gas_used }) + } +} + +/// Executes code on state. +pub trait Call: LoadState + SpawnBlocking { + /// Returns default gas limit to use for `eth_call` and tracing RPC methods. + /// + /// Data access in default trait method implementations. + fn call_gas_limit(&self) -> u64; + + /// Returns a handle for reading evm config. + /// + /// Data access in default (L1) trait method implementations. 
+ fn evm_config(&self) -> &impl ConfigureEvm; + + /// Executes the closure with the state that corresponds to the given [`BlockId`]. + fn with_state_at_block(&self, at: BlockId, f: F) -> EthResult + where + F: FnOnce(StateProviderTraitObjWrapper<'_>) -> EthResult, + { + let state = self.state_at_block_id(at)?; + f(StateProviderTraitObjWrapper(&state)) + } + + /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state + /// changes. + fn transact( + &self, + db: DB, + env: EnvWithHandlerCfg, + ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> + where + DB: Database, + ::Error: Into, + { + let mut evm = self.evm_config().evm_with_env(db, env); + let res = evm.transact()?; + let (_, env) = evm.into_db_and_env_with_handler_cfg(); + Ok((res, env)) + } + + /// Executes the call request at the given [`BlockId`]. + fn transact_call_at( + &self, + request: TransactionRequest, + at: BlockId, + overrides: EvmOverrides, + ) -> impl Future> + Send + where + Self: LoadPendingBlock, + { + let this = self.clone(); + self.spawn_with_call_at(request, at, overrides, move |db, env| this.transact(db, env)) + } + + /// Executes the closure with the state that corresponds to the given [`BlockId`] on a new task + fn spawn_with_state_at_block( + &self, + at: BlockId, + f: F, + ) -> impl Future> + Send + where + F: FnOnce(StateProviderTraitObjWrapper<'_>) -> EthResult + Send + 'static, + T: Send + 'static, + { + self.spawn_tracing(move |this| { + let state = this.state_at_block_id(at)?; + f(StateProviderTraitObjWrapper(&state)) + }) + } + + /// Prepares the state and env for the given [`TransactionRequest`] at the given [`BlockId`] and + /// executes the closure on a new task returning the result of the closure. + /// + /// This returns the configured [`EnvWithHandlerCfg`] for the given [`TransactionRequest`] at + /// the given [`BlockId`] and with configured call settings: `prepare_call_env`. + fn spawn_with_call_at( + &self, + request: TransactionRequest, + at: BlockId, + overrides: EvmOverrides, + f: F, + ) -> impl Future> + Send + where + Self: LoadPendingBlock, + F: FnOnce(StateCacheDbRefMutWrapper<'_, '_>, EnvWithHandlerCfg) -> EthResult + + Send + + 'static, + R: Send + 'static, + { + async move { + let (cfg, block_env, at) = self.evm_env_at(at).await?; + let this = self.clone(); + self.spawn_tracing(move |_| { + let state = this.state_at_block_id(at)?; + let mut db = + CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); + + let env = prepare_call_env( + cfg, + block_env, + request, + this.call_gas_limit(), + &mut db, + overrides, + )?; + + f(StateCacheDbRefMutWrapper(&mut db), env) + }) + .await + .map_err(|_| EthApiError::InternalBlockingTaskError) + } + } + + /// Retrieves the transaction if it exists and executes it. + /// + /// Before the transaction is executed, all previous transaction in the block are applied to the + /// state by executing them first. + /// The callback `f` is invoked with the [`ResultAndState`] after the transaction was executed + /// and the database that points to the beginning of the transaction. + /// + /// Note: Implementers should use a threadpool where blocking is allowed, such as + /// [`BlockingTaskPool`](reth_tasks::pool::BlockingTaskPool). 
+ fn spawn_replay_transaction( + &self, + hash: B256, + f: F, + ) -> impl Future>> + Send + where + Self: LoadBlock + LoadPendingBlock + LoadTransaction, + F: FnOnce(TransactionInfo, ResultAndState, StateCacheDb<'_>) -> EthResult + + Send + + 'static, + R: Send + 'static, + { + async move { + let (transaction, block) = match self.transaction_and_block(hash).await? { + None => return Ok(None), + Some(res) => res, + }; + let (tx, tx_info) = transaction.split(); + + let (cfg, block_env, _) = self.evm_env_at(block.hash().into()).await?; + + // we need to get the state of the parent block because we're essentially replaying the + // block the transaction is included in + let parent_block = block.parent_hash; + let block_txs = block.into_transactions_ecrecovered(); + + let this = self.clone(); + self.spawn_with_state_at_block(parent_block.into(), move |state| { + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + // replay all transactions prior to the targeted transaction + this.replay_transactions_until( + &mut db, + cfg.clone(), + block_env.clone(), + block_txs, + tx.hash, + )?; + + let env = EnvWithHandlerCfg::new_with_cfg_env( + cfg, + block_env, + Call::evm_config(&this).tx_env(&tx), + ); + + let (res, _) = this.transact(&mut db, env)?; + f(tx_info, res, db) + }) + .await + .map(Some) + } + } + + /// Replays all the transactions until the target transaction is found. + /// + /// All transactions before the target transaction are executed and their changes are written to + /// the _runtime_ db ([`CacheDB`]). + /// + /// Note: This assumes the target transaction is in the given iterator. + /// Returns the index of the target transaction in the given iterator. + fn replay_transactions_until( + &self, + db: &mut CacheDB, + cfg: CfgEnvWithHandlerCfg, + block_env: BlockEnv, + transactions: impl IntoIterator, + target_tx_hash: B256, + ) -> Result + where + DB: DatabaseRef, + EthApiError: From<::Error>, + { + let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); + + let mut evm = self.evm_config().evm_with_env(db, env); + let mut index = 0; + for tx in transactions { + if tx.hash() == target_tx_hash { + // reached the target transaction + break + } + + let sender = tx.signer(); + self.evm_config().fill_tx_env(evm.tx_mut(), &tx.into_signed(), sender); + evm.transact_commit()?; + index += 1; + } + Ok(index) + } + + /// Estimate gas needed for execution of the `request` at the [`BlockId`]. + fn estimate_gas_at( + &self, + request: TransactionRequest, + at: BlockId, + state_override: Option, + ) -> impl Future> + Send + where + Self: LoadPendingBlock, + { + async move { + let (cfg, block_env, at) = self.evm_env_at(at).await?; + + self.spawn_blocking_io(move |this| { + let state = this.state_at_block_id(at)?; + this.estimate_gas_with(cfg, block_env, request, state, state_override) + }) + .await + } + } + + /// Estimates the gas usage of the `request` with the state. 
+ /// + /// This will execute the [`TransactionRequest`] and find the best gas limit via binary search + fn estimate_gas_with( + &self, + mut cfg: CfgEnvWithHandlerCfg, + block: BlockEnv, + request: TransactionRequest, + state: S, + state_override: Option, + ) -> EthResult + where + S: StateProvider, + { + // Disabled because eth_estimateGas is sometimes used with eoa senders + // See + cfg.disable_eip3607 = true; + + // The basefee should be ignored for eth_createAccessList + // See: + // + cfg.disable_base_fee = true; + + // Keep a copy of gas related request values + let tx_request_gas_limit = request.gas; + let tx_request_gas_price = request.gas_price; + let block_env_gas_limit = block.gas_limit; + + // Determine the highest possible gas limit, considering both the request's specified limit + // and the block's limit. + let mut highest_gas_limit = tx_request_gas_limit + .map(|tx_gas_limit| U256::from(tx_gas_limit).max(block_env_gas_limit)) + .unwrap_or(block_env_gas_limit); + + // Configure the evm env + let mut env = build_call_evm_env(cfg, block, request)?; + let mut db = CacheDB::new(StateProviderDatabase::new(state)); + + // Apply any state overrides if specified. + if let Some(state_override) = state_override { + apply_state_overrides(state_override, &mut db)?; + } + + // Optimize for simple transfer transactions, potentially reducing the gas estimate. + if env.tx.data.is_empty() { + if let TransactTo::Call(to) = env.tx.transact_to { + if let Ok(code) = db.db.account_code(to) { + let no_code_callee = code.map(|code| code.is_empty()).unwrap_or(true); + if no_code_callee { + // If the tx is a simple transfer (call to an account with no code) we can + // shortcircuit. But simply returning + // `MIN_TRANSACTION_GAS` is dangerous because there might be additional + // field combos that bump the price up, so we try executing the function + // with the minimum gas limit to make sure. + let mut env = env.clone(); + env.tx.gas_limit = MIN_TRANSACTION_GAS; + if let Ok((res, _)) = self.transact(&mut db, env) { + if res.result.is_success() { + return Ok(U256::from(MIN_TRANSACTION_GAS)) + } + } + } + } + } + } + + // Check funds of the sender (only useful to check if transaction gas price is more than 0). + // + // The caller allowance is check by doing `(account.balance - tx.value) / tx.gas_price` + if env.tx.gas_price > U256::ZERO { + // cap the highest gas limit by max gas caller can afford with given gas price + highest_gas_limit = highest_gas_limit.min(caller_gas_allowance(&mut db, &env.tx)?); + } + + // We can now normalize the highest gas limit to a u64 + let mut highest_gas_limit: u64 = highest_gas_limit.try_into().unwrap_or(u64::MAX); + + // If the provided gas limit is less than computed cap, use that + env.tx.gas_limit = env.tx.gas_limit.min(highest_gas_limit); + + trace!(target: "rpc::eth::estimate", ?env, "Starting gas estimation"); + + // Execute the transaction with the highest possible gas limit. + let (mut res, mut env) = match self.transact(&mut db, env.clone()) { + // Handle the exceptional case where the transaction initialization uses too much gas. + // If the gas price or gas limit was specified in the request, retry the transaction + // with the block's gas limit to determine if the failure was due to + // insufficient gas. 
+            Err(EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh))
+                if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() =>
+            {
+                return Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db))
+            }
+            // Propagate other results (successful or other errors).
+            ethres => ethres?,
+        };
+
+        let gas_refund = match res.result {
+            ExecutionResult::Success { gas_refunded, .. } => gas_refunded,
+            ExecutionResult::Halt { reason, gas_used } => {
+                // here we don't check for invalid opcode because already executed with highest gas
+                // limit
+                return Err(RpcInvalidTransactionError::halt(reason, gas_used).into())
+            }
+            ExecutionResult::Revert { output, .. } => {
+                // if price or limit was included in the request then we can execute the request
+                // again with the block's gas limit to check if revert is gas related or not
+                return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() {
+                    Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db))
+                } else {
+                    // the transaction did revert
+                    Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into())
+                }
+            }
+        };
+
+        // At this point we know the call succeeded but want to find the _best_ (lowest) gas the
+        // transaction succeeds with. We find this by doing a binary search over the possible range.
+        //
+        // NOTE: this is the gas the transaction used, which is less than the
+        // transaction requires to succeed.
+        let mut gas_used = res.result.gas_used();
+        // the lowest value is capped by the gas used by the unconstrained transaction
+        let mut lowest_gas_limit = gas_used.saturating_sub(1);
+
+        // As stated in Geth, there is a good chance that the transaction will pass if we set the
+        // gas limit to the execution gas used plus the gas refund, so we check this first
+        let optimistic_gas_limit = (gas_used + gas_refund) * 64 / 63;
+        if optimistic_gas_limit < highest_gas_limit {
+            // Set the transaction's gas limit to the calculated optimistic gas limit.
+            env.tx.gas_limit = optimistic_gas_limit;
+            // Re-execute the transaction with the new gas limit and update the result and
+            // environment.
+            (res, env) = self.transact(&mut db, env)?;
+            // Update the gas used based on the new result.
+            gas_used = res.result.gas_used();
+            // Update the gas limit estimates (highest and lowest) based on the execution result.
+            self.update_estimated_gas_range(
+                res.result,
+                optimistic_gas_limit,
+                &mut highest_gas_limit,
+                &mut lowest_gas_limit,
+            )?;
+        };
+
+        // Pick a point that's close to the estimated gas
+        let mut mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64;
+
+        // Binary search narrows the range to find the minimum gas limit needed for the
+        // transaction to succeed.
+        while (highest_gas_limit - lowest_gas_limit) > 1 {
+            // An estimation error is allowed once the current gas limit range used in the binary
+            // search is small enough (less than 1.5% of the highest gas limit)
+            if (highest_gas_limit - lowest_gas_limit) as f64 / (highest_gas_limit as f64) <
+                ESTIMATE_GAS_ERROR_RATIO
+            {
+                break
+            };
+
+            env.tx.gas_limit = mid_gas_limit;
+
+            // Execute transaction and handle potential gas errors, adjusting limits accordingly.
+            match self.transact(&mut db, env.clone()) {
+                Err(EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh)) => {
+                    // Decrease the highest gas limit if gas is too high
+                    highest_gas_limit = mid_gas_limit;
+                }
+                Err(EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooLow)) => {
+                    // Increase the lowest gas limit if gas is too low
+                    lowest_gas_limit = mid_gas_limit;
+                }
+                // Handle other cases, including successful transactions.
+                ethres => {
+                    // Unpack the result and environment if the transaction was successful.
+                    (res, env) = ethres?;
+                    // Update the estimated gas range based on the transaction result.
+                    self.update_estimated_gas_range(
+                        res.result,
+                        mid_gas_limit,
+                        &mut highest_gas_limit,
+                        &mut lowest_gas_limit,
+                    )?;
+                }
+            }
+
+            // New midpoint
+            mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64;
+        }
+
+        Ok(U256::from(highest_gas_limit))
+    }
+
+    /// Updates the highest and lowest gas limits for binary search based on the execution result.
+    ///
+    /// This function refines the gas limit estimates used in a binary search to find the optimal
+    /// gas limit for a transaction. It adjusts the highest or lowest gas limits depending on
+    /// whether the execution succeeded, reverted, or halted due to specific reasons.
+    #[inline]
+    fn update_estimated_gas_range(
+        &self,
+        result: ExecutionResult,
+        tx_gas_limit: u64,
+        highest_gas_limit: &mut u64,
+        lowest_gas_limit: &mut u64,
+    ) -> EthResult<()> {
+        match result {
+            ExecutionResult::Success { .. } => {
+                // Cap the highest gas limit with the succeeding gas limit.
+                *highest_gas_limit = tx_gas_limit;
+            }
+            ExecutionResult::Revert { .. } => {
+                // Increase the lowest gas limit.
+                *lowest_gas_limit = tx_gas_limit;
+            }
+            ExecutionResult::Halt { reason, .. } => {
+                match reason {
+                    HaltReason::OutOfGas(_) | HaltReason::InvalidFEOpcode => {
+                        // Both `OutOfGas` and `InvalidFEOpcode` can occur dynamically if the gas
+                        // left is too low. Treat this as an out of gas condition, knowing that
+                        // the call succeeds with a higher gas limit.
+                        //
+                        // Common usage of invalid opcode in OpenZeppelin:
+                        //
+
+                        // Increase the lowest gas limit.
+                        *lowest_gas_limit = tx_gas_limit;
+                    }
+                    err => {
+                        // These cases should be unreachable because we know the transaction
+                        // succeeds, but if they occur, treat them as an error.
+                        return Err(RpcInvalidTransactionError::EvmHalt(err).into())
+                    }
+                }
+            }
+        };
+
+        Ok(())
+    }
+
+    /// Executes the requests again after an out of gas error to check if the error is gas related
+    /// or not
+    #[inline]
+    fn map_out_of_gas_err<S>(
+        &self,
+        env_gas_limit: U256,
+        mut env: EnvWithHandlerCfg,
+        db: &mut CacheDB<StateProviderDatabase<S>>,
+    ) -> EthApiError
+    where
+        S: StateProvider,
+    {
+        let req_gas_limit = env.tx.gas_limit;
+        env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX);
+        let (res, _) = match self.transact(db, env) {
+            Ok(res) => res,
+            Err(err) => return err,
+        };
+        match res.result {
+            ExecutionResult::Success { .. } => {
+                // transaction succeeded by manually increasing the gas limit to
+                // highest, which means the caller lacks funds to pay for the tx
+                RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into()
+            }
+            ExecutionResult::Revert { output, .. } => {
+                // reverted again after bumping the limit
+                RpcInvalidTransactionError::Revert(RevertError::new(output)).into()
+            }
+            ExecutionResult::Halt { reason, .. } => {
+                RpcInvalidTransactionError::EvmHalt(reason).into()
+            }
+        }
+    }
+}
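The estimation path above mirrors Geth: one unconstrained run, an optimistic retry, then a binary search for the smallest passing limit with an early exit once the remaining range is under the 1.5% error ratio. A compact sketch of the search itself, with execution mocked as a closure (all names here are illustrative):

```rust
// Mirrors ESTIMATE_GAS_ERROR_RATIO: stop once the range is within 1.5%.
const ERROR_RATIO: f64 = 0.015;

fn estimate_gas(mut lowest: u64, mut highest: u64, succeeds: impl Fn(u64) -> bool) -> u64 {
    while highest - lowest > 1 {
        // Early exit: the remaining uncertainty is within the allowed error.
        if (highest - lowest) as f64 / highest as f64 < ERROR_RATIO {
            break;
        }
        let mid = ((highest as u128 + lowest as u128) / 2) as u64;
        if succeeds(mid) {
            highest = mid; // a passing run caps the estimate
        } else {
            lowest = mid; // a failing run raises the floor
        }
    }
    highest
}

fn main() {
    // Pretend the call needs exactly 53_421 gas to succeed.
    let estimate = estimate_gas(21_000, 30_000_000, |limit| limit >= 53_421);
    assert!(estimate >= 53_421);
    println!("estimated gas limit: {estimate}");
}
```

diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs
new file mode 100644
index 000000000..54c577ea2
--- /dev/null
+++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs
@@ -0,0 +1,346 @@
+//! Loads fee history from database. Helper trait for `eth_` fee and transaction RPC methods.
+
+use futures::Future;
+use reth_primitives::U256;
+use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider};
+use reth_rpc_eth_types::{
+    fee_history::calculate_reward_percentiles_for_block, EthApiError, EthResult, EthStateCache,
+    FeeHistoryCache, FeeHistoryEntry, GasPriceOracle, RpcInvalidTransactionError,
+};
+use reth_rpc_types::{BlockNumberOrTag, FeeHistory};
+use tracing::debug;
+
+use super::LoadBlock;
+
+/// Fee related functions for the [`EthApiServer`](crate::EthApiServer) trait in the
+/// `eth_` namespace.
+pub trait EthFees: LoadFee {
+    /// Returns a suggestion for a gas price for legacy transactions.
+    ///
+    /// See also:
+    fn gas_price(&self) -> impl Future<Output = EthResult<U256>> + Send
+    where
+        Self: LoadBlock,
+    {
+        LoadFee::gas_price(self)
+    }
+
+    /// Returns a suggestion for a base fee for blob transactions.
+    fn blob_base_fee(&self) -> impl Future<Output = EthResult<U256>> + Send
+    where
+        Self: LoadBlock,
+    {
+        LoadFee::blob_base_fee(self)
+    }
+
+    /// Returns a suggestion for the priority fee (the tip)
+    fn suggested_priority_fee(&self) -> impl Future<Output = EthResult<U256>> + Send
+    where
+        Self: 'static,
+    {
+        LoadFee::suggested_priority_fee(self)
+    }
+
+    /// Reports the fee history, for the given amount of blocks, up until the given newest block.
+    ///
+    /// If `reward_percentiles` are provided the [`FeeHistory`] will include the _approximated_
+    /// rewards for the requested range.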
+ fn fee_history( + &self, + mut block_count: u64, + newest_block: BlockNumberOrTag, + reward_percentiles: Option>, + ) -> impl Future> + Send { + async move { + if block_count == 0 { + return Ok(FeeHistory::default()) + } + + // See https://github.com/ethereum/go-ethereum/blob/2754b197c935ee63101cbbca2752338246384fec/eth/gasprice/feehistory.go#L218C8-L225 + let max_fee_history = if reward_percentiles.is_none() { + self.gas_oracle().config().max_header_history + } else { + self.gas_oracle().config().max_block_history + }; + + if block_count > max_fee_history { + debug!( + requested = block_count, + truncated = max_fee_history, + "Sanitizing fee history block count" + ); + block_count = max_fee_history + } + + let Some(end_block) = + LoadFee::provider(self).block_number_for_id(newest_block.into())? + else { + return Err(EthApiError::UnknownBlockNumber) + }; + + // need to add 1 to the end block to get the correct (inclusive) range + let end_block_plus = end_block + 1; + // Ensure that we would not be querying outside of genesis + if end_block_plus < block_count { + block_count = end_block_plus; + } + + // If reward percentiles were specified, we + // need to validate that they are monotonically + // increasing and 0 <= p <= 100 + // Note: The types used ensure that the percentiles are never < 0 + if let Some(percentiles) = &reward_percentiles { + if percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) { + return Err(EthApiError::InvalidRewardPercentiles) + } + } + + // Fetch the headers and ensure we got all of them + // + // Treat a request for 1 block as a request for `newest_block..=newest_block`, + // otherwise `newest_block - 2 + // NOTE: We ensured that block count is capped + let start_block = end_block_plus - block_count; + + // Collect base fees, gas usage ratios and (optionally) reward percentile data + let mut base_fee_per_gas: Vec = Vec::new(); + let mut gas_used_ratio: Vec = Vec::new(); + + let mut base_fee_per_blob_gas: Vec = Vec::new(); + let mut blob_gas_used_ratio: Vec = Vec::new(); + + let mut rewards: Vec> = Vec::new(); + + // Check if the requested range is within the cache bounds + let fee_entries = self.fee_history_cache().get_history(start_block, end_block).await; + + if let Some(fee_entries) = fee_entries { + if fee_entries.len() != block_count as usize { + return Err(EthApiError::InvalidBlockRange) + } + + for entry in &fee_entries { + base_fee_per_gas.push(entry.base_fee_per_gas as u128); + gas_used_ratio.push(entry.gas_used_ratio); + base_fee_per_blob_gas.push(entry.base_fee_per_blob_gas.unwrap_or_default()); + blob_gas_used_ratio.push(entry.blob_gas_used_ratio); + + if let Some(percentiles) = &reward_percentiles { + let mut block_rewards = Vec::with_capacity(percentiles.len()); + for &percentile in percentiles { + block_rewards.push(self.approximate_percentile(entry, percentile)); + } + rewards.push(block_rewards); + } + } + let last_entry = fee_entries.last().expect("is not empty"); + + // Also need to include the `base_fee_per_gas` and `base_fee_per_blob_gas` for the + // next block + base_fee_per_gas + .push(last_entry.next_block_base_fee(&LoadFee::provider(self).chain_spec()) + as u128); + + base_fee_per_blob_gas.push(last_entry.next_block_blob_fee().unwrap_or_default()); + } else { + // read the requested header range + let headers = LoadFee::provider(self).sealed_headers_range(start_block..=end_block)?; + if headers.len() != block_count as usize { + return Err(EthApiError::InvalidBlockRange) + } + + for header in &headers { + 
base_fee_per_gas.push(header.base_fee_per_gas.unwrap_or_default() as u128);
+                    gas_used_ratio.push(header.gas_used as f64 / header.gas_limit as f64);
+                    base_fee_per_blob_gas.push(header.blob_fee().unwrap_or_default());
+                    blob_gas_used_ratio.push(
+                        header.blob_gas_used.unwrap_or_default() as f64 /
+                            reth_primitives::constants::eip4844::MAX_DATA_GAS_PER_BLOCK as f64,
+                    );
+
+                    // Percentiles were specified, so we need to collect reward percentile info
+                    if let Some(percentiles) = &reward_percentiles {
+                        let (transactions, receipts) = LoadFee::cache(self)
+                            .get_transactions_and_receipts(header.hash())
+                            .await?
+                            .ok_or(EthApiError::InvalidBlockRange)?;
+                        rewards.push(
+                            calculate_reward_percentiles_for_block(
+                                percentiles,
+                                header.gas_used,
+                                header.base_fee_per_gas.unwrap_or_default(),
+                                &transactions,
+                                &receipts,
+                            )
+                            .unwrap_or_default(),
+                        );
+                    }
+                }
+
+                // The spec states that `base_fee_per_gas` "[..] includes the next block after the
+                // newest of the returned range, because this value can be derived from the
+                // newest block"
+                //
+                // The unwrap is safe since we checked earlier that we got at least 1 header.
+                let last_header = headers.last().expect("is present");
+                base_fee_per_gas.push(
+                    LoadFee::provider(self)
+                        .chain_spec()
+                        .base_fee_params_at_timestamp(last_header.timestamp)
+                        .next_block_base_fee(
+                            last_header.gas_used as u128,
+                            last_header.gas_limit as u128,
+                            last_header.base_fee_per_gas.unwrap_or_default() as u128,
+                        ),
+                );
+
+                // Same goes for the `base_fee_per_blob_gas`:
+                // > "[..] includes the next block after the newest of the returned range, because
+                // > this value can be derived from the newest block.
+                base_fee_per_blob_gas.push(last_header.next_block_blob_fee().unwrap_or_default());
+            };
+
+            Ok(FeeHistory {
+                base_fee_per_gas,
+                gas_used_ratio,
+                base_fee_per_blob_gas,
+                blob_gas_used_ratio,
+                oldest_block: start_block,
+                reward: reward_percentiles.map(|_| rewards),
+            })
+        }
+    }
+
+    /// Approximates reward at a given percentile for a specific block
+    /// Based on the configured resolution
+    fn approximate_percentile(&self, entry: &FeeHistoryEntry, requested_percentile: f64) -> u128 {
+        let resolution = self.fee_history_cache().resolution();
+        let rounded_percentile =
+            (requested_percentile * resolution as f64).round() / resolution as f64;
+        let clamped_percentile = rounded_percentile.clamp(0.0, 100.0);
+
+        // Calculate the index in the precomputed rewards array
+        let index = (clamped_percentile / (1.0 / resolution as f64)).round() as usize;
+        // Fetch the reward from the FeeHistoryEntry
+        entry.rewards.get(index).cloned().unwrap_or_default()
+    }
+}
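`next_block_base_fee` above applies the EIP-1559 update rule. A standalone sketch with the mainnet constants inlined for illustration (reth takes them from the chain spec's base-fee params instead):

```rust
// EIP-1559 constants: the base fee moves by at most 1/8 per block, and the
// elasticity multiplier of 2 puts the gas target at half the gas limit.
const BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 8;
const ELASTICITY_MULTIPLIER: u128 = 2;

fn next_block_base_fee(gas_used: u128, gas_limit: u128, base_fee: u128) -> u128 {
    let gas_target = gas_limit / ELASTICITY_MULTIPLIER;
    if gas_used == gas_target {
        base_fee
    } else if gas_used > gas_target {
        // Usage above target: raise proportionally, by at least 1 wei.
        let delta = (base_fee * (gas_used - gas_target)
            / gas_target
            / BASE_FEE_MAX_CHANGE_DENOMINATOR)
            .max(1);
        base_fee + delta
    } else {
        // Usage below target: lower proportionally, saturating at zero.
        let delta =
            base_fee * (gas_target - gas_used) / gas_target / BASE_FEE_MAX_CHANGE_DENOMINATOR;
        base_fee.saturating_sub(delta)
    }
}

fn main() {
    // A full block (30M used vs. a 15M target) raises the base fee by 12.5%.
    assert_eq!(next_block_base_fee(30_000_000, 30_000_000, 1_000_000_000), 1_125_000_000);
}
```

+
+/// Loads fee from database.
+///
+/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` fees RPC methods.
+pub trait LoadFee: LoadBlock {
+    /// Returns a handle for reading data from disk.
+    ///
+    /// Data access in default (L1) trait method implementations.
+    fn provider(&self) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider;
+
+    /// Returns a handle for reading data from memory.
+    ///
+    /// Data access in default (L1) trait method implementations.
+    fn cache(&self) -> &EthStateCache;
+
+    /// Returns a handle for reading gas price.
+    ///
+    /// Data access in default (L1) trait method implementations.
+    fn gas_oracle(&self) -> &GasPriceOracle<impl BlockReaderIdExt>;
+
+    /// Returns a handle for reading fee history data from memory.
+    ///
+    /// Data access in default (L1) trait method implementations.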
+ fn fee_history_cache(&self) -> &FeeHistoryCache; + + /// Returns the gas price if it is set, otherwise fetches a suggested gas price for legacy + /// transactions. + fn legacy_gas_price( + &self, + gas_price: Option, + ) -> impl Future> + Send { + async move { + match gas_price { + Some(gas_price) => Ok(gas_price), + None => { + // fetch a suggested gas price + self.gas_price().await + } + } + } + } + + /// Returns the EIP-1559 fees if they are set, otherwise fetches a suggested gas price for + /// EIP-1559 transactions. + /// + /// Returns (`max_fee`, `priority_fee`) + fn eip1559_fees( + &self, + max_fee_per_gas: Option, + max_priority_fee_per_gas: Option, + ) -> impl Future> + Send { + async move { + let max_fee_per_gas = match max_fee_per_gas { + Some(max_fee_per_gas) => max_fee_per_gas, + None => { + // fetch pending base fee + let base_fee = self + .block(BlockNumberOrTag::Pending.into()) + .await? + .ok_or(EthApiError::UnknownBlockNumber)? + .base_fee_per_gas + .ok_or_else(|| { + EthApiError::InvalidTransaction( + RpcInvalidTransactionError::TxTypeNotSupported, + ) + })?; + U256::from(base_fee) + } + }; + + let max_priority_fee_per_gas = match max_priority_fee_per_gas { + Some(max_priority_fee_per_gas) => max_priority_fee_per_gas, + None => self.suggested_priority_fee().await?, + }; + Ok((max_fee_per_gas, max_priority_fee_per_gas)) + } + } + + /// Returns the EIP-4844 blob fee if it is set, otherwise fetches a blob fee. + fn eip4844_blob_fee( + &self, + blob_fee: Option, + ) -> impl Future> + Send { + async move { + match blob_fee { + Some(blob_fee) => Ok(blob_fee), + None => self.blob_base_fee().await, + } + } + } + + /// Returns a suggestion for a gas price for legacy transactions. + /// + /// See also: + fn gas_price(&self) -> impl Future> + Send { + let header = self.block(BlockNumberOrTag::Latest.into()); + let suggested_tip = self.suggested_priority_fee(); + async move { + let (header, suggested_tip) = futures::try_join!(header, suggested_tip)?; + let base_fee = header.and_then(|h| h.base_fee_per_gas).unwrap_or_default(); + Ok(suggested_tip + U256::from(base_fee)) + } + } + + /// Returns a suggestion for a base fee for blob transactions. + fn blob_base_fee(&self) -> impl Future> + Send { + async move { + self.block(BlockNumberOrTag::Latest.into()) + .await? + .and_then(|h: reth_primitives::SealedBlock| h.next_block_blob_fee()) + .ok_or(EthApiError::ExcessBlobGasNotSet) + .map(U256::from) + } + } + + /// Returns a suggestion for the priority fee (the tip) + fn suggested_priority_fee(&self) -> impl Future> + Send + where + Self: 'static, + { + self.gas_oracle().suggest_tip_cap() + } +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/mod.rs b/crates/rpc/rpc-eth-api/src/helpers/mod.rs new file mode 100644 index 000000000..72e49077e --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/mod.rs @@ -0,0 +1,67 @@ +//! Behaviour needed to serve `eth_` RPC requests, divided into general database reads and +//! specific database access. +//! +//! Traits with `Load` prefix, read atomic data from database, e.g. a block or transaction. Any +//! database read done in more than one default `Eth` trait implementation, is defined in a `Load` +//! trait. +//! +//! Traits with `Eth` prefix, compose specific data needed to serve RPC requests in the `eth` +//! namespace. They use `Load` traits as building blocks. [`EthTransactions`] also writes data +//! (submits transactions). Based on the `eth_` request method semantics, request methods are +//! 
divided into: [`EthTransactions`], [`EthBlocks`], [`EthFees`], [`EthState`] and [`EthCall`].
+//! Default implementation of the `Eth` traits is done w.r.t. L1.
+//!
+//! [`EthApiServer`](crate::EthApiServer) is implemented for any type that implements
+//! all the `Eth` traits, e.g. `reth_rpc::EthApi`.
+
+pub mod block;
+pub mod blocking_task;
+pub mod call;
+pub mod fee;
+pub mod pending_block;
+pub mod receipt;
+pub mod signer;
+pub mod spec;
+pub mod state;
+pub mod trace;
+pub mod transaction;
+
+pub use block::{EthBlocks, LoadBlock};
+pub use blocking_task::SpawnBlocking;
+pub use call::{Call, EthCall};
+pub use fee::{EthFees, LoadFee};
+pub use pending_block::LoadPendingBlock;
+pub use receipt::LoadReceipt;
+pub use signer::EthSigner;
+pub use spec::EthApiSpec;
+pub use state::{EthState, LoadState};
+pub use trace::Trace;
+pub use transaction::{EthTransactions, LoadTransaction, UpdateRawTxForwarder};
+
+/// Extension trait that bundles traits needed for tracing transactions.
+pub trait TraceExt:
+    LoadTransaction + LoadBlock + LoadPendingBlock + SpawnBlocking + Trace + Call
+{
+}
+
+impl<T> TraceExt for T where T: LoadTransaction + LoadBlock + LoadPendingBlock + Trace + Call {}
+
+/// Helper trait to unify all `eth` rpc server building block traits, for simplicity.
+///
+/// This trait is automatically implemented for any type that implements all the `Eth` traits.
+pub trait FullEthApi:
+    EthApiSpec + EthTransactions + EthBlocks + EthState + EthCall + EthFees + Trace + LoadReceipt
+{
+}
+
+impl<T> FullEthApi for T where
+    T: EthApiSpec
+        + EthTransactions
+        + EthBlocks
+        + EthState
+        + EthCall
+        + EthFees
+        + Trace
+        + LoadReceipt
+{
+}
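`TraceExt` and `FullEthApi` above rely on an umbrella trait plus a blanket impl, so any type providing the building blocks gets the bundle for free. A toy illustration of that composition pattern, with made-up trait names rather than reth's:

```rust
// Two narrow "building block" traits.
trait Blocks {
    fn latest(&self) -> u64;
}
trait Fees {
    fn gas_price(&self) -> u64;
}

// Umbrella trait bundling the building blocks.
trait FullApi: Blocks + Fees {}

// Blanket impl: no per-type boilerplate needed.
impl<T> FullApi for T where T: Blocks + Fees {}

struct Node;
impl Blocks for Node {
    fn latest(&self) -> u64 { 100 }
}
impl Fees for Node {
    fn gas_price(&self) -> u64 { 7 }
}

// Consumers can now ask for the whole bundle with one bound.
fn serve(api: &impl FullApi) {
    println!("block {} gas {}", api.latest(), api.gas_price());
}

fn main() {
    serve(&Node);
}
```

diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs
similarity index 51%
rename from crates/rpc/rpc/src/eth/api/pending_block.rs
rename to crates/rpc/rpc-eth-api/src/helpers/pending_block.rs
index 4f9a616de..497b79341 100644
--- a/crates/rpc/rpc/src/eth/api/pending_block.rs
+++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs
@@ -1,63 +1,209 @@
-//! Support for building a pending block via local txpool.
+//! Loads a pending block from database. Helper trait for `eth_` block, transaction, call and trace
+//! RPC methods.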
-use crate::eth::error::{EthApiError, EthResult}; -use reth_chainspec::ChainSpec; -use reth_errors::ProviderError; +use std::time::{Duration, Instant}; + +use futures::Future; +use reth_chainspec::EthereumHardforks; +use reth_evm::{system_calls::pre_block_beacon_root_contract_call, ConfigureEvm, ConfigureEvmEnv}; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE, EMPTY_ROOT_HASH}, - proofs, - revm::env::tx_env_with_recovered, + proofs::calculate_transaction_root, revm_primitives::{ - BlockEnv, CfgEnvWithHandlerCfg, EVMError, Env, InvalidTransaction, ResultAndState, SpecId, + BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, + ResultAndState, SpecId, }, - Block, BlockId, BlockNumberOrTag, Header, IntoRecoveredTransaction, Receipt, Requests, - SealedBlockWithSenders, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, U256, + Block, BlockNumber, Header, IntoRecoveredTransaction, Receipt, Requests, + SealedBlockWithSenders, SealedHeader, TransactionSignedEcRecovered, B256, + EMPTY_OMMER_ROOT_HASH, U256, +}; +use reth_provider::{ + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory, }; -use reth_provider::{ChainSpecProvider, StateProviderFactory}; use reth_revm::{ - database::StateProviderDatabase, - state_change::{ - apply_beacon_root_contract_call, apply_blockhashes_update, - post_block_withdrawals_balance_increments, - }, + database::StateProviderDatabase, state_change::post_block_withdrawals_balance_increments, +}; +use reth_rpc_eth_types::{ + pending_block::pre_block_blockhashes_update, EthApiError, EthResult, PendingBlock, + PendingBlockEnv, PendingBlockEnvOrigin, }; use reth_transaction_pool::{BestTransactionsAttributes, TransactionPool}; -use revm::{db::states::bundle_state::BundleRetention, Database, DatabaseCommit, State}; -use revm_primitives::EnvWithHandlerCfg; -use std::time::Instant; - -/// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block -#[derive(Debug, Clone)] -pub(crate) struct PendingBlockEnv { - /// Configured [`CfgEnvWithHandlerCfg`] for the pending block. - pub(crate) cfg: CfgEnvWithHandlerCfg, - /// Configured [`BlockEnv`] for the pending block. - pub(crate) block_env: BlockEnv, - /// Origin block for the config - pub(crate) origin: PendingBlockEnvOrigin, -} +use revm::{db::states::bundle_state::BundleRetention, DatabaseCommit, State}; +use tokio::sync::Mutex; +use tracing::debug; + +use super::SpawnBlocking; -impl PendingBlockEnv { - /// Builds a pending block using the given client and pool. +/// Loads a pending block from database. +/// +/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. +#[auto_impl::auto_impl(&, Arc)] +pub trait LoadPendingBlock { + /// Returns a handle for reading data from disk. + /// + /// Data access in default (L1) trait method implementations. + fn provider( + &self, + ) -> impl BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory; + + /// Returns a handle for reading data from transaction pool. + /// + /// Data access in default (L1) trait method implementations. + fn pool(&self) -> impl TransactionPool; + + /// Returns a handle to the pending block. + /// + /// Data access in default (L1) trait method implementations. + fn pending_block(&self) -> &Mutex>; + + /// Returns a handle for reading evm config. + /// + /// Data access in default (L1) trait method implementations. 
+ fn evm_config(&self) -> &impl ConfigureEvm; + + /// Configures the [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the pending block + /// + /// If no pending block is available, this will derive it from the `latest` block + fn pending_block_env_and_cfg(&self) -> EthResult { + let origin: PendingBlockEnvOrigin = if let Some(pending) = + self.provider().pending_block_with_senders()? + { + PendingBlockEnvOrigin::ActualPending(pending) + } else { + // no pending block from the CL yet, so we use the latest block and modify the env + // values that we can + let latest = + self.provider().latest_header()?.ok_or_else(|| EthApiError::UnknownBlockNumber)?; + + let (mut latest_header, block_hash) = latest.split(); + // child block + latest_header.number += 1; + // assumed child block is in the next slot: 12s + latest_header.timestamp += 12; + // base fee of the child block + let chain_spec = self.provider().chain_spec(); + + latest_header.base_fee_per_gas = latest_header.next_block_base_fee( + chain_spec.base_fee_params_at_timestamp(latest_header.timestamp), + ); + + // update excess blob gas consumed above target + latest_header.excess_blob_gas = latest_header.next_block_excess_blob_gas(); + + // we're reusing the same block hash because we need this to lookup the block's state + let latest = SealedHeader::new(latest_header, block_hash); + + PendingBlockEnvOrigin::DerivedFromLatest(latest) + }; + + let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST); + + let mut block_env = BlockEnv::default(); + // Note: for the PENDING block we assume it is past the known merge block and thus this will + // not fail when looking up the total difficulty value for the blockenv. + self.provider().fill_env_with_header( + &mut cfg, + &mut block_env, + origin.header(), + self.evm_config().clone(), + )?; + + Ok(PendingBlockEnv::new(cfg, block_env, origin)) + } + + /// Returns the locally built pending block + fn local_pending_block( + &self, + ) -> impl Future>> + Send + where + Self: SpawnBlocking, + { + async move { + let pending = self.pending_block_env_and_cfg()?; + if pending.origin.is_actual_pending() { + return Ok(pending.origin.into_actual_pending()) + } + + let mut lock = self.pending_block().lock().await; + + let now = Instant::now(); + + // check if the block is still good + if let Some(pending_block) = lock.as_ref() { + // this is guaranteed to be the `latest` header + if pending.block_env.number.to::() == pending_block.block.number && + pending.origin.header().hash() == pending_block.block.parent_hash && + now <= pending_block.expires_at + { + return Ok(Some(pending_block.block.clone())) + } + } + + // no pending block from the CL yet, so we need to build it ourselves via txpool + let pending_block = match self + .spawn_blocking_io(move |this| { + // we rebuild the block + this.build_block(pending) + }) + .await + { + Ok(block) => block, + Err(err) => { + debug!(target: "rpc", "Failed to build pending block: {:?}", err); + return Ok(None) + } + }; + + let now = Instant::now(); + *lock = Some(PendingBlock::new(pending_block.clone(), now + Duration::from_secs(1))); + + Ok(Some(pending_block)) + } + } + + /// Assembles a [`Receipt`] for a transaction, based on its [`ExecutionResult`]. 
+ fn assemble_receipt( + &self, + tx: &TransactionSignedEcRecovered, + result: ExecutionResult, + cumulative_gas_used: u64, + ) -> Receipt { + Receipt { + tx_type: tx.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.into_logs().into_iter().map(Into::into).collect(), + #[cfg(feature = "optimism")] + deposit_nonce: None, + #[cfg(feature = "optimism")] + deposit_receipt_version: None, + } + } + + /// Calculates receipts root in block building. + /// + /// Panics if block is not in the [`ExecutionOutcome`]'s block range. + fn receipts_root( + &self, + _block_env: &BlockEnv, + execution_outcome: &ExecutionOutcome, + block_number: BlockNumber, + ) -> B256 { + execution_outcome.receipts_root_slow(block_number).expect("Block is present") + } + + /// Builds a pending block using the configured provider and pool. /// /// If the origin is the actual pending block, the block is built with withdrawals. /// /// After Cancun, if the origin is the actual pending block, the block includes the EIP-4788 pre /// block contract call using the parent beacon block root received from the CL. - pub(crate) fn build_block( - self, - client: &Client, - pool: &Pool, - ) -> EthResult - where - Client: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, - { - let Self { cfg, block_env, origin } = self; + fn build_block(&self, env: PendingBlockEnv) -> EthResult { + let PendingBlockEnv { cfg, block_env, origin } = env; let parent_hash = origin.build_target_hash(); - let state_provider = client.history_by_block_hash(parent_hash)?; + let state_provider = self.provider().history_by_block_hash(parent_hash)?; let state = StateProviderDatabase::new(state_provider); let mut db = State::builder().with_database(state).with_bundle_update().build(); @@ -69,10 +215,11 @@ impl PendingBlockEnv { let mut executed_txs = Vec::new(); let mut senders = Vec::new(); - let mut best_txs = pool.best_transactions_with_attributes(BestTransactionsAttributes::new( - base_fee, - block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), - )); + let mut best_txs = + self.pool().best_transactions_with_attributes(BestTransactionsAttributes::new( + base_fee, + block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), + )); let (withdrawals, withdrawals_root) = match origin { PendingBlockEnvOrigin::ActualPending(ref block) => { @@ -81,19 +228,22 @@ impl PendingBlockEnv { PendingBlockEnvOrigin::DerivedFromLatest(_) => (None, None), }; - let chain_spec = client.chain_spec(); + let chain_spec = self.provider().chain_spec(); let parent_beacon_block_root = if origin.is_actual_pending() { // apply eip-4788 pre block contract call if we got the block from the CL with the real // parent beacon block root pre_block_beacon_root_contract_call( &mut db, + self.evm_config(), chain_spec.as_ref(), - block_number, &cfg, &block_env, + block_number, + block_env.timestamp.to::(), origin.header().parent_beacon_block_root, - )?; + ) + .map_err(|err| EthApiError::Internal(err.into()))?; origin.header().parent_beacon_block_root } else { None @@ -144,8 +294,11 @@ impl PendingBlockEnv { } // Configure the environment for the block. 
- let env = - Env::boxed(cfg.cfg_env.clone(), block_env.clone(), tx_env_with_recovered(&tx)); + let env = Env::boxed( + cfg.cfg_env.clone(), + block_env.clone(), + Self::evm_config(self).tx_env(&tx), + ); let mut evm = revm::Evm::builder().with_env(env).with_db(&mut db).build(); @@ -192,16 +345,7 @@ impl PendingBlockEnv { cumulative_gas_used += gas_used; // Push transaction changeset and calculate header bloom filter for receipt. - receipts.push(Some(Receipt { - tx_type: tx.tx_type(), - success: result.is_success(), - cumulative_gas_used, - logs: result.into_logs().into_iter().map(Into::into).collect(), - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - })); + receipts.push(Some(self.assemble_receipt(&tx, result, cumulative_gas_used))); // append transaction to the list of executed transactions let (tx, sender) = tx.to_components(); @@ -229,18 +373,7 @@ impl PendingBlockEnv { Vec::new(), ); - #[cfg(feature = "optimism")] - let receipts_root = execution_outcome - .optimism_receipts_root_slow( - block_number, - chain_spec.as_ref(), - block_env.timestamp.to::(), - ) - .expect("Block is present"); - - #[cfg(not(feature = "optimism"))] - let receipts_root = - execution_outcome.receipts_root_slow(block_number).expect("Block is present"); + let receipts_root = self.receipts_root(&block_env, &execution_outcome, block_number); let logs_bloom = execution_outcome.block_logs_bloom(block_number).expect("Block is present"); @@ -250,7 +383,7 @@ impl PendingBlockEnv { let state_root = state_provider.state_root(execution_outcome.state())?; // create the block header - let transactions_root = proofs::calculate_transaction_root(&executed_txs); + let transactions_root = calculate_transaction_root(&executed_txs); // check if cancun is activated to set eip4844 header fields correctly let blob_gas_used = @@ -289,142 +422,12 @@ impl PendingBlockEnv { requests_root, }; + // sidecars should be queried by `eth_getBlobSidecars` + let sidecars = None; + // seal the block - let block = Block { header, body: executed_txs, ommers: vec![], withdrawals, requests }; + let block = + Block { header, body: executed_txs, ommers: vec![], withdrawals, sidecars, requests }; Ok(SealedBlockWithSenders { block: block.seal_slow(), senders }) } } - -/// Apply the [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) pre block contract call. -/// -/// This constructs a new [Evm](revm::Evm) with the given DB, and environment -/// [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] to execute the pre block contract call. -/// -/// This uses [`apply_beacon_root_contract_call`] to ultimately apply the beacon root contract state -/// change. 
-fn pre_block_beacon_root_contract_call( - db: &mut DB, - chain_spec: &ChainSpec, - block_number: u64, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, - parent_beacon_block_root: Option, -) -> EthResult<()> -where - DB::Error: std::fmt::Display, -{ - // apply pre-block EIP-4788 contract call - let mut evm_pre_block = revm::Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build(); - - // initialize a block from the env, because the pre block call needs the block itself - apply_beacon_root_contract_call( - chain_spec, - initialized_block_env.timestamp.to::(), - block_number, - parent_beacon_block_root, - &mut evm_pre_block, - ) - .map_err(|err| EthApiError::Internal(err.into())) -} - -/// Apply the [EIP-2935](https://eips.ethereum.org/EIPS/eip-2935) pre block state transitions. -/// -/// This constructs a new [Evm](revm::Evm) with the given DB, and environment -/// [`CfgEnvWithHandlerCfg`] and [`BlockEnv`]. -/// -/// This uses [`apply_blockhashes_update`]. -fn pre_block_blockhashes_update + DatabaseCommit>( - db: &mut DB, - chain_spec: &ChainSpec, - initialized_block_env: &BlockEnv, - block_number: u64, - parent_block_hash: B256, -) -> EthResult<()> -where - DB::Error: std::fmt::Display, -{ - apply_blockhashes_update( - db, - chain_spec, - initialized_block_env.timestamp.to::(), - block_number, - parent_block_hash, - ) - .map_err(|err| EthApiError::Internal(err.into())) -} - -/// The origin for a configured [`PendingBlockEnv`] -#[derive(Clone, Debug)] -pub(crate) enum PendingBlockEnvOrigin { - /// The pending block as received from the CL. - ActualPending(SealedBlockWithSenders), - /// The _modified_ header of the latest block. - /// - /// This derives the pending state based on the latest header by modifying: - /// - the timestamp - /// - the block number - /// - fees - DerivedFromLatest(SealedHeader), -} - -impl PendingBlockEnvOrigin { - /// Returns true if the origin is the actual pending block as received from the CL. - pub(crate) const fn is_actual_pending(&self) -> bool { - matches!(self, Self::ActualPending(_)) - } - - /// Consumes the type and returns the actual pending block. - pub(crate) fn into_actual_pending(self) -> Option { - match self { - Self::ActualPending(block) => Some(block), - _ => None, - } - } - - /// Returns the [`BlockId`] that represents the state of the block. - /// - /// If this is the actual pending block, the state is the "Pending" tag, otherwise we can safely - /// identify the block by its hash (latest block). - pub(crate) fn state_block_id(&self) -> BlockId { - match self { - Self::ActualPending(_) => BlockNumberOrTag::Pending.into(), - Self::DerivedFromLatest(header) => BlockId::Hash(header.hash().into()), - } - } - - /// Returns the hash of the block the pending block should be built on. - /// - /// For the [`PendingBlockEnvOrigin::ActualPending`] this is the parent hash of the block. - /// For the [`PendingBlockEnvOrigin::DerivedFromLatest`] this is the hash of the _latest_ - /// header. - fn build_target_hash(&self) -> B256 { - match self { - Self::ActualPending(block) => block.parent_hash, - Self::DerivedFromLatest(header) => header.hash(), - } - } - - /// Returns the header this pending block is based on. 
-    pub(crate) fn header(&self) -> &SealedHeader {
-        match self {
-            Self::ActualPending(block) => &block.header,
-            Self::DerivedFromLatest(header) => header,
-        }
-    }
-}
-
-/// In memory pending block for `pending` tag
-#[derive(Debug)]
-pub(crate) struct PendingBlock {
-    /// The cached pending block
-    pub(crate) block: SealedBlockWithSenders,
-    /// Timestamp when the pending block is considered outdated
-    pub(crate) expires_at: Instant,
-}
diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs
new file mode 100644
index 000000000..5cd6c03c4
--- /dev/null
+++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs
@@ -0,0 +1,36 @@
+//! Loads a receipt from database. Helper trait for `eth_` block and transaction RPC methods, that
+//! loads receipt data w.r.t. network.
+
+use futures::Future;
+use reth_primitives::{Receipt, TransactionMeta, TransactionSigned};
+use reth_rpc_eth_types::{EthApiError, EthResult, EthStateCache, ReceiptBuilder};
+use reth_rpc_types::AnyTransactionReceipt;
+
+/// Assembles transaction receipt data w.r.t. network.
+///
+/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods.
+#[auto_impl::auto_impl(&, Arc)]
+pub trait LoadReceipt: Send + Sync {
+    /// Returns a handle for reading data from memory.
+    ///
+    /// Data access in default (L1) trait method implementations.
+    fn cache(&self) -> &EthStateCache;
+
+    /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`.
+    fn build_transaction_receipt(
+        &self,
+        tx: TransactionSigned,
+        meta: TransactionMeta,
+        receipt: Receipt,
+    ) -> impl Future<Output = EthResult<AnyTransactionReceipt>> + Send {
+        async move {
+            // get all receipts for the block
+            let all_receipts = match self.cache().get_receipts(meta.block_hash).await? {
+                Some(recpts) => recpts,
+                None => return Err(EthApiError::UnknownBlockNumber),
+            };
+
+            Ok(ReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build())
+        }
+    }
+}
diff --git a/crates/rpc/rpc-eth-api/src/helpers/signer.rs b/crates/rpc/rpc-eth-api/src/helpers/signer.rs
new file mode 100644
index 000000000..2a75d9abb
--- /dev/null
+++ b/crates/rpc/rpc-eth-api/src/helpers/signer.rs
@@ -0,0 +1,39 @@
+//! An abstraction over ethereum signers.
+
+use std::result;
+
+use alloy_dyn_abi::TypedData;
+use dyn_clone::DynClone;
+use reth_primitives::{Address, Signature, TransactionSigned};
+use reth_rpc_eth_types::SignError;
+use reth_rpc_types::TypedTransactionRequest;
+
+/// Result returned by [`EthSigner`] methods.
+pub type Result<T> = result::Result<T, SignError>;
+
+/// An Ethereum Signer used via RPC.
+#[async_trait::async_trait]
+pub trait EthSigner: Send + Sync + DynClone {
+    /// Returns the available accounts for this signer.
+    fn accounts(&self) -> Vec<Address>;
+
+    /// Returns `true` if this signer can sign for the given address.
+    fn is_signer_for(&self, addr: &Address) -> bool {
+        self.accounts().contains(addr)
+    }
+
+    /// Returns the signature for the given message.
+    async fn sign(&self, address: Address, message: &[u8]) -> Result<Signature>;
+
+    /// Signs a transaction request using the given account in the request.
+    fn sign_transaction(
+        &self,
+        request: TypedTransactionRequest,
+        address: &Address,
+    ) -> Result<TransactionSigned>;
+
+    /// Encodes and signs the typed data according to EIP-712. Payload must implement the Eip712
+    /// trait.
+    fn sign_typed_data(&self, address: Address, payload: &TypedData) -> Result<Signature>;
+}
+
+dyn_clone::clone_trait_object!(EthSigner);
diff --git a/crates/rpc/rpc-eth-api/src/helpers/spec.rs b/crates/rpc/rpc-eth-api/src/helpers/spec.rs
new file mode 100644
index 000000000..63722e376
--- /dev/null
+++ b/crates/rpc/rpc-eth-api/src/helpers/spec.rs
@@ -0,0 +1,31 @@
+//! Loads chain metadata.
+
+use futures::Future;
+use reth_chainspec::ChainInfo;
+use reth_errors::RethResult;
+use reth_primitives::{Address, U64};
+use reth_rpc_types::SyncStatus;
+
+/// `Eth` API trait.
+///
+/// Defines core functionality of the `eth` API implementation.
+#[auto_impl::auto_impl(&, Arc)]
+pub trait EthApiSpec: Send + Sync {
+    /// Returns the current ethereum protocol version.
+    fn protocol_version(&self) -> impl Future<Output = RethResult<U64>> + Send;
+
+    /// Returns the chain id.
+    fn chain_id(&self) -> U64;
+
+    /// Returns provider chain info.
+    fn chain_info(&self) -> RethResult<ChainInfo>;
+
+    /// Returns a list of addresses owned by provider.
+    fn accounts(&self) -> Vec<Address>;
+
+    /// Returns `true` if the network is undergoing sync.
+    fn is_syncing(&self) -> bool;
+
+    /// Returns the [`SyncStatus`] of the network.
+    fn sync_status(&self) -> RethResult<SyncStatus>;
+}
diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs
new file mode 100644
index 000000000..4b0d62925
--- /dev/null
+++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs
@@ -0,0 +1,252 @@
+//! Loads chain state from database. Helper trait for `eth_` block, transaction, call and trace
+//! RPC methods.
+
+use futures::Future;
+use reth_errors::RethError;
+use reth_evm::ConfigureEvmEnv;
+use reth_primitives::{Address, BlockId, Bytes, Header, B256, U256};
+use reth_provider::{BlockIdReader, StateProvider, StateProviderBox, StateProviderFactory};
+use reth_rpc_eth_types::{
+    EthApiError, EthResult, EthStateCache, PendingBlockEnv, RpcInvalidTransactionError,
+};
+use reth_rpc_types::{serde_helpers::JsonStorageKey, EIP1186AccountProofResponse};
+use reth_rpc_types_compat::proof::from_primitive_account_proof;
+use reth_transaction_pool::{PoolTransaction, TransactionPool};
+use revm::db::BundleState;
+use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId};
+
+use super::{EthApiSpec, LoadPendingBlock, SpawnBlocking};
+
+/// Helper methods for `eth_` methods relating to state (accounts).
+pub trait EthState: LoadState + SpawnBlocking {
+    /// Returns the maximum number of blocks into the past for generating state proofs.
+    fn max_proof_window(&self) -> u64;
+
+    /// Returns the number of transactions sent from an address at the given block identifier.
+    ///
+    /// If this is [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this will
+    /// look up the highest transaction in pool and return the next nonce (highest + 1).
+    fn transaction_count(
+        &self,
+        address: Address,
+        block_id: Option<BlockId>,
+    ) -> impl Future<Output = EthResult<U256>> + Send {
+        LoadState::transaction_count(self, address, block_id)
+    }
+
+    /// Returns the code of the given account, at the given block number.
+    fn get_code(
+        &self,
+        address: Address,
+        block_id: Option<BlockId>,
+    ) -> impl Future<Output = EthResult<Bytes>> + Send {
+        self.spawn_blocking_io(move |this| {
+            Ok(this
+                .state_at_block_id_or_latest(block_id)?
+                .account_code(address)?
+                .unwrap_or_default()
+                .original_bytes())
+        })
+    }
+
+    /// Returns the balance of the given account, at the given block number.
+    fn balance(
+        &self,
+        address: Address,
+        block_id: Option<BlockId>,
+    ) -> impl Future<Output = EthResult<U256>> + Send {
+        self.spawn_blocking_io(move |this| {
+            Ok(this
+                .state_at_block_id_or_latest(block_id)?
+                .account_balance(address)?
+                .unwrap_or_default())
+        })
+    }
+
+    /// Returns the value stored at the given key for the given account, at the given block
+    /// number.
+    fn storage_at(
+        &self,
+        address: Address,
+        index: JsonStorageKey,
+        block_id: Option<BlockId>,
+    ) -> impl Future<Output = EthResult<B256>> + Send {
+        self.spawn_blocking_io(move |this| {
+            Ok(B256::new(
+                this.state_at_block_id_or_latest(block_id)?
+                    .storage(address, index.0)?
+                    .unwrap_or_default()
+                    .to_be_bytes(),
+            ))
+        })
+    }
+
+    /// Returns the values stored for the given account, with a Merkle proof, at the given block
+    /// number.
+    fn get_proof(
+        &self,
+        address: Address,
+        keys: Vec<JsonStorageKey>,
+        block_id: Option<BlockId>,
+    ) -> EthResult<impl Future<Output = EthResult<EIP1186AccountProofResponse>> + Send>
+    where
+        Self: EthApiSpec,
+    {
+        let chain_info = self.chain_info()?;
+        let block_id = block_id.unwrap_or_default();
+
+        // Check whether the distance to the block exceeds the maximum configured window.
+        let block_number = self
+            .provider()
+            .block_number_for_id(block_id)?
+            .ok_or(EthApiError::UnknownBlockNumber)?;
+        let max_window = self.max_proof_window();
+        if chain_info.best_number.saturating_sub(block_number) > max_window {
+            return Err(EthApiError::ExceedsMaxProofWindow)
+        }
+
+        Ok(async move {
+            let _permit = self
+                .acquire_owned()
+                .await
+                .map_err(|err| EthApiError::Internal(RethError::other(err)))?;
+            self.spawn_blocking_io(move |this| {
+                let state = this.state_at_block_id(block_id)?;
+                let storage_keys = keys.iter().map(|key| key.0).collect::<Vec<B256>>();
+                let proof = state.proof(&BundleState::default(), address, &storage_keys)?;
+                Ok(from_primitive_account_proof(proof))
+            })
+            .await
+        })
+    }
+}
+
+/// Loads state from database.
+///
+/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` state RPC methods.
+pub trait LoadState {
+    /// Returns a handle for reading state from database.
+    ///
+    /// Data access in default trait method implementations.
+    fn provider(&self) -> impl StateProviderFactory;
+
+    /// Returns a handle for reading data from memory.
+    ///
+    /// Data access in default (L1) trait method implementations.
+    fn cache(&self) -> &EthStateCache;
+
+    /// Returns a handle for reading data from transaction pool.
+    ///
+    /// Data access in default trait method implementations.
+    fn pool(&self) -> impl TransactionPool;
+
+    /// Returns the state at the given block hash.
+    fn state_at_hash(&self, block_hash: B256) -> EthResult<StateProviderBox> {
+        Ok(self.provider().history_by_block_hash(block_hash)?)
+    }
+
+    /// Returns the state at the given [`BlockId`] enum.
+    ///
+    /// Note: if not [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this
+    /// will only return canonical state. See also
+    fn state_at_block_id(&self, at: BlockId) -> EthResult<StateProviderBox> {
+        Ok(self.provider().state_by_block_id(at)?)
+    }
+
+    /// Returns the _latest_ state.
+    fn latest_state(&self) -> EthResult<StateProviderBox> {
+        Ok(self.provider().latest()?)
+    }
+
+    /// Returns the state at the given [`BlockId`] enum or the latest.
+    ///
+    /// Convenience function that interprets `None` as `BlockId::Number(BlockNumberOrTag::Latest)`.
+    fn state_at_block_id_or_latest(
+        &self,
+        block_id: Option<BlockId>,
+    ) -> EthResult<StateProviderBox> {
+        if let Some(block_id) = block_id {
+            self.state_at_block_id(block_id)
+        } else {
+            Ok(self.latest_state()?)
+        }
+    }
+
+    /// Returns the revm evm env for the requested [`BlockId`].
+    ///
+    /// In addition to the env, this returns the [`BlockId`] of the block the env was configured
+    /// for.
+    /// If the [`BlockId`] is pending, this will return the "Pending" tag, otherwise this returns
+    /// the hash of the exact block.
+    fn evm_env_at(
+        &self,
+        at: BlockId,
+    ) -> impl Future<Output = EthResult<(CfgEnvWithHandlerCfg, BlockEnv, BlockId)>> + Send
+    where
+        Self: LoadPendingBlock + SpawnBlocking,
+    {
+        async move {
+            if at.is_pending() {
+                let PendingBlockEnv { cfg, block_env, origin } =
+                    self.pending_block_env_and_cfg()?;
+                Ok((cfg, block_env, origin.state_block_id()))
+            } else {
+                // Use cached values if there is no pending block
+                let block_hash = LoadPendingBlock::provider(self)
+                    .block_hash_for_id(at)?
+ .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + let (cfg, env) = self.cache().get_evm_env(block_hash).await?; + Ok((cfg, env, block_hash.into())) + } + } + } + + /// Returns the revm evm env for the raw block header + /// + /// This is used for tracing raw blocks + fn evm_env_for_raw_block( + &self, + header: &Header, + ) -> impl Future> + Send + where + Self: LoadPendingBlock + SpawnBlocking, + { + async move { + // get the parent config first + let (cfg, mut block_env, _) = self.evm_env_at(header.parent_hash.into()).await?; + + let after_merge = cfg.handler_cfg.spec_id >= SpecId::MERGE; + self.evm_config().fill_block_env(&mut block_env, header, after_merge); + + Ok((cfg, block_env)) + } + } + + /// Returns the number of transactions sent from an address at the given block identifier. + /// + /// If this is [`BlockNumberOrTag::Pending`](reth_primitives::BlockNumberOrTag) then this will + /// look up the highest transaction in pool and return the next nonce (highest + 1). + fn transaction_count( + &self, + address: Address, + block_id: Option, + ) -> impl Future> + Send + where + Self: SpawnBlocking, + { + self.spawn_blocking_io(move |this| { + if block_id == Some(BlockId::pending()) { + let address_txs = this.pool().get_transactions_by_sender(address); + if let Some(highest_nonce) = + address_txs.iter().map(|item| item.transaction.nonce()).max() + { + let tx_count = highest_nonce + .checked_add(1) + .ok_or(RpcInvalidTransactionError::NonceMaxValue)?; + return Ok(U256::from(tx_count)) + } + } + + let state = this.state_at_block_id_or_latest(block_id)?; + Ok(U256::from(state.account_nonce(address)?.unwrap_or_default())) + }) + } +} diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs new file mode 100644 index 000000000..d48e566ed --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -0,0 +1,415 @@ +//! Loads a pending block from database. Helper trait for `eth_` call and trace RPC methods. + +use futures::Future; +use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; +use reth_primitives::B256; +use reth_revm::database::StateProviderDatabase; +use reth_rpc_eth_types::{ + cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, + EthApiError, EthResult, +}; +use reth_rpc_types::{BlockId, TransactionInfo}; +use revm::{db::CacheDB, Database, DatabaseCommit, GetInspector, Inspector}; +use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; +use revm_primitives::{EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndState}; + +use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; + +/// Executes CPU heavy tasks. +pub trait Trace: LoadState { + /// Returns a handle for reading evm config. + /// + /// Data access in default (L1) trait method implementations. + fn evm_config(&self) -> &impl ConfigureEvm; + + /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state + /// changes. + fn inspect( + &self, + db: DB, + env: EnvWithHandlerCfg, + inspector: I, + ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> + where + DB: Database, + ::Error: Into, + I: GetInspector, + { + self.inspect_and_return_db(db, env, inspector).map(|(res, env, _)| (res, env)) + } + + /// Same as [`inspect`](Self::inspect) but also returns the database again. 
+    ///
+    /// Even though [Database] is also implemented on `&mut`,
+    /// this is still useful if there are certain trait bounds on the Inspector's database generic
+    /// type.
+    fn inspect_and_return_db<DB, I>(
+        &self,
+        db: DB,
+        env: EnvWithHandlerCfg,
+        inspector: I,
+    ) -> EthResult<(ResultAndState, EnvWithHandlerCfg, DB)>
+    where
+        DB: Database,
+        <DB as Database>::Error: Into<EthApiError>,
+        I: GetInspector<DB>,
+    {
+        let mut evm = self.evm_config().evm_with_env_and_inspector(db, env, inspector);
+        let res = evm.transact()?;
+        let (db, env) = evm.into_db_and_env_with_handler_cfg();
+        Ok((res, env, db))
+    }
+
+    /// Executes the transaction on top of the given [`BlockId`] with a tracer configured by the
+    /// config.
+    ///
+    /// The callback is then called with the [`TracingInspector`] and the [`ResultAndState`] after
+    /// the configured [`EnvWithHandlerCfg`] was inspected.
+    ///
+    /// Caution: this is blocking.
+    fn trace_at<F, R>(
+        &self,
+        env: EnvWithHandlerCfg,
+        config: TracingInspectorConfig,
+        at: BlockId,
+        f: F,
+    ) -> EthResult<R>
+    where
+        Self: Call,
+        F: FnOnce(TracingInspector, ResultAndState) -> EthResult<R>,
+    {
+        self.with_state_at_block(at, |state| {
+            let mut db = CacheDB::new(StateProviderDatabase::new(state));
+            let mut inspector = TracingInspector::new(config);
+            let (res, _) = self.inspect(&mut db, env, &mut inspector)?;
+            f(inspector, res)
+        })
+    }
+
+    /// Same as [`trace_at`](Self::trace_at) but also provides the used database to the callback.
+    ///
+    /// Executes the transaction on top of the given [`BlockId`] with a tracer configured by the
+    /// config.
+    ///
+    /// The callback is then called with the [`TracingInspector`] and the [`ResultAndState`] after
+    /// the configured [`EnvWithHandlerCfg`] was inspected.
+    fn spawn_trace_at_with_state<F, R>(
+        &self,
+        env: EnvWithHandlerCfg,
+        config: TracingInspectorConfig,
+        at: BlockId,
+        f: F,
+    ) -> impl Future<Output = EthResult<R>> + Send
+    where
+        Self: LoadPendingBlock + Call,
+        F: FnOnce(TracingInspector, ResultAndState, StateCacheDb<'_>) -> EthResult<R>
+            + Send
+            + 'static,
+        R: Send + 'static,
+    {
+        let this = self.clone();
+        self.spawn_with_state_at_block(at, move |state| {
+            let mut db = CacheDB::new(StateProviderDatabase::new(state));
+            let mut inspector = TracingInspector::new(config);
+            let (res, _) = this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?;
+            f(inspector, res, db)
+        })
+    }
+
+    /// Retrieves the transaction if it exists and returns its trace.
+    ///
+    /// Before the transaction is traced, all previous transactions in the block are applied to
+    /// the state by executing them first.
+    /// The callback `f` is invoked with the [`ResultAndState`] after the transaction was executed
+    /// and the database that points to the beginning of the transaction.
+    ///
+    /// Note: Implementers should use a threadpool where blocking is allowed, such as
+    /// [`BlockingTaskPool`](reth_tasks::pool::BlockingTaskPool).
+    fn spawn_trace_transaction_in_block<F, R>(
+        &self,
+        hash: B256,
+        config: TracingInspectorConfig,
+        f: F,
+    ) -> impl Future<Output = EthResult<Option<R>>> + Send
+    where
+        Self: LoadPendingBlock + LoadTransaction + Call,
+        F: FnOnce(
+                TransactionInfo,
+                TracingInspector,
+                ResultAndState,
+                StateCacheDb<'_>,
+            ) -> EthResult<R>
+            + Send
+            + 'static,
+        R: Send + 'static,
+    {
+        self.spawn_trace_transaction_in_block_with_inspector(hash, TracingInspector::new(config), f)
+    }
+
+    /// Retrieves the transaction if it exists and returns its trace.
+    ///
+    /// Before the transaction is traced, all previous transactions in the block are applied to
+    /// the state by executing them first.
+    /// The callback `f` is invoked with the [`ResultAndState`] after the transaction was executed
+    /// and the database that points to the beginning of the transaction.
+    ///
+    /// Note: Implementers should use a threadpool where blocking is allowed, such as
+    /// [`BlockingTaskPool`](reth_tasks::pool::BlockingTaskPool).
+    fn spawn_trace_transaction_in_block_with_inspector<Insp, F, R>(
+        &self,
+        hash: B256,
+        mut inspector: Insp,
+        f: F,
+    ) -> impl Future<Output = EthResult<Option<R>>> + Send
+    where
+        Self: LoadPendingBlock + LoadTransaction + Call,
+        F: FnOnce(TransactionInfo, Insp, ResultAndState, StateCacheDb<'_>) -> EthResult<R>
+            + Send
+            + 'static,
+        Insp: for<'a, 'b> Inspector<StateCacheDbRefMutWrapper<'a, 'b>> + Send + 'static,
+        R: Send + 'static,
+    {
+        async move {
+            let (transaction, block) = match self.transaction_and_block(hash).await? {
+                None => return Ok(None),
+                Some(res) => res,
+            };
+            let (tx, tx_info) = transaction.split();
+
+            let (cfg, block_env, _) = self.evm_env_at(block.hash().into()).await?;
+
+            // we need to get the state of the parent block because we're essentially replaying the
+            // block the transaction is included in
+            let parent_block = block.parent_hash;
+            let block_txs = block.into_transactions_ecrecovered();
+
+            let this = self.clone();
+            self.spawn_with_state_at_block(parent_block.into(), move |state| {
+                let mut db = CacheDB::new(StateProviderDatabase::new(state));
+
+                // replay all transactions prior to the targeted transaction
+                this.replay_transactions_until(
+                    &mut db,
+                    cfg.clone(),
+                    block_env.clone(),
+                    block_txs,
+                    tx.hash,
+                )?;
+
+                let env = EnvWithHandlerCfg::new_with_cfg_env(
+                    cfg,
+                    block_env,
+                    Call::evm_config(&this).tx_env(&tx),
+                );
+                let (res, _) =
+                    this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?;
+                f(tx_info, inspector, res, db)
+            })
+            .await
+            .map(Some)
+        }
+    }
+
+    /// Executes all transactions of a block up to a given index.
+    ///
+    /// If a `highest_index` is given, this will only execute the first `highest_index`
+    /// transactions, in other words, it will stop executing transactions after the
+    /// `highest_index`th transaction. If `highest_index` is `None`, all transactions
+    /// are executed.
+    fn trace_block_until<F, R>(
+        &self,
+        block_id: BlockId,
+        highest_index: Option<u64>,
+        config: TracingInspectorConfig,
+        f: F,
+    ) -> impl Future<Output = EthResult<Option<Vec<R>>>> + Send
+    where
+        Self: LoadBlock,
+        F: Fn(
+                TransactionInfo,
+                TracingInspector,
+                ExecutionResult,
+                &EvmState,
+                &StateCacheDb<'_>,
+            ) -> EthResult<R>
+            + Send
+            + 'static,
+        R: Send + 'static,
+    {
+        self.trace_block_until_with_inspector(
+            block_id,
+            highest_index,
+            move || TracingInspector::new(config),
+            f,
+        )
+    }
+
+    /// Executes all transactions of a block.
+    ///
+    /// If a `highest_index` is given, this will only execute the first `highest_index`
+    /// transactions, in other words, it will stop executing transactions after the
+    /// `highest_index`th transaction.
+    ///
+    /// Note: This expects the tx index to be 0-indexed, so the first transaction is at index 0.
+    ///
+    /// This accepts an `inspector_setup` closure that returns the inspector to be used for
+    /// tracing the transactions.
+    fn trace_block_until_with_inspector<Setup, Insp, F, R>(
+        &self,
+        block_id: BlockId,
+        highest_index: Option<u64>,
+        mut inspector_setup: Setup,
+        f: F,
+    ) -> impl Future<Output = EthResult<Option<Vec<R>>>> + Send
+    where
+        Self: LoadBlock,
+        F: Fn(TransactionInfo, Insp, ExecutionResult, &EvmState, &StateCacheDb<'_>) -> EthResult<R>
+            + Send
+            + 'static,
+        Setup: FnMut() -> Insp + Send + 'static,
+        Insp: for<'a, 'b> Inspector<StateCacheDbRefMutWrapper<'a, 'b>> + Send + 'static,
+        R: Send + 'static,
+    {
+        async move {
+            let ((cfg, block_env, _), block) =
+                futures::try_join!(self.evm_env_at(block_id), self.block_with_senders(block_id))?;
+
+            let Some(block) = block else { return Ok(None) };
+
+            if block.body.is_empty() {
+                // nothing to trace
+                return Ok(Some(Vec::new()))
+            }
+
+            // replay all transactions of the block
+            self.spawn_tracing(move |this| {
+                // we need to get the state of the parent block because we're replaying this block
+                // on top of its parent block's state
+                let state_at = block.parent_hash;
+                let block_hash = block.hash();
+
+                let block_number = block_env.number.saturating_to::<u64>();
+                let base_fee = block_env.basefee.saturating_to::<u128>();
+
+                // prepare transactions, we do everything upfront to reduce time spent with open
+                // state
+                let max_transactions = highest_index.map_or(block.body.len(), |highest| {
+                    // we need + 1 because the index is 0-based
+                    highest as usize + 1
+                });
+                let mut results = Vec::with_capacity(max_transactions);
+
+                let mut transactions = block
+                    .into_transactions_ecrecovered()
+                    .take(max_transactions)
+                    .enumerate()
+                    .map(|(idx, tx)| {
+                        let tx_info = TransactionInfo {
+                            hash: Some(tx.hash()),
+                            index: Some(idx as u64),
+                            block_hash: Some(block_hash),
+                            block_number: Some(block_number),
+                            base_fee: Some(base_fee),
+                        };
+                        let tx_env = Trace::evm_config(&this).tx_env(&tx);
+                        (tx_info, tx_env)
+                    })
+                    .peekable();
+
+                // now get the state
+                let state = this.state_at_block_id(state_at.into())?;
+                let mut db =
+                    CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state)));
+
+                while let Some((tx_info, tx)) = transactions.next() {
+                    let env =
+                        EnvWithHandlerCfg::new_with_cfg_env(cfg.clone(), block_env.clone(), tx);
+
+                    let mut inspector = inspector_setup();
+                    let (res, _) =
+                        this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?;
+                    let ResultAndState { result, state } = res;
+                    results.push(f(tx_info, inspector, result, &state, &db)?);
+
+                    // need to apply the state changes of this transaction before executing the
+                    // next transaction, but only if there's a next transaction
+                    if transactions.peek().is_some() {
+                        // commit the state changes to the DB
+                        db.commit(state)
+                    }
+                }
+
+                Ok(Some(results))
+            })
+            .await
+        }
+    }
+
+    /// Executes all transactions of a block and returns a list of callback results invoked for
+    /// each transaction in the block.
+    ///
+    /// This
+    /// 1. fetches all transactions of the block
+    /// 2. configures the EVM env
+    /// 3. loops over all transactions and executes them
+    /// 4. calls the callback with the transaction info, the execution result, the changed state
+    ///    _after_ the transaction [`StateProviderDatabase`] and the database that points to the
+    ///    state right _before_ the transaction.
+    fn trace_block_with<F, R>(
+        &self,
+        block_id: BlockId,
+        config: TracingInspectorConfig,
+        f: F,
+    ) -> impl Future<Output = EthResult<Option<Vec<R>>>> + Send
+    where
+        Self: LoadBlock,
+        // This is the callback that's invoked for each transaction with the inspector, the
+        // result, state and db
+        F: Fn(
+                TransactionInfo,
+                TracingInspector,
+                ExecutionResult,
+                &EvmState,
+                &StateCacheDb<'_>,
+            ) -> EthResult<R>
+            + Send
+            + 'static,
+        R: Send + 'static,
+    {
+        self.trace_block_until(block_id, None, config, f)
+    }
+
+    /// Executes all transactions of a block and returns a list of callback results invoked for
+    /// each transaction in the block.
+    ///
+    /// This
+    /// 1. fetches all transactions of the block
+    /// 2. configures the EVM env
+    /// 3. loops over all transactions and executes them
+    /// 4. calls the callback with the transaction info, the execution result, the changed state
+    ///    _after_ the transaction [`EvmState`] and the database that points to the state right
+    ///    _before_ the transaction, in other words the state the transaction was executed on:
+    ///    `changed_state = tx(cached_state)`
+    ///
+    /// This accepts an `inspector_setup` closure that returns the inspector to be used for
+    /// tracing a transaction. This is invoked for each transaction.
+    fn trace_block_inspector<Setup, Insp, F, R>(
+        &self,
+        block_id: BlockId,
+        insp_setup: Setup,
+        f: F,
+    ) -> impl Future<Output = EthResult<Option<Vec<R>>>> + Send
+    where
+        Self: LoadBlock,
+        // This is the callback that's invoked for each transaction with the inspector, the
+        // result, state and db
+        F: Fn(TransactionInfo, Insp, ExecutionResult, &EvmState, &StateCacheDb<'_>) -> EthResult<R>
+            + Send
+            + 'static,
+        Setup: FnMut() -> Insp + Send + 'static,
+        Insp: for<'a, 'b> Inspector<StateCacheDbRefMutWrapper<'a, 'b>> + Send + 'static,
+        R: Send + 'static,
+    {
+        self.trace_block_until_with_inspector(block_id, None, insp_setup, f)
+    }
+}
diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs
new file mode 100644
index 000000000..05426ba49
--- /dev/null
+++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs
@@ -0,0 +1,698 @@
+//! Database access for `eth_` transaction RPC methods. Loads transaction and receipt data w.r.t.
+//! network.
+
+use std::{fmt, ops::Deref, sync::Arc};
+
+use alloy_dyn_abi::TypedData;
+use futures::Future;
+use reth_primitives::{
+    Address, BlockId, Bytes, FromRecoveredPooledTransaction, IntoRecoveredTransaction, Receipt,
+    SealedBlockWithSenders, TransactionMeta, TransactionSigned, TxHash, TxKind, B256, U256,
+};
+use reth_provider::{BlockReaderIdExt, ReceiptProvider, TransactionsProvider};
+use reth_rpc_eth_types::{
+    utils::recover_raw_transaction, EthApiError, EthResult, EthStateCache, SignError,
+    TransactionSource,
+};
+use reth_rpc_types::{
+    transaction::{
+        EIP1559TransactionRequest, EIP2930TransactionRequest, EIP4844TransactionRequest,
+        LegacyTransactionRequest,
+    },
+    AnyTransactionReceipt, BlockSidecar, Transaction, TransactionRequest, TypedTransactionRequest,
+};
+use reth_rpc_types_compat::transaction::from_recovered_with_block_context;
+use reth_transaction_pool::{TransactionOrigin, TransactionPool};
+
+use super::EthSigner;
+
+use super::{Call, EthApiSpec, LoadBlock, LoadFee, LoadPendingBlock, LoadReceipt, SpawnBlocking};
+
+/// Transaction related functions for the [`EthApiServer`](crate::EthApiServer) trait in
+/// the `eth_` namespace.
+///
+/// This includes utilities for transaction tracing, transacting and inspection.
+/// +/// Async functions that are spawned onto the +/// [`BlockingTaskPool`](reth_tasks::pool::BlockingTaskPool) begin with `spawn_` +/// +/// ## Calls +/// +/// There are subtle differences between when transacting [`TransactionRequest`]: +/// +/// The endpoints `eth_call` and `eth_estimateGas` and `eth_createAccessList` should always +/// __disable__ the base fee check in the +/// [`EnvWithHandlerCfg`](revm_primitives::CfgEnvWithHandlerCfg). +/// +/// The behaviour for tracing endpoints is not consistent across clients. +/// Geth also disables the basefee check for tracing: +/// Erigon does not: +/// +/// See also +/// +/// This implementation follows the behaviour of Geth and disables the basefee check for tracing. +pub trait EthTransactions: LoadTransaction { + /// Returns a handle for reading data from disk. + /// + /// Data access in default (L1) trait method implementations. + fn provider(&self) -> impl BlockReaderIdExt; + + /// Returns a handle for forwarding received raw transactions. + /// + /// Access to transaction forwarder in default (L1) trait method implementations. + fn raw_tx_forwarder(&self) -> Option>; + + /// Returns a handle for signing data. + /// + /// Singer access in default (L1) trait method implementations. + fn signers(&self) -> &parking_lot::RwLock>>; + + /// Returns the transaction by hash. + /// + /// Checks the pool and state. + /// + /// Returns `Ok(None)` if no matching transaction was found. + fn transaction_by_hash( + &self, + hash: B256, + ) -> impl Future>> + Send { + LoadTransaction::transaction_by_hash(self, hash) + } + + /// Get all transactions in the block with the given hash. + /// + /// Returns `None` if block does not exist. + fn transactions_by_block( + &self, + block: B256, + ) -> impl Future>>> + Send { + async move { Ok(self.cache().get_block_transactions(block).await?) } + } + + /// Returns the EIP-2718 encoded transaction by hash. + /// + /// If this is a pooled EIP-4844 transaction, the blob sidecar is included. + /// + /// Checks the pool and state. + /// + /// Returns `Ok(None)` if no matching transaction was found. + fn raw_transaction_by_hash( + &self, + hash: B256, + ) -> impl Future>> + Send { + async move { + // Note: this is mostly used to fetch pooled transactions so we check the pool first + if let Some(tx) = + self.pool().get_pooled_transaction_element(hash).map(|tx| tx.envelope_encoded()) + { + return Ok(Some(tx)) + } + + self.spawn_blocking_io(move |ref this| { + Ok(LoadTransaction::provider(this) + .transaction_by_hash(hash)? + .map(|tx| tx.envelope_encoded())) + }) + .await + } + } + + /// Returns the _historical_ transaction and the block it was mined in + fn historical_transaction_by_hash_at( + &self, + hash: B256, + ) -> impl Future>> + Send { + async move { + match self.transaction_by_hash_at(hash).await? { + None => Ok(None), + Some((tx, at)) => Ok(at.as_block_hash().map(|hash| (tx, hash))), + } + } + } + + /// Returns the transaction receipt for the given hash. + /// + /// Returns None if the transaction does not exist or is pending + /// Note: The tx receipt is not available for pending transactions. 
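Editor's note: `build_transaction_receipt` above loads all receipts of the block, not just the target one. One reason is that receipts only store *cumulative* gas, so per-transaction gas used must be derived from the previous receipt. A minimal, std-only sketch of that derivation (function name illustrative):

```rust
/// Per-transaction gas usage recovered from cumulative receipt values.
/// Assumes the input is monotonically non-decreasing, as it is within one block.
fn gas_used_per_tx(cumulative: &[u64]) -> Vec<u64> {
    cumulative
        .iter()
        .scan(0u64, |prev, &cum| {
            let used = cum - *prev; // gas used by this tx alone
            *prev = cum;
            Some(used)
        })
        .collect()
}

fn main() {
    // three transactions with cumulative gas 21_000, 74_000, 95_000
    assert_eq!(gas_used_per_tx(&[21_000, 74_000, 95_000]), vec![21_000, 53_000, 21_000]);
}
```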
+ fn transaction_receipt( + &self, + hash: B256, + ) -> impl Future>> + Send + where + Self: LoadReceipt + 'static, + { + async move { + let result = self.load_transaction_and_receipt(hash).await?; + + let (tx, meta, receipt) = match result { + Some((tx, meta, receipt)) => (tx, meta, receipt), + None => return Ok(None), + }; + + self.build_transaction_receipt(tx, meta, receipt).await.map(Some) + } + } + + /// Helper method that loads a transaction and its receipt. + fn load_transaction_and_receipt( + &self, + hash: TxHash, + ) -> impl Future>> + Send + where + Self: 'static, + { + let this = self.clone(); + self.spawn_blocking_io(move |_| { + let (tx, meta) = + match LoadTransaction::provider(&this).transaction_by_hash_with_meta(hash)? { + Some((tx, meta)) => (tx, meta), + None => return Ok(None), + }; + + let receipt = match EthTransactions::provider(&this).receipt_by_hash(hash)? { + Some(recpt) => recpt, + None => return Ok(None), + }; + + Ok(Some((tx, meta, receipt))) + }) + } + + /// Get [`Transaction`] by [`BlockId`] and index of transaction within that block. + /// + /// Returns `Ok(None)` if the block does not exist, or index is out of range. + fn transaction_by_block_and_tx_index( + &self, + block_id: BlockId, + index: usize, + ) -> impl Future>> + Send + where + Self: LoadBlock, + { + async move { + if let Some(block) = self.block_with_senders(block_id).await? { + let block_hash = block.hash(); + let block_number = block.number; + let base_fee_per_gas = block.base_fee_per_gas; + if let Some(tx) = block.into_transactions_ecrecovered().nth(index) { + return Ok(Some(from_recovered_with_block_context( + tx, + block_hash, + block_number, + base_fee_per_gas, + index, + ))) + } + } + + Ok(None) + } + } + + /// Get transaction, as raw bytes, by [`BlockId`] and index of transaction within that block. + /// + /// Returns `Ok(None)` if the block does not exist, or index is out of range. + fn raw_transaction_by_block_and_tx_index( + &self, + block_id: BlockId, + index: usize, + ) -> impl Future>> + Send + where + Self: LoadBlock, + { + async move { + if let Some(block) = self.block_with_senders(block_id).await? { + if let Some(tx) = block.transactions().nth(index) { + return Ok(Some(tx.envelope_encoded())) + } + } + + Ok(None) + } + } + + /// Decodes and recovers the transaction and submits it to the pool. + /// + /// Returns the hash of the transaction. + fn send_raw_transaction(&self, tx: Bytes) -> impl Future> + Send { + async move { + // On optimism, transactions are forwarded directly to the sequencer to be included in + // blocks that it builds. + if let Some(client) = self.raw_tx_forwarder().as_ref() { + tracing::debug!( target: "rpc::eth", "forwarding raw transaction to"); + client.forward_raw_transaction(&tx).await?; + } + + let recovered = recover_raw_transaction(tx)?; + let pool_transaction = + ::Transaction::from_recovered_pooled_transaction( + recovered, + ); + + // submit the transaction to the pool with a `Local` origin + let hash = + self.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?; + + Ok(hash) + } + } + + /// Signs transaction with a matching signer, if any and submits the transaction to the pool. + /// Returns the hash of the signed transaction. 
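Editor's note: the first thing `send_transaction` below does is fill a missing nonce via `transaction_count` against the pending block: one past the highest pooled nonce for the sender, falling back to the state nonce when the pool has nothing. A std-only sketch of that rule (names illustrative):

```rust
/// Next nonce for an account when building against the pending block.
fn next_nonce(state_nonce: u64, pooled_nonces: &[u64]) -> Option<u64> {
    match pooled_nonces.iter().max() {
        // `None` here maps to a NonceMaxValue error upstream
        Some(highest) => highest.checked_add(1),
        None => Some(state_nonce),
    }
}

fn main() {
    assert_eq!(next_nonce(5, &[]), Some(5));
    assert_eq!(next_nonce(5, &[5, 6, 7]), Some(8));
    assert_eq!(next_nonce(0, &[u64::MAX]), None); // rejected as NonceMaxValue
}
```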
+ fn send_transaction( + &self, + mut request: TransactionRequest, + ) -> impl Future> + Send + where + Self: EthApiSpec + LoadBlock + LoadPendingBlock + LoadFee + Call, + { + async move { + let from = match request.from { + Some(from) => from, + None => return Err(SignError::NoAccount.into()), + }; + + // set nonce if not already set before + if request.nonce.is_none() { + let nonce = self.transaction_count(from, Some(BlockId::pending())).await?; + // note: `.to()` can't panic because the nonce is constructed from a `u64` + request.nonce = Some(nonce.to::()); + } + + let chain_id = self.chain_id(); + + let estimated_gas = + self.estimate_gas_at(request.clone(), BlockId::pending(), None).await?; + let gas_limit = estimated_gas; + + let TransactionRequest { + to, + gas_price, + max_fee_per_gas, + max_priority_fee_per_gas, + gas, + value, + input: data, + nonce, + mut access_list, + max_fee_per_blob_gas, + blob_versioned_hashes, + sidecar, + .. + } = request; + + // todo: remove this inlining after https://github.com/alloy-rs/alloy/pull/183#issuecomment-1928161285 + let transaction = match ( + gas_price, + max_fee_per_gas, + access_list.take(), + max_fee_per_blob_gas, + blob_versioned_hashes, + sidecar, + ) { + // legacy transaction + // gas price required + (Some(_), None, None, None, None, None) => { + Some(TypedTransactionRequest::Legacy(LegacyTransactionRequest { + nonce: nonce.unwrap_or_default(), + gas_price: U256::from(gas_price.unwrap_or_default()), + gas_limit: U256::from(gas.unwrap_or_default()), + value: value.unwrap_or_default(), + input: data.into_input().unwrap_or_default(), + kind: to.unwrap_or(TxKind::Create), + chain_id: None, + })) + } + // EIP2930 + // if only accesslist is set, and no eip1599 fees + (_, None, Some(access_list), None, None, None) => { + Some(TypedTransactionRequest::EIP2930(EIP2930TransactionRequest { + nonce: nonce.unwrap_or_default(), + gas_price: U256::from(gas_price.unwrap_or_default()), + gas_limit: U256::from(gas.unwrap_or_default()), + value: value.unwrap_or_default(), + input: data.into_input().unwrap_or_default(), + kind: to.unwrap_or(TxKind::Create), + chain_id: 0, + access_list, + })) + } + // EIP1559 + // if 4844 fields missing + // gas_price, max_fee_per_gas, access_list, max_fee_per_blob_gas, + // blob_versioned_hashes, sidecar, + (None, _, _, None, None, None) => { + // Empty fields fall back to the canonical transaction schema. + Some(TypedTransactionRequest::EIP1559(EIP1559TransactionRequest { + nonce: nonce.unwrap_or_default(), + max_fee_per_gas: U256::from(max_fee_per_gas.unwrap_or_default()), + max_priority_fee_per_gas: U256::from( + max_priority_fee_per_gas.unwrap_or_default(), + ), + gas_limit: U256::from(gas.unwrap_or_default()), + value: value.unwrap_or_default(), + input: data.into_input().unwrap_or_default(), + kind: to.unwrap_or(TxKind::Create), + chain_id: 0, + access_list: access_list.unwrap_or_default(), + })) + } + // EIP4884 + // all blob fields required + ( + None, + _, + _, + Some(max_fee_per_blob_gas), + Some(blob_versioned_hashes), + Some(sidecar), + ) => { + // As per the EIP, we follow the same semantics as EIP-1559. 
+ Some(TypedTransactionRequest::EIP4844(EIP4844TransactionRequest { + chain_id: 0, + nonce: nonce.unwrap_or_default(), + max_priority_fee_per_gas: U256::from( + max_priority_fee_per_gas.unwrap_or_default(), + ), + max_fee_per_gas: U256::from(max_fee_per_gas.unwrap_or_default()), + gas_limit: U256::from(gas.unwrap_or_default()), + value: value.unwrap_or_default(), + input: data.into_input().unwrap_or_default(), + #[allow(clippy::manual_unwrap_or_default)] // clippy is suggesting here unwrap_or_default + to: match to { + Some(TxKind::Call(to)) => to, + _ => Address::default(), + }, + access_list: access_list.unwrap_or_default(), + + // eip-4844 specific. + max_fee_per_blob_gas: U256::from(max_fee_per_blob_gas), + blob_versioned_hashes, + sidecar, + })) + } + + _ => None, + }; + + let transaction = match transaction { + Some(TypedTransactionRequest::Legacy(mut req)) => { + req.chain_id = Some(chain_id.to()); + req.gas_limit = gas_limit.saturating_to(); + req.gas_price = self.legacy_gas_price(gas_price.map(U256::from)).await?; + + TypedTransactionRequest::Legacy(req) + } + Some(TypedTransactionRequest::EIP2930(mut req)) => { + req.chain_id = chain_id.to(); + req.gas_limit = gas_limit.saturating_to(); + req.gas_price = self.legacy_gas_price(gas_price.map(U256::from)).await?; + + TypedTransactionRequest::EIP2930(req) + } + Some(TypedTransactionRequest::EIP1559(mut req)) => { + let (max_fee_per_gas, max_priority_fee_per_gas) = self + .eip1559_fees( + max_fee_per_gas.map(U256::from), + max_priority_fee_per_gas.map(U256::from), + ) + .await?; + + req.chain_id = chain_id.to(); + req.gas_limit = gas_limit.saturating_to(); + req.max_fee_per_gas = max_fee_per_gas.saturating_to(); + req.max_priority_fee_per_gas = max_priority_fee_per_gas.saturating_to(); + + TypedTransactionRequest::EIP1559(req) + } + Some(TypedTransactionRequest::EIP4844(mut req)) => { + let (max_fee_per_gas, max_priority_fee_per_gas) = self + .eip1559_fees( + max_fee_per_gas.map(U256::from), + max_priority_fee_per_gas.map(U256::from), + ) + .await?; + + req.max_fee_per_gas = max_fee_per_gas; + req.max_priority_fee_per_gas = max_priority_fee_per_gas; + req.max_fee_per_blob_gas = + self.eip4844_blob_fee(max_fee_per_blob_gas.map(U256::from)).await?; + + req.chain_id = chain_id.to(); + req.gas_limit = gas_limit; + + TypedTransactionRequest::EIP4844(req) + } + None => return Err(EthApiError::ConflictingFeeFieldsInRequest), + }; + + let signed_tx = self.sign_request(&from, transaction)?; + + let recovered = + signed_tx.into_ecrecovered().ok_or(EthApiError::InvalidTransactionSignature)?; + + let pool_transaction = match recovered.try_into() { + Ok(converted) => <::Pool as TransactionPool>::Transaction::from_recovered_pooled_transaction(converted), + Err(_) => return Err(EthApiError::TransactionConversionError), + }; + + // submit the transaction to the pool with a `Local` origin + let hash = LoadTransaction::pool(self) + .add_transaction(TransactionOrigin::Local, pool_transaction) + .await?; + + Ok(hash) + } + } + + /// Signs a transaction, with configured signers. + fn sign_request( + &self, + from: &Address, + request: TypedTransactionRequest, + ) -> EthResult { + for signer in self.signers().read().iter() { + if signer.is_signer_for(from) { + return match signer.sign_transaction(request, from) { + Ok(tx) => Ok(tx), + Err(e) => Err(e.into()), + } + } + } + Err(EthApiError::InvalidTransactionSignature) + } + + /// Signs given message. Returns the signature. 
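Editor's note: the fee-field match above can be hard to read in diff form. The sketch below summarizes the selection rule with plain booleans; it is a simplification (the three blob fields are collapsed into one flag, and in the source all three must be present for EIP-4844):

```rust
/// Which typed transaction a request maps to, based on which fee fields are set.
#[derive(Debug, PartialEq)]
enum TxType { Legacy, Eip2930, Eip1559, Eip4844 }

fn select_type(gas_price: bool, eip1559_fees: bool, access_list: bool, blob_fields: bool) -> Option<TxType> {
    match (gas_price, eip1559_fees, access_list, blob_fields) {
        (true, false, false, false) => Some(TxType::Legacy),
        // an access list without EIP-1559 fees selects EIP-2930
        (_, false, true, false) => Some(TxType::Eip2930),
        (false, _, _, false) => Some(TxType::Eip1559),
        (false, _, _, true) => Some(TxType::Eip4844),
        // anything else is rejected as ConflictingFeeFieldsInRequest
        _ => None,
    }
}

fn main() {
    assert_eq!(select_type(true, false, false, false), Some(TxType::Legacy));
    assert_eq!(select_type(false, true, true, false), Some(TxType::Eip1559));
    assert_eq!(select_type(false, true, false, true), Some(TxType::Eip4844));
    assert_eq!(select_type(true, true, false, false), None);
}
```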
+ fn sign( + &self, + account: Address, + message: Bytes, + ) -> impl Future> + Send { + async move { Ok(self.find_signer(&account)?.sign(account, &message).await?.to_hex_bytes()) } + } + + /// Encodes and signs the typed data according EIP-712. Payload must implement Eip712 trait. + fn sign_typed_data(&self, data: &TypedData, account: Address) -> EthResult { + Ok(self.find_signer(&account)?.sign_typed_data(account, data)?.to_hex_bytes()) + } + + /// Returns the signer for the given account, if found in configured signers. + fn find_signer(&self, account: &Address) -> Result, SignError> { + self.signers() + .read() + .iter() + .find(|signer| signer.is_signer_for(account)) + .map(|signer| dyn_clone::clone_box(&**signer)) + .ok_or(SignError::NoAccount) + } + + /// Returns the sidecar for the given transaction hash. + fn rpc_transaction_sidecar( + &self, + hash: B256, + ) -> impl Future>> + Send + where + Self: LoadReceipt + 'static, + { + async move { + let meta = match LoadTransaction::provider(self).transaction_by_hash_with_meta(hash)? { + Some((_, meta)) => meta, + None => return Ok(None), + }; + + // If no block sidecars found, return None + let Some(sidecars) = LoadTransaction::cache(self).get_sidecars(meta.block_hash).await? + else { + return Ok(None); + }; + + Ok(sidecars.iter().find(|item| item.tx_hash == hash).map(|sidecar| BlockSidecar { + blob_sidecar: sidecar.blob_transaction_sidecar.clone(), + block_number: sidecar.block_number.to(), + block_hash: sidecar.block_hash, + tx_index: sidecar.tx_index, + tx_hash: sidecar.tx_hash, + })) + } + } +} + +/// Loads a transaction from database. +/// +/// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` transactions RPC +/// methods. +pub trait LoadTransaction: SpawnBlocking { + /// Transaction pool with pending transactions. [`TransactionPool::Transaction`] is the + /// supported transaction type. + type Pool: TransactionPool; + + /// Returns a handle for reading data from disk. + /// + /// Data access in default (L1) trait method implementations. + fn provider(&self) -> impl TransactionsProvider; + + /// Returns a handle for reading data from memory. + /// + /// Data access in default (L1) trait method implementations. + fn cache(&self) -> &EthStateCache; + + /// Returns a handle for reading data from pool. + /// + /// Data access in default (L1) trait method implementations. + fn pool(&self) -> &Self::Pool; + + /// Returns the transaction by hash. + /// + /// Checks the pool and state. + /// + /// Returns `Ok(None)` if no matching transaction was found. + fn transaction_by_hash( + &self, + hash: B256, + ) -> impl Future>> + Send { + async move { + // Try to find the transaction on disk + let mut resp = self + .spawn_blocking_io(move |this| { + match this.provider().transaction_by_hash_with_meta(hash)? { + None => Ok(None), + Some((tx, meta)) => { + // Note: we assume this transaction is valid, because it's mined (or + // part of pending block) and already. We don't need to + // check for pre EIP-2 because this transaction could be pre-EIP-2. 
+ let transaction = tx + .into_ecrecovered_unchecked() + .ok_or(EthApiError::InvalidTransactionSignature)?; + + let tx = TransactionSource::Block { + transaction, + index: meta.index, + block_hash: meta.block_hash, + block_number: meta.block_number, + base_fee: meta.base_fee, + }; + Ok(Some(tx)) + } + } + }) + .await?; + + if resp.is_none() { + // tx not found on disk, check pool + if let Some(tx) = + self.pool().get(&hash).map(|tx| tx.transaction.to_recovered_transaction()) + { + resp = Some(TransactionSource::Pool(tx)); + } + } + + Ok(resp) + } + } + + /// Returns the transaction by including its corresponding [`BlockId`]. + /// + /// Note: this supports pending transactions + fn transaction_by_hash_at( + &self, + transaction_hash: B256, + ) -> impl Future>> + Send { + async move { + match self.transaction_by_hash(transaction_hash).await? { + None => Ok(None), + Some(tx) => { + let res = match tx { + tx @ TransactionSource::Pool(_) => (tx, BlockId::pending()), + TransactionSource::Block { + transaction, + index, + block_hash, + block_number, + base_fee, + } => { + let at = BlockId::Hash(block_hash.into()); + let tx = TransactionSource::Block { + transaction, + index, + block_hash, + block_number, + base_fee, + }; + (tx, at) + } + }; + Ok(Some(res)) + } + } + } + } + + /// Fetches the transaction and the transaction's block + fn transaction_and_block( + &self, + hash: B256, + ) -> impl Future>> + Send + { + async move { + let (transaction, at) = match self.transaction_by_hash_at(hash).await? { + None => return Ok(None), + Some(res) => res, + }; + + // Note: this is always either hash or pending + let block_hash = match at { + BlockId::Hash(hash) => hash.block_hash, + _ => return Ok(None), + }; + let block = self.cache().get_block_with_senders(block_hash).await?; + Ok(block.map(|block| (transaction, block.seal(block_hash)))) + } + } +} + +/// A trait that allows for forwarding raw transactions. +/// +/// For example to a sequencer. +#[async_trait::async_trait] +pub trait RawTransactionForwarder: fmt::Debug + Send + Sync + 'static { + /// Forwards raw transaction bytes for `eth_sendRawTransaction` + async fn forward_raw_transaction(&self, raw: &[u8]) -> EthResult<()>; +} + +/// Configure server's forwarder for `eth_sendRawTransaction`, at runtime. +pub trait UpdateRawTxForwarder { + /// Sets a forwarder for `eth_sendRawTransaction` + /// + /// Note: this might be removed in the future in favor of a more generic approach. + fn set_eth_raw_transaction_forwarder(&self, forwarder: Arc); +} + +impl UpdateRawTxForwarder for T +where + T: Deref>, + K: UpdateRawTxForwarder, +{ + fn set_eth_raw_transaction_forwarder(&self, forwarder: Arc) { + self.deref().deref().set_eth_raw_transaction_forwarder(forwarder); + } +} diff --git a/crates/rpc/rpc-eth-api/src/lib.rs b/crates/rpc/rpc-eth-api/src/lib.rs new file mode 100644 index 000000000..1aed94d5c --- /dev/null +++ b/crates/rpc/rpc-eth-api/src/lib.rs @@ -0,0 +1,33 @@ +//! Reth RPC `eth_` API implementation +//! +//! ## Feature Flags +//! +//! - `client`: Enables JSON-RPC client support. 
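Editor's note: `LoadTransaction::transaction_by_hash` above checks disk first and only falls back to the pool, tagging the result with where it was found. A std-only sketch of that lookup order (`Source` stands in for `TransactionSource`, simplified):

```rust
use std::collections::HashMap;

/// Where a transaction was found, mirroring `TransactionSource` (simplified).
#[derive(Debug, PartialEq)]
enum Source { Block { index: u64 }, Pool }

/// Disk-first, pool-second lookup.
fn lookup(hash: &str, disk: &HashMap<String, u64>, pool: &[String]) -> Option<Source> {
    if let Some(&index) = disk.get(hash) {
        return Some(Source::Block { index });
    }
    pool.iter().any(|h| h == hash).then_some(Source::Pool)
}

fn main() {
    let mut disk = HashMap::new();
    disk.insert("0xaa".to_string(), 3);
    let pool = vec!["0xbb".to_string()];
    assert_eq!(lookup("0xaa", &disk, &pool), Some(Source::Block { index: 3 }));
    assert_eq!(lookup("0xbb", &disk, &pool), Some(Source::Pool));
    assert_eq!(lookup("0xcc", &disk, &pool), None);
}
```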
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +pub mod bundle; +pub mod core; +pub mod filter; +pub mod helpers; +pub mod pubsub; + +pub use bundle::{EthBundleApiServer, EthCallBundleApiServer}; +pub use core::{EthApiServer, FullEthApiServer}; +pub use filter::EthFilterApiServer; +pub use pubsub::EthPubSubApiServer; + +pub use helpers::transaction::RawTransactionForwarder; + +#[cfg(feature = "client")] +pub use bundle::{EthBundleApiClient, EthCallBundleApiClient}; +#[cfg(feature = "client")] +pub use core::EthApiClient; +#[cfg(feature = "client")] +pub use filter::EthFilterApiClient; diff --git a/crates/rpc/rpc-api/src/eth_pubsub.rs b/crates/rpc/rpc-eth-api/src/pubsub.rs similarity index 92% rename from crates/rpc/rpc-api/src/eth_pubsub.rs rename to crates/rpc/rpc-eth-api/src/pubsub.rs index eaa1ef2d8..8de125152 100644 --- a/crates/rpc/rpc-api/src/eth_pubsub.rs +++ b/crates/rpc/rpc-eth-api/src/pubsub.rs @@ -1,3 +1,5 @@ +//! `eth_` RPC API for pubsub subscription. + use jsonrpsee::proc_macros::rpc; use reth_rpc_types::pubsub::{Params, SubscriptionKind}; diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml new file mode 100644 index 000000000..807d362f0 --- /dev/null +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -0,0 +1,71 @@ +[package] +name = "reth-rpc-eth-types" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Types supporting implementation of 'eth' namespace RPC server API" + +[lints] +workspace = true + +[dependencies] +reth-chainspec.workspace = true +reth-errors.workspace = true +reth-evm.workspace = true +reth-execution-types.workspace = true +reth-metrics.workspace = true +reth-primitives.workspace = true +reth-provider.workspace = true +reth-revm.workspace = true +reth-rpc-server-types.workspace = true +reth-rpc-types.workspace = true +reth-rpc-types-compat.workspace = true +reth-tasks.workspace = true +reth-transaction-pool.workspace = true +reth-trie.workspace = true + +# ethereum +alloy-sol-types.workspace = true +revm.workspace = true +revm-inspectors = { workspace = true, features = ["js-tracer"] } +revm-primitives = { workspace = true, features = ["dev"] } + +# rpc +jsonrpsee-core.workspace = true +jsonrpsee-types.workspace = true + +# async +futures.workspace = true +tokio.workspace = true +tokio-stream.workspace = true + +# metrics +metrics.workspace = true + +# misc +serde = { workspace = true, features = ["derive"] } +thiserror.workspace = true +derive_more.workspace = true +schnellru.workspace = true +rand.workspace = true +tracing.workspace = true + +[dev-dependencies] +serde_json.workspace = true + +[features] +optimism = [ + "reth-primitives/optimism", + "reth-provider/optimism", + "reth-revm/optimism", + "reth-chainspec/optimism", + "reth-execution-types/optimism", + "reth-revm/optimism", + "revm/optimism" +] +bsc = [ + "reth-primitives/bsc", +] \ No newline at end of file diff --git a/crates/rpc/rpc/src/eth/cache/config.rs b/crates/rpc/rpc-eth-types/src/cache/config.rs similarity index 71% rename from crates/rpc/rpc/src/eth/cache/config.rs rename to 
crates/rpc/rpc-eth-types/src/cache/config.rs index 5dc989e8e..64999bd6b 100644 --- a/crates/rpc/rpc/src/eth/cache/config.rs +++ b/crates/rpc/rpc-eth-types/src/cache/config.rs @@ -1,8 +1,14 @@ -use reth_rpc_server_types::constants::cache::*; +//! Configuration for RPC cache. + use serde::{Deserialize, Serialize}; -/// Settings for the [`EthStateCache`](crate::eth::cache::EthStateCache). -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +use reth_rpc_server_types::constants::cache::{ + DEFAULT_BLOCK_CACHE_MAX_LEN, DEFAULT_CONCURRENT_DB_REQUESTS, DEFAULT_ENV_CACHE_MAX_LEN, + DEFAULT_RECEIPT_CACHE_MAX_LEN, +}; + +/// Settings for the [`EthStateCache`](super::EthStateCache). +#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct EthStateCacheConfig { /// Max number of blocks in cache. diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs new file mode 100644 index 000000000..0370f5e60 --- /dev/null +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -0,0 +1,172 @@ +//! Helper types to workaround 'higher-ranked lifetime error' +//! in default implementation of +//! `reth_rpc_eth_api::helpers::Call`. + +use reth_primitives::{B256, U256}; +use reth_provider::StateProvider; +use reth_revm::{database::StateProviderDatabase, db::CacheDB, DatabaseRef}; +use revm::Database; + +/// Helper alias type for the state's [`CacheDB`] +pub type StateCacheDb<'a> = CacheDB>>; + +/// Hack to get around 'higher-ranked lifetime error', see +/// +#[allow(missing_debug_implementations)] +pub struct StateProviderTraitObjWrapper<'a>(pub &'a dyn StateProvider); + +impl<'a> reth_provider::StateRootProvider for StateProviderTraitObjWrapper<'a> { + fn state_root( + &self, + bundle_state: &revm::db::BundleState, + ) -> reth_errors::ProviderResult { + self.0.state_root(bundle_state) + } + + fn state_root_with_updates( + &self, + bundle_state: &revm::db::BundleState, + ) -> reth_errors::ProviderResult<(B256, reth_trie::updates::TrieUpdates)> { + self.0.state_root_with_updates(bundle_state) + } +} + +impl<'a> reth_provider::StateProofProvider for StateProviderTraitObjWrapper<'a> { + fn proof( + &self, + state: &revm::db::BundleState, + address: revm_primitives::Address, + slots: &[B256], + ) -> reth_errors::ProviderResult { + self.0.proof(state, address, slots) + } +} + +impl<'a> reth_provider::AccountReader for StateProviderTraitObjWrapper<'a> { + fn basic_account( + &self, + address: revm_primitives::Address, + ) -> reth_errors::ProviderResult> { + self.0.basic_account(address) + } +} + +impl<'a> reth_provider::BlockHashReader for StateProviderTraitObjWrapper<'a> { + fn block_hash( + &self, + block_number: reth_primitives::BlockNumber, + ) -> reth_errors::ProviderResult> { + self.0.block_hash(block_number) + } + + fn canonical_hashes_range( + &self, + start: reth_primitives::BlockNumber, + end: reth_primitives::BlockNumber, + ) -> reth_errors::ProviderResult> { + self.0.canonical_hashes_range(start, end) + } + + fn convert_block_hash( + &self, + hash_or_number: reth_rpc_types::BlockHashOrNumber, + ) -> reth_errors::ProviderResult> { + self.0.convert_block_hash(hash_or_number) + } +} + +impl<'a> StateProvider for StateProviderTraitObjWrapper<'a> { + fn account_balance( + &self, + addr: revm_primitives::Address, + ) -> reth_errors::ProviderResult> { + self.0.account_balance(addr) + } + + fn account_code( + &self, + addr: revm_primitives::Address, + ) -> reth_errors::ProviderResult> { + self.0.account_code(addr) + 
} + + fn account_nonce( + &self, + addr: revm_primitives::Address, + ) -> reth_errors::ProviderResult<Option<u64>> { + self.0.account_nonce(addr) + } + + fn bytecode_by_hash( + &self, + code_hash: B256, + ) -> reth_errors::ProviderResult<Option<reth_primitives::Bytecode>> { + self.0.bytecode_by_hash(code_hash) + } + + fn storage( + &self, + account: revm_primitives::Address, + storage_key: reth_primitives::StorageKey, + ) -> reth_errors::ProviderResult<Option<reth_primitives::StorageValue>> { + self.0.storage(account, storage_key) + } +} + +/// Hack to get around 'higher-ranked lifetime error', see +/// <https://github.com/rust-lang/rust/issues/100013> +#[allow(missing_debug_implementations)] +pub struct StateCacheDbRefMutWrapper<'a, 'b>(pub &'b mut StateCacheDb<'a>); + +impl<'a, 'b> Database for StateCacheDbRefMutWrapper<'a, 'b> { + type Error = <StateCacheDb<'a> as Database>::Error; + fn basic( + &mut self, + address: revm_primitives::Address, + ) -> Result<Option<revm_primitives::AccountInfo>, Self::Error> { + self.0.basic(address) + } + + fn code_by_hash(&mut self, code_hash: B256) -> Result<revm_primitives::Bytecode, Self::Error> { + self.0.code_by_hash(code_hash) + } + + fn storage( + &mut self, + address: revm_primitives::Address, + index: U256, + ) -> Result<U256, Self::Error> { + self.0.storage(address, index) + } + + fn block_hash(&mut self, number: u64) -> Result<B256, Self::Error> { + self.0.block_hash(number) + } +} + +impl<'a, 'b> DatabaseRef for StateCacheDbRefMutWrapper<'a, 'b> { + type Error = <StateCacheDb<'a> as Database>::Error; + + fn basic_ref( + &self, + address: revm_primitives::Address, + ) -> Result<Option<revm_primitives::AccountInfo>, Self::Error> { + self.0.basic_ref(address) + } + + fn code_by_hash_ref(&self, code_hash: B256) -> Result<revm_primitives::Bytecode, Self::Error> { + self.0.code_by_hash_ref(code_hash) + } + + fn storage_ref( + &self, + address: revm_primitives::Address, + index: U256, + ) -> Result<U256, Self::Error> { + self.0.storage_ref(address, index) + } + + fn block_hash_ref(&self, number: u64) -> Result<B256, Self::Error> { + self.0.block_hash_ref(number) + } +} diff --git a/crates/rpc/rpc/src/eth/cache/metrics.rs b/crates/rpc/rpc-eth-types/src/cache/metrics.rs similarity index 93% rename from crates/rpc/rpc/src/eth/cache/metrics.rs rename to crates/rpc/rpc-eth-types/src/cache/metrics.rs index c9b18a299..d87a35e03 100644 --- a/crates/rpc/rpc/src/eth/cache/metrics.rs +++ b/crates/rpc/rpc-eth-types/src/cache/metrics.rs @@ -1,3 +1,5 @@ +//! Tracks state of RPC cache.
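+//! +//! Metrics are registered per named cache ("blocks", "receipts", "sidecars", "evm_env"), so hit/miss behavior and the current number of cached entries can be observed for each cache independently.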
+ use metrics::Counter; use reth_metrics::{metrics::Gauge, Metrics}; diff --git a/crates/rpc/rpc/src/eth/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs similarity index 88% rename from crates/rpc/rpc/src/eth/cache/mod.rs rename to crates/rpc/rpc-eth-types/src/cache/mod.rs index cfbe68311..641fe86d3 100644 --- a/crates/rpc/rpc/src/eth/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -5,8 +5,8 @@ use reth_errors::{ProviderError, ProviderResult}; use reth_evm::ConfigureEvm; use reth_execution_types::Chain; use reth_primitives::{ - Block, BlockHashOrNumber, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, - TransactionSigned, TransactionSignedEcRecovered, B256, + BlobSidecars, Block, BlockHashOrNumber, BlockWithSenders, Receipt, SealedBlock, + SealedBlockWithSenders, TransactionSigned, TransactionSignedEcRecovered, B256, }; use reth_provider::{ BlockReader, CanonStateNotification, EvmEnvProvider, StateProviderFactory, TransactionVariant, @@ -26,13 +26,12 @@ use tokio::sync::{ }; use tokio_stream::wrappers::UnboundedReceiverStream; -mod config; -pub use config::*; +use super::{EthStateCacheConfig, MultiConsumerLruCache}; -mod metrics; - -mod multi_consumer; -pub use multi_consumer::MultiConsumerLruCache; +pub mod config; +pub mod db; +pub mod metrics; +pub mod multi_consumer; /// The type that can send the response to a requested [Block] type BlockTransactionsResponseSender = @@ -44,6 +43,9 @@ type BlockWithSendersResponseSender = oneshot::Sender<ProviderResult<Option<BlockWithSenders>>>; +/// The type that can send the response to the requested sidecars of a block. +type SidecarsResponseSender = oneshot::Sender<ProviderResult<Option<BlobSidecars>>>; + /// The type that can send the response to a requested env type EnvResponseSender = oneshot::Sender<ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)>>; @@ -57,6 +59,8 @@ type BlockLruCache<L> = MultiConsumerLruCache< type ReceiptsLruCache<L> = MultiConsumerLruCache<B256, Vec<Receipt>, L, ReceiptsResponseSender>; +type SidecarsLruCache<L> = MultiConsumerLruCache<B256, BlobSidecars, L, SidecarsResponseSender>; + type EnvLruCache<L> = MultiConsumerLruCache<B256, (CfgEnvWithHandlerCfg, BlockEnv), L, EnvResponseSender>; @@ -85,6 +89,7 @@ impl EthStateCache { provider, full_block_cache: BlockLruCache::new(max_blocks, "blocks"), receipts_cache: ReceiptsLruCache::new(max_receipts, "receipts"), + sidecars_cache: SidecarsLruCache::new(max_blocks, "sidecars"), evm_env_cache: EnvLruCache::new(max_envs, "evm_env"), action_tx: to_service.clone(), action_rx: UnboundedReceiverStream::new(rx), @@ -107,7 +112,7 @@ impl EthStateCache { ) -> Self where Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, - EvmConfig: ConfigureEvm + 'static, + EvmConfig: ConfigureEvm, { Self::spawn_with(provider, config, TokioTaskExecutor::default(), evm_config) } @@ -125,7 +130,7 @@ impl EthStateCache { where Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, - EvmConfig: ConfigureEvm + 'static, + EvmConfig: ConfigureEvm, { let EthStateCacheConfig { max_blocks, max_receipts, max_envs, max_concurrent_db_requests } = config; @@ -250,6 +255,13 @@ impl EthStateCache { Ok(block.zip(receipts)) } + /// Fetches sidecars for the given block hash. + pub async fn get_sidecars(&self, block_hash: B256) -> ProviderResult<Option<BlobSidecars>> { + let (response_tx, rx) = oneshot::channel(); + let _ = self.to_service.send(CacheAction::GetSidecars { block_hash, response_tx }); + rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? + } + /// Requests the evm env config for the block hash.
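/// The env is fetched via the provider on a cache miss and then kept in the LRU cache for subsequent requests.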
/// /// Returns an error if the corresponding header (required for populating the envs) was not @@ -277,7 +289,7 @@ impl EthStateCache { /// handles messages and does LRU lookups and never blocking IO. /// /// Caution: The channel for the data is _unbounded_; it is assumed that this is mainly used by the -/// [`EthApi`](crate::EthApi) which is typically invoked by the RPC server, which already uses +/// `reth_rpc::EthApi` which is typically invoked by the RPC server, which already uses /// permits to limit concurrent requests. #[must_use = "Type does nothing unless spawned"] pub(crate) struct EthStateCacheService< @@ -287,17 +299,21 @@ pub(crate) struct EthStateCacheService< LimitBlocks = ByLength, LimitReceipts = ByLength, LimitEnvs = ByLength, + LimitSidecars = ByLength, > where LimitBlocks: Limiter<B256, BlockWithSenders>, LimitReceipts: Limiter<B256, Vec<Receipt>>, LimitEnvs: Limiter<B256, (CfgEnvWithHandlerCfg, BlockEnv)>, + LimitSidecars: Limiter<B256, BlobSidecars>, { /// The type used to lookup data from disk provider: Provider, /// The LRU cache for full blocks grouped by their hash. full_block_cache: BlockLruCache<LimitBlocks>, - /// The LRU cache for full blocks grouped by their hash. + /// The LRU cache for receipts grouped by their hash. receipts_cache: ReceiptsLruCache<LimitReceipts>, + /// The LRU cache for blob sidecars grouped by their block hash. + sidecars_cache: SidecarsLruCache<LimitSidecars>, /// The LRU cache for revm environments evm_env_cache: EnvLruCache<LimitEnvs>, /// Sender half of the action channel. @@ -316,7 +332,7 @@ impl EthStateCacheService fn on_new_block(&mut self, block_hash: B256, res: ProviderResult<Option<BlockWithSenders>>) { if let Some(queued) = self.full_block_cache.remove(&block_hash) { @@ -403,7 +419,7 @@ impl Future for EthStateCacheService { + CacheAction::GetSidecars { block_hash, response_tx } => { + // check if block is cached + if let Some(sidecars) = this.sidecars_cache.get(&block_hash).cloned() { + let _ = response_tx.send(Ok(Some(sidecars))); + continue + } + + // block is not in the cache, request it if this is the first consumer + if this.sidecars_cache.queue(block_hash, response_tx) { + let provider = this.provider.clone(); + let action_tx = this.action_tx.clone(); + let rate_limiter = this.rate_limiter.clone(); + this.action_task_spawner.spawn_blocking(Box::pin(async move { + // Acquire permit + let _permit = rate_limiter.acquire().await; + let res = provider.sidecars(&block_hash); + + let _ = action_tx + .send(CacheAction::SidecarsResult { block_hash, res }); + })); + } + } CacheAction::GetEnv { block_hash, response_tx } => { // check if env data is cached if let Some(env) = this.evm_env_cache.get(&block_hash).cloned() { @@ -561,6 +599,19 @@ where this.evm_env_cache.insert(block_hash, data); } } + CacheAction::SidecarsResult { block_hash, res } => { + if let Some(queued) = this.sidecars_cache.remove(&block_hash) { + // send the response to queued senders + for tx in queued { + let _ = tx.send(res.clone()); + } + } + + // cache good sidecars data + if let Ok(Some(data)) = res { + this.sidecars_cache.insert(block_hash, data); + } + } CacheAction::CacheNewCanonicalChain { chain_change } => { for block in chain_change.blocks { this.on_new_block(block.hash(), Ok(Some(block.unseal()))); @@ -608,6 +659,8 @@ enum CacheAction { EnvResult { block_hash: B256, res: Box<ProviderResult<(CfgEnvWithHandlerCfg, BlockEnv)>> }, CacheNewCanonicalChain { chain_change: ChainChange }, RemoveReorgedChain { chain_change: ChainChange }, + GetSidecars { block_hash: B256, response_tx: SidecarsResponseSender }, + SidecarsResult { block_hash: B256, res: ProviderResult<Option<BlobSidecars>> }, } struct BlockReceipts { diff --git a/crates/rpc/rpc/src/eth/cache/multi_consumer.rs b/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs similarity index 94% rename from crates/rpc/rpc/src/eth/cache/multi_consumer.rs rename to
crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs index cd02ecc56..77d861343 100644 --- a/crates/rpc/rpc/src/eth/cache/multi_consumer.rs +++ b/crates/rpc/rpc-eth-types/src/cache/multi_consumer.rs @@ -1,20 +1,25 @@ -use super::metrics::CacheMetrics; -use schnellru::{ByLength, Limiter, LruMap}; +//! Metered cache, which also provides storage for senders in order to queue queries that result in +//! a cache miss. + use std::{ collections::{hash_map::Entry, HashMap}, fmt::{self, Debug, Formatter}, hash::Hash, }; +use schnellru::{ByLength, Limiter, LruMap}; + +use super::metrics::CacheMetrics; + /// A multi-consumer LRU cache. pub struct MultiConsumerLruCache<K, V, S, L> where K: Hash + Eq, L: Limiter<K, V>, { - /// The LRU cache for the + /// The LRU cache. cache: LruMap<K, V, L>, - /// All queued consumers + /// All queued consumers. queued: HashMap<K, Vec<S>>, /// Cache metrics metrics: CacheMetrics, diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc-eth-types/src/error.rs similarity index 94% rename from crates/rpc/rpc/src/eth/error.rs rename to crates/rpc/rpc-eth-types/src/error.rs index 84c150401..95d989a19 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc-eth-types/src/error.rs @@ -1,10 +1,13 @@ //! Implementation specific Errors for the `eth_` namespace. -use crate::result::{internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code}; +use std::time::Duration; + use alloy_sol_types::decode_revert_reason; -use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObject}; use reth_errors::RethError; use reth_primitives::{revm_primitives::InvalidHeader, Address, Bytes}; +use reth_rpc_server_types::result::{ + internal_rpc_err, invalid_params_rpc_err, rpc_err, rpc_error_with_code, +}; use reth_rpc_types::{ error::EthRpcErrorCode, request::TransactionInputError, BlockError, ToRpcError, }; @@ -14,7 +17,6 @@ use reth_transaction_pool::error::{ }; use revm::primitives::{EVMError, ExecutionResult, HaltReason, OutOfGasError}; use revm_inspectors::tracing::{js::JsInspectorError, MuxError}; -use std::time::Duration; /// Result alias pub type EthResult<T> = Result<T, EthApiError>; @@ -52,6 +54,9 @@ pub enum EthApiError { /// When an invalid block range is provided #[error("invalid block range")] InvalidBlockRange, + /// Thrown when the target block for proof computation exceeds the maximum configured window.
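+ /// For example, an `eth_getProof` request that targets a block further behind the chain head than the configured proof window allows.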
+ #[error("distance to target block exceeds maximum proof window")] + ExceedsMaxProofWindow, /// An internal error where prevrandao is not set in the evm's environment #[error("prevrandao not in the EVM's environment after merge")] PrevrandaoNotSet, @@ -134,13 +139,14 @@ impl EthApiError { } } -impl From for ErrorObject<'static> { +impl From for jsonrpsee_types::error::ErrorObject<'static> { fn from(error: EthApiError) -> Self { match error { EthApiError::FailedToDecodeSignedTransaction | EthApiError::InvalidTransactionSignature | EthApiError::EmptyRawTransactionData | EthApiError::InvalidBlockRange | + EthApiError::ExceedsMaxProofWindow | EthApiError::ConflictingFeeFieldsInRequest | EthApiError::Signing(_) | EthApiError::BothStateAndStateDiffInOverride(_) | @@ -165,9 +171,10 @@ impl From for ErrorObject<'static> { EthApiError::Unsupported(msg) => internal_rpc_err(msg), EthApiError::InternalJsTracerError(msg) => internal_rpc_err(msg), EthApiError::InvalidParams(msg) => invalid_params_rpc_err(msg), - err @ EthApiError::ExecutionTimedOut(_) => { - rpc_error_with_code(CALL_EXECUTION_FAILED_CODE, err.to_string()) - } + err @ EthApiError::ExecutionTimedOut(_) => rpc_error_with_code( + jsonrpsee_types::error::CALL_EXECUTION_FAILED_CODE, + err.to_string(), + ), err @ EthApiError::InternalBlockingTaskError | err @ EthApiError::InternalEthError => { internal_rpc_err(err.to_string()) } @@ -353,6 +360,15 @@ pub enum RpcInvalidTransactionError { /// Blob transaction is a create transaction #[error("blob transaction is a create transaction")] BlobTransactionIsCreate, + /// EOF crate should have `to` address + #[error("EOF crate should have `to` address")] + EofCrateShouldHaveToAddress, + /// EIP-7702 is not enabled. + #[error("EIP-7702 authorization list not supported")] + AuthorizationListNotSupported, + /// EIP-7702 transaction has invalid fields set. 
+ #[error("EIP-7702 authorization list has invalid fields")] + AuthorizationListInvalidFields, /// Optimism related error #[error(transparent)] #[cfg(feature = "optimism")] @@ -386,7 +402,7 @@ impl RpcInvalidTransactionError { /// Converts the halt error /// /// Takes the configured gas limit of the transaction which is attached to the error - pub(crate) const fn halt(reason: HaltReason, gas_limit: u64) -> Self { + pub const fn halt(reason: HaltReason, gas_limit: u64) -> Self { match reason { HaltReason::OutOfGas(err) => Self::out_of_gas(err, gas_limit), HaltReason::NonceOverflow => Self::NonceMaxValue, @@ -395,7 +411,7 @@ impl RpcInvalidTransactionError { } /// Converts the out of gas error - pub(crate) const fn out_of_gas(reason: OutOfGasError, gas_limit: u64) -> Self { + pub const fn out_of_gas(reason: OutOfGasError, gas_limit: u64) -> Self { match reason { OutOfGasError::Basic => Self::BasicOutOfGas(gas_limit), OutOfGasError::Memory | OutOfGasError::MemoryLimit => Self::MemoryOutOfGas(gas_limit), @@ -405,7 +421,7 @@ impl RpcInvalidTransactionError { } } -impl From for ErrorObject<'static> { +impl From for jsonrpsee_types::error::ErrorObject<'static> { fn from(err: RpcInvalidTransactionError) -> Self { match err { RpcInvalidTransactionError::Revert(revert) => { @@ -447,6 +463,13 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::BlobVersionNotSupported => Self::BlobHashVersionMismatch, InvalidTransaction::TooManyBlobs { max, have } => Self::TooManyBlobs { max, have }, InvalidTransaction::BlobCreateTransaction => Self::BlobTransactionIsCreate, + InvalidTransaction::EofCrateShouldHaveToAddress => Self::EofCrateShouldHaveToAddress, + InvalidTransaction::AuthorizationListNotSupported => { + Self::AuthorizationListNotSupported + } + InvalidTransaction::AuthorizationListInvalidFields => { + Self::AuthorizationListInvalidFields + } #[cfg(feature = "optimism")] InvalidTransaction::DepositSystemTxPostRegolith => { Self::Optimism(OptimismInvalidTransactionError::DepositSystemTxPostRegolith) @@ -455,8 +478,6 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::HaltedDepositPostRegolith => { Self::Optimism(OptimismInvalidTransactionError::HaltedDepositPostRegolith) } - // TODO(EOF) - InvalidTransaction::EofCrateShouldHaveToAddress => todo!("EOF"), } } } @@ -580,7 +601,7 @@ pub enum RpcPoolError { Other(Box), } -impl From for ErrorObject<'static> { +impl From for jsonrpsee_types::error::ErrorObject<'static> { fn from(error: RpcPoolError) -> Self { match error { RpcPoolError::Invalid(err) => err.into(), @@ -655,7 +676,7 @@ pub enum SignError { /// Converts the evm [`ExecutionResult`] into a result where `Ok` variant is the output bytes if it /// is [`ExecutionResult::Success`]. -pub(crate) fn ensure_success(result: ExecutionResult) -> EthResult { +pub fn ensure_success(result: ExecutionResult) -> EthResult { match result { ExecutionResult::Success { output, .. } => Ok(output.into_data()), ExecutionResult::Revert { output, .. } => { diff --git a/crates/rpc/rpc/src/eth/api/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs similarity index 98% rename from crates/rpc/rpc/src/eth/api/fee_history.rs rename to crates/rpc/rpc-eth-types/src/fee_history.rs index 626c67037..fef2dc9ea 100644 --- a/crates/rpc/rpc/src/eth/api/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -1,6 +1,11 @@ //! 
Consists of types adjacent to the fee history cache and its configs -use crate::eth::{cache::EthStateCache, error::EthApiError}; +use std::{ + collections::{BTreeMap, VecDeque}, + fmt::Debug, + sync::{atomic::Ordering::SeqCst, Arc}, +}; + use futures::{ future::{Fuse, FusedFuture}, FutureExt, Stream, StreamExt, }; @@ -13,16 +18,14 @@ use reth_primitives::{ Receipt, SealedBlock, TransactionSigned, B256, }; use reth_provider::{BlockReaderIdExt, CanonStateNotification, ChainSpecProvider}; -use reth_rpc_server_types::constants::gas_oracle::MAX_HEADER_HISTORY; use reth_rpc_types::TxGasAndReward; use serde::{Deserialize, Serialize}; -use std::{ - collections::{BTreeMap, VecDeque}, - fmt::Debug, - sync::{atomic::Ordering::SeqCst, Arc}, -}; use tracing::trace; +use reth_rpc_server_types::constants::gas_oracle::MAX_HEADER_HISTORY; + +use super::{EthApiError, EthStateCache}; + /// Contains cached fee history entries for blocks. /// /// Its purpose is to provide cached data for `eth_feeHistory`. @@ -165,7 +168,7 @@ impl FeeHistoryCache { } /// Settings for the [`FeeHistoryCache`]. -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct FeeHistoryCacheConfig { /// Max number of blocks in cache. @@ -263,7 +266,7 @@ pub async fn fee_history_cache_new_blocks_task( /// the corresponding rewards for the transactions at each percentile. /// /// The results are returned as a vector of U256 values. -pub(crate) fn calculate_reward_percentiles_for_block( +pub fn calculate_reward_percentiles_for_block( percentiles: &[f64], gas_used: u64, base_fee_per_gas: u64, diff --git a/crates/rpc/rpc/src/eth/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs similarity index 92% rename from crates/rpc/rpc/src/eth/gas_oracle.rs rename to crates/rpc/rpc-eth-types/src/gas_oracle.rs index bb44af67b..92226748c 100644 --- a/crates/rpc/rpc/src/eth/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -1,22 +1,30 @@ //! An implementation of the eth gas price oracle, used for providing gas price estimates based on //! previous blocks. -use crate::eth::{ - cache::EthStateCache, - error::{EthApiError, EthResult, RpcInvalidTransactionError}, -}; -use derive_more::{Deref, DerefMut}; +use std::fmt::{self, Debug, Formatter}; + +use derive_more::{Deref, DerefMut, From, Into}; use reth_primitives::{constants::GWEI_TO_WEI, BlockNumberOrTag, B256, U256}; use reth_provider::BlockReaderIdExt; -use reth_rpc_server_types::constants::gas_oracle::*; +use reth_rpc_server_types::constants; use schnellru::{ByLength, LruMap}; use serde::{Deserialize, Serialize}; -use std::fmt::{self, Debug, Formatter}; use tokio::sync::Mutex; use tracing::warn; +use reth_rpc_server_types::constants::gas_oracle::{ + DEFAULT_GAS_PRICE_BLOCKS, DEFAULT_GAS_PRICE_PERCENTILE, DEFAULT_IGNORE_GAS_PRICE, + DEFAULT_MAX_GAS_PRICE, MAX_HEADER_HISTORY, SAMPLE_NUMBER, +}; + +use super::{EthApiError, EthResult, EthStateCache, RpcInvalidTransactionError}; + +/// The default gas limit for `eth_call` and adjacent calls. See +/// [`RPC_DEFAULT_GAS_CAP`](constants::gas_oracle::RPC_DEFAULT_GAS_CAP).
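+/// +/// A quick sanity check (hypothetical snippet, not part of this PR): +/// +/// ```ignore +/// let cap: u64 = GasCap::default().into(); +/// assert_eq!(cap, 50_000_000); +/// ```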
+pub const RPC_DEFAULT_GAS_CAP: GasCap = GasCap(constants::gas_oracle::RPC_DEFAULT_GAS_CAP); + /// Settings for the [`GasPriceOracle`] -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GasPriceOracleConfig { /// The number of populated blocks to produce the gas price estimate @@ -73,7 +81,7 @@ pub struct GasPriceOracle<Provider> { impl<Provider> GasPriceOracle<Provider> where - Provider: BlockReaderIdExt + 'static, + Provider: BlockReaderIdExt, { /// Creates and returns the [`GasPriceOracle`]. pub fn new( @@ -286,6 +294,16 @@ impl Default for GasPriceOracleResult { } } +/// Wrapper type for the gas limit. +#[derive(Debug, Clone, Copy, From, Into)] +pub struct GasCap(pub u64); + +impl Default for GasCap { + fn default() -> Self { + RPC_DEFAULT_GAS_CAP + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/rpc/rpc/src/eth/id_provider.rs b/crates/rpc/rpc-eth-types/src/id_provider.rs similarity index 84% rename from crates/rpc/rpc/src/eth/id_provider.rs rename to crates/rpc/rpc-eth-types/src/id_provider.rs index 6691e13a9..642d87578 100644 --- a/crates/rpc/rpc/src/eth/id_provider.rs +++ b/crates/rpc/rpc-eth-types/src/id_provider.rs @@ -1,14 +1,19 @@ -use jsonrpsee::types::SubscriptionId; +//! Helper type for `reth_rpc_eth_api::EthPubSubApiServer` implementation. +//! +//! Generates IDs for tracking subscriptions. + use std::fmt::Write; -/// An [`IdProvider`](jsonrpsee::core::traits::IdProvider) for ethereum subscription ids. +use jsonrpsee_types::SubscriptionId; + +/// An [`IdProvider`](jsonrpsee_core::traits::IdProvider) for ethereum subscription ids. /// /// Returns new hex-string [QUANTITY](https://ethereum.org/en/developers/docs/apis/json-rpc/#quantities-encoding) ids #[derive(Debug, Clone, Copy, Default)] #[non_exhaustive] pub struct EthSubscriptionIdProvider; -impl jsonrpsee::core::traits::IdProvider for EthSubscriptionIdProvider { +impl jsonrpsee_core::traits::IdProvider for EthSubscriptionIdProvider { fn next_id(&self) -> SubscriptionId<'static> { to_quantity(rand::random::<u128>()) } diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs new file mode 100644 index 000000000..fb9901dd0 --- /dev/null +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -0,0 +1,36 @@ +//! Reth RPC server types, used in server implementation of `eth` namespace API.
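+//! +//! A rough sketch of how these pieces are meant to be wired together (hypothetical glue code, not part of this PR; `provider` and `evm_config` are assumed to exist): +//! +//! ```ignore +//! // Spawn the cache service, then share its handle with the fee history cache and the gas price oracle. +//! let cache = EthStateCache::spawn(provider.clone(), EthStateCacheConfig::default(), evm_config); +//! let fee_history = FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); +//! let oracle = GasPriceOracle::new(provider, GasPriceOracleConfig::default(), cache); +//! ```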
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +pub mod cache; +pub mod error; +pub mod fee_history; +pub mod gas_oracle; +pub mod id_provider; +pub mod logs_utils; +pub mod pending_block; +pub mod receipt; +pub mod revm_utils; +pub mod transaction; +pub mod utils; + +pub use cache::{ + config::EthStateCacheConfig, db::StateCacheDb, multi_consumer::MultiConsumerLruCache, + EthStateCache, +}; +pub use error::{EthApiError, EthResult, RevertError, RpcInvalidTransactionError, SignError}; +pub use fee_history::{FeeHistoryCache, FeeHistoryCacheConfig, FeeHistoryEntry}; +pub use gas_oracle::{ + GasCap, GasPriceOracle, GasPriceOracleConfig, GasPriceOracleResult, RPC_DEFAULT_GAS_CAP, +}; +pub use id_provider::EthSubscriptionIdProvider; +pub use logs_utils::EthFilterError; +pub use pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; +pub use receipt::ReceiptBuilder; +pub use transaction::TransactionSource; diff --git a/crates/rpc/rpc/src/eth/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs similarity index 78% rename from crates/rpc/rpc/src/eth/logs_utils.rs rename to crates/rpc/rpc-eth-types/src/logs_utils.rs index c57ce5fcb..5cd5fa789 100644 --- a/crates/rpc/rpc/src/eth/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -1,12 +1,66 @@ -use super::filter::FilterError; -use alloy_primitives::TxHash; +//! Helper functions for `reth_rpc_eth_api::EthFilterApiServer` implementation. +//! +//! Log parsing for building filter. + use reth_chainspec::ChainInfo; -use reth_primitives::{BlockNumHash, Receipt}; +use reth_primitives::{BlockNumHash, Receipt, TxHash}; use reth_provider::{BlockReader, ProviderError}; -use reth_rpc_types::{FilteredParams, Log}; +use reth_rpc_server_types::result::rpc_error_with_code; +use reth_rpc_types::{FilterId, FilteredParams, Log}; + +use crate::EthApiError; + +/// Errors that can occur in the handler implementation +#[derive(Debug, thiserror::Error)] +pub enum EthFilterError { + /// Filter not found. + #[error("filter not found")] + FilterNotFound(FilterId), + /// Invalid block range. + #[error("invalid block range params")] + InvalidBlockRangeParams, + /// Query scope is too broad. + #[error("query exceeds max block range {0}")] + QueryExceedsMaxBlocks(u64), + /// Query result is too large. + #[error("query exceeds max results {0}")] + QueryExceedsMaxResults(usize), + /// Error serving request in `eth_` namespace. + #[error(transparent)] + EthAPIError(#[from] EthApiError), + /// Error thrown when a spawned task failed to deliver a response. 
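+ /// This usually means the task's response channel was closed before a result could be sent.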
+ #[error("internal filter error")] + InternalError, +} + +// convert the error +impl From for jsonrpsee_types::error::ErrorObject<'static> { + fn from(err: EthFilterError) -> Self { + match err { + EthFilterError::FilterNotFound(_) => { + rpc_error_with_code(jsonrpsee_types::error::INVALID_PARAMS_CODE, "filter not found") + } + err @ EthFilterError::InternalError => { + rpc_error_with_code(jsonrpsee_types::error::INTERNAL_ERROR_CODE, err.to_string()) + } + EthFilterError::EthAPIError(err) => err.into(), + err @ EthFilterError::InvalidBlockRangeParams | + err @ EthFilterError::QueryExceedsMaxBlocks(_) | + err @ EthFilterError::QueryExceedsMaxResults(_) => { + rpc_error_with_code(jsonrpsee_types::error::INVALID_PARAMS_CODE, err.to_string()) + } + } + } +} + +impl From for EthFilterError { + fn from(err: ProviderError) -> Self { + Self::EthAPIError(err.into()) + } +} /// Returns all matching of a block's receipts when the transaction hashes are known. -pub(crate) fn matching_block_logs_with_tx_hashes<'a, I>( +pub fn matching_block_logs_with_tx_hashes<'a, I>( filter: &FilteredParams, block_num_hash: BlockNumHash, tx_hashes_and_receipts: I, @@ -43,7 +97,7 @@ where /// Appends all matching logs of a block's receipts. /// If the log matches, look up the corresponding transaction hash. -pub(crate) fn append_matching_block_logs( +pub fn append_matching_block_logs( all_logs: &mut Vec, provider: impl BlockReader, filter: &FilteredParams, @@ -51,7 +105,7 @@ pub(crate) fn append_matching_block_logs( receipts: &[Receipt], removed: bool, block_timestamp: u64, -) -> Result<(), FilterError> { +) -> Result<(), EthFilterError> { // Tracks the index of a log in the entire block. let mut log_index: u64 = 0; @@ -110,7 +164,7 @@ pub(crate) fn append_matching_block_logs( } /// Returns true if the log matches the filter and should be included -pub(crate) fn log_matches_filter( +pub fn log_matches_filter( block: BlockNumHash, log: &reth_primitives::Log, params: &FilteredParams, @@ -127,7 +181,7 @@ pub(crate) fn log_matches_filter( } /// Computes the block range based on the filter range and current block numbers -pub(crate) fn get_filter_block_range( +pub fn get_filter_block_range( from_block: Option, to_block: Option, start_block: u64, diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs new file mode 100644 index 000000000..64dd2aeb5 --- /dev/null +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -0,0 +1,123 @@ +//! Helper types for `reth_rpc_eth_api::EthApiServer` implementation. +//! +//! Types used in block building. + +use std::{fmt, time::Instant}; + +use derive_more::Constructor; +use reth_chainspec::ChainSpec; +use reth_primitives::{BlockId, BlockNumberOrTag, SealedBlockWithSenders, SealedHeader, B256}; +use reth_provider::ProviderError; +use reth_revm::state_change::apply_blockhashes_update; +use revm_primitives::{ + db::{Database, DatabaseCommit}, + BlockEnv, CfgEnvWithHandlerCfg, +}; + +use super::{EthApiError, EthResult}; + +/// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block +#[derive(Debug, Clone, Constructor)] +pub struct PendingBlockEnv { + /// Configured [`CfgEnvWithHandlerCfg`] for the pending block. + pub cfg: CfgEnvWithHandlerCfg, + /// Configured [`BlockEnv`] for the pending block. + pub block_env: BlockEnv, + /// Origin block for the config + pub origin: PendingBlockEnvOrigin, +} + +/// Apply the [EIP-2935](https://eips.ethereum.org/EIPS/eip-2935) pre block state transitions. 
+/// +/// This operates directly on the given DB, using the initialized [`BlockEnv`] of the +/// pending block for the timestamp. +/// +/// This uses [`apply_blockhashes_update`]. +pub fn pre_block_blockhashes_update<DB: Database + DatabaseCommit>( + db: &mut DB, + chain_spec: &ChainSpec, + initialized_block_env: &BlockEnv, + block_number: u64, + parent_block_hash: B256, +) -> EthResult<()> +where + DB::Error: fmt::Display, +{ + apply_blockhashes_update( + db, + chain_spec, + initialized_block_env.timestamp.to::<u64>(), + block_number, + parent_block_hash, + ) + .map_err(|err| EthApiError::Internal(err.into())) +} + +/// The origin for a configured [`PendingBlockEnv`] +#[derive(Clone, Debug)] +pub enum PendingBlockEnvOrigin { + /// The pending block as received from the CL. + ActualPending(SealedBlockWithSenders), + /// The _modified_ header of the latest block. + /// + /// This derives the pending state based on the latest header by modifying: + /// - the timestamp + /// - the block number + /// - fees + DerivedFromLatest(SealedHeader), +} + +impl PendingBlockEnvOrigin { + /// Returns true if the origin is the actual pending block as received from the CL. + pub const fn is_actual_pending(&self) -> bool { + matches!(self, Self::ActualPending(_)) + } + + /// Consumes the type and returns the actual pending block. + pub fn into_actual_pending(self) -> Option<SealedBlockWithSenders> { + match self { + Self::ActualPending(block) => Some(block), + _ => None, + } + } + + /// Returns the [`BlockId`] that represents the state of the block. + /// + /// If this is the actual pending block, the state is the "Pending" tag, otherwise we can safely + /// identify the block by its hash (latest block). + pub fn state_block_id(&self) -> BlockId { + match self { + Self::ActualPending(_) => BlockNumberOrTag::Pending.into(), + Self::DerivedFromLatest(header) => BlockId::Hash(header.hash().into()), + } + } + + /// Returns the hash of the block the pending block should be built on. + /// + /// For the [`PendingBlockEnvOrigin::ActualPending`] this is the parent hash of the block. + /// For the [`PendingBlockEnvOrigin::DerivedFromLatest`] this is the hash of the _latest_ + /// header. + pub fn build_target_hash(&self) -> B256 { + match self { + Self::ActualPending(block) => block.parent_hash, + Self::DerivedFromLatest(header) => header.hash(), + } + } + + /// Returns the header this pending block is based on. + pub fn header(&self) -> &SealedHeader { + match self { + Self::ActualPending(block) => &block.header, + Self::DerivedFromLatest(header) => header, + } + } +} + +/// In memory pending block for `pending` tag +#[derive(Debug, Constructor)] +pub struct PendingBlock { + /// The cached pending block + pub block: SealedBlockWithSenders, + /// Timestamp when the pending block is considered outdated + pub expires_at: Instant, +} diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs new file mode 100644 index 000000000..cd3fd1ed5 --- /dev/null +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -0,0 +1,126 @@ +//! RPC receipt response builder, extends a layer one receipt with layer two data. + +use reth_primitives::{Address, Receipt, TransactionMeta, TransactionSigned, TxKind}; +use reth_rpc_types::{ + AnyReceiptEnvelope, AnyTransactionReceipt, Log, OtherFields, ReceiptWithBloom, + TransactionReceipt, WithOtherFields, +}; +use revm_primitives::calc_blob_gasprice; + +use super::{EthApiError, EthResult}; + +/// Receipt response builder.
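+/// +/// A hypothetical usage sketch (not part of this PR): +/// +/// ```ignore +/// // `all_receipts` is required to derive this transaction's own gas usage and log indices. +/// let receipt = ReceiptBuilder::new(&transaction, meta, &receipt, &all_receipts)? +///     .add_other_fields(extra_l2_fields) +///     .build(); +/// ```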
+#[derive(Debug)] +pub struct ReceiptBuilder { + /// The base response body, contains L1 fields. + base: TransactionReceipt<AnyReceiptEnvelope<Log>>, + /// Additional L2 fields. + other: OtherFields, +} + +impl ReceiptBuilder { + /// Returns a new builder with the base response body (L1 fields) set. + /// + /// Note: This requires _all_ block receipts because we need to calculate the gas used by the + /// transaction. + pub fn new( + transaction: &TransactionSigned, + meta: TransactionMeta, + receipt: &Receipt, + all_receipts: &[Receipt], + ) -> EthResult<Self> { + // Note: we assume this transaction is valid, because it's mined (or part of a pending + // block), so we don't need to check for pre-EIP-2 signatures + let from = transaction + .recover_signer_unchecked() + .ok_or(EthApiError::InvalidTransactionSignature)?; + + // get the previous transaction cumulative gas used + let gas_used = if meta.index == 0 { + receipt.cumulative_gas_used + } else { + let prev_tx_idx = (meta.index - 1) as usize; + all_receipts + .get(prev_tx_idx) + .map(|prev_receipt| receipt.cumulative_gas_used - prev_receipt.cumulative_gas_used) + .unwrap_or_default() + }; + + let blob_gas_used = transaction.transaction.blob_gas_used(); + // Blob gas price should only be present if the transaction is a blob transaction + let blob_gas_price = + blob_gas_used.and_then(|_| meta.excess_blob_gas.map(calc_blob_gasprice)); + let logs_bloom = receipt.bloom_slow(); + + // get number of logs in the block + let mut num_logs = 0; + for prev_receipt in all_receipts.iter().take(meta.index as usize) { + num_logs += prev_receipt.logs.len(); + } + + let mut logs = Vec::with_capacity(receipt.logs.len()); + for (tx_log_idx, log) in receipt.logs.iter().enumerate() { + let rpclog = Log { + inner: log.clone(), + block_hash: Some(meta.block_hash), + block_number: Some(meta.block_number), + block_timestamp: Some(meta.timestamp), + transaction_hash: Some(meta.tx_hash), + transaction_index: Some(meta.index), + log_index: Some((num_logs + tx_log_idx) as u64), + removed: false, + }; + logs.push(rpclog); + } + + let rpc_receipt = reth_rpc_types::Receipt { + status: receipt.success.into(), + cumulative_gas_used: receipt.cumulative_gas_used as u128, + logs, + }; + + let (contract_address, to) = match transaction.transaction.kind() { + TxKind::Create => (Some(from.create(transaction.transaction.nonce())), None), + TxKind::Call(addr) => (None, Some(Address(*addr))), + }; + + #[allow(clippy::needless_update)] + let base = TransactionReceipt { + inner: AnyReceiptEnvelope { + inner: ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, + r#type: transaction.transaction.tx_type().into(), + }, + transaction_hash: meta.tx_hash, + transaction_index: Some(meta.index), + block_hash: Some(meta.block_hash), + block_number: Some(meta.block_number), + from, + to, + gas_used: gas_used as u128, + contract_address, + effective_gas_price: transaction.effective_gas_price(meta.base_fee), + // TODO pre-byzantium receipts have a post-transaction state root + state_root: None, + // EIP-4844 fields + blob_gas_price, + blob_gas_used: blob_gas_used.map(u128::from), + }; + + Ok(Self { base, other: Default::default() }) + } + + /// Adds fields to response body. + pub fn add_other_fields(mut self, mut fields: OtherFields) -> Self { + self.other.append(&mut fields); + self + } + + /// Builds a receipt response from the base response body, and any set additional fields.
+ pub fn build(self) -> AnyTransactionReceipt { + let Self { base, other } = self; + let mut res = WithOtherFields::new(base); + res.other = other; + + res + } +} diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc-eth-types/src/revm_utils.rs similarity index 90% rename from crates/rpc/rpc/src/eth/revm_utils.rs rename to crates/rpc/rpc-eth-types/src/revm_utils.rs index 938de2ebc..2b13aef27 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc-eth-types/src/revm_utils.rs @@ -1,14 +1,8 @@ //! utilities for working with revm -use crate::eth::error::{EthApiError, EthResult, RpcInvalidTransactionError}; -#[cfg(feature = "optimism")] -use reth_primitives::revm::env::fill_op_tx_env; -#[cfg(not(feature = "optimism"))] -use reth_primitives::revm::env::fill_tx_env; -use reth_primitives::{ - revm::env::fill_tx_env_with_recovered, Address, TransactionSigned, - TransactionSignedEcRecovered, TxHash, TxKind, B256, U256, -}; +use std::cmp::min; + +use reth_primitives::{Address, TxKind, B256, U256}; use reth_rpc_types::{ state::{AccountOverride, EvmOverrides, StateOverride}, BlockOverrides, TransactionRequest, @@ -25,58 +19,9 @@ use revm::{ }, Database, }; -use std::cmp::min; use tracing::trace; -/// Helper type to work with different transaction types when configuring the EVM env. -/// -/// This makes it easier to handle errors. -pub trait FillableTransaction { - /// Returns the hash of the transaction. - fn hash(&self) -> TxHash; - - /// Fill the transaction environment with the given transaction. - fn try_fill_tx_env(&self, tx_env: &mut TxEnv) -> EthResult<()>; -} - -impl FillableTransaction for TransactionSignedEcRecovered { - fn hash(&self) -> TxHash { - self.hash - } - - fn try_fill_tx_env(&self, tx_env: &mut TxEnv) -> EthResult<()> { - #[cfg(not(feature = "optimism"))] - fill_tx_env_with_recovered(tx_env, self); - - #[cfg(feature = "optimism")] - { - let mut envelope_buf = Vec::with_capacity(self.length_without_header()); - self.encode_enveloped(&mut envelope_buf); - fill_tx_env_with_recovered(tx_env, self, envelope_buf.into()); - } - Ok(()) - } -} -impl FillableTransaction for TransactionSigned { - fn hash(&self) -> TxHash { - self.hash - } - - fn try_fill_tx_env(&self, tx_env: &mut TxEnv) -> EthResult<()> { - let signer = - self.recover_signer().ok_or_else(|| EthApiError::InvalidTransactionSignature)?; - #[cfg(not(feature = "optimism"))] - fill_tx_env(tx_env, self, signer); - - #[cfg(feature = "optimism")] - { - let mut envelope_buf = Vec::with_capacity(self.length_without_header()); - self.encode_enveloped(&mut envelope_buf); - fill_op_tx_env(tx_env, self, signer, envelope_buf.into()); - } - Ok(()) - } -} +use super::{EthApiError, EthResult, RpcInvalidTransactionError}; /// Returns the addresses of the precompiles corresponding to the `SpecId`. 
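+/// Each hardfork may widen this set; Cancun, for example, adds the EIP-4844 KZG point evaluation precompile.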
#[inline] @@ -223,12 +168,11 @@ pub fn create_txn_env(block_env: &BlockEnv, request: TransactionRequest) -> EthR value: value.unwrap_or_default(), data: input.try_into_unique_input()?.unwrap_or_default(), chain_id, - access_list: access_list - .map(reth_rpc_types::AccessList::into_flattened) - .unwrap_or_default(), + access_list: access_list.unwrap_or_default().into(), // EIP-4844 fields blob_hashes: blob_versioned_hashes.unwrap_or_default(), max_fee_per_blob_gas, + authorization_list: None, #[cfg(feature = "optimism")] optimism: OptimismFields { enveloped_tx: Some(Bytes::new()), ..Default::default() }, #[cfg(feature = "bsc")] diff --git a/crates/rpc/rpc-eth-types/src/transaction.rs b/crates/rpc/rpc-eth-types/src/transaction.rs new file mode 100644 index 000000000..32c81d396 --- /dev/null +++ b/crates/rpc/rpc-eth-types/src/transaction.rs @@ -0,0 +1,96 @@ +//! Helper types for `reth_rpc_eth_api::EthApiServer` implementation. +//! +//! Transaction wrapper that labels transaction with its origin. + +use reth_primitives::{TransactionSignedEcRecovered, B256}; +use reth_rpc_types::{Transaction, TransactionInfo}; +use reth_rpc_types_compat::transaction::from_recovered_with_block_context; + +/// Represents from where a transaction was fetched. +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum TransactionSource { + /// Transaction exists in the pool (Pending) + Pool(TransactionSignedEcRecovered), + /// Transaction already included in a block + /// + /// This can be a historical block or a pending block (received from the CL) + Block { + /// Transaction fetched via provider + transaction: TransactionSignedEcRecovered, + /// Index of the transaction in the block + index: u64, + /// Hash of the block. + block_hash: B256, + /// Number of the block. + block_number: u64, + /// base fee of the block. + base_fee: Option, + }, +} + +// === impl TransactionSource === + +impl TransactionSource { + /// Consumes the type and returns the wrapped transaction. + pub fn into_recovered(self) -> TransactionSignedEcRecovered { + self.into() + } + + /// Returns the transaction and block related info, if not pending + pub fn split(self) -> (TransactionSignedEcRecovered, TransactionInfo) { + match self { + Self::Pool(tx) => { + let hash = tx.hash(); + ( + tx, + TransactionInfo { + hash: Some(hash), + index: None, + block_hash: None, + block_number: None, + base_fee: None, + }, + ) + } + Self::Block { transaction, index, block_hash, block_number, base_fee } => { + let hash = transaction.hash(); + ( + transaction, + TransactionInfo { + hash: Some(hash), + index: Some(index), + block_hash: Some(block_hash), + block_number: Some(block_number), + base_fee: base_fee.map(u128::from), + }, + ) + } + } + } +} + +impl From for TransactionSignedEcRecovered { + fn from(value: TransactionSource) -> Self { + match value { + TransactionSource::Pool(tx) => tx, + TransactionSource::Block { transaction, .. 
} => transaction, + } + } +} + +impl From for Transaction { + fn from(value: TransactionSource) -> Self { + match value { + TransactionSource::Pool(tx) => reth_rpc_types_compat::transaction::from_recovered(tx), + TransactionSource::Block { transaction, index, block_hash, block_number, base_fee } => { + from_recovered_with_block_context( + transaction, + block_hash, + block_number, + base_fee, + index as usize, + ) + } + } + } +} diff --git a/crates/rpc/rpc/src/eth/utils.rs b/crates/rpc/rpc-eth-types/src/utils.rs similarity index 79% rename from crates/rpc/rpc/src/eth/utils.rs rename to crates/rpc/rpc-eth-types/src/utils.rs index a4291c4b9..a35708396 100644 --- a/crates/rpc/rpc/src/eth/utils.rs +++ b/crates/rpc/rpc-eth-types/src/utils.rs @@ -1,14 +1,13 @@ //! Commonly used code snippets -use crate::eth::error::{EthApiError, EthResult}; use reth_primitives::{Bytes, PooledTransactionsElement, PooledTransactionsElementEcRecovered}; +use super::{EthApiError, EthResult}; + /// Recovers a [`PooledTransactionsElementEcRecovered`] from an enveloped encoded byte stream. /// /// See [`PooledTransactionsElement::decode_enveloped`] -pub(crate) fn recover_raw_transaction( - data: Bytes, -) -> EthResult { +pub fn recover_raw_transaction(data: Bytes) -> EthResult { if data.is_empty() { return Err(EthApiError::EmptyRawTransactionData) } diff --git a/crates/rpc/rpc-layer/src/auth_layer.rs b/crates/rpc/rpc-layer/src/auth_layer.rs index 0a11ae802..255273194 100644 --- a/crates/rpc/rpc-layer/src/auth_layer.rs +++ b/crates/rpc/rpc-layer/src/auth_layer.rs @@ -232,7 +232,7 @@ mod tests { let body = r#"{"jsonrpc": "2.0", "method": "greet_melkor", "params": [], "id": 1}"#; let response = client - .post(&format!("http://{AUTH_ADDR}:{AUTH_PORT}")) + .post(format!("http://{AUTH_ADDR}:{AUTH_PORT}")) .bearer_auth(jwt.unwrap_or_default()) .body(body) .header(header::CONTENT_TYPE, "application/json") diff --git a/crates/rpc/rpc-server-types/Cargo.toml b/crates/rpc/rpc-server-types/Cargo.toml index ddecc0a49..628654eba 100644 --- a/crates/rpc/rpc-server-types/Cargo.toml +++ b/crates/rpc/rpc-server-types/Cargo.toml @@ -12,9 +12,20 @@ description = "RPC server types and constants" workspace = true [dependencies] +reth-errors.workspace = true +reth-network-api.workspace = true +reth-primitives.workspace = true +reth-rpc-types.workspace = true + + # ethereum alloy-primitives.workspace = true +# rpc +jsonrpsee-core.workspace = true +jsonrpsee-types.workspace = true + # misc strum = { workspace = true, features = ["derive"] } serde = { workspace = true, features = ["derive"] } + diff --git a/crates/rpc/rpc-server-types/src/constants.rs b/crates/rpc/rpc-server-types/src/constants.rs index 3784d7508..e3c129bf6 100644 --- a/crates/rpc/rpc-server-types/src/constants.rs +++ b/crates/rpc/rpc-server-types/src/constants.rs @@ -26,6 +26,9 @@ pub fn default_max_tracing_requests() -> usize { .map_or(25, |cpus| max(cpus.get().saturating_sub(RESERVED), RESERVED)) } +/// The default number of getproof calls we are allowing to run concurrently. +pub const DEFAULT_PROOF_PERMITS: usize = 25; + /// The default IPC endpoint #[cfg(windows)] pub const DEFAULT_IPC_ENDPOINT: &str = r"\\.\pipe\reth.ipc"; @@ -42,6 +45,12 @@ pub const DEFAULT_ENGINE_API_IPC_ENDPOINT: &str = r"\\.\pipe\reth_engine_api.ipc #[cfg(not(windows))] pub const DEFAULT_ENGINE_API_IPC_ENDPOINT: &str = "/tmp/reth_engine_api.ipc"; +/// The default eth historical proof window. +pub const DEFAULT_ETH_PROOF_WINDOW: u64 = 0; + +/// Maximum eth historical proof window. 
Equivalent to roughly one month of data. +pub const MAX_ETH_PROOF_WINDOW: u64 = 216_000; + /// GPO specific constants pub mod gas_oracle { use alloy_primitives::U256; @@ -64,6 +73,20 @@ pub mod gas_oracle { /// The default minimum gas price, under which the sample will be ignored pub const DEFAULT_IGNORE_GAS_PRICE: U256 = U256::from_limbs([2u64, 0, 0, 0]); + + /// The default gas limit for `eth_call` and adjacent calls. + /// + /// This is higher than the regular 30M block gas limit + /// ([`ETHEREUM_BLOCK_GAS_LIMIT`](reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT)) to allow + /// for more complex calls. + pub const RPC_DEFAULT_GAS_CAP: u64 = 50_000_000; + + /// Gas per transaction not creating a contract. + pub const MIN_TRANSACTION_GAS: u64 = 21_000u64; + /// Allowed error ratio for gas estimation. + /// Taken from Geth's implementation in order to pass the hive tests. + /// + pub const ESTIMATE_GAS_ERROR_RATIO: f64 = 0.015; } /// Cache specific constants diff --git a/crates/rpc/rpc-server-types/src/lib.rs b/crates/rpc/rpc-server-types/src/lib.rs index 4bdee53f8..c20b57881 100644 --- a/crates/rpc/rpc-server-types/src/lib.rs +++ b/crates/rpc/rpc-server-types/src/lib.rs @@ -10,6 +10,9 @@ /// Common RPC constants. pub mod constants; +pub mod result; mod module; pub use module::{RethRpcModule, RpcModuleSelection}; + +pub use result::ToRpcResult; diff --git a/crates/rpc/rpc/src/result.rs b/crates/rpc/rpc-server-types/src/result.rs similarity index 77% rename from crates/rpc/rpc/src/result.rs rename to crates/rpc/rpc-server-types/src/result.rs index f00c9e279..252c78f24 100644 --- a/crates/rpc/rpc/src/result.rs +++ b/crates/rpc/rpc-server-types/src/result.rs @@ -1,9 +1,10 @@ //! Additional helpers for converting errors. -use jsonrpsee::core::RpcResult; -use reth_rpc_types::engine::PayloadError; use std::fmt::Display; +use jsonrpsee_core::RpcResult; +use reth_rpc_types::engine::PayloadError; + /// Helper trait to easily convert various `Result` types into [`RpcResult`] pub trait ToRpcResult<Ok, Err>: Sized { /// Converts the error of the [Result] to an [`RpcResult`] via the `Err` [Display] impl. fn map_rpc_err<'a, F, M>(self, op: F) -> RpcResult<Ok> where F: FnOnce(Err) -> (i32, M, Option<&'a [u8]>), M: Into<String>; /// Converts this type into an [`RpcResult`] with the - /// [`jsonrpsee::types::error::INTERNAL_ERROR_CODE` and the given message. + /// [`jsonrpsee_types::error::INTERNAL_ERROR_CODE`] and the given message. fn map_internal_err<F, M>(self, op: F) -> RpcResult<Ok> where F: FnOnce(Err) -> M, M: Into<String>; /// Converts this type into an [`RpcResult`] with the - /// [`jsonrpsee::types::error::INTERNAL_ERROR_CODE`] and given message and data. + /// [`jsonrpsee_types::error::INTERNAL_ERROR_CODE`] and given message and data. fn map_internal_err_with_data<'a, F, M>(self, op: F) -> RpcResult<Ok> where F: FnOnce(Err) -> (M, &'a [u8]), @@ -46,7 +47,7 @@ macro_rules! impl_to_rpc_result { ($err:ty) => { impl<Ok> ToRpcResult<Ok, $err> for Result<Ok, $err> { #[inline] - fn map_rpc_err<'a, F, M>(self, op: F) -> jsonrpsee::core::RpcResult<Ok> + fn map_rpc_err<'a, F, M>(self, op: F) -> jsonrpsee_core::RpcResult<Ok> where F: FnOnce($err) -> (i32, M, Option<&'a [u8]>), M: Into<String>, @@ -61,7 +62,7 @@ macro_rules! impl_to_rpc_result { } #[inline] - fn map_internal_err<'a, F, M>(self, op: F) -> jsonrpsee::core::RpcResult<Ok> + fn map_internal_err<'a, F, M>(self, op: F) -> jsonrpsee_core::RpcResult<Ok> where F: FnOnce($err) -> M, M: Into<String>, @@ -70,7 +71,7 @@ macro_rules!
impl_to_rpc_result { } #[inline] - fn map_internal_err_with_data<'a, F, M>(self, op: F) -> jsonrpsee::core::RpcResult<Ok> + fn map_internal_err_with_data<'a, F, M>(self, op: F) -> jsonrpsee_core::RpcResult<Ok> where F: FnOnce($err) -> (M, &'a [u8]), M: Into<String>, @@ -85,7 +86,7 @@ macro_rules! impl_to_rpc_result { } #[inline] - fn with_message(self, msg: &str) -> jsonrpsee::core::RpcResult<Ok> { + fn with_message(self, msg: &str) -> jsonrpsee_core::RpcResult<Ok> { match self { Ok(t) => Ok(t), Err(err) => { @@ -104,46 +105,44 @@ impl_to_rpc_result!(reth_errors::ProviderError); impl_to_rpc_result!(reth_network_api::NetworkError); /// Constructs an invalid params JSON-RPC error. -pub(crate) fn invalid_params_rpc_err( +pub fn invalid_params_rpc_err( msg: impl Into<String>, -) -> jsonrpsee::types::error::ErrorObject<'static> { - rpc_err(jsonrpsee::types::error::INVALID_PARAMS_CODE, msg, None) +) -> jsonrpsee_types::error::ErrorObject<'static> { + rpc_err(jsonrpsee_types::error::INVALID_PARAMS_CODE, msg, None) } /// Constructs an internal JSON-RPC error. -pub(crate) fn internal_rpc_err( - msg: impl Into<String>, -) -> jsonrpsee::types::error::ErrorObject<'static> { - rpc_err(jsonrpsee::types::error::INTERNAL_ERROR_CODE, msg, None) +pub fn internal_rpc_err(msg: impl Into<String>) -> jsonrpsee_types::error::ErrorObject<'static> { + rpc_err(jsonrpsee_types::error::INTERNAL_ERROR_CODE, msg, None) } /// Constructs an internal JSON-RPC error with data -pub(crate) fn internal_rpc_err_with_data( +pub fn internal_rpc_err_with_data( msg: impl Into<String>, data: &[u8], -) -> jsonrpsee::types::error::ErrorObject<'static> { - rpc_err(jsonrpsee::types::error::INTERNAL_ERROR_CODE, msg, Some(data)) +) -> jsonrpsee_types::error::ErrorObject<'static> { + rpc_err(jsonrpsee_types::error::INTERNAL_ERROR_CODE, msg, Some(data)) } /// Constructs an internal JSON-RPC error with code and message -pub(crate) fn rpc_error_with_code( +pub fn rpc_error_with_code( code: i32, msg: impl Into<String>, -) -> jsonrpsee::types::error::ErrorObject<'static> { +) -> jsonrpsee_types::error::ErrorObject<'static> { rpc_err(code, msg, None) } /// Constructs a JSON-RPC error, consisting of `code`, `message` and optional `data`.
-pub(crate) fn rpc_err( +pub fn rpc_err( code: i32, msg: impl Into, data: Option<&[u8]>, -) -> jsonrpsee::types::error::ErrorObject<'static> { - jsonrpsee::types::error::ErrorObject::owned( +) -> jsonrpsee_types::error::ErrorObject<'static> { + jsonrpsee_types::error::ErrorObject::owned( code, msg.into(), data.map(|data| { - jsonrpsee::core::to_json_raw_value(&reth_primitives::hex::encode_prefixed(data)) + jsonrpsee_core::to_json_raw_value(&reth_primitives::hex::encode_prefixed(data)) .expect("serializing String can't fail") }), ) diff --git a/crates/rpc/rpc-testing-util/Cargo.toml b/crates/rpc/rpc-testing-util/Cargo.toml index 898fec038..8ab37d1b1 100644 --- a/crates/rpc/rpc-testing-util/Cargo.toml +++ b/crates/rpc/rpc-testing-util/Cargo.toml @@ -29,3 +29,4 @@ similar-asserts.workspace = true [dev-dependencies] tokio = { workspace = true, features = ["rt-multi-thread", "macros", "rt"] } +reth-rpc-eth-api.workspace = true diff --git a/crates/rpc/rpc-testing-util/tests/it/trace.rs b/crates/rpc/rpc-testing-util/tests/it/trace.rs index a6439f074..029e9fbbc 100644 --- a/crates/rpc/rpc-testing-util/tests/it/trace.rs +++ b/crates/rpc/rpc-testing-util/tests/it/trace.rs @@ -1,7 +1,7 @@ use futures::StreamExt; use jsonrpsee::http_client::HttpClientBuilder; -use reth_rpc_api::EthApiClient; use reth_rpc_api_testing_util::{debug::DebugApiExt, trace::TraceApiExt, utils::parse_env_url}; +use reth_rpc_eth_api::EthApiClient; use reth_rpc_types::trace::{ filter::TraceFilter, parity::TraceType, tracerequest::TraceCallRequest, }; diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index dacfab064..dc46687eb 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -8,8 +8,8 @@ use reth_primitives::{ }; use reth_rpc_types::engine::{ payload::{ExecutionPayloadBodyV1, ExecutionPayloadFieldV2, ExecutionPayloadInputV2}, - ExecutionPayload, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, - ExecutionPayloadV4, PayloadError, + ExecutionPayload, ExecutionPayloadBodyV2, ExecutionPayloadV1, ExecutionPayloadV2, + ExecutionPayloadV3, ExecutionPayloadV4, PayloadError, }; /// Converts [`ExecutionPayloadV1`] to [Block] @@ -68,6 +68,7 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result Result Result { - let ExecutionPayloadV4 { payload_inner, deposit_requests, withdrawal_requests } = payload; + let ExecutionPayloadV4 { + payload_inner, + deposit_requests, + withdrawal_requests, + consolidation_requests, + } = payload; let mut block = try_payload_v3_to_block(payload_inner)?; // attach requests with asc type identifiers @@ -105,6 +111,7 @@ pub fn try_payload_v4_to_block(payload: ExecutionPayloadV4) -> Result>(); let requests_root = proofs::calculate_requests_root(&requests); @@ -211,10 +218,10 @@ pub fn block_to_payload_v3(value: SealedBlock) -> (ExecutionPayloadV3, Option ExecutionPayloadV4 { - let (deposit_requests, withdrawal_requests) = + let (deposit_requests, withdrawal_requests, consolidation_requests) = value.requests.take().unwrap_or_default().into_iter().fold( - (Vec::new(), Vec::new()), - |(mut deposits, mut withdrawals), request| { + (Vec::new(), Vec::new(), Vec::new()), + |(mut deposits, mut withdrawals, mut consolidation_requests), request| { match request { Request::DepositRequest(r) => { deposits.push(r); @@ -222,16 +229,20 @@ pub fn block_to_payload_v4(mut value: SealedBlock) -> ExecutionPayloadV4 { Request::WithdrawalRequest(r) => { 
withdrawals.push(r); } + Request::ConsolidationRequest(r) => { + consolidation_requests.push(r); + } _ => {} }; - (deposits, withdrawals) + (deposits, withdrawals, consolidation_requests) }, ); ExecutionPayloadV4 { deposit_requests, withdrawal_requests, + consolidation_requests, payload_inner: block_to_payload_v3(value).0, } } @@ -368,6 +379,52 @@ pub fn convert_to_payload_body_v1(value: Block) -> ExecutionPayloadBodyV1 { } } +/// Converts [Block] to [`ExecutionPayloadBodyV2`] +pub fn convert_to_payload_body_v2(value: Block) -> ExecutionPayloadBodyV2 { + let transactions = value.body.into_iter().map(|tx| { + let mut out = Vec::new(); + tx.encode_enveloped(&mut out); + out.into() + }); + + let mut payload = ExecutionPayloadBodyV2 { + transactions: transactions.collect(), + withdrawals: value.withdrawals.map(Withdrawals::into_inner), + deposit_requests: None, + withdrawal_requests: None, + consolidation_requests: None, + }; + + if let Some(requests) = value.requests { + let (deposit_requests, withdrawal_requests, consolidation_requests) = + requests.into_iter().fold( + (Vec::new(), Vec::new(), Vec::new()), + |(mut deposits, mut withdrawals, mut consolidation_requests), request| { + match request { + Request::DepositRequest(r) => { + deposits.push(r); + } + Request::WithdrawalRequest(r) => { + withdrawals.push(r); + } + Request::ConsolidationRequest(r) => { + consolidation_requests.push(r); + } + _ => {} + }; + + (deposits, withdrawals, consolidation_requests) + }, + ); + + payload.deposit_requests = Some(deposit_requests); + payload.withdrawal_requests = Some(withdrawal_requests); + payload.consolidation_requests = Some(consolidation_requests); + } + + payload +} + /// Transforms a [`SealedBlock`] into a [`ExecutionPayloadV1`] pub fn execution_payload_from_sealed_block(value: SealedBlock) -> ExecutionPayloadV1 { let transactions = value.raw_transactions(); @@ -661,7 +718,8 @@ mod tests { "0x02f9021e8330182401843b9aca0085174876e80083030d40944242424242424242424242424242424242424242893635c9adc5dea00000b901a422895118000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000120d694d6a0b0103651aafd87db6c88297175d7317c6e6da53ccf706c3c991c91fd0000000000000000000000000000000000000000000000000000000000000030b0b1b3b51cf688ead965a954c5cc206ba4e76f3f8efac60656ae708a9aad63487a2ca1fb30ccaf2ebe1028a2b2886b1b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020002d2b75f4a27f78e585a4735a40ab2437eceb12ec39938a94dc785a54d625130000000000000000000000000000000000000000000000000000000000000060b9759766e9bb191b1c457ae1da6cdf71a23fb9d8bc9f845eaa49ee4af280b3b9720ac4d81e64b1b50a65db7b8b4e76f1176a12e19d293d75574600e99fbdfecc1ab48edaeeffb3226cd47691d24473821dad0c6ff3973f03e4aa89f418933a56c080a099dc5b94a51e9b91a6425b1fed9792863006496ab71a4178524819d7db0c5e88a0119748e62700234079d91ae80f4676f9e0f71b260e9b46ef9b4aff331d3c2318" ], "withdrawalRequests": [], - "withdrawals": [] + "withdrawals": [], + "consolidationRequests": [] }"#; let payload = serde_json::from_str::(s).unwrap(); diff --git a/crates/rpc/rpc-types/Cargo.toml b/crates/rpc/rpc-types/Cargo.toml index 648b8b24f..fa5c8b79c 100644 --- a/crates/rpc/rpc-types/Cargo.toml +++ b/crates/rpc/rpc-types/Cargo.toml @@ -16,24 +16,19 @@ workspace = true # ethereum alloy-primitives = { workspace = true, features = ["rand", "rlp", "serde"] } alloy-rpc-types = { workspace = true, features = ["jsonrpsee-types"] 
} +alloy-rpc-types-admin.workspace = true alloy-rpc-types-anvil.workspace = true -alloy-rpc-types-trace.workspace = true alloy-rpc-types-beacon.workspace = true -alloy-rpc-types-admin.workspace = true +alloy-rpc-types-mev.workspace = true +alloy-rpc-types-trace.workspace = true alloy-rpc-types-txpool.workspace = true alloy-serde.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["jsonrpsee-types"] } # misc serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true jsonrpsee-types = { workspace = true, optional = true } -[features] -default = ["jsonrpsee-types"] -arbitrary = ["alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"] - - [dev-dependencies] # misc alloy-primitives = { workspace = true, features = ["rand", "rlp", "serde", "arbitrary"] } @@ -43,4 +38,8 @@ proptest-derive.workspace = true rand.workspace = true similar-asserts.workspace = true bytes.workspace = true +serde_json.workspace = true +[features] +default = ["jsonrpsee-types"] +arbitrary = ["alloy-primitives/arbitrary", "alloy-rpc-types/arbitrary"] \ No newline at end of file diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs index 5df802da0..7f578ab29 100644 --- a/crates/rpc/rpc-types/src/lib.rs +++ b/crates/rpc/rpc-types/src/lib.rs @@ -11,7 +11,6 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #[allow(hidden_glob_reexports)] mod eth; -mod mev; mod peer; mod rpc; @@ -29,15 +28,18 @@ pub mod trace { pub use alloy_rpc_types_trace::*; } +// re-export admin +pub use alloy_rpc_types_admin as admin; + // Anvil specific rpc types coming from alloy. pub use alloy_rpc_types_anvil as anvil; +// re-export mev +pub use alloy_rpc_types_mev as mev; + // re-export beacon pub use alloy_rpc_types_beacon as beacon; -// re-export admin -pub use alloy_rpc_types_admin as admin; - // re-export txpool pub use alloy_rpc_types_txpool as txpool; @@ -51,6 +53,5 @@ pub use eth::{ transaction::{self, TransactionRequest, TypedTransactionRequest}, }; -pub use mev::*; pub use peer::*; pub use rpc::*; diff --git a/crates/rpc/rpc-types/src/mev.rs b/crates/rpc/rpc-types/src/mev.rs deleted file mode 100644 index 20c92f1a6..000000000 --- a/crates/rpc/rpc-types/src/mev.rs +++ /dev/null @@ -1,1047 +0,0 @@ -//! MEV bundle type bindings - -use crate::{BlockId, BlockNumberOrTag, Log}; -use alloy_primitives::{Address, Bytes, TxHash, B256, U256}; -use serde::{ - ser::{SerializeSeq, Serializer}, - Deserialize, Deserializer, Serialize, -}; -/// A bundle of transactions to send to the matchmaker. -/// -/// Note: this is for `mev_sendBundle` and not `eth_sendBundle`. -#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct SendBundleRequest { - /// The version of the MEV-share API to use. - #[serde(rename = "version")] - pub protocol_version: ProtocolVersion, - /// Data used by block builders to check if the bundle should be considered for inclusion. - #[serde(rename = "inclusion")] - pub inclusion: Inclusion, - /// The transactions to include in the bundle. - #[serde(rename = "body")] - pub bundle_body: Vec, - /// Requirements for the bundle to be included in the block. 
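The local `mev` module is deleted below in favor of the `alloy-rpc-types-mev` crate, and the `pub use alloy_rpc_types_mev as mev;` re-export above keeps a stable path for downstream crates. A sketch of the migrated import, assuming the type and its `new` constructor keep their shape after the move:

// Before: reachable at the crate root via `pub use mev::*;`
// use reth_rpc_types::SendBundleRequest;

// After: the same item, re-exported under the `mev` module
use reth_rpc_types::mev::{ProtocolVersion, SendBundleRequest};

fn main() {
    // constructor signature as defined in the (moved) module below
    let bundle = SendBundleRequest::new(1, None, ProtocolVersion::V0_1, vec![]);
    assert!(bundle.validity.is_none());
}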
- #[serde(rename = "validity", skip_serializing_if = "Option::is_none")] - pub validity: Option, - /// Preferences on what data should be shared about the bundle and its transactions - #[serde(rename = "privacy", skip_serializing_if = "Option::is_none")] - pub privacy: Option, -} - -/// Data used by block builders to check if the bundle should be considered for inclusion. -#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct Inclusion { - /// The first block the bundle is valid for. - #[serde(with = "alloy_rpc_types::serde_helpers::quantity")] - pub block: u64, - /// The last block the bundle is valid for. - #[serde( - default, - with = "alloy_rpc_types::serde_helpers::quantity::opt", - skip_serializing_if = "Option::is_none" - )] - pub max_block: Option, -} - -impl Inclusion { - /// Creates a new inclusion with the given min block.. - pub const fn at_block(block: u64) -> Self { - Self { block, max_block: None } - } - - /// Returns the block number of the first block the bundle is valid for. - #[inline] - pub const fn block_number(&self) -> u64 { - self.block - } - - /// Returns the block number of the last block the bundle is valid for. - #[inline] - pub fn max_block_number(&self) -> Option { - self.max_block.as_ref().map(|b| *b) - } -} - -/// A bundle tx, which can either be a transaction hash, or a full tx. -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] -#[serde(untagged)] -#[serde(rename_all = "camelCase")] -pub enum BundleItem { - /// The hash of either a transaction or bundle we are trying to backrun. - Hash { - /// Tx hash. - hash: TxHash, - }, - /// A new signed transaction. - #[serde(rename_all = "camelCase")] - Tx { - /// Bytes of the signed transaction. - tx: Bytes, - /// If true, the transaction can revert without the bundle being considered invalid. - can_revert: bool, - }, -} - -/// Requirements for the bundle to be included in the block. -#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct Validity { - /// Specifies the minimum percent of a given bundle's earnings to redistribute - /// for it to be included in a builder's block. - #[serde(skip_serializing_if = "Option::is_none")] - pub refund: Option>, - /// Specifies what addresses should receive what percent of the overall refund for this bundle, - /// if it is enveloped by another bundle (eg. a searcher backrun). - #[serde(skip_serializing_if = "Option::is_none")] - pub refund_config: Option>, -} - -/// Specifies the minimum percent of a given bundle's earnings to redistribute -/// for it to be included in a builder's block. -#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct Refund { - /// The index of the transaction in the bundle. - #[serde(with = "alloy_rpc_types::serde_helpers::quantity")] - pub body_idx: u64, - /// The minimum percent of the bundle's earnings to redistribute. - #[serde(with = "alloy_rpc_types::serde_helpers::quantity")] - pub percent: u64, -} - -/// Specifies what addresses should receive what percent of the overall refund for this bundle, -/// if it is enveloped by another bundle (eg. a searcher backrun). -#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct RefundConfig { - /// The address to refund. - pub address: Address, - /// The minimum percent of the bundle's earnings to redistribute. 
- #[serde(with = "alloy_rpc_types::serde_helpers::quantity")] - pub percent: u64, -} - -/// Preferences on what data should be shared about the bundle and its transactions -#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct Privacy { - /// Hints on what data should be shared about the bundle and its transactions - #[serde(skip_serializing_if = "Option::is_none")] - pub hints: Option, - /// The addresses of the builders that should be allowed to see the bundle/transaction. - #[serde(skip_serializing_if = "Option::is_none")] - pub builders: Option>, -} - -/// Hints on what data should be shared about the bundle and its transactions -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct PrivacyHint { - /// The calldata of the bundle's transactions should be shared. - pub calldata: bool, - /// The address of the bundle's transactions should be shared. - pub contract_address: bool, - /// The logs of the bundle's transactions should be shared. - pub logs: bool, - /// The function selector of the bundle's transactions should be shared. - pub function_selector: bool, - /// The hash of the bundle's transactions should be shared. - pub hash: bool, - /// The hash of the bundle should be shared. - pub tx_hash: bool, -} - -impl PrivacyHint { - /// Sets the flag indicating inclusion of calldata and returns the modified `PrivacyHint` - /// instance. - pub const fn with_calldata(mut self) -> Self { - self.calldata = true; - self - } - - /// Sets the flag indicating inclusion of contract address and returns the modified - /// `PrivacyHint` instance. - pub const fn with_contract_address(mut self) -> Self { - self.contract_address = true; - self - } - - /// Sets the flag indicating inclusion of logs and returns the modified `PrivacyHint` instance. - pub const fn with_logs(mut self) -> Self { - self.logs = true; - self - } - - /// Sets the flag indicating inclusion of function selector and returns the modified - /// `PrivacyHint` instance. - pub const fn with_function_selector(mut self) -> Self { - self.function_selector = true; - self - } - - /// Sets the flag indicating inclusion of hash and returns the modified `PrivacyHint` instance. - pub const fn with_hash(mut self) -> Self { - self.hash = true; - self - } - - /// Sets the flag indicating inclusion of transaction hash and returns the modified - /// `PrivacyHint` instance. - pub const fn with_tx_hash(mut self) -> Self { - self.tx_hash = true; - self - } - - /// Checks if calldata inclusion flag is set. - pub const fn has_calldata(&self) -> bool { - self.calldata - } - - /// Checks if contract address inclusion flag is set. - pub const fn has_contract_address(&self) -> bool { - self.contract_address - } - - /// Checks if logs inclusion flag is set. - pub const fn has_logs(&self) -> bool { - self.logs - } - - /// Checks if function selector inclusion flag is set. - pub const fn has_function_selector(&self) -> bool { - self.function_selector - } - - /// Checks if hash inclusion flag is set. - pub const fn has_hash(&self) -> bool { - self.hash - } - - /// Checks if transaction hash inclusion flag is set. - pub const fn has_tx_hash(&self) -> bool { - self.tx_hash - } - - /// Calculates the number of hints set within the `PrivacyHint` instance. 
- const fn num_hints(&self) -> usize { - let mut num_hints = 0; - if self.calldata { - num_hints += 1; - } - if self.contract_address { - num_hints += 1; - } - if self.logs { - num_hints += 1; - } - if self.function_selector { - num_hints += 1; - } - if self.hash { - num_hints += 1; - } - if self.tx_hash { - num_hints += 1; - } - num_hints - } -} - -impl Serialize for PrivacyHint { - fn serialize(&self, serializer: S) -> Result { - let mut seq = serializer.serialize_seq(Some(self.num_hints()))?; - if self.calldata { - seq.serialize_element("calldata")?; - } - if self.contract_address { - seq.serialize_element("contract_address")?; - } - if self.logs { - seq.serialize_element("logs")?; - } - if self.function_selector { - seq.serialize_element("function_selector")?; - } - if self.hash { - seq.serialize_element("hash")?; - } - if self.tx_hash { - seq.serialize_element("tx_hash")?; - } - seq.end() - } -} - -impl<'de> Deserialize<'de> for PrivacyHint { - fn deserialize>(deserializer: D) -> Result { - let hints = Vec::::deserialize(deserializer)?; - let mut privacy_hint = Self::default(); - for hint in hints { - match hint.as_str() { - "calldata" => privacy_hint.calldata = true, - "contract_address" => privacy_hint.contract_address = true, - "logs" => privacy_hint.logs = true, - "function_selector" => privacy_hint.function_selector = true, - "hash" => privacy_hint.hash = true, - "tx_hash" => privacy_hint.tx_hash = true, - _ => return Err(serde::de::Error::custom("invalid privacy hint")), - } - } - Ok(privacy_hint) - } -} - -/// Response from the matchmaker after sending a bundle. -#[derive(Deserialize, Debug, Serialize, Clone, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct SendBundleResponse { - /// Hash of the bundle bodies. - pub bundle_hash: B256, -} - -/// The version of the MEV-share API to use. -#[derive(Deserialize, Debug, Serialize, Clone, Default, PartialEq, Eq)] -pub enum ProtocolVersion { - #[default] - #[serde(rename = "beta-1")] - /// The beta-1 version of the API. - Beta1, - /// The 0.1 version of the API. - #[serde(rename = "v0.1")] - V0_1, -} - -/// Optional fields to override simulation state. -#[derive(Deserialize, Debug, Serialize, Clone, Default, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct SimBundleOverrides { - /// Block used for simulation state. Defaults to latest block. - /// Block header data will be derived from parent block by default. - /// Specify other params to override the default values. - #[serde(default, skip_serializing_if = "Option::is_none")] - pub parent_block: Option, - /// Block number used for simulation, defaults to parentBlock.number + 1 - #[serde(default, with = "alloy_rpc_types::serde_helpers::quantity::opt")] - pub block_number: Option, - /// Coinbase used for simulation, defaults to parentBlock.coinbase - #[serde(default, skip_serializing_if = "Option::is_none")] - pub coinbase: Option
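The hand-rolled `Serialize` impl below turns the flag set into a JSON array of strings, counting the set flags first so the sequence length is exact. A minimal self-contained sketch of the same technique:

use serde::ser::{Serialize, SerializeSeq, Serializer};

struct Hint {
    calldata: bool,
    logs: bool,
}

impl Serialize for Hint {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // exact length up front, then one string element per set flag
        let len = self.calldata as usize + self.logs as usize;
        let mut seq = serializer.serialize_seq(Some(len))?;
        if self.calldata {
            seq.serialize_element("calldata")?;
        }
        if self.logs {
            seq.serialize_element("logs")?;
        }
        seq.end()
    }
}

fn main() {
    let json = serde_json::to_string(&Hint { calldata: true, logs: true }).unwrap();
    assert_eq!(json, r#"["calldata","logs"]"#);
}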
, - /// Timestamp used for simulation, defaults to parentBlock.timestamp + 12 - #[serde( - default, - with = "alloy_rpc_types::serde_helpers::quantity::opt", - skip_serializing_if = "Option::is_none" - )] - pub timestamp: Option, - /// Gas limit used for simulation, defaults to parentBlock.gasLimit - #[serde( - default, - with = "alloy_rpc_types::serde_helpers::quantity::opt", - skip_serializing_if = "Option::is_none" - )] - pub gas_limit: Option, - /// Base fee used for simulation, defaults to parentBlock.baseFeePerGas - #[serde( - default, - with = "alloy_rpc_types::serde_helpers::quantity::opt", - skip_serializing_if = "Option::is_none" - )] - pub base_fee: Option, - /// Timeout in seconds, defaults to 5 - #[serde( - default, - with = "alloy_rpc_types::serde_helpers::quantity::opt", - skip_serializing_if = "Option::is_none" - )] - pub timeout: Option, -} - -/// Response from the matchmaker after sending a simulation request. -#[derive(Deserialize, Debug, Serialize, Clone, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct SimBundleResponse { - /// Whether the simulation was successful. - pub success: bool, - /// Error message if the simulation failed. - #[serde(default, skip_serializing_if = "Option::is_none")] - pub error: Option, - /// The block number of the simulated block. - #[serde(with = "alloy_rpc_types::serde_helpers::quantity")] - pub state_block: u64, - /// The gas price of the simulated block. - #[serde(with = "alloy_rpc_types::serde_helpers::quantity")] - pub mev_gas_price: u64, - /// The profit of the simulated block. - #[serde(with = "alloy_rpc_types::serde_helpers::quantity")] - pub profit: u64, - /// The refundable value of the simulated block. - #[serde(with = "alloy_rpc_types::serde_helpers::quantity")] - pub refundable_value: u64, - /// The gas used by the simulated block. - #[serde(with = "alloy_rpc_types::serde_helpers::quantity")] - pub gas_used: u64, - /// Logs returned by `mev_simBundle`. - #[serde(default, skip_serializing_if = "Option::is_none")] - pub logs: Option>, -} - -/// Logs returned by `mev_simBundle`. -#[derive(Deserialize, Debug, Serialize, Clone, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct SimBundleLogs { - /// Logs for transactions in bundle. - #[serde(default, skip_serializing_if = "Option::is_none")] - pub tx_logs: Option>, - /// Logs for bundles in bundle. - #[serde(default, skip_serializing_if = "Option::is_none")] - pub bundle_logs: Option>, -} - -impl SendBundleRequest { - /// Create a new bundle request. - pub const fn new( - block_num: u64, - max_block: Option, - protocol_version: ProtocolVersion, - bundle_body: Vec, - ) -> Self { - Self { - protocol_version, - inclusion: Inclusion { block: block_num, max_block }, - bundle_body, - validity: None, - privacy: None, - } - } -} - -/// Request for `eth_cancelBundle` -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct CancelBundleRequest { - /// Bundle hash of the bundle to be canceled - pub bundle_hash: String, -} - -/// Request for `eth_sendPrivateTransaction` -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct PrivateTransactionRequest { - /// raw signed transaction - pub tx: Bytes, - /// Hex-encoded number string, optional. Highest block number in which the transaction should - /// be included. 
- #[serde( - default, - with = "alloy_rpc_types::serde_helpers::quantity::opt", - skip_serializing_if = "Option::is_none" - )] - pub max_block_number: Option, - /// Preferences for private transaction. - #[serde(default, skip_serializing_if = "PrivateTransactionPreferences::is_empty")] - pub preferences: PrivateTransactionPreferences, -} - -/// Additional preferences for `eth_sendPrivateTransaction` -#[derive(Serialize, Deserialize, Default, Debug, Clone, PartialEq, Eq)] -pub struct PrivateTransactionPreferences { - /// Requirements for the bundle to be included in the block. - #[serde(default, skip_serializing_if = "Option::is_none")] - pub validity: Option, - /// Preferences on what data should be shared about the bundle and its transactions - #[serde(default, skip_serializing_if = "Option::is_none")] - pub privacy: Option, -} - -impl PrivateTransactionPreferences { - /// Returns true if the preferences are empty. - pub const fn is_empty(&self) -> bool { - self.validity.is_none() && self.privacy.is_none() - } -} - -/// Request for `eth_cancelPrivateTransaction` -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct CancelPrivateTransactionRequest { - /// Transaction hash of the transaction to be canceled - pub tx_hash: B256, -} - -// TODO(@optimiz-r): Revisit after is closed. -/// Response for `flashbots_getBundleStatsV2` represents stats for a single bundle -/// -/// Note: this is V2: -/// -/// Timestamp format: "2022-10-06T21:36:06.322Z" -#[derive(Default, Debug, Clone, PartialEq, Eq)] -pub enum BundleStats { - /// The relayer has not yet seen the bundle. - #[default] - Unknown, - /// The relayer has seen the bundle, but has not simulated it yet. - Seen(StatsSeen), - /// The relayer has seen the bundle and has simulated it. - Simulated(StatsSimulated), -} - -impl Serialize for BundleStats { - fn serialize(&self, serializer: S) -> Result { - match self { - Self::Unknown => serde_json::json!({"isSimulated": false}).serialize(serializer), - Self::Seen(stats) => stats.serialize(serializer), - Self::Simulated(stats) => stats.serialize(serializer), - } - } -} - -impl<'de> Deserialize<'de> for BundleStats { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let map = serde_json::Map::deserialize(deserializer)?; - - if map.get("receivedAt").is_none() { - Ok(Self::Unknown) - } else if map["isSimulated"] == false { - StatsSeen::deserialize(serde_json::Value::Object(map)) - .map(BundleStats::Seen) - .map_err(serde::de::Error::custom) - } else { - StatsSimulated::deserialize(serde_json::Value::Object(map)) - .map(BundleStats::Simulated) - .map_err(serde::de::Error::custom) - } - } -} - -/// Response for `flashbots_getBundleStatsV2` represents stats for a single bundle -/// -/// Note: this is V2: -/// -/// Timestamp format: "2022-10-06T21:36:06.322Z -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct StatsSeen { - /// boolean representing if this searcher has a high enough reputation to be in the high - /// priority queue - pub is_high_priority: bool, - /// representing whether the bundle gets simulated. 
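The `BundleStats` deserializer above dispatches on which fields are present rather than on a tag: a missing `receivedAt` means the relay never saw the bundle, otherwise `isSimulated` selects between the seen and simulated shapes. A minimal sketch of that decision rule over raw JSON:

use serde_json::Value;

fn classify(stats: &Value) -> &'static str {
    match stats.get("receivedAt") {
        None => "unknown",
        // serde_json's Index returns Null for missing keys, so this
        // comparison is safe and mirrors the impl above
        Some(_) if stats["isSimulated"] == false => "seen",
        Some(_) => "simulated",
    }
}

fn main() {
    let unknown = serde_json::json!({ "isSimulated": false });
    assert_eq!(classify(&unknown), "unknown");
    let seen = serde_json::json!({ "isSimulated": false, "receivedAt": "222" });
    assert_eq!(classify(&seen), "seen");
}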
All other fields will be omitted except - /// simulated field if API didn't receive bundle - pub is_simulated: bool, - /// time at which the bundle API received the bundle - pub received_at: String, -} - -/// Response for `flashbots_getBundleStatsV2` represents stats for a single bundle -/// -/// Note: this is V2: -/// -/// Timestamp format: "2022-10-06T21:36:06.322Z -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct StatsSimulated { - /// boolean representing if this searcher has a high enough reputation to be in the high - /// priority queue - pub is_high_priority: bool, - /// representing whether the bundle gets simulated. All other fields will be omitted except - /// simulated field if API didn't receive bundle - pub is_simulated: bool, - /// time at which the bundle gets simulated - pub simulated_at: String, - /// time at which the bundle API received the bundle - pub received_at: String, - /// indicates time at which each builder selected the bundle to be included in the target - /// block - #[serde(default = "Vec::new")] - pub considered_by_builders_at: Vec, - /// indicates time at which each builder sealed a block containing the bundle - #[serde(default = "Vec::new")] - pub sealed_by_builders_at: Vec, -} - -/// Represents information about when a bundle was considered by a builder. -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct ConsideredByBuildersAt { - /// The public key of the builder. - pub pubkey: String, - /// The timestamp indicating when the bundle was considered by the builder. - pub timestamp: String, -} - -/// Represents information about when a bundle was sealed by a builder. -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct SealedByBuildersAt { - /// The public key of the builder. - pub pubkey: String, - /// The timestamp indicating when the bundle was sealed by the builder. - pub timestamp: String, -} - -/// Response for `flashbots_getUserStatsV2` represents stats for a searcher. -/// -/// Note: this is V2: -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct UserStats { - /// Represents whether this searcher has a high enough reputation to be in the high priority - /// queue. - pub is_high_priority: bool, - /// The total amount paid to validators over all time. - #[serde(with = "u256_numeric_string")] - pub all_time_validator_payments: U256, - /// The total amount of gas simulated across all bundles submitted to Flashbots. - /// This is the actual gas used in simulations, not gas limit. - #[serde(with = "u256_numeric_string")] - pub all_time_gas_simulated: U256, - /// The total amount paid to validators the last 7 days. - #[serde(with = "u256_numeric_string")] - pub last_7d_validator_payments: U256, - /// The total amount of gas simulated across all bundles submitted to Flashbots in the last 7 - /// days. This is the actual gas used in simulations, not gas limit. - #[serde(with = "u256_numeric_string")] - pub last_7d_gas_simulated: U256, - /// The total amount paid to validators the last day. - #[serde(with = "u256_numeric_string")] - pub last_1d_validator_payments: U256, - /// The total amount of gas simulated across all bundles submitted to Flashbots in the last - /// day. This is the actual gas used in simulations, not gas limit. 
- #[serde(with = "u256_numeric_string")] - pub last_1d_gas_simulated: U256, -} - -/// Bundle of transactions for `eth_sendBundle` -/// -/// Note: this is for `eth_sendBundle` and not `mev_sendBundle` -/// -/// -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct EthSendBundle { - /// A list of hex-encoded signed transactions - pub txs: Vec, - /// hex-encoded block number for which this bundle is valid - #[serde(with = "alloy_rpc_types::serde_helpers::quantity")] - pub block_number: u64, - /// unix timestamp when this bundle becomes active - #[serde( - default, - with = "alloy_rpc_types::serde_helpers::quantity::opt", - skip_serializing_if = "Option::is_none" - )] - pub min_timestamp: Option, - /// unix timestamp how long this bundle stays valid - #[serde( - default, - with = "alloy_rpc_types::serde_helpers::quantity::opt", - skip_serializing_if = "Option::is_none" - )] - pub max_timestamp: Option, - /// list of hashes of possibly reverting txs - #[serde(default, skip_serializing_if = "Vec::is_empty")] - pub reverting_tx_hashes: Vec, - /// UUID that can be used to cancel/replace this bundle - #[serde(default, rename = "replacementUuid", skip_serializing_if = "Option::is_none")] - pub replacement_uuid: Option, -} - -/// Response from the matchmaker after sending a bundle. -#[derive(Deserialize, Debug, Serialize, Clone, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct EthBundleHash { - /// Hash of the bundle bodies. - pub bundle_hash: B256, -} - -/// Bundle of transactions for `eth_callBundle` -/// -/// -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct EthCallBundle { - /// A list of hex-encoded signed transactions - pub txs: Vec, - /// hex encoded block number for which this bundle is valid on - #[serde(with = "alloy_rpc_types::serde_helpers::quantity")] - pub block_number: u64, - /// Either a hex encoded number or a block tag for which state to base this simulation on - pub state_block_number: BlockNumberOrTag, - /// the timestamp to use for this bundle simulation, in seconds since the unix epoch - #[serde( - default, - with = "alloy_rpc_types::serde_helpers::quantity::opt", - skip_serializing_if = "Option::is_none" - )] - pub timestamp: Option, -} - -/// Response for `eth_callBundle` -#[derive(Default, Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct EthCallBundleResponse { - /// The hash of the bundle bodies. 
- pub bundle_hash: B256, - /// The gas price of the entire bundle - #[serde(with = "u256_numeric_string")] - pub bundle_gas_price: U256, - /// The difference in Ether sent to the coinbase after all transactions in the bundle - #[serde(with = "u256_numeric_string")] - pub coinbase_diff: U256, - /// The total amount of Ether sent to the coinbase after all transactions in the bundle - #[serde(with = "u256_numeric_string")] - pub eth_sent_to_coinbase: U256, - /// The total gas fees paid for all transactions in the bundle - #[serde(with = "u256_numeric_string")] - pub gas_fees: U256, - /// Results of individual transactions within the bundle - pub results: Vec, - /// The block number used as a base for this simulation - #[serde(with = "alloy_rpc_types::serde_helpers::quantity")] - pub state_block_number: u64, - /// The total gas used by all transactions in the bundle - #[serde(with = "alloy_rpc_types::serde_helpers::quantity")] - pub total_gas_used: u64, -} - -/// Result of a single transaction in a bundle for `eth_callBundle` -#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct EthCallBundleTransactionResult { - /// The difference in Ether sent to the coinbase after the transaction - #[serde(with = "u256_numeric_string")] - pub coinbase_diff: U256, - /// The amount of Ether sent to the coinbase after the transaction - #[serde(with = "u256_numeric_string")] - pub eth_sent_to_coinbase: U256, - /// The address from which the transaction originated - pub from_address: Address, - /// The gas fees paid for the transaction - #[serde(with = "u256_numeric_string")] - pub gas_fees: U256, - /// The gas price used for the transaction - #[serde(with = "u256_numeric_string")] - pub gas_price: U256, - /// The amount of gas used by the transaction - #[serde(with = "alloy_rpc_types::serde_helpers::quantity")] - pub gas_used: u64, - /// The address to which the transaction is sent (optional) - pub to_address: Option
, - /// The transaction hash - pub tx_hash: B256, - /// Contains the return data if the transaction succeeded - /// - /// Note: this is mutually exclusive with `revert` - #[serde(skip_serializing_if = "Option::is_none")] - pub value: Option, - /// Contains the return data if the transaction reverted - #[serde(skip_serializing_if = "Option::is_none")] - pub revert: Option, -} - -mod u256_numeric_string { - use alloy_primitives::U256; - use serde::{de, Deserialize, Serializer}; - use std::str::FromStr; - - pub(crate) fn deserialize<'de, D>(deserializer: D) -> Result - where - D: de::Deserializer<'de>, - { - let val = serde_json::Value::deserialize(deserializer)?; - match val { - serde_json::Value::String(s) => { - if let Ok(val) = s.parse::() { - return Ok(U256::from(val)) - } - U256::from_str(&s).map_err(de::Error::custom) - } - serde_json::Value::Number(num) => { - num.as_u64().map(U256::from).ok_or_else(|| de::Error::custom("invalid u256")) - } - _ => Err(de::Error::custom("invalid u256")), - } - } - - pub(crate) fn serialize(val: &U256, serializer: S) -> Result - where - S: Serializer, - { - let val: u128 = (*val).try_into().map_err(serde::ser::Error::custom)?; - serializer.serialize_str(&val.to_string()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::str::FromStr; - - #[test] - fn can_deserialize_simple() { - let str = r#" - [{ - "version": "v0.1", - "inclusion": { - "block": "0x1" - }, - "body": [{ - "tx": "0x02f86b0180843b9aca00852ecc889a0082520894c87037874aed04e51c29f582394217a0a2b89d808080c080a0a463985c616dd8ee17d7ef9112af4e6e06a27b071525b42182fe7b0b5c8b4925a00af5ca177ffef2ff28449292505d41be578bebb77110dfc09361d2fb56998260", - "canRevert": false - }] - }] - "#; - let res: Result, _> = serde_json::from_str(str); - assert!(res.is_ok()); - } - - #[test] - fn can_deserialize_complex() { - let str = r#" - [{ - "version": "v0.1", - "inclusion": { - "block": "0x1" - }, - "body": [{ - "tx": "0x02f86b0180843b9aca00852ecc889a0082520894c87037874aed04e51c29f582394217a0a2b89d808080c080a0a463985c616dd8ee17d7ef9112af4e6e06a27b071525b42182fe7b0b5c8b4925a00af5ca177ffef2ff28449292505d41be578bebb77110dfc09361d2fb56998260", - "canRevert": false - }], - "privacy": { - "hints": [ - "calldata" - ] - }, - "validity": { - "refundConfig": [ - { - "address": "0x8EC1237b1E80A6adf191F40D4b7D095E21cdb18f", - "percent": 100 - } - ] - } - }] - "#; - let res: Result, _> = serde_json::from_str(str); - assert!(res.is_ok()); - } - - #[test] - fn can_serialize_complex() { - let str = r#" - [{ - "version": "v0.1", - "inclusion": { - "block": "0x1" - }, - "body": [{ - "tx": "0x02f86b0180843b9aca00852ecc889a0082520894c87037874aed04e51c29f582394217a0a2b89d808080c080a0a463985c616dd8ee17d7ef9112af4e6e06a27b071525b42182fe7b0b5c8b4925a00af5ca177ffef2ff28449292505d41be578bebb77110dfc09361d2fb56998260", - "canRevert": false - }], - "privacy": { - "hints": [ - "calldata" - ] - }, - "validity": { - "refundConfig": [ - { - "address": "0x8EC1237b1E80A6adf191F40D4b7D095E21cdb18f", - "percent": 100 - } - ] - } - }] - "#; - let bundle_body = vec![BundleItem::Tx { - tx: Bytes::from_str("0x02f86b0180843b9aca00852ecc889a0082520894c87037874aed04e51c29f582394217a0a2b89d808080c080a0a463985c616dd8ee17d7ef9112af4e6e06a27b071525b42182fe7b0b5c8b4925a00af5ca177ffef2ff28449292505d41be578bebb77110dfc09361d2fb56998260").unwrap(), - can_revert: false, - }]; - - let validity = Some(Validity { - refund_config: Some(vec![RefundConfig { - address: "0x8EC1237b1E80A6adf191F40D4b7D095E21cdb18f".parse().unwrap(), - percent: 100, - }]), - 
..Default::default() - }); - - let privacy = Some(Privacy { - hints: Some(PrivacyHint { calldata: true, ..Default::default() }), - ..Default::default() - }); - - let bundle = SendBundleRequest { - protocol_version: ProtocolVersion::V0_1, - inclusion: Inclusion { block: 1, max_block: None }, - bundle_body, - validity, - privacy, - }; - let expected = serde_json::from_str::>(str).unwrap(); - assert_eq!(bundle, expected[0]); - } - - #[test] - fn can_serialize_privacy_hint() { - let hint = PrivacyHint { - calldata: true, - contract_address: true, - logs: true, - function_selector: true, - hash: true, - tx_hash: true, - }; - let expected = - r#"["calldata","contract_address","logs","function_selector","hash","tx_hash"]"#; - let actual = serde_json::to_string(&hint).unwrap(); - assert_eq!(actual, expected); - } - - #[test] - fn can_deserialize_privacy_hint() { - let hint = PrivacyHint { - calldata: true, - contract_address: false, - logs: true, - function_selector: false, - hash: true, - tx_hash: false, - }; - let expected = r#"["calldata","logs","hash"]"#; - let actual: PrivacyHint = serde_json::from_str(expected).unwrap(); - assert_eq!(actual, hint); - } - - #[test] - fn can_dererialize_sim_response() { - let expected = r#" - { - "success": true, - "stateBlock": "0x8b8da8", - "mevGasPrice": "0x74c7906005", - "profit": "0x4bc800904fc000", - "refundableValue": "0x4bc800904fc000", - "gasUsed": "0xa620", - "logs": [{},{}] - } - "#; - let actual: SimBundleResponse = serde_json::from_str(expected).unwrap(); - assert!(actual.success); - } - - #[test] - fn can_deserialize_eth_call_resp() { - let s = r#"{ - "bundleGasPrice": "476190476193", - "bundleHash": "0x73b1e258c7a42fd0230b2fd05529c5d4b6fcb66c227783f8bece8aeacdd1db2e", - "coinbaseDiff": "20000000000126000", - "ethSentToCoinbase": "20000000000000000", - "gasFees": "126000", - "results": [ - { - "coinbaseDiff": "10000000000063000", - "ethSentToCoinbase": "10000000000000000", - "fromAddress": "0x02A727155aeF8609c9f7F2179b2a1f560B39F5A0", - "gasFees": "63000", - "gasPrice": "476190476193", - "gasUsed": 21000, - "toAddress": "0x73625f59CAdc5009Cb458B751b3E7b6b48C06f2C", - "txHash": "0x669b4704a7d993a946cdd6e2f95233f308ce0c4649d2e04944e8299efcaa098a", - "value": "0x" - }, - { - "coinbaseDiff": "10000000000063000", - "ethSentToCoinbase": "10000000000000000", - "fromAddress": "0x02A727155aeF8609c9f7F2179b2a1f560B39F5A0", - "gasFees": "63000", - "gasPrice": "476190476193", - "gasUsed": 21000, - "toAddress": "0x73625f59CAdc5009Cb458B751b3E7b6b48C06f2C", - "txHash": "0xa839ee83465657cac01adc1d50d96c1b586ed498120a84a64749c0034b4f19fa", - "value": "0x" - } - ], - "stateBlockNumber": 5221585, - "totalGasUsed": 42000 - }"#; - - let _call = serde_json::from_str::(s).unwrap(); - } - - #[test] - fn can_serialize_deserialize_bundle_stats() { - let fixtures = [ - ( - r#"{ - "isSimulated": false - }"#, - BundleStats::Unknown, - ), - ( - r#"{ - "isHighPriority": false, - "isSimulated": false, - "receivedAt": "476190476193" - }"#, - BundleStats::Seen(StatsSeen { - is_high_priority: false, - is_simulated: false, - received_at: "476190476193".to_string(), - }), - ), - ( - r#"{ - "isHighPriority": true, - "isSimulated": true, - "simulatedAt": "111", - "receivedAt": "222", - "consideredByBuildersAt":[], - "sealedByBuildersAt": [ - { - "pubkey": "333", - "timestamp": "444" - }, - { - "pubkey": "555", - "timestamp": "666" - } - ] - }"#, - BundleStats::Simulated(StatsSimulated { - is_high_priority: true, - is_simulated: true, - simulated_at: String::from("111"), - 
received_at: String::from("222"), - considered_by_builders_at: vec![], - sealed_by_builders_at: vec![ - SealedByBuildersAt { - pubkey: String::from("333"), - timestamp: String::from("444"), - }, - SealedByBuildersAt { - pubkey: String::from("555"), - timestamp: String::from("666"), - }, - ], - }), - ), - ]; - - let strip_whitespaces = - |input: &str| input.chars().filter(|&c| !c.is_whitespace()).collect::(); - - for (serialized, deserialized) in fixtures { - // Check de-serialization - let deserialized_expected = serde_json::from_str::(serialized).unwrap(); - assert_eq!(deserialized, deserialized_expected); - - // Check serialization - let serialized_expected = &serde_json::to_string(&deserialized).unwrap(); - assert_eq!(strip_whitespaces(serialized), strip_whitespaces(serialized_expected)); - } - } -} diff --git a/crates/rpc/rpc-types/src/rpc.rs b/crates/rpc/rpc-types/src/rpc.rs index bb5ae5d77..0b9afeb79 100644 --- a/crates/rpc/rpc-types/src/rpc.rs +++ b/crates/rpc/rpc-types/src/rpc.rs @@ -24,6 +24,7 @@ impl RpcModules { #[cfg(test)] mod tests { use super::*; + #[test] fn test_parse_module_versions_roundtrip() { let s = r#"{"txpool":"1.0","trace":"1.0","eth":"1.0","web3":"1.0","net":"1.0"}"#; diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index d84b90278..3a28b8457 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -16,11 +16,11 @@ workspace = true reth-chainspec.workspace = true reth-primitives.workspace = true reth-rpc-api.workspace = true -reth-rpc-server-types.workspace = true +reth-rpc-eth-api.workspace = true reth-rpc-types.workspace = true reth-errors.workspace = true -reth-provider = { workspace = true, features = ["test-utils"] } -reth-transaction-pool = { workspace = true, features = ["test-utils"] } +reth-provider.workspace = true +reth-transaction-pool.workspace = true reth-network-api.workspace = true reth-rpc-engine-api.workspace = true reth-revm.workspace = true @@ -28,17 +28,16 @@ reth-tasks = { workspace = true, features = ["rayon"] } reth-consensus-common.workspace = true reth-rpc-types-compat.workspace = true revm-inspectors = { workspace = true, features = ["js-tracer"] } -reth-evm.workspace = true reth-network-peers.workspace = true -reth-execution-types.workspace = true - +reth-evm.workspace = true +reth-rpc-eth-types.workspace = true +reth-rpc-server-types.workspace = true reth-evm-optimism = { workspace = true, optional = true } # eth +alloy-dyn-abi.workspace = true alloy-rlp.workspace = true -alloy-dyn-abi = { workspace = true, features = ["eip712"] } alloy-primitives.workspace = true -alloy-sol-types.workspace = true alloy-genesis.workspace = true revm = { workspace = true, features = [ "optional_block_gas_limit", @@ -46,6 +45,7 @@ revm = { workspace = true, features = [ "optional_no_base_fee", ] } revm-primitives = { workspace = true, features = ["serde"] } +secp256k1.workspace = true # rpc jsonrpsee.workspace = true @@ -53,55 +53,47 @@ http.workspace = true http-body.workspace = true hyper.workspace = true jsonwebtoken.workspace = true +serde_json.workspace = true +jsonrpsee-types = { workspace = true, optional = true } # async async-trait.workspace = true tokio = { workspace = true, features = ["sync"] } +tokio-stream.workspace = true tower.workspace = true -tokio-stream = { workspace = true, features = ["sync"] } pin-project.workspace = true parking_lot.workspace = true -# metrics -reth-metrics.workspace = true -metrics.workspace = true - # misc -secp256k1 = { workspace = true, features = [ - "global-context", - 
"rand-std", - "recovery", -] } -serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true -thiserror.workspace = true -rand.workspace = true tracing.workspace = true tracing-futures = "0.2" -schnellru.workspace = true futures.workspace = true +rand.workspace = true +serde.workspace = true +thiserror.workspace = true derive_more.workspace = true -dyn-clone.workspace = true [dev-dependencies] reth-evm-ethereum.workspace = true reth-testing-utils.workspace = true +jsonrpsee-types.workspace = true jsonrpsee = { workspace = true, features = ["client"] } assert_matches.workspace = true tempfile.workspace = true [features] +bsc = [ + "reth-primitives/bsc", +] optimism = [ "reth-primitives/optimism", "reth-rpc-types-compat/optimism", "reth-provider/optimism", - "dep:reth-evm-optimism", - "reth-evm-optimism/optimism", + "reth-rpc-api/optimism", + "reth-rpc-eth-api/optimism", "reth-revm/optimism", -] - - -bsc = [ - "reth-primitives/bsc", + "jsonrpsee-types", + "reth-evm-optimism", + "reth-rpc-eth-types/optimism", ] \ No newline at end of file diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index 71f95fede..1d59baa6e 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -1,17 +1,18 @@ -use crate::result::ToRpcResult; +use std::sync::Arc; + use alloy_genesis::ChainConfig; use alloy_primitives::B256; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::ChainSpec; use reth_network_api::{NetworkInfo, PeerKind, Peers}; -use reth_network_peers::{AnyNode, NodeRecord}; +use reth_network_peers::{id2pk, AnyNode, NodeRecord}; use reth_rpc_api::AdminApiServer; -use reth_rpc_types::{ - admin::{EthProtocolInfo, NodeInfo, Ports, ProtocolInfo}, - PeerEthProtocolInfo, PeerInfo, PeerNetworkInfo, PeerProtocolsInfo, +use reth_rpc_server_types::ToRpcResult; +use reth_rpc_types::admin::{ + EthInfo, EthPeerInfo, EthProtocolInfo, NodeInfo, PeerInfo, PeerNetworkInfo, PeerProtocolInfo, + Ports, ProtocolInfo, }; -use std::sync::Arc; /// `admin` API implementation. 
/// @@ -37,7 +38,7 @@ where { /// Handler for `admin_addPeer` fn add_peer(&self, record: NodeRecord) -> RpcResult { - self.network.add_peer(record.id, record.tcp_addr()); + self.network.add_peer_with_udp(record.id, record.tcp_addr(), record.udp_addr()); Ok(true) } @@ -50,7 +51,7 @@ where /// Handler for `admin_addTrustedPeer` fn add_trusted_peer(&self, record: AnyNode) -> RpcResult { if let Some(record) = record.node_record() { - self.network.add_trusted_peer(record.id, record.tcp_addr()) + self.network.add_trusted_peer_with_udp(record.id, record.tcp_addr(), record.udp_addr()) } self.network.add_trusted_peer_id(record.peer_id()); Ok(true) @@ -62,33 +63,43 @@ where Ok(true) } + /// Handler for `admin_peers` async fn peers(&self) -> RpcResult> { let peers = self.network.get_all_peers().await.to_rpc_result()?; - let peers = peers - .into_iter() - .map(|peer| PeerInfo { - id: Some(peer.remote_id.to_string()), - name: peer.client_version.to_string(), - caps: peer.capabilities.capabilities().iter().map(|cap| cap.to_string()).collect(), - network: PeerNetworkInfo { - remote_address: peer.remote_addr.to_string(), - local_address: peer - .local_addr - .unwrap_or_else(|| self.network.local_addr()) - .to_string(), - }, - protocols: PeerProtocolsInfo { - eth: Some(PeerEthProtocolInfo { - difficulty: Some(peer.status.total_difficulty), - head: peer.status.blockhash.to_string(), - version: peer.status.version as u32, - }), - pip: None, - }, - }) - .collect(); + let mut infos = Vec::with_capacity(peers.len()); + + for peer in peers { + if let Ok(pk) = id2pk(peer.remote_id) { + infos.push(PeerInfo { + id: pk.to_string(), + name: peer.client_version.to_string(), + enode: peer.enode, + enr: peer.enr, + caps: peer + .capabilities + .capabilities() + .iter() + .map(|cap| cap.to_string()) + .collect(), + network: PeerNetworkInfo { + remote_address: peer.remote_addr, + local_address: peer.local_addr.unwrap_or_else(|| self.network.local_addr()), + inbound: peer.direction.is_incoming(), + trusted: peer.kind.is_trusted(), + static_node: peer.kind.is_static(), + }, + protocols: PeerProtocolInfo { + eth: Some(EthPeerInfo::Info(EthInfo { + version: peer.status.version as u64, + })), + snap: None, + other: Default::default(), + }, + }) + } + } - Ok(peers) + Ok(infos) } /// Handler for `admin_nodeInfo` diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index b2d524a63..8e04c6256 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -1,24 +1,22 @@ -use crate::{ - eth::{ - error::{EthApiError, EthResult}, - revm_utils::prepare_call_env, - EthTransactions, - }, - result::{internal_rpc_err, ToRpcResult}, - EthApiSpec, -}; +use std::sync::Arc; + use alloy_rlp::{Decodable, Encodable}; use async_trait::async_trait; use jsonrpsee::core::RpcResult; +use reth_chainspec::EthereumHardforks; +use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - revm::env::tx_env_with_recovered, Address, Block, BlockId, BlockNumberOrTag, Bytes, - TransactionSignedEcRecovered, Withdrawals, B256, U256, + Address, Block, BlockId, BlockNumberOrTag, Bytes, TransactionSignedEcRecovered, B256, U256, }; use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, HeaderProvider, StateProviderBox, TransactionVariant, + BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, HeaderProvider, StateProviderFactory, + TransactionVariant, }; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::DebugApiServer; +use reth_rpc_eth_api::helpers::{Call, EthApiSpec, EthTransactions, TraceExt}; +use 
reth_rpc_eth_types::{revm_utils::prepare_call_env, EthApiError, EthResult, StateCacheDb}; +use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_rpc_types::{ state::EvmOverrides, trace::geth::{ @@ -36,7 +34,6 @@ use revm_inspectors::tracing::{ js::{JsInspector, TransactionContext}, FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, }; -use std::sync::Arc; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; /// `debug` API implementation. @@ -65,8 +62,13 @@ impl DebugApi { impl DebugApi where - Provider: BlockReaderIdExt + HeaderProvider + ChainSpecProvider + 'static, - Eth: EthTransactions + 'static, + Provider: BlockReaderIdExt + + HeaderProvider + + ChainSpecProvider + + StateProviderFactory + + EvmEnvProvider + + 'static, + Eth: TraceExt + 'static, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { @@ -74,7 +76,7 @@ where } /// Trace the entire block asynchronously - async fn trace_block_with( + async fn trace_block( &self, at: BlockId, transactions: Vec, @@ -97,9 +99,13 @@ where let mut transactions = transactions.into_iter().enumerate().peekable(); while let Some((index, tx)) = transactions.next() { let tx_hash = tx.hash; - let tx = tx_env_with_recovered(&tx); + let env = EnvWithHandlerCfg { - env: Env::boxed(cfg.cfg_env.clone(), block_env.clone(), tx), + env: Env::boxed( + cfg.cfg_env.clone(), + block_env.clone(), + Call::evm_config(this.eth_api()).tx_env(&tx), + ), handler_cfg: cfg.handler_cfg, }; let (result, state_changes) = this.trace_transaction( @@ -165,7 +171,7 @@ where .collect::>>()? }; - self.trace_block_with(parent.into(), transactions, cfg, block_env, opts).await + self.trace_block(parent.into(), transactions, cfg, block_env, opts).await } /// Replays a block and returns the trace of each transaction. 
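The hunks in this file replace the free `tx_env_with_recovered` helper with `Call::evm_config(this.eth_api()).tx_env(&tx)`, so every transaction environment is derived through the node's configured EVM rather than a chain-agnostic function. A minimal sketch of that shape; the trait and types here are hypothetical stand-ins, not the reth signatures:

struct Recovered {
    nonce: u64,
}
struct TxEnv {
    nonce: u64,
}

trait EvmConfig {
    // one config object owns every env-filling rule for its chain
    fn tx_env(&self, tx: &Recovered) -> TxEnv;
}

struct MainnetEvm;

impl EvmConfig for MainnetEvm {
    fn tx_env(&self, tx: &Recovered) -> TxEnv {
        TxEnv { nonce: tx.nonce }
    }
}

fn trace_all(cfg: &impl EvmConfig, txs: &[Recovered]) -> Vec<TxEnv> {
    // one environment per transaction, all derived through the same config
    txs.iter().map(|tx| cfg.tx_env(tx)).collect()
}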
@@ -182,7 +188,7 @@ where let ((cfg, block_env, _), block) = futures::try_join!( self.inner.eth_api.evm_env_at(block_hash.into()), - self.inner.eth_api.block_by_id_with_senders(block_id), + self.inner.eth_api.block_with_senders(block_id), )?; let block = block.ok_or_else(|| EthApiError::UnknownBlockNumber)?; @@ -190,7 +196,7 @@ where // its parent block's state let state_at = block.parent_hash; - self.trace_block_with( + self.trace_block( state_at.into(), block.into_transactions_ecrecovered().collect(), cfg, @@ -238,7 +244,11 @@ where )?; let env = EnvWithHandlerCfg { - env: Env::boxed(cfg.cfg_env.clone(), block_env, tx_env_with_recovered(&tx)), + env: Env::boxed( + cfg.cfg_env.clone(), + block_env, + Call::evm_config(this.eth_api()).tx_env(&tx), + ), handler_cfg: cfg.handler_cfg, }; @@ -324,6 +334,10 @@ where self.inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { + // wrapper is hack to get around 'higher-ranked lifetime error', + // see + let db = db.0; + let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector @@ -346,6 +360,10 @@ where .inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { + // wrapper is hack to get around 'higher-ranked lifetime error', see + // + let db = db.0; + let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; let frame = inspector.try_into_mux_frame(&res, db)?; @@ -364,6 +382,10 @@ where .inner .eth_api .spawn_with_call_at(call, at, overrides, move |db, env| { + // wrapper is hack to get around 'higher-ranked lifetime error', see + // + let db = db.0; + let mut inspector = JsInspector::new(code, config)?; let (res, _) = this.eth_api().inspect(&mut *db, env.clone(), &mut inspector)?; @@ -415,7 +437,7 @@ where let target_block = block_number.unwrap_or_default(); let ((cfg, mut block_env, _), block) = futures::try_join!( self.inner.eth_api.evm_env_at(target_block), - self.inner.eth_api.block_by_id_with_senders(target_block), + self.inner.eth_api.block_with_senders(target_block), )?; let opts = opts.unwrap_or_default(); @@ -439,6 +461,7 @@ where } let this = self.clone(); + self.inner .eth_api .spawn_with_state_at_block(at.into(), move |state| { @@ -453,9 +476,12 @@ where // Execute all transactions until index for tx in transactions { - let tx = tx_env_with_recovered(&tx); let env = EnvWithHandlerCfg { - env: Env::boxed(cfg.cfg_env.clone(), block_env.clone(), tx), + env: Env::boxed( + cfg.cfg_env.clone(), + block_env.clone(), + Call::evm_config(this.eth_api()).tx_env(&tx), + ), handler_cfg: cfg.handler_cfg, }; let (res, _) = this.inner.eth_api.transact(&mut db, env)?; @@ -518,7 +544,7 @@ where &self, opts: GethDebugTracingOptions, env: EnvWithHandlerCfg, - db: &mut CacheDB>, + db: &mut StateCacheDb<'_>, transaction_context: Option, ) -> EthResult<(GethTrace, revm_primitives::EvmState)> { let GethDebugTracingOptions { config, tracer, tracer_config, .. 
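The repeated `let db = db.0;` lines above unwrap a one-field tuple struct that exists only to sidestep a rustc higher-ranked lifetime inference error (see the issue linked in those comments). A minimal sketch of the shape, with a stand-in database type; this shows the wrapper-then-unwrap idiom, not the exact failing bound:

struct Wrapper<'a>(&'a mut Vec<u8>);

fn with_db<F>(db: &mut Vec<u8>, f: F)
where
    F: for<'a> FnOnce(Wrapper<'a>),
{
    f(Wrapper(db));
}

fn main() {
    let mut db = vec![1, 2, 3];
    with_db(&mut db, |db| {
        // unwrap the newtype, then use the inner reference directly
        let db = db.0;
        db.push(4);
    });
}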
} = opts; @@ -614,8 +640,13 @@ where #[async_trait] impl DebugApiServer for DebugApi where - Provider: BlockReaderIdExt + HeaderProvider + ChainSpecProvider + 'static, - Eth: EthApiSpec + 'static, + Provider: BlockReaderIdExt + + HeaderProvider + + ChainSpecProvider + + StateProviderFactory + + EvmEnvProvider + + 'static, + Eth: EthApiSpec + EthTransactions + TraceExt + 'static, { /// Handler for `debug_getRawHeader` async fn raw_header(&self, block_id: BlockId) -> RpcResult { @@ -642,17 +673,14 @@ where /// Handler for `debug_getRawBlock` async fn raw_block(&self, block_id: BlockId) -> RpcResult { - let block = self.inner.provider.block_by_id(block_id).to_rpc_result()?; - + let block = self + .inner + .provider + .block_by_id(block_id) + .to_rpc_result()? + .ok_or_else(|| EthApiError::UnknownBlockNumber)?; let mut res = Vec::new(); - if let Some(mut block) = block { - // In RPC withdrawals are always present - if block.withdrawals.is_none() { - block.withdrawals = Some(Withdrawals::default()); - } - block.encode(&mut res); - } - + block.encode(&mut res); Ok(res.into()) } diff --git a/crates/rpc/rpc/src/eth/api/block.rs b/crates/rpc/rpc/src/eth/api/block.rs deleted file mode 100644 index 18a547faf..000000000 --- a/crates/rpc/rpc/src/eth/api/block.rs +++ /dev/null @@ -1,217 +0,0 @@ -//! Contains RPC handler implementations specific to blocks. - -use crate::{ - eth::{ - api::transactions::build_transaction_receipt_with_block_receipts, - error::{EthApiError, EthResult}, - }, - EthApi, -}; -use reth_evm::ConfigureEvm; -use reth_network_api::NetworkInfo; -use reth_primitives::{BlockId, TransactionMeta}; -use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; -use reth_rpc_types::{AnyTransactionReceipt, Header, Index, RichBlock}; -use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; -use reth_transaction_pool::TransactionPool; -use std::sync::Arc; - -impl EthApi -where - Provider: - BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, - Pool: TransactionPool + Clone + 'static, - Network: NetworkInfo + Send + Sync + 'static, - EvmConfig: ConfigureEvm + 'static, -{ - /// Returns the uncle headers of the given block - /// - /// Returns an empty vec if there are none. - pub(crate) fn ommers( - &self, - block_id: impl Into, - ) -> EthResult>> { - let block_id = block_id.into(); - Ok(self.provider().ommers_by_id(block_id)?) - } - - pub(crate) async fn ommer_by_block_and_index( - &self, - block_id: impl Into, - index: Index, - ) -> EthResult> { - let block_id = block_id.into(); - - let uncles = if block_id.is_pending() { - // Pending block can be fetched directly without need for caching - self.provider().pending_block()?.map(|block| block.ommers) - } else { - self.provider().ommers_by_id(block_id)? - } - .unwrap_or_default(); - - let index = usize::from(index); - let uncle = - uncles.into_iter().nth(index).map(|header| uncle_block_from_header(header).into()); - Ok(uncle) - } - - /// Returns all transaction receipts in the block. - /// - /// Returns `None` if the block wasn't found. - pub(crate) async fn block_receipts( - &self, - block_id: BlockId, - ) -> EthResult>> { - // Fetch block and receipts based on block_id - let block_and_receipts = if block_id.is_pending() { - self.provider() - .pending_block_and_receipts()? - .map(|(sb, receipts)| (sb, Arc::new(receipts))) - } else if let Some(block_hash) = self.provider().block_hash_for_id(block_id)? { - self.cache().get_block_and_receipts(block_hash).await? 
- } else { - None - }; - - // If no block and receipts found, return None - let Some((block, receipts)) = block_and_receipts else { - return Ok(None); - }; - - // Extract block details - let block_number = block.number; - let base_fee = block.base_fee_per_gas; - let block_hash = block.hash(); - let excess_blob_gas = block.excess_blob_gas; - let timestamp = block.timestamp; - let block = block.unseal(); - - #[cfg(feature = "optimism")] - let (block_timestamp, l1_block_info) = { - let body = reth_evm_optimism::extract_l1_info(&block); - (block.timestamp, body.ok()) - }; - - // Build transaction receipts - block - .body - .into_iter() - .zip(receipts.iter()) - .enumerate() - .map(|(idx, (tx, receipt))| { - let meta = TransactionMeta { - tx_hash: tx.hash, - index: idx as u64, - block_hash, - block_number, - base_fee, - excess_blob_gas, - timestamp, - }; - - #[cfg(feature = "optimism")] - let op_tx_meta = - self.build_op_tx_meta(&tx, l1_block_info.clone(), block_timestamp)?; - - build_transaction_receipt_with_block_receipts( - tx, - meta, - receipt.clone(), - &receipts, - #[cfg(feature = "optimism")] - op_tx_meta, - ) - }) - .collect::>>() - .map(Some) - } - - /// Returns the number transactions in the given block. - /// - /// Returns `None` if the block does not exist - pub(crate) async fn block_transaction_count( - &self, - block_id: impl Into, - ) -> EthResult> { - let block_id = block_id.into(); - - if block_id.is_pending() { - // Pending block can be fetched directly without need for caching - return Ok(self.provider().pending_block()?.map(|block| block.body.len())) - } - - let block_hash = match self.provider().block_hash_for_id(block_id)? { - Some(block_hash) => block_hash, - None => return Ok(None), - }; - - Ok(self.cache().get_block_transactions(block_hash).await?.map(|txs| txs.len())) - } - - /// Returns the block object for the given block id. - pub(crate) async fn block( - &self, - block_id: impl Into, - ) -> EthResult> { - self.block_with_senders(block_id) - .await - .map(|maybe_block| maybe_block.map(|block| block.block)) - } - - /// Returns the block object for the given block id. - pub(crate) async fn block_with_senders( - &self, - block_id: impl Into, - ) -> EthResult> { - let block_id = block_id.into(); - - if block_id.is_pending() { - // Pending block can be fetched directly without need for caching - let maybe_pending = self.provider().pending_block_with_senders()?; - return if maybe_pending.is_some() { - Ok(maybe_pending) - } else { - self.local_pending_block().await - } - } - - let block_hash = match self.provider().block_hash_for_id(block_id)? { - Some(block_hash) => block_hash, - None => return Ok(None), - }; - - Ok(self.cache().get_sealed_block_with_senders(block_hash).await?) - } - - /// Returns the populated rpc block object for the given block id. - /// - /// If `full` is true, the block object will contain all transaction objects, otherwise it will - /// only contain the transaction hashes. - pub(crate) async fn rpc_block( - &self, - block_id: impl Into, - full: bool, - ) -> EthResult> { - let block = match self.block_with_senders(block_id).await? { - Some(block) => block, - None => return Ok(None), - }; - let block_hash = block.hash(); - let total_difficulty = self - .provider() - .header_td_by_number(block.number)? - .ok_or(EthApiError::UnknownBlockNumber)?; - let block = from_block(block.unseal(), total_difficulty, full.into(), Some(block_hash))?; - Ok(Some(block.into())) - } - - /// Returns the block header for the given block id. 
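The deleted `block_with_senders` above resolves a block id in a fixed order: the pending tag is served straight from the provider, falling back to building a local pending block, while every other id is resolved to a hash and served from the block cache. A toy sketch of that decision order with simplified stand-ins for the provider and cache calls:

#[derive(Debug, PartialEq)]
enum Source {
    ProviderPending,
    LocalPendingBuild,
    Cache,
}

fn resolve(is_pending: bool, provider_has_pending: bool, hash_known: bool) -> Option<Source> {
    if is_pending {
        // pending blocks bypass the cache entirely
        return Some(if provider_has_pending {
            Source::ProviderPending
        } else {
            Source::LocalPendingBuild
        });
    }
    // everything else is looked up by hash in the block cache
    hash_known.then_some(Source::Cache)
}

fn main() {
    assert_eq!(resolve(true, false, false), Some(Source::LocalPendingBuild));
    assert_eq!(resolve(false, false, false), None);
}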
- pub(crate) async fn rpc_block_header( - &self, - block_id: impl Into, - ) -> EthResult> { - let header = self.rpc_block(block_id, false).await?.map(|block| block.inner.header); - Ok(header) - } -} diff --git a/crates/rpc/rpc/src/eth/api/call.rs b/crates/rpc/rpc/src/eth/api/call.rs deleted file mode 100644 index 907065e47..000000000 --- a/crates/rpc/rpc/src/eth/api/call.rs +++ /dev/null @@ -1,530 +0,0 @@ -//! Contains RPC handler implementations specific to endpoints that call/execute within evm. - -use crate::{ - eth::{ - error::{ensure_success, EthApiError, EthResult, RevertError, RpcInvalidTransactionError}, - revm_utils::{ - apply_state_overrides, build_call_evm_env, caller_gas_allowance, - cap_tx_gas_limit_with_caller_allowance, get_precompiles, prepare_call_env, - }, - EthTransactions, - }, - EthApi, -}; -use reth_evm::ConfigureEvm; -use reth_network_api::NetworkInfo; -use reth_primitives::{revm::env::tx_env_with_recovered, BlockId, Bytes, TxKind, U256}; -use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProvider, StateProviderFactory, -}; -use reth_revm::database::StateProviderDatabase; -use reth_rpc_types::{ - state::{EvmOverrides, StateOverride}, - AccessListWithGasUsed, Bundle, EthCallResponse, StateContext, TransactionRequest, -}; -use reth_transaction_pool::TransactionPool; -use revm::{ - db::{CacheDB, DatabaseRef}, - primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ExecutionResult, HaltReason}, - DatabaseCommit, -}; -use revm_inspectors::access_list::AccessListInspector; -use tracing::trace; - -// Gas per transaction not creating a contract. -const MIN_TRANSACTION_GAS: u64 = 21_000u64; -/// Allowed error ratio for gas estimation -/// Taken from Geth's implementation in order to pass the hive tests -/// -const ESTIMATE_GAS_ERROR_RATIO: f64 = 0.015; - -impl EthApi -where - Pool: TransactionPool + Clone + 'static, - Provider: - BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, - Network: NetworkInfo + Send + Sync + 'static, - EvmConfig: ConfigureEvm + 'static, -{ - /// Estimate gas needed for execution of the `request` at the [`BlockId`]. 
- pub async fn estimate_gas_at( - &self, - request: TransactionRequest, - at: BlockId, - state_override: Option, - ) -> EthResult { - let (cfg, block_env, at) = self.evm_env_at(at).await?; - - self.on_blocking_task(|this| async move { - let state = this.state_at(at)?; - this.estimate_gas_with(cfg, block_env, request, state, state_override) - }) - .await - } - - /// Executes the call request (`eth_call`) and returns the output - pub async fn call( - &self, - request: TransactionRequest, - block_number: Option, - overrides: EvmOverrides, - ) -> EthResult { - let (res, _env) = - self.transact_call_at(request, block_number.unwrap_or_default(), overrides).await?; - - ensure_success(res.result) - } - - /// Simulate arbitrary number of transactions at an arbitrary blockchain index, with the - /// optionality of state overrides - pub async fn call_many( - &self, - bundle: Bundle, - state_context: Option, - mut state_override: Option, - ) -> EthResult> { - let Bundle { transactions, block_override } = bundle; - if transactions.is_empty() { - return Err(EthApiError::InvalidParams(String::from("transactions are empty."))) - } - - let StateContext { transaction_index, block_number } = state_context.unwrap_or_default(); - let transaction_index = transaction_index.unwrap_or_default(); - - let target_block = block_number.unwrap_or_default(); - let is_block_target_pending = target_block.is_pending(); - - let ((cfg, block_env, _), block) = futures::try_join!( - self.evm_env_at(target_block), - self.block_with_senders(target_block) - )?; - - let Some(block) = block else { return Err(EthApiError::UnknownBlockNumber) }; - let gas_limit = self.inner.gas_cap; - - // we're essentially replaying the transactions in the block here, hence we need the state - // that points to the beginning of the block, which is the state at the parent block - let mut at = block.parent_hash; - let mut replay_block_txs = true; - - let num_txs = transaction_index.index().unwrap_or(block.body.len()); - // but if all transactions are to be replayed, we can use the state at the block itself, - // however only if we're not targeting the pending block, because for pending we can't rely - // on the block's state being available - if !is_block_target_pending && num_txs == block.body.len() { - at = block.hash(); - replay_block_txs = false; - } - - let this = self.clone(); - self.spawn_with_state_at_block(at.into(), move |state| { - let mut results = Vec::with_capacity(transactions.len()); - let mut db = CacheDB::new(StateProviderDatabase::new(state)); - - if replay_block_txs { - // only need to replay the transactions in the block if not all transactions are - // to be replayed - let transactions = block.into_transactions_ecrecovered().take(num_txs); - for tx in transactions { - let tx = tx_env_with_recovered(&tx); - let env = - EnvWithHandlerCfg::new_with_cfg_env(cfg.clone(), block_env.clone(), tx); - let (res, _) = this.transact(&mut db, env)?; - db.commit(res.state); - } - } - - let block_overrides = block_override.map(Box::new); - - let mut transactions = transactions.into_iter().peekable(); - while let Some(tx) = transactions.next() { - // apply state overrides only once, before the first transaction - let state_overrides = state_override.take(); - let overrides = EvmOverrides::new(state_overrides, block_overrides.clone()); - - let env = prepare_call_env( - cfg.clone(), - block_env.clone(), - tx, - gas_limit, - &mut db, - overrides, - )?; - let (res, _) = this.transact(&mut db, env)?; - - match ensure_success(res.result) { - Ok(output) 
=> { - results.push(EthCallResponse { value: Some(output), error: None }); - } - Err(err) => { - results.push(EthCallResponse { value: None, error: Some(err.to_string()) }); - } - } - - if transactions.peek().is_some() { - // need to apply the state changes of this call before executing the next call - db.commit(res.state); - } - } - - Ok(results) - }) - .await - } - - /// Estimates the gas usage of the `request` with the state. - /// - /// This will execute the [`TransactionRequest`] and find the best gas limit via binary search - pub fn estimate_gas_with( - &self, - mut cfg: CfgEnvWithHandlerCfg, - block: BlockEnv, - request: TransactionRequest, - state: S, - state_override: Option, - ) -> EthResult - where - S: StateProvider, - { - // Disabled because eth_estimateGas is sometimes used with eoa senders - // See - cfg.disable_eip3607 = true; - - // The basefee should be ignored for eth_createAccessList - // See: - // - cfg.disable_base_fee = true; - - // Keep a copy of gas related request values - let tx_request_gas_limit = request.gas; - let tx_request_gas_price = request.gas_price; - let block_env_gas_limit = block.gas_limit; - - // Determine the highest possible gas limit, considering both the request's specified limit - // and the block's limit. - let mut highest_gas_limit = tx_request_gas_limit - .map(|tx_gas_limit| U256::from(tx_gas_limit).max(block_env_gas_limit)) - .unwrap_or(block_env_gas_limit); - - // Configure the evm env - let mut env = build_call_evm_env(cfg, block, request)?; - let mut db = CacheDB::new(StateProviderDatabase::new(state)); - - // Apply any state overrides if specified. - if let Some(state_override) = state_override { - apply_state_overrides(state_override, &mut db)?; - } - - // Optimize for simple transfer transactions, potentially reducing the gas estimate. - if env.tx.data.is_empty() { - if let TxKind::Call(to) = env.tx.transact_to { - if let Ok(code) = db.db.account_code(to) { - let no_code_callee = code.map(|code| code.is_empty()).unwrap_or(true); - if no_code_callee { - // If the tx is a simple transfer (call to an account with no code) we can - // shortcircuit. But simply returning - // `MIN_TRANSACTION_GAS` is dangerous because there might be additional - // field combos that bump the price up, so we try executing the function - // with the minimum gas limit to make sure. - let mut env = env.clone(); - env.tx.gas_limit = MIN_TRANSACTION_GAS; - if let Ok((res, _)) = self.transact(&mut db, env) { - if res.result.is_success() { - return Ok(U256::from(MIN_TRANSACTION_GAS)) - } - } - } - } - } - } - - // Check funds of the sender (only useful to check if transaction gas price is more than 0). - // - // The caller allowance is check by doing `(account.balance - tx.value) / tx.gas_price` - if env.tx.gas_price > U256::ZERO { - // cap the highest gas limit by max gas caller can afford with given gas price - highest_gas_limit = highest_gas_limit.min(caller_gas_allowance(&mut db, &env.tx)?); - } - - // We can now normalize the highest gas limit to a u64 - let mut highest_gas_limit: u64 = highest_gas_limit.try_into().unwrap_or(u64::MAX); - - // If the provided gas limit is less than computed cap, use that - env.tx.gas_limit = env.tx.gas_limit.min(highest_gas_limit); - - trace!(target: "rpc::eth::estimate", ?env, "Starting gas estimation"); - - // Execute the transaction with the highest possible gas limit. - let (mut res, mut env) = match self.transact(&mut db, env.clone()) { - // Handle the exceptional case where the transaction initialization uses too much gas. 
-            // If the gas price or gas limit was specified in the request, retry the transaction
-            // with the block's gas limit to determine if the failure was due to
-            // insufficient gas.
-            Err(EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh))
-                if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() =>
-            {
-                return Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db))
-            }
-            // Propagate other results (successful or other errors).
-            ethres => ethres?,
-        };
-
-        let gas_refund = match res.result {
-            ExecutionResult::Success { gas_refunded, .. } => gas_refunded,
-            ExecutionResult::Halt { reason, gas_used } => {
-                // here we don't check for invalid opcode because already executed with highest gas
-                // limit
-                return Err(RpcInvalidTransactionError::halt(reason, gas_used).into())
-            }
-            ExecutionResult::Revert { output, .. } => {
-                // if price or limit was included in the request then we can execute the request
-                // again with the block's gas limit to check if revert is gas related or not
-                return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() {
-                    Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db))
-                } else {
-                    // the transaction did revert
-                    Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into())
-                }
-            }
-        };
-
-        // At this point we know the call succeeded but want to find the _best_ (lowest) gas the
-        // transaction succeeds with. We find this by doing a binary search over the possible range.
-        //
-        // NOTE: this is the gas the transaction used, which is less than the
-        // transaction requires to succeed.
-        let mut gas_used = res.result.gas_used();
-        // the lowest value is capped by the gas used by the unconstrained transaction
-        let mut lowest_gas_limit = gas_used.saturating_sub(1);
-
-        // As stated in Geth, there is a good chance that the transaction will pass if we set the
-        // gas limit to the execution gas used plus the gas refund, so we check this first,
-        // applying a 64/63 multiplier to account for gas forwarding rules.
-        let optimistic_gas_limit = (gas_used + gas_refund) * 64 / 63;
-        if optimistic_gas_limit < highest_gas_limit {
-            // Re-execute the transaction with the optimistic gas limit and update the result,
-            // environment and gas limit estimates accordingly.
-            env.tx.gas_limit = optimistic_gas_limit;
-            (res, env) = self.transact(&mut db, env)?;
-            gas_used = res.result.gas_used();
-            update_estimated_gas_range(
-                res.result,
-                optimistic_gas_limit,
-                &mut highest_gas_limit,
-                &mut lowest_gas_limit,
-            )?;
-        };
-
-        // Pick a starting midpoint that is a bit larger than the measured gas usage.
-        let mut mid_gas_limit = std::cmp::min(
-            gas_used * 3,
-            ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64,
-        );
-
-        // Binary search narrows the range to find the minimum gas limit the transaction
-        // succeeds with.
-        while (highest_gas_limit - lowest_gas_limit) > 1 {
-            // An estimation error is allowed once the current gas limit range used in the binary
-            // search is small enough (less than 1.5% of the highest gas limit)
-            if (highest_gas_limit - lowest_gas_limit) as f64 / (highest_gas_limit as f64) <
-                ESTIMATE_GAS_ERROR_RATIO
-            {
-                break
-            };
-
-            env.tx.gas_limit = mid_gas_limit;
-
-            // Execute the transaction and adjust the search bounds based on the outcome.
-            match self.transact(&mut db, env.clone()) {
-                Err(EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh)) => {
-                    // Increase the lowest gas limit if gas is too high
-                    lowest_gas_limit = mid_gas_limit;
-                }
-                // Handle other cases, including successful transactions.
-                ethres => {
-                    // Unpack the result and environment if the transaction was successful.
-                    (res, env) = ethres?;
-                    // Update the estimated gas range based on the transaction result.
-                    update_estimated_gas_range(
-                        res.result,
-                        mid_gas_limit,
-                        &mut highest_gas_limit,
-                        &mut lowest_gas_limit,
-                    )?;
-                }
-            }
-
-            // New midpoint
-            mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64;
-        }
-
-        Ok(U256::from(highest_gas_limit))
-    }
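Stripped of the reth plumbing, the search above reduces to a textbook binary search with an early exit. A self-contained sketch, where `succeeds(gas)` is a hypothetical closure standing in for executing the call at a given gas limit, and where, as in the code above, the call is already known to succeed at `highest` on entry:

fn binary_search_gas(
    mut lowest: u64,
    mut highest: u64,
    error_ratio: f64,
    mut succeeds: impl FnMut(u64) -> bool,
) -> u64 {
    while highest - lowest > 1 {
        // allow a small estimation error to cut the tail of the search short
        if (highest - lowest) as f64 / highest as f64 < error_ratio {
            break;
        }
        let mid = ((lowest as u128 + highest as u128) / 2) as u64;
        if succeeds(mid) {
            // success: the minimum viable limit is at most `mid`
            highest = mid;
        } else {
            // failure is treated as gas-related: the answer is above `mid`
            lowest = mid;
        }
    }
    highest
}

Returning `highest` rather than `lowest` is what guarantees the reported limit is one at which the call actually succeeded.

-
-    /// Creates the `AccessList` for the `request` at the [`BlockId`] or latest.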
- pub(crate) async fn create_access_list_at( - &self, - request: TransactionRequest, - block_number: Option, - ) -> EthResult { - self.on_blocking_task(|this| async move { - this.create_access_list_with(request, block_number).await - }) - .await - } - - async fn create_access_list_with( - &self, - mut request: TransactionRequest, - at: Option, - ) -> EthResult { - let block_id = at.unwrap_or_default(); - let (cfg, block, at) = self.evm_env_at(block_id).await?; - let state = self.state_at(at)?; - - let mut env = build_call_evm_env(cfg, block, request.clone())?; - - // we want to disable this in eth_createAccessList, since this is common practice used by - // other node impls and providers - env.cfg.disable_block_gas_limit = true; - - // The basefee should be ignored for eth_createAccessList - // See: - // - env.cfg.disable_base_fee = true; - - let mut db = CacheDB::new(StateProviderDatabase::new(state)); - - if request.gas.is_none() && env.tx.gas_price > U256::ZERO { - // no gas limit was provided in the request, so we need to cap the request's gas limit - cap_tx_gas_limit_with_caller_allowance(&mut db, &mut env.tx)?; - } - - let from = request.from.unwrap_or_default(); - let to = if let Some(TxKind::Call(to)) = request.to { - to - } else { - let nonce = db.basic_ref(from)?.unwrap_or_default().nonce; - from.create(nonce) - }; - - // can consume the list since we're not using the request anymore - let initial = request.access_list.take().unwrap_or_default(); - - let precompiles = get_precompiles(env.handler_cfg.spec_id); - let mut inspector = AccessListInspector::new(initial, from, to, precompiles); - let (result, env) = self.inspect(&mut db, env, &mut inspector)?; - - match result.result { - ExecutionResult::Halt { reason, .. } => Err(match reason { - HaltReason::NonceOverflow => RpcInvalidTransactionError::NonceMaxValue, - halt => RpcInvalidTransactionError::EvmHalt(halt), - }), - ExecutionResult::Revert { output, .. } => { - Err(RpcInvalidTransactionError::Revert(RevertError::new(output))) - } - ExecutionResult::Success { .. } => Ok(()), - }?; - - let access_list = inspector.into_access_list(); - - let cfg_with_spec_id = - CfgEnvWithHandlerCfg { cfg_env: env.cfg.clone(), handler_cfg: env.handler_cfg }; - - // calculate the gas used using the access list - request.access_list = Some(access_list.clone()); - let gas_used = - self.estimate_gas_with(cfg_with_spec_id, env.block.clone(), request, &*db.db, None)?; - - Ok(AccessListWithGasUsed { access_list, gas_used }) - } - - /// Executes the requests again after an out of gas error to check if the error is gas related - /// or not - #[inline] - fn map_out_of_gas_err( - &self, - env_gas_limit: U256, - mut env: EnvWithHandlerCfg, - db: &mut CacheDB>, - ) -> EthApiError - where - S: StateProvider, - { - let req_gas_limit = env.tx.gas_limit; - env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX); - let (res, _) = match self.transact(db, env) { - Ok(res) => res, - Err(err) => return err, - }; - match res.result { - ExecutionResult::Success { .. } => { - // transaction succeeded by manually increasing the gas limit to - // highest, which means the caller lacks funds to pay for the tx - RpcInvalidTransactionError::BasicOutOfGas(req_gas_limit).into() - } - ExecutionResult::Revert { output, .. } => { - // reverted again after bumping the limit - RpcInvalidTransactionError::Revert(RevertError::new(output)).into() - } - ExecutionResult::Halt { reason, .. 
} => {
-                RpcInvalidTransactionError::EvmHalt(reason).into()
-            }
-        }
-    }
-}
-
-/// Updates the highest and lowest gas limits for binary search based on the execution result.
-///
-/// This function refines the gas limit estimates used in a binary search to find the optimal gas
-/// limit for a transaction. It adjusts the highest or lowest gas limits depending on whether the
-/// execution succeeded, reverted, or halted due to specific reasons.
-#[inline]
-fn update_estimated_gas_range(
-    result: ExecutionResult,
-    tx_gas_limit: u64,
-    highest_gas_limit: &mut u64,
-    lowest_gas_limit: &mut u64,
-) -> EthResult<()> {
-    match result {
-        ExecutionResult::Success { .. } => {
-            // Cap the highest gas limit with the succeeding gas limit.
-            *highest_gas_limit = tx_gas_limit;
-        }
-        ExecutionResult::Revert { .. } => {
-            // Increase the lowest gas limit.
-            *lowest_gas_limit = tx_gas_limit;
-        }
-        ExecutionResult::Halt { reason, .. } => {
-            match reason {
-                HaltReason::OutOfGas(_) | HaltReason::InvalidFEOpcode => {
-                    // Both `OutOfGas` and `InvalidFEOpcode` can occur dynamically if the gas left
-                    // is too low. Treat this as an out of gas condition,
-                    // knowing that the call succeeds with a higher gas limit.
-                    //
-                    // Common usage of invalid opcode in OpenZeppelin:
-                    //
-
-                    // Increase the lowest gas limit.
-                    *lowest_gas_limit = tx_gas_limit;
-                }
-                err => {
-                    // These cases should be unreachable because we know the transaction succeeds,
-                    // but if they occur, treat them as an error.
-                    return Err(RpcInvalidTransactionError::EvmHalt(err).into())
-                }
-            }
-        }
-    };
-    Ok(())
-}
diff --git a/crates/rpc/rpc/src/eth/api/fees.rs b/crates/rpc/rpc/src/eth/api/fees.rs
deleted file mode 100644
index 2493d6055..000000000
--- a/crates/rpc/rpc/src/eth/api/fees.rs
+++ /dev/null
@@ -1,228 +0,0 @@
-//! Contains RPC handler implementations for fee history.
-
-use crate::{
-    eth::{
-        api::fee_history::{calculate_reward_percentiles_for_block, FeeHistoryEntry},
-        error::{EthApiError, EthResult},
-    },
-    EthApi,
-};
-use reth_evm::ConfigureEvm;
-use reth_network_api::NetworkInfo;
-use reth_primitives::{BlockNumberOrTag, U256};
-use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory};
-use reth_rpc_types::FeeHistory;
-use reth_transaction_pool::TransactionPool;
-use tracing::debug;
-
-impl<Provider, Pool, Network, EvmConfig> EthApi<Provider, Pool, Network, EvmConfig>
-where
-    Pool: TransactionPool + Clone + 'static,
-    Provider:
-        BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static,
-    Network: NetworkInfo + Send + Sync + 'static,
-    EvmConfig: ConfigureEvm + 'static,
-{
-    /// Returns a suggestion for a gas price for legacy transactions.
-    ///
-    /// See also:
-    pub(crate) async fn gas_price(&self) -> EthResult<U256> {
-        let header = self.block(BlockNumberOrTag::Latest);
-        let suggested_tip = self.suggested_priority_fee();
-        let (header, suggested_tip) = futures::try_join!(header, suggested_tip)?;
-        let base_fee = header.and_then(|h| h.base_fee_per_gas).unwrap_or_default();
-        Ok(suggested_tip + U256::from(base_fee))
-    }
-
-    /// Returns a suggestion for a base fee for blob transactions.
-    pub(crate) async fn blob_base_fee(&self) -> EthResult<U256> {
-        self.block(BlockNumberOrTag::Latest)
-            .await?
- .and_then(|h: reth_primitives::SealedBlock| h.next_block_blob_fee()) - .ok_or(EthApiError::ExcessBlobGasNotSet) - .map(U256::from) - } - - /// Returns a suggestion for the priority fee (the tip) - pub(crate) async fn suggested_priority_fee(&self) -> EthResult { - self.gas_oracle().suggest_tip_cap().await - } - - /// Reports the fee history, for the given amount of blocks, up until the given newest block. - /// - /// If `reward_percentiles` are provided the [`FeeHistory`] will include the _approximated_ - /// rewards for the requested range. - pub(crate) async fn fee_history( - &self, - mut block_count: u64, - newest_block: BlockNumberOrTag, - reward_percentiles: Option>, - ) -> EthResult { - if block_count == 0 { - return Ok(FeeHistory::default()) - } - - // See https://github.com/ethereum/go-ethereum/blob/2754b197c935ee63101cbbca2752338246384fec/eth/gasprice/feehistory.go#L218C8-L225 - let max_fee_history = if reward_percentiles.is_none() { - self.gas_oracle().config().max_header_history - } else { - self.gas_oracle().config().max_block_history - }; - - if block_count > max_fee_history { - debug!( - requested = block_count, - truncated = max_fee_history, - "Sanitizing fee history block count" - ); - block_count = max_fee_history - } - - let Some(end_block) = self.provider().block_number_for_id(newest_block.into())? else { - return Err(EthApiError::UnknownBlockNumber) - }; - - // need to add 1 to the end block to get the correct (inclusive) range - let end_block_plus = end_block + 1; - // Ensure that we would not be querying outside of genesis - if end_block_plus < block_count { - block_count = end_block_plus; - } - - // If reward percentiles were specified, we - // need to validate that they are monotonically - // increasing and 0 <= p <= 100 - // Note: The types used ensure that the percentiles are never < 0 - if let Some(percentiles) = &reward_percentiles { - if percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) 
{
-                return Err(EthApiError::InvalidRewardPercentiles)
-            }
-        }
-
-        // Fetch the headers and ensure we got all of them
-        //
-        // Treat a request for 1 block as a request for `newest_block..=newest_block`,
-        // otherwise fetch the `block_count` blocks ending at `newest_block`, i.e.
-        // `end_block_plus - block_count..=end_block`
-        // NOTE: We ensured that block count is capped
-        let start_block = end_block_plus - block_count;
-
-        // Collect base fees, gas usage ratios and (optionally) reward percentile data
-        let mut base_fee_per_gas: Vec<u128> = Vec::new();
-        let mut gas_used_ratio: Vec<f64> = Vec::new();
-
-        let mut base_fee_per_blob_gas: Vec<u128> = Vec::new();
-        let mut blob_gas_used_ratio: Vec<f64> = Vec::new();
-
-        let mut rewards: Vec<Vec<u128>> = Vec::new();
-
-        // Check if the requested range is within the cache bounds
-        let fee_entries = self.fee_history_cache().get_history(start_block, end_block).await;
-
-        if let Some(fee_entries) = fee_entries {
-            if fee_entries.len() != block_count as usize {
-                return Err(EthApiError::InvalidBlockRange)
-            }
-
-            for entry in &fee_entries {
-                base_fee_per_gas.push(entry.base_fee_per_gas as u128);
-                gas_used_ratio.push(entry.gas_used_ratio);
-                base_fee_per_blob_gas.push(entry.base_fee_per_blob_gas.unwrap_or_default());
-                blob_gas_used_ratio.push(entry.blob_gas_used_ratio);
-
-                if let Some(percentiles) = &reward_percentiles {
-                    let mut block_rewards = Vec::with_capacity(percentiles.len());
-                    for &percentile in percentiles {
-                        block_rewards.push(self.approximate_percentile(entry, percentile));
-                    }
-                    rewards.push(block_rewards);
-                }
-            }
-            let last_entry = fee_entries.last().expect("is not empty");
-
-            // Also need to include the `base_fee_per_gas` and `base_fee_per_blob_gas` for the next
-            // block
-            base_fee_per_gas
-                .push(last_entry.next_block_base_fee(&self.provider().chain_spec()) as u128);
-
-            base_fee_per_blob_gas.push(last_entry.next_block_blob_fee().unwrap_or_default());
-        } else {
-            // read the requested header range
-            let headers = self.provider().sealed_headers_range(start_block..=end_block)?;
-            if headers.len() != block_count as usize {
-                return Err(EthApiError::InvalidBlockRange)
-            }
-
-            for header in &headers {
-                let ratio = if header.gas_limit > 0 {
-                    header.gas_used as f64 / header.gas_limit as f64
-                } else {
-                    1.0
-                };
-
-                base_fee_per_gas.push(header.base_fee_per_gas.unwrap_or_default() as u128);
-                gas_used_ratio.push(ratio);
-                base_fee_per_blob_gas.push(header.blob_fee().unwrap_or_default());
-                blob_gas_used_ratio.push(
-                    header.blob_gas_used.unwrap_or_default() as f64 /
-                        reth_primitives::constants::eip4844::MAX_DATA_GAS_PER_BLOCK as f64,
-                );
-
-                // Percentiles were specified, so we need to collect reward percentile info
-                if let Some(percentiles) = &reward_percentiles {
-                    let (transactions, receipts) = self
-                        .cache()
-                        .get_transactions_and_receipts(header.hash())
-                        .await?
-                        .ok_or(EthApiError::InvalidBlockRange)?;
-                    rewards.push(
-                        calculate_reward_percentiles_for_block(
-                            percentiles,
-                            header.gas_used,
-                            header.base_fee_per_gas.unwrap_or_default(),
-                            &transactions,
-                            &receipts,
-                        )
-                        .unwrap_or_default(),
-                    );
-                }
-            }
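Both branches above finish by appending one extra base fee: per the eth_feeHistory spec, `base_fee_per_gas` also covers the block after the newest returned one, since that value is derivable from the newest block's header. A hedged sketch of the EIP-1559 update rule that derivation uses, with Ethereum-mainnet constants inlined (reth instead routes this through `base_fee_params_at_timestamp`, as the continuation below shows):

fn next_block_base_fee(gas_used: u128, gas_limit: u128, base_fee: u128) -> u128 {
    const ELASTICITY_MULTIPLIER: u128 = 2;
    const BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 8;
    let target = gas_limit / ELASTICITY_MULTIPLIER;
    match gas_used.cmp(&target) {
        // exactly at target: base fee is unchanged
        std::cmp::Ordering::Equal => base_fee,
        // above target: increase by up to 12.5%, and by at least 1 wei
        std::cmp::Ordering::Greater => {
            base_fee +
                (base_fee * (gas_used - target) / target / BASE_FEE_MAX_CHANGE_DENOMINATOR).max(1)
        }
        // below target: decrease proportionally (delta is at most base_fee / 8)
        std::cmp::Ordering::Less => {
            base_fee - base_fee * (target - gas_used) / target / BASE_FEE_MAX_CHANGE_DENOMINATOR
        }
    }
}

-
-            // The spec states that `base_fee_per_gas` "[..] includes the next block after the
-            // newest of the returned range, because this value can be derived from the
-            // newest block"
-            //
-            // The unwrap is safe since we checked earlier that we got at least 1 header.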
- let last_header = headers.last().expect("is present"); - base_fee_per_gas.push( - self.provider().chain_spec().base_fee_params_at_timestamp(last_header.timestamp).next_block_base_fee( - last_header.gas_used as u128, - last_header.gas_limit as u128, - last_header.base_fee_per_gas.unwrap_or_default() as u128, - )); - - // Same goes for the `base_fee_per_blob_gas`: - // > "[..] includes the next block after the newest of the returned range, because this value can be derived from the newest block. - base_fee_per_blob_gas - .push(last_header.next_block_blob_fee().unwrap_or_default()); - }; - - Ok(FeeHistory { - base_fee_per_gas, - gas_used_ratio, - base_fee_per_blob_gas, - blob_gas_used_ratio, - oldest_block: start_block, - reward: reward_percentiles.map(|_| rewards), - }) - } - - /// Approximates reward at a given percentile for a specific block - /// Based on the configured resolution - fn approximate_percentile(&self, entry: &FeeHistoryEntry, requested_percentile: f64) -> u128 { - let resolution = self.fee_history_cache().resolution(); - let rounded_percentile = - (requested_percentile * resolution as f64).round() / resolution as f64; - let clamped_percentile = rounded_percentile.clamp(0.0, 100.0); - - // Calculate the index in the precomputed rewards array - let index = (clamped_percentile / (1.0 / resolution as f64)).round() as usize; - // Fetch the reward from the FeeHistoryEntry - entry.rewards.get(index).cloned().unwrap_or_default() - } -} diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs deleted file mode 100644 index 364a55842..000000000 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ /dev/null @@ -1,503 +0,0 @@ -//! The entire implementation of the namespace is quite large, hence it is divided across several -//! files. - -use crate::eth::{ - api::{ - fee_history::FeeHistoryCache, - pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}, - }, - cache::EthStateCache, - error::{EthApiError, EthResult}, - gas_oracle::GasPriceOracle, - signer::EthSigner, - traits::RawTransactionForwarder, -}; -use async_trait::async_trait; -use reth_chainspec::ChainInfo; -use reth_errors::{RethError, RethResult}; -use reth_evm::ConfigureEvm; -use reth_network_api::NetworkInfo; -use reth_primitives::{ - revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}, - Address, BlockId, BlockNumberOrTag, SealedBlockWithSenders, SealedHeader, B256, U256, U64, -}; -use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderBox, StateProviderFactory, -}; -use reth_rpc_types::{SyncInfo, SyncStatus}; -use reth_tasks::{pool::BlockingTaskPool, TaskSpawner, TokioTaskExecutor}; -use reth_transaction_pool::TransactionPool; -use revm_primitives::{CfgEnv, SpecId}; -use std::{ - fmt::Debug, - future::Future, - sync::Arc, - time::{Duration, Instant}, -}; -use tokio::sync::{oneshot, Mutex}; - -mod block; -mod call; -pub(crate) mod fee_history; - -mod fees; -#[cfg(feature = "optimism")] -mod optimism; -mod pending_block; -mod server; -mod sign; -mod state; -mod transactions; - -pub use transactions::{EthTransactions, TransactionSource}; - -/// `Eth` API trait. -/// -/// Defines core functionality of the `eth` API implementation. -#[async_trait] -pub trait EthApiSpec: EthTransactions + Send + Sync { - /// Returns the current ethereum protocol version. 
-    async fn protocol_version(&self) -> RethResult<U64>;
-
-    /// Returns the chain id
-    fn chain_id(&self) -> U64;
-
-    /// Returns provider chain info
-    fn chain_info(&self) -> RethResult<ChainInfo>;
-
-    /// Returns a list of addresses owned by provider.
-    fn accounts(&self) -> Vec<Address>
; - - /// Returns `true` if the network is undergoing sync. - fn is_syncing(&self) -> bool; - - /// Returns the [SyncStatus] of the network - fn sync_status(&self) -> RethResult; -} - -/// `Eth` API implementation. -/// -/// This type provides the functionality for handling `eth_` related requests. -/// These are implemented two-fold: Core functionality is implemented as [`EthApiSpec`] -/// trait. Additionally, the required server implementations (e.g. [`reth_rpc_api::EthApiServer`]) -/// are implemented separately in submodules. The rpc handler implementation can then delegate to -/// the main impls. This way [`EthApi`] is not limited to [`jsonrpsee`] and can be used standalone -/// or in other network handlers (for example ipc). -pub struct EthApi { - /// All nested fields bundled together. - inner: Arc>, -} - -impl EthApi { - /// Sets a forwarder for `eth_sendRawTransaction` - /// - /// Note: this might be removed in the future in favor of a more generic approach. - pub fn set_eth_raw_transaction_forwarder(&self, forwarder: Arc) { - self.inner.raw_transaction_forwarder.write().replace(forwarder); - } -} - -impl EthApi -where - Provider: BlockReaderIdExt + ChainSpecProvider, -{ - /// Creates a new, shareable instance using the default tokio task spawner. - #[allow(clippy::too_many_arguments)] - pub fn new( - provider: Provider, - pool: Pool, - network: Network, - eth_cache: EthStateCache, - gas_oracle: GasPriceOracle, - gas_cap: impl Into, - blocking_task_pool: BlockingTaskPool, - fee_history_cache: FeeHistoryCache, - evm_config: EvmConfig, - raw_transaction_forwarder: Option>, - ) -> Self { - Self::with_spawner( - provider, - pool, - network, - eth_cache, - gas_oracle, - gas_cap.into().into(), - Box::::default(), - blocking_task_pool, - fee_history_cache, - evm_config, - raw_transaction_forwarder, - ) - } - - /// Creates a new, shareable instance. - #[allow(clippy::too_many_arguments)] - pub fn with_spawner( - provider: Provider, - pool: Pool, - network: Network, - eth_cache: EthStateCache, - gas_oracle: GasPriceOracle, - gas_cap: u64, - task_spawner: Box, - blocking_task_pool: BlockingTaskPool, - fee_history_cache: FeeHistoryCache, - evm_config: EvmConfig, - raw_transaction_forwarder: Option>, - ) -> Self { - // get the block number of the latest block - let latest_block = provider - .header_by_number_or_tag(BlockNumberOrTag::Latest) - .ok() - .flatten() - .map(|header| header.number) - .unwrap_or_default(); - - let inner = EthApiInner { - provider, - pool, - network, - signers: parking_lot::RwLock::new(Default::default()), - eth_cache, - gas_oracle, - gas_cap, - starting_block: U256::from(latest_block), - task_spawner, - pending_block: Default::default(), - blocking_task_pool, - fee_history_cache, - evm_config, - raw_transaction_forwarder: parking_lot::RwLock::new(raw_transaction_forwarder), - }; - - Self { inner: Arc::new(inner) } - } - - /// Executes the future on a new blocking task. - /// - /// This accepts a closure that creates a new future using a clone of this type and spawns the - /// future onto a new task that is allowed to block. - /// - /// Note: This is expected for futures that are dominated by blocking IO operations. 
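As an aside before the implementation: the shape of this helper is a common tokio pattern, namely ship a blocking-heavy future to a task that is allowed to block, and hand the result back over a oneshot channel so async callers never stall. A minimal sketch under that assumption, using plain tokio rather than reth's `TaskSpawner` abstraction (names are ours):

async fn run_blocking<R, F>(fut: F) -> Option<R>
where
    R: Send + 'static,
    F: std::future::Future<Output = R> + Send + 'static,
{
    // capture the runtime handle before leaving the async context
    let handle = tokio::runtime::Handle::current();
    let (tx, rx) = tokio::sync::oneshot::channel();
    tokio::task::spawn_blocking(move || {
        // drive the future to completion on a thread that may block
        let res = handle.block_on(fut);
        // the receiver may have been dropped; ignore the send error
        let _ = tx.send(res);
    });
    rx.await.ok()
}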
- pub(crate) async fn on_blocking_task(&self, c: C) -> EthResult - where - C: FnOnce(Self) -> F, - F: Future> + Send + 'static, - R: Send + 'static, - { - let (tx, rx) = oneshot::channel(); - let this = self.clone(); - let f = c(this); - self.inner.task_spawner.spawn_blocking(Box::pin(async move { - let res = f.await; - let _ = tx.send(res); - })); - rx.await.map_err(|_| EthApiError::InternalEthError)? - } - - /// Returns the state cache frontend - pub(crate) fn cache(&self) -> &EthStateCache { - &self.inner.eth_cache - } - - /// Returns the gas oracle frontend - pub(crate) fn gas_oracle(&self) -> &GasPriceOracle { - &self.inner.gas_oracle - } - - /// Returns the configured gas limit cap for `eth_call` and tracing related calls - pub fn gas_cap(&self) -> u64 { - self.inner.gas_cap - } - - /// Returns the inner `Provider` - pub fn provider(&self) -> &Provider { - &self.inner.provider - } - - /// Returns the inner `Network` - pub fn network(&self) -> &Network { - &self.inner.network - } - - /// Returns the inner `Pool` - pub fn pool(&self) -> &Pool { - &self.inner.pool - } - - /// Returns fee history cache - pub fn fee_history_cache(&self) -> &FeeHistoryCache { - &self.inner.fee_history_cache - } -} - -// === State access helpers === - -impl EthApi -where - Provider: - BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, -{ - /// Returns the state at the given [`BlockId`] enum. - /// - /// Note: if not [`BlockNumberOrTag::Pending`] then this will only return canonical state. See also - pub fn state_at_block_id(&self, at: BlockId) -> EthResult { - Ok(self.provider().state_by_block_id(at)?) - } - - /// Returns the state at the given [`BlockId`] enum or the latest. - /// - /// Convenience function to interprets `None` as `BlockId::Number(BlockNumberOrTag::Latest)` - pub fn state_at_block_id_or_latest( - &self, - block_id: Option, - ) -> EthResult { - if let Some(block_id) = block_id { - self.state_at_block_id(block_id) - } else { - Ok(self.latest_state()?) - } - } - - /// Returns the state at the given block number - pub fn state_at_hash(&self, block_hash: B256) -> RethResult { - Ok(self.provider().history_by_block_hash(block_hash)?) - } - - /// Returns the _latest_ state - pub fn latest_state(&self) -> RethResult { - Ok(self.provider().latest()?) - } -} - -impl EthApi -where - Provider: - BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, - Pool: TransactionPool + Clone + 'static, - Network: NetworkInfo + Send + Sync + 'static, - EvmConfig: ConfigureEvm + Clone + 'static, -{ - /// Configures the [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the pending block - /// - /// If no pending block is available, this will derive it from the `latest` block - pub(crate) fn pending_block_env_and_cfg(&self) -> EthResult { - let origin: PendingBlockEnvOrigin = if let Some(pending) = - self.provider().pending_block_with_senders()? 
-        {
-            PendingBlockEnvOrigin::ActualPending(pending)
-        } else {
-            // no pending block from the CL yet, so we use the latest block and modify the env
-            // values that we can
-            let latest =
-                self.provider().latest_header()?.ok_or_else(|| EthApiError::UnknownBlockNumber)?;
-
-            let (mut latest_header, block_hash) = latest.split();
-            // child block
-            latest_header.number += 1;
-            // assumed child block is in the next slot: 12s
-            latest_header.timestamp += 12;
-            // base fee of the child block
-            let chain_spec = self.provider().chain_spec();
-
-            latest_header.base_fee_per_gas = latest_header.next_block_base_fee(
-                chain_spec.base_fee_params_at_timestamp(latest_header.timestamp),
-            );
-
-            // update excess blob gas consumed above target
-            latest_header.excess_blob_gas = latest_header.next_block_excess_blob_gas();
-
-            // we're reusing the same block hash because we need this to lookup the block's state
-            let latest = SealedHeader::new(latest_header, block_hash);
-
-            PendingBlockEnvOrigin::DerivedFromLatest(latest)
-        };
-
-        let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(CfgEnv::default(), SpecId::LATEST);
-
-        let mut block_env = BlockEnv::default();
-        // Note: for the PENDING block we assume it is past the known merge block and thus this will
-        // not fail when looking up the total difficulty value for the blockenv.
-        self.provider().fill_env_with_header(
-            &mut cfg,
-            &mut block_env,
-            origin.header(),
-            self.inner.evm_config.clone(),
-        )?;
-
-        Ok(PendingBlockEnv { cfg, block_env, origin })
-    }
-
-    /// Returns the locally built pending block
-    pub(crate) async fn local_pending_block(&self) -> EthResult<Option<SealedBlockWithSenders>> {
-        let pending = self.pending_block_env_and_cfg()?;
-        if pending.origin.is_actual_pending() {
-            return Ok(pending.origin.into_actual_pending())
-        }
-
-        // no pending block from the CL yet, so we need to build it ourselves via txpool
-        self.on_blocking_task(|this| async move {
-            let mut lock = this.inner.pending_block.lock().await;
-            let now = Instant::now();
-
-            // check if the block is still good
-            if let Some(pending_block) = lock.as_ref() {
-                // this is guaranteed to be the `latest` header
-                if pending.block_env.number.to::<u64>() == pending_block.block.number &&
-                    pending.origin.header().hash() == pending_block.block.parent_hash &&
-                    now <= pending_block.expires_at
-                {
-                    return Ok(Some(pending_block.block.clone()))
-                }
-            }
-
-            // we rebuild the block
-            let pending_block = match pending.build_block(this.provider(), this.pool()) {
-                Ok(block) => block,
-                Err(err) => {
-                    tracing::debug!(target: "rpc", "Failed to build pending block: {:?}", err);
-                    return Ok(None)
-                }
-            };
-
-            let now = Instant::now();
-            *lock = Some(PendingBlock {
-                block: pending_block.clone(),
-                expires_at: now + Duration::from_secs(1),
-            });
-
-            Ok(Some(pending_block))
-        })
-        .await
-    }
-}
-
-impl<Provider, Pool, Network, EvmConfig> std::fmt::Debug
-    for EthApi<Provider, Pool, Network, EvmConfig>
-{
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("EthApi").finish_non_exhaustive()
-    }
-}
-
-impl<Provider, Pool, Network, EvmConfig> Clone for EthApi<Provider, Pool, Network, EvmConfig> {
-    fn clone(&self) -> Self {
-        Self { inner: Arc::clone(&self.inner) }
-    }
-}
-
-#[async_trait]
-impl<Provider, Pool, Network, EvmConfig> EthApiSpec for EthApi<Provider, Pool, Network, EvmConfig>
-where
-    Pool: TransactionPool + Clone + 'static,
-    Provider:
-        BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static,
-    Network: NetworkInfo + 'static,
-    EvmConfig: ConfigureEvm + 'static,
-{
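The derived-from-latest branch above is the interesting one: when the CL has not supplied a pending block, the env is faked by advancing the latest header one slot. A condensed sketch of just that derivation, with a simplified header struct and a caller-supplied `next_base_fee` closure standing in for reth's `next_block_base_fee`/`next_block_excess_blob_gas` helpers:

#[derive(Clone)]
struct Header {
    number: u64,
    timestamp: u64,
    base_fee_per_gas: Option<u64>,
    gas_used: u64,
    gas_limit: u64,
}

/// Builds the assumed child header for a locally derived "pending" block.
fn derive_pending_header(latest: &Header, next_base_fee: impl Fn(&Header) -> u64) -> Header {
    let mut child = latest.clone();
    // the child block follows the latest one...
    child.number += 1;
    // ...and is assumed to land in the next 12-second slot
    child.timestamp += 12;
    // the base fee is fully determined by the parent, per EIP-1559
    child.base_fee_per_gas = Some(next_base_fee(latest));
    // nothing has been executed in the child block yet
    child.gas_used = 0;
    child
}

Note also the one-second `expires_at` above: a locally built pending block is only cached briefly, since the txpool contents it was built from change constantly.

-    /// Returns the current ethereum protocol version.
-    ///
-    /// Note: This returns an `U64`, since this should return as hex string.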
-    async fn protocol_version(&self) -> RethResult<U64> {
-        let status = self.network().network_status().await.map_err(RethError::other)?;
-        Ok(U64::from(status.protocol_version))
-    }
-
-    /// Returns the chain id
-    fn chain_id(&self) -> U64 {
-        U64::from(self.network().chain_id())
-    }
-
-    /// Returns the current info for the chain
-    fn chain_info(&self) -> RethResult<ChainInfo> {
-        Ok(self.provider().chain_info()?)
-    }
-
-    fn accounts(&self) -> Vec<Address>
{ - self.inner.signers.read().iter().flat_map(|s| s.accounts()).collect() - } - - fn is_syncing(&self) -> bool { - self.network().is_syncing() - } - - /// Returns the [SyncStatus] of the network - fn sync_status(&self) -> RethResult { - let status = if self.is_syncing() { - let current_block = U256::from( - self.provider().chain_info().map(|info| info.best_number).unwrap_or_default(), - ); - SyncStatus::Info(SyncInfo { - starting_block: self.inner.starting_block, - current_block, - highest_block: current_block, - warp_chunks_amount: None, - warp_chunks_processed: None, - }) - } else { - SyncStatus::None - }; - Ok(status) - } -} - -/// The default gas limit for `eth_call` and adjacent calls. -/// -/// This is different from the default to regular 30M block gas limit -/// [`ETHEREUM_BLOCK_GAS_LIMIT`](reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT) to allow for -/// more complex calls. -pub const RPC_DEFAULT_GAS_CAP: GasCap = GasCap(50_000_000); - -/// The wrapper type for gas limit -#[derive(Debug, Clone, Copy)] -pub struct GasCap(u64); - -impl Default for GasCap { - fn default() -> Self { - RPC_DEFAULT_GAS_CAP - } -} - -impl From for GasCap { - fn from(gas_cap: u64) -> Self { - Self(gas_cap) - } -} - -impl From for u64 { - fn from(gas_cap: GasCap) -> Self { - gas_cap.0 - } -} - -/// Container type `EthApi` -struct EthApiInner { - /// The transaction pool. - pool: Pool, - /// The provider that can interact with the chain. - provider: Provider, - /// An interface to interact with the network - network: Network, - /// All configured Signers - signers: parking_lot::RwLock>>, - /// The async cache frontend for eth related data - eth_cache: EthStateCache, - /// The async gas oracle frontend for gas price suggestions - gas_oracle: GasPriceOracle, - /// Maximum gas limit for `eth_call` and call tracing RPC methods. - gas_cap: u64, - /// The block number at which the node started - starting_block: U256, - /// The type that can spawn tasks which would otherwise block. - task_spawner: Box, - /// Cached pending block if any - pending_block: Mutex>, - /// A pool dedicated to blocking tasks. - blocking_task_pool: BlockingTaskPool, - /// Cache for block fees history - fee_history_cache: FeeHistoryCache, - /// The type that defines how to configure the EVM - evm_config: EvmConfig, - /// Allows forwarding received raw transactions - raw_transaction_forwarder: parking_lot::RwLock>>, -} diff --git a/crates/rpc/rpc/src/eth/api/optimism.rs b/crates/rpc/rpc/src/eth/api/optimism.rs deleted file mode 100644 index af5845014..000000000 --- a/crates/rpc/rpc/src/eth/api/optimism.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! Optimism helpers. - -use revm::L1BlockInfo; - -/// Optimism Transaction Metadata -/// -/// Includes the L1 fee and data gas for the tx along with the L1 -/// block info. In order to pass the [`OptimismTxMeta`] into the -/// async colored `build_transaction_receipt_with_block_receipts` -/// function, a reference counter for the L1 block info is -/// used so the L1 block info can be shared between receipts. -#[derive(Debug, Default, Clone)] -pub(crate) struct OptimismTxMeta { - /// The L1 block info. - pub(crate) l1_block_info: Option, - /// The L1 fee for the block. - pub(crate) l1_fee: Option, - /// The L1 data gas for the block. - pub(crate) l1_data_gas: Option, -} - -impl OptimismTxMeta { - /// Creates a new [`OptimismTxMeta`]. 
- pub(crate) const fn new( - l1_block_info: Option, - l1_fee: Option, - l1_data_gas: Option, - ) -> Self { - Self { l1_block_info, l1_fee, l1_data_gas } - } -} diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs deleted file mode 100644 index f238b4da0..000000000 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ /dev/null @@ -1,723 +0,0 @@ -//! Implementation of the [`jsonrpsee`] generated [`reth_rpc_api::EthApiServer`] trait -//! Handles RPC requests for the `eth_` namespace. - -use super::EthApiSpec; -use crate::{ - eth::{ - api::{EthApi, EthTransactions}, - error::EthApiError, - }, - result::{internal_rpc_err, ToRpcResult}, -}; -use alloy_dyn_abi::TypedData; -use jsonrpsee::core::RpcResult as Result; -use reth_evm::ConfigureEvm; -use reth_network_api::NetworkInfo; -use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, B64, U256, U64}; -use reth_provider::{ - BlockIdReader, BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, - HeaderProvider, StateProviderFactory, -}; -use reth_rpc_api::EthApiServer; -use reth_rpc_types::{ - serde_helpers::JsonStorageKey, - state::{EvmOverrides, StateOverride}, - AccessListWithGasUsed, AnyTransactionReceipt, BlockOverrides, Bundle, - EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Header, Index, RichBlock, - StateContext, SyncStatus, TransactionRequest, Work, -}; -use reth_transaction_pool::TransactionPool; -use tracing::trace; - -#[async_trait::async_trait] -impl EthApiServer for EthApi -where - Self: EthApiSpec + EthTransactions, - Pool: TransactionPool + 'static, - Provider: BlockReader - + BlockIdReader - + BlockReaderIdExt - + ChainSpecProvider - + HeaderProvider - + StateProviderFactory - + EvmEnvProvider - + 'static, - Network: NetworkInfo + Send + Sync + 'static, - EvmConfig: ConfigureEvm + 'static, -{ - /// Handler for: `eth_protocolVersion` - async fn protocol_version(&self) -> Result { - trace!(target: "rpc::eth", "Serving eth_protocolVersion"); - EthApiSpec::protocol_version(self).await.to_rpc_result() - } - - /// Handler for: `eth_syncing` - fn syncing(&self) -> Result { - trace!(target: "rpc::eth", "Serving eth_syncing"); - EthApiSpec::sync_status(self).to_rpc_result() - } - - /// Handler for: `eth_coinbase` - async fn author(&self) -> Result
{ - Err(internal_rpc_err("unimplemented")) - } - - /// Handler for: `eth_accounts` - fn accounts(&self) -> Result> { - trace!(target: "rpc::eth", "Serving eth_accounts"); - Ok(EthApiSpec::accounts(self)) - } - - /// Handler for: `eth_blockNumber` - fn block_number(&self) -> Result { - trace!(target: "rpc::eth", "Serving eth_blockNumber"); - Ok(U256::from( - EthApiSpec::chain_info(self).with_message("failed to read chain info")?.best_number, - )) - } - - /// Handler for: `eth_chainId` - async fn chain_id(&self) -> Result> { - trace!(target: "rpc::eth", "Serving eth_chainId"); - Ok(Some(EthApiSpec::chain_id(self))) - } - - /// Handler for: `eth_getBlockByHash` - async fn block_by_hash(&self, hash: B256, full: bool) -> Result> { - trace!(target: "rpc::eth", ?hash, ?full, "Serving eth_getBlockByHash"); - Ok(Self::rpc_block(self, hash, full).await?) - } - - /// Handler for: `eth_getBlockByNumber` - async fn block_by_number( - &self, - number: BlockNumberOrTag, - full: bool, - ) -> Result> { - trace!(target: "rpc::eth", ?number, ?full, "Serving eth_getBlockByNumber"); - Ok(Self::rpc_block(self, number, full).await?) - } - - /// Handler for: `eth_getBlockTransactionCountByHash` - async fn block_transaction_count_by_hash(&self, hash: B256) -> Result> { - trace!(target: "rpc::eth", ?hash, "Serving eth_getBlockTransactionCountByHash"); - Ok(Self::block_transaction_count(self, hash).await?.map(U256::from)) - } - - /// Handler for: `eth_getBlockTransactionCountByNumber` - async fn block_transaction_count_by_number( - &self, - number: BlockNumberOrTag, - ) -> Result> { - trace!(target: "rpc::eth", ?number, "Serving eth_getBlockTransactionCountByNumber"); - Ok(Self::block_transaction_count(self, number).await?.map(U256::from)) - } - - /// Handler for: `eth_getUncleCountByBlockHash` - async fn block_uncles_count_by_hash(&self, hash: B256) -> Result> { - trace!(target: "rpc::eth", ?hash, "Serving eth_getUncleCountByBlockHash"); - Ok(Self::ommers(self, hash)?.map(|ommers| U256::from(ommers.len()))) - } - - /// Handler for: `eth_getUncleCountByBlockNumber` - async fn block_uncles_count_by_number(&self, number: BlockNumberOrTag) -> Result> { - trace!(target: "rpc::eth", ?number, "Serving eth_getUncleCountByBlockNumber"); - Ok(Self::ommers(self, number)?.map(|ommers| U256::from(ommers.len()))) - } - - /// Handler for: `eth_getBlockReceipts` - async fn block_receipts( - &self, - block_id: BlockId, - ) -> Result>> { - trace!(target: "rpc::eth", ?block_id, "Serving eth_getBlockReceipts"); - Ok(Self::block_receipts(self, block_id).await?) - } - - /// Handler for: `eth_getUncleByBlockHashAndIndex` - async fn uncle_by_block_hash_and_index( - &self, - hash: B256, - index: Index, - ) -> Result> { - trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getUncleByBlockHashAndIndex"); - Ok(Self::ommer_by_block_and_index(self, hash, index).await?) - } - - /// Handler for: `eth_getUncleByBlockNumberAndIndex` - async fn uncle_by_block_number_and_index( - &self, - number: BlockNumberOrTag, - index: Index, - ) -> Result> { - trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getUncleByBlockNumberAndIndex"); - Ok(Self::ommer_by_block_and_index(self, number, index).await?) - } - - /// Handler for: `eth_getRawTransactionByHash` - async fn raw_transaction_by_hash(&self, hash: B256) -> Result> { - trace!(target: "rpc::eth", ?hash, "Serving eth_getRawTransactionByHash"); - Ok(EthTransactions::raw_transaction_by_hash(self, hash).await?) 
- } - - /// Handler for: `eth_getTransactionByHash` - async fn transaction_by_hash(&self, hash: B256) -> Result> { - trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionByHash"); - Ok(EthTransactions::transaction_by_hash(self, hash).await?.map(Into::into)) - } - - /// Handler for: `eth_getRawTransactionByBlockHashAndIndex` - async fn raw_transaction_by_block_hash_and_index( - &self, - hash: B256, - index: Index, - ) -> Result> { - trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getRawTransactionByBlockHashAndIndex"); - Ok(Self::raw_transaction_by_block_and_tx_index(self, hash, index).await?) - } - - /// Handler for: `eth_getTransactionByBlockHashAndIndex` - async fn transaction_by_block_hash_and_index( - &self, - hash: B256, - index: Index, - ) -> Result> { - trace!(target: "rpc::eth", ?hash, ?index, "Serving eth_getTransactionByBlockHashAndIndex"); - Ok(Self::transaction_by_block_and_tx_index(self, hash, index).await?) - } - - /// Handler for: `eth_getRawTransactionByBlockNumberAndIndex` - async fn raw_transaction_by_block_number_and_index( - &self, - number: BlockNumberOrTag, - index: Index, - ) -> Result> { - trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getRawTransactionByBlockNumberAndIndex"); - Ok(Self::raw_transaction_by_block_and_tx_index(self, number, index).await?) - } - - /// Handler for: `eth_getTransactionByBlockNumberAndIndex` - async fn transaction_by_block_number_and_index( - &self, - number: BlockNumberOrTag, - index: Index, - ) -> Result> { - trace!(target: "rpc::eth", ?number, ?index, "Serving eth_getTransactionByBlockNumberAndIndex"); - Ok(Self::transaction_by_block_and_tx_index(self, number, index).await?) - } - - /// Handler for: `eth_getTransactionReceipt` - async fn transaction_receipt(&self, hash: B256) -> Result> { - trace!(target: "rpc::eth", ?hash, "Serving eth_getTransactionReceipt"); - Ok(EthTransactions::transaction_receipt(self, hash).await?) - } - - /// Handler for: `eth_getBalance` - async fn balance(&self, address: Address, block_number: Option) -> Result { - trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getBalance"); - Ok(self.on_blocking_task(|this| async move { this.balance(address, block_number) }).await?) - } - - /// Handler for: `eth_getStorageAt` - async fn storage_at( - &self, - address: Address, - index: JsonStorageKey, - block_number: Option, - ) -> Result { - trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getStorageAt"); - Ok(self - .on_blocking_task(|this| async move { this.storage_at(address, index, block_number) }) - .await?) - } - - /// Handler for: `eth_getTransactionCount` - async fn transaction_count( - &self, - address: Address, - block_number: Option, - ) -> Result { - trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getTransactionCount"); - Ok(self - .on_blocking_task( - |this| async move { this.get_transaction_count(address, block_number) }, - ) - .await?) - } - - /// Handler for: `eth_getCode` - async fn get_code(&self, address: Address, block_number: Option) -> Result { - trace!(target: "rpc::eth", ?address, ?block_number, "Serving eth_getCode"); - Ok(self - .on_blocking_task(|this| async move { this.get_code(address, block_number) }) - .await?) - } - - /// Handler for: `eth_getHeaderByNumber` - async fn header_by_number(&self, block_number: BlockNumberOrTag) -> Result> { - trace!(target: "rpc::eth", ?block_number, "Serving eth_getHeaderByNumber"); - Ok(Self::rpc_block_header(self, block_number).await?) 
- } - - /// Handler for: `eth_getHeaderByHash` - async fn header_by_hash(&self, hash: B256) -> Result> { - trace!(target: "rpc::eth", ?hash, "Serving eth_getHeaderByHash"); - Ok(Self::rpc_block_header(self, hash).await?) - } - - /// Handler for: `eth_call` - async fn call( - &self, - request: TransactionRequest, - block_number: Option, - state_overrides: Option, - block_overrides: Option>, - ) -> Result { - trace!(target: "rpc::eth", ?request, ?block_number, ?state_overrides, ?block_overrides, "Serving eth_call"); - Ok(self - .call(request, block_number, EvmOverrides::new(state_overrides, block_overrides)) - .await?) - } - - /// Handler for: `eth_callMany` - async fn call_many( - &self, - bundle: Bundle, - state_context: Option, - state_override: Option, - ) -> Result> { - trace!(target: "rpc::eth", ?bundle, ?state_context, ?state_override, "Serving eth_callMany"); - Ok(Self::call_many(self, bundle, state_context, state_override).await?) - } - - /// Handler for: `eth_createAccessList` - async fn create_access_list( - &self, - request: TransactionRequest, - block_number: Option, - ) -> Result { - trace!(target: "rpc::eth", ?request, ?block_number, "Serving eth_createAccessList"); - let access_list_with_gas_used = self.create_access_list_at(request, block_number).await?; - - Ok(access_list_with_gas_used) - } - - /// Handler for: `eth_estimateGas` - async fn estimate_gas( - &self, - request: TransactionRequest, - block_number: Option, - state_override: Option, - ) -> Result { - trace!(target: "rpc::eth", ?request, ?block_number, "Serving eth_estimateGas"); - Ok(self.estimate_gas_at(request, block_number.unwrap_or_default(), state_override).await?) - } - - /// Handler for: `eth_gasPrice` - async fn gas_price(&self) -> Result { - trace!(target: "rpc::eth", "Serving eth_gasPrice"); - return Ok(Self::gas_price(self).await?) - } - - /// Handler for: `eth_maxPriorityFeePerGas` - async fn max_priority_fee_per_gas(&self) -> Result { - trace!(target: "rpc::eth", "Serving eth_maxPriorityFeePerGas"); - return Ok(Self::suggested_priority_fee(self).await?) - } - - /// Handler for: `eth_blobBaseFee` - async fn blob_base_fee(&self) -> Result { - trace!(target: "rpc::eth", "Serving eth_blobBaseFee"); - return Ok(Self::blob_base_fee(self).await?) - } - - // FeeHistory is calculated based on lazy evaluation of fees for historical blocks, and further - // caching of it in the LRU cache. - // When new RPC call is executed, the cache gets locked, we check it for the historical fees - // according to the requested block range, and fill any cache misses (in both RPC response - // and cache itself) with the actual data queried from the database. - // To minimize the number of database seeks required to query the missing data, we calculate the - // first non-cached block number and last non-cached block number. After that, we query this - // range of consecutive blocks from the database. - /// Handler for: `eth_feeHistory` - async fn fee_history( - &self, - block_count: U64, - newest_block: BlockNumberOrTag, - reward_percentiles: Option>, - ) -> Result { - trace!(target: "rpc::eth", ?block_count, ?newest_block, ?reward_percentiles, "Serving eth_feeHistory"); - return Ok(Self::fee_history(self, block_count.to(), newest_block, reward_percentiles).await?) 
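The comment block above this handler describes the cache-filling strategy in words: lock the LRU cache, serve cached entries, and fill the misses with a single contiguous database query bounded by the first and last non-cached block numbers. A small self-contained sketch of the range bookkeeping that implies, where `cached` stands in for the cache contents (a hypothetical helper, not reth's `FeeHistoryCache` API):

use std::collections::BTreeSet;
use std::ops::RangeInclusive;

/// Returns the smallest contiguous sub-range of `requested` covering every
/// block missing from `cached`, or `None` when the cache already has it all.
fn missing_range(
    requested: RangeInclusive<u64>,
    cached: &BTreeSet<u64>,
) -> Option<RangeInclusive<u64>> {
    let first = requested.clone().find(|b| !cached.contains(b))?;
    let last = requested.rev().find(|b| !cached.contains(b))?;
    Some(first..=last)
}

Querying one consecutive range instead of individual missing blocks keeps the number of database seeks at a single pass, at the cost of re-reading any cached blocks that happen to sit inside the gap.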
- } - - /// Handler for: `eth_mining` - async fn is_mining(&self) -> Result { - Err(internal_rpc_err("unimplemented")) - } - - /// Handler for: `eth_hashrate` - async fn hashrate(&self) -> Result { - Ok(U256::ZERO) - } - - /// Handler for: `eth_getWork` - async fn get_work(&self) -> Result { - Err(internal_rpc_err("unimplemented")) - } - - /// Handler for: `eth_submitHashrate` - async fn submit_hashrate(&self, _hashrate: U256, _id: B256) -> Result { - Ok(false) - } - - /// Handler for: `eth_submitWork` - async fn submit_work(&self, _nonce: B64, _pow_hash: B256, _mix_digest: B256) -> Result { - Err(internal_rpc_err("unimplemented")) - } - - /// Handler for: `eth_sendTransaction` - async fn send_transaction(&self, request: TransactionRequest) -> Result { - trace!(target: "rpc::eth", ?request, "Serving eth_sendTransaction"); - Ok(EthTransactions::send_transaction(self, request).await?) - } - - /// Handler for: `eth_sendRawTransaction` - async fn send_raw_transaction(&self, tx: Bytes) -> Result { - trace!(target: "rpc::eth", ?tx, "Serving eth_sendRawTransaction"); - Ok(EthTransactions::send_raw_transaction(self, tx).await?) - } - - /// Handler for: `eth_sign` - async fn sign(&self, address: Address, message: Bytes) -> Result { - trace!(target: "rpc::eth", ?address, ?message, "Serving eth_sign"); - Ok(Self::sign(self, address, &message).await?) - } - - /// Handler for: `eth_signTransaction` - async fn sign_transaction(&self, _transaction: TransactionRequest) -> Result { - Err(internal_rpc_err("unimplemented")) - } - - /// Handler for: `eth_signTypedData` - async fn sign_typed_data(&self, address: Address, data: TypedData) -> Result { - trace!(target: "rpc::eth", ?address, ?data, "Serving eth_signTypedData"); - Ok(Self::sign_typed_data(self, &data, address)?) - } - - /// Handler for: `eth_getProof` - async fn get_proof( - &self, - address: Address, - keys: Vec, - block_number: Option, - ) -> Result { - trace!(target: "rpc::eth", ?address, ?keys, ?block_number, "Serving eth_getProof"); - let res = Self::get_proof(self, address, keys, block_number).await; - - Ok(res.map_err(|e| match e { - EthApiError::InvalidBlockRange => { - internal_rpc_err("eth_getProof is unimplemented for historical blocks") - } - _ => e.into(), - })?) 
- } -} - -#[cfg(test)] -mod tests { - use crate::{ - eth::{ - cache::EthStateCache, gas_oracle::GasPriceOracle, FeeHistoryCache, - FeeHistoryCacheConfig, - }, - EthApi, - }; - use jsonrpsee::types::error::INVALID_PARAMS_CODE; - use reth_chainspec::BaseFeeParams; - use reth_evm_ethereum::EthEvmConfig; - use reth_network_api::noop::NoopNetwork; - use reth_primitives::{ - constants::ETHEREUM_BLOCK_GAS_LIMIT, Block, BlockNumberOrTag, Header, TransactionSigned, - B256, U64, - }; - use reth_provider::{ - test_utils::{MockEthProvider, NoopProvider}, - BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory, - }; - use reth_rpc_api::EthApiServer; - use reth_rpc_types::FeeHistory; - use reth_tasks::pool::BlockingTaskPool; - use reth_testing_utils::{generators, generators::Rng}; - use reth_transaction_pool::test_utils::{testing_pool, TestPool}; - - fn build_test_eth_api< - P: BlockReaderIdExt - + BlockReader - + ChainSpecProvider - + EvmEnvProvider - + StateProviderFactory - + Unpin - + Clone - + 'static, - >( - provider: P, - ) -> EthApi { - let evm_config = EthEvmConfig::default(); - let cache = EthStateCache::spawn(provider.clone(), Default::default(), evm_config); - let fee_history_cache = - FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); - - EthApi::new( - provider.clone(), - testing_pool(), - NoopNetwork::default(), - cache.clone(), - GasPriceOracle::new(provider, Default::default(), cache), - ETHEREUM_BLOCK_GAS_LIMIT, - BlockingTaskPool::build().expect("failed to build tracing pool"), - fee_history_cache, - evm_config, - None, - ) - } - - // Function to prepare the EthApi with mock data - fn prepare_eth_api( - newest_block: u64, - mut oldest_block: Option, - block_count: u64, - mock_provider: MockEthProvider, - ) -> (EthApi, Vec, Vec) { - let mut rng = generators::rng(); - - // Build mock data - let mut gas_used_ratios = Vec::new(); - let mut base_fees_per_gas = Vec::new(); - let mut last_header = None; - let mut parent_hash = B256::default(); - - for i in (0..block_count).rev() { - let hash = rng.gen(); - let gas_limit: u64 = rng.gen(); - let gas_used: u64 = rng.gen(); - // Note: Generates a u32 to avoid overflows later - let base_fee_per_gas: Option = rng.gen::().then(|| rng.gen::() as u64); - - let header = Header { - number: newest_block - i, - gas_limit, - gas_used, - base_fee_per_gas, - parent_hash, - ..Default::default() - }; - last_header = Some(header.clone()); - parent_hash = hash; - - let mut transactions = vec![]; - for _ in 0..100 { - let random_fee: u128 = rng.gen(); - - if let Some(base_fee_per_gas) = header.base_fee_per_gas { - let transaction = TransactionSigned { - transaction: reth_primitives::Transaction::Eip1559( - reth_primitives::TxEip1559 { - max_priority_fee_per_gas: random_fee, - max_fee_per_gas: random_fee + base_fee_per_gas as u128, - ..Default::default() - }, - ), - ..Default::default() - }; - - transactions.push(transaction); - } else { - let transaction = TransactionSigned { - transaction: reth_primitives::Transaction::Legacy(Default::default()), - ..Default::default() - }; - - transactions.push(transaction); - } - } - - mock_provider.add_block( - hash, - Block { header: header.clone(), body: transactions, ..Default::default() }, - ); - mock_provider.add_header(hash, header); - - oldest_block.get_or_insert(hash); - gas_used_ratios.push(gas_used as f64 / gas_limit as f64); - base_fees_per_gas.push(base_fee_per_gas.map(|fee| fee as u128).unwrap_or_default()); - } - - // Add final base fee (for the next block 
outside of the request)
-        let last_header = last_header.unwrap();
-        base_fees_per_gas.push(BaseFeeParams::ethereum().next_block_base_fee(
-            last_header.gas_used as u128,
-            last_header.gas_limit as u128,
-            last_header.base_fee_per_gas.unwrap_or_default() as u128,
-        ));
-
-        let eth_api = build_test_eth_api(mock_provider);
-
-        (eth_api, base_fees_per_gas, gas_used_ratios)
-    }
-
-    /// Invalid block range
-    #[tokio::test]
-    async fn test_fee_history_empty() {
-        let response = <EthApi<_, _, _, _> as EthApiServer>::fee_history(
-            &build_test_eth_api(NoopProvider::default()),
-            U64::from(1),
-            BlockNumberOrTag::Latest,
-            None,
-        )
-        .await;
-        assert!(response.is_err());
-        let error_object = response.unwrap_err();
-        assert_eq!(error_object.code(), INVALID_PARAMS_CODE);
-    }
-
-    #[tokio::test]
-    /// Invalid block range (request is before genesis)
-    async fn test_fee_history_invalid_block_range_before_genesis() {
-        let block_count = 10;
-        let newest_block = 1337;
-        let oldest_block = None;
-
-        let (eth_api, _, _) =
-            prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default());
-
-        let response = <EthApi<_, _, _, _> as EthApiServer>::fee_history(
-            &eth_api,
-            U64::from(newest_block + 1),
-            newest_block.into(),
-            Some(vec![10.0]),
-        )
-        .await;
-
-        assert!(response.is_err());
-        let error_object = response.unwrap_err();
-        assert_eq!(error_object.code(), INVALID_PARAMS_CODE);
-    }
-
-    #[tokio::test]
-    /// Invalid block range (request is in the future)
-    async fn test_fee_history_invalid_block_range_in_future() {
-        let block_count = 10;
-        let newest_block = 1337;
-        let oldest_block = None;
-
-        let (eth_api, _, _) =
-            prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default());
-
-        let response = <EthApi<_, _, _, _> as EthApiServer>::fee_history(
-            &eth_api,
-            U64::from(1),
-            (newest_block + 1000).into(),
-            Some(vec![10.0]),
-        )
-        .await;
-
-        assert!(response.is_err());
-        let error_object = response.unwrap_err();
-        assert_eq!(error_object.code(), INVALID_PARAMS_CODE);
-    }
-
-    #[tokio::test]
-    /// Requesting no block should result in a default response
-    async fn test_fee_history_no_block_requested() {
-        let block_count = 10;
-        let newest_block = 1337;
-        let oldest_block = None;
-
-        let (eth_api, _, _) =
-            prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default());
-
-        let response = <EthApi<_, _, _, _> as EthApiServer>::fee_history(
-            &eth_api,
-            U64::from(0),
-            newest_block.into(),
-            None,
-        )
-        .await
-        .unwrap();
-        assert_eq!(
-            response,
-            FeeHistory::default(),
-            "none: requesting no block should yield a default response"
-        );
-    }
-
-    #[tokio::test]
-    /// Requesting a single block should return 1 block (+ base fee for the next block over)
-    async fn test_fee_history_single_block() {
-        let block_count = 10;
-        let newest_block = 1337;
-        let oldest_block = None;
-
-        let (eth_api, base_fees_per_gas, gas_used_ratios) =
-            prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default());
-
-        let fee_history = eth_api.fee_history(1, newest_block.into(), None).await.unwrap();
-        assert_eq!(
-            fee_history.base_fee_per_gas,
-            &base_fees_per_gas[base_fees_per_gas.len() - 2..],
-            "one: base fee per gas is incorrect"
-        );
-        assert_eq!(
-            fee_history.base_fee_per_gas.len(),
-            2,
-            "one: should return base fee of the next block as well"
-        );
-        assert_eq!(
-            &fee_history.gas_used_ratio,
-            &gas_used_ratios[gas_used_ratios.len() - 1..],
-            "one: gas used ratio is incorrect"
-        );
-        assert_eq!(fee_history.oldest_block, newest_block, "one: oldest block is incorrect");
-        assert!(
-            fee_history.reward.is_none(),
-            "one: 
no percentiles were requested, so there should be no rewards result" - ); - } - - /// Requesting all blocks should be ok - #[tokio::test] - async fn test_fee_history_all_blocks() { - let block_count = 10; - let newest_block = 1337; - let oldest_block = None; - - let (eth_api, base_fees_per_gas, gas_used_ratios) = - prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - - let fee_history = - eth_api.fee_history(block_count, newest_block.into(), None).await.unwrap(); - - assert_eq!( - &fee_history.base_fee_per_gas, &base_fees_per_gas, - "all: base fee per gas is incorrect" - ); - assert_eq!( - fee_history.base_fee_per_gas.len() as u64, - block_count + 1, - "all: should return base fee of the next block as well" - ); - assert_eq!( - &fee_history.gas_used_ratio, &gas_used_ratios, - "all: gas used ratio is incorrect" - ); - assert_eq!( - fee_history.oldest_block, - newest_block - block_count + 1, - "all: oldest block is incorrect" - ); - assert!( - fee_history.reward.is_none(), - "all: no percentiles were requested, so there should be no rewards result" - ); - } -} diff --git a/crates/rpc/rpc/src/eth/api/sign.rs b/crates/rpc/rpc/src/eth/api/sign.rs deleted file mode 100644 index 5256de4a4..000000000 --- a/crates/rpc/rpc/src/eth/api/sign.rs +++ /dev/null @@ -1,41 +0,0 @@ -//! Contains RPC handler implementations specific to sign endpoints - -use crate::{ - eth::{ - error::{EthResult, SignError}, - signer::{DevSigner, EthSigner}, - }, - EthApi, -}; -use alloy_dyn_abi::TypedData; -use reth_primitives::{Address, Bytes}; - -impl EthApi { - pub(crate) async fn sign(&self, account: Address, message: &[u8]) -> EthResult { - Ok(self.find_signer(&account)?.sign(account, message).await?.to_hex_bytes()) - } - - pub(crate) fn sign_typed_data(&self, data: &TypedData, account: Address) -> EthResult { - Ok(self.find_signer(&account)?.sign_typed_data(account, data)?.to_hex_bytes()) - } - - pub(crate) fn find_signer( - &self, - account: &Address, - ) -> Result, SignError> { - self.inner - .signers - .read() - .iter() - .find(|signer| signer.is_signer_for(account)) - .map(|signer| dyn_clone::clone_box(&**signer)) - .ok_or(SignError::NoAccount) - } - - /// Generates 20 random developer accounts. - /// Used in DEV mode. - pub fn with_dev_accounts(&self) { - let mut signers = self.inner.signers.write(); - *signers = DevSigner::random_signers(20); - } -} diff --git a/crates/rpc/rpc/src/eth/api/state.rs b/crates/rpc/rpc/src/eth/api/state.rs deleted file mode 100644 index d7c1bafac..000000000 --- a/crates/rpc/rpc/src/eth/api/state.rs +++ /dev/null @@ -1,178 +0,0 @@ -//! Contains RPC handler implementations specific to state. 
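The removed `sign.rs` above centers on one pattern: scan the registered signers and use the first one that manages the requested account, mapping "no match" to `SignError::NoAccount`. A minimal, self-contained sketch of that lookup with simplified stand-in types (not reth's real `EthSigner`):

```rust
// Simplified stand-ins for the signer-lookup pattern in the removed sign.rs.
trait Signer {
    fn is_signer_for(&self, account: &str) -> bool;
}

struct DevSigner {
    accounts: Vec<String>,
}

impl Signer for DevSigner {
    fn is_signer_for(&self, account: &str) -> bool {
        self.accounts.iter().any(|a| a == account)
    }
}

/// First signer that manages the account wins; `None` plays the role of
/// `SignError::NoAccount` in the real code.
fn find_signer<'a>(signers: &'a [Box<dyn Signer>], account: &str) -> Option<&'a dyn Signer> {
    signers.iter().find(|s| s.is_signer_for(account)).map(|s| s.as_ref())
}

fn main() {
    let signers: Vec<Box<dyn Signer>> =
        vec![Box::new(DevSigner { accounts: vec!["0xabc".to_owned()] })];
    assert!(find_signer(&signers, "0xabc").is_some());
    assert!(find_signer(&signers, "0xdef").is_none());
}
```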
- -use crate::{ - eth::error::{EthApiError, EthResult, RpcInvalidTransactionError}, - EthApi, -}; -use reth_evm::ConfigureEvm; -use reth_primitives::{Address, BlockId, BlockNumberOrTag, Bytes, B256, U256}; -use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProvider, StateProviderFactory, -}; -use reth_rpc_types::{serde_helpers::JsonStorageKey, EIP1186AccountProofResponse}; -use reth_rpc_types_compat::proof::from_primitive_account_proof; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; - -impl EthApi -where - Provider: - BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, - Pool: TransactionPool + Clone + 'static, - Network: Send + Sync + 'static, - EvmConfig: ConfigureEvm + 'static, -{ - pub(crate) fn get_code(&self, address: Address, block_id: Option) -> EthResult { - Ok(self - .state_at_block_id_or_latest(block_id)? - .account_code(address)? - .unwrap_or_default() - .original_bytes()) - } - - pub(crate) fn balance(&self, address: Address, block_id: Option) -> EthResult { - Ok(self - .state_at_block_id_or_latest(block_id)? - .account_balance(address)? - .unwrap_or_default()) - } - - /// Returns the number of transactions sent from an address at the given block identifier. - /// - /// If this is [`BlockNumberOrTag::Pending`] then this will look up the highest transaction in - /// pool and return the next nonce (highest + 1). - pub(crate) fn get_transaction_count( - &self, - address: Address, - block_id: Option, - ) -> EthResult { - if block_id == Some(BlockId::pending()) { - let address_txs = self.pool().get_transactions_by_sender(address); - if let Some(highest_nonce) = - address_txs.iter().map(|item| item.transaction.nonce()).max() - { - let tx_count = highest_nonce - .checked_add(1) - .ok_or(RpcInvalidTransactionError::NonceMaxValue)?; - return Ok(U256::from(tx_count)) - } - } - - let state = self.state_at_block_id_or_latest(block_id)?; - Ok(U256::from(state.account_nonce(address)?.unwrap_or_default())) - } - - pub(crate) fn storage_at( - &self, - address: Address, - index: JsonStorageKey, - block_id: Option, - ) -> EthResult { - Ok(B256::new( - self.state_at_block_id_or_latest(block_id)? - .storage(address, index.0)? - .unwrap_or_default() - .to_be_bytes(), - )) - } - - pub(crate) async fn get_proof( - &self, - address: Address, - keys: Vec, - block_id: Option, - ) -> EthResult { - let chain_info = self.provider().chain_info()?; - let block_id = block_id.unwrap_or_default(); - - // if we are trying to create a proof for the latest block, but have a BlockId as input - // that is not BlockNumberOrTag::Latest, then we need to figure out whether or not the - // BlockId corresponds to the latest block - let is_latest_block = match block_id { - BlockId::Number(BlockNumberOrTag::Number(num)) => num == chain_info.best_number, - BlockId::Hash(hash) => hash == chain_info.best_hash.into(), - BlockId::Number(BlockNumberOrTag::Latest) => true, - _ => false, - }; - - // TODO: remove when HistoricalStateProviderRef::proof is implemented - if !is_latest_block { - return Err(EthApiError::InvalidBlockRange) - } - - let this = self.clone(); - self.inner - .blocking_task_pool - .spawn(move || { - let state = this.state_at_block_id(block_id)?; - let storage_keys = keys.iter().map(|key| key.0).collect::>(); - let proof = state.proof(address, &storage_keys)?; - Ok(from_primitive_account_proof(proof)) - }) - .await - .map_err(|_| EthApiError::InternalBlockingTaskError)? 
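A note on `get_transaction_count` above: the pending-nonce rule is worth seeing in isolation. For the `pending` tag, the next nonce is the highest pool nonce from that sender plus one, falling back to the on-chain account nonce when the pool has nothing from that sender. A sketch with plain `u64`s (reth uses `U256` and maps the overflow to `RpcInvalidTransactionError::NonceMaxValue`):

```rust
/// Next nonce for a sender under the `pending` tag.
fn pending_nonce(pool_nonces: &[u64], on_chain_nonce: u64) -> Option<u64> {
    match pool_nonces.iter().max() {
        // highest pool nonce + 1; checked_add guards the u64::MAX edge case
        Some(&highest) => highest.checked_add(1),
        // nothing pending from this sender: use the account nonce
        None => Some(on_chain_nonce),
    }
}

fn main() {
    assert_eq!(pending_nonce(&[5, 7, 6], 3), Some(8));
    assert_eq!(pending_nonce(&[], 3), Some(3));
}
```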
- } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::eth::{ - cache::EthStateCache, gas_oracle::GasPriceOracle, FeeHistoryCache, FeeHistoryCacheConfig, - }; - use reth_evm_ethereum::EthEvmConfig; - use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, StorageKey, StorageValue}; - use reth_provider::test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}; - use reth_tasks::pool::BlockingTaskPool; - use reth_transaction_pool::test_utils::testing_pool; - use std::collections::HashMap; - - #[tokio::test] - async fn test_storage() { - // === Noop === - let pool = testing_pool(); - let evm_config = EthEvmConfig::default(); - - let cache = EthStateCache::spawn(NoopProvider::default(), Default::default(), evm_config); - let eth_api = EthApi::new( - NoopProvider::default(), - pool.clone(), - (), - cache.clone(), - GasPriceOracle::new(NoopProvider::default(), Default::default(), cache.clone()), - ETHEREUM_BLOCK_GAS_LIMIT, - BlockingTaskPool::build().expect("failed to build tracing pool"), - FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()), - evm_config, - None, - ); - let address = Address::random(); - let storage = eth_api.storage_at(address, U256::ZERO.into(), None).unwrap(); - assert_eq!(storage, U256::ZERO.to_be_bytes()); - - // === Mock === - let mock_provider = MockEthProvider::default(); - let storage_value = StorageValue::from(1337); - let storage_key = StorageKey::random(); - let storage = HashMap::from([(storage_key, storage_value)]); - let account = ExtendedAccount::new(0, U256::ZERO).extend_storage(storage); - mock_provider.add_account(address, account); - - let cache = EthStateCache::spawn(mock_provider.clone(), Default::default(), evm_config); - let eth_api = EthApi::new( - mock_provider.clone(), - pool, - (), - cache.clone(), - GasPriceOracle::new(mock_provider, Default::default(), cache.clone()), - ETHEREUM_BLOCK_GAS_LIMIT, - BlockingTaskPool::build().expect("failed to build tracing pool"), - FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()), - evm_config, - None, - ); - - let storage_key: U256 = storage_key.into(); - let storage = eth_api.storage_at(address, storage_key.into(), None).unwrap(); - assert_eq!(storage, storage_value.to_be_bytes()); - } -} diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs deleted file mode 100644 index 8829a0434..000000000 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ /dev/null @@ -1,1861 +0,0 @@ -//! 
Contains RPC handler implementations specific to transactions -use crate::{ - eth::{ - api::pending_block::PendingBlockEnv, - error::{EthApiError, EthResult, RpcInvalidTransactionError, SignError}, - revm_utils::prepare_call_env, - utils::recover_raw_transaction, - }, - EthApi, EthApiSpec, -}; -use alloy_primitives::TxKind as RpcTransactionKind; -use async_trait::async_trait; -use reth_evm::ConfigureEvm; -use reth_network_api::NetworkInfo; -use reth_primitives::{ - eip4844::calc_blob_gasprice, - revm::env::{fill_block_env_with_coinbase, tx_env_with_recovered}, - Address, BlockId, BlockNumberOrTag, Bytes, FromRecoveredPooledTransaction, Header, - IntoRecoveredTransaction, Receipt, SealedBlock, SealedBlockWithSenders, TransactionMeta, - TransactionSigned, TransactionSignedEcRecovered, - TxKind::{Call, Create}, - B256, U256, -}; -use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderBox, StateProviderFactory, -}; -use reth_revm::database::StateProviderDatabase; -use reth_rpc_types::{ - state::EvmOverrides, - transaction::{ - EIP1559TransactionRequest, EIP2930TransactionRequest, EIP4844TransactionRequest, - LegacyTransactionRequest, - }, - AnyReceiptEnvelope, AnyTransactionReceipt, Index, Log, ReceiptWithBloom, Transaction, - TransactionInfo, TransactionReceipt, TransactionRequest, TypedTransactionRequest, - WithOtherFields, -}; -use reth_rpc_types_compat::transaction::from_recovered_with_block_context; -use reth_transaction_pool::{TransactionOrigin, TransactionPool}; -use revm::{ - db::CacheDB, - primitives::{ - db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, - ExecutionResult, ResultAndState, SpecId, - }, - GetInspector, Inspector, -}; -use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; -use std::future::Future; - -use crate::eth::revm_utils::FillableTransaction; -#[cfg(feature = "optimism")] -use reth_rpc_types::OptimismTransactionReceiptFields; -use revm_primitives::db::{Database, DatabaseRef}; - -/// Helper alias type for the state's [`CacheDB`] -pub(crate) type StateCacheDB = CacheDB>; - -/// Commonly used transaction related functions for the [EthApi] type in the `eth_` namespace. -/// -/// This includes utilities for transaction tracing, transacting and inspection. -/// -/// Async functions that are spawned onto the -/// [BlockingTaskPool](reth_tasks::pool::BlockingTaskPool) begin with `spawn_` -/// -/// ## Calls -/// -/// There are subtle differences between when transacting [TransactionRequest]: -/// -/// The endpoints `eth_call` and `eth_estimateGas` and `eth_createAccessList` should always -/// __disable__ the base fee check in the [EnvWithHandlerCfg] -/// [Cfg](revm_primitives::CfgEnvWithHandlerCfg). -/// -/// The behaviour for tracing endpoints is not consistent across clients. -/// Geth also disables the basefee check for tracing: -/// Erigon does not: -/// -/// See also -/// -/// This implementation follows the behaviour of Geth and disables the basefee check for tracing. -#[async_trait::async_trait] -pub trait EthTransactions: Send + Sync { - /// Executes the [EnvWithHandlerCfg] against the given [Database] without committing state - /// changes. - fn transact( - &self, - db: DB, - env: EnvWithHandlerCfg, - ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> - where - DB: Database, - ::Error: Into; - - /// Executes the [EnvWithHandlerCfg] against the given [Database] without committing state - /// changes. 
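The call-vs-trace base-fee discussion above boils down to a single revm knob. A hedged sketch of how an `eth_call`-style config might be built, assuming revm is compiled with the `optional_no_base_fee` and `optional_balance_check` features (which reth enables); this is not reth's actual `prepare_call_env`:

```rust
use revm_primitives::{CfgEnv, CfgEnvWithHandlerCfg, SpecId};

/// Builds a call-style cfg: fee checks relaxed so eth_call/eth_estimateGas/
/// eth_createAccessList (and, following Geth, tracing) don't fail on basefee.
fn call_cfg(spec: SpecId) -> CfgEnvWithHandlerCfg {
    let mut cfg = CfgEnv::default();
    cfg.disable_base_fee = true; // requires revm's `optional_no_base_fee`
    cfg.disable_balance_check = true; // requires `optional_balance_check`
    CfgEnvWithHandlerCfg::new_with_spec_id(cfg, spec)
}
```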
-    fn inspect<DB, I>(
-        &self,
-        db: DB,
-        env: EnvWithHandlerCfg,
-        inspector: I,
-    ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)>
-    where
-        DB: Database,
-        <DB as Database>::Error: Into<EthApiError>,
-        I: GetInspector<DB>;
-
-    /// Same as [Self::inspect] but also returns the database again.
-    ///
-    /// Even though [Database] is also implemented on `&mut`
-    /// this is still useful if there are certain trait bounds on the Inspector's database generic
-    /// type
-    fn inspect_and_return_db<DB, I>(
-        &self,
-        db: DB,
-        env: EnvWithHandlerCfg,
-        inspector: I,
-    ) -> EthResult<(ResultAndState, EnvWithHandlerCfg, DB)>
-    where
-        DB: Database,
-        <DB as Database>::Error: Into<EthApiError>,
-        I: GetInspector<DB>;
-
-    /// Replays all the transactions until the target transaction is found.
-    ///
-    /// All transactions before the target transaction are executed and their changes are written to
-    /// the _runtime_ db ([CacheDB]).
-    ///
-    /// Note: This assumes the target transaction is in the given iterator.
-    /// Returns the index of the target transaction in the given iterator.
-    fn replay_transactions_until<DB, I, Tx>(
-        &self,
-        db: &mut CacheDB<DB>,
-        cfg: CfgEnvWithHandlerCfg,
-        block_env: BlockEnv,
-        transactions: I,
-        target_tx_hash: B256,
-    ) -> Result<usize, EthApiError>
-    where
-        DB: DatabaseRef,
-        EthApiError: From<<DB as DatabaseRef>::Error>,
-        I: IntoIterator<Item = Tx>,
-        Tx: FillableTransaction;
-
-    /// Returns default gas limit to use for `eth_call` and tracing RPC methods.
-    fn call_gas_limit(&self) -> u64;
-
-    /// Executes the future on a new blocking task.
-    ///
-    /// Note: This is expected for futures that are dominated by blocking IO operations; for tracing
-    /// or CPU bound operations in general, use [Self::spawn_blocking].
-    async fn spawn_blocking_future<F, R>(&self, c: F) -> EthResult<R>
-    where
-        F: Future<Output = EthResult<R>> + Send + 'static,
-        R: Send + 'static;
-
-    /// Executes a blocking task on the tracing pool.
-    ///
-    /// Note: This is expected for tasks that are predominantly CPU bound; for blocking IO futures,
-    /// use [Self::spawn_blocking_future].
-    async fn spawn_blocking<F, R>(&self, c: F) -> EthResult<R>
-    where
-        F: FnOnce() -> EthResult<R> + Send + 'static,
-        R: Send + 'static;
-
-    /// Returns the state at the given [BlockId]
-    fn state_at(&self, at: BlockId) -> EthResult<StateProviderBox>;
-
-    /// Executes the closure with the state that corresponds to the given [BlockId].
-    fn with_state_at_block<F, T>(&self, at: BlockId, f: F) -> EthResult<T>
-    where
-        F: FnOnce(StateProviderBox) -> EthResult<T>;
-
-    /// Executes the closure with the state that corresponds to the given [BlockId] on a new task
-    async fn spawn_with_state_at_block<F, T>(&self, at: BlockId, f: F) -> EthResult<T>
-    where
-        F: FnOnce(StateProviderBox) -> EthResult<T> + Send + 'static,
-        T: Send + 'static;
-
-    /// Returns the revm evm env for the requested [BlockId], along with the [BlockId] of the
-    /// block the env was actually configured for.
-    ///
-    /// If the [BlockId] is pending, this will return the "Pending" tag, otherwise this returns the
-    /// hash of the exact block.
-    async fn evm_env_at(&self, at: BlockId)
-        -> EthResult<(CfgEnvWithHandlerCfg, BlockEnv, BlockId)>;
-
-    /// Returns the revm evm env for the raw block header
-    ///
-    /// This is used for tracing raw blocks
-    async fn evm_env_for_raw_block(
-        &self,
-        at: &Header,
-    ) -> EthResult<(CfgEnvWithHandlerCfg, BlockEnv)>;
-
-    /// Get all transactions in the block with the given hash.
-    ///
-    /// Returns `None` if block does not exist.
-    async fn transactions_by_block(&self, block: B256)
-        -> EthResult<Option<Vec<TransactionSigned>>>;
-
-    /// Get the entire block for the given id.
-    ///
-    /// Returns `None` if block does not exist.
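The contract of `replay_transactions_until` above, stripped of the EVM plumbing: commit every transaction before the target, stop at the target without executing it, and return its index so the caller can trace it on top of the warmed-up state. A minimal sketch with a hypothetical `Tx` type:

```rust
struct Tx {
    hash: [u8; 32],
}

/// Executes (here: skips over) everything before `target_tx_hash` and
/// reports the target's index. The real code fills the EVM tx env and calls
/// `transact_commit()` where the comment sits.
fn replay_transactions_until(
    transactions: impl IntoIterator<Item = Tx>,
    target_tx_hash: [u8; 32],
) -> usize {
    let mut index = 0;
    for tx in transactions {
        if tx.hash == target_tx_hash {
            // Target reached: do NOT execute it; the caller traces it itself.
            break;
        }
        // fill tx env + transact_commit() would happen here
        index += 1;
    }
    index
}

fn main() {
    let txs = vec![Tx { hash: [1; 32] }, Tx { hash: [2; 32] }, Tx { hash: [3; 32] }];
    assert_eq!(replay_transactions_until(txs, [3; 32]), 2);
}
```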
- async fn block_by_id(&self, id: BlockId) -> EthResult>; - - /// Get the entire block for the given id. - /// - /// Returns `None` if block does not exist. - async fn block_by_id_with_senders( - &self, - id: BlockId, - ) -> EthResult>; - - /// Get all transactions in the block with the given hash. - /// - /// Returns `None` if block does not exist. - async fn transactions_by_block_id( - &self, - block: BlockId, - ) -> EthResult>>; - - /// Returns the EIP-2718 encoded transaction by hash. - /// - /// If this is a pooled EIP-4844 transaction, the blob sidecar is included. - /// - /// Checks the pool and state. - /// - /// Returns `Ok(None)` if no matching transaction was found. - async fn raw_transaction_by_hash(&self, hash: B256) -> EthResult>; - - /// Returns the transaction by hash. - /// - /// Checks the pool and state. - /// - /// Returns `Ok(None)` if no matching transaction was found. - async fn transaction_by_hash(&self, hash: B256) -> EthResult>; - - /// Returns the transaction by including its corresponding [BlockId] - /// - /// Note: this supports pending transactions - async fn transaction_by_hash_at( - &self, - hash: B256, - ) -> EthResult>; - - /// Returns the _historical_ transaction and the block it was mined in - async fn historical_transaction_by_hash_at( - &self, - hash: B256, - ) -> EthResult>; - - /// Returns the transaction receipt for the given hash. - /// - /// Returns None if the transaction does not exist or is pending - /// Note: The tx receipt is not available for pending transactions. - async fn transaction_receipt(&self, hash: B256) -> EthResult>; - - /// Decodes and recovers the transaction and submits it to the pool. - /// - /// Returns the hash of the transaction. - async fn send_raw_transaction(&self, tx: Bytes) -> EthResult; - - /// Signs transaction with a matching signer, if any and submits the transaction to the pool. - /// Returns the hash of the signed transaction. - async fn send_transaction(&self, request: TransactionRequest) -> EthResult; - - /// Prepares the state and env for the given [TransactionRequest] at the given [BlockId] and - /// executes the closure on a new task returning the result of the closure. - /// - /// This returns the configured [EnvWithHandlerCfg] for the given [TransactionRequest] at the - /// given [BlockId] and with configured call settings: `prepare_call_env`. - async fn spawn_with_call_at( - &self, - request: TransactionRequest, - at: BlockId, - overrides: EvmOverrides, - f: F, - ) -> EthResult - where - F: FnOnce(&mut StateCacheDB, EnvWithHandlerCfg) -> EthResult + Send + 'static, - R: Send + 'static; - - /// Executes the call request at the given [BlockId]. - async fn transact_call_at( - &self, - request: TransactionRequest, - at: BlockId, - overrides: EvmOverrides, - ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)>; - - /// Executes the call request at the given [BlockId] on a new task and returns the result of the - /// inspect call. - async fn spawn_inspect_call_at( - &self, - request: TransactionRequest, - at: BlockId, - overrides: EvmOverrides, - inspector: I, - ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> - where - I: for<'a> Inspector<&'a mut StateCacheDB> + Send + 'static; - - /// Executes the transaction on top of the given [BlockId] with a tracer configured by the - /// config. - /// - /// The callback is then called with the [TracingInspector] and the [ResultAndState] after the - /// configured [EnvWithHandlerCfg] was inspected. 
- /// - /// Caution: this is blocking - fn trace_at( - &self, - env: EnvWithHandlerCfg, - config: TracingInspectorConfig, - at: BlockId, - f: F, - ) -> EthResult - where - F: FnOnce(TracingInspector, ResultAndState) -> EthResult; - - /// Same as [Self::trace_at] but also provides the used database to the callback. - /// - /// Executes the transaction on top of the given [BlockId] with a tracer configured by the - /// config. - /// - /// The callback is then called with the [TracingInspector] and the [ResultAndState] after the - /// configured [EnvWithHandlerCfg] was inspected. - async fn spawn_trace_at_with_state( - &self, - env: EnvWithHandlerCfg, - config: TracingInspectorConfig, - at: BlockId, - f: F, - ) -> EthResult - where - F: FnOnce(TracingInspector, ResultAndState, StateCacheDB) -> EthResult + Send + 'static, - R: Send + 'static; - - /// Fetches the transaction and the transaction's block - async fn transaction_and_block( - &self, - hash: B256, - ) -> EthResult>; - - /// Retrieves the transaction if it exists and returns its trace. - /// - /// Before the transaction is traced, all previous transaction in the block are applied to the - /// state by executing them first. - /// The callback `f` is invoked with the [ResultAndState] after the transaction was executed and - /// the database that points to the beginning of the transaction. - /// - /// Note: Implementers should use a threadpool where blocking is allowed, such as - /// [BlockingTaskPool](reth_tasks::pool::BlockingTaskPool). - async fn spawn_trace_transaction_in_block( - &self, - hash: B256, - config: TracingInspectorConfig, - f: F, - ) -> EthResult> - where - F: FnOnce(TransactionInfo, TracingInspector, ResultAndState, StateCacheDB) -> EthResult - + Send - + 'static, - R: Send + 'static, - { - self.spawn_trace_transaction_in_block_with_inspector(hash, TracingInspector::new(config), f) - .await - } - - /// Retrieves the transaction if it exists and returns its trace. - /// - /// Before the transaction is traced, all previous transaction in the block are applied to the - /// state by executing them first. - /// The callback `f` is invoked with the [ResultAndState] after the transaction was executed and - /// the database that points to the beginning of the transaction. - /// - /// Note: Implementers should use a threadpool where blocking is allowed, such as - /// [BlockingTaskPool](reth_tasks::pool::BlockingTaskPool). - async fn spawn_replay_transaction(&self, hash: B256, f: F) -> EthResult> - where - F: FnOnce(TransactionInfo, ResultAndState, StateCacheDB) -> EthResult + Send + 'static, - R: Send + 'static; - - /// Retrieves the transaction if it exists and returns its trace. - /// - /// Before the transaction is traced, all previous transaction in the block are applied to the - /// state by executing them first. - /// The callback `f` is invoked with the [ResultAndState] after the transaction was executed and - /// the database that points to the beginning of the transaction. - /// - /// Note: Implementers should use a threadpool where blocking is allowed, such as - /// [BlockingTaskPool](reth_tasks::pool::BlockingTaskPool). 
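The `spawn_*` naming rule above exists because tracing is CPU-bound: it must move off the async RPC workers onto a dedicated blocking pool or it will stall unrelated requests. A minimal analogue using tokio's built-in blocking pool (reth uses its own rayon-backed `BlockingTaskPool`, but the shape is the same):

```rust
/// Runs a CPU-heavy closure on the blocking pool and awaits the result.
async fn spawn_blocking_compute<F, R>(f: F) -> R
where
    F: FnOnce() -> R + Send + 'static,
    R: Send + 'static,
{
    tokio::task::spawn_blocking(f).await.expect("blocking task panicked")
}

#[tokio::main]
async fn main() {
    // The async executor stays responsive while this sum is computed.
    let sum = spawn_blocking_compute(|| (0u64..1_000_000).sum::<u64>()).await;
    assert_eq!(sum, 499_999_500_000);
}
```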
- async fn spawn_trace_transaction_in_block_with_inspector( - &self, - hash: B256, - inspector: Insp, - f: F, - ) -> EthResult> - where - F: FnOnce(TransactionInfo, Insp, ResultAndState, StateCacheDB) -> EthResult - + Send - + 'static, - Insp: for<'a> Inspector<&'a mut StateCacheDB> + Send + 'static, - R: Send + 'static; - - /// Executes all transactions of a block and returns a list of callback results invoked for each - /// transaction in the block. - /// - /// This - /// 1. fetches all transactions of the block - /// 2. configures the EVM evn - /// 3. loops over all transactions and executes them - /// 4. calls the callback with the transaction info, the execution result, the changed state - /// _after_ the transaction [StateProviderDatabase] and the database that points to the state - /// right _before_ the transaction. - async fn trace_block_with( - &self, - block_id: BlockId, - config: TracingInspectorConfig, - f: F, - ) -> EthResult>> - where - // This is the callback that's invoked for each transaction with the inspector, the result, - // state and db - F: for<'a> Fn( - TransactionInfo, - TracingInspector, - ExecutionResult, - &'a EvmState, - &'a StateCacheDB, - ) -> EthResult - + Send - + 'static, - R: Send + 'static, - { - self.trace_block_until(block_id, None, config, f).await - } - - /// Executes all transactions of a block and returns a list of callback results invoked for each - /// transaction in the block. - /// - /// This - /// 1. fetches all transactions of the block - /// 2. configures the EVM evn - /// 3. loops over all transactions and executes them - /// 4. calls the callback with the transaction info, the execution result, the changed state - /// _after_ the transaction [EvmState] and the database that points to the state - /// right _before_ the transaction, in other words the state the transaction was - /// executed on: `changed_state = tx(cached_state)` - /// - /// This accepts a `inspector_setup` closure that returns the inspector to be used for tracing - /// a transaction. This is invoked for each transaction. - async fn trace_block_with_inspector( - &self, - block_id: BlockId, - insp_setup: Setup, - f: F, - ) -> EthResult>> - where - // This is the callback that's invoked for each transaction with the inspector, the result, - // state and db - F: for<'a> Fn( - TransactionInfo, - Insp, - ExecutionResult, - &'a EvmState, - &'a StateCacheDB, - ) -> EthResult - + Send - + 'static, - Setup: FnMut() -> Insp + Send + 'static, - Insp: for<'a> Inspector<&'a mut StateCacheDB> + Send + 'static, - R: Send + 'static, - { - self.trace_block_until_with_inspector(block_id, None, insp_setup, f).await - } - - /// Executes all transactions of a block. - /// - /// If a `highest_index` is given, this will only execute the first `highest_index` - /// transactions, in other words, it will stop executing transactions after the - /// `highest_index`th transaction. - async fn trace_block_until( - &self, - block_id: BlockId, - highest_index: Option, - config: TracingInspectorConfig, - f: F, - ) -> EthResult>> - where - F: for<'a> Fn( - TransactionInfo, - TracingInspector, - ExecutionResult, - &'a EvmState, - &'a StateCacheDB, - ) -> EthResult - + Send - + 'static, - R: Send + 'static, - { - self.trace_block_until_with_inspector( - block_id, - highest_index, - move || TracingInspector::new(config), - f, - ) - .await - } - - /// Executes all transactions of a block. 
- /// - /// If a `highest_index` is given, this will only execute the first `highest_index` - /// transactions, in other words, it will stop executing transactions after the - /// `highest_index`th transaction. - /// - /// Note: This expect tx index to be 0-indexed, so the first transaction is at index 0. - /// - /// This accepts a `inspector_setup` closure that returns the inspector to be used for tracing - /// the transactions. - async fn trace_block_until_with_inspector( - &self, - block_id: BlockId, - highest_index: Option, - inspector_setup: Setup, - f: F, - ) -> EthResult>> - where - F: for<'a> Fn( - TransactionInfo, - Insp, - ExecutionResult, - &'a EvmState, - &'a StateCacheDB, - ) -> EthResult - + Send - + 'static, - Setup: FnMut() -> Insp + Send + 'static, - Insp: for<'a> Inspector<&'a mut StateCacheDB> + Send + 'static, - R: Send + 'static; -} - -#[async_trait] -impl EthTransactions - for EthApi -where - Pool: TransactionPool + Clone + 'static, - Provider: - BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, - Network: NetworkInfo + Send + Sync + 'static, - EvmConfig: ConfigureEvm + 'static, -{ - fn transact( - &self, - db: DB, - env: EnvWithHandlerCfg, - ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> - where - DB: Database, - ::Error: Into, - { - let mut evm = self.inner.evm_config.evm_with_env(db, env); - let res = evm.transact()?; - let (_, env) = evm.into_db_and_env_with_handler_cfg(); - Ok((res, env)) - } - - fn inspect( - &self, - db: DB, - env: EnvWithHandlerCfg, - inspector: I, - ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> - where - DB: Database, - ::Error: Into, - I: GetInspector, - { - self.inspect_and_return_db(db, env, inspector).map(|(res, env, _)| (res, env)) - } - - fn inspect_and_return_db( - &self, - db: DB, - env: EnvWithHandlerCfg, - inspector: I, - ) -> EthResult<(ResultAndState, EnvWithHandlerCfg, DB)> - where - DB: Database, - ::Error: Into, - I: GetInspector, - { - let mut evm = self.inner.evm_config.evm_with_env_and_inspector(db, env, inspector); - let res = evm.transact()?; - let (db, env) = evm.into_db_and_env_with_handler_cfg(); - Ok((res, env, db)) - } - - fn replay_transactions_until( - &self, - db: &mut CacheDB, - cfg: CfgEnvWithHandlerCfg, - block_env: BlockEnv, - transactions: I, - target_tx_hash: B256, - ) -> Result - where - DB: DatabaseRef, - EthApiError: From<::Error>, - I: IntoIterator, - Tx: FillableTransaction, - { - let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); - - let mut evm = self.inner.evm_config.evm_with_env(db, env); - let mut index = 0; - for tx in transactions { - if tx.hash() == target_tx_hash { - // reached the target transaction - break - } - - tx.try_fill_tx_env(evm.tx_mut())?; - evm.transact_commit()?; - index += 1; - } - Ok(index) - } - - fn call_gas_limit(&self) -> u64 { - self.inner.gas_cap - } - - async fn spawn_blocking_future(&self, c: F) -> EthResult - where - F: Future> + Send + 'static, - R: Send + 'static, - { - self.on_blocking_task(|_| c).await - } - - async fn spawn_blocking(&self, c: F) -> EthResult - where - F: FnOnce() -> EthResult + Send + 'static, - R: Send + 'static, - { - self.spawn_tracing_task_with(move |_| c()).await - } - - fn state_at(&self, at: BlockId) -> EthResult { - self.state_at_block_id(at) - } - - fn with_state_at_block(&self, at: BlockId, f: F) -> EthResult - where - F: FnOnce(StateProviderBox) -> EthResult, - { - let state = self.state_at(at)?; - f(state) - } - - async fn spawn_with_state_at_block(&self, at: 
BlockId, f: F) -> EthResult - where - F: FnOnce(StateProviderBox) -> EthResult + Send + 'static, - T: Send + 'static, - { - self.spawn_tracing_task_with(move |this| { - let state = this.state_at(at)?; - f(state) - }) - .await - } - - async fn evm_env_at( - &self, - at: BlockId, - ) -> EthResult<(CfgEnvWithHandlerCfg, BlockEnv, BlockId)> { - if at.is_pending() { - let PendingBlockEnv { cfg, block_env, origin } = self.pending_block_env_and_cfg()?; - Ok((cfg, block_env, origin.state_block_id())) - } else { - // Use cached values if there is no pending block - let block_hash = self - .provider() - .block_hash_for_id(at)? - .ok_or_else(|| EthApiError::UnknownBlockNumber)?; - let (cfg, env) = self.cache().get_evm_env(block_hash).await?; - Ok((cfg, env, block_hash.into())) - } - } - - async fn evm_env_for_raw_block( - &self, - header: &Header, - ) -> EthResult<(CfgEnvWithHandlerCfg, BlockEnv)> { - // get the parent config first - let (cfg, mut block_env, _) = self.evm_env_at(header.parent_hash.into()).await?; - - let after_merge = cfg.handler_cfg.spec_id >= SpecId::MERGE; - fill_block_env_with_coinbase(&mut block_env, header, after_merge, header.beneficiary); - - Ok((cfg, block_env)) - } - - async fn transactions_by_block( - &self, - block: B256, - ) -> EthResult>> { - Ok(self.cache().get_block_transactions(block).await?) - } - - async fn block_by_id(&self, id: BlockId) -> EthResult> { - self.block(id).await - } - - async fn block_by_id_with_senders( - &self, - id: BlockId, - ) -> EthResult> { - self.block_with_senders(id).await - } - - async fn transactions_by_block_id( - &self, - block: BlockId, - ) -> EthResult>> { - self.block_by_id(block).await.map(|block| block.map(|block| block.body)) - } - - async fn raw_transaction_by_hash(&self, hash: B256) -> EthResult> { - // Note: this is mostly used to fetch pooled transactions so we check the pool first - if let Some(tx) = - self.pool().get_pooled_transaction_element(hash).map(|tx| tx.envelope_encoded()) - { - return Ok(Some(tx)) - } - - self.on_blocking_task(|this| async move { - Ok(this.provider().transaction_by_hash(hash)?.map(|tx| tx.envelope_encoded())) - }) - .await - } - - async fn transaction_by_hash(&self, hash: B256) -> EthResult> { - // Try to find the transaction on disk - let mut resp = self - .on_blocking_task(|this| async move { - match this.provider().transaction_by_hash_with_meta(hash)? { - None => Ok(None), - Some((tx, meta)) => { - // Note: we assume this transaction is valid, because it's mined (or part of - // pending block) and already. We don't need to - // check for pre EIP-2 because this transaction could be pre-EIP-2. - let transaction = tx - .into_ecrecovered_unchecked() - .ok_or(EthApiError::InvalidTransactionSignature)?; - - let tx = TransactionSource::Block { - transaction, - index: meta.index, - block_hash: meta.block_hash, - block_number: meta.block_number, - base_fee: meta.base_fee, - }; - Ok(Some(tx)) - } - } - }) - .await?; - - if resp.is_none() { - // tx not found on disk, check pool - if let Some(tx) = - self.pool().get(&hash).map(|tx| tx.transaction.to_recovered_transaction()) - { - resp = Some(TransactionSource::Pool(tx)); - } - } - - Ok(resp) - } - - async fn transaction_by_hash_at( - &self, - transaction_hash: B256, - ) -> EthResult> { - match self.transaction_by_hash(transaction_hash).await? 
{ - None => return Ok(None), - Some(tx) => { - let res = match tx { - tx @ TransactionSource::Pool(_) => (tx, BlockId::pending()), - TransactionSource::Block { - transaction, - index, - block_hash, - block_number, - base_fee, - } => { - let at = BlockId::Hash(block_hash.into()); - let tx = TransactionSource::Block { - transaction, - index, - block_hash, - block_number, - base_fee, - }; - (tx, at) - } - }; - Ok(Some(res)) - } - } - } - - async fn historical_transaction_by_hash_at( - &self, - hash: B256, - ) -> EthResult> { - match self.transaction_by_hash_at(hash).await? { - None => Ok(None), - Some((tx, at)) => Ok(at.as_block_hash().map(|hash| (tx, hash))), - } - } - - async fn transaction_receipt(&self, hash: B256) -> EthResult> { - let result = self - .on_blocking_task(|this| async move { - let (tx, meta) = match this.provider().transaction_by_hash_with_meta(hash)? { - Some((tx, meta)) => (tx, meta), - None => return Ok(None), - }; - - let receipt = match this.provider().receipt_by_hash(hash)? { - Some(recpt) => recpt, - None => return Ok(None), - }; - - Ok(Some((tx, meta, receipt))) - }) - .await?; - - let (tx, meta, receipt) = match result { - Some((tx, meta, receipt)) => (tx, meta, receipt), - None => return Ok(None), - }; - - self.build_transaction_receipt(tx, meta, receipt).await.map(Some) - } - - async fn send_raw_transaction(&self, tx: Bytes) -> EthResult { - // On optimism, transactions are forwarded directly to the sequencer to be included in - // blocks that it builds. - let maybe_forwarder = self.inner.raw_transaction_forwarder.read().clone(); - if let Some(client) = maybe_forwarder { - tracing::debug!( target: "rpc::eth", "forwarding raw transaction to"); - client.forward_raw_transaction(&tx).await?; - } - - let recovered = recover_raw_transaction(tx)?; - let pool_transaction = ::from_recovered_pooled_transaction(recovered); - - // submit the transaction to the pool with a `Local` origin - let hash = self.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?; - - Ok(hash) - } - - async fn send_transaction(&self, mut request: TransactionRequest) -> EthResult { - let from = match request.from { - Some(from) => from, - None => return Err(SignError::NoAccount.into()), - }; - - // set nonce if not already set before - if request.nonce.is_none() { - let nonce = self.get_transaction_count(from, Some(BlockId::pending()))?; - // note: `.to()` can't panic because the nonce is constructed from a `u64` - request.nonce = Some(nonce.to::()); - } - - let chain_id = self.chain_id(); - - let estimated_gas = self.estimate_gas_at(request.clone(), BlockId::pending(), None).await?; - let gas_limit = estimated_gas; - - let TransactionRequest { - to, - gas_price, - max_fee_per_gas, - max_priority_fee_per_gas, - gas, - value, - input: data, - nonce, - mut access_list, - max_fee_per_blob_gas, - blob_versioned_hashes, - sidecar, - .. 
- } = request; - - // todo: remove this inlining after https://github.com/alloy-rs/alloy/pull/183#issuecomment-1928161285 - let transaction = match ( - gas_price, - max_fee_per_gas, - access_list.take(), - max_fee_per_blob_gas, - blob_versioned_hashes, - sidecar, - ) { - // legacy transaction - // gas price required - (Some(_), None, None, None, None, None) => { - Some(TypedTransactionRequest::Legacy(LegacyTransactionRequest { - nonce: nonce.unwrap_or_default(), - gas_price: U256::from(gas_price.unwrap_or_default()), - gas_limit: U256::from(gas.unwrap_or_default()), - value: value.unwrap_or_default(), - input: data.into_input().unwrap_or_default(), - kind: to.unwrap_or(RpcTransactionKind::Create), - chain_id: None, - })) - } - // EIP2930 - // if only accesslist is set, and no eip1599 fees - (_, None, Some(access_list), None, None, None) => { - Some(TypedTransactionRequest::EIP2930(EIP2930TransactionRequest { - nonce: nonce.unwrap_or_default(), - gas_price: U256::from(gas_price.unwrap_or_default()), - gas_limit: U256::from(gas.unwrap_or_default()), - value: value.unwrap_or_default(), - input: data.into_input().unwrap_or_default(), - kind: to.unwrap_or(RpcTransactionKind::Create), - chain_id: 0, - access_list, - })) - } - // EIP1559 - // if 4844 fields missing - // gas_price, max_fee_per_gas, access_list, max_fee_per_blob_gas, blob_versioned_hashes, - // sidecar, - (None, _, _, None, None, None) => { - // Empty fields fall back to the canonical transaction schema. - Some(TypedTransactionRequest::EIP1559(EIP1559TransactionRequest { - nonce: nonce.unwrap_or_default(), - max_fee_per_gas: U256::from(max_fee_per_gas.unwrap_or_default()), - max_priority_fee_per_gas: U256::from( - max_priority_fee_per_gas.unwrap_or_default(), - ), - gas_limit: U256::from(gas.unwrap_or_default()), - value: value.unwrap_or_default(), - input: data.into_input().unwrap_or_default(), - kind: to.unwrap_or(RpcTransactionKind::Create), - chain_id: 0, - access_list: access_list.unwrap_or_default(), - })) - } - // EIP4884 - // all blob fields required - ( - None, - _, - _, - Some(max_fee_per_blob_gas), - Some(blob_versioned_hashes), - Some(sidecar), - ) => { - // As per the EIP, we follow the same semantics as EIP-1559. - Some(TypedTransactionRequest::EIP4844(EIP4844TransactionRequest { - chain_id: 0, - nonce: nonce.unwrap_or_default(), - max_priority_fee_per_gas: U256::from( - max_priority_fee_per_gas.unwrap_or_default(), - ), - max_fee_per_gas: U256::from(max_fee_per_gas.unwrap_or_default()), - gas_limit: U256::from(gas.unwrap_or_default()), - value: value.unwrap_or_default(), - input: data.into_input().unwrap_or_default(), - #[allow(clippy::manual_unwrap_or_default)] // clippy is suggesting here unwrap_or_default - to: match to { - Some(RpcTransactionKind::Call(to)) => to, - _ => Address::default(), - }, - access_list: access_list.unwrap_or_default(), - - // eip-4844 specific. 
- max_fee_per_blob_gas: U256::from(max_fee_per_blob_gas), - blob_versioned_hashes, - sidecar, - })) - } - - _ => None, - }; - - let transaction = match transaction { - Some(TypedTransactionRequest::Legacy(mut req)) => { - req.chain_id = Some(chain_id.to()); - req.gas_limit = gas_limit.saturating_to(); - req.gas_price = self.legacy_gas_price(gas_price.map(U256::from)).await?; - - TypedTransactionRequest::Legacy(req) - } - Some(TypedTransactionRequest::EIP2930(mut req)) => { - req.chain_id = chain_id.to(); - req.gas_limit = gas_limit.saturating_to(); - req.gas_price = self.legacy_gas_price(gas_price.map(U256::from)).await?; - - TypedTransactionRequest::EIP2930(req) - } - Some(TypedTransactionRequest::EIP1559(mut req)) => { - let (max_fee_per_gas, max_priority_fee_per_gas) = self - .eip1559_fees( - max_fee_per_gas.map(U256::from), - max_priority_fee_per_gas.map(U256::from), - ) - .await?; - - req.chain_id = chain_id.to(); - req.gas_limit = gas_limit.saturating_to(); - req.max_fee_per_gas = max_fee_per_gas.saturating_to(); - req.max_priority_fee_per_gas = max_priority_fee_per_gas.saturating_to(); - - TypedTransactionRequest::EIP1559(req) - } - Some(TypedTransactionRequest::EIP4844(mut req)) => { - let (max_fee_per_gas, max_priority_fee_per_gas) = self - .eip1559_fees( - max_fee_per_gas.map(U256::from), - max_priority_fee_per_gas.map(U256::from), - ) - .await?; - - req.max_fee_per_gas = max_fee_per_gas; - req.max_priority_fee_per_gas = max_priority_fee_per_gas; - req.max_fee_per_blob_gas = - self.eip4844_blob_fee(max_fee_per_blob_gas.map(U256::from)).await?; - - req.chain_id = chain_id.to(); - req.gas_limit = gas_limit; - - TypedTransactionRequest::EIP4844(req) - } - None => return Err(EthApiError::ConflictingFeeFieldsInRequest), - }; - - let signed_tx = self.sign_request(&from, transaction)?; - - let recovered = - signed_tx.into_ecrecovered().ok_or(EthApiError::InvalidTransactionSignature)?; - - let pool_transaction = match recovered.try_into() { - Ok(converted) => ::from_recovered_pooled_transaction(converted), - Err(_) => return Err(EthApiError::TransactionConversionError), - }; - - // submit the transaction to the pool with a `Local` origin - let hash = self.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?; - - Ok(hash) - } - - async fn spawn_with_call_at( - &self, - request: TransactionRequest, - at: BlockId, - overrides: EvmOverrides, - f: F, - ) -> EthResult - where - F: FnOnce(&mut StateCacheDB, EnvWithHandlerCfg) -> EthResult + Send + 'static, - R: Send + 'static, - { - let (cfg, block_env, at) = self.evm_env_at(at).await?; - let this = self.clone(); - self.inner - .blocking_task_pool - .spawn(move || { - let state = this.state_at(at)?; - let mut db = CacheDB::new(StateProviderDatabase::new(state)); - - let env = prepare_call_env( - cfg, - block_env, - request, - this.call_gas_limit(), - &mut db, - overrides, - )?; - f(&mut db, env) - }) - .await - .map_err(|_| EthApiError::InternalBlockingTaskError)? 
- } - - async fn transact_call_at( - &self, - request: TransactionRequest, - at: BlockId, - overrides: EvmOverrides, - ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> { - let this = self.clone(); - self.spawn_with_call_at(request, at, overrides, move |db, env| this.transact(db, env)).await - } - - async fn spawn_inspect_call_at( - &self, - request: TransactionRequest, - at: BlockId, - overrides: EvmOverrides, - inspector: I, - ) -> EthResult<(ResultAndState, EnvWithHandlerCfg)> - where - I: for<'a> Inspector<&'a mut StateCacheDB> + Send + 'static, - { - let this = self.clone(); - self.spawn_with_call_at(request, at, overrides, move |db, env| { - this.inspect(db, env, inspector) - }) - .await - } - - fn trace_at( - &self, - env: EnvWithHandlerCfg, - config: TracingInspectorConfig, - at: BlockId, - f: F, - ) -> EthResult - where - F: FnOnce(TracingInspector, ResultAndState) -> EthResult, - { - let this = self.clone(); - self.with_state_at_block(at, |state| { - let mut db = CacheDB::new(StateProviderDatabase::new(state)); - let mut inspector = TracingInspector::new(config); - let (res, _) = this.inspect(&mut db, env, &mut inspector)?; - f(inspector, res) - }) - } - - async fn spawn_trace_at_with_state( - &self, - env: EnvWithHandlerCfg, - config: TracingInspectorConfig, - at: BlockId, - f: F, - ) -> EthResult - where - F: FnOnce(TracingInspector, ResultAndState, StateCacheDB) -> EthResult + Send + 'static, - R: Send + 'static, - { - let this = self.clone(); - self.spawn_with_state_at_block(at, move |state| { - let mut db = CacheDB::new(StateProviderDatabase::new(state)); - let mut inspector = TracingInspector::new(config); - let (res, _) = this.inspect(&mut db, env, &mut inspector)?; - f(inspector, res, db) - }) - .await - } - - async fn transaction_and_block( - &self, - hash: B256, - ) -> EthResult> { - let (transaction, at) = match self.transaction_by_hash_at(hash).await? { - None => return Ok(None), - Some(res) => res, - }; - - // Note: this is always either hash or pending - let block_hash = match at { - BlockId::Hash(hash) => hash.block_hash, - _ => return Ok(None), - }; - let block = self.cache().get_block_with_senders(block_hash).await?; - Ok(block.map(|block| (transaction, block.seal(block_hash)))) - } - - async fn spawn_replay_transaction(&self, hash: B256, f: F) -> EthResult> - where - F: FnOnce(TransactionInfo, ResultAndState, StateCacheDB) -> EthResult + Send + 'static, - R: Send + 'static, - { - let (transaction, block) = match self.transaction_and_block(hash).await? 
{ - None => return Ok(None), - Some(res) => res, - }; - let (tx, tx_info) = transaction.split(); - - let (cfg, block_env, _) = self.evm_env_at(block.hash().into()).await?; - - // we need to get the state of the parent block because we're essentially replaying the - // block the transaction is included in - let parent_block = block.parent_hash; - let block_txs = block.into_transactions_ecrecovered(); - - let this = self.clone(); - self.spawn_with_state_at_block(parent_block.into(), move |state| { - let mut db = CacheDB::new(StateProviderDatabase::new(state)); - - // replay all transactions prior to the targeted transaction - this.replay_transactions_until( - &mut db, - cfg.clone(), - block_env.clone(), - block_txs, - tx.hash, - )?; - - let env = - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, tx_env_with_recovered(&tx)); - - let (res, _) = this.transact(&mut db, env)?; - f(tx_info, res, db) - }) - .await - .map(Some) - } - - async fn spawn_trace_transaction_in_block_with_inspector( - &self, - hash: B256, - mut inspector: Insp, - f: F, - ) -> EthResult> - where - F: FnOnce(TransactionInfo, Insp, ResultAndState, StateCacheDB) -> EthResult - + Send - + 'static, - Insp: for<'a> Inspector<&'a mut StateCacheDB> + Send + 'static, - R: Send + 'static, - { - let (transaction, block) = match self.transaction_and_block(hash).await? { - None => return Ok(None), - Some(res) => res, - }; - let (tx, tx_info) = transaction.split(); - - let (cfg, block_env, _) = self.evm_env_at(block.hash().into()).await?; - - // we need to get the state of the parent block because we're essentially replaying the - // block the transaction is included in - let parent_block = block.parent_hash; - let block_txs = block.into_transactions_ecrecovered(); - - let this = self.clone(); - self.spawn_with_state_at_block(parent_block.into(), move |state| { - let mut db = CacheDB::new(StateProviderDatabase::new(state)); - - // replay all transactions prior to the targeted transaction - this.replay_transactions_until( - &mut db, - cfg.clone(), - block_env.clone(), - block_txs, - tx.hash, - )?; - - let env = - EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, tx_env_with_recovered(&tx)); - - let (res, _) = this.inspect(&mut db, env, &mut inspector)?; - f(tx_info, inspector, res, db) - }) - .await - .map(Some) - } - - async fn trace_block_until_with_inspector( - &self, - block_id: BlockId, - highest_index: Option, - mut inspector_setup: Setup, - f: F, - ) -> EthResult>> - where - F: for<'a> Fn( - TransactionInfo, - Insp, - ExecutionResult, - &'a EvmState, - &'a StateCacheDB, - ) -> EthResult - + Send - + 'static, - Setup: FnMut() -> Insp + Send + 'static, - Insp: for<'a> Inspector<&'a mut StateCacheDB> + Send + 'static, - R: Send + 'static, - { - let ((cfg, block_env, _), block) = - futures::try_join!(self.evm_env_at(block_id), self.block_with_senders(block_id))?; - - let Some(block) = block else { return Ok(None) }; - - if block.body.is_empty() { - // nothing to trace - return Ok(Some(Vec::new())) - } - - // replay all transactions of the block - self.spawn_tracing_task_with(move |this| { - // we need to get the state of the parent block because we're replaying this block on - // top of its parent block's state - let state_at = block.parent_hash; - let block_hash = block.hash(); - - let block_number = block_env.number.saturating_to::(); - let base_fee = block_env.basefee.saturating_to::(); - - // prepare transactions, we do everything upfront to reduce time spent with open state - let max_transactions = 
highest_index.map_or(block.body.len(), |highest| { - // we need + 1 because the index is 0-based - highest as usize + 1 - }); - let mut results = Vec::with_capacity(max_transactions); - - let mut transactions = block - .into_transactions_ecrecovered() - .take(max_transactions) - .enumerate() - .map(|(idx, tx)| { - let tx_info = TransactionInfo { - hash: Some(tx.hash()), - index: Some(idx as u64), - block_hash: Some(block_hash), - block_number: Some(block_number), - base_fee: Some(base_fee), - }; - let tx_env = tx_env_with_recovered(&tx); - (tx_info, tx_env) - }) - .peekable(); - - // now get the state - let state = this.state_at(state_at.into())?; - let mut db = CacheDB::new(StateProviderDatabase::new(state)); - - while let Some((tx_info, tx)) = transactions.next() { - let env = EnvWithHandlerCfg::new_with_cfg_env(cfg.clone(), block_env.clone(), tx); - - let mut inspector = inspector_setup(); - let (res, _) = this.inspect(&mut db, env, &mut inspector)?; - let ResultAndState { result, state } = res; - results.push(f(tx_info, inspector, result, &state, &db)?); - - // need to apply the state changes of this transaction before executing the - // next transaction, but only if there's a next transaction - if transactions.peek().is_some() { - // commit the state changes to the DB - db.commit(state) - } - } - - Ok(results) - }) - .await - .map(Some) - } -} - -// === impl EthApi === - -impl EthApi -where - Self: Send + Sync + 'static, -{ - /// Spawns the given closure on a new blocking tracing task - async fn spawn_tracing_task_with(&self, f: F) -> EthResult - where - F: FnOnce(Self) -> EthResult + Send + 'static, - T: Send + 'static, - { - let this = self.clone(); - self.inner - .blocking_task_pool - .spawn(move || f(this)) - .await - .map_err(|_| EthApiError::InternalBlockingTaskError)? - } -} - -impl EthApi -where - Pool: TransactionPool + 'static, - Provider: - BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, - Network: NetworkInfo + 'static, - EvmConfig: ConfigureEvm, -{ - /// Returns the gas price if it is set, otherwise fetches a suggested gas price for legacy - /// transactions. - pub(crate) async fn legacy_gas_price(&self, gas_price: Option) -> EthResult { - match gas_price { - Some(gas_price) => Ok(gas_price), - None => { - // fetch a suggested gas price - self.gas_price().await - } - } - } - - /// Returns the EIP-1559 fees if they are set, otherwise fetches a suggested gas price for - /// EIP-1559 transactions. - /// - /// Returns (`max_fee`, `priority_fee`) - pub(crate) async fn eip1559_fees( - &self, - max_fee_per_gas: Option, - max_priority_fee_per_gas: Option, - ) -> EthResult<(U256, U256)> { - let max_fee_per_gas = match max_fee_per_gas { - Some(max_fee_per_gas) => max_fee_per_gas, - None => { - // fetch pending base fee - let base_fee = self - .block(BlockNumberOrTag::Pending) - .await? - .ok_or(EthApiError::UnknownBlockNumber)? - .base_fee_per_gas - .ok_or_else(|| { - EthApiError::InvalidTransaction( - RpcInvalidTransactionError::TxTypeNotSupported, - ) - })?; - U256::from(base_fee) - } - }; - - let max_priority_fee_per_gas = match max_priority_fee_per_gas { - Some(max_priority_fee_per_gas) => max_priority_fee_per_gas, - None => self.suggested_priority_fee().await?, - }; - Ok((max_fee_per_gas, max_priority_fee_per_gas)) - } - - /// Returns the EIP-4844 blob fee if it is set, otherwise fetches a blob fee. 
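The fee fallback in `eip1559_fees` above reduces to a small decision table: a missing `max_fee_per_gas` falls back to the pending block's base fee, and a missing priority fee to the oracle's suggestion. A sketch with plain `u128`s instead of `U256` (note that many wallets instead use something like `2 * base_fee + tip` for the max fee; the removed code does not):

```rust
/// Returns (max_fee_per_gas, max_priority_fee_per_gas) after fallbacks.
fn eip1559_fees(
    user_max_fee: Option<u128>,
    user_tip: Option<u128>,
    pending_base_fee: u128,
    suggested_tip: u128,
) -> (u128, u128) {
    let max_fee = user_max_fee.unwrap_or(pending_base_fee);
    let tip = user_tip.unwrap_or(suggested_tip);
    (max_fee, tip)
}

fn main() {
    assert_eq!(eip1559_fees(None, None, 30, 2), (30, 2));
    assert_eq!(eip1559_fees(Some(100), None, 30, 2), (100, 2));
}
```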
- pub(crate) async fn eip4844_blob_fee(&self, blob_fee: Option) -> EthResult { - match blob_fee { - Some(blob_fee) => Ok(blob_fee), - None => self.blob_base_fee().await, - } - } - - pub(crate) fn sign_request( - &self, - from: &Address, - request: TypedTransactionRequest, - ) -> EthResult { - for signer in self.inner.signers.read().iter() { - if signer.is_signer_for(from) { - return match signer.sign_transaction(request, from) { - Ok(tx) => Ok(tx), - Err(e) => Err(e.into()), - } - } - } - Err(EthApiError::InvalidTransactionSignature) - } - - /// Get Transaction by [`BlockId`] and the index of the transaction within that Block. - /// - /// Returns `Ok(None)` if the block does not exist, or the block as fewer transactions - pub(crate) async fn transaction_by_block_and_tx_index( - &self, - block_id: impl Into, - index: Index, - ) -> EthResult> { - if let Some(block) = self.block_with_senders(block_id.into()).await? { - let block_hash = block.hash(); - let block_number = block.number; - let base_fee_per_gas = block.base_fee_per_gas; - if let Some(tx) = block.into_transactions_ecrecovered().nth(index.into()) { - return Ok(Some(from_recovered_with_block_context( - tx, - block_hash, - block_number, - base_fee_per_gas, - index.into(), - ))) - } - } - - Ok(None) - } - - pub(crate) async fn raw_transaction_by_block_and_tx_index( - &self, - block_id: impl Into, - index: Index, - ) -> EthResult> { - if let Some(block) = self.block_with_senders(block_id.into()).await? { - if let Some(tx) = block.transactions().nth(index.into()) { - return Ok(Some(tx.envelope_encoded())) - } - } - - Ok(None) - } -} - -impl EthApi -where - Provider: BlockReaderIdExt + ChainSpecProvider, -{ - /// Helper function for `eth_getTransactionReceipt` - /// - /// Returns the receipt - #[cfg(not(feature = "optimism"))] - pub(crate) async fn build_transaction_receipt( - &self, - tx: TransactionSigned, - meta: TransactionMeta, - receipt: Receipt, - ) -> EthResult { - // get all receipts for the block - let all_receipts = match self.cache().get_receipts(meta.block_hash).await? { - Some(recpts) => recpts, - None => return Err(EthApiError::UnknownBlockNumber), - }; - build_transaction_receipt_with_block_receipts(tx, meta, receipt, &all_receipts) - } - - /// Helper function for `eth_getTransactionReceipt` (optimism) - /// - /// Returns the receipt - #[cfg(feature = "optimism")] - pub(crate) async fn build_transaction_receipt( - &self, - tx: TransactionSigned, - meta: TransactionMeta, - receipt: Receipt, - ) -> EthResult { - let (block, receipts) = self - .cache() - .get_block_and_receipts(meta.block_hash) - .await? - .ok_or(EthApiError::UnknownBlockNumber)?; - - let block = block.unseal(); - let l1_block_info = reth_evm_optimism::extract_l1_info(&block).ok(); - let optimism_tx_meta = self.build_op_tx_meta(&tx, l1_block_info, block.timestamp)?; - - build_transaction_receipt_with_block_receipts( - tx, - meta, - receipt, - &receipts, - optimism_tx_meta, - ) - } - - /// Builds op metadata object using the provided [`TransactionSigned`], L1 block info and - /// `block_timestamp`. The `L1BlockInfo` is used to calculate the l1 fee and l1 data gas for the - /// transaction. If the `L1BlockInfo` is not provided, the meta info will be empty. 
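For intuition on what `build_op_tx_meta` computes: on Optimism, the L1 cost of a transaction is driven by the size and zero/non-zero byte mix of its encoded envelope. A rough, hedged sketch of the pre-Fjord data-gas weighting; the real calculation lives in `reth_evm_optimism::RethL1BlockInfo` and also factors in the active hardfork, overhead, and scalars:

```rust
/// Pre-Fjord style L1 data gas: 4 gas per zero byte, 16 per non-zero byte.
fn l1_data_gas(envelope: &[u8]) -> u64 {
    envelope.iter().map(|&b| if b == 0 { 4u64 } else { 16 }).sum()
}

fn main() {
    assert_eq!(l1_data_gas(&[0, 0, 1, 0xff]), 4 + 4 + 16 + 16);
}
```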
- #[cfg(feature = "optimism")] - pub(crate) fn build_op_tx_meta( - &self, - tx: &TransactionSigned, - l1_block_info: Option, - block_timestamp: u64, - ) -> EthResult { - use crate::eth::{api::optimism::OptimismTxMeta, optimism::OptimismEthApiError}; - use reth_evm_optimism::RethL1BlockInfo; - - let Some(l1_block_info) = l1_block_info else { return Ok(OptimismTxMeta::default()) }; - - let (l1_fee, l1_data_gas) = if !tx.is_deposit() { - let envelope_buf = tx.envelope_encoded(); - - let inner_l1_fee = l1_block_info - .l1_tx_data_fee( - &self.inner.provider.chain_spec(), - block_timestamp, - &envelope_buf, - tx.is_deposit(), - ) - .map_err(|_| OptimismEthApiError::L1BlockFeeError)?; - let inner_l1_data_gas = l1_block_info - .l1_data_gas(&self.inner.provider.chain_spec(), block_timestamp, &envelope_buf) - .map_err(|_| OptimismEthApiError::L1BlockGasError)?; - ( - Some(inner_l1_fee.saturating_to::()), - Some(inner_l1_data_gas.saturating_to::()), - ) - } else { - (None, None) - }; - - Ok(OptimismTxMeta::new(Some(l1_block_info), l1_fee, l1_data_gas)) - } -} - -/// Represents from where a transaction was fetched. -#[derive(Debug, Clone, Eq, PartialEq)] -pub enum TransactionSource { - /// Transaction exists in the pool (Pending) - Pool(TransactionSignedEcRecovered), - /// Transaction already included in a block - /// - /// This can be a historical block or a pending block (received from the CL) - Block { - /// Transaction fetched via provider - transaction: TransactionSignedEcRecovered, - /// Index of the transaction in the block - index: u64, - /// Hash of the block. - block_hash: B256, - /// Number of the block. - block_number: u64, - /// base fee of the block. - base_fee: Option, - }, -} - -// === impl TransactionSource === - -impl TransactionSource { - /// Consumes the type and returns the wrapped transaction. - pub fn into_recovered(self) -> TransactionSignedEcRecovered { - self.into() - } - - /// Returns the transaction and block related info, if not pending - pub fn split(self) -> (TransactionSignedEcRecovered, TransactionInfo) { - match self { - Self::Pool(tx) => { - let hash = tx.hash(); - ( - tx, - TransactionInfo { - hash: Some(hash), - index: None, - block_hash: None, - block_number: None, - base_fee: None, - }, - ) - } - Self::Block { transaction, index, block_hash, block_number, base_fee } => { - let hash = transaction.hash(); - ( - transaction, - TransactionInfo { - hash: Some(hash), - index: Some(index), - block_hash: Some(block_hash), - block_number: Some(block_number), - base_fee: base_fee.map(u128::from), - }, - ) - } - } - } -} - -impl From for TransactionSignedEcRecovered { - fn from(value: TransactionSource) -> Self { - match value { - TransactionSource::Pool(tx) => tx, - TransactionSource::Block { transaction, .. } => transaction, - } - } -} - -impl From for Transaction { - fn from(value: TransactionSource) -> Self { - match value { - TransactionSource::Pool(tx) => reth_rpc_types_compat::transaction::from_recovered(tx), - TransactionSource::Block { transaction, index, block_hash, block_number, base_fee } => { - from_recovered_with_block_context( - transaction, - block_hash, - block_number, - base_fee, - index as usize, - ) - } - } - } -} - -/// Helper function to construct a transaction receipt -/// -/// Note: This requires _all_ block receipts because we need to calculate the gas used by the -/// transaction. 
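As the note above says, execution-layer receipts only record *cumulative* gas, so the per-transaction figure is the difference between adjacent entries; that is why the builder below takes the whole block's receipts. A small worked sketch of the subtraction:

```rust
/// Per-transaction gas from cumulative receipt values, mirroring the
/// arithmetic in `build_transaction_receipt_with_block_receipts` below.
fn tx_gas_used(cumulative_gas: &[u64], index: usize) -> u64 {
    if index == 0 {
        cumulative_gas[0]
    } else {
        cumulative_gas[index] - cumulative_gas[index - 1]
    }
}

fn main() {
    // Three receipts with cumulative gas 21_000, 63_000 and 84_000.
    let cumulative = [21_000, 63_000, 84_000];
    assert_eq!(tx_gas_used(&cumulative, 0), 21_000);
    assert_eq!(tx_gas_used(&cumulative, 1), 42_000);
    assert_eq!(tx_gas_used(&cumulative, 2), 21_000);
}
```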
-pub(crate) fn build_transaction_receipt_with_block_receipts( - transaction: TransactionSigned, - meta: TransactionMeta, - receipt: Receipt, - all_receipts: &[Receipt], - #[cfg(feature = "optimism")] optimism_tx_meta: crate::eth::api::optimism::OptimismTxMeta, -) -> EthResult { - // Note: we assume this transaction is valid, because it's mined (or part of pending block) and - // we don't need to check for pre EIP-2 - let from = - transaction.recover_signer_unchecked().ok_or(EthApiError::InvalidTransactionSignature)?; - - // get the previous transaction cumulative gas used - let gas_used = if meta.index == 0 { - receipt.cumulative_gas_used - } else { - let prev_tx_idx = (meta.index - 1) as usize; - all_receipts - .get(prev_tx_idx) - .map(|prev_receipt| receipt.cumulative_gas_used - prev_receipt.cumulative_gas_used) - .unwrap_or_default() - }; - - let blob_gas_used = transaction.transaction.blob_gas_used(); - // Blob gas price should only be present if the transaction is a blob transaction - let blob_gas_price = blob_gas_used.and_then(|_| meta.excess_blob_gas.map(calc_blob_gasprice)); - let logs_bloom = receipt.bloom_slow(); - - // get number of logs in the block - let mut num_logs = 0; - for prev_receipt in all_receipts.iter().take(meta.index as usize) { - num_logs += prev_receipt.logs.len(); - } - - let mut logs = Vec::with_capacity(receipt.logs.len()); - for (tx_log_idx, log) in receipt.logs.into_iter().enumerate() { - let rpclog = Log { - inner: log, - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - block_timestamp: Some(meta.timestamp), - transaction_hash: Some(meta.tx_hash), - transaction_index: Some(meta.index), - log_index: Some((num_logs + tx_log_idx) as u64), - removed: false, - }; - logs.push(rpclog); - } - - let rpc_receipt = reth_rpc_types::Receipt { - status: receipt.success.into(), - cumulative_gas_used: receipt.cumulative_gas_used as u128, - logs, - }; - - #[allow(clippy::needless_update)] - let res_receipt = TransactionReceipt { - inner: AnyReceiptEnvelope { - inner: ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, - r#type: transaction.transaction.tx_type().into(), - }, - transaction_hash: meta.tx_hash, - transaction_index: Some(meta.index), - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - from, - to: None, - gas_used: gas_used as u128, - contract_address: None, - effective_gas_price: transaction.effective_gas_price(meta.base_fee), - // TODO pre-byzantium receipts have a post-transaction state root - state_root: None, - // EIP-4844 fields - blob_gas_price, - blob_gas_used: blob_gas_used.map(u128::from), - }; - let mut res_receipt = WithOtherFields::new(res_receipt); - - #[cfg(feature = "optimism")] - { - let mut op_fields = OptimismTransactionReceiptFields::default(); - - if transaction.is_deposit() { - op_fields.deposit_nonce = receipt.deposit_nonce.map(reth_primitives::U64::from); - op_fields.deposit_receipt_version = - receipt.deposit_receipt_version.map(reth_primitives::U64::from); - } else if let Some(l1_block_info) = optimism_tx_meta.l1_block_info { - op_fields.l1_fee = optimism_tx_meta.l1_fee; - op_fields.l1_gas_used = optimism_tx_meta.l1_data_gas.map(|dg| { - dg + l1_block_info.l1_fee_overhead.unwrap_or_default().saturating_to::() - }); - op_fields.l1_fee_scalar = - Some(f64::from(l1_block_info.l1_base_fee_scalar) / 1_000_000.0); - op_fields.l1_gas_price = Some(l1_block_info.l1_base_fee.saturating_to()); - } - - res_receipt.other = op_fields.into(); - } - - match transaction.transaction.kind() { - 
Create => { - res_receipt.contract_address = Some(from.create(transaction.transaction.nonce())); - } - Call(addr) => { - res_receipt.to = Some(Address(*addr)); - } - } - - Ok(res_receipt) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::eth::{ - cache::EthStateCache, gas_oracle::GasPriceOracle, FeeHistoryCache, FeeHistoryCacheConfig, - }; - use reth_evm_ethereum::EthEvmConfig; - use reth_network_api::noop::NoopNetwork; - use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, hex_literal::hex}; - use reth_provider::test_utils::NoopProvider; - use reth_tasks::pool::BlockingTaskPool; - use reth_transaction_pool::test_utils::testing_pool; - - #[tokio::test] - async fn send_raw_transaction() { - let noop_provider = NoopProvider::default(); - let noop_network_provider = NoopNetwork::default(); - - let pool = testing_pool(); - - let evm_config = EthEvmConfig::default(); - let cache = EthStateCache::spawn(noop_provider, Default::default(), evm_config); - let fee_history_cache = - FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); - let eth_api = EthApi::new( - noop_provider, - pool.clone(), - noop_network_provider, - cache.clone(), - GasPriceOracle::new(noop_provider, Default::default(), cache.clone()), - ETHEREUM_BLOCK_GAS_LIMIT, - BlockingTaskPool::build().expect("failed to build tracing pool"), - fee_history_cache, - evm_config, - None, - ); - - // https://etherscan.io/tx/0xa694b71e6c128a2ed8e2e0f6770bddbe52e3bb8f10e8472f9a79ab81497a8b5d - let tx_1 = Bytes::from(hex!("02f871018303579880850555633d1b82520894eee27662c2b8eba3cd936a23f039f3189633e4c887ad591c62bdaeb180c080a07ea72c68abfb8fca1bd964f0f99132ed9280261bdca3e549546c0205e800f7d0a05b4ef3039e9c9b9babc179a1878fb825b5aaf5aed2fa8744854150157b08d6f3")); - - let tx_1_result = eth_api.send_raw_transaction(tx_1).await.unwrap(); - assert_eq!( - pool.len(), - 1, - "expect 1 transactions in the pool, but pool size is {}", - pool.len() - ); - - // https://etherscan.io/tx/0x48816c2f32c29d152b0d86ff706f39869e6c1f01dc2fe59a3c1f9ecf39384694 - let tx_2 = 
Bytes::from(hex!("02f9043c018202b7843b9aca00850c807d37a08304d21d94ef1c6e67703c7bd7107eed8303fbe6ec2554bf6b881bc16d674ec80000b903c43593564c000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000063e2d99f00000000000000000000000000000000000000000000000000000000000000030b000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001e0000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000001bc16d674ec80000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000065717fe021ea67801d1088cc80099004b05b64600000000000000000000000000000000000000000000000001bc16d674ec80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002bc02aaa39b223fe8d0a0e5c4f27ead9083c756cc20001f4a0b86991c6218b36c1d19d4a2e9eb0ce3606eb480000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000009e95fd5965fd1f1a6f0d4600000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48000000000000000000000000428dca9537116148616a5a3e44035af17238fe9dc080a0c6ec1e41f5c0b9511c49b171ad4e04c6bb419c74d99fe9891d74126ec6e4e879a032069a753d7a2cfa158df95421724d24c0e9501593c09905abf3699b4a4405ce")); - - let tx_2_result = eth_api.send_raw_transaction(tx_2).await.unwrap(); - assert_eq!( - pool.len(), - 2, - "expect 2 transactions in the pool, but pool size is {}", - pool.len() - ); - - assert!(pool.get(&tx_1_result).is_some(), "tx1 not found in the pool"); - assert!(pool.get(&tx_2_result).is_some(), "tx2 not found in the pool"); - } -} diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index a8e088278..d28013822 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -1,28 +1,31 @@ //! `Eth` bundle implementation and helpers. 
-use crate::eth::{ - error::{EthApiError, EthResult, RpcInvalidTransactionError}, - revm_utils::FillableTransaction, - utils::recover_raw_transaction, - EthTransactions, -}; +use std::sync::Arc; + use jsonrpsee::core::RpcResult; +use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_primitives::{ - constants::eip4844::MAINNET_KZG_TRUSTED_SETUP, keccak256, revm_primitives::db::{DatabaseCommit, DatabaseRef}, PooledTransactionsElement, U256, }; use reth_revm::database::StateProviderDatabase; -use reth_rpc_api::EthCallBundleApiServer; -use reth_rpc_types::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; +use reth_rpc_types::mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use reth_tasks::pool::BlockingTaskGuard; use revm::{ db::CacheDB, primitives::{ResultAndState, TxEnv}, }; -use revm_primitives::{EnvWithHandlerCfg, MAX_BLOB_GAS_PER_BLOCK}; -use std::sync::Arc; +use revm_primitives::{EnvKzgSettings, EnvWithHandlerCfg, SpecId, MAX_BLOB_GAS_PER_BLOCK}; + +use reth_provider::{ChainSpecProvider, HeaderProvider}; +use reth_rpc_eth_api::{ + helpers::{Call, EthTransactions, LoadPendingBlock}, + EthCallBundleApiServer, +}; +use reth_rpc_eth_types::{ + utils::recover_raw_transaction, EthApiError, EthResult, RpcInvalidTransactionError, +}; /// `Eth` bundle implementation. pub struct EthBundle { @@ -39,14 +42,22 @@ impl EthBundle { impl EthBundle where - Eth: EthTransactions + 'static, + Eth: EthTransactions + LoadPendingBlock + Call + 'static, { /// Simulates a bundle of transactions at the top of a given block number with the state of /// another (or the same) block. This can be used to simulate future blocks with the current /// state, or it can be used to simulate a past block. The sender is responsible for signing the /// transactions and using the correct nonce and ensuring validity pub async fn call_bundle(&self, bundle: EthCallBundle) -> EthResult { - let EthCallBundle { txs, block_number, state_block_number, timestamp } = bundle; + let EthCallBundle { + txs, + block_number, + state_block_number, + timestamp, + gas_limit, + difficulty, + base_fee, + } = bundle; if txs.is_empty() { return Err(EthApiError::InvalidParams( EthBundleError::EmptyBundleTransactions.to_string(), @@ -86,6 +97,7 @@ where } let block_id: reth_rpc_types::BlockId = state_block_number.into(); + // Note: the block number is considered the `parent` block: let (cfg, mut block_env, at) = self.inner.eth_api.evm_env_at(block_id).await?; // need to adjust the timestamp for the next block @@ -95,10 +107,37 @@ where block_env.timestamp += U256::from(12); } + if let Some(difficulty) = difficulty { + block_env.difficulty = U256::from(difficulty); + } + + if let Some(gas_limit) = gas_limit { + block_env.gas_limit = U256::from(gas_limit); + } + + if let Some(base_fee) = base_fee { + block_env.basefee = U256::from(base_fee); + } else if cfg.handler_cfg.spec_id.is_enabled_in(SpecId::LONDON) { + let parent_block = block_env.number.saturating_to::(); + // here we need to fetch the _next_ block's basefee based on the parent block + let parent = LoadPendingBlock::provider(&self.inner.eth_api) + .header_by_number(parent_block)? 
+ .ok_or_else(|| EthApiError::UnknownBlockNumber)?; + if let Some(base_fee) = parent.next_block_base_fee( + LoadPendingBlock::provider(&self.inner.eth_api) + .chain_spec() + .base_fee_params_at_block(parent_block), + ) { + block_env.basefee = U256::from(base_fee); + } + } + let state_block_number = block_env.number; // use the block number of the request block_env.number = U256::from(block_number); + let eth_api = self.inner.eth_api.clone(); + self.inner .eth_api .spawn_with_state_at_block(at, move |state| { @@ -116,8 +155,7 @@ where let mut total_gas_fess = U256::ZERO; let mut hash_bytes = Vec::with_capacity(32 * transactions.len()); - let mut evm = - revm::Evm::builder().with_db(db).with_env_with_handler_cfg(env).build(); + let mut evm = Call::evm_config(ð_api).evm_with_env(db, env); let mut results = Vec::with_capacity(transactions.len()); let mut transactions = transactions.into_iter().peekable(); @@ -126,17 +164,17 @@ where // Verify that the given blob data, commitments, and proofs are all valid for // this transaction. if let PooledTransactionsElement::BlobTransaction(ref tx) = tx { - tx.validate(MAINNET_KZG_TRUSTED_SETUP.as_ref()) + tx.validate(EnvKzgSettings::Default.get()) .map_err(|e| EthApiError::InvalidParams(e.to_string()))?; } - let tx = tx.into_ecrecovered_transaction(signer); + let tx = tx.into_transaction(); hash_bytes.extend_from_slice(tx.hash().as_slice()); let gas_price = tx .effective_tip_per_gas(basefee) .ok_or_else(|| RpcInvalidTransactionError::FeeCapTooLow)?; - tx.try_fill_tx_env(evm.tx_mut())?; + Call::evm_config(ð_api).fill_tx_env(evm.tx_mut(), &tx, signer); let ResultAndState { result, state } = evm.transact()?; let gas_used = result.gas_used(); @@ -167,7 +205,7 @@ where let tx_res = EthCallBundleTransactionResult { coinbase_diff, eth_sent_to_coinbase, - from_address: tx.signer(), + from_address: signer, gas_fees, gas_price: U256::from(gas_price), gas_used, @@ -213,7 +251,7 @@ where #[async_trait::async_trait] impl EthCallBundleApiServer for EthBundle where - Eth: EthTransactions + 'static, + Eth: EthTransactions + LoadPendingBlock + Call + 'static, { async fn call_bundle(&self, request: EthCallBundle) -> RpcResult { Ok(Self::call_bundle(self, request).await?) diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs new file mode 100644 index 000000000..36030741f --- /dev/null +++ b/crates/rpc/rpc/src/eth/core.rs @@ -0,0 +1,611 @@ +//! Implementation of the [`jsonrpsee`] generated [`EthApiServer`](crate::EthApi) trait +//! Handles RPC requests for the `eth_` namespace. + +use futures::Future; +use std::sync::Arc; + +use derive_more::Deref; +use reth_primitives::{BlockNumberOrTag, U256}; +use reth_provider::BlockReaderIdExt; +use reth_rpc_eth_api::{ + helpers::{transaction::UpdateRawTxForwarder, EthSigner, SpawnBlocking}, + RawTransactionForwarder, +}; +use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle, PendingBlock}; +use reth_tasks::{ + pool::{BlockingTaskGuard, BlockingTaskPool}, + TaskSpawner, TokioTaskExecutor, +}; +use tokio::sync::{AcquireError, Mutex, OwnedSemaphorePermit}; + +use crate::eth::DevSigner; + +/// `Eth` API implementation. +/// +/// This type provides the functionality for handling `eth_` related requests. +/// These are implemented two-fold: Core functionality is implemented as +/// [`EthApiSpec`](reth_rpc_eth_api::helpers::EthApiSpec) trait. Additionally, the required server +/// implementations (e.g. 
[`EthApiServer`](reth_rpc_eth_api::EthApiServer)) are implemented +/// separately in submodules. The rpc handler implementation can then delegate to the main impls. +/// This way [`EthApi`] is not limited to [`jsonrpsee`] and can be used standalone or in other +/// network handlers (for example ipc). +#[derive(Deref)] +pub struct EthApi { + /// All nested fields bundled together. + pub(super) inner: Arc>, +} + +impl EthApi +where + Provider: BlockReaderIdExt, +{ + /// Creates a new, shareable instance using the default tokio task spawner. + #[allow(clippy::too_many_arguments)] + pub fn new( + provider: Provider, + pool: Pool, + network: Network, + eth_cache: EthStateCache, + gas_oracle: GasPriceOracle, + gas_cap: impl Into, + eth_proof_window: u64, + blocking_task_pool: BlockingTaskPool, + fee_history_cache: FeeHistoryCache, + evm_config: EvmConfig, + raw_transaction_forwarder: Option>, + proof_permits: usize, + ) -> Self { + Self::with_spawner( + provider, + pool, + network, + eth_cache, + gas_oracle, + gas_cap.into().into(), + eth_proof_window, + Box::::default(), + blocking_task_pool, + fee_history_cache, + evm_config, + raw_transaction_forwarder, + proof_permits, + ) + } + + /// Creates a new, shareable instance. + #[allow(clippy::too_many_arguments)] + pub fn with_spawner( + provider: Provider, + pool: Pool, + network: Network, + eth_cache: EthStateCache, + gas_oracle: GasPriceOracle, + gas_cap: u64, + eth_proof_window: u64, + task_spawner: Box, + blocking_task_pool: BlockingTaskPool, + fee_history_cache: FeeHistoryCache, + evm_config: EvmConfig, + raw_transaction_forwarder: Option>, + proof_permits: usize, + ) -> Self { + // get the block number of the latest block + let latest_block = provider + .header_by_number_or_tag(BlockNumberOrTag::Latest) + .ok() + .flatten() + .map(|header| header.number) + .unwrap_or_default(); + + let inner = EthApiInner { + provider, + pool, + network, + signers: parking_lot::RwLock::new(Default::default()), + eth_cache, + gas_oracle, + gas_cap, + eth_proof_window, + starting_block: U256::from(latest_block), + task_spawner, + pending_block: Default::default(), + blocking_task_pool, + fee_history_cache, + evm_config, + raw_transaction_forwarder: parking_lot::RwLock::new(raw_transaction_forwarder), + blocking_task_guard: BlockingTaskGuard::new(proof_permits), + }; + + Self { inner: Arc::new(inner) } + } +} + +impl std::fmt::Debug + for EthApi +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("EthApi").finish_non_exhaustive() + } +} + +impl Clone for EthApi { + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} + +impl SpawnBlocking + for EthApi +where + Self: Clone + Send + Sync + 'static, +{ + #[inline] + fn io_task_spawner(&self) -> impl reth_tasks::TaskSpawner { + self.inner.task_spawner() + } + + #[inline] + fn tracing_task_pool(&self) -> &reth_tasks::pool::BlockingTaskPool { + self.inner.blocking_task_pool() + } + + fn acquire_owned( + &self, + ) -> impl Future> + Send { + self.blocking_task_guard.clone().acquire_owned() + } + + fn acquire_many_owned( + &self, + n: u32, + ) -> impl Future> + Send { + self.blocking_task_guard.clone().acquire_many_owned(n) + } +} + +impl EthApi { + /// Generates 20 random developer accounts. + /// Used in DEV mode. 
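The `SpawnBlocking` impl above combines two resources: a permit from the `BlockingTaskGuard` semaphore, which bounds how many heavy requests run concurrently, and a dedicated pool for the CPU-bound work itself. A sketch of the same shape on a plain tokio runtime (assumes a `tokio` dependency; `spawn_blocking` stands in for reth's `BlockingTaskPool`):

```rust
use std::sync::Arc;

use tokio::sync::Semaphore;

async fn run_guarded(sem: Arc<Semaphore>) -> u64 {
    // Mirrors `acquire_owned` on the blocking task guard above.
    let _permit = sem.acquire_owned().await.expect("semaphore closed");
    // The CPU-heavy part runs off the async worker threads.
    tokio::task::spawn_blocking(|| (0..1_000u64).sum())
        .await
        .expect("blocking task panicked")
}

#[tokio::main]
async fn main() {
    let sem = Arc::new(Semaphore::new(2)); // at most two permits at once
    let (a, b) = tokio::join!(run_guarded(sem.clone()), run_guarded(sem));
    assert_eq!(a + b, 2 * 499_500);
}
```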
+ pub fn with_dev_accounts(&self) { + let mut signers = self.inner.signers.write(); + *signers = DevSigner::random_signers(20); + } +} + +/// Container type `EthApi` +#[allow(missing_debug_implementations)] +pub struct EthApiInner { + /// The transaction pool. + pool: Pool, + /// The provider that can interact with the chain. + provider: Provider, + /// An interface to interact with the network + network: Network, + /// All configured Signers + signers: parking_lot::RwLock>>, + /// The async cache frontend for eth related data + eth_cache: EthStateCache, + /// The async gas oracle frontend for gas price suggestions + gas_oracle: GasPriceOracle, + /// Maximum gas limit for `eth_call` and call tracing RPC methods. + gas_cap: u64, + /// The maximum number of blocks into the past for generating state proofs. + eth_proof_window: u64, + /// The block number at which the node started + starting_block: U256, + /// The type that can spawn tasks which would otherwise block. + task_spawner: Box, + /// Cached pending block if any + pending_block: Mutex>, + /// A pool dedicated to CPU heavy blocking tasks. + blocking_task_pool: BlockingTaskPool, + /// Cache for block fees history + fee_history_cache: FeeHistoryCache, + /// The type that defines how to configure the EVM + evm_config: EvmConfig, + /// Allows forwarding received raw transactions + raw_transaction_forwarder: parking_lot::RwLock>>, + /// Guard for getproof calls + blocking_task_guard: BlockingTaskGuard, +} + +impl EthApiInner { + /// Returns a handle to data on disk. + #[inline] + pub const fn provider(&self) -> &Provider { + &self.provider + } + + /// Returns a handle to data in memory. + #[inline] + pub const fn cache(&self) -> &EthStateCache { + &self.eth_cache + } + + /// Returns a handle to the pending block. + #[inline] + pub const fn pending_block(&self) -> &Mutex> { + &self.pending_block + } + + /// Returns a handle to the task spawner. + #[inline] + pub const fn task_spawner(&self) -> &dyn TaskSpawner { + &*self.task_spawner + } + + /// Returns a handle to the blocking thread pool. + #[inline] + pub const fn blocking_task_pool(&self) -> &BlockingTaskPool { + &self.blocking_task_pool + } + + /// Returns a handle to the EVM config. + #[inline] + pub const fn evm_config(&self) -> &EvmConfig { + &self.evm_config + } + + /// Returns a handle to the transaction pool. + #[inline] + pub const fn pool(&self) -> &Pool { + &self.pool + } + + /// Returns a handle to the transaction forwarder. + #[inline] + pub fn raw_tx_forwarder(&self) -> Option> { + self.raw_transaction_forwarder.read().clone() + } + + /// Returns the gas cap. + #[inline] + pub const fn gas_cap(&self) -> u64 { + self.gas_cap + } + + /// Returns a handle to the gas oracle. + #[inline] + pub const fn gas_oracle(&self) -> &GasPriceOracle { + &self.gas_oracle + } + + /// Returns a handle to the fee history cache. + #[inline] + pub const fn fee_history_cache(&self) -> &FeeHistoryCache { + &self.fee_history_cache + } + + /// Returns a handle to the signers. + #[inline] + pub const fn signers(&self) -> &parking_lot::RwLock>> { + &self.signers + } + + /// Returns the starting block. + #[inline] + pub const fn starting_block(&self) -> U256 { + self.starting_block + } + + /// Returns the inner `Network` + #[inline] + pub const fn network(&self) -> &Network { + &self.network + } + + /// The maximum number of blocks into the past for generating state proofs. 
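`EthApiInner` above exists so that `EthApi` itself stays a thin, cheaply clonable handle: all state lives once behind an `Arc`, the accessors borrow from it, and the manual `Clone` impl only bumps the reference count. A minimal sketch of the pattern with a hypothetical field:

```rust
use std::sync::Arc;

struct Inner {
    starting_block: u64, // shared state, written once at construction
}

struct Api {
    inner: Arc<Inner>,
}

impl Clone for Api {
    fn clone(&self) -> Self {
        // Cloning the handle never copies the inner state.
        Self { inner: Arc::clone(&self.inner) }
    }
}

impl Api {
    fn starting_block(&self) -> u64 {
        self.inner.starting_block
    }
}

fn main() {
    let api = Api { inner: Arc::new(Inner { starting_block: 42 }) };
    let handle = api.clone();
    assert_eq!(handle.starting_block(), api.starting_block());
}
```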
+ #[inline] + pub const fn eth_proof_window(&self) -> u64 { + self.eth_proof_window + } +} + +impl UpdateRawTxForwarder + for EthApiInner +{ + fn set_eth_raw_transaction_forwarder(&self, forwarder: Arc) { + self.raw_transaction_forwarder.write().replace(forwarder); + } +} + +#[cfg(test)] +mod tests { + use jsonrpsee_types::error::INVALID_PARAMS_CODE; + use reth_chainspec::BaseFeeParams; + use reth_evm_ethereum::EthEvmConfig; + use reth_network_api::noop::NoopNetwork; + use reth_primitives::{ + constants::ETHEREUM_BLOCK_GAS_LIMIT, Block, BlockNumberOrTag, Header, TransactionSigned, + B256, U64, + }; + use reth_provider::{ + test_utils::{MockEthProvider, NoopProvider}, + BlockReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory, + }; + use reth_rpc_eth_api::EthApiServer; + use reth_rpc_eth_types::{ + EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, GasPriceOracle, + }; + use reth_rpc_server_types::constants::{DEFAULT_ETH_PROOF_WINDOW, DEFAULT_PROOF_PERMITS}; + use reth_rpc_types::FeeHistory; + use reth_tasks::pool::BlockingTaskPool; + use reth_testing_utils::{generators, generators::Rng}; + use reth_transaction_pool::test_utils::{testing_pool, TestPool}; + + use crate::EthApi; + + fn build_test_eth_api< + P: BlockReaderIdExt + + BlockReader + + ChainSpecProvider + + EvmEnvProvider + + StateProviderFactory + + Unpin + + Clone + + 'static, + >( + provider: P, + ) -> EthApi { + let evm_config = EthEvmConfig::default(); + let cache = EthStateCache::spawn(provider.clone(), Default::default(), evm_config); + let fee_history_cache = + FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); + + EthApi::new( + provider.clone(), + testing_pool(), + NoopNetwork::default(), + cache.clone(), + GasPriceOracle::new(provider, Default::default(), cache), + ETHEREUM_BLOCK_GAS_LIMIT, + DEFAULT_ETH_PROOF_WINDOW, + BlockingTaskPool::build().expect("failed to build tracing pool"), + fee_history_cache, + evm_config, + None, + DEFAULT_PROOF_PERMITS, + ) + } + + // Function to prepare the EthApi with mock data + fn prepare_eth_api( + newest_block: u64, + mut oldest_block: Option, + block_count: u64, + mock_provider: MockEthProvider, + ) -> (EthApi, Vec, Vec) { + let mut rng = generators::rng(); + + // Build mock data + let mut gas_used_ratios = Vec::new(); + let mut base_fees_per_gas = Vec::new(); + let mut last_header = None; + let mut parent_hash = B256::default(); + + for i in (0..block_count).rev() { + let hash = rng.gen(); + let gas_limit: u64 = rng.gen(); + let gas_used: u64 = rng.gen(); + // Note: Generates a u32 to avoid overflows later + let base_fee_per_gas: Option = rng.gen::().then(|| rng.gen::() as u64); + + let header = Header { + number: newest_block - i, + gas_limit, + gas_used, + base_fee_per_gas, + parent_hash, + ..Default::default() + }; + last_header = Some(header.clone()); + parent_hash = hash; + + let mut transactions = vec![]; + for _ in 0..100 { + let random_fee: u128 = rng.gen(); + + if let Some(base_fee_per_gas) = header.base_fee_per_gas { + let transaction = TransactionSigned { + transaction: reth_primitives::Transaction::Eip1559( + reth_primitives::TxEip1559 { + max_priority_fee_per_gas: random_fee, + max_fee_per_gas: random_fee + base_fee_per_gas as u128, + ..Default::default() + }, + ), + ..Default::default() + }; + + transactions.push(transaction); + } else { + let transaction = TransactionSigned { + transaction: reth_primitives::Transaction::Legacy(Default::default()), + ..Default::default() + }; + + transactions.push(transaction); + } 
+ } + + mock_provider.add_block( + hash, + Block { header: header.clone(), body: transactions, ..Default::default() }, + ); + mock_provider.add_header(hash, header); + + oldest_block.get_or_insert(hash); + gas_used_ratios.push(gas_used as f64 / gas_limit as f64); + base_fees_per_gas.push(base_fee_per_gas.map(|fee| fee as u128).unwrap_or_default()); + } + + // Add final base fee (for the next block outside of the request) + let last_header = last_header.unwrap(); + base_fees_per_gas.push(BaseFeeParams::ethereum().next_block_base_fee( + last_header.gas_used as u128, + last_header.gas_limit as u128, + last_header.base_fee_per_gas.unwrap_or_default() as u128, + )); + + let eth_api = build_test_eth_api(mock_provider); + + (eth_api, base_fees_per_gas, gas_used_ratios) + } + + /// Invalid block range + #[tokio::test] + async fn test_fee_history_empty() { + let response = as EthApiServer>::fee_history( + &build_test_eth_api(NoopProvider::default()), + U64::from(1), + BlockNumberOrTag::Latest, + None, + ) + .await; + assert!(response.is_err()); + let error_object = response.unwrap_err(); + assert_eq!(error_object.code(), INVALID_PARAMS_CODE); + } + + #[tokio::test] + /// Invalid block range (request is before genesis) + async fn test_fee_history_invalid_block_range_before_genesis() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, _, _) = + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); + + let response = as EthApiServer>::fee_history( + ð_api, + U64::from(newest_block + 1), + newest_block.into(), + Some(vec![10.0]), + ) + .await; + + assert!(response.is_err()); + let error_object = response.unwrap_err(); + assert_eq!(error_object.code(), INVALID_PARAMS_CODE); + } + + #[tokio::test] + /// Invalid block range (request is in the future) + async fn test_fee_history_invalid_block_range_in_future() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, _, _) = + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); + + let response = as EthApiServer>::fee_history( + ð_api, + U64::from(1), + (newest_block + 1000).into(), + Some(vec![10.0]), + ) + .await; + + assert!(response.is_err()); + let error_object = response.unwrap_err(); + assert_eq!(error_object.code(), INVALID_PARAMS_CODE); + } + + #[tokio::test] + /// Requesting no block should result in a default response + async fn test_fee_history_no_block_requested() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, _, _) = + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); + + let response = as EthApiServer>::fee_history( + ð_api, + U64::from(0), + newest_block.into(), + None, + ) + .await + .unwrap(); + assert_eq!( + response, + FeeHistory::default(), + "none: requesting no block should yield a default response" + ); + } + + #[tokio::test] + /// Requesting a single block should return 1 block (+ base fee for the next block over) + async fn test_fee_history_single_block() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, base_fees_per_gas, gas_used_ratios) = + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); + + let fee_history = + eth_api.fee_history(U64::from(1), newest_block.into(), None).await.unwrap(); + assert_eq!( + fee_history.base_fee_per_gas, + &base_fees_per_gas[base_fees_per_gas.len() - 2..], + "one: base fee per gas 
is incorrect" + ); + assert_eq!( + fee_history.base_fee_per_gas.len(), + 2, + "one: should return base fee of the next block as well" + ); + assert_eq!( + &fee_history.gas_used_ratio, + &gas_used_ratios[gas_used_ratios.len() - 1..], + "one: gas used ratio is incorrect" + ); + assert_eq!(fee_history.oldest_block, newest_block, "one: oldest block is incorrect"); + assert!( + fee_history.reward.is_none(), + "one: no percentiles were requested, so there should be no rewards result" + ); + } + + /// Requesting all blocks should be ok + #[tokio::test] + async fn test_fee_history_all_blocks() { + let block_count = 10; + let newest_block = 1337; + let oldest_block = None; + + let (eth_api, base_fees_per_gas, gas_used_ratios) = + prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); + + let fee_history = + eth_api.fee_history(U64::from(block_count), newest_block.into(), None).await.unwrap(); + + assert_eq!( + &fee_history.base_fee_per_gas, &base_fees_per_gas, + "all: base fee per gas is incorrect" + ); + assert_eq!( + fee_history.base_fee_per_gas.len() as u64, + block_count + 1, + "all: should return base fee of the next block as well" + ); + assert_eq!( + &fee_history.gas_used_ratio, &gas_used_ratios, + "all: gas used ratio is incorrect" + ); + assert_eq!( + fee_history.oldest_block, + newest_block - block_count + 1, + "all: oldest block is incorrect" + ); + assert!( + fee_history.reward.is_none(), + "all: no percentiles were requested, so there should be no rewards result" + ); + } +} diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 1fea2df4a..1a2e55a52 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -1,34 +1,31 @@ -use super::cache::EthStateCache; -use crate::{ - eth::{ - error::EthApiError, - logs_utils::{self, append_matching_block_logs}, - }, - result::{rpc_error_with_code, ToRpcResult}, - EthSubscriptionIdProvider, +//! 
`eth_` `Filter` RPC handler implementation + +use std::{ + collections::HashMap, + fmt, + iter::StepBy, + ops::RangeInclusive, + sync::Arc, + time::{Duration, Instant}, }; -use core::fmt; use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; use reth_primitives::{IntoRecoveredTransaction, TxHash}; use reth_provider::{BlockIdReader, BlockReader, EvmEnvProvider, ProviderError}; -use reth_rpc_api::EthFilterApiServer; +use reth_rpc_eth_api::EthFilterApiServer; +use reth_rpc_eth_types::{ + logs_utils::{self, append_matching_block_logs}, + EthApiError, EthFilterError, EthStateCache, EthSubscriptionIdProvider, +}; +use reth_rpc_server_types::ToRpcResult; use reth_rpc_types::{ BlockNumHash, Filter, FilterBlockOption, FilterChanges, FilterId, FilteredParams, Log, PendingTransactionFilterKind, }; - use reth_tasks::TaskSpawner; use reth_transaction_pool::{NewSubpoolTransactionStream, PoolTransaction, TransactionPool}; -use std::{ - collections::HashMap, - iter::StepBy, - ops::RangeInclusive, - sync::Arc, - time::{Duration, Instant}, -}; use tokio::{ sync::{mpsc::Receiver, Mutex}, time::MissedTickBehavior, @@ -132,7 +129,7 @@ where ::Transaction: 'static, { /// Returns all the filter changes for the given id, if any - pub async fn filter_changes(&self, id: FilterId) -> Result { + pub async fn filter_changes(&self, id: FilterId) -> Result { let info = self.inner.provider.chain_info()?; let best_number = info.best_number; @@ -140,7 +137,7 @@ where // the last time changes were polled, in other words the best block at last poll + 1 let (start_block, kind) = { let mut filters = self.inner.active_filters.inner.lock().await; - let filter = filters.get_mut(&id).ok_or(FilterError::FilterNotFound(id))?; + let filter = filters.get_mut(&id).ok_or(EthFilterError::FilterNotFound(id))?; if filter.block > best_number { // no new blocks since the last poll @@ -204,16 +201,16 @@ where /// Returns an error if no matching log filter exists. /// /// Handler for `eth_getFilterLogs` - pub async fn filter_logs(&self, id: FilterId) -> Result, FilterError> { + pub async fn filter_logs(&self, id: FilterId) -> Result, EthFilterError> { let filter = { let filters = self.inner.active_filters.inner.lock().await; if let FilterKind::Log(ref filter) = - filters.get(&id).ok_or_else(|| FilterError::FilterNotFound(id.clone()))?.kind + filters.get(&id).ok_or_else(|| EthFilterError::FilterNotFound(id.clone()))?.kind { *filter.clone() } else { // Not a log filter - return Err(FilterError::FilterNotFound(id)) + return Err(EthFilterError::FilterNotFound(id)) } }; @@ -347,7 +344,7 @@ where Pool: TransactionPool + 'static, { /// Returns logs matching given filter object. 
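`filter_changes` above is effectively a cursor over block numbers: each active filter stores the next unpolled block, a poll returns everything up to the current best block, and the cursor then advances to `best + 1`. A simplified sketch of that bookkeeping:

```rust
use std::ops::RangeInclusive;

// Hypothetical, simplified filter; `next_block` plays the role of
// `filter.block` in the handler above.
struct ActiveFilter {
    next_block: u64,
}

fn poll(filter: &mut ActiveFilter, best_number: u64) -> Option<RangeInclusive<u64>> {
    if filter.next_block > best_number {
        // No new blocks since the last poll.
        return None;
    }
    let range = filter.next_block..=best_number;
    // The next poll starts right after the current best block.
    filter.next_block = best_number + 1;
    Some(range)
}

fn main() {
    let mut filter = ActiveFilter { next_block: 10 };
    assert_eq!(poll(&mut filter, 12), Some(10..=12));
    assert_eq!(poll(&mut filter, 12), None); // nothing new until the chain advances
}
```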
- async fn logs_for_filter(&self, filter: Filter) -> Result, FilterError> { + async fn logs_for_filter(&self, filter: Filter) -> Result, EthFilterError> { match filter.block_option { FilterBlockOption::AtBlockHash(block_hash) => { // for all matching logs in the block @@ -428,16 +425,16 @@ where from_block: u64, to_block: u64, chain_info: ChainInfo, - ) -> Result, FilterError> { + ) -> Result, EthFilterError> { trace!(target: "rpc::eth::filter", from=from_block, to=to_block, ?filter, "finding logs in range"); let best_number = chain_info.best_number; if to_block < from_block { - return Err(FilterError::InvalidBlockRangeParams) + return Err(EthFilterError::InvalidBlockRangeParams) } if to_block - from_block > self.max_blocks_per_filter { - return Err(FilterError::QueryExceedsMaxBlocks(self.max_blocks_per_filter)) + return Err(EthFilterError::QueryExceedsMaxBlocks(self.max_blocks_per_filter)) } let mut all_logs = Vec::new(); @@ -505,7 +502,7 @@ where // logs of a single block let is_multi_block_range = from_block != to_block; if is_multi_block_range && all_logs.len() > self.max_logs_per_response { - return Err(FilterError::QueryExceedsMaxResults( + return Err(EthFilterError::QueryExceedsMaxResults( self.max_logs_per_response, )) } @@ -682,51 +679,6 @@ enum FilterKind { PendingTransaction(PendingTransactionKind), } -/// Errors that can occur in the handler implementation -#[derive(Debug, thiserror::Error)] -pub enum FilterError { - #[error("filter not found")] - FilterNotFound(FilterId), - #[error("invalid block range params")] - InvalidBlockRangeParams, - #[error("query exceeds max block range {0}")] - QueryExceedsMaxBlocks(u64), - #[error("query exceeds max results {0}")] - QueryExceedsMaxResults(usize), - #[error(transparent)] - EthAPIError(#[from] EthApiError), - /// Error thrown when a spawned task failed to deliver a response. - #[error("internal filter error")] - InternalError, -} - -// convert the error -impl From for jsonrpsee::types::error::ErrorObject<'static> { - fn from(err: FilterError) -> Self { - match err { - FilterError::FilterNotFound(_) => rpc_error_with_code( - jsonrpsee::types::error::INVALID_PARAMS_CODE, - "filter not found", - ), - err @ FilterError::InternalError => { - rpc_error_with_code(jsonrpsee::types::error::INTERNAL_ERROR_CODE, err.to_string()) - } - FilterError::EthAPIError(err) => err.into(), - err @ FilterError::InvalidBlockRangeParams | - err @ FilterError::QueryExceedsMaxBlocks(_) | - err @ FilterError::QueryExceedsMaxResults(_) => { - rpc_error_with_code(jsonrpsee::types::error::INVALID_PARAMS_CODE, err.to_string()) - } - } - } -} - -impl From for FilterError { - fn from(err: ProviderError) -> Self { - Self::EthAPIError(err.into()) - } -} - /// An iterator that yields _inclusive_ block ranges of a given step size #[derive(Debug)] struct BlockRangeInclusiveIter { diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs new file mode 100644 index 000000000..2ce6c7ed2 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -0,0 +1,34 @@ +//! Contains RPC handler implementations specific to blocks. 
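The `BlockRangeInclusiveIter` kept at the end of `filter.rs` above walks a large `from..=to` span in inclusive windows, so `get_logs_in_block_range` never asks the provider for more than one step's worth of blocks at a time. A sketch of the windowing arithmetic as a plain function rather than an `Iterator` impl:

```rust
/// Splits `from..=to` into inclusive windows of at most `step` blocks.
/// Assumes `step >= 1`.
fn chunked(from: u64, to: u64, step: u64) -> Vec<(u64, u64)> {
    debug_assert!(step >= 1);
    let mut windows = Vec::new();
    let mut start = from;
    while start <= to {
        let end = (start + step - 1).min(to);
        windows.push((start, end));
        start = end + 1;
    }
    windows
}

fn main() {
    // 100..=250 with a step of 100 yields two inclusive windows.
    assert_eq!(chunked(100, 250, 100), vec![(100, 199), (200, 250)]);
}
```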
+ +use reth_provider::{BlockReaderIdExt, HeaderProvider}; +use reth_rpc_eth_api::helpers::{EthBlocks, LoadBlock, LoadPendingBlock, SpawnBlocking}; +use reth_rpc_eth_types::EthStateCache; + +use crate::EthApi; + +impl EthBlocks for EthApi +where + Self: LoadBlock, + Provider: HeaderProvider, +{ + #[inline] + fn provider(&self) -> impl reth_provider::HeaderProvider { + self.inner.provider() + } +} + +impl LoadBlock for EthApi +where + Self: LoadPendingBlock + SpawnBlocking, + Provider: BlockReaderIdExt, +{ + #[inline] + fn provider(&self) -> impl BlockReaderIdExt { + self.inner.provider() + } + + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs new file mode 100644 index 000000000..c442c46b4 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -0,0 +1,27 @@ +//! Contains RPC handler implementations specific to endpoints that call/execute within evm. + +use reth_evm::ConfigureEvm; +use reth_rpc_eth_api::helpers::{Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}; + +use crate::EthApi; + +impl EthCall for EthApi where + Self: Call + LoadPendingBlock +{ +} + +impl Call for EthApi +where + Self: LoadState + SpawnBlocking, + EvmConfig: ConfigureEvm, +{ + #[inline] + fn call_gas_limit(&self) -> u64 { + self.inner.gas_cap() + } + + #[inline] + fn evm_config(&self) -> &impl ConfigureEvm { + self.inner.evm_config() + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/fees.rs b/crates/rpc/rpc/src/eth/helpers/fees.rs new file mode 100644 index 000000000..7380f4ea2 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/fees.rs @@ -0,0 +1,39 @@ +//! Contains RPC handler implementations for fee history. + +use reth_provider::{BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider}; + +use reth_rpc_eth_api::helpers::{EthFees, LoadBlock, LoadFee}; +use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; + +use crate::EthApi; + +impl EthFees for EthApi where + Self: LoadFee +{ +} + +impl LoadFee for EthApi +where + Self: LoadBlock, + Provider: BlockReaderIdExt + HeaderProvider + ChainSpecProvider, +{ + #[inline] + fn provider(&self) -> impl BlockIdReader + HeaderProvider + ChainSpecProvider { + self.inner.provider() + } + + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } + + #[inline] + fn gas_oracle(&self) -> &GasPriceOracle { + self.inner.gas_oracle() + } + + #[inline] + fn fee_history_cache(&self) -> &FeeHistoryCache { + self.inner.fee_history_cache() + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/mod.rs b/crates/rpc/rpc/src/eth/helpers/mod.rs new file mode 100644 index 000000000..4c86e2b5f --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/mod.rs @@ -0,0 +1,18 @@ +//! The entire implementation of the namespace is quite large, hence it is divided across several +//! files. + +pub mod signer; + +mod block; +mod call; +mod fees; +#[cfg(feature = "optimism")] +pub mod optimism; +#[cfg(not(feature = "optimism"))] +mod pending_block; +#[cfg(not(feature = "optimism"))] +mod receipt; +mod spec; +mod state; +mod trace; +mod transaction; diff --git a/crates/rpc/rpc/src/eth/helpers/optimism.rs b/crates/rpc/rpc/src/eth/helpers/optimism.rs new file mode 100644 index 000000000..751c06463 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/optimism.rs @@ -0,0 +1,231 @@ +//! Loads and formats OP transaction RPC response. 
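The new `eth/helpers` modules above all share one shape: a behavior trait (`EthBlocks`, `Call`, `LoadFee`, and so on) carries default method bodies and only asks the implementor for accessors, so `EthApi` satisfies each trait with one-line forwards to `self.inner`. A toy version of that layering:

```rust
// Behavior trait: the default logic is built purely on the accessor.
trait LoadBlock {
    fn cache(&self) -> &[u64];

    fn block(&self, number: usize) -> Option<u64> {
        // Default implementation; implementors rarely override this.
        self.cache().get(number).copied()
    }
}

struct Api {
    cache: Vec<u64>,
}

impl LoadBlock for Api {
    #[inline]
    fn cache(&self) -> &[u64] {
        &self.cache // one-line forwarding, as in the impls above
    }
}

fn main() {
    let api = Api { cache: vec![100, 200] };
    assert_eq!(api.block(1), Some(200));
    assert_eq!(api.block(5), None);
}
```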
+ +use jsonrpsee_types::error::ErrorObject; +use reth_evm::ConfigureEvm; +use reth_evm_optimism::RethL1BlockInfo; +use reth_primitives::{ + BlockNumber, Receipt, TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, B256, +}; +use reth_provider::{ + BlockIdReader, BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, ExecutionOutcome, + StateProviderFactory, +}; +use reth_rpc_types::{AnyTransactionReceipt, OptimismTransactionReceiptFields, ToRpcError}; +use reth_transaction_pool::TransactionPool; +use revm::L1BlockInfo; +use revm_primitives::{BlockEnv, ExecutionResult}; + +use reth_rpc_eth_api::helpers::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; +use reth_rpc_eth_types::{EthApiError, EthResult, EthStateCache, PendingBlock, ReceiptBuilder}; +use reth_rpc_server_types::result::internal_rpc_err; + +use crate::EthApi; + +/// L1 fee and data gas for a transaction, along with the L1 block info. +#[derive(Debug, Default, Clone)] +pub struct OptimismTxMeta { + /// The L1 block info. + pub l1_block_info: Option, + /// The L1 fee for the block. + pub l1_fee: Option, + /// The L1 data gas for the block. + pub l1_data_gas: Option, +} + +impl OptimismTxMeta { + /// Creates a new [`OptimismTxMeta`]. + pub const fn new( + l1_block_info: Option, + l1_fee: Option, + l1_data_gas: Option, + ) -> Self { + Self { l1_block_info, l1_fee, l1_data_gas } + } +} + +impl EthApi +where + Provider: BlockIdReader + ChainSpecProvider, +{ + /// Builds [`OptimismTxMeta`] object using the provided [`TransactionSigned`], L1 block + /// info and block timestamp. The [`L1BlockInfo`] is used to calculate the l1 fee and l1 data + /// gas for the transaction. If the [`L1BlockInfo`] is not provided, the meta info will be + /// empty. + pub fn build_op_tx_meta( + &self, + tx: &TransactionSigned, + l1_block_info: Option, + block_timestamp: u64, + ) -> EthResult { + let Some(l1_block_info) = l1_block_info else { return Ok(OptimismTxMeta::default()) }; + + let (l1_fee, l1_data_gas) = if !tx.is_deposit() { + let envelope_buf = tx.envelope_encoded(); + + let inner_l1_fee = l1_block_info + .l1_tx_data_fee( + &self.inner.provider().chain_spec(), + block_timestamp, + &envelope_buf, + tx.is_deposit(), + ) + .map_err(|_| OptimismEthApiError::L1BlockFeeError)?; + let inner_l1_data_gas = l1_block_info + .l1_data_gas(&self.inner.provider().chain_spec(), block_timestamp, &envelope_buf) + .map_err(|_| OptimismEthApiError::L1BlockGasError)?; + ( + Some(inner_l1_fee.saturating_to::()), + Some(inner_l1_data_gas.saturating_to::()), + ) + } else { + (None, None) + }; + + Ok(OptimismTxMeta::new(Some(l1_block_info), l1_fee, l1_data_gas)) + } +} + +impl LoadReceipt for EthApi +where + Self: Send + Sync, + Provider: BlockIdReader + ChainSpecProvider, +{ + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } + + async fn build_transaction_receipt( + &self, + tx: TransactionSigned, + meta: TransactionMeta, + receipt: Receipt, + ) -> EthResult { + let (block, receipts) = self + .cache() + .get_block_and_receipts(meta.block_hash) + .await? + .ok_or(EthApiError::UnknownBlockNumber)?; + + let block = block.unseal(); + let l1_block_info = reth_evm_optimism::extract_l1_info(&block).ok(); + let optimism_tx_meta = self.build_op_tx_meta(&tx, l1_block_info, block.timestamp)?; + + let resp_builder = ReceiptBuilder::new(&tx, meta, &receipt, &receipts)?; + let resp_builder = op_receipt_fields(resp_builder, &tx, &receipt, optimism_tx_meta); + + Ok(resp_builder.build()) + } +} + +/// Applies OP specific fields to a receipt. 
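`build_op_tx_meta` above computes L1 cost fields only for non-deposit transactions and returns empty meta for deposits. A hedged sketch of that branch with a made-up cost model (the real arithmetic lives behind `RethL1BlockInfo`):

```rust
// Hypothetical, simplified transaction: only the fields the branch needs.
struct Tx {
    is_deposit: bool,
    envelope_len: u64, // stand-in for the encoded envelope size
}

/// Returns `(l1_fee, l1_data_gas)`; both are `None` for deposits.
fn l1_cost(tx: &Tx, l1_base_fee: u64) -> (Option<u64>, Option<u64>) {
    if tx.is_deposit {
        // Deposit transactions pay no L1 data fee.
        return (None, None);
    }
    // Toy model: 16 gas per envelope byte, priced at the L1 base fee.
    let data_gas = tx.envelope_len * 16;
    (Some(data_gas * l1_base_fee), Some(data_gas))
}

fn main() {
    let deposit = Tx { is_deposit: true, envelope_len: 100 };
    assert_eq!(l1_cost(&deposit, 7), (None, None));

    let user_tx = Tx { is_deposit: false, envelope_len: 100 };
    assert_eq!(l1_cost(&user_tx, 7), (Some(11_200), Some(1_600)));
}
```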
+fn op_receipt_fields( + resp_builder: ReceiptBuilder, + tx: &TransactionSigned, + receipt: &Receipt, + optimism_tx_meta: OptimismTxMeta, +) -> ReceiptBuilder { + let mut op_fields = OptimismTransactionReceiptFields::default(); + + if tx.is_deposit() { + op_fields.deposit_nonce = receipt.deposit_nonce.map(reth_primitives::U64::from); + op_fields.deposit_receipt_version = + receipt.deposit_receipt_version.map(reth_primitives::U64::from); + } else if let Some(l1_block_info) = optimism_tx_meta.l1_block_info { + op_fields.l1_fee = optimism_tx_meta.l1_fee; + op_fields.l1_gas_used = optimism_tx_meta.l1_data_gas.map(|dg| { + dg + l1_block_info.l1_fee_overhead.unwrap_or_default().saturating_to::() + }); + op_fields.l1_fee_scalar = Some(f64::from(l1_block_info.l1_base_fee_scalar) / 1_000_000.0); + op_fields.l1_gas_price = Some(l1_block_info.l1_base_fee.saturating_to()); + } + + resp_builder.add_other_fields(op_fields.into()) +} + +impl LoadPendingBlock + for EthApi +where + Self: SpawnBlocking, + Provider: BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, + Pool: TransactionPool, + EvmConfig: ConfigureEvm, +{ + #[inline] + fn provider( + &self, + ) -> impl BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory { + self.inner.provider() + } + + #[inline] + fn pool(&self) -> impl reth_transaction_pool::TransactionPool { + self.inner.pool() + } + + #[inline] + fn pending_block(&self) -> &tokio::sync::Mutex> { + self.inner.pending_block() + } + + #[inline] + fn evm_config(&self) -> &impl reth_evm::ConfigureEvm { + self.inner.evm_config() + } + + fn assemble_receipt( + &self, + tx: &TransactionSignedEcRecovered, + result: ExecutionResult, + cumulative_gas_used: u64, + ) -> Receipt { + Receipt { + tx_type: tx.tx_type(), + success: result.is_success(), + cumulative_gas_used, + logs: result.into_logs().into_iter().map(Into::into).collect(), + deposit_nonce: None, + deposit_receipt_version: None, + } + } + + fn receipts_root( + &self, + _block_env: &BlockEnv, + execution_outcome: &ExecutionOutcome, + block_number: BlockNumber, + ) -> B256 { + execution_outcome + .optimism_receipts_root_slow( + block_number, + self.provider().chain_spec().as_ref(), + _block_env.timestamp.to::(), + ) + .expect("Block is present") + } +} + +/// Optimism specific errors, that extend [`EthApiError`]. +#[derive(Debug, thiserror::Error)] +pub enum OptimismEthApiError { + /// Thrown when calculating L1 gas fee. + #[error("failed to calculate l1 gas fee")] + L1BlockFeeError, + /// Thrown when calculating L1 gas used + #[error("failed to calculate l1 gas used")] + L1BlockGasError, +} + +impl ToRpcError for OptimismEthApiError { + fn to_rpc_error(&self) -> ErrorObject<'static> { + match self { + Self::L1BlockFeeError | Self::L1BlockGasError => internal_rpc_err(self.to_string()), + } + } +} + +impl From for EthApiError { + fn from(err: OptimismEthApiError) -> Self { + Self::other(err) + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs new file mode 100644 index 000000000..d1a47da75 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -0,0 +1,40 @@ +//! Support for building a pending block with transactions from local view of mempool. 
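`OptimismEthApiError` above shows the error layering used throughout these helpers: a namespace-local error type carries its own messages and converts into the general `EthApiError` through a catch-all constructor. A dependency-free sketch of that shape (the real types use `thiserror` and `ToRpcError`):

```rust
use std::fmt;

// Namespace-local error, as `OptimismEthApiError` above.
#[derive(Debug)]
enum OpError {
    L1BlockFee,
    L1BlockGas,
}

impl fmt::Display for OpError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::L1BlockFee => write!(f, "failed to calculate l1 gas fee"),
            Self::L1BlockGas => write!(f, "failed to calculate l1 gas used"),
        }
    }
}

// General API error with a catch-all variant, standing in for `EthApiError::other`.
#[derive(Debug)]
enum ApiError {
    Other(String),
}

impl From<OpError> for ApiError {
    fn from(err: OpError) -> Self {
        Self::Other(err.to_string())
    }
}

fn main() {
    let err: ApiError = OpError::L1BlockFee.into();
    println!("{err:?}");
}
```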
+ +use reth_evm::ConfigureEvm; +use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; +use reth_rpc_eth_api::helpers::{LoadPendingBlock, SpawnBlocking}; +use reth_rpc_eth_types::PendingBlock; +use reth_transaction_pool::TransactionPool; + +use crate::EthApi; + +impl LoadPendingBlock + for EthApi +where + Self: SpawnBlocking, + Provider: BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory, + Pool: TransactionPool, + EvmConfig: reth_evm::ConfigureEvm, +{ + #[inline] + fn provider( + &self, + ) -> impl BlockReaderIdExt + EvmEnvProvider + ChainSpecProvider + StateProviderFactory { + self.inner.provider() + } + + #[inline] + fn pool(&self) -> impl TransactionPool { + self.inner.pool() + } + + #[inline] + fn pending_block(&self) -> &tokio::sync::Mutex> { + self.inner.pending_block() + } + + #[inline] + fn evm_config(&self) -> &impl ConfigureEvm { + self.inner.evm_config() + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs new file mode 100644 index 000000000..db1fee781 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -0,0 +1,16 @@ +//! Builds an RPC receipt response w.r.t. data layout of network. + +use reth_rpc_eth_api::helpers::LoadReceipt; +use reth_rpc_eth_types::EthStateCache; + +use crate::EthApi; + +impl LoadReceipt for EthApi +where + Self: Send + Sync, +{ + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } +} diff --git a/crates/rpc/rpc/src/eth/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs similarity index 85% rename from crates/rpc/rpc/src/eth/signer.rs rename to crates/rpc/rpc/src/eth/helpers/signer.rs index 50c06159c..a4cb726a2 100644 --- a/crates/rpc/rpc/src/eth/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -1,49 +1,20 @@ //! An abstraction over ethereum signers. -use crate::eth::error::SignError; +use std::collections::HashMap; + use alloy_dyn_abi::TypedData; use reth_primitives::{ eip191_hash_message, sign_message, Address, Signature, TransactionSigned, B256, }; +use reth_rpc_eth_api::helpers::{signer::Result, EthSigner}; +use reth_rpc_eth_types::SignError; use reth_rpc_types::TypedTransactionRequest; - -use dyn_clone::DynClone; use reth_rpc_types_compat::transaction::to_primitive_transaction; use secp256k1::SecretKey; -use std::collections::HashMap; - -type Result = std::result::Result; - -/// An Ethereum Signer used via RPC. -#[async_trait::async_trait] -pub(crate) trait EthSigner: Send + Sync + DynClone { - /// Returns the available accounts for this signer. - fn accounts(&self) -> Vec
; - - /// Returns `true` whether this signer can sign for this address - fn is_signer_for(&self, addr: &Address) -> bool { - self.accounts().contains(addr) - } - - /// Returns the signature - async fn sign(&self, address: Address, message: &[u8]) -> Result; - - /// signs a transaction request using the given account in request - fn sign_transaction( - &self, - request: TypedTransactionRequest, - address: &Address, - ) -> Result; - - /// Encodes and signs the typed data according EIP-712. Payload must implement Eip712 trait. - fn sign_typed_data(&self, address: Address, payload: &TypedData) -> Result; -} - -dyn_clone::clone_trait_object!(EthSigner); /// Holds developer keys -#[derive(Clone)] -pub(crate) struct DevSigner { +#[derive(Debug, Clone)] +pub struct DevSigner { addresses: Vec
, accounts: HashMap, } @@ -121,9 +92,12 @@ impl EthSigner for DevSigner { #[cfg(test)] mod tests { - use super::*; - use reth_primitives::U256; use std::str::FromStr; + + use reth_primitives::U256; + + use super::*; + fn build_signer() -> DevSigner { let addresses = vec![]; let secret = diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs new file mode 100644 index 000000000..a93d662ea --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -0,0 +1,65 @@ +use reth_chainspec::ChainInfo; +use reth_errors::{RethError, RethResult}; +use reth_evm::ConfigureEvm; +use reth_network_api::NetworkInfo; +use reth_primitives::{Address, U256, U64}; +use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; +use reth_rpc_eth_api::helpers::EthApiSpec; +use reth_rpc_types::{SyncInfo, SyncStatus}; +use reth_transaction_pool::TransactionPool; + +use crate::EthApi; + +impl EthApiSpec for EthApi +where + Pool: TransactionPool + 'static, + Provider: + BlockReaderIdExt + ChainSpecProvider + StateProviderFactory + EvmEnvProvider + 'static, + Network: NetworkInfo + 'static, + EvmConfig: ConfigureEvm, +{ + /// Returns the current ethereum protocol version. + /// + /// Note: This returns an [`U64`], since this should return as hex string. + async fn protocol_version(&self) -> RethResult { + let status = self.network().network_status().await.map_err(RethError::other)?; + Ok(U64::from(status.protocol_version)) + } + + /// Returns the chain id + fn chain_id(&self) -> U64 { + U64::from(self.network().chain_id()) + } + + /// Returns the current info for the chain + fn chain_info(&self) -> RethResult { + Ok(self.provider().chain_info()?) + } + + fn accounts(&self) -> Vec
{ + self.inner.signers().read().iter().flat_map(|s| s.accounts()).collect() + } + + fn is_syncing(&self) -> bool { + self.network().is_syncing() + } + + /// Returns the [`SyncStatus`] of the network + fn sync_status(&self) -> RethResult { + let status = if self.is_syncing() { + let current_block = U256::from( + self.provider().chain_info().map(|info| info.best_number).unwrap_or_default(), + ); + SyncStatus::Info(SyncInfo { + starting_block: self.inner.starting_block(), + current_block, + highest_block: current_block, + warp_chunks_amount: None, + warp_chunks_processed: None, + }) + } else { + SyncStatus::None + }; + Ok(status) + } +} diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs new file mode 100644 index 000000000..d3a99d2f8 --- /dev/null +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -0,0 +1,113 @@ +//! Contains RPC handler implementations specific to state. + +use reth_provider::StateProviderFactory; +use reth_transaction_pool::TransactionPool; + +use reth_rpc_eth_api::helpers::{EthState, LoadState, SpawnBlocking}; +use reth_rpc_eth_types::EthStateCache; + +use crate::EthApi; + +impl EthState for EthApi +where + Self: LoadState + SpawnBlocking, +{ + fn max_proof_window(&self) -> u64 { + self.eth_proof_window() + } +} + +impl LoadState for EthApi +where + Provider: StateProviderFactory, + Pool: TransactionPool, +{ + #[inline] + fn provider(&self) -> impl StateProviderFactory { + self.inner.provider() + } + + #[inline] + fn cache(&self) -> &EthStateCache { + self.inner.cache() + } + + #[inline] + fn pool(&self) -> impl TransactionPool { + self.inner.pool() + } +} + +#[cfg(test)] +mod tests { + use std::collections::HashMap; + + use reth_evm_ethereum::EthEvmConfig; + use reth_primitives::{ + constants::ETHEREUM_BLOCK_GAS_LIMIT, Address, StorageKey, StorageValue, U256, + }; + use reth_provider::test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}; + use reth_rpc_eth_api::helpers::EthState; + use reth_rpc_eth_types::{ + EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, GasPriceOracle, + }; + use reth_rpc_server_types::constants::{DEFAULT_ETH_PROOF_WINDOW, DEFAULT_PROOF_PERMITS}; + use reth_tasks::pool::BlockingTaskPool; + use reth_transaction_pool::test_utils::testing_pool; + + use super::*; + + #[tokio::test] + async fn test_storage() { + // === Noop === + let pool = testing_pool(); + let evm_config = EthEvmConfig::default(); + + let cache = EthStateCache::spawn(NoopProvider::default(), Default::default(), evm_config); + let eth_api = EthApi::new( + NoopProvider::default(), + pool.clone(), + (), + cache.clone(), + GasPriceOracle::new(NoopProvider::default(), Default::default(), cache.clone()), + ETHEREUM_BLOCK_GAS_LIMIT, + DEFAULT_ETH_PROOF_WINDOW, + BlockingTaskPool::build().expect("failed to build tracing pool"), + FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()), + evm_config, + None, + DEFAULT_PROOF_PERMITS, + ); + let address = Address::random(); + let storage = eth_api.storage_at(address, U256::ZERO.into(), None).await.unwrap(); + assert_eq!(storage, U256::ZERO.to_be_bytes()); + + // === Mock === + let mock_provider = MockEthProvider::default(); + let storage_value = StorageValue::from(1337); + let storage_key = StorageKey::random(); + let storage = HashMap::from([(storage_key, storage_value)]); + let account = ExtendedAccount::new(0, U256::ZERO).extend_storage(storage); + mock_provider.add_account(address, account); + + let cache = EthStateCache::spawn(mock_provider.clone(), Default::default(), evm_config); 
+        let eth_api = EthApi::new(
+            mock_provider.clone(),
+            pool,
+            (),
+            cache.clone(),
+            GasPriceOracle::new(mock_provider, Default::default(), cache.clone()),
+            ETHEREUM_BLOCK_GAS_LIMIT,
+            DEFAULT_ETH_PROOF_WINDOW,
+            BlockingTaskPool::build().expect("failed to build tracing pool"),
+            FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()),
+            evm_config,
+            None,
+            DEFAULT_PROOF_PERMITS,
+        );
+
+        let storage_key: U256 = storage_key.into();
+        let storage = eth_api.storage_at(address, storage_key.into(), None).await.unwrap();
+        assert_eq!(storage, storage_value.to_be_bytes());
+    }
+}
diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs
new file mode 100644
index 000000000..fe1ee9f13
--- /dev/null
+++ b/crates/rpc/rpc/src/eth/helpers/trace.rs
@@ -0,0 +1,17 @@
+//! Contains RPC handler implementations specific to tracing.
+
+use reth_evm::ConfigureEvm;
+use reth_rpc_eth_api::helpers::{LoadState, Trace};
+
+use crate::EthApi;
+
+impl<Provider, Pool, Network, EvmConfig> Trace for EthApi<Provider, Pool, Network, EvmConfig>
+where
+    Self: LoadState,
+    EvmConfig: ConfigureEvm,
+{
+    #[inline]
+    fn evm_config(&self) -> &impl ConfigureEvm {
+        self.inner.evm_config()
+    }
+}
diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs
new file mode 100644
index 000000000..872af0cee
--- /dev/null
+++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs
@@ -0,0 +1,128 @@
+//! Contains RPC handler implementations specific to transactions
+
+use std::sync::Arc;
+
+use reth_provider::{BlockReaderIdExt, TransactionsProvider};
+use reth_rpc_eth_api::{
+    helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking},
+    RawTransactionForwarder,
+};
+use reth_rpc_eth_types::EthStateCache;
+use reth_transaction_pool::TransactionPool;
+
+use crate::EthApi;
+
+impl<Provider, Pool, Network, EvmConfig> EthTransactions
+    for EthApi<Provider, Pool, Network, EvmConfig>
+where
+    Self: LoadTransaction,
+    Pool: TransactionPool + 'static,
+    Provider: BlockReaderIdExt,
+{
+    #[inline]
+    fn provider(&self) -> impl BlockReaderIdExt {
+        self.inner.provider()
+    }
+
+    #[inline]
+    fn raw_tx_forwarder(&self) -> Option<Arc<dyn RawTransactionForwarder>> {
+        self.inner.raw_tx_forwarder()
+    }
+
+    #[inline]
+    fn signers(&self) -> &parking_lot::RwLock<Vec<Box<dyn EthSigner>>> {
+        self.inner.signers()
+    }
+}
+
+impl<Provider, Pool, Network, EvmConfig> LoadTransaction
+    for EthApi<Provider, Pool, Network, EvmConfig>
+where
+    Self: SpawnBlocking,
+    Provider: TransactionsProvider,
+    Pool: TransactionPool,
+{
+    type Pool = Pool;
+
+    #[inline]
+    fn provider(&self) -> impl reth_provider::TransactionsProvider {
+        self.inner.provider()
+    }
+
+    #[inline]
+    fn cache(&self) -> &EthStateCache {
+        self.inner.cache()
+    }
+
+    #[inline]
+    fn pool(&self) -> &Self::Pool {
+        self.inner.pool()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use reth_evm_ethereum::EthEvmConfig;
+    use reth_network_api::noop::NoopNetwork;
+    use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, hex_literal::hex, Bytes};
+    use reth_provider::test_utils::NoopProvider;
+    use reth_rpc_eth_api::helpers::EthTransactions;
+    use reth_rpc_eth_types::{
+        EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, GasPriceOracle,
+    };
+    use reth_rpc_server_types::constants::{DEFAULT_ETH_PROOF_WINDOW, DEFAULT_PROOF_PERMITS};
+    use reth_tasks::pool::BlockingTaskPool;
+    use reth_transaction_pool::{test_utils::testing_pool, TransactionPool};
+
+    use super::*;
+
+    #[tokio::test]
+    async fn send_raw_transaction() {
+        let noop_provider = NoopProvider::default();
+        let noop_network_provider = NoopNetwork::default();
+
+        let pool = testing_pool();
+
+        let evm_config = EthEvmConfig::default();
+        let cache = EthStateCache::spawn(noop_provider, Default::default(), evm_config);
+        let fee_history_cache =
FeeHistoryCache::new(cache.clone(), FeeHistoryCacheConfig::default()); + let eth_api = EthApi::new( + noop_provider, + pool.clone(), + noop_network_provider, + cache.clone(), + GasPriceOracle::new(noop_provider, Default::default(), cache.clone()), + ETHEREUM_BLOCK_GAS_LIMIT, + DEFAULT_ETH_PROOF_WINDOW, + BlockingTaskPool::build().expect("failed to build tracing pool"), + fee_history_cache, + evm_config, + None, + DEFAULT_PROOF_PERMITS, + ); + + // https://etherscan.io/tx/0xa694b71e6c128a2ed8e2e0f6770bddbe52e3bb8f10e8472f9a79ab81497a8b5d + let tx_1 = Bytes::from(hex!("02f871018303579880850555633d1b82520894eee27662c2b8eba3cd936a23f039f3189633e4c887ad591c62bdaeb180c080a07ea72c68abfb8fca1bd964f0f99132ed9280261bdca3e549546c0205e800f7d0a05b4ef3039e9c9b9babc179a1878fb825b5aaf5aed2fa8744854150157b08d6f3")); + + let tx_1_result = eth_api.send_raw_transaction(tx_1).await.unwrap(); + assert_eq!( + pool.len(), + 1, + "expect 1 transactions in the pool, but pool size is {}", + pool.len() + ); + + // https://etherscan.io/tx/0x48816c2f32c29d152b0d86ff706f39869e6c1f01dc2fe59a3c1f9ecf39384694 + let tx_2 = Bytes::from(hex!("02f9043c018202b7843b9aca00850c807d37a08304d21d94ef1c6e67703c7bd7107eed8303fbe6ec2554bf6b881bc16d674ec80000b903c43593564c000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000063e2d99f00000000000000000000000000000000000000000000000000000000000000030b000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000001e0000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000001bc16d674ec80000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000065717fe021ea67801d1088cc80099004b05b64600000000000000000000000000000000000000000000000001bc16d674ec80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002bc02aaa39b223fe8d0a0e5c4f27ead9083c756cc20001f4a0b86991c6218b36c1d19d4a2e9eb0ce3606eb480000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000009e95fd5965fd1f1a6f0d4600000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48000000000000000000000000428dca9537116148616a5a3e44035af17238fe9dc080a0c6ec1e41f5c0b9511c49b171ad4e04c6bb419c74d99fe9891d74126ec6e4e879a032069a753d7a2cfa158df95421724d24c0e9501593c09905abf3699b4a4405ce")); + + let tx_2_result = eth_api.send_raw_transaction(tx_2).await.unwrap(); + assert_eq!( + pool.len(), + 2, + "expect 2 transactions in the pool, but pool size is {}", + pool.len() + ); + + assert!(pool.get(&tx_1_result).is_some(), "tx1 not found in 
the pool"); + assert!(pool.get(&tx_2_result).is_some(), "tx2 not found in the pool"); + } +} diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index 8d8e982c2..4e6a0cbb8 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -1,28 +1,17 @@ -//! `eth` namespace handler implementation. +//! Sever implementation of `eth` namespace API. -mod api; pub mod bundle; -pub mod cache; -pub mod error; -mod filter; -pub mod gas_oracle; -mod id_provider; -mod logs_utils; -mod pubsub; -pub mod revm_utils; -mod signer; -pub mod traits; -pub(crate) mod utils; - -#[cfg(feature = "optimism")] -pub mod optimism; - -pub use api::{ - fee_history::{fee_history_cache_new_blocks_task, FeeHistoryCache, FeeHistoryCacheConfig}, - EthApi, EthApiSpec, EthTransactions, TransactionSource, RPC_DEFAULT_GAS_CAP, -}; +pub mod core; +pub mod filter; +pub mod helpers; +pub mod pubsub; +/// Implementation of `eth` namespace API. pub use bundle::EthBundle; +pub use core::EthApi; pub use filter::{EthFilter, EthFilterConfig}; -pub use id_provider::EthSubscriptionIdProvider; pub use pubsub::EthPubSub; + +pub use helpers::signer::DevSigner; + +pub use reth_rpc_eth_api::RawTransactionForwarder; diff --git a/crates/rpc/rpc/src/eth/optimism.rs b/crates/rpc/rpc/src/eth/optimism.rs deleted file mode 100644 index fb1665b95..000000000 --- a/crates/rpc/rpc/src/eth/optimism.rs +++ /dev/null @@ -1,32 +0,0 @@ -//! Optimism specific types. - -use jsonrpsee::types::ErrorObject; -use reth_rpc_types::ToRpcError; - -use crate::{eth::error::EthApiError, result::internal_rpc_err}; - -/// Eth Optimism Api Error -#[cfg(feature = "optimism")] -#[derive(Debug, thiserror::Error)] -pub enum OptimismEthApiError { - /// Thrown when calculating L1 gas fee - #[error("failed to calculate l1 gas fee")] - L1BlockFeeError, - /// Thrown when calculating L1 gas used - #[error("failed to calculate l1 gas used")] - L1BlockGasError, -} - -impl ToRpcError for OptimismEthApiError { - fn to_rpc_error(&self) -> ErrorObject<'static> { - match self { - Self::L1BlockFeeError | Self::L1BlockGasError => internal_rpc_err(self.to_string()), - } - } -} - -impl From for EthApiError { - fn from(err: OptimismEthApiError) -> Self { - Self::other(err) - } -} diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index fdfa836b9..426923dc4 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -1,9 +1,7 @@ //! 
 //! `eth_` `PubSub` RPC handler implementation
 
-use crate::{
-    eth::logs_utils,
-    result::{internal_rpc_err, invalid_params_rpc_err},
-};
+use std::sync::Arc;
+
 use futures::StreamExt;
 use jsonrpsee::{
     server::SubscriptionMessage, types::ErrorObject, PendingSubscriptionSink, SubscriptionSink,
@@ -11,7 +9,9 @@ use jsonrpsee::{
 use reth_network_api::NetworkInfo;
 use reth_primitives::{IntoRecoveredTransaction, TxHash};
 use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider};
-use reth_rpc_api::EthPubSubApiServer;
+use reth_rpc_eth_api::pubsub::EthPubSubApiServer;
+use reth_rpc_eth_types::logs_utils;
+use reth_rpc_server_types::result::{internal_rpc_err, invalid_params_rpc_err};
 use reth_rpc_types::{
     pubsub::{
         Params, PubSubSyncStatus, SubscriptionKind, SubscriptionResult as EthSubscriptionResult,
@@ -22,7 +22,6 @@ use reth_rpc_types::{
 use reth_tasks::{TaskSpawner, TokioTaskExecutor};
 use reth_transaction_pool::{NewTransactionEvent, TransactionPool};
 use serde::Serialize;
-use std::sync::Arc;
 use tokio_stream::{
     wrappers::{BroadcastStream, ReceiverStream},
     Stream,
@@ -163,7 +162,7 @@ where
             BroadcastStream::new(pubsub.chain_events.subscribe_to_canonical_state());
         // get current sync status
         let mut initial_sync_status = pubsub.network.is_syncing();
-        let current_sub_res = pubsub.sync_status(initial_sync_status).await;
+        let current_sub_res = pubsub.sync_status(initial_sync_status);
 
         // send the current status immediately
         let msg = SubscriptionMessage::from_json(&current_sub_res)
             .map_err(SubscriptionSerializeError::new)?;
@@ -180,7 +179,7 @@
                     initial_sync_status = current_syncing;
 
                     // send a new message now that the status changed
-                    let sync_status = pubsub.sync_status(current_syncing).await;
+                    let sync_status = pubsub.sync_status(current_syncing);
                     let msg = SubscriptionMessage::from_json(&sync_status)
                         .map_err(SubscriptionSerializeError::new)?;
                     if accepted_sink.send(msg).await.is_err() {
@@ -197,10 +196,10 @@
 /// Helper to convert a serde error into an [`ErrorObject`]
 #[derive(Debug, thiserror::Error)]
 #[error("Failed to serialize subscription item: {0}")]
-pub(crate) struct SubscriptionSerializeError(#[from] serde_json::Error);
+pub struct SubscriptionSerializeError(#[from] serde_json::Error);
 
 impl SubscriptionSerializeError {
-    pub(crate) const fn new(err: serde_json::Error) -> Self {
+    const fn new(err: serde_json::Error) -> Self {
         Self(err)
     }
 }
@@ -271,7 +270,7 @@ where
     Provider: BlockReader + 'static,
 {
     /// Returns the current sync status for the `syncing` subscription
-    async fn sync_status(&self, is_syncing: bool) -> EthSubscriptionResult {
+    fn sync_status(&self, is_syncing: bool) -> EthSubscriptionResult {
         if is_syncing {
             let current_block =
                 self.provider.chain_info().map(|info| info.best_number).unwrap_or_default();
diff --git a/crates/rpc/rpc/src/eth/traits.rs b/crates/rpc/rpc/src/eth/traits.rs
deleted file mode 100644
index 0f73ded3c..000000000
--- a/crates/rpc/rpc/src/eth/traits.rs
+++ /dev/null
@@ -1,13 +0,0 @@
-//! Additional helper traits that allow for more customization.
-
-use crate::eth::error::EthResult;
-use std::fmt;
-
-/// A trait that allows for forwarding raw transactions.
-///
-/// For example to a sequencer.
-#[async_trait::async_trait]
-pub trait RawTransactionForwarder: fmt::Debug + Send + Sync + 'static {
-    /// Forwards raw transaction bytes for `eth_sendRawTransaction`
-    async fn forward_raw_transaction(&self, raw: &[u8]) -> EthResult<()>;
-}
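The trait itself now lives in `reth_rpc_eth_api` and is re-exported from `eth/mod.rs` above. As a hedged illustration of how a consumer might implement it, here is a minimal sketch; `CollectingForwarder` is a hypothetical name, the import paths are assumed from the other hunks in this diff, and a production forwarder would relay the bytes to a sequencer endpoint rather than buffering them:

```rust
use std::sync::Mutex;

use reth_rpc_eth_api::RawTransactionForwarder;
use reth_rpc_eth_types::EthResult;

/// Hypothetical forwarder that records raw transaction bytes, e.g. for tests.
#[derive(Debug, Default)]
struct CollectingForwarder {
    seen: Mutex<Vec<Vec<u8>>>,
}

#[async_trait::async_trait]
impl RawTransactionForwarder for CollectingForwarder {
    async fn forward_raw_transaction(&self, raw: &[u8]) -> EthResult<()> {
        // A real implementation would POST `raw` to a sequencer here.
        self.seen.lock().unwrap().push(raw.to_vec());
        Ok(())
    }
}
```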
diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs
index 17dc8fcb8..eec14981b 100644
--- a/crates/rpc/rpc/src/lib.rs
+++ b/crates/rpc/rpc/src/lib.rs
@@ -46,7 +46,7 @@ mod web3;
 pub use admin::AdminApi;
 pub use debug::DebugApi;
 pub use engine::{EngineApi, EngineEthApi};
-pub use eth::{EthApi, EthApiSpec, EthFilter, EthPubSub, EthSubscriptionIdProvider};
+pub use eth::{EthApi, EthBundle, EthFilter, EthPubSub};
 pub use net::NetApi;
 pub use otterscan::OtterscanApi;
 pub use reth::RethApi;
@@ -54,4 +54,3 @@ pub use rpc::RPCApi;
 pub use trace::TraceApi;
 pub use txpool::TxPoolApi;
 pub use web3::Web3Api;
-pub mod result;
diff --git a/crates/rpc/rpc/src/net.rs b/crates/rpc/rpc/src/net.rs
index 8e6615a28..79e85ac48 100644
--- a/crates/rpc/rpc/src/net.rs
+++ b/crates/rpc/rpc/src/net.rs
@@ -1,8 +1,8 @@
-use crate::eth::EthApiSpec;
 use jsonrpsee::core::RpcResult as Result;
 use reth_network_api::PeersInfo;
 use reth_primitives::U64;
 use reth_rpc_api::NetApiServer;
+use reth_rpc_eth_api::helpers::EthApiSpec;
 use reth_rpc_types::PeerCount;
 
 /// `Net` API implementation.
diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs
index e658402f3..14492f957 100644
--- a/crates/rpc/rpc/src/otterscan.rs
+++ b/crates/rpc/rpc/src/otterscan.rs
@@ -1,20 +1,27 @@
 use alloy_primitives::Bytes;
 use async_trait::async_trait;
+use futures::future::BoxFuture;
 use jsonrpsee::core::RpcResult;
-use revm_inspectors::transfer::{TransferInspector, TransferKind};
-use revm_primitives::ExecutionResult;
-
 use reth_primitives::{Address, BlockId, BlockNumberOrTag, TxHash, B256};
 use reth_rpc_api::{EthApiServer, OtterscanServer};
+use reth_rpc_eth_api::helpers::TraceExt;
+use reth_rpc_eth_types::EthApiError;
+use reth_rpc_server_types::result::internal_rpc_err;
 use reth_rpc_types::{
-    trace::otterscan::{
-        BlockDetails, ContractCreator, InternalOperation, OperationType, OtsBlockTransactions,
-        OtsReceipt, OtsTransactionReceipt, TraceEntry, TransactionsWithReceipts,
+    trace::{
+        otterscan::{
+            BlockDetails, ContractCreator, InternalOperation, OperationType, OtsBlockTransactions,
+            OtsReceipt, OtsTransactionReceipt, TraceEntry, TransactionsWithReceipts,
+        },
+        parity::{Action, CreateAction, CreateOutput, TraceOutput},
     },
-    BlockTransactions, Transaction,
+    BlockTransactions, Header,
 };
-
-use crate::{eth::EthTransactions, result::internal_rpc_err};
+use revm_inspectors::{
+    tracing::{types::CallTraceNode, TracingInspectorConfig},
+    transfer::{TransferInspector, TransferKind},
+};
+use revm_primitives::ExecutionResult;
 
 const API_LEVEL: u64 = 8;
 
@@ -24,6 +31,41 @@ pub struct OtterscanApi<Eth> {
     eth: Eth,
 }
 
+/// Performs a binary search within a given block range to find the desired block number.
+///
+/// The binary search is performed by calling the provided asynchronous `check` closure on the
+/// blocks of the range. The closure should return a future representing the result of performing
+/// the desired logic at a given block. The future resolves to a `bool` where:
+/// - `true` indicates that the condition is matched, but a lower block may also satisfy it, so
+///   the search continues in the lower half of the range.
+/// - `false` indicates that the condition was not matched, so the target is not in the current
+///   block and the search continues in a higher range.
+///
+/// Args:
+/// - `low`: The lower bound of the block range (inclusive).
+/// - `high`: The upper bound of the block range (inclusive).
+/// - `check`: A closure that performs the desired logic at a given block.
+async fn binary_search<'a, F>(low: u64, high: u64, check: F) -> RpcResult<u64>
+where
+    F: Fn(u64) -> BoxFuture<'a, RpcResult<bool>>,
+{
+    let mut low = low;
+    let mut high = high;
+    let mut num = high;
+
+    while low <= high {
+        let mid = (low + high) / 2;
+        if check(mid).await? {
+            high = mid - 1;
+            num = mid;
+        } else {
+            low = mid + 1
+        }
+    }
+
+    Ok(num)
+}
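For reference, a self-contained sketch of the same lowest-`true` search over a monotonic async predicate (illustrative names; assumes only the `futures` and `tokio` crates). Unlike the helper above, this variant also sidesteps the potential `low + high` overflow and the `mid - 1` underflow at `mid == 0`:

```rust
use futures::future::BoxFuture;

/// Finds the lowest `x` in `low..=high` for which `check(x)` is true,
/// or `high` if the predicate never fires.
async fn lowest_true<'a, F>(mut low: u64, mut high: u64, check: F) -> u64
where
    F: Fn(u64) -> BoxFuture<'a, bool>,
{
    let mut num = high;
    while low <= high {
        let mid = low + (high - low) / 2; // cannot overflow
        if check(mid).await {
            // condition holds at `mid`; remember it and keep searching lower
            num = mid;
            if mid == 0 {
                break; // avoid `0 - 1` underflow
            }
            high = mid - 1;
        } else {
            low = mid + 1;
        }
    }
    num
}

#[tokio::main]
async fn main() {
    // e.g. "first block at which some monotonic condition starts to hold"
    let first = lowest_true(1, 10, |mid| Box::pin(async move { mid >= 7 })).await;
    assert_eq!(first, 7);
}
```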
+
 impl<Eth> OtterscanApi<Eth> {
     /// Creates a new instance of `Otterscan`.
     pub const fn new(eth: Eth) -> Self {
@@ -34,8 +76,13 @@ impl<Eth> OtterscanApi<Eth> {
 #[async_trait]
 impl<Eth> OtterscanServer for OtterscanApi<Eth>
 where
-    Eth: EthApiServer + EthTransactions,
+    Eth: EthApiServer + TraceExt + 'static,
 {
+    /// Handler for `{ots,erigon}_getHeaderByNumber`
+    async fn get_header_by_number(&self, block_number: u64) -> RpcResult<Option<Header>> {
+        self.eth.header_by_number(BlockNumberOrTag::Number(block_number)).await
+    }
+
     /// Handler for `ots_hasCode`
     async fn has_code(&self, address: Address, block_number: Option<BlockId>) -> RpcResult<bool> {
         self.eth.get_code(address, block_number).await.map(|code| !code.is_empty())
@@ -90,16 +137,39 @@ where
     }
 
     /// Handler for `ots_traceTransaction`
-    async fn trace_transaction(&self, _tx_hash: TxHash) -> RpcResult<TraceEntry> {
-        Err(internal_rpc_err("unimplemented"))
+    async fn trace_transaction(&self, tx_hash: TxHash) -> RpcResult<Option<Vec<TraceEntry>>> {
+        let traces = self
+            .eth
+            .spawn_trace_transaction_in_block(
+                tx_hash,
+                TracingInspectorConfig::default_parity(),
+                move |_tx_info, inspector, _, _| Ok(inspector.into_traces().into_nodes()),
+            )
+            .await?
+            .map(|traces| {
+                traces
+                    .into_iter()
+                    .map(|CallTraceNode { trace, .. }| TraceEntry {
+                        r#type: if trace.is_selfdestruct() {
+                            "SELFDESTRUCT".to_string()
+                        } else {
+                            trace.kind.to_string()
+                        },
+                        depth: trace.depth as u32,
+                        from: trace.caller,
+                        to: trace.address,
+                        value: trace.value,
+                        input: trace.data,
+                        output: trace.output,
+                    })
+                    .collect::<Vec<_>>()
+            });
+        Ok(traces)
     }
 
     /// Handler for `ots_getBlockDetails`
-    async fn get_block_details(
-        &self,
-        block_number: BlockNumberOrTag,
-    ) -> RpcResult<Option<BlockDetails>> {
-        let block = self.eth.block_by_number(block_number, true).await?;
+    async fn get_block_details(&self, block_number: u64) -> RpcResult<Option<BlockDetails>> {
+        let block = self.eth.block_by_number(BlockNumberOrTag::Number(block_number), true).await?;
         Ok(block.map(Into::into))
     }
 
@@ -112,11 +182,12 @@ where
     /// Handler for `getBlockTransactions`
     async fn get_block_transactions(
         &self,
-        block_number: BlockNumberOrTag,
+        block_number: u64,
         page_number: usize,
         page_size: usize,
     ) -> RpcResult<OtsBlockTransactions> {
         // retrieve full block and its receipts
+        let block_number = BlockNumberOrTag::Number(block_number);
         let block = self.eth.block_by_number(block_number, true);
         let receipts = self.eth.block_receipts(BlockId::Number(block_number));
         let (block, receipts) = futures::try_join!(block, receipts)?;
@@ -180,7 +251,7 @@ where
     async fn search_transactions_before(
         &self,
         _address: Address,
-        _block_number: BlockNumberOrTag,
+        _block_number: u64,
         _page_size: usize,
     ) -> RpcResult<TransactionsWithReceipts> {
         Err(internal_rpc_err("unimplemented"))
@@ -190,7 +261,7 @@
     async fn search_transactions_after(
         &self,
         _address: Address,
-        _block_number: BlockNumberOrTag,
+        _block_number: u64,
         _page_size: usize,
     ) -> RpcResult<TransactionsWithReceipts> {
         Err(internal_rpc_err("unimplemented"))
     }
 
@@ -199,14 +270,130 @@
     /// Handler for `getTransactionBySenderAndNonce`
     async fn get_transaction_by_sender_and_nonce(
         &self,
-        _sender: Address,
-        _nonce: u64,
-    ) -> RpcResult<Option<Transaction>> {
-        Err(internal_rpc_err("unimplemented"))
+        sender: Address,
+        nonce: u64,
+    ) -> RpcResult<Option<TxHash>> {
+        // Check if the sender is a contract
+        if self.has_code(sender, None).await? {
+            return Ok(None)
+        }
+
+        let highest =
+            EthApiServer::transaction_count(&self.eth, sender, None).await?.saturating_to::<u64>();
+
+        // If the nonce is higher than or equal to the highest nonce, the transaction is either
+        // pending or does not exist.
+        if nonce >= highest {
+            return Ok(None)
+        }
+
+        // Perform a binary search over the block range to find the block in which the sender's
+        // nonce reached the requested nonce.
+        let num = binary_search(1, self.eth.block_number()?.saturating_to(), |mid| {
+            Box::pin(async move {
+                let mid_nonce =
+                    EthApiServer::transaction_count(&self.eth, sender, Some(mid.into()))
+                        .await?
+                        .saturating_to::<u64>();
+
+                // `transaction_count` returns the nonce *after* the block executed, i.e. the
+                // account's post-state nonce, while we are looking for the transaction whose
+                // pre-state nonce equals the requested one. The comparison must therefore be
+                // strictly greater, not `>=`.
+                Ok(mid_nonce > nonce)
+            })
+        })
+        .await?;
+
+        let Some(BlockTransactions::Full(transactions)) =
+            self.eth.block_by_number(num.into(), true).await?.map(|block| block.inner.transactions)
+        else {
+            return Err(EthApiError::UnknownBlockNumber.into());
+        };
+
+        Ok(transactions
+            .into_iter()
+            .find(|tx| tx.from == sender && tx.nonce == nonce)
+            .map(|tx| tx.hash))
     }
 
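Why the strict `>` in the predicate above matters: `transaction_count` reports the account's post-state nonce, so the first block whose post-state nonce exceeds the target is the block that actually contains the target transaction. A runnable micro-example with made-up nonces:

```rust
fn main() {
    // Post-state nonce of the sender after each of blocks 1..=5,
    // i.e. what `transaction_count(sender, block)` would report.
    let post_state_nonce = [0u64, 2, 2, 5, 5];
    let wanted = 2; // pre-state nonce of the transaction we are looking for

    // Lowest block whose post-state nonce exceeds `wanted`: block 4, which is
    // where the transactions with nonces 2, 3 and 4 executed.
    let block = post_state_nonce.iter().position(|&n| n > wanted).map(|i| i + 1);
    assert_eq!(block, Some(4));

    // A non-strict `>=` would instead pick block 2, which only contains the
    // transactions with nonces 0 and 1.
    let wrong = post_state_nonce.iter().position(|&n| n >= wanted).map(|i| i + 1);
    assert_eq!(wrong, Some(2));
}
```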
     /// Handler for `getContractCreator`
-    async fn get_contract_creator(&self, _address: Address) -> RpcResult<Option<ContractCreator>> {
-        Err(internal_rpc_err("unimplemented"))
+    async fn get_contract_creator(&self, address: Address) -> RpcResult<Option<ContractCreator>> {
+        if !self.has_code(address, None).await? {
+            return Ok(None);
+        }
+
+        let num = binary_search(1, self.eth.block_number()?.saturating_to(), |mid| {
+            Box::pin(
+                async move { Ok(!self.eth.get_code(address, Some(mid.into())).await?.is_empty()) },
+            )
+        })
+        .await?;
+
+        let traces = self
+            .eth
+            .trace_block_with(
+                num.into(),
+                TracingInspectorConfig::default_parity(),
+                |tx_info, inspector, _, _, _| {
+                    Ok(inspector.into_parity_builder().into_localized_transaction_traces(tx_info))
+                },
+            )
+            .await?
+            .map(|traces| {
+                traces
+                    .into_iter()
+                    .flatten()
+                    .map(|tx_trace| {
+                        let trace = tx_trace.trace;
+                        Ok(match (trace.action, trace.result, trace.error) {
+                            (
+                                Action::Create(CreateAction { from: creator, .. }),
+                                Some(TraceOutput::Create(CreateOutput {
+                                    address: contract, ..
+                                })),
+                                None,
+                            ) if contract == address => Some(ContractCreator {
+                                hash: tx_trace
+                                    .transaction_hash
+                                    .ok_or_else(|| EthApiError::TransactionNotFound)?,
+                                creator,
+                            }),
+                            _ => None,
+                        })
+                    })
+                    .filter_map(Result::transpose)
+                    .collect::<Result<Vec<_>, EthApiError>>()
+            })
+            .transpose()?;
+
+        // A contract may be created and then destroyed across multiple transactions; we return
+        // the first transaction found, which is consistent with Etherscan's behavior.
+        let found = traces.and_then(|traces| traces.first().cloned());
+        Ok(found)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[tokio::test]
+    async fn test_binary_search() {
+        // in the middle
+        let num = binary_search(1, 10, |mid| Box::pin(async move { Ok(mid >= 5) })).await;
+        assert_eq!(num, Ok(5));
+
+        // in the upper half
+        let num = binary_search(1, 10, |mid| Box::pin(async move { Ok(mid >= 7) })).await;
+        assert_eq!(num, Ok(7));
+
+        // at the lower bound
+        let num = binary_search(1, 10, |mid| Box::pin(async move { Ok(mid >= 1) })).await;
+        assert_eq!(num, Ok(1));
+
+        // higher than the upper bound
+        let num = binary_search(1, 10, |mid| Box::pin(async move { Ok(mid >= 11) })).await;
+        assert_eq!(num, Ok(10));
    }
}
diff --git a/crates/rpc/rpc/src/reth.rs b/crates/rpc/rpc/src/reth.rs
index 17925b5ab..33dc74920 100644
--- a/crates/rpc/rpc/src/reth.rs
+++ b/crates/rpc/rpc/src/reth.rs
@@ -1,12 +1,13 @@
-use crate::eth::error::{EthApiError, EthResult};
+use std::{collections::HashMap, future::Future, sync::Arc};
+
 use async_trait::async_trait;
 use jsonrpsee::core::RpcResult;
 use reth_errors::RethResult;
 use reth_primitives::{Address, BlockId, U256};
 use reth_provider::{BlockReaderIdExt, ChangeSetReader, StateProviderFactory};
 use reth_rpc_api::RethApiServer;
+use reth_rpc_eth_types::{EthApiError, EthResult};
 use reth_tasks::TaskSpawner;
-use std::{collections::HashMap, future::Future, sync::Arc};
 use tokio::sync::oneshot;
 
 /// `reth` API implementation.
diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs
index e959a367e..ff98194b9 100644
--- a/crates/rpc/rpc/src/trace.rs
+++ b/crates/rpc/rpc/src/trace.rs
@@ -1,18 +1,22 @@
-use crate::eth::{
-    error::{EthApiError, EthResult},
-    revm_utils::prepare_call_env,
-    utils::recover_raw_transaction,
-    EthTransactions,
-};
+use std::{collections::HashSet, sync::Arc};
+
 use async_trait::async_trait;
 use jsonrpsee::core::RpcResult as Result;
+use reth_chainspec::EthereumHardforks;
 use reth_consensus_common::calc::{
     base_block_reward, base_block_reward_pre_merge, block_reward, ommer_reward,
 };
-use reth_primitives::{revm::env::tx_env_with_recovered, BlockId, Bytes, Header, B256, U256};
+use reth_evm::ConfigureEvmEnv;
+use reth_primitives::{BlockId, Bytes, Header, B256, U256};
 use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory};
 use reth_revm::database::StateProviderDatabase;
 use reth_rpc_api::TraceApiServer;
+use reth_rpc_eth_api::helpers::{Call, TraceExt};
+use reth_rpc_eth_types::{
+    error::{EthApiError, EthResult},
+    revm_utils::prepare_call_env,
+    utils::recover_raw_transaction,
+};
 use reth_rpc_types::{
     state::{EvmOverrides, StateOverride},
     trace::{
@@ -32,7 +36,6 @@ use revm_inspectors::{
     opcode::OpcodeGasInspector,
     tracing::{parity::populate_state_diff, TracingInspector, TracingInspectorConfig},
 };
-use std::{collections::HashSet, sync::Arc};
 use tokio::sync::{AcquireError, OwnedSemaphorePermit};
 
 /// `trace` API implementation.
@@ -74,7 +77,7 @@ impl TraceApi { impl TraceApi where Provider: BlockReader + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + 'static, - Eth: EthTransactions + 'static, + Eth: TraceExt + 'static, { /// Executes the given call and returns a number of possible traces for it. pub async fn trace_call(&self, trace_request: TraceCallRequest) -> EthResult { @@ -86,6 +89,10 @@ where let this = self.clone(); self.eth_api() .spawn_with_call_at(trace_request.call, at, overrides, move |db, env| { + // wrapper is hack to get around 'higher-ranked lifetime error', see + // + let db = db.0; + let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; let trace_res = inspector.into_parity_builder().into_trace_results_with_state( &res, @@ -107,8 +114,12 @@ where let tx = recover_raw_transaction(tx)?; let (cfg, block, at) = self.inner.eth_api.evm_env_at(block_id.unwrap_or_default()).await?; - let tx = tx_env_with_recovered(&tx.into_ecrecovered_transaction()); - let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block, tx); + + let env = EnvWithHandlerCfg::new_with_cfg_env( + cfg, + block, + Call::evm_config(self.eth_api()).tx_env(&tx.into_ecrecovered_transaction()), + ); let config = TracingInspectorConfig::from_parity_config(&trace_types); @@ -372,7 +383,7 @@ where }, ); - let block = self.inner.eth_api.block_by_id(block_id); + let block = self.inner.eth_api.block(block_id); let (maybe_traces, maybe_block) = futures::try_join!(traces, block)?; let mut maybe_traces = @@ -455,7 +466,7 @@ where let res = self .inner .eth_api - .trace_block_with_inspector( + .trace_block_inspector( block_id, OpcodeGasInspector::default, move |tx_info, inspector, _res, _, _| { @@ -470,7 +481,7 @@ where let Some(transactions) = res else { return Ok(None) }; - let Some(block) = self.inner.eth_api.block_by_id(block_id).await? else { return Ok(None) }; + let Some(block) = self.inner.eth_api.block(block_id).await? else { return Ok(None) }; Ok(Some(BlockOpcodeGas { block_hash: block.hash(), @@ -548,7 +559,7 @@ where impl TraceApiServer for TraceApi where Provider: BlockReader + StateProviderFactory + EvmEnvProvider + ChainSpecProvider + 'static, - Eth: EthTransactions + 'static, + Eth: TraceExt + 'static, { /// Executes the given call and returns a number of possible traces for it. /// diff --git a/crates/rpc/rpc/src/web3.rs b/crates/rpc/rpc/src/web3.rs index 4ed94ac85..787604e25 100644 --- a/crates/rpc/rpc/src/web3.rs +++ b/crates/rpc/rpc/src/web3.rs @@ -1,9 +1,9 @@ -use crate::result::ToRpcResult; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_network_api::NetworkInfo; use reth_primitives::{keccak256, Bytes, B256}; use reth_rpc_api::Web3ApiServer; +use reth_rpc_server_types::ToRpcResult; /// `web3` API implementation. 
/// diff --git a/crates/stages/api/src/metrics/listener.rs b/crates/stages/api/src/metrics/listener.rs index e703367bc..e37eaa3d7 100644 --- a/crates/stages/api/src/metrics/listener.rs +++ b/crates/stages/api/src/metrics/listener.rs @@ -1,6 +1,6 @@ use crate::{metrics::SyncMetrics, StageCheckpoint, StageId}; use alloy_primitives::BlockNumber; -use reth_primitives_traits::constants::MGAS_TO_GAS; +use reth_primitives_traits::constants::MEGAGAS; use std::{ future::Future, pin::Pin, @@ -83,7 +83,7 @@ impl MetricsListener { } } MetricEvent::ExecutionStageGas { gas } => { - self.sync_metrics.execution_stage.mgas_processed_total.increment(gas / MGAS_TO_GAS) + self.sync_metrics.execution_stage.mgas_processed_total.increment(gas / MEGAGAS) } } } diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 67ef53855..1be468702 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -13,7 +13,6 @@ use reth_provider::{ }; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; -use reth_static_file_types::HighestStaticFiles; use reth_tokio_util::{EventSender, EventStream}; use std::pin::Pin; use tokio::sync::watch; @@ -248,26 +247,9 @@ where /// CAUTION: This method locks the static file producer Mutex, hence can block the thread if the /// lock is occupied. pub fn move_to_static_files(&self) -> RethResult<()> { - let static_file_producer = self.static_file_producer.lock(); - // Copies data from database to static files - let lowest_static_file_height = { - let provider = self.provider_factory.provider()?; - let stages_checkpoints = [StageId::Headers, StageId::Execution, StageId::Bodies] - .into_iter() - .map(|stage| { - provider.get_stage_checkpoint(stage).map(|c| c.map(|c| c.block_number)) - }) - .collect::, _>>()?; - - let targets = static_file_producer.get_static_file_targets(HighestStaticFiles { - headers: stages_checkpoints[0], - receipts: stages_checkpoints[1], - transactions: stages_checkpoints[2], - })?; - static_file_producer.run(targets)?; - stages_checkpoints.into_iter().min().expect("exists") - }; + let lowest_static_file_height = + self.static_file_producer.lock().copy_to_static_files()?.min(); // Deletes data which has been copied to static files. 
if let Some(prune_tip) = lowest_static_file_height { diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index b3ae29509..c8cc97c9b 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -24,12 +24,13 @@ reth-evm.workspace = true reth-exex.workspace = true reth-network-p2p.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-execution-types.workspace = true reth-prune-types.workspace = true reth-storage-errors.workspace = true reth-revm.workspace = true -reth-stages-api = { workspace = true, features = ["test-utils"] } +reth-stages-api.workspace = true reth-trie = { workspace = true, features = ["metrics"] } reth-testing-utils = { workspace = true, optional = true } @@ -88,9 +89,6 @@ pprof = { workspace = true, features = [ ] } [features] -bsc = [ - "reth-evm-bsc/bsc", -] test-utils = [ "dep:reth-chainspec", "reth-network-p2p/test-utils", @@ -100,6 +98,9 @@ test-utils = [ "dep:reth-testing-utils", "dep:tempfile", ] +bsc = [ + "reth-evm-bsc/bsc", +] [[bench]] name = "criterion" diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index de08526be..0f2dd2acf 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -139,7 +139,11 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let offset = transitions.len() as u64; db.insert_changesets(transitions, None).unwrap(); - db.commit(|tx| Ok(updates.flush(tx)?)).unwrap(); + db.commit(|tx| { + updates.write_to_database(tx)?; + Ok(()) + }) + .unwrap(); let (transitions, final_state) = random_changeset_range( &mut rng, diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 07dbda48f..b6c0feaf0 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -4,8 +4,6 @@ use std::{ }; use futures_util::TryStreamExt; -use tracing::*; - use reth_db::tables; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW}, @@ -24,6 +22,7 @@ use reth_stages_api::{ UnwindInput, UnwindOutput, }; use reth_storage_errors::provider::ProviderResult; +use tracing::*; // TODO(onbjerg): Metrics and events (gradual status for e.g. CLI) /// The body stage downloads block bodies. @@ -129,7 +128,7 @@ impl Stage for BodyStage { let mut next_tx_num = tx_block_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); let static_file_provider = provider.static_file_provider(); - let mut static_file_producer = + let mut static_file_producer_tx = static_file_provider.get_writer(from_block, StaticFileSegment::Transactions)?; // Make sure Transactions static file is at the same height. If it's further, this @@ -144,11 +143,42 @@ impl Stage for BodyStage { // stage run. So, our only solution is to unwind the static files and proceed from the // database expected height. Ordering::Greater => { - static_file_producer + static_file_producer_tx .prune_transactions(next_static_file_tx_num - next_tx_num, from_block - 1)?; // Since this is a database <-> static file inconsistency, we commit the change // straight away. - static_file_producer.commit()?; + static_file_producer_tx.commit()?; + } + // If static files are behind, then there was some corruption or loss of files. This + // error will trigger an unwind, that will bring the database to the same height as the + // static files. 
+ Ordering::Less => { + return Err(missing_static_data_error( + next_static_file_tx_num.saturating_sub(1), + static_file_provider, + provider, + StaticFileSegment::Transactions, + )?) + } + Ordering::Equal => {} + } + + let mut static_file_producer_sc = + static_file_provider.get_writer(from_block, StaticFileSegment::Sidecars)?; + + // Make sure Sidecars static file is at the same height. If it's further, this + // input execution was interrupted previously and we need to unwind the static file. + let next_static_file_block_num = static_file_provider + .get_highest_static_file_block(StaticFileSegment::Sidecars) + .map(|id| id + 1) + .unwrap_or_default(); + + match next_static_file_block_num.cmp(&from_block) { + Ordering::Greater => { + static_file_producer_sc.prune_sidecars(next_static_file_block_num - from_block)?; + // Since this is a database <-> static file inconsistency, we commit the change + // straight away. + static_file_producer_sc.commit()? } // If static files are behind, then there was some corruption or loss of files. This // error will trigger an unwind, that will bring the database to the same height as the @@ -158,6 +188,7 @@ impl Stage for BodyStage { next_static_file_tx_num.saturating_sub(1), static_file_provider, provider, + StaticFileSegment::Sidecars, )?) } Ordering::Equal => {} @@ -182,7 +213,7 @@ impl Stage for BodyStage { // Increment block on static file header. if block_number > 0 { - let appended_block_number = static_file_producer + let appended_block_number = static_file_producer_tx .increment_block(StaticFileSegment::Transactions, block_number)?; if appended_block_number != block_number { @@ -205,7 +236,7 @@ impl Stage for BodyStage { // Write transactions for transaction in block.body { - let appended_tx_number = static_file_producer + let appended_tx_number = static_file_producer_tx .append_transaction(next_tx_num, transaction.into())?; if appended_tx_number != next_tx_num { @@ -222,6 +253,14 @@ impl Stage for BodyStage { next_tx_num += 1; } + // Write sidecars + let sidecars = block.sidecars.unwrap_or_default(); + static_file_producer_sc.append_sidecars( + sidecars, + block_number, + block.header.hash(), + )?; + // Write ommers if any if !block.ommers.is_empty() { ommers_cursor @@ -243,7 +282,14 @@ impl Stage for BodyStage { } } } - BlockResponse::Empty(_) => {} + BlockResponse::Empty(header) => { + // Write empty sidecars + static_file_producer_sc.append_sidecars( + Default::default(), + block_number, + header.hash(), + )?; + } }; // insert block meta @@ -313,7 +359,7 @@ impl Stage for BodyStage { rev_walker.delete_current()?; } - let mut static_file_producer = + let mut static_file_producer_tx = static_file_provider.latest_writer(StaticFileSegment::Transactions)?; // Unwind from static files. Get the current last expected transaction from DB, and match it @@ -331,13 +377,39 @@ impl Stage for BodyStage { static_file_tx_num, static_file_provider, provider, + StaticFileSegment::Transactions, )?) } // Unwinds static file - static_file_producer + static_file_producer_tx .prune_transactions(static_file_tx_num.saturating_sub(db_tx_num), input.unwind_to)?; + let mut static_file_producer_sc = + static_file_provider.latest_writer(StaticFileSegment::Sidecars)?; + + // Unwind from static files. 
Get the current last expected block from DB, and match it + // on static file + let db_block_num = body_cursor.last()?.map(|(block_num, _)| block_num).unwrap_or_default(); + let static_file_block_num: u64 = static_file_provider + .get_highest_static_file_block(StaticFileSegment::Sidecars) + .unwrap_or_default(); + + // If there are more blocks on database, then we are missing static file data and we + // need to unwind further. + if db_block_num > static_file_block_num { + return Err(missing_static_data_error( + static_file_block_num, + static_file_provider, + provider, + StaticFileSegment::Sidecars, + )?) + } + + // Unwinds static file + static_file_producer_sc + .prune_sidecars(static_file_block_num.saturating_sub(db_block_num))?; + Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) .with_entities_stage_checkpoint(stage_checkpoint(provider)?), @@ -349,6 +421,7 @@ fn missing_static_data_error( last_tx_num: TxNumber, static_file_provider: &StaticFileProvider, provider: &DatabaseProviderRW, + segment: StaticFileSegment, ) -> Result { let mut last_block = static_file_provider .get_highest_static_file_block(StaticFileSegment::Transactions) @@ -370,10 +443,7 @@ fn missing_static_data_error( let missing_block = Box::new(provider.sealed_header(last_block + 1)?.unwrap_or_default()); - Ok(StageError::MissingStaticFileData { - block: missing_block, - segment: StaticFileSegment::Transactions, - }) + Ok(StageError::MissingStaticFileData { block: missing_block, segment }) } // TODO(alexey): ideally, we want to measure Bodies stage progress in bytes, but it's hard to know @@ -665,7 +735,7 @@ mod tests { transactions: block.body.clone(), ommers: block.ommers.clone(), withdrawals: block.withdrawals.clone(), - sidecars: None, + sidecars: block.sidecars.clone(), requests: block.requests.clone(), }, ) @@ -728,23 +798,34 @@ mod tests { // Insert last progress data { let tx = self.db.factory.provider_rw()?.into_tx(); - let mut static_file_producer = static_file_provider + let mut static_file_producer_tx = static_file_provider .get_writer(start, StaticFileSegment::Transactions)?; + let mut static_file_producer_sc = + static_file_provider.get_writer(start, StaticFileSegment::Sidecars)?; let body = StoredBlockBodyIndices { first_tx_num: 0, tx_count: progress.body.len() as u64, }; - static_file_producer.set_block_range(0..=progress.number); + static_file_producer_tx.set_block_range(0..=progress.number); body.tx_num_range().try_for_each(|tx_num| { let transaction = random_signed_tx(&mut rng); - static_file_producer + static_file_producer_tx .append_transaction(tx_num, transaction.into()) - .map(|_| ()) + .map(drop) })?; + for block_number in 0..=progress.number { + static_file_producer_sc.append_sidecars( + Default::default(), + block_number, + blocks.get(block_number as usize).map(|b| b.header.hash()).unwrap(), + )?; + tx.put::(block_number, Default::default())?; + } + if body.tx_count != 0 { tx.put::( body.last_tx_num(), @@ -761,7 +842,8 @@ mod tests { )?; } - static_file_producer.commit()?; + static_file_producer_tx.commit()?; + static_file_producer_sc.commit()?; tx.commit()?; } } @@ -945,6 +1027,7 @@ mod tests { body: body.transactions, ommers: body.ommers, withdrawals: body.withdrawals, + sidecars: body.sidecars, requests: body.requests, })); } diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 2141182fc..9223195de 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -6,10 
+6,8 @@ use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; use reth_evm::execute::{BatchExecutor, BlockExecutorProvider}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_exex::{ExExManagerHandle, ExExNotification}; -use reth_primitives::{ - constants::gas_units::{GIGAGAS, KILOGAS, MEGAGAS}, - BlockNumber, Header, StaticFileSegment, -}; +use reth_primitives::{BlockNumber, Header, StaticFileSegment}; +use reth_primitives_traits::format_gas_throughput; use reth_provider::{ providers::{StaticFileProvider, StaticFileProviderRWRefMut, StaticFileWriter}, BlockReader, DatabaseProviderRW, HeaderProvider, LatestStateProviderRef, OriginalValuesKnown, @@ -19,8 +17,8 @@ use reth_prune_types::PruneModes; use reth_revm::database::StateProviderDatabase; use reth_stages_api::{ BlockErrorKind, CheckpointBlockRange, EntitiesCheckpoint, ExecInput, ExecOutput, - ExecutionCheckpoint, MetricEvent, MetricEventsSender, Stage, StageCheckpoint, StageError, - StageId, UnwindInput, UnwindOutput, + ExecutionCheckpoint, ExecutionStageThresholds, MetricEvent, MetricEventsSender, Stage, + StageCheckpoint, StageError, StageId, UnwindInput, UnwindOutput, }; use std::{ cmp::Ordering, @@ -109,7 +107,7 @@ impl ExecutionStage { /// Create an execution stage with the provided executor. /// - /// The commit threshold will be set to `10_000`. + /// The commit threshold will be set to [`MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD`]. pub fn new_with_executor(executor_provider: E) -> Self { Self::new( executor_provider, @@ -224,8 +222,9 @@ where provider.tx_ref(), provider.static_file_provider().clone(), )); - let mut executor = self.executor_provider.batch_executor(db, prune_modes); + let mut executor = self.executor_provider.batch_executor(db); executor.set_tip(max_block); + executor.set_prune_modes(prune_modes); // Progress tracking let mut stage_progress = start_block; @@ -234,6 +233,13 @@ where let mut fetch_block_duration = Duration::default(); let mut execution_duration = Duration::default(); + + let mut last_block = start_block; + let mut last_execution_duration = Duration::default(); + let mut last_cumulative_gas = 0; + let mut last_log_instant = Instant::now(); + let log_duration = Duration::from_secs(10); + debug!(target: "sync::stages::execution", start = start_block, end = max_block, "Executing range"); // Execute block range @@ -276,6 +282,22 @@ where })?; execution_duration += execute_start.elapsed(); + // Log execution throughput + if last_log_instant.elapsed() >= log_duration { + info!( + target: "sync::stages::execution", + start = last_block, + end = block_number, + throughput = format_gas_throughput(cumulative_gas - last_cumulative_gas, execution_duration - last_execution_duration), + "Executed block range" + ); + + last_block = block_number + 1; + last_execution_duration = execution_duration; + last_cumulative_gas = cumulative_gas; + last_log_instant = Instant::now(); + } + // Gas metrics if let Some(metrics_tx) = &mut self.metrics_tx { let _ = @@ -558,83 +580,6 @@ fn calculate_gas_used_from_headers( Ok(gas_total) } -/// The thresholds at which the execution stage writes state changes to the database. -/// -/// If either of the thresholds (`max_blocks` and `max_changes`) are hit, then the execution stage -/// commits all pending changes to the database. -/// -/// A third threshold, `max_changesets`, can be set to periodically write changesets to the -/// current database transaction, which frees up memory. 
-#[derive(Debug, Clone)]
-pub struct ExecutionStageThresholds {
-    /// The maximum number of blocks to execute before the execution stage commits.
-    pub max_blocks: Option<u64>,
-    /// The maximum number of state changes to keep in memory before the execution stage commits.
-    pub max_changes: Option<u64>,
-    /// The maximum cumulative amount of gas to process before the execution stage commits.
-    pub max_cumulative_gas: Option<u64>,
-    /// The maximum time spent on blocks processing before the execution stage commits.
-    pub max_duration: Option<Duration>,
-}
-
-impl Default for ExecutionStageThresholds {
-    fn default() -> Self {
-        Self {
-            max_blocks: Some(500_000),
-            max_changes: Some(5_000_000),
-            // 50k full blocks of 30M gas
-            max_cumulative_gas: Some(30_000_000 * 50_000),
-            // 10 minutes
-            max_duration: Some(Duration::from_secs(10 * 60)),
-        }
-    }
-}
-
-impl ExecutionStageThresholds {
-    /// Check if the batch thresholds have been hit.
-    #[inline]
-    pub fn is_end_of_batch(
-        &self,
-        blocks_processed: u64,
-        changes_processed: u64,
-        cumulative_gas_used: u64,
-        elapsed: Duration,
-    ) -> bool {
-        blocks_processed >= self.max_blocks.unwrap_or(u64::MAX) ||
-            changes_processed >= self.max_changes.unwrap_or(u64::MAX) ||
-            cumulative_gas_used >= self.max_cumulative_gas.unwrap_or(u64::MAX) ||
-            elapsed >= self.max_duration.unwrap_or(Duration::MAX)
-    }
-}
-
-impl From<ExecutionConfig> for ExecutionStageThresholds {
-    fn from(config: ExecutionConfig) -> Self {
-        Self {
-            max_blocks: config.max_blocks,
-            max_changes: config.max_changes,
-            max_cumulative_gas: config.max_cumulative_gas,
-            max_duration: config.max_duration,
-        }
-    }
-}
-
-/// Returns a formatted gas throughput log, showing either:
-/// * "Kgas/s", or 1,000 gas per second
-/// * "Mgas/s", or 1,000,000 gas per second
-/// * "Ggas/s", or 1,000,000,000 gas per second
-///
-/// Depending on the magnitude of the gas throughput.
-pub fn format_gas_throughput(gas: u64, execution_duration: Duration) -> String {
-    let gas_per_second = gas as f64 / execution_duration.as_secs_f64();
-    if gas_per_second < MEGAGAS as f64 {
-        format!("{:.} Kgas/second", gas_per_second / KILOGAS as f64)
-    } else if gas_per_second < GIGAGAS as f64 {
-        format!("{:.} Mgas/second", gas_per_second / MEGAGAS as f64)
-    } else {
-        format!("{:.} Ggas/second", gas_per_second / GIGAGAS as f64)
-    }
-}
-
 /// Returns a `StaticFileProviderRWRefMut` static file producer after performing a consistency
 /// check.
/// @@ -753,22 +698,6 @@ mod tests { ) } - #[test] - fn test_gas_throughput_fmt() { - let duration = Duration::from_secs(1); - let gas = 100_000; - let throughput = format_gas_throughput(gas, duration); - assert_eq!(throughput, "100 Kgas/second"); - - let gas = 100_000_000; - let throughput = format_gas_throughput(gas, duration); - assert_eq!(throughput, "100 Mgas/second"); - - let gas = 100_000_000_000; - let throughput = format_gas_throughput(gas, duration); - assert_eq!(throughput, "100 Ggas/second"); - } - #[test] fn execution_checkpoint_matches() { let factory = create_test_provider_factory(); @@ -807,12 +736,9 @@ mod tests { .try_seal_with_senders() .map_err(|_| BlockValidationError::SenderRecoveryError) .unwrap(), - None, ) .unwrap(); - provider - .insert_historical_block(block.clone().try_seal_with_senders().unwrap(), None) - .unwrap(); + provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -852,10 +778,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap(), None).unwrap(); - provider - .insert_historical_block(block.clone().try_seal_with_senders().unwrap(), None) - .unwrap(); + provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -895,10 +819,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap(), None).unwrap(); - provider - .insert_historical_block(block.clone().try_seal_with_senders().unwrap(), None) - .unwrap(); + provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -932,10 +854,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap(), None).unwrap(); - provider - .insert_historical_block(block.clone().try_seal_with_senders().unwrap(), None) - .unwrap(); + provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); + 
provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -1091,10 +1011,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap(), None).unwrap(); - provider - .insert_historical_block(block.clone().try_seal_with_senders().unwrap(), None) - .unwrap(); + provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -1210,10 +1128,8 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap(), None).unwrap(); - 
provider - .insert_historical_block(block.clone().try_seal_with_senders().unwrap(), None) - .unwrap(); + provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) diff --git a/crates/stages/stages/src/stages/finish.rs b/crates/stages/stages/src/stages/finish.rs index 9eb3a6d76..7ce1d246b 100644 --- a/crates/stages/stages/src/stages/finish.rs +++ b/crates/stages/stages/src/stages/finish.rs @@ -12,6 +12,8 @@ use reth_stages_api::{ #[non_exhaustive] pub struct FinishStage; +impl FinishStage {} + impl Stage for FinishStage { fn id(&self) -> StageId { StageId::Finish diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index fe8bc8547..6f59be9f4 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -75,7 +75,7 @@ impl AccountHashingStage { let blocks = random_block_range(&mut rng, opts.blocks.clone(), B256::ZERO, opts.txs); for block in blocks { - provider.insert_historical_block(block.try_seal_with_senders().unwrap(), None).unwrap(); + provider.insert_historical_block(block.try_seal_with_senders().unwrap()).unwrap(); } provider .static_file_provider() diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index 3d05ea52d..71c57cae4 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -631,7 +631,6 @@ mod tests { ExecutionOutcome::default(), HashedPostState::default(), TrieUpdates::default(), - None, ) .unwrap(); provider.commit().unwrap(); diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index 5a3d31a40..268df2635 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -217,7 +217,7 @@ impl Stage for MerkleStage { })?; match progress { StateRootProgress::Progress(state, hashed_entries_walked, updates) => { - updates.flush(tx)?; + updates.write_to_database(tx)?; let checkpoint = MerkleCheckpoint::new( to_block, @@ -237,7 +237,7 @@ impl Stage for MerkleStage { }) } StateRootProgress::Complete(root, hashed_entries_walked, updates) => { - updates.flush(tx)?; + updates.write_to_database(tx)?; entities_checkpoint.processed += hashed_entries_walked as u64; @@ -252,7 +252,7 @@ impl Stage for MerkleStage { error!(target: "sync::stages::merkle", %e, ?current_block_number, ?to_block, "Incremental state root failed! {INVALID_STATE_ROOT_ERROR_MESSAGE}"); StageError::Fatal(Box::new(e)) })?; - updates.flush(provider.tx_ref())?; + updates.write_to_database(provider.tx_ref())?; let total_hashed_entries = (provider.count_entries::()? + provider.count_entries::()?) @@ -325,7 +325,7 @@ impl Stage for MerkleStage { validate_state_root(block_root, target.seal_slow(), input.unwind_to)?; // Validation passed, apply unwind changes to the database. 
- updates.flush(provider.tx_ref())?; + updates.write_to_database(provider.tx_ref())?; // TODO(alexey): update entities checkpoint } else { @@ -512,13 +512,14 @@ mod tests { accounts.iter().map(|(addr, acc)| (*addr, (*acc, std::iter::empty()))), )?; - let SealedBlock { header, body, ommers, withdrawals, requests } = random_block( - &mut rng, - stage_progress, - preblocks.last().map(|b| b.hash()), - Some(0), - None, - ); + let SealedBlock { header, body, ommers, withdrawals, sidecars, requests } = + random_block( + &mut rng, + stage_progress, + preblocks.last().map(|b| b.hash()), + Some(0), + None, + ); let mut header = header.unseal(); header.state_root = state_root( @@ -527,8 +528,14 @@ mod tests { .into_iter() .map(|(address, account)| (address, (account, std::iter::empty()))), ); - let sealed_head = - SealedBlock { header: header.seal_slow(), body, ommers, withdrawals, requests }; + let sealed_head = SealedBlock { + header: header.seal_slow(), + body, + ommers, + withdrawals, + sidecars, + requests, + }; let head_hash = sealed_head.hash(); let mut blocks = vec![sealed_head]; diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 8d850b8ba..4b65523ba 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -66,7 +66,9 @@ mod tests { StaticFileProviderFactory, StorageReader, }; use reth_prune_types::{PruneMode, PruneModes}; - use reth_stages_api::{ExecInput, PipelineTarget, Stage, StageCheckpoint, StageId}; + use reth_stages_api::{ + ExecInput, ExecutionStageThresholds, PipelineTarget, Stage, StageCheckpoint, StageId, + }; use reth_testing_utils::generators::{self, random_block, random_block_range, random_receipt}; use std::{io::Write, sync::Arc}; @@ -82,11 +84,9 @@ mod tests { let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); + provider_rw.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); provider_rw - .insert_historical_block(genesis.try_seal_with_senders().unwrap(), None) - .unwrap(); - provider_rw - .insert_historical_block(block.clone().try_seal_with_senders().unwrap(), None) + .insert_historical_block(block.clone().try_seal_with_senders().unwrap()) .unwrap(); // 
Fill with bogus blocks to respect PruneMode distance. @@ -95,9 +95,7 @@ mod tests { for block_number in 2..=tip { let nblock = random_block(&mut rng, block_number, Some(head), Some(0), Some(0)); head = nblock.hash(); - provider_rw - .insert_historical_block(nblock.try_seal_with_senders().unwrap(), None) - .unwrap(); + provider_rw.insert_historical_block(nblock.try_seal_with_senders().unwrap()).unwrap(); } provider_rw .static_file_provider() diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 8f72b5aab..b3e053f49 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -16,7 +16,7 @@ use reth_db_api::{ DatabaseError as DbError, }; use reth_primitives::{ - keccak256, Account, Address, BlockNumber, Receipt, SealedBlock, SealedHeader, + keccak256, Account, Address, BlockHash, BlockNumber, Receipt, SealedBlock, SealedHeader, StaticFileSegment, StorageEntry, TxHash, TxNumber, B256, U256, }; use reth_provider::{ @@ -171,6 +171,32 @@ impl TestStageDB { Ok(()) } + /// Insert sidecars to static file if `writer` exists, otherwise to DB. + /// Always insert empty data. + pub fn insert_sidecars<TX: DbTxMut>( + writer: Option<&mut StaticFileProviderRWRefMut<'_>>, + tx: &TX, + hash: BlockHash, + block_number: BlockNumber, + ) -> ProviderResult<()> { + if let Some(writer) = writer { + // Backfill: some tests start at a forward block number, but static files require no + // gaps. + let segment_header = writer.user_header(); + if segment_header.block_end().is_none() && segment_header.expected_block_start() == 0 { + for block_number in 0..block_number { + writer.append_sidecars(Default::default(), block_number, B256::ZERO)?; + } + } + + writer.append_sidecars(Default::default(), block_number, hash)?; + } else { + tx.put::<tables::Sidecars>(block_number, Default::default())?; + } + + Ok(()) + } + fn insert_headers_inner<'a, I, const TD: bool>(&self, headers: I) -> ProviderResult<()> where I: IntoIterator<Item = &'a SealedHeader>, @@ -234,14 +260,26 @@ impl TestStageDB { let mut headers_writer = storage_kind .is_static() .then(|| provider.latest_writer(StaticFileSegment::Headers).unwrap()); + let mut sidecars_writer = storage_kind + .is_static() + .then(|| provider.latest_writer(StaticFileSegment::Sidecars).unwrap()); blocks.iter().try_for_each(|block| { + Self::insert_sidecars( + sidecars_writer.as_mut(), + &tx, + block.header.hash(), + block.header.number, + )?; Self::insert_header(headers_writer.as_mut(), &tx, &block.header, U256::ZERO) })?; if let Some(mut writer) = headers_writer { writer.commit()?; } + if let Some(mut writer) = sidecars_writer { + writer.commit()?; + } } { @@ -490,7 +528,7 @@ impl StorageKind { fn tx_offset(&self) -> u64 { if let Self::Database(offset) = self { - return offset.unwrap_or_default() + return offset.unwrap_or_default(); } 0 }
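Since a static-file segment must cover a gapless block range, the backfill branch above pads an empty Sidecars segment with default values before the requested block. A hedged sketch of a call site follows; `TestStageDB`'s `factory` field and the transaction plumbing are assumptions for illustration, not part of this diff:

```rust
use reth_primitives::{StaticFileSegment, B256};
use reth_provider::providers::StaticFileWriter;
use reth_storage_errors::provider::ProviderResult;

/// Hypothetical test setup: write empty sidecars for block 10 into static files.
fn insert_empty_sidecars(db: &TestStageDB) -> ProviderResult<()> {
    let provider_rw = db.factory.provider_rw()?;
    let mut writer =
        db.factory.static_file_provider().latest_writer(StaticFileSegment::Sidecars)?;
    // Blocks 0..10 are backfilled with empty sidecars before block 10 itself is appended.
    TestStageDB::insert_sidecars(Some(&mut writer), provider_rw.tx_ref(), B256::ZERO, 10)?;
    writer.commit()?;
    Ok(())
}
```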
diff --git a/crates/stages/types/src/execution.rs b/crates/stages/types/src/execution.rs new file mode 100644 index 000000000..61f7313a3 --- /dev/null +++ b/crates/stages/types/src/execution.rs @@ -0,0 +1,50 @@ +use std::time::Duration; + +/// The thresholds at which the execution stage writes state changes to the database. +/// +/// If either of the thresholds (`max_blocks` and `max_changes`) is hit, the execution stage +/// commits all pending changes to the database. +/// +/// The `max_cumulative_gas` and `max_duration` thresholds likewise bound how much work a single +/// batch can accumulate before a commit. +#[derive(Debug, Clone)] +pub struct ExecutionStageThresholds { + /// The maximum number of blocks to execute before the execution stage commits. + pub max_blocks: Option<u64>, + /// The maximum number of state changes to keep in memory before the execution stage commits. + pub max_changes: Option<u64>, + /// The maximum cumulative amount of gas to process before the execution stage commits. + pub max_cumulative_gas: Option<u64>, + /// The maximum time spent processing blocks before the execution stage commits. + pub max_duration: Option<Duration>, +} + +impl Default for ExecutionStageThresholds { + fn default() -> Self { + Self { + max_blocks: Some(500_000), + max_changes: Some(5_000_000), + // 50k full blocks of 30M gas + max_cumulative_gas: Some(30_000_000 * 50_000), + // 10 minutes + max_duration: Some(Duration::from_secs(10 * 60)), + } + } +} + +impl ExecutionStageThresholds { + /// Check if the batch thresholds have been hit. + #[inline] + pub fn is_end_of_batch( + &self, + blocks_processed: u64, + changes_processed: u64, + cumulative_gas_used: u64, + elapsed: Duration, + ) -> bool { + blocks_processed >= self.max_blocks.unwrap_or(u64::MAX) || + changes_processed >= self.max_changes.unwrap_or(u64::MAX) || + cumulative_gas_used >= self.max_cumulative_gas.unwrap_or(u64::MAX) || + elapsed >= self.max_duration.unwrap_or(Duration::MAX) + } +} diff --git a/crates/stages/types/src/lib.rs b/crates/stages/types/src/lib.rs index 93106bd88..0132c8b41 100644 --- a/crates/stages/types/src/lib.rs +++ b/crates/stages/types/src/lib.rs @@ -6,8 +6,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod id; @@ -21,6 +19,9 @@ pub use checkpoints::{ StageUnitCheckpoint, StorageHashingCheckpoint, }; +mod execution; +pub use execution::*; +
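For orientation, here is a minimal sketch of how a caller might drive these thresholds. The loop body and per-block figures are invented, and the import path assumes the `pub use execution::*;` re-export from the `lib.rs` hunk above:

```rust
use reth_stages_types::ExecutionStageThresholds;
use std::time::Instant;

/// Illustrative batch loop: execute blocks until any configured threshold trips.
fn run_batch(thresholds: &ExecutionStageThresholds) {
    let started = Instant::now();
    let (mut blocks, mut changes, mut gas) = (0u64, 0u64, 0u64);
    loop {
        // ... execute one block here, then update the counters (made-up figures) ...
        blocks += 1;
        changes += 1_000;
        gas += 15_000_000;
        if thresholds.is_end_of_batch(blocks, changes, gas, started.elapsed()) {
            break; // the real stage would commit the accumulated state here
        }
    }
}
```

With the defaults above, whichever bound is crossed first ends the batch; setting a field to `None` disables that bound, since `unwrap_or(u64::MAX)` makes it unreachable.

/// Direction and target block for pipeline operations.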
#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum PipelineTarget { diff --git a/crates/static-file/static-file/Cargo.toml b/crates/static-file/static-file/Cargo.toml index 29a601f05..1a1921d58 100644 --- a/crates/static-file/static-file/Cargo.toml +++ b/crates/static-file/static-file/Cargo.toml @@ -21,6 +21,7 @@ reth-nippy-jar.workspace = true reth-tokio-util.workspace = true reth-prune-types.workspace = true reth-static-file-types.workspace = true +reth-stages-types.workspace = true alloy-primitives.workspace = true diff --git a/crates/static-file/static-file/src/segments/headers.rs b/crates/static-file/static-file/src/segments/headers.rs index e87c1fdc5..5824d1d1a 100644 --- a/crates/static-file/static-file/src/segments/headers.rs +++ b/crates/static-file/static-file/src/segments/headers.rs @@ -1,14 +1,14 @@ -use crate::segments::{dataset_for_compression, prepare_jar, Segment, SegmentHeader}; +use crate::segments::Segment; use alloy_primitives::BlockNumber; -use reth_db::{static_file::create_static_file_T1_T2_T3, tables, RawKey, RawTable}; +use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, DatabaseProviderRO, }; -use reth_static_file_types::{SegmentConfig, StaticFileSegment}; +use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::ProviderResult; -use std::{ops::RangeInclusive, path::Path}; +use std::ops::RangeInclusive; /// Static File segment responsible for [`StaticFileSegment::Headers`] part of data. #[derive(Debug, Default)] @@ -56,73 +56,4 @@ impl Segment for Headers { Ok(()) } - - fn create_static_file_file( - &self, - provider: &DatabaseProviderRO, - directory: &Path, - config: SegmentConfig, - block_range: RangeInclusive, - ) -> ProviderResult<()> { - let range_len = block_range.clone().count(); - let jar = prepare_jar::( - provider, - directory, - StaticFileSegment::Headers, - config, - block_range.clone(), - range_len, - || { - Ok([ - dataset_for_compression::( - provider, - &block_range, - range_len, - )?, - dataset_for_compression::( - provider, - &block_range, - range_len, - )?, - dataset_for_compression::( - provider, - &block_range, - range_len, - )?, - ]) - }, - )?; - - // Generate list of hashes for filters & PHF - let mut cursor = provider.tx_ref().cursor_read::>()?; - let hashes = if config.filters.has_filters() { - Some( - cursor - .walk(Some(RawKey::from(*block_range.start())))? 
- .take(range_len) - .map(|row| row.map(|(_key, value)| value.into_value()).map_err(|e| e.into())), - ) - } else { - None - }; - - create_static_file_T1_T2_T3::< - tables::Headers, - tables::HeaderTerminalDifficulties, - tables::CanonicalHeaders, - BlockNumber, - SegmentHeader, - >( - provider.tx_ref(), - block_range, - None, - // We already prepared the dictionary beforehand - None::>>>, - hashes, - range_len, - jar, - )?; - - Ok(()) - } } diff --git a/crates/static-file/static-file/src/segments/mod.rs b/crates/static-file/static-file/src/segments/mod.rs index 77798dd08..fcbd19c8a 100644 --- a/crates/static-file/static-file/src/segments/mod.rs +++ b/crates/static-file/static-file/src/segments/mod.rs @@ -9,21 +9,15 @@ pub use headers::Headers; mod receipts; pub use receipts::Receipts; +mod sidecars; +pub use sidecars::Sidecars; + use alloy_primitives::BlockNumber; -use reth_db::{RawKey, RawTable}; -use reth_db_api::{cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx}; -use reth_nippy_jar::NippyJar; -use reth_provider::{ - providers::StaticFileProvider, DatabaseProviderRO, ProviderError, TransactionsProviderExt, -}; -use reth_static_file_types::{ - find_fixed_range, Compression, Filters, InclusionFilter, PerfectHashingFunction, SegmentConfig, - SegmentHeader, StaticFileSegment, -}; +use reth_db_api::database::Database; +use reth_provider::{providers::StaticFileProvider, DatabaseProviderRO}; +use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::ProviderResult; -use std::{ops::RangeInclusive, path::Path}; - -pub(crate) type Rows = [Vec>; COLUMNS]; +use std::ops::RangeInclusive; /// A segment represents moving some portion of the data to static files. pub trait Segment: Send + Sync { @@ -38,80 +32,4 @@ pub trait Segment: Send + Sync { static_file_provider: StaticFileProvider, block_range: RangeInclusive, ) -> ProviderResult<()>; - - /// Create a static file of data for the provided block range. The `directory` parameter - /// determines the static file's save location. - fn create_static_file_file( - &self, - provider: &DatabaseProviderRO, - directory: &Path, - config: SegmentConfig, - block_range: RangeInclusive, - ) -> ProviderResult<()>; -} - -/// Returns a [`NippyJar`] according to the desired configuration. The `directory` parameter -/// determines the static file's save location. 
-pub(crate) fn prepare_jar( - provider: &DatabaseProviderRO, - directory: impl AsRef, - segment: StaticFileSegment, - segment_config: SegmentConfig, - block_range: RangeInclusive, - total_rows: usize, - prepare_compression: impl Fn() -> ProviderResult>, -) -> ProviderResult> { - let tx_range = match segment { - StaticFileSegment::Headers => None, - StaticFileSegment::Receipts | StaticFileSegment::Transactions => { - Some(provider.transaction_range_by_block_range(block_range.clone())?.into()) - } - }; - - let mut nippy_jar = NippyJar::new( - COLUMNS, - &directory.as_ref().join(segment.filename(&find_fixed_range(*block_range.end())).as_str()), - SegmentHeader::new(block_range.clone().into(), Some(block_range.into()), tx_range, segment), - ); - - nippy_jar = match segment_config.compression { - Compression::Lz4 => nippy_jar.with_lz4(), - Compression::Zstd => nippy_jar.with_zstd(false, 0), - Compression::ZstdWithDictionary => { - let dataset = prepare_compression()?; - - nippy_jar = nippy_jar.with_zstd(true, 5_000_000); - nippy_jar - .prepare_compression(dataset.to_vec()) - .map_err(|e| ProviderError::NippyJar(e.to_string()))?; - nippy_jar - } - Compression::Uncompressed => nippy_jar, - }; - - if let Filters::WithFilters(inclusion_filter, phf) = segment_config.filters { - nippy_jar = match inclusion_filter { - InclusionFilter::Cuckoo => nippy_jar.with_cuckoo_filter(total_rows), - }; - nippy_jar = match phf { - PerfectHashingFunction::Fmph => nippy_jar.with_fmph(), - PerfectHashingFunction::GoFmph => nippy_jar.with_gofmph(), - }; - } - - Ok(nippy_jar) -} - -/// Generates the dataset to train a zstd dictionary with the most recent rows (at most 1000). -pub(crate) fn dataset_for_compression>( - provider: &DatabaseProviderRO, - range: &RangeInclusive, - range_len: usize, -) -> ProviderResult>> { - let mut cursor = provider.tx_ref().cursor_read::>()?; - Ok(cursor - .walk_back(Some(RawKey::from(*range.end())))? - .take(range_len.min(1000)) - .map(|row| row.map(|(_key, value)| value.into_value()).expect("should exist")) - .collect::>()) } diff --git a/crates/static-file/static-file/src/segments/receipts.rs b/crates/static-file/static-file/src/segments/receipts.rs index e09b5e690..5548e9f99 100644 --- a/crates/static-file/static-file/src/segments/receipts.rs +++ b/crates/static-file/static-file/src/segments/receipts.rs @@ -1,14 +1,14 @@ -use crate::segments::{dataset_for_compression, prepare_jar, Segment}; -use alloy_primitives::{BlockNumber, TxNumber}; -use reth_db::{static_file::create_static_file_T1, tables}; +use crate::segments::Segment; +use alloy_primitives::BlockNumber; +use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, DatabaseProviderRO, TransactionsProviderExt, + BlockReader, DatabaseProviderRO, }; -use reth_static_file_types::{SegmentConfig, SegmentHeader, StaticFileSegment}; +use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use std::{ops::RangeInclusive, path::Path}; +use std::ops::RangeInclusive; /// Static File segment responsible for [`StaticFileSegment::Receipts`] part of data. 
#[derive(Debug, Default)] @@ -47,56 +47,4 @@ impl Segment for Receipts { Ok(()) } - - fn create_static_file_file( - &self, - provider: &DatabaseProviderRO, - directory: &Path, - config: SegmentConfig, - block_range: RangeInclusive, - ) -> ProviderResult<()> { - let tx_range = provider.transaction_range_by_block_range(block_range.clone())?; - let tx_range_len = tx_range.clone().count(); - - let jar = prepare_jar::( - provider, - directory, - StaticFileSegment::Receipts, - config, - block_range, - tx_range_len, - || { - Ok([dataset_for_compression::( - provider, - &tx_range, - tx_range_len, - )?]) - }, - )?; - - // Generate list of hashes for filters & PHF - let hashes = if config.filters.has_filters() { - Some( - provider - .transaction_hashes_by_range(*tx_range.start()..(*tx_range.end() + 1))? - .into_iter() - .map(|(tx, _)| Ok(tx)), - ) - } else { - None - }; - - create_static_file_T1::( - provider.tx_ref(), - tx_range, - None, - // We already prepared the dictionary beforehand - None::>>>, - hashes, - tx_range_len, - jar, - )?; - - Ok(()) - } } diff --git a/crates/static-file/static-file/src/segments/sidecars.rs b/crates/static-file/static-file/src/segments/sidecars.rs new file mode 100644 index 000000000..8f04c64ee --- /dev/null +++ b/crates/static-file/static-file/src/segments/sidecars.rs @@ -0,0 +1,52 @@ +use crate::segments::Segment; +use alloy_primitives::BlockNumber; +use reth_db::tables; +use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; +use reth_provider::{ + providers::{StaticFileProvider, StaticFileWriter}, + DatabaseProviderRO, +}; +use reth_static_file_types::StaticFileSegment; +use reth_storage_errors::provider::ProviderResult; +use std::ops::RangeInclusive; + +/// Static File segment responsible for [`StaticFileSegment::Sidecars`] part of data. 
+#[derive(Debug, Default)] +pub struct Sidecars; + +impl Segment for Sidecars { + fn segment(&self) -> StaticFileSegment { + StaticFileSegment::Sidecars + } + + fn copy_to_static_files( + &self, + provider: DatabaseProviderRO, + static_file_provider: StaticFileProvider, + block_range: RangeInclusive, + ) -> ProviderResult<()> { + let mut static_file_writer = + static_file_provider.get_writer(*block_range.start(), StaticFileSegment::Sidecars)?; + + let mut sidecars_cursor = provider.tx_ref().cursor_read::()?; + let sidecars_walker = sidecars_cursor.walk_range(block_range.clone())?; + + let mut canonical_headers_cursor = + provider.tx_ref().cursor_read::()?; + let canonical_headers_walker = canonical_headers_cursor.walk_range(block_range)?; + + for (sidecar_entry, canonical_header_entry) in sidecars_walker.zip(canonical_headers_walker) + { + let (header_block, sidecar) = sidecar_entry?; + let (canonical_header_block, canonical_header) = canonical_header_entry?; + + debug_assert_eq!(header_block, canonical_header_block); + + let _static_file_block = + static_file_writer.append_sidecars(sidecar, header_block, canonical_header)?; + debug_assert_eq!(_static_file_block, header_block); + } + + Ok(()) + } +} diff --git a/crates/static-file/static-file/src/segments/transactions.rs b/crates/static-file/static-file/src/segments/transactions.rs index c7daeba06..4361f8ca6 100644 --- a/crates/static-file/static-file/src/segments/transactions.rs +++ b/crates/static-file/static-file/src/segments/transactions.rs @@ -1,14 +1,14 @@ -use crate::segments::{dataset_for_compression, prepare_jar, Segment}; -use alloy_primitives::{BlockNumber, TxNumber}; -use reth_db::{static_file::create_static_file_T1, tables}; +use crate::segments::Segment; +use alloy_primitives::BlockNumber; +use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, database::Database, transaction::DbTx}; use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, - BlockReader, DatabaseProviderRO, TransactionsProviderExt, + BlockReader, DatabaseProviderRO, }; -use reth_static_file_types::{SegmentConfig, SegmentHeader, StaticFileSegment}; +use reth_static_file_types::StaticFileSegment; use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use std::{ops::RangeInclusive, path::Path}; +use std::ops::RangeInclusive; /// Static File segment responsible for [`StaticFileSegment::Transactions`] part of data. #[derive(Debug, Default)] @@ -53,56 +53,4 @@ impl Segment for Transactions { Ok(()) } - - fn create_static_file_file( - &self, - provider: &DatabaseProviderRO, - directory: &Path, - config: SegmentConfig, - block_range: RangeInclusive, - ) -> ProviderResult<()> { - let tx_range = provider.transaction_range_by_block_range(block_range.clone())?; - let tx_range_len = tx_range.clone().count(); - - let jar = prepare_jar::( - provider, - directory, - StaticFileSegment::Transactions, - config, - block_range, - tx_range_len, - || { - Ok([dataset_for_compression::( - provider, - &tx_range, - tx_range_len, - )?]) - }, - )?; - - // Generate list of hashes for filters & PHF - let hashes = if config.filters.has_filters() { - Some( - provider - .transaction_hashes_by_range(*tx_range.start()..(*tx_range.end() + 1))? 
- .into_iter() - .map(|(tx, _)| Ok(tx)), - ) - } else { - None - }; - - create_static_file_T1::( - provider.tx_ref(), - tx_range, - None, - // We already prepared the dictionary beforehand - None::>>>, - hashes, - tx_range_len, - jar, - )?; - - Ok(()) - } } diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 44ea3a5c8..98284ffcf 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -5,8 +5,12 @@ use alloy_primitives::BlockNumber; use parking_lot::Mutex; use rayon::prelude::*; use reth_db_api::database::Database; -use reth_provider::{providers::StaticFileWriter, ProviderFactory, StaticFileProviderFactory}; +use reth_provider::{ + providers::StaticFileWriter, ProviderFactory, StageCheckpointReader as _, + StaticFileProviderFactory, +}; use reth_prune_types::PruneModes; +use reth_stages_types::StageId; use reth_static_file_types::HighestStaticFiles; use reth_storage_errors::provider::ProviderResult; use reth_tokio_util::{EventSender, EventStream}; @@ -56,18 +60,22 @@ pub struct StaticFileProducerInner { event_sender: EventSender, } -/// Static File targets, per data part, measured in [`BlockNumber`]. +/// Static File targets, per data segment, measured in [`BlockNumber`]. #[derive(Debug, Clone, Eq, PartialEq)] pub struct StaticFileTargets { headers: Option>, receipts: Option>, transactions: Option>, + sidecars: Option>, } impl StaticFileTargets { /// Returns `true` if any of the targets are [Some]. pub const fn any(&self) -> bool { - self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some() + self.headers.is_some() || + self.receipts.is_some() || + self.transactions.is_some() || + self.sidecars.is_some() } // Returns `true` if all targets are either [`None`] or has beginning of the range equal to the @@ -77,6 +85,7 @@ impl StaticFileTargets { (self.headers.as_ref(), static_files.headers), (self.receipts.as_ref(), static_files.receipts), (self.transactions.as_ref(), static_files.transactions), + (self.sidecars.as_ref(), static_files.sidecars), ] .iter() .all(|(target_block_range, highest_static_fileted_block)| { @@ -135,6 +144,9 @@ impl StaticFileProducerInner { if let Some(block_range) = targets.receipts.clone() { segments.push((Box::new(segments::Receipts), block_range)); } + if let Some(block_range) = targets.sidecars.clone() { + segments.push((Box::new(segments::Sidecars), block_range)); + } segments.par_iter().try_for_each(|(segment, block_range)| -> ProviderResult<()> { debug!(target: "static_file", segment = %segment.segment(), ?block_range, "StaticFileProducer segment"); @@ -167,6 +179,29 @@ impl StaticFileProducerInner { Ok(targets) } + /// Copies data from database to static files according to + /// [stage checkpoints](reth_stages_types::StageCheckpoint). + /// + /// Returns highest block numbers for all static file segments. 
+ pub fn copy_to_static_files(&self) -> ProviderResult { + let provider = self.provider_factory.provider()?; + let stages_checkpoints = [StageId::Headers, StageId::Execution, StageId::Bodies] + .into_iter() + .map(|stage| provider.get_stage_checkpoint(stage).map(|c| c.map(|c| c.block_number))) + .collect::, _>>()?; + + let highest_static_files = HighestStaticFiles { + headers: stages_checkpoints[0], + receipts: stages_checkpoints[1], + transactions: stages_checkpoints[2], + sidecars: stages_checkpoints[2], + }; + let targets = self.get_static_file_targets(highest_static_files)?; + self.run(targets)?; + + Ok(highest_static_files) + } + /// Returns a static file targets at the provided finalized block numbers per segment. /// The target is determined by the check against highest `static_files` using /// [`reth_provider::providers::StaticFileProvider::get_highest_static_files`]. @@ -200,6 +235,9 @@ impl StaticFileProducerInner { finalized_block_number, ) }), + sidecars: finalized_block_numbers.sidecars.and_then(|finalized_block_number| { + self.get_static_file_target(highest_static_files.sidecars, finalized_block_number) + }), }; trace!( @@ -296,6 +334,7 @@ mod tests { headers: Some(1), receipts: Some(1), transactions: Some(1), + sidecars: Some(1), }) .expect("get static file targets"); assert_eq!( @@ -303,13 +342,19 @@ mod tests { StaticFileTargets { headers: Some(0..=1), receipts: Some(0..=1), - transactions: Some(0..=1) + transactions: Some(0..=1), + sidecars: Some(0..=1) } ); assert_matches!(static_file_producer.run(targets), Ok(_)); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(1), receipts: Some(1), transactions: Some(1) } + HighestStaticFiles { + headers: Some(1), + receipts: Some(1), + transactions: Some(1), + sidecars: Some(1) + } ); let targets = static_file_producer @@ -317,6 +362,7 @@ mod tests { headers: Some(3), receipts: Some(3), transactions: Some(3), + sidecars: Some(3), }) .expect("get static file targets"); assert_eq!( @@ -324,13 +370,19 @@ mod tests { StaticFileTargets { headers: Some(2..=3), receipts: Some(2..=3), - transactions: Some(2..=3) + transactions: Some(2..=3), + sidecars: Some(2..=3) } ); assert_matches!(static_file_producer.run(targets), Ok(_)); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(3), receipts: Some(3), transactions: Some(3) } + HighestStaticFiles { + headers: Some(3), + receipts: Some(3), + transactions: Some(3), + sidecars: Some(3) + } ); let targets = static_file_producer @@ -338,6 +390,7 @@ mod tests { headers: Some(4), receipts: Some(4), transactions: Some(4), + sidecars: Some(4), }) .expect("get static file targets"); assert_eq!( @@ -345,7 +398,8 @@ mod tests { StaticFileTargets { headers: Some(4..=4), receipts: Some(4..=4), - transactions: Some(4..=4) + transactions: Some(4..=4), + sidecars: Some(4..=4) } ); assert_matches!( @@ -354,7 +408,12 @@ mod tests { ); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(3), receipts: Some(3), transactions: Some(3) } + HighestStaticFiles { + headers: Some(3), + receipts: Some(3), + transactions: Some(3), + sidecars: Some(3) + } ); } @@ -382,6 +441,7 @@ mod tests { headers: Some(1), receipts: Some(1), transactions: Some(1), + sidecars: Some(1), }) .expect("get static file targets"); assert_matches!(locked_producer.run(targets.clone()), Ok(_)); diff --git a/crates/static-file/types/src/lib.rs 
b/crates/static-file/types/src/lib.rs index f78d61f69..15feafd18 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -20,7 +20,7 @@ pub use segment::{SegmentConfig, SegmentHeader, SegmentRangeInclusive, StaticFil /// Default static file block count. pub const BLOCKS_PER_STATIC_FILE: u64 = 500_000; -/// Highest static file block numbers, per data part. +/// Highest static file block numbers, per data segment. #[derive(Debug, Clone, Copy, Default, Eq, PartialEq)] pub struct HighestStaticFiles { /// Highest static file block of headers, inclusive. @@ -32,6 +32,9 @@ pub struct HighestStaticFiles { /// Highest static file block of transactions, inclusive. /// If [`None`], no static file is available. pub transactions: Option<BlockNumber>, + /// Highest static file block of sidecars, inclusive. + /// If [`None`], no static file is available. + pub sidecars: Option<BlockNumber>, } impl HighestStaticFiles { @@ -41,6 +44,7 @@ StaticFileSegment::Headers => self.headers, StaticFileSegment::Transactions => self.transactions, StaticFileSegment::Receipts => self.receipts, + StaticFileSegment::Sidecars => self.sidecars, } } @@ -50,9 +54,15 @@ StaticFileSegment::Headers => &mut self.headers, StaticFileSegment::Transactions => &mut self.transactions, StaticFileSegment::Receipts => &mut self.receipts, + StaticFileSegment::Sidecars => &mut self.sidecars, } } + /// Returns the minimum block of all segments. + pub fn min(&self) -> Option<BlockNumber> { + [self.headers, self.transactions, self.receipts].iter().filter_map(|&option| option).min() + } + /// Returns the maximum block of all segments. pub fn max(&self) -> Option<BlockNumber> { [self.headers, self.transactions, self.receipts].iter().filter_map(|&option| option).max() diff --git a/crates/static-file/types/src/segment.rs b/crates/static-file/types/src/segment.rs index d609f4a98..ba2b2c98f 100644 --- a/crates/static-file/types/src/segment.rs +++ b/crates/static-file/types/src/segment.rs @@ -34,6 +34,9 @@ pub enum StaticFileSegment { #[strum(serialize = "receipts")] /// Static File segment responsible for the `Receipts` table. Receipts, + #[strum(serialize = "sidecars")] + /// Static File segment responsible for the `Sidecars` table. + Sidecars, } impl StaticFileSegment { @@ -43,6 +46,7 @@ Self::Headers => "headers", Self::Transactions => "transactions", Self::Receipts => "receipts", + Self::Sidecars => "sidecars", } } @@ -57,7 +61,7 @@ }; match self { - Self::Headers | Self::Transactions | Self::Receipts => default_config, + Self::Headers | Self::Transactions | Self::Receipts | Self::Sidecars => default_config, } } @@ -66,6 +70,7 @@ match self { Self::Headers => 3, Self::Transactions | Self::Receipts => 1, + Self::Sidecars => 2, } } @@ -138,6 +143,11 @@ pub const fn is_receipts(&self) -> bool { matches!(self, Self::Receipts) } + + /// Returns `true` if the segment is `StaticFileSegment::Sidecars`. + pub const fn is_sidecars(&self) -> bool { + matches!(self, Self::Sidecars) + } } /// A segment header that contains information common to all segments. Used for storage.
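As a sanity check on the `HighestStaticFiles` additions above, a minimal sketch with invented block numbers. Note that `min` and `max`, as written, fold over headers, transactions, and receipts only, so the new sidecars value does not affect either result:

```rust
use reth_static_file_types::{HighestStaticFiles, StaticFileSegment};

fn example() {
    // Hypothetical per-segment progress snapshot.
    let highest = HighestStaticFiles {
        headers: Some(100),
        receipts: Some(90),
        transactions: Some(95),
        sidecars: Some(95),
    };
    // The new Sidecars arm resolves per-segment lookups.
    assert_eq!(highest.highest(StaticFileSegment::Sidecars), Some(95));
    assert_eq!(highest.min(), Some(90)); // lowest fully static-filed block
    assert_eq!(highest.max(), Some(100)); // highest block seen in any segment
}
```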
@@ -239,7 +249,7 @@ impl SegmentHeader { /// Increments tx end range depending on segment pub fn increment_tx(&mut self) { match self.segment { - StaticFileSegment::Headers => (), + StaticFileSegment::Headers | StaticFileSegment::Sidecars => (), StaticFileSegment::Transactions | StaticFileSegment::Receipts => { if let Some(tx_range) = &mut self.tx_range { tx_range.end += 1; @@ -253,7 +263,7 @@ impl SegmentHeader { /// Removes `num` elements from end of tx or block range. pub fn prune(&mut self, num: u64) { match self.segment { - StaticFileSegment::Headers => { + StaticFileSegment::Headers | StaticFileSegment::Sidecars => { if let Some(range) = &mut self.block_range { if num > range.end { self.block_range = None; @@ -297,7 +307,7 @@ impl SegmentHeader { /// Returns the row offset which depends on whether the segment is block or transaction based. pub fn start(&self) -> Option { match self.segment { - StaticFileSegment::Headers => self.block_start(), + StaticFileSegment::Headers | StaticFileSegment::Sidecars => self.block_start(), StaticFileSegment::Transactions | StaticFileSegment::Receipts => self.tx_start(), } } diff --git a/crates/storage/codecs/Cargo.toml b/crates/storage/codecs/Cargo.toml index 6864a5cf6..6f286a95b 100644 --- a/crates/storage/codecs/Cargo.toml +++ b/crates/storage/codecs/Cargo.toml @@ -15,7 +15,7 @@ workspace = true reth-codecs-derive = { path = "./derive", default-features = false } # eth -alloy-consensus = { workspace = true, optional = true, features = ["arbitrary"] } +alloy-consensus = { workspace = true, optional = true } alloy-eips = { workspace = true, optional = true } alloy-genesis = { workspace = true, optional = true } alloy-primitives.workspace = true diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index b0927a148..bea26090d 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -14,8 +14,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml index cf7d7f546..09fe6cb52 100644 --- a/crates/storage/db-api/Cargo.toml +++ b/crates/storage/db-api/Cargo.toml @@ -72,4 +72,4 @@ arbitrary = [ "dep:arbitrary", "dep:proptest", ] -optimism = [] +optimism = ["reth-primitives/optimism"] diff --git a/crates/storage/db-api/src/lib.rs b/crates/storage/db-api/src/lib.rs index 284321092..cd25b3c65 100644 --- a/crates/storage/db-api/src/lib.rs +++ b/crates/storage/db-api/src/lib.rs @@ -58,8 +58,6 @@ )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] /// Common types used throughout the abstraction. 
pub mod common; @@ -81,3 +79,5 @@ pub mod models; mod scale; mod utils; + +pub use database::Database; diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 7f88c70af..a837ac0d3 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -241,6 +241,8 @@ impl_compression_for_compact!( PruneCheckpoint, ClientVersion, Requests, + BlobSidecar, + BlobSidecars, // Non-DB GenesisAccount ); @@ -336,71 +338,70 @@ mod tests { // // this check is to ensure we do not inadvertently add too many fields to a struct which would // expand the flags field and break backwards compatibility + #[cfg(not(feature = "optimism"))] #[test] fn test_ensure_backwards_compatibility() { - #[cfg(not(feature = "optimism"))] - { - assert_eq!(Account::bitflag_encoded_bytes(), 2); - assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); - assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); - assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); - assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); - assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); - assert_eq!(Header::bitflag_encoded_bytes(), 4); - assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); - assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); - assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); - assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); - assert_eq!(Receipt::bitflag_encoded_bytes(), 1); - assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); - assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); - assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); - assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); - assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); - assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); - assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); - assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); - assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); - } - - #[cfg(feature = "optimism")] - { - assert_eq!(Account::bitflag_encoded_bytes(), 2); - assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); - assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); - assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); - assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); - assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); - assert_eq!(Header::bitflag_encoded_bytes(), 4); - assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); - assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); - assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); - assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); - assert_eq!(Receipt::bitflag_encoded_bytes(), 2); - assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); - assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); - assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); - 
assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); - assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); - assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); - assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); - assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); - assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); - assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); - assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); - assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); - } + assert_eq!(Account::bitflag_encoded_bytes(), 2); + assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); + assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); + assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); + assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); + assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(Header::bitflag_encoded_bytes(), 4); + assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); + assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); + assert_eq!(Receipt::bitflag_encoded_bytes(), 1); + assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); + assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); + assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); + assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); + assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); + assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); + assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); + assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); + assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); + } + + #[cfg(feature = "optimism")] + #[test] + fn test_ensure_backwards_compatibility() { + assert_eq!(Account::bitflag_encoded_bytes(), 2); + assert_eq!(AccountHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(CheckpointBlockRange::bitflag_encoded_bytes(), 1); + assert_eq!(CompactClientVersion::bitflag_encoded_bytes(), 0); + assert_eq!(CompactU256::bitflag_encoded_bytes(), 1); + assert_eq!(CompactU64::bitflag_encoded_bytes(), 1); + assert_eq!(EntitiesCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(ExecutionCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(Header::bitflag_encoded_bytes(), 4); + assert_eq!(HeadersCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(IndexHistoryCheckpoint::bitflag_encoded_bytes(), 0); + assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); + assert_eq!(PruneSegment::bitflag_encoded_bytes(), 1); + assert_eq!(Receipt::bitflag_encoded_bytes(), 2); + assert_eq!(ReceiptWithBloom::bitflag_encoded_bytes(), 0); + assert_eq!(SealedHeader::bitflag_encoded_bytes(), 0); + assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); + 
assert_eq!(StoredBlockOmmers::bitflag_encoded_bytes(), 0); + assert_eq!(StoredBlockWithdrawals::bitflag_encoded_bytes(), 0); + assert_eq!(StorageHashingCheckpoint::bitflag_encoded_bytes(), 1); + assert_eq!(TxEip1559::bitflag_encoded_bytes(), 4); + assert_eq!(TxEip2930::bitflag_encoded_bytes(), 3); + assert_eq!(TxEip4844::bitflag_encoded_bytes(), 5); + assert_eq!(TxLegacy::bitflag_encoded_bytes(), 3); + assert_eq!(Withdrawals::bitflag_encoded_bytes(), 0); } #[test] diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index c2760f672..d80236def 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -19,6 +19,7 @@ reth-trie.workspace = true reth-etl.workspace = true reth-codecs.workspace = true reth-stages-types.workspace = true +reth-fs-util.workspace = true # eth alloy-genesis.workspace = true @@ -26,6 +27,7 @@ alloy-genesis.workspace = true # misc eyre.workspace = true thiserror.workspace = true +boyer-moore-magiclen.workspace = true # io serde.workspace = true diff --git a/bin/reth/src/utils.rs b/crates/storage/db-common/src/db_tool/mod.rs similarity index 95% rename from bin/reth/src/utils.rs rename to crates/storage/db-common/src/db_tool/mod.rs index 1dd4f6893..3884089b4 100644 --- a/bin/reth/src/utils.rs +++ b/crates/storage/db-common/src/db_tool/mod.rs @@ -1,4 +1,4 @@ -//! Common CLI utility functions. +//! Common db operations use boyer_moore_magiclen::BMByte; use eyre::Result; @@ -16,15 +16,6 @@ use reth_provider::{ChainSpecProvider, ProviderFactory}; use std::{path::Path, rc::Rc, sync::Arc}; use tracing::info; -/// Exposing `open_db_read_only` function -pub mod db { - pub use reth_db::open_db_read_only; -} - -/// Re-exported from `reth_node_core`, also to prevent a breaking change. See the comment on -/// the `reth_node_core::args` re-export for more details. -pub use reth_node_core::utils::*; - /// Wrapper over DB that implements many useful DB queries. #[derive(Debug)] pub struct DbTool { diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index c6b4802b7..e4755d390 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -285,6 +285,10 @@ pub fn insert_genesis_header( let (difficulty, hash) = (header.difficulty, block_hash); let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?; writer.append_header(header, difficulty, hash)?; + + // skip the zero block index + let mut writer = static_file_provider.latest_writer(StaticFileSegment::Sidecars)?; + writer.append_sidecars(Default::default(), 0, B256::ZERO)?; } Ok(Some(_)) => {} Err(e) => return Err(e), @@ -464,19 +468,17 @@ fn compute_state_root(provider: &DatabaseProviderRW) -> eyre:: .root_with_progress()? 
{ StateRootProgress::Progress(state, _, updates) => { - let updates_len = updates.len(); + let updated_len = updates.write_to_database(tx)?; + total_flushed_updates += updated_len; trace!(target: "reth::cli", last_account_key = %state.last_account_key, - updates_len, + updated_len, total_flushed_updates, "Flushing trie updates" ); intermediate_state = Some(*state); - updates.flush(tx)?; - - total_flushed_updates += updates_len; if total_flushed_updates % SOFT_LIMIT_COUNT_FLUSHED_UPDATES == 0 { info!(target: "reth::cli", @@ -486,15 +488,12 @@ fn compute_state_root(provider: &DatabaseProviderRW) -> eyre:: } } StateRootProgress::Complete(root, _, updates) => { - let updates_len = updates.len(); - - updates.flush(tx)?; - - total_flushed_updates += updates_len; + let updated_len = updates.write_to_database(tx)?; + total_flushed_updates += updated_len; trace!(target: "reth::cli", %root, - updates_len = updates_len, + updated_len, total_flushed_updates, "State root has been computed" ); @@ -526,7 +525,7 @@ struct GenesisAccountWithAddress { mod tests { use super::*; use alloy_genesis::Genesis; - use reth_chainspec::{Chain, GOERLI, MAINNET, SEPOLIA}; + use reth_chainspec::{Chain, HOLESKY, MAINNET, SEPOLIA}; use reth_db::DatabaseEnv; use reth_db_api::{ cursor::DbCursorRO, @@ -534,7 +533,7 @@ mod tests { table::{Table, TableRow}, transaction::DbTx, }; - use reth_primitives::{GOERLI_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; + use reth_primitives::{HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH}; use reth_primitives_traits::IntegerList; use reth_provider::test_utils::create_test_provider_factory_with_chain_spec; @@ -558,21 +557,21 @@ mod tests { } #[test] - fn success_init_genesis_goerli() { + fn success_init_genesis_sepolia() { let genesis_hash = - init_genesis(create_test_provider_factory_with_chain_spec(GOERLI.clone())).unwrap(); + init_genesis(create_test_provider_factory_with_chain_spec(SEPOLIA.clone())).unwrap(); // actual, expected - assert_eq!(genesis_hash, GOERLI_GENESIS_HASH); + assert_eq!(genesis_hash, SEPOLIA_GENESIS_HASH); } #[test] - fn success_init_genesis_sepolia() { + fn success_init_genesis_holesky() { let genesis_hash = - init_genesis(create_test_provider_factory_with_chain_spec(SEPOLIA.clone())).unwrap(); + init_genesis(create_test_provider_factory_with_chain_spec(HOLESKY.clone())).unwrap(); // actual, expected - assert_eq!(genesis_hash, SEPOLIA_GENESIS_HASH); + assert_eq!(genesis_hash, HOLESKY_GENESIS_HASH); } #[test] @@ -620,7 +619,7 @@ mod tests { ]), ..Default::default() }, - hardforks: BTreeMap::default(), + hardforks: Default::default(), genesis_hash: None, paris_block_and_final_difficulty: None, deposit_contract: None, diff --git a/crates/storage/db-common/src/lib.rs b/crates/storage/db-common/src/lib.rs index abcbc6276..173e53143 100644 --- a/crates/storage/db-common/src/lib.rs +++ b/crates/storage/db-common/src/lib.rs @@ -9,3 +9,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub mod init; + +mod db_tool; +pub use db_tool::*; diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index 336cc75d2..117ec5ccc 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -18,16 +18,19 @@ reth-primitives.workspace = true reth-primitives-traits.workspace = true reth-fs-util.workspace = true reth-storage-errors.workspace = true -reth-libmdbx = { workspace = true, optional = true, features = [ - "return-borrowed", - "read-tx-timeouts", -] } reth-nippy-jar.workspace = true reth-prune-types.workspace = 
true reth-stages-types.workspace = true reth-tracing.workspace = true reth-trie-common.workspace = true +# mdbx +reth-libmdbx = { workspace = true, optional = true, features = [ + "return-borrowed", + "read-tx-timeouts", +] } +eyre = { workspace = true, optional = true } + # codecs serde = { workspace = true, default-features = false } @@ -41,7 +44,6 @@ page_size = "0.6.0" thiserror.workspace = true tempfile = { workspace = true, optional = true } derive_more.workspace = true -eyre.workspace = true paste.workspace = true rustc-hash.workspace = true sysinfo = { version = "0.30", default-features = false } @@ -75,8 +77,8 @@ assert_matches.workspace = true [features] default = ["mdbx"] -test-utils = ["tempfile", "arbitrary"] -mdbx = ["reth-libmdbx"] +mdbx = ["dep:reth-libmdbx", "dep:eyre"] +test-utils = ["dep:tempfile", "arbitrary"] bench = [] arbitrary = ["reth-primitives/arbitrary", "reth-db-api/arbitrary"] optimism = [] diff --git a/crates/storage/db/benches/hash_keys.rs b/crates/storage/db/benches/hash_keys.rs index d37146fd1..1807e6f4a 100644 --- a/crates/storage/db/benches/hash_keys.rs +++ b/crates/storage/db/benches/hash_keys.rs @@ -130,7 +130,6 @@ where /// Generates two batches. The first is to be inserted into the database before running the /// benchmark. The second is to be benchmarked with. -#[allow(clippy::type_complexity)] fn generate_batches(size: usize) -> (Vec>, Vec>) where T: Table, diff --git a/crates/storage/db/src/mdbx.rs b/crates/storage/db/src/mdbx.rs index 328b9caab..d6947e10b 100644 --- a/crates/storage/db/src/mdbx.rs +++ b/crates/storage/db/src/mdbx.rs @@ -1,12 +1,12 @@ //! Bindings for [MDBX](https://libmdbx.dqdkfa.ru/). -pub use crate::implementation::mdbx::*; -pub use reth_libmdbx::*; - use crate::is_database_empty; use eyre::Context; use std::path::Path; +pub use crate::implementation::mdbx::*; +pub use reth_libmdbx::*; + /// Creates a new database at the specified path if it doesn't exist. Does NOT create tables. Check /// [`init_db`]. pub fn create_db>(path: P, args: DatabaseArguments) -> eyre::Result { @@ -31,21 +31,17 @@ pub fn create_db>(path: P, args: DatabaseArguments) -> eyre::Resu /// Opens up an existing database or creates a new one at the specified path. Creates tables if /// necessary. Read/Write mode. pub fn init_db>(path: P, args: DatabaseArguments) -> eyre::Result { - { - let client_version = args.client_version().clone(); - let db = create_db(path, args)?; - db.create_tables()?; - db.record_client_version(client_version)?; - Ok(db) - } + let client_version = args.client_version().clone(); + let db = create_db(path, args)?; + db.create_tables()?; + db.record_client_version(client_version)?; + Ok(db) } /// Opens up an existing database. Read only mode. It doesn't create it or create tables if missing. pub fn open_db_read_only(path: &Path, args: DatabaseArguments) -> eyre::Result { - { - DatabaseEnv::open(path, DatabaseEnvKind::RO, args) - .with_context(|| format!("Could not open database at path: {}", path.display())) - } + DatabaseEnv::open(path, DatabaseEnvKind::RO, args) + .with_context(|| format!("Could not open database at path: {}", path.display())) } /// Opens up an existing database. Read/Write mode with `WriteMap` enabled. 
It doesn't create it or diff --git a/crates/storage/db/src/static_file/cursor.rs b/crates/storage/db/src/static_file/cursor.rs index 9a93ca224..4a052c6ab 100644 --- a/crates/storage/db/src/static_file/cursor.rs +++ b/crates/storage/db/src/static_file/cursor.rs @@ -10,6 +10,9 @@ use std::sync::Arc; #[derive(Debug, Deref, DerefMut)] pub struct StaticFileCursor<'a>(NippyJarCursor<'a, SegmentHeader>); +/// Type alias for column results with optional values. +type ColumnResult<T> = ProviderResult<Option<T>>; + impl<'a> StaticFileCursor<'a> { /// Returns a new [`StaticFileCursor`]. pub fn new(jar: &'a NippyJar<SegmentHeader>, reader: Arc<DataReader>) -> ProviderResult<Self> { @@ -56,7 +59,7 @@ impl<'a> StaticFileCursor<'a> { pub fn get_one<M: ColumnSelectorOne>( &mut self, key_or_num: KeyOrNumber<'_>, - ) -> ProviderResult<Option<M::FIRST>> { + ) -> ColumnResult<M::FIRST> { let row = self.get(key_or_num, M::MASK)?; match row { @@ -69,7 +72,7 @@ pub fn get_two<M: ColumnSelectorTwo>( &mut self, key_or_num: KeyOrNumber<'_>, - ) -> ProviderResult<Option<(M::FIRST, M::SECOND)>> { + ) -> ColumnResult<(M::FIRST, M::SECOND)> { let row = self.get(key_or_num, M::MASK)?; match row { @@ -79,11 +82,10 @@ } /// Gets three column values from a row. - #[allow(clippy::type_complexity)] pub fn get_three<M: ColumnSelectorThree>( &mut self, key_or_num: KeyOrNumber<'_>, - ) -> ProviderResult<Option<(M::FIRST, M::SECOND, M::THIRD)>> { + ) -> ColumnResult<(M::FIRST, M::SECOND, M::THIRD)> { let row = self.get(key_or_num, M::MASK)?; match row { diff --git a/crates/storage/db/src/static_file/generation.rs b/crates/storage/db/src/static_file/generation.rs deleted file mode 100644 index 9c2a64a23..000000000 --- a/crates/storage/db/src/static_file/generation.rs +++ /dev/null @@ -1,115 +0,0 @@ -use crate::{RawKey, RawTable}; -use reth_db_api::{ - cursor::DbCursorRO, - table::{Key, Table}, - transaction::DbTx, -}; - -use reth_nippy_jar::{ColumnResult, NippyJar, NippyJarHeader, PHFKey}; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use reth_tracing::tracing::*; -use std::{error::Error as StdError, ops::RangeInclusive}; - -/// Macro that generates static file creation functions that take an arbitrary number of [`Table`] -/// and creates a [`NippyJar`] file out of their [`Table::Value`]. Each list of [`Table::Value`] -/// from a table is a column of values. -/// -/// Has membership filter set and compression dictionary support. -macro_rules! generate_static_file_func { - ($(($($tbl:ident),+)),+ $(,)? ) => { - $( - paste::item! { - /// Creates a static file from specified tables. Each table's `Value` iterator represents a column. - /// - /// **Ensure the range contains the same number of rows.** - /// - /// * `tx`: Database transaction. - /// * `range`: Data range for columns in tables. - /// * `additional`: Additional columns which can't be straightforwardly walked on. - /// * `keys`: IntoIterator of keys (eg. `TxHash` or `BlockHash`) with length equal to `row_count` and ordered by future column insertion from `range`. - /// * `dict_compression_set`: Sets of column data for compression dictionaries. Max size is 2GB. Row count is independent. - /// * `row_count`: Total rows to add to `NippyJar`. Must match row count in `range`. - /// * `nippy_jar`: Static File object responsible for file generation.
- #[allow(non_snake_case)] - pub fn []< - $($tbl: Table,)+ - K, - H: NippyJarHeader - > - ( - tx: &impl DbTx, - range: RangeInclusive, - additional: Option, Box>>>>>, - dict_compression_set: Option>>>, - keys: Option>>, - row_count: usize, - mut nippy_jar: NippyJar - ) -> ProviderResult<()> - where K: Key + Copy - { - let additional = additional.unwrap_or_default(); - debug!(target: "reth::static_file", ?range, "Creating static file {:?} and {} more columns.", vec![$($tbl::NAME,)+], additional.len()); - - let range: RangeInclusive> = RawKey::new(*range.start())..=RawKey::new(*range.end()); - - // Create PHF and Filter if required - if let Some(keys) = keys { - debug!(target: "reth::static_file", "Calculating Filter, PHF and offset index list"); - match nippy_jar.prepare_index(keys, row_count) { - Ok(_) => { - debug!(target: "reth::static_file", "Filter, PHF and offset index list calculated."); - }, - Err(e) => { - return Err(ProviderError::NippyJar(e.to_string())); - } - } - } - - // Create compression dictionaries if required - if let Some(data_sets) = dict_compression_set { - debug!(target: "reth::static_file", "Creating compression dictionaries."); - match nippy_jar.prepare_compression(data_sets){ - Ok(_) => { - debug!(target: "reth::static_file", "Compression dictionaries created."); - }, - Err(e) => { - return Err(ProviderError::NippyJar(e.to_string())); - } - } - - } - - // Creates the cursors for the columns - $( - let mut [< $tbl _cursor>] = tx.cursor_read::>()?; - let [< $tbl _iter>] = [< $tbl _cursor>] - .walk_range(range.clone())? - .into_iter() - .map(|row| - row - .map(|(_key, val)| val.into_value()) - .map_err(|e| Box::new(e) as Box) - ); - - )+ - - // Create the static file from the data - let col_iterators: Vec,_>>>> = vec![ - $(Box::new([< $tbl _iter>]),)+ - ]; - - - debug!(target: "reth::static_file", jar=?nippy_jar, "Generating static file."); - - let nippy_jar = nippy_jar.freeze(col_iterators.into_iter().chain(additional).collect(), row_count as u64).map_err(|e| ProviderError::NippyJar(e.to_string())); - - debug!(target: "reth::static_file", jar=?nippy_jar, "Static file generated."); - - Ok(()) - } - } - )+ - }; -} - -generate_static_file_func!((T1), (T1, T2), (T1, T2, T3), (T1, T2, T3, T4), (T1, T2, T3, T4, T5),); diff --git a/crates/storage/db/src/static_file/mask.rs b/crates/storage/db/src/static_file/mask.rs index f5d35a193..083285e29 100644 --- a/crates/storage/db/src/static_file/mask.rs +++ b/crates/storage/db/src/static_file/mask.rs @@ -31,7 +31,7 @@ macro_rules! add_segments { } }; } -add_segments!(Header, Receipt, Transaction); +add_segments!(Header, Receipt, Transaction, Sidecar); /// Trait for specifying a mask to select one column value. 
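// Rough shape of what `add_static_file_mask!` produces for a one-column
// selector (a sketch: the `*Sketch` names are illustrative and the
// decompression bounds are elided; the real trait follows below).
use reth_primitives_traits::BlobSidecars;

pub trait ColumnSelectorOneSketch {
    type FIRST;
    const MASK: usize;
}

pub struct SidecarMaskSketch;

impl ColumnSelectorOneSketch for SidecarMaskSketch {
    // `0b01`: materialize only the first column (the sidecars blob) of a row;
    // `0b10` would select the second column, `0b11` both.
    type FIRST = BlobSidecars;
    const MASK: usize = 0b01;
}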
pub trait ColumnSelectorOne { diff --git a/crates/storage/db/src/static_file/masks.rs b/crates/storage/db/src/static_file/masks.rs index f3ce90f0d..40b5406e0 100644 --- a/crates/storage/db/src/static_file/masks.rs +++ b/crates/storage/db/src/static_file/masks.rs @@ -1,4 +1,4 @@ -use super::{ReceiptMask, TransactionMask}; +use super::{ReceiptMask, SidecarMask, TransactionMask}; use crate::{ add_static_file_mask, static_file::mask::{ColumnSelectorOne, ColumnSelectorTwo, HeaderMask}, @@ -6,6 +6,7 @@ use crate::{ }; use reth_db_api::table::Table; use reth_primitives::{BlockHash, Header}; +use reth_primitives_traits::BlobSidecars; // HEADER MASKS add_static_file_mask!(HeaderMask, Header, 0b001); @@ -20,3 +21,8 @@ add_static_file_mask!(ReceiptMask, ::Value, 0b1); // TRANSACTION MASKS add_static_file_mask!(TransactionMask, ::Value, 0b1); add_static_file_mask!(TransactionMask, RawValue<::Value>, 0b1); + +// SIDECARS MASKS +add_static_file_mask!(SidecarMask, BlobSidecars, 0b01); +add_static_file_mask!(SidecarMask, BlockHash, 0b10); +add_static_file_mask!(SidecarMask, BlobSidecars, BlockHash, 0b11); diff --git a/crates/storage/db/src/static_file/mod.rs b/crates/storage/db/src/static_file/mod.rs index daa6f8a81..f27a574f6 100644 --- a/crates/storage/db/src/static_file/mod.rs +++ b/crates/storage/db/src/static_file/mod.rs @@ -1,13 +1,10 @@ //! reth's static file database table import and access -mod generation; use std::{ collections::{hash_map::Entry, HashMap}, path::Path, }; -pub use generation::*; - mod cursor; pub use cursor::StaticFileCursor; diff --git a/crates/storage/db/src/tables/mod.rs b/crates/storage/db/src/tables/mod.rs index 991b77b35..bcc74f946 100644 --- a/crates/storage/db/src/tables/mod.rs +++ b/crates/storage/db/src/tables/mod.rs @@ -11,9 +11,6 @@ //! //! TODO(onbjerg): Find appropriate format for this... -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] - pub mod codecs; mod raw; @@ -35,7 +32,7 @@ use reth_primitives::{ parlia::Snapshot, Account, Address, BlockHash, BlockNumber, Bytecode, Header, Receipt, Requests, StorageEntry, TransactionSignedNoHash, TxHash, TxNumber, B256, }; -use reth_primitives_traits::IntegerList; +use reth_primitives_traits::{BlobSidecars, IntegerList}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::StageCheckpoint; use reth_trie_common::{StorageTrieEntry, StoredBranchNode, StoredNibbles, StoredNibblesSubKey}; @@ -309,6 +306,9 @@ tables! { /// Canonical only Stores transaction receipts. table Receipts; + /// Canonical only Stores block sidecars. + table Sidecars; + /// Stores all smart contract bytecodes. /// There will be multiple accounts that have same bytecode /// So we would need to introduce reference counter. diff --git a/crates/storage/errors/src/provider.rs b/crates/storage/errors/src/provider.rs index 52a010474..db59d671f 100644 --- a/crates/storage/errors/src/provider.rs +++ b/crates/storage/errors/src/provider.rs @@ -96,9 +96,6 @@ pub enum ProviderError { /// Thrown when we were unable to find a state for a block hash. #[error("no state found for block {0}")] StateForHashNotFound(B256), - /// Unable to compute state root on top of historical block. - #[error("unable to compute state root on top of historical block")] - StateRootNotAvailableForHistoricalBlock, /// Unable to find the block number for a given transaction index. 
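// Reading the new `Sidecars` table directly, as the database fallback later in
// this diff does; the `DatabaseError` re-export path is an assumption.
use reth_db::tables;
use reth_db_api::{transaction::DbTx, DatabaseError};
use reth_primitives_traits::BlobSidecars;

fn sidecars_at<TX: DbTx>(tx: &TX, num: u64) -> Result<Option<BlobSidecars>, DatabaseError> {
    // Keyed by canonical block number; the value is the block's `BlobSidecars`.
    tx.get::<tables::Sidecars>(num)
}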
#[error("unable to find the block number for a given transaction index")] BlockNumberForTransactionIndexNotFound, diff --git a/crates/storage/nippy-jar/src/compression/mod.rs b/crates/storage/nippy-jar/src/compression/mod.rs index 76e8c6d16..28a92fe90 100644 --- a/crates/storage/nippy-jar/src/compression/mod.rs +++ b/crates/storage/nippy-jar/src/compression/mod.rs @@ -30,6 +30,7 @@ pub trait Compression: Serialize + for<'a> Deserialize<'a> { true } + #[cfg(test)] /// If required, prepares compression algorithm with an early pass on the data. fn prepare_compression( &mut self, @@ -95,6 +96,7 @@ impl Compression for Compressors { } } + #[cfg(test)] fn prepare_compression( &mut self, columns: Vec>>, diff --git a/crates/storage/nippy-jar/src/compression/zstd.rs b/crates/storage/nippy-jar/src/compression/zstd.rs index c55ca103a..494d79de5 100644 --- a/crates/storage/nippy-jar/src/compression/zstd.rs +++ b/crates/storage/nippy-jar/src/compression/zstd.rs @@ -185,6 +185,7 @@ impl Compression for Zstd { matches!(self.state, ZstdState::Ready) } + #[cfg(test)] /// If using it with dictionaries, prepares a dictionary for each column. fn prepare_compression( &mut self, @@ -208,7 +209,6 @@ impl Compression for Zstd { return Err(NippyJarError::ColumnLenMismatch(self.columns, columns.len())) } - // TODO: parallel calculation let mut dictionaries = vec![]; for column in columns { // ZSTD requires all training data to be continuous in memory, alongside the size of @@ -273,6 +273,7 @@ impl<'a> std::fmt::Debug for ZstdDictionaries<'a> { } impl<'a> ZstdDictionaries<'a> { + #[cfg(test)] /// Creates [`ZstdDictionaries`]. pub(crate) fn new(raw: Vec) -> Self { Self(raw.into_iter().map(ZstdDictionary::Raw).collect()) @@ -315,6 +316,7 @@ impl<'a> ZstdDictionaries<'a> { /// A Zstd dictionary. It's created and serialized with [`ZstdDictionary::Raw`], and deserialized as /// [`ZstdDictionary::Loaded`]. pub(crate) enum ZstdDictionary<'a> { + #[allow(dead_code)] Raw(RawDictionary), Loaded(DecoderDictionary<'a>), } diff --git a/crates/storage/nippy-jar/src/cursor.rs b/crates/storage/nippy-jar/src/cursor.rs index 434c40a9a..d42b0d364 100644 --- a/crates/storage/nippy-jar/src/cursor.rs +++ b/crates/storage/nippy-jar/src/cursor.rs @@ -67,7 +67,7 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { self.row = 0; } - /// Returns a row, searching it by a key used during [`NippyJar::prepare_index`]. + /// Returns a row, searching it by a key. /// /// **May return false positives.** /// @@ -130,7 +130,7 @@ impl<'a, H: NippyJarHeader> NippyJarCursor<'a, H> { )) } - /// Returns a row, searching it by a key used during [`NippyJar::prepare_index`] by using a + /// Returns a row, searching it by a key using a /// `mask` to only read certain columns from the row. 
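// What `prepare_compression` (now test-only above) trains per column: zstd
// wants its training samples contiguous in memory, so each column's values are
// concatenated and passed alongside their sizes. A standalone sketch with the
// `zstd` crate; `max_dict_size` is illustrative.
fn train_dictionary(column: &[Vec<u8>], max_dict_size: usize) -> std::io::Result<Vec<u8>> {
    // Remember each sample's length, then lay the samples out back to back.
    let sizes: Vec<usize> = column.iter().map(|v| v.len()).collect();
    let data: Vec<u8> = column.iter().flatten().copied().collect();
    // One dictionary per column, as in the loop above.
    zstd::dict::from_continuous(&data, &sizes, max_dict_size)
}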
/// /// **May return false positives.** diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index a440a9cb3..225d4fba3 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -24,7 +24,7 @@ pub enum NippyJarError { #[error("unexpected missing value: row:col {0}:{1}")] UnexpectedMissingValue(u64, u64), #[error(transparent)] - FilterError(#[from] cuckoofilter::CuckooError), + EthFilterError(#[from] cuckoofilter::CuckooError), #[error("nippy jar initialized without filter")] FilterMissing, #[error("filter has reached max capacity")] diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 8247599d7..056f456eb 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -28,7 +28,9 @@ pub mod filter; use filter::{Cuckoo, InclusionFilter, InclusionFilters}; pub mod compression; -use compression::{Compression, Compressors}; +#[cfg(test)] +use compression::Compression; +use compression::Compressors; pub mod phf; pub use phf::PHFKey; @@ -306,6 +308,56 @@ impl NippyJar { DataReader::new(self.data_path()) } + /// Writes all necessary configuration to file. + fn freeze_config(&self) -> Result<(), NippyJarError> { + // Atomic writes are hard: + let mut tmp_path = self.config_path(); + tmp_path.set_extension(".tmp"); + + // Write to temporary file + let mut file = File::create(&tmp_path)?; + bincode::serialize_into(&mut file, &self)?; + + // fsync() file + file.sync_all()?; + + // Rename file, not move + reth_fs_util::rename(&tmp_path, self.config_path())?; + + // fsync() dir + if let Some(parent) = tmp_path.parent() { + OpenOptions::new().read(true).open(parent)?.sync_all()?; + } + Ok(()) + } +} + +impl InclusionFilter for NippyJar { + fn add(&mut self, element: &[u8]) -> Result<(), NippyJarError> { + self.filter.as_mut().ok_or(NippyJarError::FilterMissing)?.add(element) + } + + fn contains(&self, element: &[u8]) -> Result { + self.filter.as_ref().ok_or(NippyJarError::FilterMissing)?.contains(element) + } + + fn size(&self) -> usize { + self.filter.as_ref().map(|f| f.size()).unwrap_or(0) + } +} + +impl PerfectHashingFunction for NippyJar { + fn set_keys(&mut self, keys: &[T]) -> Result<(), NippyJarError> { + self.phf.as_mut().ok_or(NippyJarError::PHFMissing)?.set_keys(keys) + } + + fn get_index(&self, key: &[u8]) -> Result, NippyJarError> { + self.phf.as_ref().ok_or(NippyJarError::PHFMissing)?.get_index(key) + } +} + +#[cfg(test)] +impl NippyJar { /// If required, prepares any compression algorithm to an early pass of the data. pub fn prepare_compression( &mut self, @@ -429,53 +481,6 @@ impl NippyJar { Ok(()) } - - /// Writes all necessary configuration to file. 
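// The durability recipe `freeze_config` above follows, in isolation: write to
// a sibling temp file, fsync it, rename over the target, then fsync the parent
// directory so the rename itself survives a crash (Unix-oriented sketch).
use std::{
    fs::{File, OpenOptions},
    io::Write,
    path::Path,
};

fn atomic_write(target: &Path, bytes: &[u8]) -> std::io::Result<()> {
    let tmp = target.with_extension("tmp");

    // Write the payload and fsync() the file contents.
    let mut file = File::create(&tmp)?;
    file.write_all(bytes)?;
    file.sync_all()?;

    // Atomic replace; requires both paths on the same filesystem.
    std::fs::rename(&tmp, target)?;

    // fsync() the directory so the rename is persisted.
    if let Some(parent) = target.parent() {
        OpenOptions::new().read(true).open(parent)?.sync_all()?;
    }
    Ok(())
}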
- fn freeze_config(&self) -> Result<(), NippyJarError> { - // Atomic writes are hard: - let mut tmp_path = self.config_path(); - tmp_path.set_extension(".tmp"); - - // Write to temporary file - let mut file = File::create(&tmp_path)?; - bincode::serialize_into(&mut file, &self)?; - - // fsync() file - file.sync_all()?; - - // Rename file, not move - reth_fs_util::rename(&tmp_path, self.config_path())?; - - // fsync() dir - if let Some(parent) = tmp_path.parent() { - OpenOptions::new().read(true).open(parent)?.sync_all()?; - } - Ok(()) - } -} - -impl InclusionFilter for NippyJar { - fn add(&mut self, element: &[u8]) -> Result<(), NippyJarError> { - self.filter.as_mut().ok_or(NippyJarError::FilterMissing)?.add(element) - } - - fn contains(&self, element: &[u8]) -> Result { - self.filter.as_ref().ok_or(NippyJarError::FilterMissing)?.contains(element) - } - - fn size(&self) -> usize { - self.filter.as_ref().map(|f| f.size()).unwrap_or(0) - } -} - -impl PerfectHashingFunction for NippyJar { - fn set_keys(&mut self, keys: &[T]) -> Result<(), NippyJarError> { - self.phf.as_mut().ok_or(NippyJarError::PHFMissing)?.set_keys(keys) - } - - fn get_index(&self, key: &[u8]) -> Result, NippyJarError> { - self.phf.as_ref().ok_or(NippyJarError::PHFMissing)?.get_index(key) - } } /// Manages the reading of static file data using memory-mapped files. @@ -581,6 +586,7 @@ impl DataReader { #[cfg(test)] mod tests { use super::*; + use compression::Compression; use rand::{rngs::SmallRng, seq::SliceRandom, RngCore, SeedableRng}; use std::{collections::HashSet, fs::OpenOptions}; diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/execution_outcome.rs similarity index 99% rename from crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs rename to crates/storage/provider/src/bundle_state/execution_outcome.rs index 85452c564..efa091aab 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/execution_outcome.rs @@ -62,7 +62,7 @@ impl StateWriter for ExecutionOutcome { if !self.snapshots.is_empty() { let mut snapshot_cursor = tx.cursor_write::()?; for snap in self.snapshots { - snapshot_cursor.insert(snap.block_hash, snap)?; + snapshot_cursor.upsert(snap.block_hash, snap)?; } } @@ -900,7 +900,7 @@ mod tests { } let (_, updates) = StateRoot::from_tx(tx).root_with_updates().unwrap(); - updates.flush(tx).unwrap(); + updates.write_to_database(tx).unwrap(); }) .unwrap(); diff --git a/crates/storage/provider/src/bundle_state/mod.rs b/crates/storage/provider/src/bundle_state/mod.rs index d1a9e9b2a..eaf3dab43 100644 --- a/crates/storage/provider/src/bundle_state/mod.rs +++ b/crates/storage/provider/src/bundle_state/mod.rs @@ -1,13 +1,12 @@ //! Bundle state module. //! This module contains all the logic related to bundle state. 
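// Why the snapshot write above switched from `insert` to `upsert`: `insert`
// fails when the key already exists, while `upsert` overwrites idempotently,
// which matters when the same parlia snapshot is seen twice. A stand-in on a
// sorted map (the cursor API itself is unchanged):
use std::collections::BTreeMap;

fn insert_strict(
    map: &mut BTreeMap<u64, &'static str>,
    k: u64,
    v: &'static str,
) -> Result<(), String> {
    if map.contains_key(&k) {
        return Err(format!("key {k} already exists")); // `insert`-like failure
    }
    map.insert(k, v);
    Ok(())
}

fn main() {
    let mut map = BTreeMap::new();
    insert_strict(&mut map, 1, "snapshot-a").unwrap();
    // Re-processing the same block would hit this error with `insert`:
    assert!(insert_strict(&mut map, 1, "snapshot-a").is_err());
    // `upsert` semantics: unconditional write, last value wins.
    map.insert(1, "snapshot-b");
    assert_eq!(map[&1], "snapshot-b");
}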
-mod bundle_state_with_receipts; + +mod execution_outcome; mod hashed_state_changes; mod state_changes; mod state_reverts; -pub use bundle_state_with_receipts::{ - AccountRevertInit, BundleStateInit, OriginalValuesKnown, RevertsInit, -}; +pub use execution_outcome::{AccountRevertInit, BundleStateInit, OriginalValuesKnown, RevertsInit}; pub use hashed_state_changes::HashedStateChanges; pub use state_changes::StateChanges; pub use state_reverts::{StateReverts, StorageRevertsIter}; diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index 49fb196ff..6e09ff389 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -2,7 +2,8 @@ use crate::{ AccountReader, BlockHashReader, ExecutionDataProvider, StateProvider, StateRootProvider, }; use reth_primitives::{Account, Address, BlockNumber, Bytecode, B256}; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; +use reth_storage_api::StateProofProvider; +use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, AccountProof}; use revm::db::BundleState; @@ -80,6 +81,21 @@ impl StateRootProvider } } +impl StateProofProvider + for BundleStateProvider +{ + fn proof( + &self, + bundle_state: &BundleState, + address: Address, + slots: &[B256], + ) -> ProviderResult { + let mut state = self.block_execution_data_provider.execution_outcome().state().clone(); + state.extend(bundle_state.clone()); + self.state_provider.proof(&state, address, slots) + } +} + impl StateProvider for BundleStateProvider { fn storage( &self, @@ -107,8 +123,4 @@ impl StateProvider for BundleStat self.state_provider.bytecode_by_hash(code_hash) } - - fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult { - Err(ProviderError::StateRootNotAvailableForHistoricalBlock) - } } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index cfd4b627a..43dfb700c 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -14,13 +14,14 @@ use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - parlia::Snapshot, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, - Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, - TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, - Withdrawals, B256, U256, + parlia::Snapshot, Address, BlobSidecars, Block, BlockHash, BlockHashOrNumber, BlockNumber, + BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, + StaticFileSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, + TxNumber, Withdrawal, Withdrawals, B256, U256, }; -use reth_prune_types::{PruneCheckpoint, PruneSegment}; +use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; +use reth_storage_api::SidecarsProvider; use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ @@ -47,6 +48,8 @@ pub struct ProviderFactory { chain_spec: Arc, /// Static File Provider static_file_provider: StaticFileProvider, + /// Optional pruning configuration + prune_modes: 
PruneModes, } impl ProviderFactory { @@ -56,7 +59,7 @@ impl ProviderFactory { chain_spec: Arc, static_file_provider: StaticFileProvider, ) -> Self { - Self { db: Arc::new(db), chain_spec, static_file_provider } + Self { db: Arc::new(db), chain_spec, static_file_provider, prune_modes: PruneModes::none() } } /// Enables metrics on the static file provider. @@ -65,6 +68,12 @@ impl ProviderFactory { self } + /// Sets the pruning configuration for an existing [`ProviderFactory`]. + pub fn with_prune_modes(mut self, prune_modes: PruneModes) -> Self { + self.prune_modes = prune_modes; + self + } + /// Returns reference to the underlying database. pub fn db_ref(&self) -> &DB { &self.db @@ -90,6 +99,7 @@ impl ProviderFactory { db: Arc::new(init_db(path, args).map_err(RethError::msg)?), chain_spec, static_file_provider, + prune_modes: PruneModes::none(), }) } } @@ -98,12 +108,16 @@ impl ProviderFactory { /// Returns a provider with a created `DbTx` inside, which allows fetching data from the /// database using different types of providers. Example: [`HeaderProvider`] /// [`BlockHashReader`]. This may fail if the inner read database transaction fails to open. + /// + /// This sets the [`PruneModes`] to [`None`], because they should only be relevant for writing + /// data. #[track_caller] pub fn provider(&self) -> ProviderResult> { Ok(DatabaseProvider::new( self.db.tx()?, self.chain_spec.clone(), self.static_file_provider.clone(), + self.prune_modes.clone(), )) } @@ -117,6 +131,7 @@ impl ProviderFactory { self.db.tx_mut()?, self.chain_spec.clone(), self.static_file_provider.clone(), + self.prune_modes.clone(), ))) } @@ -331,6 +346,14 @@ impl BlockReader for ProviderFactory { self.provider()?.block_with_senders(id, transaction_kind) } + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + self.provider()?.sealed_block_with_senders(id, transaction_kind) + } + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.provider()?.block_range(range) } @@ -470,6 +493,24 @@ impl WithdrawalsProvider for ProviderFactory { } } +impl SidecarsProvider for ProviderFactory +where + DB: Database, +{ + fn sidecars(&self, block_hash: &BlockHash) -> ProviderResult> { + self.provider()?.sidecars(block_hash) + } + + fn sidecars_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.static_file_provider.get_with_static_file_or_database( + StaticFileSegment::Sidecars, + num, + |static_file| static_file.sidecars_by_number(num), + || self.provider()?.sidecars_by_number(num), + ) + } +} + impl RequestsProvider for ProviderFactory where DB: Database, @@ -520,22 +561,6 @@ impl EvmEnvProvider for ProviderFactory { self.provider()?.fill_env_with_header(cfg, block_env, header, evm_config) } - fn fill_block_env_at( - &self, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - ) -> ProviderResult<()> { - self.provider()?.fill_block_env_at(block_env, at) - } - - fn fill_block_env_with_header( - &self, - block_env: &mut BlockEnv, - header: &Header, - ) -> ProviderResult<()> { - self.provider()?.fill_block_env_with_header(block_env, header) - } - fn fill_cfg_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, @@ -585,6 +610,7 @@ impl Clone for ProviderFactory { db: Arc::clone(&self.db), chain_spec: self.chain_spec.clone(), static_file_provider: self.static_file_provider.clone(), + prune_modes: self.prune_modes.clone(), } } } @@ -677,7 +703,7 @@ mod tests { { let provider = factory.provider_rw().unwrap(); assert_matches!( - 
provider.insert_block(block.clone().try_seal_with_senders().unwrap(), None), + provider.insert_block(block.clone().try_seal_with_senders().unwrap()), Ok(_) ); assert_matches!( @@ -688,16 +714,14 @@ mod tests { } { - let provider = factory.provider_rw().unwrap(); + let prune_modes = PruneModes { + sender_recovery: Some(PruneMode::Full), + transaction_lookup: Some(PruneMode::Full), + ..PruneModes::none() + }; + let provider = factory.with_prune_modes(prune_modes).provider_rw().unwrap(); assert_matches!( - provider.insert_block( - block.clone().try_seal_with_senders().unwrap(), - Some(&PruneModes { - sender_recovery: Some(PruneMode::Full), - transaction_lookup: Some(PruneMode::Full), - ..PruneModes::none() - }) - ), + provider.insert_block(block.clone().try_seal_with_senders().unwrap(),), Ok(_) ); assert_matches!(provider.transaction_sender(0), Ok(None)); @@ -717,7 +741,7 @@ mod tests { let provider = factory.provider_rw().unwrap(); assert_matches!( - provider.insert_block(block.clone().try_seal_with_senders().unwrap(), None), + provider.insert_block(block.clone().try_seal_with_senders().unwrap()), Ok(_) ); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index f7011d4e4..b132d6416 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -9,12 +9,12 @@ use crate::{ Chain, EvmEnvProvider, FinalizedBlockReader, FinalizedBlockWriter, HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HistoricalStateProvider, HistoryWriter, LatestStateProvider, OriginalValuesKnown, ParliaSnapshotReader, ProviderError, - PruneCheckpointReader, PruneCheckpointWriter, RequestsProvider, StageCheckpointReader, - StateProviderBox, StateWriter, StatsReader, StorageReader, TransactionVariant, - TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, + PruneCheckpointReader, PruneCheckpointWriter, RequestsProvider, SidecarsProvider, + StageCheckpointReader, StateProviderBox, StateWriter, StatsReader, StorageReader, + TransactionVariant, TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, }; use itertools::{izip, Itertools}; -use reth_chainspec::{ChainInfo, ChainSpec}; +use reth_chainspec::{ChainInfo, ChainSpec, EthereumHardforks}; use reth_db::{tables, BlockNumberList}; use reth_db_api::{ common::KeyValue, @@ -32,14 +32,11 @@ use reth_evm::ConfigureEvmEnv; use reth_execution_types::ExecutionOutcome; use reth_network_p2p::headers::downloader::SyncTarget; use reth_primitives::{ - keccak256, - parlia::Snapshot, - revm::{config::revm_spec, env::fill_block_env}, - Account, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, - GotExpected, Head, Header, Receipt, Requests, SealedBlock, SealedBlockWithSenders, - SealedHeader, StaticFileSegment, StorageEntry, TransactionMeta, TransactionSigned, - TransactionSignedEcRecovered, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, - Withdrawals, B256, U256, + keccak256, parlia::Snapshot, Account, Address, BlobSidecars, Block, BlockHash, + BlockHashOrNumber, BlockNumber, BlockWithSenders, GotExpected, Header, Receipt, Requests, + SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, + TransactionMeta, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash, + TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, }; use reth_prune_types::{PruneCheckpoint, PruneLimiter, PruneModes, PruneSegment}; use 
reth_stages_types::{StageCheckpoint, StageId}; @@ -49,7 +46,7 @@ use reth_trie::{ updates::TrieUpdates, HashedPostState, Nibbles, StateRoot, }; -use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId}; +use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ cmp::Ordering, collections::{hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, @@ -107,6 +104,8 @@ pub struct DatabaseProvider { chain_spec: Arc, /// Static File provider static_file_provider: StaticFileProvider, + /// Pruning configuration + prune_modes: PruneModes, } impl DatabaseProvider { @@ -122,8 +121,9 @@ impl DatabaseProvider { tx: TX, chain_spec: Arc, static_file_provider: StaticFileProvider, + prune_modes: PruneModes, ) -> Self { - Self { tx, chain_spec, static_file_provider } + Self { tx, chain_spec, static_file_provider, prune_modes } } } @@ -172,12 +172,12 @@ impl DatabaseProvider { } impl DatabaseProvider { - #[cfg(any(test, feature = "test-utils"))] - /// Inserts an historical block. Used for setting up test environments + // TODO: uncomment below, once `reth debug_cmd` has been feature gated with dev. + // #[cfg(any(test, feature = "test-utils"))] + /// Inserts an historical block. **Used for setting up test environments** pub fn insert_historical_block( &self, block: SealedBlockWithSenders, - prune_modes: Option<&PruneModes>, ) -> ProviderResult { let ttd = if block.number == 0 { block.difficulty @@ -201,7 +201,7 @@ impl DatabaseProvider { writer.append_header(block.header.as_ref().clone(), ttd, block.hash())?; - self.insert_block(block, prune_modes) + self.insert_block(block) } } @@ -259,8 +259,9 @@ impl DatabaseProvider { tx: TX, chain_spec: Arc, static_file_provider: StaticFileProvider, + prune_modes: PruneModes, ) -> Self { - Self { tx, chain_spec, static_file_provider } + Self { tx, chain_spec, static_file_provider, prune_modes } } /// Consume `DbTx` or `DbTxMut`. @@ -360,6 +361,66 @@ impl DatabaseProvider { ) } + fn block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + header_by_number: HF, + construct_block: BF, + ) -> ProviderResult> + where + H: AsRef
<Header>,
+        HF: FnOnce(BlockNumber) -> ProviderResult<Option<H>>,
+        BF: FnOnce(
+            H,
+            Vec<TransactionSigned>,
+            Vec<Address>,
+            Vec<Header>
, + Option, + Option, + ) -> ProviderResult>, + { + let Some(block_number) = self.convert_hash_or_number(id)? else { return Ok(None) }; + let Some(header) = header_by_number(block_number)? else { return Ok(None) }; + + let ommers = self.ommers(block_number.into())?.unwrap_or_default(); + let withdrawals = + self.withdrawals_by_block(block_number.into(), header.as_ref().timestamp)?; + let requests = self.requests_by_block(block_number.into(), header.as_ref().timestamp)?; + + // Get the block body + // + // If the body indices are not found, this means that the transactions either do not exist + // in the database yet, or they do exit but are not indexed. If they exist but are not + // indexed, we don't have enough information to return the block anyways, so we return + // `None`. + let Some(body) = self.block_body_indices(block_number)? else { return Ok(None) }; + + let tx_range = body.tx_num_range(); + + let (transactions, senders) = if tx_range.is_empty() { + (vec![], vec![]) + } else { + (self.transactions_by_tx_range(tx_range.clone())?, self.senders_by_tx_range(tx_range)?) + }; + + let body = transactions + .into_iter() + .map(|tx| match transaction_kind { + TransactionVariant::NoHash => TransactionSigned { + // Caller explicitly asked for no hash, so we don't calculate it + hash: B256::ZERO, + signature: tx.signature, + transaction: tx.transaction, + }, + TransactionVariant::WithHash => tx.with_hash(), + }) + .collect(); + + // the sidecars will always be None as this is not needed + construct_block(header, body, senders, ommers, withdrawals, requests) + } + /// Returns a range of blocks from the database. /// /// Uses the provided `headers_range` to get the headers for the range, and `assemble_block` to @@ -384,6 +445,7 @@ impl DatabaseProvider { Range, Vec
, Option, + Option, Option, ) -> ProviderResult, { @@ -441,7 +503,16 @@ impl DatabaseProvider { .unwrap_or_default() }; - if let Ok(b) = assemble_block(header, tx_range, ommers, withdrawals, requests) { + let sidecars = + if self.chain_spec.is_cancun_active_at_timestamp(header_ref.timestamp) { + self.static_file_provider.sidecars(&header_ref.hash_slow())? + } else { + None + }; + + if let Ok(b) = + assemble_block(header, tx_range, ommers, withdrawals, sidecars, requests) + { blocks.push(b); } } @@ -475,6 +546,7 @@ impl DatabaseProvider { Vec, Vec
, Option, + Option, Option, Vec
, ) -> ProviderResult, @@ -482,40 +554,43 @@ impl DatabaseProvider { let mut tx_cursor = self.tx.cursor_read::()?; let mut senders_cursor = self.tx.cursor_read::()?; - self.block_range(range, headers_range, |header, tx_range, ommers, withdrawals, requests| { - let (body, senders) = if tx_range.is_empty() { - (Vec::new(), Vec::new()) - } else { - let body = self - .transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? - .into_iter() - .map(Into::into) - .collect::>(); - // fetch senders from the senders table - let known_senders = - senders_cursor + self.block_range( + range, + headers_range, + |header, tx_range, ommers, withdrawals, sidecars, requests| { + let (body, senders) = if tx_range.is_empty() { + (Vec::new(), Vec::new()) + } else { + let body = self + .transactions_by_tx_range_with_cursor(tx_range.clone(), &mut tx_cursor)? + .into_iter() + .map(Into::into) + .collect::>(); + // fetch senders from the senders table + let known_senders = senders_cursor .walk_range(tx_range.clone())? .collect::, _>>()?; - let mut senders = Vec::with_capacity(body.len()); - for (tx_num, tx) in tx_range.zip(body.iter()) { - match known_senders.get(&tx_num) { - None => { - // recover the sender from the transaction if not found - let sender = tx - .recover_signer_unchecked() - .ok_or_else(|| ProviderError::SenderRecoveryError)?; - senders.push(sender); + let mut senders = Vec::with_capacity(body.len()); + for (tx_num, tx) in tx_range.zip(body.iter()) { + match known_senders.get(&tx_num) { + None => { + // recover the sender from the transaction if not found + let sender = tx + .recover_signer_unchecked() + .ok_or_else(|| ProviderError::SenderRecoveryError)?; + senders.push(sender); + } + Some(sender) => senders.push(*sender), } - Some(sender) => senders.push(*sender), } - } - (body, senders) - }; + (body, senders) + }; - assemble_block(header, body, ommers, withdrawals, requests, senders) - }) + assemble_block(header, body, ommers, withdrawals, sidecars, requests, senders) + }, + ) } } @@ -968,8 +1043,15 @@ impl DatabaseProvider { requests = None; } + // sidecars can be missing + let sidecars = if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { + self.static_file_provider.sidecars(&header.hash())? + } else { + None + }; + blocks.push(SealedBlockWithSenders { - block: SealedBlock { header, body, ommers, withdrawals, requests }, + block: SealedBlock { header, body, ommers, withdrawals, sidecars, requests }, senders, }) } @@ -1502,7 +1584,16 @@ impl BlockReader for DatabaseProvider { None => return Ok(None), }; - return Ok(Some(Block { header, body: transactions, ommers, withdrawals, requests })) + let sidecars = self.sidecars(&self.block_hash(number)?.unwrap_or_default())?; + + return Ok(Some(Block { + header, + body: transactions, + ommers, + withdrawals, + sidecars, + requests, + })) } } @@ -1553,48 +1644,41 @@ impl BlockReader for DatabaseProvider { id: BlockHashOrNumber, transaction_kind: TransactionVariant, ) -> ProviderResult> { - let Some(block_number) = self.convert_hash_or_number(id)? else { return Ok(None) }; - let Some(header) = self.header_by_number(block_number)? 
else { return Ok(None) }; - - let ommers = self.ommers(block_number.into())?.unwrap_or_default(); - let withdrawals = self.withdrawals_by_block(block_number.into(), header.timestamp)?; - let requests = self.requests_by_block(block_number.into(), header.timestamp)?; - - // Get the block body - // - // If the body indices are not found, this means that the transactions either do not exist - // in the database yet, or they do exit but are not indexed. If they exist but are not - // indexed, we don't have enough information to return the block anyways, so we return - // `None`. - let Some(body) = self.block_body_indices(block_number)? else { return Ok(None) }; - - let tx_range = body.tx_num_range(); - - let (transactions, senders) = if tx_range.is_empty() { - (vec![], vec![]) - } else { - (self.transactions_by_tx_range(tx_range.clone())?, self.senders_by_tx_range(tx_range)?) - }; - - let body = transactions - .into_iter() - .map(|tx| match transaction_kind { - TransactionVariant::NoHash => TransactionSigned { - // Caller explicitly asked for no hash, so we don't calculate it - hash: B256::ZERO, - signature: tx.signature, - transaction: tx.transaction, - }, - TransactionVariant::WithHash => tx.with_hash(), - }) - .collect(); + self.block_with_senders( + id, + transaction_kind, + |block_number| self.header_by_number(block_number), + |header, body, senders, ommers, withdrawals, requests| { + Block { header, body, ommers, withdrawals, sidecars: None, requests } + // Note: we're using unchecked here because we know the block contains valid txs + // wrt to its height and can ignore the s value check so pre + // EIP-2 txs are allowed + .try_with_senders_unchecked(senders) + .map(Some) + .map_err(|_| ProviderError::SenderRecoveryError) + }, + ) + } - Block { header, body, ommers, withdrawals, requests } - // Note: we're using unchecked here because we know the block contains valid txs wrt to - // its height and can ignore the s value check so pre EIP-2 txs are allowed - .try_with_senders_unchecked(senders) - .map(Some) - .map_err(|_| ProviderError::SenderRecoveryError) + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + self.block_with_senders( + id, + transaction_kind, + |block_number| self.sealed_header(block_number), + |header, body, senders, ommers, withdrawals, requests| { + SealedBlock { header, body, ommers, withdrawals, sidecars: None, requests } + // Note: we're using unchecked here because we know the block contains valid txs + // wrt to its height and can ignore the s value check so pre + // EIP-2 txs are allowed + .try_with_senders_unchecked(senders) + .map(Some) + .map_err(|_| ProviderError::SenderRecoveryError) + }, + ) } fn block_range(&self, range: RangeInclusive) -> ProviderResult> { @@ -1602,7 +1686,7 @@ impl BlockReader for DatabaseProvider { self.block_range( range, |range| self.headers_range(range), - |header, tx_range, ommers, withdrawals, requests| { + |header, tx_range, ommers, withdrawals, sidecars, requests| { let body = if tx_range.is_empty() { Vec::new() } else { @@ -1611,7 +1695,7 @@ impl BlockReader for DatabaseProvider { .map(Into::into) .collect() }; - Ok(Block { header, body, ommers, withdrawals, requests }) + Ok(Block { header, body, ommers, withdrawals, sidecars, requests }) }, ) } @@ -1623,8 +1707,8 @@ impl BlockReader for DatabaseProvider { self.block_with_senders_range( range, |range| self.headers_range(range), - |header, body, ommers, withdrawals, requests, senders| { - Block { 
header, body, ommers, withdrawals, requests } + |header, body, ommers, withdrawals, sidecars, requests, senders| { + Block { header, body, ommers, withdrawals, sidecars, requests } .try_with_senders_unchecked(senders) .map_err(|_| ProviderError::SenderRecoveryError) }, @@ -1638,9 +1722,9 @@ impl BlockReader for DatabaseProvider { self.block_with_senders_range( range, |range| self.sealed_headers_range(range), - |header, body, ommers, withdrawals, requests, senders| { + |header, body, ommers, withdrawals, sidecars, requests, senders| { SealedBlockWithSenders::new( - SealedBlock { header, body, ommers, withdrawals, requests }, + SealedBlock { header, body, ommers, withdrawals, sidecars, requests }, senders, ) .ok_or(ProviderError::SenderRecoveryError) @@ -1949,6 +2033,25 @@ impl WithdrawalsProvider for DatabaseProvider { } } +impl SidecarsProvider for DatabaseProvider { + fn sidecars(&self, block_hash: &BlockHash) -> ProviderResult> { + if let Some(num) = self.block_number(*block_hash)? { + Ok(self.sidecars_by_number(num)?) + } else { + Ok(None) + } + } + + fn sidecars_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.static_file_provider.get_with_static_file_or_database( + StaticFileSegment::Sidecars, + num, + |static_file| static_file.sidecars_by_number(num), + || Ok(self.tx.get::(num)?), + ) + } +} + impl RequestsProvider for DatabaseProvider { fn requests_by_block( &self, @@ -1988,7 +2091,7 @@ impl EvmEnvProvider for DatabaseProvider { cfg: &mut CfgEnvWithHandlerCfg, block_env: &mut BlockEnv, header: &Header, - _evm_config: EvmConfig, + evm_config: EvmConfig, ) -> ProviderResult<()> where EvmConfig: ConfigureEvmEnv, @@ -1996,7 +2099,7 @@ impl EvmEnvProvider for DatabaseProvider { let total_difficulty = self .header_td_by_number(header.number)? .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - EvmConfig::fill_cfg_and_block_env( + evm_config.fill_cfg_and_block_env( cfg, block_env, &self.chain_spec, @@ -2006,41 +2109,6 @@ impl EvmEnvProvider for DatabaseProvider { Ok(()) } - fn fill_block_env_at( - &self, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - ) -> ProviderResult<()> { - let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; - let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; - - self.fill_block_env_with_header(block_env, &header) - } - - fn fill_block_env_with_header( - &self, - block_env: &mut BlockEnv, - header: &Header, - ) -> ProviderResult<()> { - let total_difficulty = self - .header_td_by_number(header.number)? - .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - let spec_id = revm_spec( - &self.chain_spec, - Head { - number: header.number, - timestamp: header.timestamp, - difficulty: header.difficulty, - total_difficulty, - // Not required - hash: Default::default(), - }, - ); - let after_merge = spec_id >= SpecId::MERGE; - fill_block_env(block_env, &self.chain_spec, header, after_merge); - Ok(()) - } - fn fill_cfg_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, @@ -2059,7 +2127,7 @@ impl EvmEnvProvider for DatabaseProvider { &self, cfg: &mut CfgEnvWithHandlerCfg, header: &Header, - _evm_config: EvmConfig, + evm_config: EvmConfig, ) -> ProviderResult<()> where EvmConfig: ConfigureEvmEnv, @@ -2067,7 +2135,7 @@ impl EvmEnvProvider for DatabaseProvider { let total_difficulty = self .header_td_by_number(header.number)? 
.ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; - EvmConfig::fill_cfg_env(cfg, &self.chain_spec, header, total_difficulty); + evm_config.fill_cfg_env(cfg, &self.chain_spec, header, total_difficulty); Ok(()) } } @@ -2382,7 +2450,7 @@ impl HashingWriter for DatabaseProvider { block_hash: end_block_hash, }))) } - trie_updates.flush(&self.tx)?; + trie_updates.write_to_database(&self.tx)?; } durations_recorder.record_relative(metrics::Action::InsertMerkleTree); @@ -2578,7 +2646,7 @@ impl BlockExecutionWriter for DatabaseProvider { block_hash: parent_hash, }))) } - trie_updates.flush(&self.tx)?; + trie_updates.write_to_database(&self.tx)?; } // get blocks @@ -2607,7 +2675,6 @@ impl BlockWriter for DatabaseProvider { fn insert_block( &self, block: SealedBlockWithSenders, - prune_modes: Option<&PruneModes>, ) -> ProviderResult { let block_number = block.number; @@ -2664,8 +2731,10 @@ impl BlockWriter for DatabaseProvider { for (transaction, sender) in block.block.body.into_iter().zip(block.senders.iter()) { let hash = transaction.hash(); - if prune_modes - .and_then(|modes| modes.sender_recovery) + if self + .prune_modes + .sender_recovery + .as_ref() .filter(|prune_mode| prune_mode.is_full()) .is_none() { @@ -2689,8 +2758,9 @@ impl BlockWriter for DatabaseProvider { } transactions_elapsed += elapsed; - if prune_modes - .and_then(|modes| modes.transaction_lookup) + if self + .prune_modes + .transaction_lookup .filter(|prune_mode| prune_mode.is_full()) .is_none() { @@ -2751,7 +2821,6 @@ impl BlockWriter for DatabaseProvider { execution_outcome: ExecutionOutcome, hashed_state: HashedPostState, trie_updates: TrieUpdates, - prune_modes: Option<&PruneModes>, ) -> ProviderResult<()> { if blocks.is_empty() { debug!(target: "providers::db", "Attempted to append empty block range"); @@ -2767,7 +2836,7 @@ impl BlockWriter for DatabaseProvider { // Insert the blocks for block in blocks { - self.insert_block(block, prune_modes)?; + self.insert_block(block)?; durations_recorder.record_relative(metrics::Action::InsertBlock); } @@ -2779,7 +2848,7 @@ impl BlockWriter for DatabaseProvider { // insert hashes and intermediate merkle nodes { HashedStateChanges(hashed_state).write_to_db(&self.tx)?; - trie_updates.flush(&self.tx)?; + trie_updates.write_to_database(&self.tx)?; } durations_recorder.record_relative(metrics::Action::InsertHashes); diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 3fdc1717c..c59e34617 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -19,8 +19,8 @@ use reth_db_api::{ }; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - parlia::Snapshot, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumHash, - BlockNumber, BlockNumberOrTag, BlockWithSenders, Header, Receipt, SealedBlock, + parlia::Snapshot, Account, Address, BlobSidecars, Block, BlockHash, BlockHashOrNumber, BlockId, + BlockNumHash, BlockNumber, BlockNumberOrTag, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, }; @@ -60,6 +60,7 @@ use chain_info::ChainInfoTracker; mod consistent_view; use alloy_rpc_types_engine::ForkchoiceState; pub use consistent_view::{ConsistentDbView, ConsistentViewError}; +use reth_storage_api::SidecarsProvider; /// The main type for interacting with the blockchain. 
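// The gating above, condensed: a segment configured with `PruneMode::Full`
// is simply never written at insertion time, instead of being written and
// pruned later. Types and constructors are the ones this diff already uses.
use reth_prune_types::{PruneMode, PruneModes};

fn should_write_senders(prune_modes: &PruneModes) -> bool {
    // `Some(PruneMode::Full)` means "never keep this data" => skip the write.
    prune_modes.sender_recovery.as_ref().filter(|mode| mode.is_full()).is_none()
}

fn main() {
    assert!(should_write_senders(&PruneModes::none()));
    let full =
        PruneModes { sender_recovery: Some(PruneMode::Full), ..PruneModes::none() };
    assert!(!should_write_senders(&full));
}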
/// @@ -328,6 +329,14 @@ where self.database.block_with_senders(id, transaction_kind) } + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult> { + self.database.sealed_block_with_senders(id, transaction_kind) + } + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.database.block_range(range) } @@ -482,6 +491,19 @@ where } } +impl SidecarsProvider for BlockchainProvider +where + DB: Database, +{ + fn sidecars(&self, block_hash: &BlockHash) -> ProviderResult> { + self.database.sidecars(block_hash) + } + + fn sidecars_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.database.sidecars_by_number(num) + } +} + impl RequestsProvider for BlockchainProvider where DB: Database, @@ -538,22 +560,6 @@ where self.database.provider()?.fill_env_with_header(cfg, block_env, header, evm_config) } - fn fill_block_env_at( - &self, - block_env: &mut BlockEnv, - at: BlockHashOrNumber, - ) -> ProviderResult<()> { - self.database.provider()?.fill_block_env_at(block_env, at) - } - - fn fill_block_env_with_header( - &self, - block_env: &mut BlockEnv, - header: &Header, - ) -> ProviderResult<()> { - self.database.provider()?.fill_block_env_with_header(block_env, header) - } - fn fill_cfg_env_at( &self, cfg: &mut CfgEnvWithHandlerCfg, diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 12545fe78..c65c6ddc1 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -13,6 +13,7 @@ use reth_primitives::{ constants::EPOCH_SLOTS, Account, Address, BlockNumber, Bytecode, StaticFileSegment, StorageKey, StorageValue, B256, }; +use reth_storage_api::StateProofProvider; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState}; use revm::db::BundleState; @@ -271,6 +272,22 @@ impl<'b, TX: DbTx> StateRootProvider for HistoricalStateProviderRef<'b, TX> { } } +impl<'b, TX: DbTx> StateProofProvider for HistoricalStateProviderRef<'b, TX> { + /// Get account and storage proofs. + fn proof( + &self, + state: &BundleState, + address: Address, + slots: &[B256], + ) -> ProviderResult { + let mut revert_state = self.revert_state()?; + revert_state.extend(HashedPostState::from_bundle_state(&state.state)); + revert_state + .account_proof(self.tx, address, slots) + .map_err(|err| ProviderError::Database(err.into())) + } +} + impl<'b, TX: DbTx> StateProvider for HistoricalStateProviderRef<'b, TX> { /// Get storage. fn storage( @@ -306,11 +323,6 @@ impl<'b, TX: DbTx> StateProvider for HistoricalStateProviderRef<'b, TX> { fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { self.tx.get::(code_hash).map_err(Into::into) } - - /// Get account and storage proofs. - fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult { - Err(ProviderError::StateRootNotAvailableForHistoricalBlock) - } } /// State provider for a given block number. 
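// Step two of the historical `proof` above, in isolation: the caller's pending
// `BundleState` is hashed and layered over the reverted historical state before
// the proof walk (`base` stands in for the provider's `revert_state()` result).
use reth_trie::HashedPostState;
use revm::db::BundleState;

fn overlay(base: HashedPostState, pending: &BundleState) -> HashedPostState {
    let mut state = base;
    // Later entries shadow earlier ones, so pending changes win over history.
    state.extend(HashedPostState::from_bundle_state(&pending.state));
    state
}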
diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 56b4ecc38..bfc2f16ad 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -10,8 +10,9 @@ use reth_db_api::{ use reth_primitives::{ Account, Address, BlockNumber, Bytecode, StaticFileSegment, StorageKey, StorageValue, B256, }; +use reth_storage_api::StateProofProvider; use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use reth_trie::{proof::Proof, updates::TrieUpdates, AccountProof, HashedPostState}; +use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState}; use revm::db::BundleState; /// State provider over latest state that takes tx reference. @@ -90,6 +91,19 @@ impl<'b, TX: DbTx> StateRootProvider for LatestStateProviderRef<'b, TX> { } } +impl<'b, TX: DbTx> StateProofProvider for LatestStateProviderRef<'b, TX> { + fn proof( + &self, + bundle_state: &BundleState, + address: Address, + slots: &[B256], + ) -> ProviderResult { + Ok(HashedPostState::from_bundle_state(&bundle_state.state) + .account_proof(self.tx, address, slots) + .map_err(Into::::into)?) + } +} + impl<'b, TX: DbTx> StateProvider for LatestStateProviderRef<'b, TX> { /// Get storage. fn storage( @@ -110,12 +124,6 @@ impl<'b, TX: DbTx> StateProvider for LatestStateProviderRef<'b, TX> { fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { self.tx.get::(code_hash).map_err(Into::into) } - - fn proof(&self, address: Address, slots: &[B256]) -> ProviderResult { - Ok(Proof::new(self.tx) - .account_proof(address, slots) - .map_err(Into::::into)?) - } } /// State provider for the latest state. diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index a39cddfe3..344a21101 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -30,10 +30,6 @@ macro_rules! delegate_provider_impls { ($target:ty $(where [$($generics:tt)*])?) => { $crate::providers::state::macros::delegate_impls_to_as_ref!( for $target => - StateRootProvider $(where [$($generics)*])? { - fn state_root(&self, state: &revm::db::BundleState) -> reth_storage_errors::provider::ProviderResult; - fn state_root_with_updates(&self, state: &revm::db::BundleState) -> reth_storage_errors::provider::ProviderResult<(reth_primitives::B256, reth_trie::updates::TrieUpdates)>; - } AccountReader $(where [$($generics)*])? { fn basic_account(&self, address: reth_primitives::Address) -> reth_storage_errors::provider::ProviderResult>; } @@ -41,11 +37,17 @@ macro_rules! delegate_provider_impls { fn block_hash(&self, number: u64) -> reth_storage_errors::provider::ProviderResult>; fn canonical_hashes_range(&self, start: reth_primitives::BlockNumber, end: reth_primitives::BlockNumber) -> reth_storage_errors::provider::ProviderResult>; } - StateProvider $(where [$($generics)*])?{ + StateProvider $(where [$($generics)*])? { fn storage(&self, account: reth_primitives::Address, storage_key: reth_primitives::StorageKey) -> reth_storage_errors::provider::ProviderResult>; - fn proof(&self, address: reth_primitives::Address, keys: &[reth_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; fn bytecode_by_hash(&self, code_hash: reth_primitives::B256) -> reth_storage_errors::provider::ProviderResult>; } + StateRootProvider $(where [$($generics)*])? 
{ + fn state_root(&self, state: &revm::db::BundleState) -> reth_storage_errors::provider::ProviderResult; + fn state_root_with_updates(&self, state: &revm::db::BundleState) -> reth_storage_errors::provider::ProviderResult<(reth_primitives::B256, reth_trie::updates::TrieUpdates)>; + } + StateProofProvider $(where [$($generics)*])? { + fn proof(&self, state: &revm::db::BundleState, address: reth_primitives::Address, slots: &[reth_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; + } ); } } diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 5e20572e3..56a1113bf 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -7,12 +7,16 @@ use crate::{ TransactionsProvider, }; use reth_chainspec::ChainInfo; -use reth_db::static_file::{HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}; +use reth_db::static_file::{ + HeaderMask, ReceiptMask, SidecarMask, StaticFileCursor, TransactionMask, +}; use reth_db_api::models::CompactU256; use reth_primitives::{ - Address, BlockHash, BlockHashOrNumber, BlockNumber, Header, Receipt, SealedHeader, - TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, B256, U256, + Address, BlobSidecars, BlockHash, BlockHashOrNumber, BlockNumber, Header, Receipt, + SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, + B256, U256, }; +use reth_storage_api::SidecarsProvider; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ ops::{Deref, RangeBounds}, @@ -325,3 +329,17 @@ impl<'a> ReceiptProvider for StaticFileJarProvider<'a> { Ok(receipts) } } + +impl<'a> SidecarsProvider for StaticFileJarProvider<'a> { + fn sidecars(&self, block_hash: &BlockHash) -> ProviderResult> { + Ok(self + .cursor()? + .get_two::>(block_hash.into())? 
+ .filter(|(_, hash)| hash == block_hash) + .map(|(sc, _)| sc)) + } + + fn sidecars_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.cursor()?.get_one::>(num.into()) + } +} diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 39e588c7f..8a35e10be 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -12,7 +12,9 @@ use parking_lot::RwLock; use reth_chainspec::ChainInfo; use reth_db::{ lockfile::StorageLock, - static_file::{iter_static_files, HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}, + static_file::{ + iter_static_files, HeaderMask, ReceiptMask, SidecarMask, StaticFileCursor, TransactionMask, + }, tables, }; use reth_db_api::{ @@ -25,12 +27,13 @@ use reth_nippy_jar::NippyJar; use reth_primitives::{ keccak256, static_file::{find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive}, - Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, Header, Receipt, - SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionMeta, - TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, - U256, + Address, BlobSidecars, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, + Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment, + TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, + Withdrawals, B256, U256, }; use reth_stages_types::{PipelineTarget, StageId}; +use reth_storage_api::SidecarsProvider; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, @@ -631,6 +634,12 @@ impl StaticFileProvider { highest_tx, highest_block, )?, + StaticFileSegment::Sidecars => self.ensure_invariants::<_, tables::Sidecars>( + provider, + segment, + highest_block, + highest_block, + )?, } { update_unwind_target(unwind); } @@ -689,12 +698,37 @@ impl StaticFileProvider { let checkpoint_block_number = provider .get_stage_checkpoint(match segment { StaticFileSegment::Headers => StageId::Headers, - StaticFileSegment::Transactions => StageId::Bodies, + StaticFileSegment::Transactions | StaticFileSegment::Sidecars => StageId::Bodies, StaticFileSegment::Receipts => StageId::Execution, })? .unwrap_or_default() .block_number; + if segment == StaticFileSegment::Sidecars && + highest_static_file_block == 0 && + checkpoint_block_number != 0 + { + let range_start = find_fixed_range(checkpoint_block_number).start(); + + // create static file of newest block + let mut writer = StaticFileProviderRW::new( + segment, + range_start, + Arc::downgrade(&self.0), + self.metrics.clone(), + )?; + + // append empty sidecars + for block_number in range_start..=checkpoint_block_number { + let hash = provider.block_hash(block_number)?.unwrap_or_default(); + writer.append_sidecars(Default::default(), block_number, hash)?; + } + writer.commit()?; + self.writers.insert(segment, writer); + + return Ok(None) + } + // If the checkpoint is ahead, then we lost static file data. May be data corruption. 
if checkpoint_block_number > highest_static_file_block { info!( @@ -721,6 +755,8 @@ impl StaticFileProvider { let mut writer = self.latest_writer(segment)?; if segment.is_headers() { writer.prune_headers(highest_static_file_block - checkpoint_block_number)?; + } else if segment.is_sidecars() { + writer.prune_sidecars(highest_static_file_block - checkpoint_block_number)?; } else if let Some(block) = provider.block_body_indices(checkpoint_block_number)? { let number = highest_static_file_entry - block.last_tx_num(); if segment.is_receipts() { @@ -754,6 +790,7 @@ impl StaticFileProvider { headers: self.get_highest_static_file_block(StaticFileSegment::Headers), receipts: self.get_highest_static_file_block(StaticFileSegment::Receipts), transactions: self.get_highest_static_file_block(StaticFileSegment::Transactions), + sidecars: self.get_highest_static_file_block(StaticFileSegment::Sidecars), } } @@ -800,7 +837,9 @@ impl StaticFileProvider { StaticFileSegment::Headers => { self.get_segment_provider_from_block(segment, start, None) } - StaticFileSegment::Transactions | StaticFileSegment::Receipts => { + StaticFileSegment::Transactions | + StaticFileSegment::Receipts | + StaticFileSegment::Sidecars => { self.get_segment_provider_from_transaction(segment, start, None) } }; @@ -870,7 +909,9 @@ impl StaticFileProvider { StaticFileSegment::Headers => { self.get_segment_provider_from_block(segment, start, None) } - StaticFileSegment::Transactions | StaticFileSegment::Receipts => { + StaticFileSegment::Transactions | + StaticFileSegment::Receipts | + StaticFileSegment::Sidecars => { self.get_segment_provider_from_transaction(segment, start, None) } }; @@ -914,7 +955,9 @@ impl StaticFileProvider { { // If there is, check the maximum block or transaction number of the segment. let static_file_upper_bound = match segment { - StaticFileSegment::Headers => self.get_highest_static_file_block(segment), + StaticFileSegment::Headers | StaticFileSegment::Sidecars => { + self.get_highest_static_file_block(segment) + } StaticFileSegment::Transactions | StaticFileSegment::Receipts => { self.get_highest_static_file_tx(segment) } @@ -957,9 +1000,9 @@ impl StaticFileProvider { // If there is, check the maximum block or transaction number of the segment. 
if let Some(static_file_upper_bound) = match segment { StaticFileSegment::Headers => self.get_highest_static_file_block(segment), - StaticFileSegment::Transactions | StaticFileSegment::Receipts => { - self.get_highest_static_file_tx(segment) - } + StaticFileSegment::Transactions | + StaticFileSegment::Receipts | + StaticFileSegment::Sidecars => self.get_highest_static_file_tx(segment), } { if block_or_tx_range.start <= static_file_upper_bound { let end = block_or_tx_range.end.min(static_file_upper_bound + 1); @@ -1457,6 +1500,15 @@ impl BlockReader for StaticFileProvider { Err(ProviderError::UnsupportedProvider) } + fn sealed_block_with_senders( + &self, + _id: BlockHashOrNumber, + _transaction_kind: TransactionVariant, + ) -> ProviderResult> { + // Required data not present in static_files + Err(ProviderError::UnsupportedProvider) + } + fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) @@ -1493,6 +1545,34 @@ impl WithdrawalsProvider for StaticFileProvider { } } +impl SidecarsProvider for StaticFileProvider { + fn sidecars(&self, block_hash: &BlockHash) -> ProviderResult> { + self.find_static_file(StaticFileSegment::Sidecars, |jar_provider| { + Ok(jar_provider + .cursor()? + .get_two::>(block_hash.into())? + .and_then(|(sc, hash)| { + if &hash == block_hash { + return Some(sc) + } + None + })) + }) + } + + fn sidecars_by_number(&self, num: BlockNumber) -> ProviderResult> { + self.get_segment_provider_from_block(StaticFileSegment::Sidecars, num, None) + .and_then(|provider| provider.sidecars_by_number(num)) + .or_else(|err| { + if let ProviderError::MissingStaticFileBlock(_, _) = err { + Ok(None) + } else { + Err(err) + } + }) + } +} + impl RequestsProvider for StaticFileProvider { fn requests_by_block( &self, diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 5c2057b3b..5f476485d 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -59,15 +59,13 @@ mod tests { use super::*; use crate::{test_utils::create_test_provider_factory, HeaderProvider}; use rand::seq::SliceRandom; - use reth_db::{ - static_file::create_static_file_T1_T2_T3, CanonicalHeaders, HeaderNumbers, - HeaderTerminalDifficulties, Headers, RawTable, + use reth_db::{CanonicalHeaders, HeaderNumbers, HeaderTerminalDifficulties, Headers, Sidecars}; + use reth_db_api::transaction::DbTxMut; + use reth_primitives::{ + static_file::find_fixed_range, BlobSidecar, BlobSidecars, BlobTransactionSidecar, B256, + U256, }; - use reth_db_api::{ - cursor::DbCursorRO, - transaction::{DbTx, DbTxMut}, - }; - use reth_primitives::{static_file::find_fixed_range, BlockNumber, B256, U256}; + use reth_storage_api::SidecarsProvider; use reth_testing_utils::generators::{self, random_header_range}; #[test] @@ -75,12 +73,6 @@ mod tests { // Ranges let row_count = 100u64; let range = 0..=(row_count - 1); - let segment_header = SegmentHeader::new( - range.clone().into(), - Some(range.clone().into()), - Some(range.clone().into()), - StaticFileSegment::Headers, - ); // Data sources let factory = create_test_provider_factory(); @@ -112,48 +104,22 @@ mod tests { // Create StaticFile { - let with_compression = true; - let with_filter = true; - - let mut nippy_jar = NippyJar::new(3, static_file.as_path(), segment_header); - - if with_compression { - nippy_jar = nippy_jar.with_zstd(false, 0); - } - - 
if with_filter { - nippy_jar = nippy_jar.with_cuckoo_filter(row_count as usize + 10).with_fmph(); + let manager = StaticFileProvider::read_write(static_files_path.path()).unwrap(); + let mut writer = manager.latest_writer(StaticFileSegment::Headers).unwrap(); + let mut td = U256::ZERO; + + for header in headers.clone() { + td += header.header().difficulty; + let hash = header.hash(); + writer.append_header(header.unseal(), td, hash).unwrap(); } - - let provider = factory.provider().unwrap(); - let tx = provider.tx_ref(); - - // Hacky type inference. TODO fix - let mut none_vec = Some(vec![vec![vec![0u8]].into_iter()]); - let _ = none_vec.take(); - - // Generate list of hashes for filters & PHF - let mut cursor = tx.cursor_read::>().unwrap(); - let hashes = cursor - .walk(None) - .unwrap() - .map(|row| row.map(|(_key, value)| value.into_value()).map_err(|e| e.into())); - - create_static_file_T1_T2_T3::< - Headers, - HeaderTerminalDifficulties, - CanonicalHeaders, - BlockNumber, - SegmentHeader, - >(tx, range, None, none_vec, Some(hashes), row_count as usize, nippy_jar) - .unwrap(); + writer.commit().unwrap(); } // Use providers to query Header data and compare if it matches { let db_provider = factory.provider().unwrap(); - let manager = - StaticFileProvider::read_write(static_files_path.path()).unwrap().with_filters(); + let manager = StaticFileProvider::read_write(static_files_path.path()).unwrap(); let jar_provider = manager .get_segment_provider_from_block(StaticFileSegment::Headers, 0, Some(&static_file)) .unwrap(); @@ -169,12 +135,88 @@ mod tests { // Compare Header assert_eq!(header, db_provider.header(&header_hash).unwrap().unwrap()); - assert_eq!(header, jar_provider.header(&header_hash).unwrap().unwrap()); + assert_eq!(header, jar_provider.header_by_number(header.number).unwrap().unwrap()); // Compare HeaderTerminalDifficulties assert_eq!( db_provider.header_td(&header_hash).unwrap().unwrap(), - jar_provider.header_td(&header_hash).unwrap().unwrap() + jar_provider.header_td_by_number(header.number).unwrap().unwrap() + ); + } + } + } + + #[test] + fn test_sidecars() { + // Ranges + let row_count = 100u64; + let range = 0..=(row_count - 1); + + // Data sources + let factory = create_test_provider_factory(); + let static_files_path = tempfile::tempdir().unwrap(); + let static_file = static_files_path + .path() + .join(StaticFileSegment::Sidecars.filename(&find_fixed_range(*range.end()))); + + // Setup data + let mut provider_rw = factory.provider_rw().unwrap(); + let tx = provider_rw.tx_mut(); + let mut sidecars_set = Vec::with_capacity(100); + for i in range { + let sidecars = BlobSidecars::new(vec![BlobSidecar { + blob_transaction_sidecar: BlobTransactionSidecar { + blobs: vec![], + commitments: vec![Default::default()], + proofs: vec![Default::default()], + }, + block_number: U256::from(i), + block_hash: B256::random(), + tx_index: rand::random::(), + tx_hash: B256::random(), + }]); + let block_number = sidecars[0].block_number.to(); + let block_hash = sidecars[0].block_hash; + + tx.put::(block_number, block_hash).unwrap(); + tx.put::(block_hash, block_number).unwrap(); + tx.put::(block_number, sidecars.clone()).unwrap(); + + sidecars_set.push(sidecars); + } + provider_rw.commit().unwrap(); + + // Create StaticFile + { + let manager = StaticFileProvider::read_write(static_files_path.path()).unwrap(); + let mut writer = manager.latest_writer(StaticFileSegment::Sidecars).unwrap(); + + for sidecars in sidecars_set.clone() { + let block_number = sidecars[0].block_number.to(); + let 
hash = sidecars[0].block_hash; + writer.append_sidecars(sidecars, block_number, hash).unwrap(); + } + writer.commit().unwrap(); + } + + // Use providers to query sidecars data and compare if it matches + { + let db_provider = factory.provider().unwrap(); + let manager = StaticFileProvider::read_write(static_files_path.path()).unwrap(); + let jar_provider = manager + .get_segment_provider_from_block(StaticFileSegment::Sidecars, 0, Some(&static_file)) + .unwrap(); + + // Shuffled for chaos. + sidecars_set.shuffle(&mut generators::rng()); + + for sidecars in sidecars_set { + let hash = sidecars[0].block_hash; + let block_number = sidecars[0].block_number.to(); + assert_eq!(sidecars, db_provider.sidecars(&hash).unwrap().unwrap()); + assert_eq!( + sidecars, + jar_provider.sidecars_by_number(block_number).unwrap().unwrap() ); } } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 304429a01..a1385580f 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -9,8 +9,8 @@ use reth_db_api::models::CompactU256; use reth_nippy_jar::{ConsistencyFailStrategy, NippyJar, NippyJarError, NippyJarWriter}; use reth_primitives::{ static_file::{find_fixed_range, SegmentHeader, SegmentRangeInclusive}, - BlockHash, BlockNumber, Header, Receipt, StaticFileSegment, TransactionSignedNoHash, TxNumber, - U256, + BlobSidecars, BlockHash, BlockNumber, Header, Receipt, StaticFileSegment, + TransactionSignedNoHash, TxNumber, U256, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ @@ -145,7 +145,9 @@ impl StaticFileProviderRW { })?; // If we have lost rows (in this run or previous), we need to update the [SegmentHeader]. - let expected_rows = if self.user_header().segment().is_headers() { + let expected_rows = if self.user_header().segment().is_headers() || + self.user_header().segment().is_sidecars() + { self.user_header().block_len().unwrap_or_default() } else { self.user_header().tx_len().unwrap_or_default() @@ -178,6 +180,7 @@ impl StaticFileProviderRW { StaticFileSegment::Receipts => { self.prune_receipt_data(to_delete, last_block_number.expect("should exist"))? } + StaticFileSegment::Sidecars => self.prune_sidecars_data(to_delete)?, } } @@ -348,7 +351,7 @@ impl StaticFileProviderRW { let mut remaining_rows = num_rows; while remaining_rows > 0 { let len = match segment { - StaticFileSegment::Headers => { + StaticFileSegment::Headers | StaticFileSegment::Sidecars => { self.writer.user_header().block_len().unwrap_or_default() } StaticFileSegment::Transactions | StaticFileSegment::Receipts => { @@ -493,6 +496,39 @@ impl StaticFileProviderRW { Ok(block_number) } + /// Appends sidecars to static file. + /// + /// It **CALLS** `increment_block()` since the number of sidecars is equal to the number of + /// blocks. + /// + /// Returns the current [`BlockNumber`] as seen in the static file. 
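A minimal usage sketch for the writer method defined next (editorial, not part of the patch; `sf_provider`, `sidecars`, `block_number` and `block_hash` are assumed to be in scope, and `latest_writer`/`commit` are used exactly as in the tests above):

    let mut writer = sf_provider.latest_writer(StaticFileSegment::Sidecars)?;
    // exactly one BlobSidecars entry per block, mirroring append_header
    writer.append_sidecars(sidecars, block_number, block_hash)?;
    writer.commit()?;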
+ pub fn append_sidecars( + &mut self, + sidecars: BlobSidecars, + block_number: BlockNumber, + hash: BlockHash, + ) -> ProviderResult { + let start = Instant::now(); + self.ensure_no_queued_prune()?; + + debug_assert!(self.writer.user_header().segment() == StaticFileSegment::Sidecars); + + let block_number = self.increment_block(StaticFileSegment::Sidecars, block_number)?; + + self.append_column(sidecars)?; + self.append_column(hash)?; + + if let Some(metrics) = &self.metrics { + metrics.record_segment_operation( + StaticFileSegment::Sidecars, + StaticFileProviderOperation::Append, + Some(start.elapsed()), + ); + } + + Ok(block_number) + } + /// Appends transaction to static file. /// /// It **DOES NOT CALL** `increment_block()`, it should be handled elsewhere. There might be @@ -615,6 +651,12 @@ impl StaticFileProviderRW { self.queue_prune(to_delete, None) } + /// Adds an instruction to prune `to_delete` sidecars during commit. + pub fn prune_sidecars(&mut self, to_delete: u64) -> ProviderResult<()> { + debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::Sidecars); + self.queue_prune(to_delete, None) + } + /// Adds an instruction to prune `to_delete` elements during commit. /// /// Note: `last_block` refers to the block the unwinds ends at if dealing with transaction-based @@ -707,6 +749,26 @@ impl StaticFileProviderRW { Ok(()) } + /// Prunes the last `to_delete` sidecars from the data file. + fn prune_sidecars_data(&mut self, to_delete: u64) -> ProviderResult<()> { + let start = Instant::now(); + + let segment = StaticFileSegment::Sidecars; + debug_assert!(self.writer.user_header().segment() == segment); + + self.truncate(segment, to_delete, None)?; + + if let Some(metrics) = &self.metrics { + metrics.record_segment_operation( + StaticFileSegment::Sidecars, + StaticFileProviderOperation::Prune, + Some(start.elapsed()), + ); + } + + Ok(()) + } + fn reader(&self) -> StaticFileProvider { Self::upgrade_provider_to_strong_reference(&self.reader) } diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 2a0f900a5..f534bf9fa 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -5,9 +5,9 @@ use alloy_rlp::Decodable; use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_primitives::{ - alloy_primitives, b256, hex_literal::hex, Account, Address, BlockNumber, Bytes, Header, - Receipt, Requests, SealedBlock, SealedBlockWithSenders, TxType, Withdrawal, Withdrawals, B256, - U256, + alloy_primitives, b256, hex_literal::hex, Account, Address, BlobSidecars, BlockNumber, Bytes, + Header, Receipt, Requests, SealedBlock, SealedBlockWithSenders, TxType, Withdrawal, + Withdrawals, B256, U256, }; use reth_trie::root::{state_root_unhashed, storage_root_unhashed}; use revm::{ @@ -108,6 +108,7 @@ pub fn genesis() -> SealedBlock { body: vec![], ommers: vec![], withdrawals: Some(Withdrawals::default()), + sidecars: Some(BlobSidecars::default()), requests: Some(Requests::default()), } } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 974982121..597e68d6f 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -10,11 +10,13 @@ use reth_chainspec::{ChainInfo, ChainSpec}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - 
keccak256, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, - BlockWithSenders, Bytecode, Bytes, Header, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, StorageKey, StorageValue, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, + keccak256, Account, Address, BlobSidecars, Block, BlockHash, BlockHashOrNumber, BlockId, + BlockNumber, BlockWithSenders, Bytecode, Bytes, Header, Receipt, SealedBlock, + SealedBlockWithSenders, SealedHeader, StorageKey, StorageValue, TransactionMeta, + TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, + U256, }; +use reth_storage_api::{SidecarsProvider, StateProofProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{updates::TrieUpdates, AccountProof}; use revm::{ @@ -474,6 +476,14 @@ impl BlockReader for MockEthProvider { Ok(None) } + fn sealed_block_with_senders( + &self, + _id: BlockHashOrNumber, + _transaction_kind: TransactionVariant, + ) -> ProviderResult> { + Ok(None) + } + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { let lock = self.blocks.lock(); @@ -545,6 +555,17 @@ impl StateRootProvider for MockEthProvider { } } +impl StateProofProvider for MockEthProvider { + fn proof( + &self, + _state: &BundleState, + address: Address, + _slots: &[B256], + ) -> ProviderResult { + Ok(AccountProof::new(address)) + } +} + impl StateProvider for MockEthProvider { fn storage( &self, @@ -566,10 +587,6 @@ impl StateProvider for MockEthProvider { } })) } - - fn proof(&self, address: Address, _keys: &[B256]) -> ProviderResult { - Ok(AccountProof::new(address)) - } } impl EvmEnvProvider for MockEthProvider { @@ -599,22 +616,6 @@ impl EvmEnvProvider for MockEthProvider { Ok(()) } - fn fill_block_env_at( - &self, - _block_env: &mut BlockEnv, - _at: BlockHashOrNumber, - ) -> ProviderResult<()> { - Ok(()) - } - - fn fill_block_env_with_header( - &self, - _block_env: &mut BlockEnv, - _header: &Header, - ) -> ProviderResult<()> { - Ok(()) - } - fn fill_cfg_env_at( &self, _cfg: &mut CfgEnvWithHandlerCfg, @@ -686,6 +687,16 @@ impl WithdrawalsProvider for MockEthProvider { } } +impl SidecarsProvider for MockEthProvider { + fn sidecars(&self, _block_hash: &BlockHash) -> ProviderResult> { + Ok(None) + } + + fn sidecars_by_number(&self, _num: BlockNumber) -> ProviderResult> { + Ok(None) + } +} + impl RequestsProvider for MockEthProvider { fn requests_by_block( &self, diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index f029e7b57..1a18a2060 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -1,31 +1,37 @@ -use crate::{ - traits::{BlockSource, ReceiptProvider}, - AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - ChainSpecProvider, ChangeSetReader, EvmEnvProvider, HeaderProvider, ParliaSnapshotReader, - PruneCheckpointReader, ReceiptProviderIdExt, RequestsProvider, StageCheckpointReader, - StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, TransactionVariant, - TransactionsProvider, WithdrawalsProvider, +use std::{ + ops::{RangeBounds, RangeInclusive}, + sync::Arc, }; + use reth_chainspec::{ChainInfo, ChainSpec, MAINNET}; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; use reth_primitives::{ - parlia::Snapshot, Account, Address, Block, 
BlockHash, BlockHashOrNumber, BlockId, BlockNumber, - BlockWithSenders, Bytecode, Header, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, - StorageKey, StorageValue, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, - TxNumber, Withdrawal, Withdrawals, B256, U256, + parlia::Snapshot, Account, Address, BlobSidecars, Block, BlockHash, BlockHashOrNumber, BlockId, + BlockNumber, BlockWithSenders, Bytecode, Header, Receipt, SealedBlock, SealedBlockWithSenders, + SealedHeader, StorageKey, StorageValue, TransactionMeta, TransactionSigned, + TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; +use reth_storage_api::{SidecarsProvider, StateProofProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, AccountProof}; use revm::{ db::BundleState, primitives::{BlockEnv, CfgEnvWithHandlerCfg}, }; -use std::{ - ops::{RangeBounds, RangeInclusive}, - sync::Arc, +use tokio::sync::broadcast; + +use crate::{ + providers::StaticFileProvider, + traits::{BlockSource, ReceiptProvider}, + AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, + CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, + EvmEnvProvider, HeaderProvider, ParliaSnapshotReader, PruneCheckpointReader, + ReceiptProviderIdExt, RequestsProvider, StageCheckpointReader, StateProvider, StateProviderBox, + StateProviderFactory, StateRootProvider, StaticFileProviderFactory, TransactionVariant, + TransactionsProvider, WithdrawalsProvider, }; /// Supports various api interfaces for testing purposes. @@ -113,6 +119,14 @@ impl BlockReader for NoopProvider { Ok(None) } + fn sealed_block_with_senders( + &self, + _id: BlockHashOrNumber, + _transaction_kind: TransactionVariant, + ) -> ProviderResult> { + Ok(None) + } + fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { Ok(vec![]) } @@ -313,6 +327,17 @@ impl StateRootProvider for NoopProvider { } } +impl StateProofProvider for NoopProvider { + fn proof( + &self, + _state: &BundleState, + address: Address, + _slots: &[B256], + ) -> ProviderResult { + Ok(AccountProof::new(address)) + } +} + impl StateProvider for NoopProvider { fn storage( &self, @@ -325,10 +350,6 @@ impl StateProvider for NoopProvider { fn bytecode_by_hash(&self, _code_hash: B256) -> ProviderResult> { Ok(None) } - - fn proof(&self, address: Address, _keys: &[B256]) -> ProviderResult { - Ok(AccountProof::new(address)) - } } impl EvmEnvProvider for NoopProvider { @@ -358,22 +379,6 @@ impl EvmEnvProvider for NoopProvider { Ok(()) } - fn fill_block_env_at( - &self, - _block_env: &mut BlockEnv, - _at: BlockHashOrNumber, - ) -> ProviderResult<()> { - Ok(()) - } - - fn fill_block_env_with_header( - &self, - _block_env: &mut BlockEnv, - _header: &Header, - ) -> ProviderResult<()> { - Ok(()) - } - fn fill_cfg_env_at( &self, _cfg: &mut CfgEnvWithHandlerCfg, @@ -455,6 +460,16 @@ impl WithdrawalsProvider for NoopProvider { } } +impl SidecarsProvider for NoopProvider { + fn sidecars(&self, _block_hash: &BlockHash) -> ProviderResult> { + Ok(None) + } + + fn sidecars_by_number(&self, _num: BlockNumber) -> ProviderResult> { + Ok(None) + } +} + impl RequestsProvider for NoopProvider { fn requests_by_block( &self, @@ -474,6 +489,18 @@ impl PruneCheckpointReader for NoopProvider { } } +impl StaticFileProviderFactory for NoopProvider { + fn static_file_provider(&self) 
-> StaticFileProvider { + StaticFileProvider::default() + } +} + +impl CanonStateSubscriptions for NoopProvider { + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { + broadcast::channel(1).1 + } +} + impl ParliaSnapshotReader for NoopProvider { fn get_parlia_snapshot(&self, _block_hash: B256) -> ProviderResult> { Ok(None) diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 7211cb691..3d0cf3c0c 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -1,7 +1,6 @@ use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{BlockNumber, SealedBlockWithSenders}; -use reth_prune_types::PruneModes; use reth_storage_api::BlockReader; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostState}; @@ -41,11 +40,8 @@ pub trait BlockWriter: Send + Sync { /// /// Return [StoredBlockBodyIndices] that contains indices of the first and last transactions and /// transition in the block. - fn insert_block( - &self, - block: SealedBlockWithSenders, - prune_modes: Option<&PruneModes>, - ) -> ProviderResult; + fn insert_block(&self, block: SealedBlockWithSenders) + -> ProviderResult; /// Appends a batch of sealed blocks to the blockchain, including sender information, and /// updates the post-state. @@ -57,7 +53,6 @@ pub trait BlockWriter: Send + Sync { /// /// - `blocks`: Vector of `SealedBlockWithSenders` instances to append. /// - `state`: Post-state information to update after appending. - /// - `prune_modes`: Optional pruning configuration. /// /// # Returns /// @@ -68,6 +63,5 @@ pub trait BlockWriter: Send + Sync { execution_outcome: ExecutionOutcome, hashed_state: HashedPostState, trie_updates: TrieUpdates, - prune_modes: Option<&PruneModes>, ) -> ProviderResult<()>; } diff --git a/crates/storage/provider/src/traits/full.rs b/crates/storage/provider/src/traits/full.rs index 5ebe95c44..606f645d5 100644 --- a/crates/storage/provider/src/traits/full.rs +++ b/crates/storage/provider/src/traits/full.rs @@ -3,7 +3,7 @@ use crate::{ AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, DatabaseProviderFactory, EvmEnvProvider, HeaderProvider, ParliaSnapshotReader, - StageCheckpointReader, StateProviderFactory, StaticFileProviderFactory, + StageCheckpointReader, StateProviderFactory, StaticFileProviderFactory, TransactionsProvider, }; use reth_db_api::database::Database; @@ -45,3 +45,31 @@ impl FullProvider for T where + 'static { } + +/// Helper trait to unify all provider traits required to support `eth` RPC server behaviour, for +/// simplicity. 
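The point of the alias trait defined next is to collapse the long list of provider bounds that `eth` RPC construction sites would otherwise repeat. A sketch of the intended call shape (editorial; `build_eth_namespace` is a made-up name):

    fn build_eth_namespace<P: FullRpcProvider>(provider: P) {
        // all of StateProviderFactory, BlockReaderIdExt, TransactionsProvider, ... are in scope
        let _spec = provider.chain_spec();
    }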
+pub trait FullRpcProvider: + StateProviderFactory + + EvmEnvProvider + + ChainSpecProvider + + BlockReaderIdExt + + HeaderProvider + + TransactionsProvider + + Clone + + Unpin + + 'static +{ +} + +impl FullRpcProvider for T where + T: StateProviderFactory + + EvmEnvProvider + + ChainSpecProvider + + BlockReaderIdExt + + HeaderProvider + + TransactionsProvider + + Clone + + Unpin + + 'static +{ +} diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 8f2b9f420..375a47511 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -43,7 +43,7 @@ mod stats; pub use stats::StatsReader; mod full; -pub use full::FullProvider; +pub use full::{FullProvider, FullRpcProvider}; mod tree_viewer; pub use tree_viewer::TreeViewer; diff --git a/crates/storage/provider/src/traits/spec.rs b/crates/storage/provider/src/traits/spec.rs index 917051d97..798bfeae1 100644 --- a/crates/storage/provider/src/traits/spec.rs +++ b/crates/storage/provider/src/traits/spec.rs @@ -2,6 +2,7 @@ use reth_chainspec::ChainSpec; use std::sync::Arc; /// A trait for reading the current chainspec. +#[auto_impl::auto_impl(&, Arc)] pub trait ChainSpecProvider: Send + Sync { /// Get an [`Arc`] to the chainspec. fn chain_spec(&self) -> Arc; diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 42ab05f22..8329e959b 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -1,6 +1,7 @@ use crate::{ BlockIdReader, BlockNumReader, HeaderProvider, ReceiptProvider, ReceiptProviderIdExt, - RequestsProvider, TransactionVariant, TransactionsProvider, WithdrawalsProvider, + RequestsProvider, SidecarsProvider, TransactionVariant, TransactionsProvider, + WithdrawalsProvider, }; use reth_db_api::models::StoredBlockBodyIndices; use reth_primitives::{ @@ -53,6 +54,7 @@ pub trait BlockReader: + ReceiptProvider + RequestsProvider + WithdrawalsProvider + + SidecarsProvider + Send + Sync { @@ -118,6 +120,17 @@ pub trait BlockReader: transaction_kind: TransactionVariant, ) -> ProviderResult>; + /// Returns the sealed block with senders with matching number or hash from database. + /// + /// Returns the block's transactions in the requested variant. + /// + /// Returns `None` if block is not found. + fn sealed_block_with_senders( + &self, + id: BlockHashOrNumber, + transaction_kind: TransactionVariant, + ) -> ProviderResult>; + /// Returns all blocks in the given inclusive range. /// /// Note: returns only available blocks diff --git a/crates/storage/storage-api/src/lib.rs b/crates/storage/storage-api/src/lib.rs index c5663bc2a..57b0dbe0f 100644 --- a/crates/storage/storage-api/src/lib.rs +++ b/crates/storage/storage-api/src/lib.rs @@ -51,3 +51,8 @@ pub use trie::*; mod withdrawals; pub use withdrawals::*; + +mod sidecars; +pub use sidecars::*; + +pub mod noop; diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs new file mode 100644 index 000000000..a55371f3c --- /dev/null +++ b/crates/storage/storage-api/src/noop.rs @@ -0,0 +1,44 @@ +//! Various noop implementations for traits. + +use crate::{BlockHashReader, BlockNumReader}; +use reth_chainspec::ChainInfo; +use reth_primitives::{BlockNumber, B256}; +use reth_storage_errors::provider::ProviderResult; + +/// Supports various api interfaces for testing purposes. 
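Since every method of the new noop reader returns an empty default (as the impls just below show), it works as a drop-in placeholder wherever a `BlockHashReader + BlockNumReader` is required but never meaningfully queried. For instance (editorial sketch):

    let reader = NoopBlockReader;
    assert_eq!(reader.best_block_number().unwrap(), 0);
    assert!(reader.block_hash(0).unwrap().is_none());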
+#[derive(Debug, Clone, Default, Copy)]
+#[non_exhaustive]
+pub struct NoopBlockReader;
+
+/// Noop implementation for testing purposes
+impl BlockHashReader for NoopBlockReader {
+    fn block_hash(&self, _number: u64) -> ProviderResult<Option<B256>> {
+        Ok(None)
+    }
+
+    fn canonical_hashes_range(
+        &self,
+        _start: BlockNumber,
+        _end: BlockNumber,
+    ) -> ProviderResult<Vec<B256>> {
+        Ok(vec![])
+    }
+}
+
+impl BlockNumReader for NoopBlockReader {
+    fn chain_info(&self) -> ProviderResult<ChainInfo> {
+        Ok(ChainInfo::default())
+    }
+
+    fn best_block_number(&self) -> ProviderResult<BlockNumber> {
+        Ok(0)
+    }
+
+    fn last_block_number(&self) -> ProviderResult<BlockNumber> {
+        Ok(0)
+    }
+
+    fn block_number(&self, _hash: B256) -> ProviderResult<Option<BlockNumber>> {
+        Ok(None)
+    }
+}
diff --git a/crates/storage/storage-api/src/receipts.rs b/crates/storage/storage-api/src/receipts.rs
index b050ca3e2..04eb81aad 100644
--- a/crates/storage/storage-api/src/receipts.rs
+++ b/crates/storage/storage-api/src/receipts.rs
@@ -38,6 +38,7 @@ pub trait ReceiptProvider: Send + Sync {
 /// so this trait can only be implemented for types that implement `BlockIdReader`. The
 /// `BlockIdReader` methods should be used to resolve `BlockId`s to block numbers or hashes, and
 /// retrieving the receipts should be done using the type's `ReceiptProvider` methods.
+#[auto_impl::auto_impl(&, Arc)]
 pub trait ReceiptProviderIdExt: ReceiptProvider + BlockIdReader {
     /// Get receipt by block id
     fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult<Option<Vec<Receipt>>> {
diff --git a/crates/storage/storage-api/src/requests.rs b/crates/storage/storage-api/src/requests.rs
index c8b13dc05..7f3b3c95e 100644
--- a/crates/storage/storage-api/src/requests.rs
+++ b/crates/storage/storage-api/src/requests.rs
@@ -4,7 +4,7 @@ use reth_storage_errors::provider::ProviderResult;
 /// Client trait for fetching EIP-7685 [Requests] for blocks.
 #[auto_impl::auto_impl(&, Arc)]
 pub trait RequestsProvider: Send + Sync {
-    /// Get withdrawals by block id.
+    /// Get requests by block id.
     fn requests_by_block(
         &self,
         id: BlockHashOrNumber,
diff --git a/crates/storage/storage-api/src/sidecars.rs b/crates/storage/storage-api/src/sidecars.rs
new file mode 100644
index 000000000..ffc106459
--- /dev/null
+++ b/crates/storage/storage-api/src/sidecars.rs
@@ -0,0 +1,16 @@
+use reth_primitives::{BlobSidecars, BlockHash, BlockNumber};
+use reth_storage_errors::provider::ProviderResult;
+
+/// Client trait for fetching [BlobSidecars] for blocks.
+#[auto_impl::auto_impl(&, Arc)]
+pub trait SidecarsProvider: Send + Sync {
+    /// Get sidecars by block hash.
+    ///
+    /// Returns `None` if no sidecars are found.
+    fn sidecars(&self, block_hash: &BlockHash) -> ProviderResult<Option<BlobSidecars>>;
+
+    /// Get sidecars by block number.
+    ///
+    /// Returns `None` if no sidecars are found.
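Both accessors are fallible-but-optional: a block with no stored sidecars yields `Ok(None)` rather than an error. The two call shapes, for illustration (editorial; `provider` is any `SidecarsProvider`):

    let by_hash: Option<BlobSidecars> = provider.sidecars(&block_hash)?;
    let by_number: Option<BlobSidecars> = provider.sidecars_by_number(block_number)?;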
+ fn sidecars_by_number(&self, num: BlockNumber) -> ProviderResult>; +} diff --git a/crates/storage/storage-api/src/state.rs b/crates/storage/storage-api/src/state.rs index 059909a46..c432d331b 100644 --- a/crates/storage/storage-api/src/state.rs +++ b/crates/storage/storage-api/src/state.rs @@ -1,4 +1,4 @@ -use super::{AccountReader, BlockHashReader, BlockIdReader, StateRootProvider}; +use super::{AccountReader, BlockHashReader, BlockIdReader, StateProofProvider, StateRootProvider}; use auto_impl::auto_impl; use reth_execution_types::ExecutionOutcome; use reth_primitives::{ @@ -6,14 +6,15 @@ use reth_primitives::{ StorageValue, B256, KECCAK_EMPTY, U256, }; use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use reth_trie::AccountProof; /// Type alias of boxed [`StateProvider`]. pub type StateProviderBox = Box; /// An abstraction for a type that provides state data. #[auto_impl(&, Arc, Box)] -pub trait StateProvider: BlockHashReader + AccountReader + StateRootProvider + Send + Sync { +pub trait StateProvider: + BlockHashReader + AccountReader + StateRootProvider + StateProofProvider + Send + Sync +{ /// Get storage of given account. fn storage( &self, @@ -24,9 +25,6 @@ pub trait StateProvider: BlockHashReader + AccountReader + StateRootProvider + S /// Get account code by its hash fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult>; - /// Get account and storage proofs. - fn proof(&self, address: Address, keys: &[B256]) -> ProviderResult; - /// Get account code by its address. /// /// Returns `None` if the account doesn't exist or account is not a contract diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index 083f56549..0ab25d18a 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -1,6 +1,6 @@ -use reth_primitives::B256; +use reth_primitives::{Address, B256}; use reth_storage_errors::provider::ProviderResult; -use reth_trie::updates::TrieUpdates; +use reth_trie::{updates::TrieUpdates, AccountProof}; use revm::db::BundleState; /// A type that can compute the state root of a given post state. @@ -22,3 +22,16 @@ pub trait StateRootProvider: Send + Sync { bundle_state: &BundleState, ) -> ProviderResult<(B256, TrieUpdates)>; } + +/// A type that can generate state proof on top of a given post state. +#[auto_impl::auto_impl(&, Box, Arc)] +pub trait StateProofProvider: Send + Sync { + /// Get account and storage proofs of target keys in the `BundleState` + /// on top of the current state. + fn proof( + &self, + state: &BundleState, + address: Address, + slots: &[B256], + ) -> ProviderResult; +} diff --git a/crates/tasks/Cargo.toml b/crates/tasks/Cargo.toml index 63eb870fc..82c80c093 100644 --- a/crates/tasks/Cargo.toml +++ b/crates/tasks/Cargo.toml @@ -23,6 +23,7 @@ reth-metrics.workspace = true metrics.workspace = true # misc +auto_impl.workspace = true tracing.workspace = true thiserror.workspace = true dyn-clone.workspace = true diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs index ee5222e91..a0070698f 100644 --- a/crates/tasks/src/lib.rs +++ b/crates/tasks/src/lib.rs @@ -84,6 +84,7 @@ pub mod pool; /// ``` /// /// The [`TaskSpawner`] trait is [`DynClone`] so `Box` are also `Clone`. +#[auto_impl::auto_impl(&, Arc)] pub trait TaskSpawner: Send + Sync + Unpin + std::fmt::Debug + DynClone { /// Spawns the task onto the runtime. /// See also [`Handle::spawn`]. 
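With `auto_impl(&, Arc)` newly added above, borrowed and reference-counted spawners satisfy the trait directly, so call sites no longer need to clone. Editorial sketch (`executor` stands for any concrete `TaskSpawner` such as the `TaskExecutor` below):

    fn takes_spawner<S: TaskSpawner>(spawner: S) { /* ... */ }

    // both of these compile after this change:
    // takes_spawner(&executor);
    // takes_spawner(Arc::new(executor));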
@@ -464,7 +465,7 @@ impl TaskExecutor { error!("{task_error}"); let _ = panicked_tasks_tx.send(task_error); }) - .map(|_| ()) + .map(drop) .in_current_span(); self.handle.spawn(task) @@ -513,7 +514,7 @@ impl TaskExecutor { error!("{task_error}"); let _ = panicked_tasks_tx.send(task_error); }) - .map(|_| ()) + .map(drop) .in_current_span(); self.handle.spawn(task) @@ -580,6 +581,7 @@ impl TaskSpawner for TaskExecutor { } /// `TaskSpawner` with extended behaviour +#[auto_impl::auto_impl(&, Arc)] pub trait TaskSpawnerExt: Send + Sync + Unpin + std::fmt::Debug + DynClone { /// This spawns a critical task onto the runtime. /// diff --git a/crates/tasks/src/pool.rs b/crates/tasks/src/pool.rs index dbb4e19de..10fedcced 100644 --- a/crates/tasks/src/pool.rs +++ b/crates/tasks/src/pool.rs @@ -43,7 +43,8 @@ impl BlockingTaskGuard { /// /// This is a dedicated threadpool for blocking tasks which are CPU bound. /// RPC calls that perform blocking IO (disk lookups) are not executed on this pool but on the tokio -/// runtime's blocking pool, which performs poorly with CPU bound tasks. Once the tokio blocking +/// runtime's blocking pool, which performs poorly with CPU bound tasks (see +/// ). Once the tokio blocking /// pool is saturated it is converted into a queue, blocking tasks could then interfere with the /// queue and block other RPC calls. /// diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 527c3412e..2484f6784 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -148,7 +148,6 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![warn(clippy::missing_const_for_fn)] use crate::{identifier::TransactionId, pool::PoolInner}; use aquamarine as _; diff --git a/crates/transaction-pool/src/metrics.rs b/crates/transaction-pool/src/metrics.rs index 90d46854d..c75e3403c 100644 --- a/crates/transaction-pool/src/metrics.rs +++ b/crates/transaction-pool/src/metrics.rs @@ -104,4 +104,10 @@ pub struct AllTransactionsMetrics { pub(crate) all_transactions_by_id: Gauge, /// Number of all transactions by all senders in the pool pub(crate) all_transactions_by_all_senders: Gauge, + /// Number of blob transactions nonce gaps. 
+ pub(crate) blob_transactions_nonce_gaps: Counter, + /// The current blob base fee + pub(crate) blob_base_fee: Gauge, + /// The current base fee + pub(crate) base_fee: Gauge, } diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index a595b9f4d..1b4a8eafe 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -49,18 +49,16 @@ impl Iterator for BestTransactionsWithFees { // find the next transaction that satisfies the base fee loop { let best = self.best.next()?; - if best.transaction.max_fee_per_gas() < self.base_fee as u128 { - // tx violates base fee, mark it as invalid and continue - crate::traits::BestTransactions::mark_invalid(self, &best); + // If both the base fee and blob fee (if applicable for EIP-4844) are satisfied, return + // the transaction + if best.transaction.max_fee_per_gas() >= self.base_fee as u128 && + best.transaction + .max_fee_per_blob_gas() + .map_or(true, |fee| fee >= self.base_fee_per_blob_gas as u128) + { + return Some(best); } else { - // tx is EIP4844 and violates blob fee, mark it as invalid and continue - if best.transaction.max_fee_per_blob_gas().is_some_and(|max_fee_per_blob_gas| { - max_fee_per_blob_gas < self.base_fee_per_blob_gas as u128 - }) { - crate::traits::BestTransactions::mark_invalid(self, &best); - continue - }; - return Some(best) + crate::traits::BestTransactions::mark_invalid(self, &best); } } } @@ -270,7 +268,9 @@ mod tests { use crate::{ pool::pending::PendingPool, test_utils::{MockOrdering, MockTransaction, MockTransactionFactory}, + Priority, }; + use reth_primitives::U256; #[test] fn test_best_iter() { @@ -321,4 +321,279 @@ mod tests { // iterator is empty assert!(best.next().is_none()); } + + #[test] + fn test_best_with_fees_iter_base_fee_satisfied() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let num_tx = 5; + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 15; + + // Insert transactions with a max_fee_per_gas greater than or equal to the base fee + // Without blob fee + for nonce in 0..num_tx { + let tx = MockTransaction::eip1559() + .rng_hash() + .with_nonce(nonce) + .with_max_fee(base_fee as u128 + 5); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + for nonce in 0..num_tx { + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.nonce(), nonce); + assert!(tx.transaction.max_fee_per_gas() >= base_fee as u128); + } + } + + #[test] + fn test_best_with_fees_iter_base_fee_violated() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let num_tx = 5; + let base_fee: u64 = 20; + let base_fee_per_blob_gas: u64 = 15; + + // Insert transactions with a max_fee_per_gas less than the base fee + for nonce in 0..num_tx { + let tx = MockTransaction::eip1559() + .rng_hash() + .with_nonce(nonce) + .with_max_fee(base_fee as u128 - 5); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + // No transaction should be returned since all violate the base fee + assert!(best.next().is_none()); + } + + #[test] + fn test_best_with_fees_iter_blob_fee_satisfied() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = 
MockTransactionFactory::default(); + + let num_tx = 5; + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 20; + + // Insert transactions with a max_fee_per_blob_gas greater than or equal to the base fee per + // blob gas + for nonce in 0..num_tx { + let tx = MockTransaction::eip4844() + .rng_hash() + .with_nonce(nonce) + .with_max_fee(base_fee as u128 + 5) + .with_blob_fee(base_fee_per_blob_gas as u128 + 5); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + // All transactions should be returned in order since they satisfy both base fee and blob + // fee + for nonce in 0..num_tx { + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.nonce(), nonce); + assert!(tx.transaction.max_fee_per_gas() >= base_fee as u128); + assert!( + tx.transaction.max_fee_per_blob_gas().unwrap() >= base_fee_per_blob_gas as u128 + ); + } + + // No more transactions should be returned + assert!(best.next().is_none()); + } + + #[test] + fn test_best_with_fees_iter_blob_fee_violated() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let num_tx = 5; + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 20; + + // Insert transactions with a max_fee_per_blob_gas less than the base fee per blob gas + for nonce in 0..num_tx { + let tx = MockTransaction::eip4844() + .rng_hash() + .with_nonce(nonce) + .with_max_fee(base_fee as u128 + 5) + .with_blob_fee(base_fee_per_blob_gas as u128 - 5); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + // No transaction should be returned since all violate the blob fee + assert!(best.next().is_none()); + } + + #[test] + fn test_best_with_fees_iter_mixed_fees() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + let base_fee: u64 = 10; + let base_fee_per_blob_gas: u64 = 20; + + // Insert transactions with varying max_fee_per_gas and max_fee_per_blob_gas + let tx1 = + MockTransaction::eip1559().rng_hash().with_nonce(0).with_max_fee(base_fee as u128 + 5); + let tx2 = MockTransaction::eip4844() + .rng_hash() + .with_nonce(1) + .with_max_fee(base_fee as u128 + 5) + .with_blob_fee(base_fee_per_blob_gas as u128 + 5); + let tx3 = MockTransaction::eip4844() + .rng_hash() + .with_nonce(2) + .with_max_fee(base_fee as u128 + 5) + .with_blob_fee(base_fee_per_blob_gas as u128 - 5); + let tx4 = + MockTransaction::eip1559().rng_hash().with_nonce(3).with_max_fee(base_fee as u128 - 5); + + pool.add_transaction(Arc::new(f.validated(tx1.clone())), 0); + pool.add_transaction(Arc::new(f.validated(tx2.clone())), 0); + pool.add_transaction(Arc::new(f.validated(tx3)), 0); + pool.add_transaction(Arc::new(f.validated(tx4)), 0); + + let mut best = pool.best_with_basefee_and_blobfee(base_fee, base_fee_per_blob_gas); + + let expected_order = vec![tx1, tx2]; + for expected_tx in expected_order { + let tx = best.next().expect("Transaction should be returned"); + assert_eq!(tx.transaction, expected_tx); + } + + // No more transactions should be returned + assert!(best.next().is_none()); + } + + #[test] + fn test_best_add_transaction_with_next_nonce() { + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add 5 transactions with increasing 
nonces to the pool + let num_tx = 5; + let tx = MockTransaction::eip1559(); + for nonce in 0..num_tx { + let tx = tx.clone().rng_hash().with_nonce(nonce); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + // Create a BestTransactions iterator from the pool + let mut best = pool.best(); + + // Use a broadcast channel for transaction updates + let (tx_sender, tx_receiver) = + tokio::sync::broadcast::channel::>(1000); + best.new_transaction_receiver = Some(tx_receiver); + + // Create a new transaction with nonce 5 and validate it + let new_tx = MockTransaction::eip1559().rng_hash().with_nonce(5); + let valid_new_tx = f.validated(new_tx); + + // Send the new transaction through the broadcast channel + let pending_tx = PendingTransaction { + submission_id: 10, + transaction: Arc::new(valid_new_tx.clone()), + priority: Priority::Value(U256::from(1000)), + }; + tx_sender.send(pending_tx.clone()).unwrap(); + + // Add new transactions to the iterator + best.add_new_transactions(); + + // Verify that the new transaction has been added to the 'all' map + assert_eq!(best.all.len(), 6); + assert!(best.all.contains_key(valid_new_tx.id())); + + // Verify that the new transaction has been added to the 'independent' set + assert_eq!(best.independent.len(), 2); + assert!(best.independent.contains(&pending_tx)); + } + + #[test] + fn test_best_add_transaction_with_ancestor() { + // Initialize a new PendingPool with default MockOrdering and MockTransactionFactory + let mut pool = PendingPool::new(MockOrdering::default()); + let mut f = MockTransactionFactory::default(); + + // Add 5 transactions with increasing nonces to the pool + let num_tx = 5; + let tx = MockTransaction::eip1559(); + for nonce in 0..num_tx { + let tx = tx.clone().rng_hash().with_nonce(nonce); + let valid_tx = f.validated(tx); + pool.add_transaction(Arc::new(valid_tx), 0); + } + + // Create a BestTransactions iterator from the pool + let mut best = pool.best(); + + // Use a broadcast channel for transaction updates + let (tx_sender, tx_receiver) = + tokio::sync::broadcast::channel::>(1000); + best.new_transaction_receiver = Some(tx_receiver); + + // Create a new transaction with nonce 5 and validate it + let base_tx1 = MockTransaction::eip1559().rng_hash().with_nonce(5); + let valid_new_tx1 = f.validated(base_tx1.clone()); + + // Send the new transaction through the broadcast channel + let pending_tx1 = PendingTransaction { + submission_id: 10, + transaction: Arc::new(valid_new_tx1.clone()), + priority: Priority::Value(U256::from(1000)), + }; + tx_sender.send(pending_tx1.clone()).unwrap(); + + // Add new transactions to the iterator + best.add_new_transactions(); + + // Verify that the new transaction has been added to the 'all' map + assert_eq!(best.all.len(), 6); + assert!(best.all.contains_key(valid_new_tx1.id())); + + // Verify that the new transaction has been added to the 'independent' set + assert_eq!(best.independent.len(), 2); + assert!(best.independent.contains(&pending_tx1)); + + // Attempt to add a new transaction with a different nonce (not a duplicate) + let base_tx2 = base_tx1.with_nonce(6); + let valid_new_tx2 = f.validated(base_tx2); + + // Send the new transaction through the broadcast channel + let pending_tx2 = PendingTransaction { + submission_id: 11, // Different submission ID + transaction: Arc::new(valid_new_tx2.clone()), + priority: Priority::Value(U256::from(1000)), + }; + tx_sender.send(pending_tx2.clone()).unwrap(); + + // Add new transactions to the iterator + 
best.add_new_transactions(); + + // Verify that the new transaction has been added to 'all' + assert_eq!(best.all.len(), 7); + assert!(best.all.contains_key(valid_new_tx2.id())); + + // Verify that the new transaction has not been added to the 'independent' set + assert_eq!(best.independent.len(), 2); + assert!(!best.independent.contains(&pending_tx2)); + } } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 48048412e..a51cdc44e 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -982,9 +982,13 @@ impl AllTransactions { } = block_info; self.last_seen_block_number = last_seen_block_number; self.last_seen_block_hash = last_seen_block_hash; + self.pending_fees.base_fee = pending_basefee; + self.metrics.base_fee.set(pending_basefee as f64); + if let Some(pending_blob_fee) = pending_blob_fee { self.pending_fees.blob_fee = pending_blob_fee; + self.metrics.blob_base_fee.set(pending_blob_fee as f64); } } @@ -1335,11 +1339,13 @@ impl AllTransactions { if let Some(ancestor) = ancestor { let Some(ancestor_tx) = self.txs.get(&ancestor) else { // ancestor tx is missing, so we can't insert the new blob + self.metrics.blob_transactions_nonce_gaps.increment(1); return Err(InsertErr::BlobTxHasNonceGap { transaction: Arc::new(new_blob_tx) }) }; if ancestor_tx.state.has_nonce_gap() { // the ancestor transaction already has a nonce gap, so we can't insert the new // blob + self.metrics.blob_transactions_nonce_gaps.increment(1); return Err(InsertErr::BlobTxHasNonceGap { transaction: Arc::new(new_blob_tx) }) } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 41d57e941..e43c45e17 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -39,7 +39,7 @@ pub type PeerId = reth_primitives::B512; /// /// Note: This requires `Clone` for convenience, since it is assumed that this will be implemented /// for a wrapped `Arc` type, see also [`Pool`](crate::Pool). -#[auto_impl::auto_impl(Arc)] +#[auto_impl::auto_impl(&, Arc)] pub trait TransactionPool: Send + Sync + Clone { /// The transaction type of the pool type Transaction: PoolTransaction; @@ -388,7 +388,7 @@ pub trait TransactionPool: Send + Sync + Clone { } /// Extension for [TransactionPool] trait that allows to set the current block info. -#[auto_impl::auto_impl(Arc)] +#[auto_impl::auto_impl(&, Arc)] pub trait TransactionPoolExt: TransactionPool { /// Sets the current block info for the pool. 
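The same `&` addition on both pool traits means a component that only borrows the pool can now satisfy a by-value bound. Editorial sketch:

    fn update_pool<P: TransactionPoolExt>(pool: P, info: BlockInfo) {
        pool.set_block_info(info);
    }

    // after this change a borrowed handle works too:
    // update_pool(&pool, info);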
fn set_block_info(&self, info: BlockInfo); diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 8bdd68d62..eef090bcd 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -9,19 +9,18 @@ use crate::{ EthBlobTransactionSidecar, EthPoolTransaction, LocalTransactionConfig, PoolTransaction, TransactionValidationOutcome, TransactionValidationTaskExecutor, TransactionValidator, }; -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_primitives::{ - constants::{ - eip4844::{MAINNET_KZG_TRUSTED_SETUP, MAX_BLOBS_PER_BLOCK}, - ETHEREUM_BLOCK_GAS_LIMIT, - }, - kzg::KzgSettings, - Address, GotExpected, InvalidTransactionError, SealedBlock, TxKind, EIP1559_TX_TYPE_ID, - EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, U256, + constants::{eip4844::MAX_BLOBS_PER_BLOCK, ETHEREUM_BLOCK_GAS_LIMIT}, + GotExpected, InvalidTransactionError, SealedBlock, TxKind, EIP1559_TX_TYPE_ID, + EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID, }; use reth_provider::{AccountReader, BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskSpawner; -use revm::{interpreter::gas::validate_initial_tx_gas, primitives::SpecId}; +use revm::{ + interpreter::gas::validate_initial_tx_gas, + primitives::{AccessListItem, EnvKzgSettings, SpecId}, +}; use std::{ marker::PhantomData, sync::{atomic::AtomicBool, Arc}, @@ -125,7 +124,7 @@ pub(crate) struct EthTransactionValidatorInner { /// Minimum priority fee to enforce for acceptance into the pool. minimum_priority_fee: Option, /// Stores the setup and parameters needed for validating KZG proofs. - kzg_settings: Arc, + kzg_settings: EnvKzgSettings, /// How to handle [`TransactionOrigin::Local`](TransactionOrigin) transactions. local_transactions_config: LocalTransactionConfig, /// Maximum size in bytes a single transaction can have in order to be accepted into the pool. @@ -369,7 +368,7 @@ where } EthBlobTransactionSidecar::Present(blob) => { // validate the blob - if let Err(err) = transaction.validate_blob(&blob, &self.kzg_settings) { + if let Err(err) = transaction.validate_blob(&blob, self.kzg_settings.get()) { return TransactionValidationOutcome::Invalid( transaction, InvalidPoolTransactionError::Eip4844( @@ -435,7 +434,7 @@ pub struct EthTransactionValidatorBuilder { additional_tasks: usize, /// Stores the setup and parameters needed for validating KZG proofs. - kzg_settings: Arc, + kzg_settings: EnvKzgSettings, /// How to handle [`TransactionOrigin::Local`](TransactionOrigin) transactions. local_transactions_config: LocalTransactionConfig, /// Max size in bytes of a single transaction allowed @@ -457,7 +456,7 @@ impl EthTransactionValidatorBuilder { block_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, minimum_priority_fee: None, additional_tasks: 1, - kzg_settings: Arc::clone(&MAINNET_KZG_TRUSTED_SETUP), + kzg_settings: EnvKzgSettings::Default, local_transactions_config: Default::default(), max_tx_input_bytes: DEFAULT_MAX_TX_INPUT_BYTES, @@ -538,8 +537,8 @@ impl EthTransactionValidatorBuilder { self } - /// Sets the [`KzgSettings`] to use for validating KZG proofs. - pub fn kzg_settings(mut self, kzg_settings: Arc) -> Self { + /// Sets the [`EnvKzgSettings`] to use for validating KZG proofs. 
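Editorial note on the setter that follows: `EnvKzgSettings::Default` (now the builder's default, per the constructor above) resolves to revm's bundled mainnet trusted setup, so only callers with a custom setup should need this, e.g. (sketch, assuming the builder's `new(chain_spec)` constructor):

    let builder = EthTransactionValidatorBuilder::new(chain_spec)
        .kzg_settings(EnvKzgSettings::Default);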
+ pub fn kzg_settings(mut self, kzg_settings: EnvKzgSettings) -> Self { self.kzg_settings = kzg_settings; self } @@ -713,12 +712,11 @@ pub fn ensure_intrinsic_gas( transaction: &T, is_shanghai: bool, ) -> Result<(), InvalidPoolTransactionError> { - let access_list = transaction.access_list().map(|list| list.flattened()).unwrap_or_default(); if transaction.gas_limit() < calculate_intrinsic_gas_after_merge( transaction.input(), &transaction.kind(), - &access_list, + transaction.access_list().map(|list| list.0.as_slice()).unwrap_or(&[]), is_shanghai, ) { @@ -735,11 +733,11 @@ pub fn ensure_intrinsic_gas( pub fn calculate_intrinsic_gas_after_merge( input: &[u8], kind: &TxKind, - access_list: &[(Address, Vec)], + access_list: &[AccessListItem], is_shanghai: bool, ) -> u64 { let spec_id = if is_shanghai { SpecId::SHANGHAI } else { SpecId::MERGE }; - validate_initial_tx_gas(spec_id, input, kind.is_create(), access_list) + validate_initial_tx_gas(spec_id, input, kind.is_create(), access_list, 0) } #[cfg(test)] diff --git a/crates/trie/common/src/lib.rs b/crates/trie/common/src/lib.rs index bc3749e6f..f845c9ca5 100644 --- a/crates/trie/common/src/lib.rs +++ b/crates/trie/common/src/lib.rs @@ -6,8 +6,6 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -// TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged -#![allow(unknown_lints, non_local_definitions)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] /// The implementation of hash builder. diff --git a/crates/trie/parallel/benches/root.rs b/crates/trie/parallel/benches/root.rs index d52702fbc..6a7d7a81c 100644 --- a/crates/trie/parallel/benches/root.rs +++ b/crates/trie/parallel/benches/root.rs @@ -30,7 +30,7 @@ pub fn calculate_state_root(c: &mut Criterion) { HashedStateChanges(db_state).write_to_db(provider_rw.tx_ref()).unwrap(); let (_, updates) = StateRoot::from_tx(provider_rw.tx_ref()).root_with_updates().unwrap(); - updates.flush(provider_rw.tx_ref()).unwrap(); + updates.write_to_database(provider_rw.tx_ref()).unwrap(); provider_rw.commit().unwrap(); } @@ -41,7 +41,7 @@ pub fn calculate_state_root(c: &mut Criterion) { b.to_async(&runtime).iter_with_setup( || { let sorted_state = updated_state.clone().into_sorted(); - let prefix_sets = updated_state.construct_prefix_sets(); + let prefix_sets = updated_state.construct_prefix_sets().freeze(); let provider = provider_factory.provider().unwrap(); (provider, sorted_state, prefix_sets) }, diff --git a/crates/trie/parallel/src/async_root.rs b/crates/trie/parallel/src/async_root.rs index a36a01be5..db6152b6a 100644 --- a/crates/trie/parallel/src/async_root.rs +++ b/crates/trie/parallel/src/async_root.rs @@ -86,7 +86,7 @@ where retain_updates: bool, ) -> Result<(B256, TrieUpdates), AsyncStateRootError> { let mut tracker = ParallelTrieTracker::default(); - let prefix_sets = self.hashed_state.construct_prefix_sets(); + let prefix_sets = self.hashed_state.construct_prefix_sets().freeze(); let storage_root_targets = StorageRootTargets::new( self.hashed_state.accounts.keys().copied(), prefix_sets.storage_prefix_sets, @@ -132,7 +132,7 @@ where trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, prefix_sets.account_prefix_set, ) - .with_updates(retain_updates); + .with_deletions_retained(retain_updates); let mut account_node_iter = TrieNodeIter::new( walker, hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, @@ -166,7 +166,7 @@ where }; if 
retain_updates { - trie_updates.extend(updates.into_iter()); + trie_updates.insert_storage_updates(hashed_address, updates); } account_rlp.clear(); @@ -179,7 +179,7 @@ where let root = hash_builder.root(); - trie_updates.finalize_state_updates( + trie_updates.finalize( account_node_iter.walker, hash_builder, prefix_sets.destroyed_accounts, diff --git a/crates/trie/parallel/src/parallel_root.rs b/crates/trie/parallel/src/parallel_root.rs index edf552096..0983fd47e 100644 --- a/crates/trie/parallel/src/parallel_root.rs +++ b/crates/trie/parallel/src/parallel_root.rs @@ -77,7 +77,7 @@ where retain_updates: bool, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { let mut tracker = ParallelTrieTracker::default(); - let prefix_sets = self.hashed_state.construct_prefix_sets(); + let prefix_sets = self.hashed_state.construct_prefix_sets().freeze(); let storage_root_targets = StorageRootTargets::new( self.hashed_state.accounts.keys().copied(), prefix_sets.storage_prefix_sets, @@ -116,7 +116,7 @@ where trie_cursor_factory.account_trie_cursor().map_err(ProviderError::Database)?, prefix_sets.account_prefix_set, ) - .with_updates(retain_updates); + .with_deletions_retained(retain_updates); let mut account_node_iter = TrieNodeIter::new( walker, hashed_cursor_factory.hashed_account_cursor().map_err(ProviderError::Database)?, @@ -148,7 +148,7 @@ where }; if retain_updates { - trie_updates.extend(updates.into_iter()); + trie_updates.insert_storage_updates(hashed_address, updates); } account_rlp.clear(); @@ -161,7 +161,7 @@ where let root = hash_builder.root(); - trie_updates.finalize_state_updates( + trie_updates.finalize( account_node_iter.walker, hash_builder, prefix_sets.destroyed_accounts, diff --git a/crates/trie/trie/src/forward_cursor.rs b/crates/trie/trie/src/forward_cursor.rs new file mode 100644 index 000000000..1f14a462b --- /dev/null +++ b/crates/trie/trie/src/forward_cursor.rs @@ -0,0 +1,51 @@ +/// The implementation of forward-only in memory cursor over the entries. +/// The cursor operates under the assumption that the supplied collection is pre-sorted. +#[derive(Debug)] +pub struct ForwardInMemoryCursor<'a, K, V> { + /// The reference to the pre-sorted collection of entries. + entries: &'a Vec<(K, V)>, + /// The index where cursor is currently positioned. + index: usize, +} + +impl<'a, K, V> ForwardInMemoryCursor<'a, K, V> { + /// Create new forward cursor positioned at the beginning of the collection. + /// The cursor expects all of the entries have been sorted in advance. + pub const fn new(entries: &'a Vec<(K, V)>) -> Self { + Self { entries, index: 0 } + } + + /// Returns `true` if the cursor is empty, regardless of its position. + pub fn is_empty(&self) -> bool { + self.entries.is_empty() + } +} + +impl<'a, K, V> ForwardInMemoryCursor<'a, K, V> +where + K: PartialOrd + Copy, + V: Copy, +{ + /// Advances the cursor forward while `comparator` returns `true` or until the collection is + /// exhausted. Returns the first entry for which `comparator` returns `false` or `None`. + fn advance_while_false(&mut self, comparator: impl Fn(&K) -> bool) -> Option<(K, V)> { + let mut entry = self.entries.get(self.index); + while entry.map_or(false, |entry| comparator(&entry.0)) { + self.index += 1; + entry = self.entries.get(self.index); + } + entry.copied() + } + + /// Returns the first entry from the current cursor position that's greater or equal to the + /// provided key. This method advances the cursor forward. 
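A worked example of the two lookups defined next (editorial; `i32`/`char` stand in for the generic `K`/`V`):

    let entries = vec![(1, 'a'), (3, 'b'), (5, 'c')];
    let mut cursor = ForwardInMemoryCursor::new(&entries);
    assert_eq!(cursor.seek(&3), Some((3, 'b')));        // first key >= 3
    assert_eq!(cursor.first_after(&3), Some((5, 'c'))); // first key > 3
    assert_eq!(cursor.seek(&2), Some((5, 'c')));        // forward-only: the cursor never rewinds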
+ pub fn seek(&mut self, key: &K) -> Option<(K, V)> { + self.advance_while_false(|k| k < key) + } + + /// Returns the first entry from the current cursor position that's greater than the provided + /// key. This method advances the cursor forward. + pub fn first_after(&mut self, key: &K) -> Option<(K, V)> { + self.advance_while_false(|k| k <= key) + } +} diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index 41b051b2a..ac262f3d4 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -1,6 +1,11 @@ use super::{HashedCursor, HashedCursorFactory, HashedStorageCursor}; -use crate::state::HashedPostStateSorted; +use crate::{ + forward_cursor::ForwardInMemoryCursor, HashedAccountsSorted, HashedPostStateSorted, + HashedStorageSorted, +}; +use reth_db::DatabaseError; use reth_primitives::{Account, B256, U256}; +use std::collections::HashSet; /// The hashed cursor factory for the post state. #[derive(Debug, Clone)] @@ -20,39 +25,44 @@ impl<'a, CF: HashedCursorFactory> HashedCursorFactory for HashedPostStateCursorF type AccountCursor = HashedPostStateAccountCursor<'a, CF::AccountCursor>; type StorageCursor = HashedPostStateStorageCursor<'a, CF::StorageCursor>; - fn hashed_account_cursor(&self) -> Result { + fn hashed_account_cursor(&self) -> Result { let cursor = self.cursor_factory.hashed_account_cursor()?; - Ok(HashedPostStateAccountCursor::new(cursor, self.post_state)) + Ok(HashedPostStateAccountCursor::new(cursor, &self.post_state.accounts)) } fn hashed_storage_cursor( &self, hashed_address: B256, - ) -> Result { + ) -> Result { let cursor = self.cursor_factory.hashed_storage_cursor(hashed_address)?; - Ok(HashedPostStateStorageCursor::new(cursor, self.post_state, hashed_address)) + Ok(HashedPostStateStorageCursor::new(cursor, self.post_state.storages.get(&hashed_address))) } } /// The cursor to iterate over post state hashed accounts and corresponding database entries. /// It will always give precedence to the data from the hashed post state. -#[derive(Debug, Clone)] -pub struct HashedPostStateAccountCursor<'b, C> { +#[derive(Debug)] +pub struct HashedPostStateAccountCursor<'a, C> { /// The database cursor. cursor: C, - /// The reference to the in-memory [`HashedPostStateSorted`]. - post_state: &'b HashedPostStateSorted, - /// The post state account index where the cursor is currently at. - post_state_account_index: usize, + /// Forward-only in-memory cursor over accounts. + post_state_cursor: ForwardInMemoryCursor<'a, B256, Account>, + /// Reference to the collection of account keys that were destroyed. + destroyed_accounts: &'a HashSet, /// The last hashed account that was returned by the cursor. /// De facto, this is a current cursor position. last_account: Option, } -impl<'b, C> HashedPostStateAccountCursor<'b, C> { +impl<'a, C> HashedPostStateAccountCursor<'a, C> +where + C: HashedCursor, +{ /// Create new instance of [`HashedPostStateAccountCursor`]. 
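A minimal sketch (not part of the diff) of how the new `ForwardInMemoryCursor` is meant to behave, based on the implementation above; the keys and values are arbitrary placeholders, and the `reth_trie::forward_cursor` path follows the `pub mod forward_cursor` added to the crate root in this diff:

fn forward_cursor_demo() {
    use reth_trie::forward_cursor::ForwardInMemoryCursor;

    // Entries must be pre-sorted by key, as the type's docs require.
    let entries = vec![(1u8, "a"), (3u8, "b"), (5u8, "c")];
    let mut cursor = ForwardInMemoryCursor::new(&entries);

    assert_eq!(cursor.seek(&3), Some((3, "b")));        // first entry >= 3
    assert_eq!(cursor.first_after(&3), Some((5, "c"))); // first entry > 3
    assert_eq!(cursor.first_after(&5), None);           // forward-only: the cursor never rewinds
}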
- pub const fn new(cursor: C, post_state: &'b HashedPostStateSorted) -> Self { - Self { cursor, post_state, last_account: None, post_state_account_index: 0 } + pub const fn new(cursor: C, post_state_accounts: &'a HashedAccountsSorted) -> Self { + let post_state_cursor = ForwardInMemoryCursor::new(&post_state_accounts.accounts); + let destroyed_accounts = &post_state_accounts.destroyed_accounts; + Self { cursor, post_state_cursor, destroyed_accounts, last_account: None } } /// Returns `true` if the account has been destroyed. @@ -61,34 +71,67 @@ impl<'b, C> HashedPostStateAccountCursor<'b, C> { /// This function only checks the post state, not the database, because the latter does not /// store destroyed accounts. fn is_account_cleared(&self, account: &B256) -> bool { - self.post_state.destroyed_accounts.contains(account) + self.destroyed_accounts.contains(account) + } + + fn seek_inner(&mut self, key: B256) -> Result, DatabaseError> { + // Take the next account from the post state with the key greater than or equal to the + // sought key. + let post_state_entry = self.post_state_cursor.seek(&key); + + // It's an exact match, return the account from post state without looking up in the + // database. + if post_state_entry.map_or(false, |entry| entry.0 == key) { + return Ok(post_state_entry) + } + + // It's not an exact match, reposition to the first greater or equal account that wasn't + // cleared. + let mut db_entry = self.cursor.seek(key)?; + while db_entry.as_ref().map_or(false, |(address, _)| self.is_account_cleared(address)) { + db_entry = self.cursor.next()?; + } + + // Compare two entries and return the lowest. + Ok(Self::compare_entries(post_state_entry, db_entry)) + } + + fn next_inner(&mut self, last_account: B256) -> Result, DatabaseError> { + // Take the next account from the post state with the key greater than the last sought key. + let post_state_entry = self.post_state_cursor.first_after(&last_account); + + // If post state was given precedence or account was cleared, move the cursor forward. + let mut db_entry = self.cursor.seek(last_account)?; + while db_entry.as_ref().map_or(false, |(address, _)| { + address <= &last_account || self.is_account_cleared(address) + }) { + db_entry = self.cursor.next()?; + } + + // Compare two entries and return the lowest. + Ok(Self::compare_entries(post_state_entry, db_entry)) } /// Return the account with the lowest hashed account key. /// /// Given the next post state and database entries, return the smallest of the two. /// If the account keys are the same, the post state entry is given precedence. 
-    fn next_account(
-        post_state_item: Option<&(B256, Account)>,
+    fn compare_entries(
+        post_state_item: Option<(B256, Account)>,
         db_item: Option<(B256, Account)>,
     ) -> Option<(B256, Account)> {
-        match (post_state_item, db_item) {
+        if let Some((post_state_entry, db_entry)) = post_state_item.zip(db_item) {
             // If both are not empty, return the smallest of the two
             // Post state is given precedence if keys are equal
-            (Some((post_state_address, post_state_account)), Some((db_address, db_account))) => {
-                if post_state_address <= &db_address {
-                    Some((*post_state_address, *post_state_account))
-                } else {
-                    Some((db_address, db_account))
-                }
-            }
+            Some(if post_state_entry.0 <= db_entry.0 { post_state_entry } else { db_entry })
+        } else {
             // Return either non-empty entry
-            _ => post_state_item.copied().or(db_item),
+            db_item.or(post_state_item)
         }
     }
 }
 
-impl<'b, C> HashedCursor for HashedPostStateAccountCursor<'b, C>
+impl<'a, C> HashedCursor for HashedPostStateAccountCursor<'a, C>
 where
     C: HashedCursor<Value = Account>,
 {
@@ -102,41 +145,11 @@ where
     ///
     /// The returned account key is memoized and the cursor remains positioned at that key until
     /// [`HashedCursor::seek`] or [`HashedCursor::next`] are called.
-    fn seek(&mut self, key: B256) -> Result<Option<(B256, Self::Value)>, reth_db::DatabaseError> {
-        self.last_account = None;
-
-        // Take the next account from the post state with the key greater than or equal to the
-        // sought key.
-        let mut post_state_entry = self.post_state.accounts.get(self.post_state_account_index);
-        while post_state_entry.map(|(k, _)| k < &key).unwrap_or_default() {
-            self.post_state_account_index += 1;
-            post_state_entry = self.post_state.accounts.get(self.post_state_account_index);
-        }
-
-        // It's an exact match, return the account from post state without looking up in the
-        // database.
-        if let Some((address, account)) = post_state_entry {
-            if address == &key {
-                self.last_account = Some(*address);
-                return Ok(Some((*address, *account)))
-            }
-        }
-
-        // It's not an exact match, reposition to the first greater or equal account that wasn't
-        // cleared.
-        let mut db_entry = self.cursor.seek(key)?;
-        while db_entry
-            .as_ref()
-            .map(|(address, _)| self.is_account_cleared(address))
-            .unwrap_or_default()
-        {
-            db_entry = self.cursor.next()?;
-        }
-
-        // Compare two entries and return the lowest.
-        let result = Self::next_account(post_state_entry, db_entry);
-        self.last_account = result.as_ref().map(|(address, _)| *address);
-        Ok(result)
+    fn seek(&mut self, key: B256) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
+        // Find the closest account.
+        let entry = self.seek_inner(key)?;
+        self.last_account = entry.as_ref().map(|entry| entry.0);
+        Ok(entry)
     }
 
     /// Retrieve the next entry from the cursor.
@@ -146,208 +159,151 @@ where
     ///
    /// NOTE: This function will not return any entry unless [`HashedCursor::seek`] has been
    /// called.
-    fn next(&mut self) -> Result<Option<(B256, Self::Value)>, reth_db::DatabaseError> {
-        let last_account = match self.last_account.as_ref() {
-            Some(account) => account,
-            None => return Ok(None), // no previous entry was found
+    fn next(&mut self) -> Result<Option<(B256, Self::Value)>, DatabaseError> {
+        let next = match self.last_account {
+            Some(account) => {
+                let entry = self.next_inner(account)?;
+                self.last_account = entry.as_ref().map(|entry| entry.0);
+                entry
+            }
+            // no previous entry was found
+            None => None,
        };
-
-        // If post state was given precedence, move the cursor forward.
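To make the precedence rule concrete, here is a hypothetical stand-alone version of the merge that `compare_entries` performs (the account and storage cursors apply the same rule); the `merge` name and `Ord` bound are illustrative only:

fn merge<K: Ord, V>(post_state: Option<(K, V)>, db: Option<(K, V)>) -> Option<(K, V)> {
    match (post_state, db) {
        // Both present: the smaller key wins; post state wins ties.
        (Some(p), Some(d)) => Some(if p.0 <= d.0 { p } else { d }),
        // Otherwise return whichever side is non-empty.
        (p, d) => p.or(d),
    }
}

// merge(Some((2, "post")), Some((2, "db"))) == Some((2, "post"))  (tie: post state wins)
// merge(Some((5, "post")), Some((3, "db"))) == Some((3, "db"))   (smaller key wins)
// merge(None, Some((7, "db")))              == Some((7, "db"))   (either non-empty entry)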
- let mut db_entry = self.cursor.seek(*last_account)?; - while db_entry - .as_ref() - .map(|(address, _)| address <= last_account || self.is_account_cleared(address)) - .unwrap_or_default() - { - db_entry = self.cursor.next()?; - } - - // Take the next account from the post state with the key greater than the last sought key. - let mut post_state_entry = self.post_state.accounts.get(self.post_state_account_index); - while post_state_entry.map(|(k, _)| k <= last_account).unwrap_or_default() { - self.post_state_account_index += 1; - post_state_entry = self.post_state.accounts.get(self.post_state_account_index); - } - - // Compare two entries and return the lowest. - let result = Self::next_account(post_state_entry, db_entry); - self.last_account = result.as_ref().map(|(address, _)| *address); - Ok(result) + Ok(next) } } /// The cursor to iterate over post state hashed storages and corresponding database entries. /// It will always give precedence to the data from the post state. -#[derive(Debug, Clone)] -pub struct HashedPostStateStorageCursor<'b, C> { +#[derive(Debug)] +pub struct HashedPostStateStorageCursor<'a, C> { /// The database cursor. cursor: C, - /// The reference to the post state. - post_state: &'b HashedPostStateSorted, - /// The current hashed account key. - hashed_address: B256, - /// The post state index where the cursor is currently at. - post_state_storage_index: usize, + /// Forward-only in-memory cursor over non zero-valued account storage slots. + post_state_cursor: Option>, + /// Reference to the collection of storage slot keys that were cleared. + cleared_slots: Option<&'a HashSet>, + /// Flag indicating whether database storage was wiped. + storage_wiped: bool, /// The last slot that has been returned by the cursor. /// De facto, this is the cursor's position for the given account key. last_slot: Option, } -impl<'b, C> HashedPostStateStorageCursor<'b, C> { +impl<'a, C> HashedPostStateStorageCursor<'a, C> +where + C: HashedStorageCursor, +{ /// Create new instance of [`HashedPostStateStorageCursor`] for the given hashed address. - pub const fn new( - cursor: C, - post_state: &'b HashedPostStateSorted, - hashed_address: B256, - ) -> Self { - Self { cursor, post_state, hashed_address, last_slot: None, post_state_storage_index: 0 } - } - - /// Returns `true` if the storage for the given - /// The database is not checked since it already has no wiped storage entries. - fn is_db_storage_wiped(&self) -> bool { - match self.post_state.storages.get(&self.hashed_address) { - Some(storage) => storage.wiped, - None => false, - } + pub fn new(cursor: C, post_state_storage: Option<&'a HashedStorageSorted>) -> Self { + let post_state_cursor = + post_state_storage.map(|s| ForwardInMemoryCursor::new(&s.non_zero_valued_slots)); + let cleared_slots = post_state_storage.map(|s| &s.zero_valued_slots); + let storage_wiped = post_state_storage.map_or(false, |s| s.wiped); + Self { cursor, post_state_cursor, cleared_slots, storage_wiped, last_slot: None } } /// Check if the slot was zeroed out in the post state. /// The database is not checked since it already has no zero-valued slots. fn is_slot_zero_valued(&self, slot: &B256) -> bool { - self.post_state - .storages - .get(&self.hashed_address) - .map(|storage| storage.zero_valued_slots.contains(slot)) - .unwrap_or_default() + self.cleared_slots.map_or(false, |s| s.contains(slot)) + } + + /// Find the storage entry in post state or database that's greater or equal to provided subkey. 
+ fn seek_inner(&mut self, subkey: B256) -> Result, DatabaseError> { + // Attempt to find the account's storage in post state. + let post_state_entry = self.post_state_cursor.as_mut().and_then(|c| c.seek(&subkey)); + + // If database storage was wiped or it's an exact match, + // return the storage slot from post state without looking up in the database. + if self.storage_wiped || post_state_entry.map_or(false, |entry| entry.0 == subkey) { + return Ok(post_state_entry) + } + + // It's not an exact match and storage was not wiped, + // reposition to the first greater or equal account. + let mut db_entry = self.cursor.seek(subkey)?; + while db_entry.as_ref().map_or(false, |entry| self.is_slot_zero_valued(&entry.0)) { + db_entry = self.cursor.next()?; + } + + // Compare two entries and return the lowest. + Ok(Self::compare_entries(post_state_entry, db_entry)) + } + + /// Find the storage entry that is right after current cursor position. + fn next_inner(&mut self, last_slot: B256) -> Result, DatabaseError> { + // Attempt to find the account's storage in post state. + let post_state_entry = + self.post_state_cursor.as_mut().and_then(|c| c.first_after(&last_slot)); + + // Return post state entry immediately if database was wiped. + if self.storage_wiped { + return Ok(post_state_entry) + } + + // If post state was given precedence, move the cursor forward. + // If the entry was already returned or is zero-valued, move to the next. + let mut db_entry = self.cursor.seek(last_slot)?; + while db_entry + .as_ref() + .map_or(false, |entry| entry.0 == last_slot || self.is_slot_zero_valued(&entry.0)) + { + db_entry = self.cursor.next()?; + } + + // Compare two entries and return the lowest. + Ok(Self::compare_entries(post_state_entry, db_entry)) } /// Return the storage entry with the lowest hashed storage key (hashed slot). /// /// Given the next post state and database entries, return the smallest of the two. /// If the storage keys are the same, the post state entry is given precedence. - fn next_slot( - post_state_item: Option<&(B256, U256)>, + fn compare_entries( + post_state_item: Option<(B256, U256)>, db_item: Option<(B256, U256)>, ) -> Option<(B256, U256)> { - match (post_state_item, db_item) { + if let Some((post_state_entry, db_entry)) = post_state_item.zip(db_item) { // If both are not empty, return the smallest of the two // Post state is given precedence if keys are equal - (Some((post_state_slot, post_state_value)), Some((db_slot, db_value))) => { - if post_state_slot <= &db_slot { - Some((*post_state_slot, *post_state_value)) - } else { - Some((db_slot, db_value)) - } - } + Some(if post_state_entry.0 <= db_entry.0 { post_state_entry } else { db_entry }) + } else { // Return either non-empty entry - _ => db_item.or_else(|| post_state_item.copied()), + db_item.or(post_state_item) } } } -impl<'b, C> HashedCursor for HashedPostStateStorageCursor<'b, C> +impl<'a, C> HashedCursor for HashedPostStateStorageCursor<'a, C> where C: HashedStorageCursor, { type Value = U256; /// Seek the next account storage entry for a given hashed key pair. - fn seek( - &mut self, - subkey: B256, - ) -> Result, reth_db::DatabaseError> { - // Attempt to find the account's storage in post state. 
- let mut post_state_entry = None; - if let Some(storage) = self.post_state.storages.get(&self.hashed_address) { - post_state_entry = storage.non_zero_valued_slots.get(self.post_state_storage_index); - - while post_state_entry.map(|(slot, _)| slot < &subkey).unwrap_or_default() { - self.post_state_storage_index += 1; - post_state_entry = storage.non_zero_valued_slots.get(self.post_state_storage_index); - } - } - - // It's an exact match, return the storage slot from post state without looking up in - // the database. - if let Some((slot, value)) = post_state_entry { - if slot == &subkey { - self.last_slot = Some(*slot); - return Ok(Some((*slot, *value))) - } - } - - // It's not an exact match, reposition to the first greater or equal account. - let db_entry = if self.is_db_storage_wiped() { - None - } else { - let mut db_entry = self.cursor.seek(subkey)?; - - while db_entry - .as_ref() - .map(|entry| self.is_slot_zero_valued(&entry.0)) - .unwrap_or_default() - { - db_entry = self.cursor.next()?; - } - - db_entry - }; - - // Compare two entries and return the lowest. - let result = Self::next_slot(post_state_entry, db_entry); - self.last_slot = result.as_ref().map(|entry| entry.0); - Ok(result) + fn seek(&mut self, subkey: B256) -> Result, DatabaseError> { + let entry = self.seek_inner(subkey)?; + self.last_slot = entry.as_ref().map(|entry| entry.0); + Ok(entry) } /// Return the next account storage entry for the current account key. - /// - /// # Panics - /// - /// If the account key is not set. [`HashedCursor::seek`] must be called first in order to - /// position the cursor. - fn next(&mut self) -> Result, reth_db::DatabaseError> { - let last_slot = match self.last_slot.as_ref() { - Some(slot) => slot, - None => return Ok(None), // no previous entry was found - }; - - let db_entry = if self.is_db_storage_wiped() { - None - } else { - // If post state was given precedence, move the cursor forward. - let mut db_entry = self.cursor.seek(*last_slot)?; - - // If the entry was already returned or is zero-values, move to the next. - while db_entry - .as_ref() - .map(|entry| &entry.0 == last_slot || self.is_slot_zero_valued(&entry.0)) - .unwrap_or_default() - { - db_entry = self.cursor.next()?; + fn next(&mut self) -> Result, DatabaseError> { + let next = match self.last_slot { + Some(last_slot) => { + let entry = self.next_inner(last_slot)?; + self.last_slot = entry.as_ref().map(|entry| entry.0); + entry } - - db_entry + // no previous entry was found + None => None, }; - - // Attempt to find the account's storage in post state. - let mut post_state_entry = None; - if let Some(storage) = self.post_state.storages.get(&self.hashed_address) { - post_state_entry = storage.non_zero_valued_slots.get(self.post_state_storage_index); - while post_state_entry.map(|(slot, _)| slot <= last_slot).unwrap_or_default() { - self.post_state_storage_index += 1; - post_state_entry = storage.non_zero_valued_slots.get(self.post_state_storage_index); - } - } - - // Compare two entries and return the lowest. - let result = Self::next_slot(post_state_entry, db_entry); - self.last_slot = result.as_ref().map(|entry| entry.0); - Ok(result) + Ok(next) } } -impl<'b, C> HashedStorageCursor for HashedPostStateStorageCursor<'b, C> +impl<'a, C> HashedStorageCursor for HashedPostStateStorageCursor<'a, C> where C: HashedStorageCursor, { @@ -355,13 +311,13 @@ where /// /// This function should be called before attempting to call [`HashedCursor::seek`] or /// [`HashedCursor::next`]. 
- fn is_storage_empty(&mut self) -> Result { - let is_empty = match self.post_state.storages.get(&self.hashed_address) { - Some(storage) => { + fn is_storage_empty(&mut self) -> Result { + let is_empty = match &self.post_state_cursor { + Some(cursor) => { // If the storage has been wiped at any point - storage.wiped && + self.storage_wiped && // and the current storage does not contain any non-zero values - storage.non_zero_valued_slots.is_empty() + cursor.is_empty() } None => self.cursor.is_storage_empty()?, }; diff --git a/crates/trie/trie/src/lib.rs b/crates/trie/trie/src/lib.rs index eea65a7b3..07af07757 100644 --- a/crates/trie/trie/src/lib.rs +++ b/crates/trie/trie/src/lib.rs @@ -17,6 +17,9 @@ /// The container indicates when the trie has been modified. pub mod prefix_set; +/// The implementation of forward-only in-memory cursor. +pub mod forward_cursor; + /// The cursor implementations for navigating account and storage tries. pub mod trie_cursor; diff --git a/crates/trie/trie/src/prefix_set/mod.rs b/crates/trie/trie/src/prefix_set/mod.rs index f6a8789e0..3474bf74c 100644 --- a/crates/trie/trie/src/prefix_set/mod.rs +++ b/crates/trie/trie/src/prefix_set/mod.rs @@ -8,6 +8,35 @@ use std::{ mod loader; pub use loader::PrefixSetLoader; +/// Collection of mutable prefix sets. +#[derive(Default, Debug)] +pub struct TriePrefixSetsMut { + /// A set of account prefixes that have changed. + pub account_prefix_set: PrefixSetMut, + /// A map containing storage changes with the hashed address as key and a set of storage key + /// prefixes as the value. + pub storage_prefix_sets: HashMap, + /// A set of hashed addresses of destroyed accounts. + pub destroyed_accounts: HashSet, +} + +impl TriePrefixSetsMut { + /// Returns a `TriePrefixSets` with the same elements as these sets. + /// + /// If not yet sorted, the elements will be sorted and deduplicated. + pub fn freeze(self) -> TriePrefixSets { + TriePrefixSets { + account_prefix_set: self.account_prefix_set.freeze(), + storage_prefix_sets: self + .storage_prefix_sets + .into_iter() + .map(|(hashed_address, prefix_set)| (hashed_address, prefix_set.freeze())) + .collect(), + destroyed_accounts: self.destroyed_accounts, + } + } +} + /// Collection of trie prefix sets. #[derive(Default, Debug)] pub struct TriePrefixSets { @@ -102,6 +131,15 @@ impl PrefixSetMut { self.keys.push(nibbles); } + /// Extend prefix set keys with contents of provided iterator. + pub fn extend(&mut self, nibbles_iter: I) + where + I: IntoIterator, + { + self.sorted = false; + self.keys.extend(nibbles_iter); + } + /// Returns the number of elements in the set. 
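A usage sketch of the new mutable-then-freeze flow that call sites such as `construct_prefix_sets().freeze()` follow; it assumes `prefix_set` and the named types are publicly reachable through the `reth_trie` and `reth_trie_common` crates, as the existing `pub mod prefix_set` suggests:

fn prefix_sets_demo() {
    use reth_primitives::B256;
    use reth_trie::prefix_set::TriePrefixSetsMut;
    use reth_trie_common::Nibbles;

    let mut sets = TriePrefixSetsMut::default();
    sets.account_prefix_set.insert(Nibbles::unpack(B256::with_last_byte(1)));
    // `extend` is the new bulk-insertion API added in this diff.
    sets.account_prefix_set.extend(std::iter::once(Nibbles::unpack(B256::with_last_byte(2))));
    // Freeze exactly once, right before trie walking: sorts and deduplicates the keys.
    let _frozen = sets.freeze();
}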
pub fn len(&self) -> usize { self.keys.len() @@ -177,6 +215,14 @@ impl PrefixSet { } } +impl<'a> IntoIterator for &'a PrefixSet { + type IntoIter = std::slice::Iter<'a, reth_trie_common::Nibbles>; + type Item = &'a reth_trie_common::Nibbles; + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof.rs index 04e7952f5..eb492f81f 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -1,7 +1,7 @@ use crate::{ hashed_cursor::{HashedCursorFactory, HashedStorageCursor}, node_iter::{TrieElement, TrieNodeIter}, - prefix_set::PrefixSetMut, + prefix_set::TriePrefixSetsMut, trie_cursor::{DatabaseAccountTrieCursor, DatabaseStorageTrieCursor}, walker::TrieWalker, HashBuilder, Nibbles, @@ -12,6 +12,7 @@ use reth_db_api::transaction::DbTx; use reth_execution_errors::{StateRootError, StorageRootError}; use reth_primitives::{constants::EMPTY_ROOT_HASH, keccak256, Address, B256}; use reth_trie_common::{proof::ProofRetainer, AccountProof, StorageProof, TrieAccount}; + /// A struct for generating merkle proofs. /// /// Proof generator adds the target address and slots to the prefix set, enables the proof retainer @@ -23,12 +24,32 @@ pub struct Proof<'a, TX, H> { tx: &'a TX, /// The factory for hashed cursors. hashed_cursor_factory: H, + /// A set of prefix sets that have changes. + prefix_sets: TriePrefixSetsMut, +} + +impl<'a, TX, H> Proof<'a, TX, H> { + /// Creates a new proof generator. + pub fn new(tx: &'a TX, hashed_cursor_factory: H) -> Self { + Self { tx, hashed_cursor_factory, prefix_sets: TriePrefixSetsMut::default() } + } + + /// Set the hashed cursor factory. + pub fn with_hashed_cursor_factory(self, hashed_cursor_factory: HF) -> Proof<'a, TX, HF> { + Proof { tx: self.tx, hashed_cursor_factory, prefix_sets: self.prefix_sets } + } + + /// Set the prefix sets. They have to be mutable in order to allow extension with proof target. + pub fn with_prefix_sets_mut(mut self, prefix_sets: TriePrefixSetsMut) -> Self { + self.prefix_sets = prefix_sets; + self + } } impl<'a, TX> Proof<'a, TX, &'a TX> { - /// Create a new [Proof] instance. - pub const fn new(tx: &'a TX) -> Self { - Self { tx, hashed_cursor_factory: tx } + /// Create a new [Proof] instance from database transaction. + pub fn from_tx(tx: &'a TX) -> Self { + Self::new(tx, tx) } } @@ -52,7 +73,7 @@ where DatabaseAccountTrieCursor::new(self.tx.cursor_read::()?); // Create the walker. 
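The new `IntoIterator` impl lets a frozen `PrefixSet` drive a `for` loop directly; a small sketch under the same module-path assumptions as above:

fn iterate_frozen_set_demo() {
    use reth_trie::prefix_set::PrefixSetMut;
    use reth_trie_common::Nibbles;

    let mut set = PrefixSetMut::default();
    set.insert(Nibbles::from_nibbles([0x1, 0x2]));
    let frozen = set.freeze();
    // Enabled by the `impl<'a> IntoIterator for &'a PrefixSet` added in this diff.
    for nibbles in &frozen {
        let _ = nibbles; // each item is an &Nibbles
    }
}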
- let mut prefix_set = PrefixSetMut::default(); + let mut prefix_set = self.prefix_sets.account_prefix_set.clone(); prefix_set.insert(target_nibbles.clone()); let walker = TrieWalker::new(trie_cursor, prefix_set.freeze()); @@ -117,12 +138,14 @@ where } let target_nibbles = proofs.iter().map(|p| p.nibbles.clone()).collect::>(); - let prefix_set = PrefixSetMut::from(target_nibbles.clone()).freeze(); + let mut prefix_set = + self.prefix_sets.storage_prefix_sets.get(&hashed_address).cloned().unwrap_or_default(); + prefix_set.extend(target_nibbles.clone()); let trie_cursor = DatabaseStorageTrieCursor::new( self.tx.cursor_dup_read::()?, hashed_address, ); - let walker = TrieWalker::new(trie_cursor, prefix_set); + let walker = TrieWalker::new(trie_cursor, prefix_set.freeze()); let retainer = ProofRetainer::from_iter(target_nibbles); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); @@ -226,7 +249,7 @@ mod tests { let (root, updates) = StateRoot::from_tx(provider.tx_ref()) .root_with_updates() .map_err(Into::::into)?; - updates.flush(provider.tx_mut())?; + updates.write_to_database(provider.tx_mut())?; provider.commit()?; @@ -281,7 +304,8 @@ mod tests { let provider = factory.provider().unwrap(); for (target, expected_proof) in data { let target = Address::from_str(target).unwrap(); - let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let account_proof = + Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!( account_proof.proof, expected_proof, @@ -301,7 +325,8 @@ mod tests { let slots = Vec::from([B256::with_last_byte(1), B256::with_last_byte(3)]); let provider = factory.provider().unwrap(); - let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &slots).unwrap(); + let account_proof = + Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); assert_eq!(account_proof.storage_root, EMPTY_ROOT_HASH, "expected empty storage root"); assert_eq!(slots.len(), account_proof.storage_proofs.len()); @@ -333,7 +358,7 @@ mod tests { ]); let provider = factory.provider().unwrap(); - let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); assert_eq!(account_proof.verify(root), Ok(())); } @@ -356,7 +381,7 @@ mod tests { ]); let provider = factory.provider().unwrap(); - let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &[]).unwrap(); + let account_proof = Proof::from_tx(provider.tx_ref()).account_proof(target, &[]).unwrap(); similar_asserts::assert_eq!(account_proof.proof, expected_account_proof); assert_eq!(account_proof.verify(root), Ok(())); } @@ -442,7 +467,8 @@ mod tests { }; let provider = factory.provider().unwrap(); - let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &slots).unwrap(); + let account_proof = + Proof::from_tx(provider.tx_ref()).account_proof(target, &slots).unwrap(); similar_asserts::assert_eq!(account_proof, expected); assert_eq!(account_proof.verify(root), Ok(())); } diff --git a/crates/trie/trie/src/state.rs b/crates/trie/trie/src/state.rs index 821dcc971..c6c93c0b3 100644 --- a/crates/trie/trie/src/state.rs +++ b/crates/trie/trie/src/state.rs @@ -1,6 +1,7 @@ use crate::{ hashed_cursor::HashedPostStateCursorFactory, - prefix_set::{PrefixSetMut, TriePrefixSets}, + prefix_set::{PrefixSetMut, 
TriePrefixSetsMut}, + proof::Proof, updates::TrieUpdates, Nibbles, StateRoot, }; @@ -13,6 +14,7 @@ use reth_db_api::{ }; use reth_execution_errors::StateRootError; use reth_primitives::{keccak256, Account, Address, BlockNumber, B256, U256}; +use reth_trie_common::AccountProof; use revm::db::BundleAccount; use std::{ collections::{hash_map, HashMap, HashSet}, @@ -149,16 +151,17 @@ impl HashedPostState { /// Converts hashed post state into [`HashedPostStateSorted`]. pub fn into_sorted(self) -> HashedPostStateSorted { - let mut accounts = Vec::new(); + let mut updated_accounts = Vec::new(); let mut destroyed_accounts = HashSet::default(); for (hashed_address, info) in self.accounts { if let Some(info) = info { - accounts.push((hashed_address, info)); + updated_accounts.push((hashed_address, info)); } else { destroyed_accounts.insert(hashed_address); } } - accounts.sort_unstable_by_key(|(address, _)| *address); + updated_accounts.sort_unstable_by_key(|(address, _)| *address); + let accounts = HashedAccountsSorted { accounts: updated_accounts, destroyed_accounts }; let storages = self .storages @@ -166,13 +169,13 @@ impl HashedPostState { .map(|(hashed_address, storage)| (hashed_address, storage.into_sorted())) .collect(); - HashedPostStateSorted { accounts, destroyed_accounts, storages } + HashedPostStateSorted { accounts, storages } } - /// Construct [`TriePrefixSets`] from hashed post state. + /// Construct [`TriePrefixSetsMut`] from hashed post state. /// The prefix sets contain the hashed account and storage keys that have been changed in the /// post state. - pub fn construct_prefix_sets(&self) -> TriePrefixSets { + pub fn construct_prefix_sets(&self) -> TriePrefixSetsMut { // Populate account prefix set. let mut account_prefix_set = PrefixSetMut::with_capacity(self.accounts.len()); let mut destroyed_accounts = HashSet::default(); @@ -193,14 +196,10 @@ impl HashedPostState { for hashed_slot in hashed_storage.storage.keys() { prefix_set.insert(Nibbles::unpack(hashed_slot)); } - storage_prefix_sets.insert(*hashed_address, prefix_set.freeze()); + storage_prefix_sets.insert(*hashed_address, prefix_set); } - TriePrefixSets { - account_prefix_set: account_prefix_set.freeze(), - storage_prefix_sets, - destroyed_accounts, - } + TriePrefixSetsMut { account_prefix_set, storage_prefix_sets, destroyed_accounts } } /// Calculate the state root for this [`HashedPostState`]. @@ -235,7 +234,7 @@ impl HashedPostState { /// The state root for this [`HashedPostState`]. pub fn state_root(&self, tx: &TX) -> Result { let sorted = self.clone().into_sorted(); - let prefix_sets = self.construct_prefix_sets(); + let prefix_sets = self.construct_prefix_sets().freeze(); StateRoot::from_tx(tx) .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(tx, &sorted)) .with_prefix_sets(prefix_sets) @@ -249,12 +248,27 @@ impl HashedPostState { tx: &TX, ) -> Result<(B256, TrieUpdates), StateRootError> { let sorted = self.clone().into_sorted(); - let prefix_sets = self.construct_prefix_sets(); + let prefix_sets = self.construct_prefix_sets().freeze(); StateRoot::from_tx(tx) .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(tx, &sorted)) .with_prefix_sets(prefix_sets) .root_with_updates() } + + /// Generates the state proof for target account and slots on top of this [`HashedPostState`]. 
+ pub fn account_proof( + &self, + tx: &TX, + address: Address, + slots: &[B256], + ) -> Result { + let sorted = self.clone().into_sorted(); + let prefix_sets = self.construct_prefix_sets(); + Proof::from_tx(tx) + .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(tx, &sorted)) + .with_prefix_sets_mut(prefix_sets) + .account_proof(address, slots) + } } /// Representation of in-memory hashed storage. @@ -309,12 +323,19 @@ impl HashedStorage { /// Sorted hashed post state optimized for iterating during state trie calculation. #[derive(PartialEq, Eq, Clone, Debug)] pub struct HashedPostStateSorted { + /// Updated state of accounts. + pub(crate) accounts: HashedAccountsSorted, + /// Map of hashed addresses to hashed storage. + pub(crate) storages: HashMap, +} + +/// Sorted account state optimized for iterating during state trie calculation. +#[derive(Clone, Eq, PartialEq, Debug)] +pub struct HashedAccountsSorted { /// Sorted collection of hashed addresses and their account info. pub(crate) accounts: Vec<(B256, Account)>, /// Set of destroyed account keys. pub(crate) destroyed_accounts: HashSet, - /// Map of hashed addresses to hashed storage. - pub(crate) storages: HashMap, } /// Sorted hashed storage optimized for iterating during state trie calculation. diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index 25ae61658..64bcc0a7d 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -5,7 +5,7 @@ use crate::{ progress::{IntermediateStateRootState, StateRootProgress}, stats::TrieTracker, trie_cursor::TrieCursorFactory, - updates::{TrieKey, TrieOp, TrieUpdates}, + updates::{StorageTrieUpdates, TrieUpdates}, walker::TrieWalker, HashBuilder, Nibbles, TrieAccount, }; @@ -221,7 +221,7 @@ where state.walker_stack, self.prefix_sets.account_prefix_set, ) - .with_updates(retain_updates); + .with_deletions_retained(retain_updates); let node_iter = TrieNodeIter::new(walker, hashed_account_cursor) .with_last_hashed_key(state.last_account_key); (hash_builder, node_iter) @@ -229,7 +229,7 @@ where None => { let hash_builder = HashBuilder::default().with_updates(retain_updates); let walker = TrieWalker::new(trie_cursor, self.prefix_sets.account_prefix_set) - .with_updates(retain_updates); + .with_deletions_retained(retain_updates); let node_iter = TrieNodeIter::new(walker, hashed_account_cursor); (hash_builder, node_iter) } @@ -237,6 +237,7 @@ where let mut account_rlp = Vec::with_capacity(128); let mut hashed_entries_walked = 0; + let mut updated_storage_nodes = 0; while let Some(node) = account_node_iter.try_next()? { match node { TrieElement::Branch(node) => { @@ -273,7 +274,9 @@ where let (root, storage_slots_walked, updates) = storage_root_calculator.root_with_updates()?; hashed_entries_walked += storage_slots_walked; - trie_updates.extend(updates); + // We only walk over hashed address once, so it's safe to insert. + updated_storage_nodes += updates.len(); + trie_updates.insert_storage_updates(hashed_address, updates); root } else { storage_root_calculator.root()? @@ -285,12 +288,14 @@ where hash_builder.add_leaf(Nibbles::unpack(hashed_address), &account_rlp); // Decide if we need to return intermediate progress. 
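A hedged sketch of the new overlay-aware proof entrypoint shown above; `tx`, `address`, and `slots` are assumed inputs supplied by the caller, and the error type is taken to be `StateRootError`, matching the other `HashedPostState` methods:

use reth_db_api::transaction::DbTx;
use reth_execution_errors::StateRootError;
use reth_primitives::{Address, B256};
use reth_trie::HashedPostState;
use reth_trie_common::AccountProof;

// Hypothetical helper: prove an account on top of in-memory, not-yet-persisted state.
fn prove_with_overlay<TX: DbTx>(
    tx: &TX,
    overlay: &HashedPostState,
    address: Address,
    slots: &[B256],
) -> Result<AccountProof, StateRootError> {
    // Internally sorts the overlay, builds mutable prefix sets, and extends them
    // with the proof targets before walking the trie.
    overlay.account_proof(tx, address, slots)
}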
- let total_updates_len = trie_updates.len() + - account_node_iter.walker.updates_len() + + let total_updates_len = updated_storage_nodes + + account_node_iter.walker.removed_keys_len() + hash_builder.updates_len(); if retain_updates && total_updates_len as u64 >= self.threshold { - let (walker_stack, walker_updates) = account_node_iter.walker.split(); + let (walker_stack, walker_deleted_keys) = account_node_iter.walker.split(); + trie_updates.removed_nodes.extend(walker_deleted_keys); let (hash_builder, hash_builder_updates) = hash_builder.split(); + trie_updates.account_nodes.extend(hash_builder_updates); let state = IntermediateStateRootState { hash_builder, @@ -298,9 +303,6 @@ where last_account_key: hashed_address, }; - trie_updates.extend(walker_updates); - trie_updates.extend_with_account_updates(hash_builder_updates); - return Ok(StateRootProgress::Progress( Box::new(state), hashed_entries_walked, @@ -313,7 +315,7 @@ where let root = hash_builder.root(); - trie_updates.finalize_state_updates( + trie_updates.finalize( account_node_iter.walker, hash_builder, self.prefix_sets.destroyed_accounts, @@ -452,7 +454,7 @@ where /// # Returns /// /// The storage root and storage trie updates for a given address. - pub fn root_with_updates(self) -> Result<(B256, usize, TrieUpdates), StorageRootError> { + pub fn root_with_updates(self) -> Result<(B256, usize, StorageTrieUpdates), StorageRootError> { self.calculate(true) } @@ -475,7 +477,7 @@ where pub fn calculate( self, retain_updates: bool, - ) -> Result<(B256, usize, TrieUpdates), StorageRootError> { + ) -> Result<(B256, usize, StorageTrieUpdates), StorageRootError> { trace!(target: "trie::storage_root", hashed_address = ?self.hashed_address, "calculating storage root"); let mut hashed_storage_cursor = @@ -483,16 +485,13 @@ where // short circuit on empty storage if hashed_storage_cursor.is_storage_empty()? { - return Ok(( - EMPTY_ROOT_HASH, - 0, - TrieUpdates::from([(TrieKey::StorageTrie(self.hashed_address), TrieOp::Delete)]), - )) + return Ok((EMPTY_ROOT_HASH, 0, StorageTrieUpdates::deleted())) } let mut tracker = TrieTracker::default(); - let trie_cursor = self.trie_cursor_factory.storage_tries_cursor(self.hashed_address)?; - let walker = TrieWalker::new(trie_cursor, self.prefix_set).with_updates(retain_updates); + let trie_cursor = self.trie_cursor_factory.storage_trie_cursor(self.hashed_address)?; + let walker = + TrieWalker::new(trie_cursor, self.prefix_set).with_deletions_retained(retain_updates); let mut hash_builder = HashBuilder::default().with_updates(retain_updates); @@ -515,12 +514,8 @@ where let root = hash_builder.root(); - let mut trie_updates = TrieUpdates::default(); - trie_updates.finalize_storage_updates( - self.hashed_address, - storage_node_iter.walker, - hash_builder, - ); + let mut trie_updates = StorageTrieUpdates::default(); + trie_updates.finalize(storage_node_iter.walker, hash_builder); let stats = tracker.finish(); @@ -621,7 +616,7 @@ mod tests { let modified_root = loader.root().unwrap(); // Update the intermediate roots table so that we can run the incremental verification - trie_updates.flush(tx.tx_ref()).unwrap(); + trie_updates.write_to_database(tx.tx_ref(), hashed_address).unwrap(); // 3. 
Calculate the incremental root let mut storage_changes = PrefixSetMut::default(); @@ -980,14 +975,7 @@ mod tests { assert_eq!(root, computed_expected_root); // Check account trie - let mut account_updates = trie_updates - .iter() - .filter_map(|(k, v)| match (k, v) { - (TrieKey::AccountNode(nibbles), TrieOp::Update(node)) => Some((nibbles, node)), - _ => None, - }) - .collect::>(); - account_updates.sort_unstable_by(|a, b| a.0.cmp(b.0)); + let account_updates = trie_updates.clone().into_sorted().account_nodes; assert_eq!(account_updates.len(), 2); let (nibbles1a, node1a) = account_updates.first().unwrap(); @@ -1007,16 +995,13 @@ mod tests { assert_eq!(node2a.hashes.len(), 1); // Check storage trie - let storage_updates = trie_updates - .iter() - .filter_map(|entry| match entry { - (TrieKey::StorageNode(_, nibbles), TrieOp::Update(node)) => Some((nibbles, node)), - _ => None, - }) - .collect::>(); - assert_eq!(storage_updates.len(), 1); + let mut updated_storage_trie = + trie_updates.storage_tries.iter().filter(|(_, u)| !u.storage_nodes.is_empty()); + assert_eq!(updated_storage_trie.clone().count(), 1); + let (_, storage_trie_updates) = updated_storage_trie.next().unwrap(); + assert_eq!(storage_trie_updates.storage_nodes.len(), 1); - let (nibbles3, node3) = storage_updates.first().unwrap(); + let (nibbles3, node3) = storage_trie_updates.storage_nodes.iter().next().unwrap(); assert!(nibbles3.is_empty()); assert_eq!(node3.state_mask, TrieMask::new(0b1010)); assert_eq!(node3.tree_mask, TrieMask::new(0b0000)); @@ -1050,14 +1035,7 @@ mod tests { .unwrap(); assert_eq!(root, expected_state_root); - let mut account_updates = trie_updates - .iter() - .filter_map(|entry| match entry { - (TrieKey::AccountNode(nibbles), TrieOp::Update(node)) => Some((nibbles, node)), - _ => None, - }) - .collect::>(); - account_updates.sort_by(|a, b| a.0.cmp(b.0)); + let account_updates = trie_updates.into_sorted().account_nodes; assert_eq!(account_updates.len(), 2); let (nibbles1b, node1b) = account_updates.first().unwrap(); @@ -1104,19 +1082,11 @@ mod tests { .root_with_updates() .unwrap(); assert_eq!(root, computed_expected_root); - assert_eq!(trie_updates.len(), 7); - assert_eq!(trie_updates.iter().filter(|(_, op)| op.is_update()).count(), 2); + assert_eq!(trie_updates.account_nodes.len() + trie_updates.removed_nodes.len(), 1); - let account_updates = trie_updates - .iter() - .filter_map(|entry| match entry { - (TrieKey::AccountNode(nibbles), TrieOp::Update(node)) => Some((nibbles, node)), - _ => None, - }) - .collect::>(); - assert_eq!(account_updates.len(), 1); + assert_eq!(trie_updates.account_nodes.len(), 1); - let (nibbles1c, node1c) = account_updates.first().unwrap(); + let (nibbles1c, node1c) = trie_updates.account_nodes.iter().next().unwrap(); assert_eq!(nibbles1c[..], [0xB]); assert_eq!(node1c.state_mask, TrieMask::new(0b1011)); @@ -1163,19 +1133,15 @@ mod tests { .root_with_updates() .unwrap(); assert_eq!(root, computed_expected_root); - assert_eq!(trie_updates.len(), 6); - assert_eq!(trie_updates.iter().filter(|(_, op)| op.is_update()).count(), 1); // no storage root update - - let account_updates = trie_updates + assert_eq!(trie_updates.account_nodes.len() + trie_updates.removed_nodes.len(), 1); + assert!(!trie_updates + .storage_tries .iter() - .filter_map(|entry| match entry { - (TrieKey::AccountNode(nibbles), TrieOp::Update(node)) => Some((nibbles, node)), - _ => None, - }) - .collect::>(); - assert_eq!(account_updates.len(), 1); + .any(|(_, u)| !u.storage_nodes.is_empty() || 
!u.removed_nodes.is_empty())); // no storage root update + + assert_eq!(trie_updates.account_nodes.len(), 1); - let (nibbles1d, node1d) = account_updates.first().unwrap(); + let (nibbles1d, node1d) = trie_updates.account_nodes.iter().next().unwrap(); assert_eq!(nibbles1d[..], [0xB]); assert_eq!(node1d.state_mask, TrieMask::new(0b1011)); @@ -1199,19 +1165,7 @@ mod tests { let (got, updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); assert_eq!(expected, got); - - // Check account trie - let account_updates = updates - .iter() - .filter_map(|entry| match entry { - (TrieKey::AccountNode(nibbles), TrieOp::Update(node)) => { - Some((nibbles.0.clone(), node.clone())) - } - _ => None, - }) - .collect::>(); - - assert_trie_updates(&account_updates); + assert_trie_updates(&updates.account_nodes); } #[test] @@ -1223,7 +1177,7 @@ mod tests { let (got, updates) = StateRoot::from_tx(tx.tx_ref()).root_with_updates().unwrap(); assert_eq!(expected, got); - updates.flush(tx.tx_ref()).unwrap(); + updates.write_to_database(tx.tx_ref()).unwrap(); // read the account updates from the db let mut accounts_trie = tx.tx_ref().cursor_read::().unwrap(); @@ -1270,7 +1224,7 @@ mod tests { state.iter().map(|(&key, &balance)| (key, (Account { balance, ..Default::default() }, std::iter::empty()))) ); assert_eq!(expected_root, state_root); - trie_updates.flush(tx.tx_ref()).unwrap(); + trie_updates.write_to_database(tx.tx_ref()).unwrap(); } } } @@ -1286,26 +1240,14 @@ mod tests { let (got, _, updates) = StorageRoot::from_tx_hashed(tx.tx_ref(), hashed_address).root_with_updates().unwrap(); assert_eq!(expected_root, got); - - // Check account trie - let storage_updates = updates - .iter() - .filter_map(|entry| match entry { - (TrieKey::StorageNode(_, nibbles), TrieOp::Update(node)) => { - Some((nibbles.0.clone(), node.clone())) - } - _ => None, - }) - .collect::>(); - assert_eq!(expected_updates, storage_updates); - - assert_trie_updates(&storage_updates); + assert_eq!(expected_updates, updates); + assert_trie_updates(&updates.storage_nodes); } fn extension_node_storage_trie( tx: &DatabaseProviderRW>>, hashed_address: B256, - ) -> (B256, HashMap) { + ) -> (B256, StorageTrieUpdates) { let value = U256::from(1); let mut hashed_storage = tx.tx_ref().cursor_write::().unwrap(); @@ -1328,7 +1270,8 @@ mod tests { let root = hb.root(); let (_, updates) = hb.split(); - (root, updates) + let trie_updates = StorageTrieUpdates { storage_nodes: updates, ..Default::default() }; + (root, trie_updates) } fn extension_node_trie(tx: &DatabaseProviderRW>>) -> B256 { diff --git a/crates/trie/trie/src/trie_cursor/database_cursors.rs b/crates/trie/trie/src/trie_cursor/database_cursors.rs index 910ae61b4..53a64a0b0 100644 --- a/crates/trie/trie/src/trie_cursor/database_cursors.rs +++ b/crates/trie/trie/src/trie_cursor/database_cursors.rs @@ -1,5 +1,5 @@ use super::{TrieCursor, TrieCursorFactory}; -use crate::{updates::TrieKey, BranchNodeCompact, Nibbles, StoredNibbles, StoredNibblesSubKey}; +use crate::{BranchNodeCompact, Nibbles, StoredNibbles, StoredNibblesSubKey}; use reth_db::{tables, DatabaseError}; use reth_db_api::{ cursor::{DbCursorRO, DbDupCursorRO}, @@ -9,18 +9,22 @@ use reth_primitives::B256; /// Implementation of the trie cursor factory for a database transaction. 
impl<'a, TX: DbTx> TrieCursorFactory for &'a TX { - fn account_trie_cursor(&self) -> Result, DatabaseError> { - Ok(Box::new(DatabaseAccountTrieCursor::new(self.cursor_read::()?))) + type AccountTrieCursor = DatabaseAccountTrieCursor<::Cursor>; + type StorageTrieCursor = + DatabaseStorageTrieCursor<::DupCursor>; + + fn account_trie_cursor(&self) -> Result { + Ok(DatabaseAccountTrieCursor::new(self.cursor_read::()?)) } - fn storage_tries_cursor( + fn storage_trie_cursor( &self, hashed_address: B256, - ) -> Result, DatabaseError> { - Ok(Box::new(DatabaseStorageTrieCursor::new( + ) -> Result { + Ok(DatabaseStorageTrieCursor::new( self.cursor_dup_read::()?, hashed_address, - ))) + )) } } @@ -56,8 +60,8 @@ where } /// Retrieves the current key in the cursor. - fn current(&mut self) -> Result, DatabaseError> { - Ok(self.0.current()?.map(|(k, _)| TrieKey::AccountNode(k))) + fn current(&mut self) -> Result, DatabaseError> { + Ok(self.0.current()?.map(|(k, _)| k.0)) } } @@ -105,8 +109,8 @@ where } /// Retrieves the current value in the storage trie cursor. - fn current(&mut self) -> Result, DatabaseError> { - Ok(self.cursor.current()?.map(|(k, v)| TrieKey::StorageNode(k, v.nibbles))) + fn current(&mut self) -> Result, DatabaseError> { + Ok(self.cursor.current()?.map(|(_, v)| v.nibbles.0)) } } diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs new file mode 100644 index 000000000..983974da3 --- /dev/null +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -0,0 +1,149 @@ +use super::{TrieCursor, TrieCursorFactory}; +use crate::{ + forward_cursor::ForwardInMemoryCursor, + updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted}, +}; +use reth_db::DatabaseError; +use reth_primitives::B256; +use reth_trie_common::{BranchNodeCompact, Nibbles}; +use std::collections::HashSet; + +/// The trie cursor factory for the trie updates. +#[derive(Debug, Clone)] +pub struct InMemoryTrieCursorFactory<'a, CF> { + /// Underlying trie cursor factory. + cursor_factory: CF, + /// Reference to sorted trie updates. + trie_updates: &'a TrieUpdatesSorted, +} + +impl<'a, CF> InMemoryTrieCursorFactory<'a, CF> { + /// Create a new trie cursor factory. + pub const fn new(cursor_factory: CF, trie_updates: &'a TrieUpdatesSorted) -> Self { + Self { cursor_factory, trie_updates } + } +} + +impl<'a, CF: TrieCursorFactory> TrieCursorFactory for InMemoryTrieCursorFactory<'a, CF> { + type AccountTrieCursor = InMemoryAccountTrieCursor<'a, CF::AccountTrieCursor>; + type StorageTrieCursor = InMemoryStorageTrieCursor<'a, CF::StorageTrieCursor>; + + fn account_trie_cursor(&self) -> Result { + let cursor = self.cursor_factory.account_trie_cursor()?; + Ok(InMemoryAccountTrieCursor::new(cursor, self.trie_updates)) + } + + fn storage_trie_cursor( + &self, + hashed_address: B256, + ) -> Result { + let cursor = self.cursor_factory.storage_trie_cursor(hashed_address)?; + Ok(InMemoryStorageTrieCursor::new( + hashed_address, + cursor, + self.trie_updates.storage_tries.get(&hashed_address), + )) + } +} + +/// The cursor to iterate over account trie updates and corresponding database entries. +/// It will always give precedence to the data from the trie updates. +#[derive(Debug)] +#[allow(dead_code)] +pub struct InMemoryAccountTrieCursor<'a, C> { + /// The database cursor. + cursor: C, + /// Forward-only in-memory cursor over storage trie nodes. + in_memory_cursor: ForwardInMemoryCursor<'a, Nibbles, BranchNodeCompact>, + /// Collection of removed trie nodes. 
+ removed_nodes: &'a HashSet, + /// Last key returned by the cursor. + last_key: Option, +} + +impl<'a, C> InMemoryAccountTrieCursor<'a, C> { + const fn new(cursor: C, trie_updates: &'a TrieUpdatesSorted) -> Self { + let in_memory_cursor = ForwardInMemoryCursor::new(&trie_updates.account_nodes); + Self { + cursor, + in_memory_cursor, + removed_nodes: &trie_updates.removed_nodes, + last_key: None, + } + } +} + +impl<'a, C: TrieCursor> TrieCursor for InMemoryAccountTrieCursor<'a, C> { + fn seek_exact( + &mut self, + _key: Nibbles, + ) -> Result, DatabaseError> { + unimplemented!() + } + + fn seek( + &mut self, + _key: Nibbles, + ) -> Result, DatabaseError> { + unimplemented!() + } + + fn current(&mut self) -> Result, DatabaseError> { + unimplemented!() + } +} + +/// The cursor to iterate over storage trie updates and corresponding database entries. +/// It will always give precedence to the data from the trie updates. +#[derive(Debug)] +#[allow(dead_code)] +pub struct InMemoryStorageTrieCursor<'a, C> { + /// The hashed address of the account that trie belongs to. + hashed_address: B256, + /// The database cursor. + cursor: C, + /// Forward-only in-memory cursor over storage trie nodes. + in_memory_cursor: Option>, + /// Reference to the set of removed storage node keys. + removed_nodes: Option<&'a HashSet>, + /// The flag indicating whether the storage trie was cleared. + storage_trie_cleared: bool, + /// Last key returned by the cursor. + last_key: Option, +} + +impl<'a, C> InMemoryStorageTrieCursor<'a, C> { + fn new(hashed_address: B256, cursor: C, updates: Option<&'a StorageTrieUpdatesSorted>) -> Self { + let in_memory_cursor = updates.map(|u| ForwardInMemoryCursor::new(&u.storage_nodes)); + let removed_nodes = updates.map(|u| &u.removed_nodes); + let storage_trie_cleared = updates.map_or(false, |u| u.is_deleted); + Self { + hashed_address, + cursor, + in_memory_cursor, + removed_nodes, + storage_trie_cleared, + last_key: None, + } + } +} + +impl<'a, C: TrieCursor> TrieCursor for InMemoryStorageTrieCursor<'a, C> { + fn seek_exact( + &mut self, + _key: Nibbles, + ) -> Result, DatabaseError> { + unimplemented!() + } + + fn seek( + &mut self, + _key: Nibbles, + ) -> Result, DatabaseError> { + unimplemented!() + } + + fn current(&mut self) -> Result, DatabaseError> { + unimplemented!() + } +} diff --git a/crates/trie/trie/src/trie_cursor/mod.rs b/crates/trie/trie/src/trie_cursor/mod.rs index aae7e773c..e5160a552 100644 --- a/crates/trie/trie/src/trie_cursor/mod.rs +++ b/crates/trie/trie/src/trie_cursor/mod.rs @@ -1,7 +1,14 @@ -use crate::{updates::TrieKey, BranchNodeCompact, Nibbles}; +use crate::{BranchNodeCompact, Nibbles}; use reth_db::DatabaseError; use reth_primitives::B256; + +/// Database implementations of trie cursors. mod database_cursors; + +/// In-memory implementations of trie cursors. +mod in_memory; + +/// Cursor for iterating over a subtrie. mod subnode; /// Noop trie cursor implementations. @@ -9,19 +16,25 @@ pub mod noop; pub use self::{ database_cursors::{DatabaseAccountTrieCursor, DatabaseStorageTrieCursor}, + in_memory::*, subnode::CursorSubNode, }; /// Factory for creating trie cursors. pub trait TrieCursorFactory { + /// The account trie cursor type. + type AccountTrieCursor: TrieCursor; + /// The storage trie cursor type. + type StorageTrieCursor: TrieCursor; + /// Create an account trie cursor. - fn account_trie_cursor(&self) -> Result, DatabaseError>; + fn account_trie_cursor(&self) -> Result; /// Create a storage tries cursor. 
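The new in-memory factory composes over any other `TrieCursorFactory`. Note that the cursors it returns still carry `unimplemented!()` bodies in this diff, so the sketch below (with a hypothetical `overlay_factory` helper) only shows the intended wiring:

use reth_trie::trie_cursor::{InMemoryTrieCursorFactory, TrieCursorFactory};
use reth_trie::updates::TrieUpdatesSorted;

fn overlay_factory<F: TrieCursorFactory>(
    db_factory: F,
    updates: &TrieUpdatesSorted,
) -> InMemoryTrieCursorFactory<'_, F> {
    // Cursors created by this factory are meant to consult `updates` before the database.
    InMemoryTrieCursorFactory::new(db_factory, updates)
}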
- fn storage_tries_cursor( + fn storage_trie_cursor( &self, hashed_address: B256, - ) -> Result, DatabaseError>; + ) -> Result; } /// A cursor for navigating a trie that works with both Tables and DupSort tables. @@ -38,5 +51,5 @@ pub trait TrieCursor: Send + Sync { -> Result, DatabaseError>; /// Get the current entry. - fn current(&mut self) -> Result, DatabaseError>; + fn current(&mut self) -> Result, DatabaseError>; } diff --git a/crates/trie/trie/src/trie_cursor/noop.rs b/crates/trie/trie/src/trie_cursor/noop.rs index 46163180b..e49c90613 100644 --- a/crates/trie/trie/src/trie_cursor/noop.rs +++ b/crates/trie/trie/src/trie_cursor/noop.rs @@ -1,6 +1,7 @@ use super::{TrieCursor, TrieCursorFactory}; -use crate::{updates::TrieKey, BranchNodeCompact, Nibbles}; +use crate::{BranchNodeCompact, Nibbles}; use reth_db::DatabaseError; +use reth_primitives::B256; /// Noop trie cursor factory. #[derive(Default, Debug)] @@ -8,17 +9,20 @@ use reth_db::DatabaseError; pub struct NoopTrieCursorFactory; impl TrieCursorFactory for NoopTrieCursorFactory { + type AccountTrieCursor = NoopAccountTrieCursor; + type StorageTrieCursor = NoopStorageTrieCursor; + /// Generates a Noop account trie cursor. - fn account_trie_cursor(&self) -> Result, DatabaseError> { - Ok(Box::::default()) + fn account_trie_cursor(&self) -> Result { + Ok(NoopAccountTrieCursor::default()) } /// Generates a Noop storage trie cursor. - fn storage_tries_cursor( + fn storage_trie_cursor( &self, - _hashed_address: reth_primitives::B256, - ) -> Result, DatabaseError> { - Ok(Box::::default()) + _hashed_address: B256, + ) -> Result { + Ok(NoopStorageTrieCursor::default()) } } @@ -45,7 +49,7 @@ impl TrieCursor for NoopAccountTrieCursor { } /// Retrieves the current cursor position within the account trie. - fn current(&mut self) -> Result, DatabaseError> { + fn current(&mut self) -> Result, DatabaseError> { Ok(None) } } @@ -73,7 +77,7 @@ impl TrieCursor for NoopStorageTrieCursor { } /// Retrieves the current cursor position within storage tries. - fn current(&mut self) -> Result, DatabaseError> { + fn current(&mut self) -> Result, DatabaseError> { Ok(None) } } diff --git a/crates/trie/trie/src/updates.rs b/crates/trie/trie/src/updates.rs index 39628e6d5..eba5d1963 100644 --- a/crates/trie/trie/src/updates.rs +++ b/crates/trie/trie/src/updates.rs @@ -2,186 +2,340 @@ use crate::{ walker::TrieWalker, BranchNodeCompact, HashBuilder, Nibbles, StorageTrieEntry, StoredBranchNode, StoredNibbles, StoredNibblesSubKey, }; -use derive_more::Deref; use reth_db::tables; use reth_db_api::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, transaction::{DbTx, DbTxMut}, }; use reth_primitives::B256; -use std::collections::{hash_map::IntoIter, HashMap, HashSet}; - -/// The key of a trie node. -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub enum TrieKey { - /// A node in the account trie. - AccountNode(StoredNibbles), - /// A node in the storage trie. - StorageNode(B256, StoredNibblesSubKey), - /// Storage trie of an account. - StorageTrie(B256), -} - -/// The operation to perform on the trie. -#[derive(PartialEq, Eq, Debug, Clone)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub enum TrieOp { - /// Delete the node entry. - Delete, - /// Update the node entry with the provided value. - Update(BranchNodeCompact), -} - -impl TrieOp { - /// Returns `true` if the operation is an update. 
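Because `TrieCursorFactory` now uses associated types instead of boxed trait objects, callers are statically dispatched; a small sketch, assuming the usual `reth_trie` re-exports:

use reth_db::DatabaseError;
use reth_trie::trie_cursor::{TrieCursor, TrieCursorFactory};
use reth_trie_common::{BranchNodeCompact, Nibbles};

// Hypothetical helper, generic over any factory implementation.
fn first_account_node<F: TrieCursorFactory>(
    factory: &F,
) -> Result<Option<(Nibbles, BranchNodeCompact)>, DatabaseError> {
    let mut cursor = factory.account_trie_cursor()?; // a concrete type now, not a Box
    cursor.seek(Nibbles::default())
}

// With reth_trie::trie_cursor::noop::NoopTrieCursorFactory this always yields Ok(None).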
- pub const fn is_update(&self) -> bool { - matches!(self, Self::Update(..)) - } -} +use std::collections::{HashMap, HashSet}; /// The aggregation of trie updates. -#[derive(Debug, Default, Clone, PartialEq, Eq, Deref)] +#[derive(PartialEq, Eq, Clone, Default, Debug)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct TrieUpdates { - trie_operations: HashMap, + pub(crate) account_nodes: HashMap, + pub(crate) removed_nodes: HashSet, + pub(crate) storage_tries: HashMap, } -impl From<[(TrieKey, TrieOp); N]> for TrieUpdates { - fn from(value: [(TrieKey, TrieOp); N]) -> Self { - Self { trie_operations: HashMap::from(value) } +impl TrieUpdates { + /// Returns `true` if the updates are empty. + pub fn is_empty(&self) -> bool { + self.account_nodes.is_empty() && + self.removed_nodes.is_empty() && + self.storage_tries.is_empty() } -} - -impl IntoIterator for TrieUpdates { - type Item = (TrieKey, TrieOp); - type IntoIter = IntoIter; - fn into_iter(self) -> Self::IntoIter { - self.trie_operations.into_iter() + /// Returns reference to updated account nodes. + pub const fn account_nodes_ref(&self) -> &HashMap { + &self.account_nodes } -} -impl TrieUpdates { - /// Schedule a delete operation on a trie key. - /// - /// # Panics - /// - /// If the key already exists and the operation is an update. - pub fn schedule_delete(&mut self, key: TrieKey) { - let existing = self.trie_operations.insert(key, TrieOp::Delete); - if let Some(op) = existing { - assert!(!op.is_update(), "Tried to delete a node that was already updated"); - } + /// Returns a reference to removed account nodes. + pub const fn removed_nodes_ref(&self) -> &HashSet { + &self.removed_nodes } - /// Extend the updates with trie updates. - pub fn extend(&mut self, updates: impl IntoIterator) { - self.trie_operations.extend(updates); + /// Returns a reference to updated storage tries. + pub const fn storage_tries_ref(&self) -> &HashMap { + &self.storage_tries } - /// Extend the updates with account trie updates. - pub fn extend_with_account_updates(&mut self, updates: HashMap) { - self.extend( - updates.into_iter().map(|(nibbles, node)| { - (TrieKey::AccountNode(nibbles.into()), TrieOp::Update(node)) - }), - ); + /// Insert storage updates for a given hashed address. + pub fn insert_storage_updates( + &mut self, + hashed_address: B256, + storage_updates: StorageTrieUpdates, + ) { + let existing = self.storage_tries.insert(hashed_address, storage_updates); + debug_assert!(existing.is_none()); } /// Finalize state trie updates. - pub fn finalize_state_updates( + pub fn finalize( &mut self, walker: TrieWalker, hash_builder: HashBuilder, destroyed_accounts: HashSet, ) { - // Add updates from trie walker. - let (_, walker_updates) = walker.split(); - self.extend(walker_updates); + // Retrieve deleted keys from trie walker. + let (_, removed_node_keys) = walker.split(); + self.removed_nodes.extend(removed_node_keys); - // Add account node updates from hash builder. - let (_, hash_builder_updates) = hash_builder.split(); - self.extend_with_account_updates(hash_builder_updates); + // Retrieve updated nodes from hash builder. + let (_, updated_nodes) = hash_builder.split(); + self.account_nodes.extend(updated_nodes); // Add deleted storage tries for destroyed accounts. 
- self.extend( - destroyed_accounts.into_iter().map(|key| (TrieKey::StorageTrie(key), TrieOp::Delete)), - ); + for destroyed in destroyed_accounts { + self.storage_tries.entry(destroyed).or_default().set_deleted(true); + } } - /// Finalize storage trie updates for a given address. - pub fn finalize_storage_updates( - &mut self, - hashed_address: B256, - walker: TrieWalker, - hash_builder: HashBuilder, - ) { - // Add updates from trie walker. - let (_, walker_updates) = walker.split(); - self.extend(walker_updates); - - // Add storage node updates from hash builder. - let (_, hash_builder_updates) = hash_builder.split(); - self.extend(hash_builder_updates.into_iter().map(|(nibbles, node)| { - (TrieKey::StorageNode(hashed_address, nibbles.into()), TrieOp::Update(node)) - })); + /// Converts trie updates into [`TrieUpdatesSorted`]. + pub fn into_sorted(self) -> TrieUpdatesSorted { + let mut account_nodes = Vec::from_iter(self.account_nodes); + account_nodes.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + let storage_tries = self + .storage_tries + .into_iter() + .map(|(hashed_address, updates)| (hashed_address, updates.into_sorted())) + .collect(); + TrieUpdatesSorted { removed_nodes: self.removed_nodes, account_nodes, storage_tries } } /// Flush updates all aggregated updates to the database. - pub fn flush(self, tx: &(impl DbTx + DbTxMut)) -> Result<(), reth_db::DatabaseError> { - if self.trie_operations.is_empty() { - return Ok(()) + /// + /// # Returns + /// + /// The number of storage trie entries updated in the database. + pub fn write_to_database(self, tx: &TX) -> Result + where + TX: DbTx + DbTxMut, + { + if self.is_empty() { + return Ok(0) } - let mut account_trie_cursor = tx.cursor_write::()?; - let mut storage_trie_cursor = tx.cursor_dup_write::()?; + // Track the number of inserted entries. + let mut num_entries = 0; - let mut trie_operations = Vec::from_iter(self.trie_operations); - trie_operations.sort_unstable_by(|a, b| a.0.cmp(&b.0)); - for (key, operation) in trie_operations { - match key { - TrieKey::AccountNode(nibbles) => match operation { - TrieOp::Delete => { - if account_trie_cursor.seek_exact(nibbles)?.is_some() { - account_trie_cursor.delete_current()?; - } - } - TrieOp::Update(node) => { - if !nibbles.0.is_empty() { - account_trie_cursor.upsert(nibbles, StoredBranchNode(node))?; - } - } - }, - TrieKey::StorageTrie(hashed_address) => match operation { - TrieOp::Delete => { - if storage_trie_cursor.seek_exact(hashed_address)?.is_some() { - storage_trie_cursor.delete_current_duplicates()?; - } + // Merge updated and removed nodes. Updated nodes must take precedence. + let mut account_updates = self + .removed_nodes + .into_iter() + .filter_map(|n| (!self.account_nodes.contains_key(&n)).then_some((n, None))) + .collect::>(); + account_updates + .extend(self.account_nodes.into_iter().map(|(nibbles, node)| (nibbles, Some(node)))); + // Sort trie node updates. + account_updates.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + + let mut account_trie_cursor = tx.cursor_write::()?; + for (key, updated_node) in account_updates { + let nibbles = StoredNibbles(key); + match updated_node { + Some(node) => { + if !nibbles.0.is_empty() { + num_entries += 1; + account_trie_cursor.upsert(nibbles, StoredBranchNode(node))?; } - TrieOp::Update(..) => unreachable!("Cannot update full storage trie."), - }, - TrieKey::StorageNode(hashed_address, nibbles) => { - if !nibbles.is_empty() { - // Delete the old entry if it exists. 
-                    if storage_trie_cursor
-                        .seek_by_key_subkey(hashed_address, nibbles.clone())?
-                        .filter(|e| e.nibbles == nibbles)
-                        .is_some()
-                    {
-                        storage_trie_cursor.delete_current()?;
-                    }
-
-                    // The operation is an update, insert new entry.
-                    if let TrieOp::Update(node) = operation {
-                        storage_trie_cursor
-                            .upsert(hashed_address, StorageTrieEntry { nibbles, node })?;
-                    }
+                }
+                None => {
+                    num_entries += 1;
+                    if account_trie_cursor.seek_exact(nibbles)?.is_some() {
+                        account_trie_cursor.delete_current()?;
+                    }
                 }
             }
-        };
+            }
+        }
+
+        let mut storage_tries = Vec::from_iter(self.storage_tries);
+        storage_tries.sort_unstable_by(|a, b| a.0.cmp(&b.0));
+        let mut storage_trie_cursor = tx.cursor_dup_write::<tables::StoragesTrie>()?;
+        for (hashed_address, storage_trie_updates) in storage_tries {
+            let updated_storage_entries =
+                storage_trie_updates.write_with_cursor(&mut storage_trie_cursor, hashed_address)?;
+            num_entries += updated_storage_entries;
+        }
+
+        Ok(num_entries)
+    }
+}
+
+/// Trie updates for storage trie of a single account.
+#[derive(PartialEq, Eq, Clone, Default, Debug)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+pub struct StorageTrieUpdates {
+    /// Flag indicating whether the trie was deleted.
+    pub(crate) is_deleted: bool,
+    /// Collection of updated storage trie nodes.
+    pub(crate) storage_nodes: HashMap<Nibbles, BranchNodeCompact>,
+    /// Collection of removed storage trie nodes.
+    pub(crate) removed_nodes: HashSet<Nibbles>,
+}
+
+impl StorageTrieUpdates {
+    /// Returns empty storage trie updates with `deleted` set to `true`.
+    pub fn deleted() -> Self {
+        Self {
+            is_deleted: true,
+            storage_nodes: HashMap::default(),
+            removed_nodes: HashSet::default(),
+        }
+    }
+
+    /// Returns the length of updated nodes.
+    pub fn len(&self) -> usize {
+        (self.is_deleted as usize) + self.storage_nodes.len() + self.removed_nodes.len()
+    }
+
+    /// Returns `true` if the trie was deleted.
+    pub const fn is_deleted(&self) -> bool {
+        self.is_deleted
+    }
+
+    /// Returns reference to updated storage nodes.
+    pub const fn storage_nodes_ref(&self) -> &HashMap<Nibbles, BranchNodeCompact> {
+        &self.storage_nodes
+    }
+
+    /// Returns reference to removed storage nodes.
+    pub const fn removed_nodes_ref(&self) -> &HashSet<Nibbles> {
+        &self.removed_nodes
+    }
+
+    /// Returns `true` if storage updates are empty.
+    pub fn is_empty(&self) -> bool {
+        !self.is_deleted && self.storage_nodes.is_empty() && self.removed_nodes.is_empty()
+    }
+
+    /// Sets `deleted` flag on the storage trie.
+    pub fn set_deleted(&mut self, deleted: bool) {
+        self.is_deleted = deleted;
+    }
+
+    /// Finalize storage trie updates by taking updates from the walker and hash builder.
+    pub fn finalize<C>(&mut self, walker: TrieWalker<C>, hash_builder: HashBuilder) {
+        // Retrieve deleted keys from trie walker.
+        let (_, removed_keys) = walker.split();
+        self.removed_nodes.extend(removed_keys);
+
+        // Retrieve updated nodes from hash builder.
+        let (_, updated_nodes) = hash_builder.split();
+        self.storage_nodes.extend(updated_nodes);
+    }
+
+    /// Convert storage trie updates into [`StorageTrieUpdatesSorted`].
+    pub fn into_sorted(self) -> StorageTrieUpdatesSorted {
+        let mut storage_nodes = Vec::from_iter(self.storage_nodes);
+        storage_nodes.sort_unstable_by(|a, b| a.0.cmp(&b.0));
+        StorageTrieUpdatesSorted {
+            is_deleted: self.is_deleted,
+            removed_nodes: self.removed_nodes,
+            storage_nodes,
+        }
+    }
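Both `write_to_database` above and `write_with_cursor` below apply the same merge rule: a key present in `removed_nodes` and in the updated-node map is written, not deleted, and the merged set is sorted before it touches the cursor. A minimal, self-contained sketch of that step, with `Vec<u8>` and `&str` as hypothetical stand-ins for `Nibbles` and `BranchNodeCompact`:

```rust
use std::collections::{HashMap, HashSet};

type Key = Vec<u8>;
type Node = &'static str;

/// Removals survive only for keys that were not re-updated; updates always win.
/// The result is sorted so the database cursor can be walked sequentially.
fn merge_updates(updated: HashMap<Key, Node>, removed: HashSet<Key>) -> Vec<(Key, Option<Node>)> {
    let mut merged: Vec<(Key, Option<Node>)> = removed
        .into_iter()
        .filter(|key| !updated.contains_key(key))
        .map(|key| (key, None))
        .collect();
    merged.extend(updated.into_iter().map(|(key, node)| (key, Some(node))));
    merged.sort_unstable_by(|a, b| a.0.cmp(&b.0));
    merged
}

fn main() {
    let updated = HashMap::from([(vec![0x01], "updated-node")]);
    let removed = HashSet::from([vec![0x01], vec![0x02]]);
    // Key 0x01 was both removed and re-updated, so the update wins.
    assert_eq!(
        merge_updates(updated, removed),
        vec![(vec![0x01], Some("updated-node")), (vec![0x02], None)]
    );
}
```

+    /// Initializes a storage trie cursor and writes updates to database.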
+    pub fn write_to_database<TX>(
+        self,
+        tx: &TX,
+        hashed_address: B256,
+    ) -> Result<usize, reth_db::DatabaseError>
+    where
+        TX: DbTx + DbTxMut,
+    {
+        if self.is_empty() {
+            return Ok(0)
         }
-        Ok(())
+        let mut cursor = tx.cursor_dup_write::<tables::StoragesTrie>()?;
+        self.write_with_cursor(&mut cursor, hashed_address)
+    }
+
+    /// Writes updates to database.
+    ///
+    /// # Returns
+    ///
+    /// The number of storage trie entries updated in the database.
+    fn write_with_cursor<C>(
+        self,
+        cursor: &mut C,
+        hashed_address: B256,
+    ) -> Result<usize, reth_db::DatabaseError>
+    where
+        C: DbCursorRO<tables::StoragesTrie>
+            + DbCursorRW<tables::StoragesTrie>
+            + DbDupCursorRO<tables::StoragesTrie>
+            + DbDupCursorRW<tables::StoragesTrie>,
+    {
+        // The storage trie for this account has to be deleted.
+        if self.is_deleted && cursor.seek_exact(hashed_address)?.is_some() {
+            cursor.delete_current_duplicates()?;
+        }
+
+        // Merge updated and removed nodes. Updated nodes must take precedence.
+        let mut storage_updates = self
+            .removed_nodes
+            .into_iter()
+            .filter_map(|n| (!self.storage_nodes.contains_key(&n)).then_some((n, None)))
+            .collect::<Vec<_>>();
+        storage_updates
+            .extend(self.storage_nodes.into_iter().map(|(nibbles, node)| (nibbles, Some(node))));
+        // Sort trie node updates.
+        storage_updates.sort_unstable_by(|a, b| a.0.cmp(&b.0));
+
+        let mut num_entries = 0;
+        for (nibbles, maybe_updated) in storage_updates.into_iter().filter(|(n, _)| !n.is_empty()) {
+            num_entries += 1;
+            let nibbles = StoredNibblesSubKey(nibbles);
+            // Delete the old entry if it exists.
+            if cursor
+                .seek_by_key_subkey(hashed_address, nibbles.clone())?
+                .filter(|e| e.nibbles == nibbles)
+                .is_some()
+            {
+                cursor.delete_current()?;
+            }
+
+            // There is an updated version of this node, insert new entry.
+            if let Some(node) = maybe_updated {
+                cursor.upsert(hashed_address, StorageTrieEntry { nibbles, node })?;
+            }
+        }
+
+        Ok(num_entries)
+    }
+}
+
+/// Sorted trie updates used for lookups and insertions.
+#[derive(PartialEq, Eq, Clone, Default, Debug)]
+pub struct TrieUpdatesSorted {
+    pub(crate) account_nodes: Vec<(Nibbles, BranchNodeCompact)>,
+    pub(crate) removed_nodes: HashSet<Nibbles>,
+    pub(crate) storage_tries: HashMap<B256, StorageTrieUpdatesSorted>,
+}
+
+impl TrieUpdatesSorted {
+    /// Returns reference to updated account nodes.
+    pub fn account_nodes_ref(&self) -> &[(Nibbles, BranchNodeCompact)] {
+        &self.account_nodes
+    }
+
+    /// Returns reference to removed account nodes.
+    pub const fn removed_nodes_ref(&self) -> &HashSet<Nibbles> {
+        &self.removed_nodes
+    }
+
+    /// Returns reference to updated storage tries.
+    pub const fn storage_tries_ref(&self) -> &HashMap<B256, StorageTrieUpdatesSorted> {
+        &self.storage_tries
+    }
+}
+
+/// Sorted storage trie updates used for lookups and insertions.
+#[derive(PartialEq, Eq, Clone, Default, Debug)]
+pub struct StorageTrieUpdatesSorted {
+    pub(crate) is_deleted: bool,
+    pub(crate) storage_nodes: Vec<(Nibbles, BranchNodeCompact)>,
+    pub(crate) removed_nodes: HashSet<Nibbles>,
+}
+
+impl StorageTrieUpdatesSorted {
+    /// Returns `true` if the trie was deleted.
+    pub const fn is_deleted(&self) -> bool {
+        self.is_deleted
+    }
+
+    /// Returns reference to updated storage nodes.
+    pub fn storage_nodes_ref(&self) -> &[(Nibbles, BranchNodeCompact)] {
+        &self.storage_nodes
+    }
+
+    /// Returns reference to removed storage nodes.
+ pub const fn removed_nodes_ref(&self) -> &HashSet { + &self.removed_nodes } } diff --git a/crates/trie/trie/src/walker.rs b/crates/trie/trie/src/walker.rs index 6486a9b08..5f151e8b9 100644 --- a/crates/trie/trie/src/walker.rs +++ b/crates/trie/trie/src/walker.rs @@ -1,11 +1,11 @@ use crate::{ prefix_set::PrefixSet, trie_cursor::{CursorSubNode, TrieCursor}, - updates::TrieUpdates, BranchNodeCompact, Nibbles, }; use reth_db::DatabaseError; use reth_primitives::B256; +use std::collections::HashSet; /// `TrieWalker` is a structure that enables traversal of a Merkle trie. /// It allows moving through the trie in a depth-first manner, skipping certain branches @@ -22,36 +22,31 @@ pub struct TrieWalker { pub can_skip_current_node: bool, /// A `PrefixSet` representing the changes to be applied to the trie. pub changes: PrefixSet, - /// The trie updates to be applied to the trie. - trie_updates: Option, + /// The retained trie node keys that need to be removed. + removed_keys: Option>, } impl TrieWalker { /// Constructs a new `TrieWalker` from existing stack and a cursor. pub fn from_stack(cursor: C, stack: Vec, changes: PrefixSet) -> Self { let mut this = - Self { cursor, changes, stack, can_skip_current_node: false, trie_updates: None }; + Self { cursor, changes, stack, can_skip_current_node: false, removed_keys: None }; this.update_skip_node(); this } /// Sets the flag whether the trie updates should be stored. - pub fn with_updates(mut self, retain_updates: bool) -> Self { - self.set_updates(retain_updates); - self - } - - /// Sets the flag whether the trie updates should be stored. - pub fn set_updates(&mut self, retain_updates: bool) { - if retain_updates { - self.trie_updates = Some(TrieUpdates::default()); + pub fn with_deletions_retained(mut self, retained: bool) -> Self { + if retained { + self.removed_keys = Some(HashSet::default()); } + self } /// Split the walker into stack and trie updates. - pub fn split(mut self) -> (Vec, TrieUpdates) { - let trie_updates = self.trie_updates.take(); - (self.stack, trie_updates.unwrap_or_default()) + pub fn split(mut self) -> (Vec, HashSet) { + let keys = self.removed_keys.take(); + (self.stack, keys.unwrap_or_default()) } /// Prints the current stack of trie nodes. @@ -63,9 +58,9 @@ impl TrieWalker { println!("====================== END STACK ======================\n"); } - /// The current length of the trie updates. - pub fn updates_len(&self) -> usize { - self.trie_updates.as_ref().map(|u| u.len()).unwrap_or(0) + /// The current length of the removed keys. + pub fn removed_keys_len(&self) -> usize { + self.removed_keys.as_ref().map_or(0, |u| u.len()) } /// Returns the current key in the trie. @@ -117,7 +112,7 @@ impl TrieWalker { changes, stack: vec![CursorSubNode::default()], can_skip_current_node: false, - trie_updates: None, + removed_keys: None, }; // Set up the root node of the trie in the stack, if it exists. @@ -193,8 +188,8 @@ impl TrieWalker { // Delete the current node if it's included in the prefix set or it doesn't contain the root // hash. if !self.can_skip_current_node || nibble != -1 { - if let Some((updates, key)) = self.trie_updates.as_mut().zip(self.cursor.current()?) { - updates.schedule_delete(key); + if let Some((keys, key)) = self.removed_keys.as_mut().zip(self.cursor.current()?) 
{ + keys.insert(key); } } diff --git a/deny.toml b/deny.toml index 4dff1b9ce..431698495 100644 --- a/deny.toml +++ b/deny.toml @@ -43,6 +43,7 @@ allow = [ "Unicode-DFS-2016", "Unlicense", "Unicode-3.0", + "Zlib", # https://github.com/briansmith/ring/issues/902 "LicenseRef-ring", # https://github.com/rustls/webpki/blob/main/LICENSE ISC Style @@ -63,7 +64,6 @@ exceptions = [ { allow = ["CC0-1.0"], name = "aurora-engine-modexp" }, # TODO: decide on MPL-2.0 handling # These dependencies are grandfathered in in https://github.com/paradigmxyz/reth/pull/6980 - { allow = ["MPL-2.0"], name = "attohttpc" }, { allow = ["MPL-2.0"], name = "option-ext" }, { allow = ["MPL-2.0"], name = "webpki-roots" }, ] diff --git a/docs/crates/discv4.md b/docs/crates/discv4.md index 5abe7c439..348c68e06 100644 --- a/docs/crates/discv4.md +++ b/docs/crates/discv4.md @@ -126,7 +126,7 @@ The `NodeRecord::from_secret_key()` takes the socket address used for discovery If the `discv4_config` supplied to the `Discovery::new()` function is `None`, the discv4 service will not be spawned. In this case, no new peers will be discovered across the network. The node will have to rely on manually added peers. However, if the `discv4_config` contains a `Some(Discv4Config)` value, then the `Discv4::bind()` function is called to bind to a new UdpSocket and create the disc_v4 service. -[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/discv4/src/lib.rs#L188) +[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/530e7e8961b8f82ae2c675d16c368dd266ceba7d/crates/net/discv4/src/lib.rs#L178) ```rust ignore impl Discv4 { //--snip-- @@ -155,7 +155,7 @@ impl Discv4 { To better understand what is actually happening when the disc_v4 service is created, lets take a deeper look at the `Discv4Service::new()` function. -[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/discv4/src/lib.rs#L392) +[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/530e7e8961b8f82ae2c675d16c368dd266ceba7d/crates/net/discv4/src/lib.rs#L495) ```rust ignore impl Discv4Service { /// Create a new instance for a bound [`UdpSocket`]. @@ -216,7 +216,7 @@ In Rust, the owner of a [`Future`](https://doc.rust-lang.org/std/future/trait.Fu Lets take a detailed look at how `Discv4Service::poll` works under the hood. This function has many moving parts, so we will break it up into smaller sections. -[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/discv4/src/lib.rs#L1302) +[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/530e7e8961b8f82ae2c675d16c368dd266ceba7d/crates/net/discv4/src/lib.rs#L495) ```rust ignore impl Discv4Service { //--snip-- @@ -259,7 +259,7 @@ impl Discv4Service { As the function starts, a `loop` is entered and the `Discv4Service.queued_events` are evaluated to see if there are any events ready to be processed. If there is an event ready, the function immediately returns the event wrapped in `Poll::Ready()`. The `queued_events` field is a `VecDeque` where `Discv4Event` is an enum containing one of the following variants. 
-[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/discv4/src/lib.rs#L1455) +[File: crates/net/discv4/src/lib.rs](https://github.com/paradigmxyz/reth/blob/530e7e8961b8f82ae2c675d16c368dd266ceba7d/crates/net/discv4/src/lib.rs#L1770) ```rust ignore pub enum Discv4Event { /// A `Ping` message was handled. @@ -285,7 +285,7 @@ Next, the Discv4Service handles all incoming `Discv4Command`s until there are no In Reth, once a new `NetworkState` is initialized as the node starts up and a new task is spawned to handle the network, the `poll()` function is used to advance the state of the network. -[File: crates/net/network/src/state.rs](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/state.rs#L377) +[File: crates/net/network/src/state.rs](https://github.com/paradigmxyz/reth/blob/530e7e8961b8f82ae2c675d16c368dd266ceba7d/crates/net/network/src/state.rs#L396) ```rust ignore impl NetworkState where diff --git a/docs/crates/stages.md b/docs/crates/stages.md index 8e3de4a04..c7815b453 100644 --- a/docs/crates/stages.md +++ b/docs/crates/stages.md @@ -90,7 +90,7 @@ pub struct SealedHeader { Each `SealedHeader` is then validated to ensure that it has the proper parent. Note that this is only a basic response validation, and the `HeaderDownloader` uses the `validate` method during the `stream`, so that each header is validated according to the consensus specification before the header is yielded from the stream. After this, each header is then written to the database. If a header is not valid or the stream encounters any other error, the error is propagated up through the stage execution, the changes to the database are unwound and the stage is resumed from the most recent valid state. -This process continues until all of the headers have been downloaded and written to the database. Finally, the total difficulty of the chain's head is updated and the function returns `Ok(ExecOutput { stage_progress, done: true })`, signaling that the header sync has completed successfully. +This process continues until all of the headers have been downloaded and written to the database. Finally, the total difficulty of the chain's head is updated and the function returns `Ok(ExecOutput { stage_progress, done: true })`, signaling that the header sync has been completed successfully.
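The headers-stage walkthrough above boils down to a validate-then-advance loop that ends with `Ok(ExecOutput { stage_progress, done: true })`. A rough, self-contained sketch of that control flow, with simplified, hypothetical types (the real stage validates each header against the consensus spec and unwinds the database on error):

```rust
#[derive(Clone)]
struct SealedHeader {
    number: u64,
    hash: u64,
    parent_hash: u64,
}

struct ExecOutput {
    stage_progress: u64,
    done: bool,
}

#[derive(Debug)]
enum StageError {
    DetachedHead { at: u64 },
}

/// Walk the downloaded headers, checking that each one attaches to its parent.
/// Any mismatch propagates up so the pipeline can unwind to the last valid state.
fn execute(head: SealedHeader, downloaded: &[SealedHeader]) -> Result<ExecOutput, StageError> {
    let mut parent = head;
    for header in downloaded {
        if header.parent_hash != parent.hash {
            return Err(StageError::DetachedHead { at: header.number });
        }
        // The real stage writes the validated header to the database here.
        parent = header.clone();
    }
    Ok(ExecOutput { stage_progress: parent.number, done: true })
}

fn main() {
    let genesis = SealedHeader { number: 0, hash: 1, parent_hash: 0 };
    let next = SealedHeader { number: 1, hash: 2, parent_hash: 1 };
    let out = execute(genesis, &[next]).unwrap();
    assert!(out.done && out.stage_progress == 1);
}
```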
diff --git a/docs/design/review.md b/docs/design/review.md index 329d7b2d4..693c991a7 100644 --- a/docs/design/review.md +++ b/docs/design/review.md @@ -25,7 +25,7 @@ This document contains some of our research in how other codebases designed vari ## Header Downloaders * Erigon Header Downloader: - * A header downloader algo was introduced in [`erigon#1016`](https://github.com/ledgerwatch/erigon/pull/1016) and finished in [`erigon#1145`](https://github.com/ledgerwatch/erigon/pull/1145). At a high level, the downloader concurrently requested headers by hash, then sorted, validated and fused the responses into chain segments. Smaller segments were fused into larger as the gaps between them were filled. The downloader also used to maintain hardcoded hashes (later renamed to preverified) to bootstrap the sync. + * A header downloader algo was introduced in [`erigon#1016`](https://github.com/ledgerwatch/erigon/pull/1016) and finished in [`erigon#1145`](https://github.com/ledgerwatch/erigon/pull/1145). At a high level, the downloader concurrently requested headers by hash, then sorted, validated and fused the responses into chain segments. Smaller segments were fused into larger as the gaps between them were filled. The downloader is also used to maintain hardcoded hashes (later renamed to preverified) to bootstrap the sync. * The downloader was refactored multiple times: [`erigon#1471`](https://github.com/ledgerwatch/erigon/pull/1471), [`erigon#1559`](https://github.com/ledgerwatch/erigon/pull/1559) and [`erigon#2035`](https://github.com/ledgerwatch/erigon/pull/2035). * With PoS transition in [`erigon#3075`](https://github.com/ledgerwatch/erigon/pull/3075) terminal td was introduced to the algo to stop forward syncing. For the downward sync (post merge), the download was now delegated to [`EthBackendServer`](https://github.com/ledgerwatch/erigon/blob/3c95db00788dc740849c2207d886fe4db5a8c473/ethdb/privateapi/ethbackend.go#L245) * Proper reverse PoS downloader was introduced in [`erigon#3092`](https://github.com/ledgerwatch/erigon/pull/3092) which downloads the header batches from tip until local head is reached. Refactored later in [`erigon#3340`](https://github.com/ledgerwatch/erigon/pull/3340) and [`erigon#3717`](https://github.com/ledgerwatch/erigon/pull/3717). diff --git a/docs/repo/ci.md b/docs/repo/ci.md index 18356ddb7..d69e12c5d 100644 --- a/docs/repo/ci.md +++ b/docs/repo/ci.md @@ -4,7 +4,6 @@ The CI runs a couple of workflows: ### Code -- **[ci]**: A catch-all for small jobs. Currently only runs lints (rustfmt, clippy etc.) 
- **[unit]**: Runs unit tests (tests in `src/`) and doc tests - **[integration]**: Runs integration tests (tests in `tests/` and sync tests) - **[bench]**: Runs benchmarks @@ -16,14 +15,11 @@ The CI runs a couple of workflows: ### Meta - **[deny]**: Runs `cargo deny` to check for license conflicts and security advisories in our dependencies -- **[sanity]**: Runs a couple of sanity checks on the code every night, such as checking for unused dependencies - **[release]**: Runs the release workflow -[ci]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/ci.yml [unit]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/unit.yml [integration]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/integration.yml [bench]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/bench.yml [book]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/book.yml [deny]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/deny.yml -[sanity]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/sanity.yml [release]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/release.yml diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index e56c94b11..d9b3cdefd 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -8177,6 +8177,529 @@ ], "title": "Number of ExExs", "type": "stat" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 298 + }, + "id": 226, + "panels": [], + "title": "Eth Requests", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 299 + }, + "id": 225, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_eth_headers_requests_received_total{instance=~\"$instance\"}[$__rate_interval])", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + 
"legendFormat": "Headers Requests/s", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Headers Requests Received", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 299 + }, + "id": 227, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_eth_receipts_requests_received_total{instance=~\"$instance\"}[$__rate_interval])", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Receipts Requests/s", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Receipts Requests Received", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 
12, + "x": 0, + "y": 306 + }, + "id": 235, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_eth_bodies_requests_received_total{instance=~\"$instance\"}[$__rate_interval])", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Bodies Requests/s", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Bodies Requests Received", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "http" + }, + "properties": [ + { + "id": "displayName", + "value": "HTTP" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "ws" + }, + "properties": [ + { + "id": "displayName", + "value": "WebSocket" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 306 + }, + "id": 234, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "rate(reth_network_eth_node_data_requests_received_total{instance=~\"$instance\"}[$__rate_interval])", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "Node Data Requests/s", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Node Data Requests Received", + "type": "timeseries" } ], "refresh": "30s", diff --git a/etc/grafana/dashboards/reth-mempool.json b/etc/grafana/dashboards/reth-mempool.json index 092faaccb..90fe5ba8d 100644 --- a/etc/grafana/dashboards/reth-mempool.json +++ b/etc/grafana/dashboards/reth-mempool.json @@ -1719,6 +1719,23 @@ "range": true, "refId": "C", "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "reth_transaction_pool_blob_transactions_nonce_gaps{instance=~\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Blob transactions nonce 
gaps", + "range": true, + "refId": "D", + "useBackend": false } ], "title": "All Transactions metrics", diff --git a/examples/README.md b/examples/README.md index c5f20f21c..b24b7387f 100644 --- a/examples/README.md +++ b/examples/README.md @@ -10,26 +10,27 @@ to make a PR! ## Node Builder -| Example | Description | -| -------------------------------------------------- | ------------------------------------------------------------------------------------------------ | -| [Additional RPC namespace](./node-custom-rpc) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | -| [Custom event hooks](./node-event-hooks) | Illustrates how to hook to various node lifecycle events | -| [Custom dev node](./custom-dev-node) | Illustrates how to run a custom dev node programmatically and submit a transaction to it via RPC | -| [Custom EVM](./custom-evm) | Illustrates how to implement a node with a custom EVM | -| [Custom Stateful Precompile](./stateful-precompile)| Illustrates how to implement a node with a stateful precompile | -| [Custom inspector](./custom-inspector) | Illustrates how to use a custom EVM inspector to trace new transactions | -| [Custom engine types](./custom-engine-types) | Illustrates how to create a node with custom engine types | -| [Custom node components](./custom-node-components) | Illustrates how to configure custom node components | -| [Custom payload builder](./custom-payload-builder) | Illustrates how to use a custom payload builder | +| Example | Description | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------ | +| [Additional RPC namespace](./node-custom-rpc) | Illustrates how to add custom CLI parameters and set up a custom RPC namespace | +| [Custom event hooks](./node-event-hooks) | Illustrates how to hook to various node lifecycle events | +| [Custom dev node](./custom-dev-node) | Illustrates how to run a custom dev node programmatically and submit a transaction to it via RPC | +| [Custom EVM](./custom-evm) | Illustrates how to implement a node with a custom EVM | +| [Custom Stateful Precompile](./stateful-precompile) | Illustrates how to implement a node with a stateful precompile | +| [Custom inspector](./custom-inspector) | Illustrates how to use a custom EVM inspector to trace new transactions | +| [Custom engine types](./custom-engine-types) | Illustrates how to create a node with custom engine types | +| [Custom node components](./custom-node-components) | Illustrates how to configure custom node components | +| [Custom payload builder](./custom-payload-builder) | Illustrates how to use a custom payload builder | ## ExEx -| Example | Description | -|-------------------------------------------|-----------------------------------------------------------------------------------| -| [Minimal ExEx](./exex/minimal) | Illustrates how to build a simple ExEx | -| [OP Bridge ExEx](./exex/op-bridge) | Illustrates an ExEx that decodes Optimism deposit and withdrawal receipts from L1 | -| [Rollup](./exex/rollup) | Illustrates a rollup ExEx that derives the state from L1 | -| [In Memory State](./exex/in-memory-state) | Illustrates an ExEx that tracks the plain state in memory | +| Example | Description | +| ----------------------------------------- | --------------------------------------------------------------------------------------------------- | +| [In Memory State](./exex/in-memory-state) | Illustrates an ExEx that tracks the plain state in 
memory | +| [Minimal](./exex/minimal) | Illustrates how to build a simple ExEx | +| [OP Bridge](./exex/op-bridge) | Illustrates an ExEx that decodes Optimism deposit and withdrawal receipts from L1 | +| [Rollup](./exex/rollup) | Illustrates a rollup ExEx that derives the state from L1 | +| [Discv5 as ExEx](./exex/discv5) | Illustrates an ExEx that runs discv5 discovery stack | ## RPC @@ -58,11 +59,11 @@ to make a PR! ## P2P -| Example | Description | -| --------------------------- | ----------------------------------------------------------------- | -| [Manual P2P](./manual-p2p) | Illustrates how to connect and communicate with a peer | -| [Polygon P2P](./polygon-p2p) | Illustrates how to connect and communicate with a peer on Polygon | -| [BSC P2P](./bsc-p2p) | Illustrates how to connect and communicate with a peer on Binance Smart Chain | +| Example | Description | +| ---------------------------- | ----------------------------------------------------------------------------- | +| [Manual P2P](./manual-p2p) | Illustrates how to connect and communicate with a peer | +| [Polygon P2P](./polygon-p2p) | Illustrates how to connect and communicate with a peer on Polygon | +| [BSC P2P](./bsc-p2p) | Illustrates how to connect and communicate with a peer on Binance Smart Chain | ## Misc diff --git a/examples/bsc-p2p/Cargo.toml b/examples/bsc-p2p/Cargo.toml index c4d1dbf77..dde02080d 100644 --- a/examples/bsc-p2p/Cargo.toml +++ b/examples/bsc-p2p/Cargo.toml @@ -12,6 +12,7 @@ reth-chainspec.workspace = true reth-discv4 = { workspace = true, features = ["test-utils"] } reth-network = { workspace = true, features = ["test-utils"] } reth-network-api.workspace = true +reth-network-peers.workspace = true reth-primitives.workspace = true reth-tracing.workspace = true diff --git a/examples/bsc-p2p/src/chainspec.rs b/examples/bsc-p2p/src/chainspec.rs index d9c3a8682..0c4cbe1ed 100644 --- a/examples/bsc-p2p/src/chainspec.rs +++ b/examples/bsc-p2p/src/chainspec.rs @@ -1,7 +1,10 @@ -use reth_chainspec::{net::NodeRecord, BaseFeeParams, Chain, ChainSpec, ForkCondition, Hardfork}; +use reth_chainspec::{ + BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition, +}; +use reth_network_peers::NodeRecord; use reth_primitives::{b256, B256}; -use std::{collections::BTreeMap, sync::Arc}; +use std::sync::Arc; pub const SHANGHAI_TIME: u64 = 1705996800; @@ -13,7 +16,10 @@ pub(crate) fn bsc_chain_spec() -> Arc { genesis: serde_json::from_str(include_str!("./genesis.json")).expect("deserialize genesis"), genesis_hash: Some(GENESIS), paris_block_and_final_difficulty: None, - hardforks: BTreeMap::from([(Hardfork::Shanghai, ForkCondition::Timestamp(SHANGHAI_TIME))]), + hardforks: ChainHardforks::new(vec![( + EthereumHardfork::Shanghai.boxed(), + ForkCondition::Timestamp(SHANGHAI_TIME), + )]), deposit_contract: None, base_fee_params: reth_chainspec::BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()), prune_delete_limit: 0, diff --git a/examples/custom-dev-node/Cargo.toml b/examples/custom-dev-node/Cargo.toml index cc21c97d2..d40c97ca6 100644 --- a/examples/custom-dev-node/Cargo.toml +++ b/examples/custom-dev-node/Cargo.toml @@ -11,7 +11,7 @@ reth.workspace = true reth-chainspec.workspace = true reth-node-core.workspace = true reth-primitives.workspace = true -reth-node-ethereum.workspace = true +reth-node-ethereum = { workspace = true, features = ["test-utils"] } futures-util.workspace = true eyre.workspace = true diff --git a/examples/custom-dev-node/src/main.rs b/examples/custom-dev-node/src/main.rs 
index 498971dbd..176e4c503 100644 --- a/examples/custom-dev-node/src/main.rs +++ b/examples/custom-dev-node/src/main.rs @@ -3,18 +3,19 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use std::sync::Arc; + use futures_util::StreamExt; use reth::{ builder::{NodeBuilder, NodeHandle}, providers::CanonStateSubscriptions, - rpc::eth::EthTransactions, + rpc::api::eth::helpers::EthTransactions, tasks::TaskManager, }; use reth_chainspec::ChainSpec; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; use reth_node_ethereum::EthereumNode; use reth_primitives::{b256, hex, Genesis}; -use std::sync::Arc; #[tokio::main] async fn main() -> eyre::Result<()> { @@ -26,7 +27,7 @@ async fn main() -> eyre::Result<()> { .with_rpc(RpcServerArgs::default().with_http()) .with_chain(custom_chain()); - let NodeHandle { mut node, node_exit_future: _ } = NodeBuilder::new(node_config) + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config) .testing_node(tasks.executor()) .node(EthereumNode::default()) .launch() diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml index 3b6a796ba..c00863147 100644 --- a/examples/custom-engine-types/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -15,7 +15,7 @@ reth-primitives.workspace = true reth-payload-builder.workspace = true reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true -reth-node-ethereum.workspace = true +reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true alloy-genesis.workspace = true diff --git a/examples/custom-evm/Cargo.toml b/examples/custom-evm/Cargo.toml index 5822bcb22..7642dc80c 100644 --- a/examples/custom-evm/Cargo.toml +++ b/examples/custom-evm/Cargo.toml @@ -8,10 +8,11 @@ license.workspace = true [dependencies] reth.workspace = true reth-chainspec.workspace = true +reth-evm-ethereum.workspace = true reth-node-api.workspace = true reth-node-core.workspace = true reth-primitives.workspace = true -reth-node-ethereum.workspace = true +reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true alloy-genesis.workspace = true diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 9d394126f..207640dce 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -7,8 +7,8 @@ use reth::{ builder::{components::ExecutorBuilder, BuilderContext, NodeBuilder}, primitives::{ address, - revm_primitives::{CfgEnvWithHandlerCfg, Env, PrecompileResult, TxEnv}, - Address, Bytes, U256, + revm_primitives::{Env, PrecompileResult}, + Bytes, }, revm::{ handler::register::EvmHandler, @@ -18,11 +18,15 @@ use reth::{ }, tasks::TaskManager, }; -use reth_chainspec::{Chain, ChainSpec}; +use reth_chainspec::{Chain, ChainSpec, Head}; +use reth_evm_ethereum::EthEvmConfig; use reth_node_api::{ConfigureEvm, ConfigureEvmEnv, FullNodeTypes}; use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; -use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider, EthereumNode}; -use reth_primitives::{Header, TransactionSigned}; +use reth_node_ethereum::{EthExecutorProvider, EthereumNode}; +use reth_primitives::{ + revm_primitives::{AnalysisKind, CfgEnvWithHandlerCfg, TxEnv}, + Address, Header, TransactionSigned, U256, +}; use reth_tracing::{RethTracer, Tracer}; use std::sync::Arc; @@ -63,17 +67,42 @@ impl MyEvmConfig { } impl ConfigureEvmEnv for MyEvmConfig { - fn fill_tx_env(tx_env: &mut TxEnv, transaction: 
&TransactionSigned, sender: Address) { - EthEvmConfig::fill_tx_env(tx_env, transaction, sender) - } - fn fill_cfg_env( + &self, cfg_env: &mut CfgEnvWithHandlerCfg, chain_spec: &ChainSpec, header: &Header, total_difficulty: U256, ) { - EthEvmConfig::fill_cfg_env(cfg_env, chain_spec, header, total_difficulty) + let spec_id = reth_evm_ethereum::revm_spec( + chain_spec, + &Head { + number: header.number, + timestamp: header.timestamp, + difficulty: header.difficulty, + total_difficulty, + hash: Default::default(), + }, + ); + + cfg_env.chain_id = chain_spec.chain().id(); + cfg_env.perf_analyse_created_bytecodes = AnalysisKind::Analyse; + + cfg_env.handler_cfg.spec_id = spec_id; + } + + fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) { + EthEvmConfig::default().fill_tx_env(tx_env, transaction, sender) + } + + fn fill_tx_env_system_contract_call( + &self, + env: &mut Env, + caller: Address, + contract: Address, + data: Bytes, + ) { + EthEvmConfig::default().fill_tx_env_system_contract_call(env, caller, contract, data) } } diff --git a/examples/custom-inspector/src/main.rs b/examples/custom-inspector/src/main.rs index c1aae0227..b6721eded 100644 --- a/examples/custom-inspector/src/main.rs +++ b/examples/custom-inspector/src/main.rs @@ -21,7 +21,7 @@ use reth::{ interpreter::{Interpreter, OpCode}, Database, Evm, EvmContext, Inspector, }, - rpc::{compat::transaction::transaction_to_call_request, eth::EthTransactions}, + rpc::{api::eth::helpers::Call, compat::transaction::transaction_to_call_request}, transaction_pool::TransactionPool, }; use reth_node_ethereum::node::EthereumNode; @@ -31,14 +31,14 @@ fn main() { Cli::::parse() .run(|builder, args| async move { // launch the node - let NodeHandle { mut node, node_exit_future } = + let NodeHandle { node, node_exit_future } = builder.node(EthereumNode::default()).launch().await?; // create a new subscription to pending transactions let mut pending_transactions = node.pool.new_pending_pool_transactions_listener(); // get an instance of the `trace_` API handler - let eth_api = node.rpc_registry.eth_api(); + let eth_api = node.rpc_registry.eth_api().clone(); println!("Spawning trace task!"); diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index 19bc9777b..842627797 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -39,7 +39,7 @@ pub struct CustomPoolBuilder { pool_config: PoolConfig, } -/// Implement the `PoolBuilder` trait for the custom pool builder +/// Implement the [`PoolBuilder`] trait for the custom pool builder /// /// This will be used to build the transaction pool and its maintenance tasks during launch. 
impl<Node> PoolBuilder<Node> for CustomPoolBuilder
diff --git a/examples/custom-rlpx-subprotocol/Cargo.toml b/examples/custom-rlpx-subprotocol/Cargo.toml
new file mode 100644
index 000000000..d2d1caab6
--- /dev/null
+++ b/examples/custom-rlpx-subprotocol/Cargo.toml
@@ -0,0 +1,23 @@
+[package]
+name = "example-custom-rlpx-subprotocol"
+version = "0.0.0"
+publish = false
+edition.workspace = true
+license.workspace = true
+
+
+[dependencies]
+tokio = { workspace = true, features = ["full"] }
+futures.workspace = true
+reth-eth-wire.workspace = true
+reth-network.workspace = true
+reth-network-api.workspace = true
+reth-node-ethereum.workspace = true
+reth-provider = { workspace = true, features = ["test-utils"] }
+reth-primitives.workspace = true
+reth-rpc-types.workspace = true
+reth.workspace = true
+tokio-stream.workspace = true
+eyre.workspace = true
+rand.workspace = true
+tracing.workspace = true
diff --git a/examples/custom-rlpx-subprotocol/src/main.rs b/examples/custom-rlpx-subprotocol/src/main.rs
new file mode 100644
index 000000000..3a198c38d
--- /dev/null
+++ b/examples/custom-rlpx-subprotocol/src/main.rs
@@ -0,0 +1,104 @@
+//! Example of how to customize the network layer by adding a custom rlpx subprotocol.
+//!
+//! Run with
+//!
+//! ```not_rust
+//! cargo run -p example-custom-rlpx-subprotocol -- node
+//! ```
+//!
+//! This launches a regular reth node with a custom rlpx subprotocol.
+use reth::builder::NodeHandle;
+use reth_network::{
+    config::SecretKey, protocol::IntoRlpxSubProtocol, NetworkConfig, NetworkManager,
+    NetworkProtocols,
+};
+use reth_network_api::NetworkInfo;
+use reth_node_ethereum::EthereumNode;
+use reth_provider::test_utils::NoopProvider;
+use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
+use subprotocol::{
+    connection::CustomCommand,
+    protocol::{
+        event::ProtocolEvent,
+        handler::{CustomRlpxProtoHandler, ProtocolState},
+    },
+};
+use tokio::sync::{mpsc, oneshot};
+use tracing::info;
+
+mod subprotocol;
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::parse_args().run(|builder, _args| async move {
+        // launch the node
+        let NodeHandle { node, node_exit_future } =
+            builder.node(EthereumNode::default()).launch().await?;
+        let peer_id = node.network.peer_id();
+        let peer_addr = node.network.local_addr();
+
+        // add the custom network subprotocol to the launched node
+        let (tx, mut from_peer0) = mpsc::unbounded_channel();
+        let custom_rlpx_handler = CustomRlpxProtoHandler { state: ProtocolState { events: tx } };
+        node.network.add_rlpx_sub_protocol(custom_rlpx_handler.into_rlpx_sub_protocol());
+
+        // creates a separate network instance and adds the custom network subprotocol
+        let secret_key = SecretKey::new(&mut rand::thread_rng());
+        let (tx, mut from_peer1) = mpsc::unbounded_channel();
+        let custom_rlpx_handler_2 = CustomRlpxProtoHandler { state: ProtocolState { events: tx } };
+        let net_cfg = NetworkConfig::builder(secret_key)
+            .listener_addr(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0)))
+            .disable_discovery()
+            .add_rlpx_sub_protocol(custom_rlpx_handler_2.into_rlpx_sub_protocol())
+            .build(NoopProvider::default());
+
+        // spawn the second network instance
+        let subnetwork = NetworkManager::new(net_cfg).await?;
+        let subnetwork_peer_id = *subnetwork.peer_id();
+        let subnetwork_peer_addr = subnetwork.local_addr();
+        let subnetwork_handle = subnetwork.peers_handle();
+        node.task_executor.spawn(subnetwork);
+
+        // connect the launched node to the subnetwork
+        node.network.peers_handle().add_peer(subnetwork_peer_id, subnetwork_peer_addr);
+
// connect the subnetwork to the launched node + subnetwork_handle.add_peer(*peer_id, peer_addr); + + // establish connection between peer0 and peer1 + let peer0_to_peer1 = from_peer0.recv().await.expect("peer0 connecting to peer1"); + let peer0_conn = match peer0_to_peer1 { + ProtocolEvent::Established { direction: _, peer_id, to_connection } => { + assert_eq!(peer_id, subnetwork_peer_id); + to_connection + } + }; + + // establish connection between peer1 and peer0 + let peer1_to_peer0 = from_peer1.recv().await.expect("peer1 connecting to peer0"); + let peer1_conn = match peer1_to_peer0 { + ProtocolEvent::Established { direction: _, peer_id: peer1_id, to_connection } => { + assert_eq!(peer1_id, *peer_id); + to_connection + } + }; + info!(target:"rlpx-subprotocol", "Connection established!"); + + // send a ping message from peer0 to peer1 + let (tx, rx) = oneshot::channel(); + peer0_conn.send(CustomCommand::Message { msg: "hello!".to_string(), response: tx })?; + let response = rx.await?; + assert_eq!(response, "hello!"); + info!(target:"rlpx-subprotocol", ?response, "New message received"); + + // send a ping message from peer1 to peer0 + let (tx, rx) = oneshot::channel(); + peer1_conn.send(CustomCommand::Message { msg: "world!".to_string(), response: tx })?; + let response = rx.await?; + assert_eq!(response, "world!"); + info!(target:"rlpx-subprotocol", ?response, "New message received"); + + info!(target:"rlpx-subprotocol", "Peers connected via custom rlpx subprotocol!"); + + node_exit_future.await + }) +} diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/connection/handler.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/connection/handler.rs new file mode 100644 index 000000000..dae2d5c86 --- /dev/null +++ b/examples/custom-rlpx-subprotocol/src/subprotocol/connection/handler.rs @@ -0,0 +1,53 @@ +use super::CustomRlpxConnection; +use crate::subprotocol::protocol::{ + event::ProtocolEvent, handler::ProtocolState, proto::CustomRlpxProtoMessage, +}; +use reth_eth_wire::{ + capability::SharedCapabilities, multiplex::ProtocolConnection, protocol::Protocol, +}; +use reth_network::protocol::{ConnectionHandler, OnNotSupported}; +use reth_network_api::Direction; +use reth_rpc_types::PeerId; +use tokio::sync::mpsc; +use tokio_stream::wrappers::UnboundedReceiverStream; + +/// The connection handler for the custom RLPx protocol. 
+pub(crate) struct CustomRlpxConnectionHandler {
+    pub(crate) state: ProtocolState,
+}
+
+impl ConnectionHandler for CustomRlpxConnectionHandler {
+    type Connection = CustomRlpxConnection;
+
+    fn protocol(&self) -> Protocol {
+        CustomRlpxProtoMessage::protocol()
+    }
+
+    fn on_unsupported_by_peer(
+        self,
+        _supported: &SharedCapabilities,
+        _direction: Direction,
+        _peer_id: PeerId,
+    ) -> OnNotSupported {
+        OnNotSupported::KeepAlive
+    }
+
+    fn into_connection(
+        self,
+        direction: Direction,
+        peer_id: PeerId,
+        conn: ProtocolConnection,
+    ) -> Self::Connection {
+        let (tx, rx) = mpsc::unbounded_channel();
+        self.state
+            .events
+            .send(ProtocolEvent::Established { direction, peer_id, to_connection: tx })
+            .ok();
+        CustomRlpxConnection {
+            conn,
+            initial_ping: direction.is_outgoing().then(CustomRlpxProtoMessage::ping),
+            commands: UnboundedReceiverStream::new(rx),
+            pending_pong: None,
+        }
+    }
+}
diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/connection/mod.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/connection/mod.rs
new file mode 100644
index 000000000..a6d835b70
--- /dev/null
+++ b/examples/custom-rlpx-subprotocol/src/subprotocol/connection/mod.rs
@@ -0,0 +1,76 @@
+use super::protocol::proto::{CustomRlpxProtoMessage, CustomRlpxProtoMessageKind};
+use futures::{Stream, StreamExt};
+use reth_eth_wire::multiplex::ProtocolConnection;
+use reth_primitives::BytesMut;
+use std::{
+    pin::Pin,
+    task::{ready, Context, Poll},
+};
+use tokio::sync::oneshot;
+use tokio_stream::wrappers::UnboundedReceiverStream;
+
+pub(crate) mod handler;
+
+/// We define some custom commands that the subprotocol supports.
+pub(crate) enum CustomCommand {
+    /// Sends a message to the peer
+    Message {
+        msg: String,
+        /// The response will be sent to this channel.
+        response: oneshot::Sender<String>,
+    },
+}
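`CustomCommand::Message` pairs each outgoing request with a `oneshot::Sender`, which is how the `main.rs` wiring above gets a reply back out of `rx.await?`. A self-contained sketch of that request/response pattern, using plain tokio and hypothetical names:

```rust
use tokio::sync::{mpsc, oneshot};

/// Simplified version of `CustomCommand`: a request that carries the
/// one-shot channel on which exactly one response is expected.
enum Command {
    Message { msg: String, response: oneshot::Sender<String> },
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();

    // The "connection" side answers each command once, echoing the message.
    tokio::spawn(async move {
        while let Some(Command::Message { msg, response }) = rx.recv().await {
            let _ = response.send(msg);
        }
    });

    // The caller sends a command and awaits the paired response.
    let (resp_tx, resp_rx) = oneshot::channel();
    let _ = tx.send(Command::Message { msg: "hello!".to_string(), response: resp_tx });
    assert_eq!(resp_rx.await.unwrap(), "hello!");
}
```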
+
+/// The connection for the custom RLPx protocol.
+pub(crate) struct CustomRlpxConnection {
+    conn: ProtocolConnection,
+    initial_ping: Option<CustomRlpxProtoMessage>,
+    commands: UnboundedReceiverStream<CustomCommand>,
+    pending_pong: Option<oneshot::Sender<String>>,
+}
+
+impl Stream for CustomRlpxConnection {
+    type Item = BytesMut;
+
+    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        let this = self.get_mut();
+        if let Some(initial_ping) = this.initial_ping.take() {
+            return Poll::Ready(Some(initial_ping.encoded()))
+        }
+
+        loop {
+            if let Poll::Ready(Some(cmd)) = this.commands.poll_next_unpin(cx) {
+                return match cmd {
+                    CustomCommand::Message { msg, response } => {
+                        this.pending_pong = Some(response);
+                        Poll::Ready(Some(CustomRlpxProtoMessage::ping_message(msg).encoded()))
+                    }
+                }
+            }
+
+            let Some(msg) = ready!(this.conn.poll_next_unpin(cx)) else { return Poll::Ready(None) };
+
+            let Some(msg) = CustomRlpxProtoMessage::decode_message(&mut &msg[..]) else {
+                return Poll::Ready(None)
+            };
+
+            match msg.message {
+                CustomRlpxProtoMessageKind::Ping => {
+                    return Poll::Ready(Some(CustomRlpxProtoMessage::pong().encoded()))
+                }
+                CustomRlpxProtoMessageKind::Pong => {}
+                CustomRlpxProtoMessageKind::PingMessage(msg) => {
+                    return Poll::Ready(Some(CustomRlpxProtoMessage::pong_message(msg).encoded()))
+                }
+                CustomRlpxProtoMessageKind::PongMessage(msg) => {
+                    if let Some(sender) = this.pending_pong.take() {
+                        sender.send(msg).ok();
+                    }
+                    continue
+                }
+            }
+
+            return Poll::Pending
+        }
+    }
+}
diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/mod.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/mod.rs
new file mode 100644
index 000000000..53ec0dc1d
--- /dev/null
+++ b/examples/custom-rlpx-subprotocol/src/subprotocol/mod.rs
@@ -0,0 +1,2 @@
+pub(crate) mod connection;
+pub(crate) mod protocol;
diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/event.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/event.rs
new file mode 100644
index 000000000..ea9e588e5
--- /dev/null
+++ b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/event.rs
@@ -0,0 +1,15 @@
+use crate::subprotocol::connection::CustomCommand;
+use reth_network::Direction;
+use reth_network_api::PeerId;
+use tokio::sync::mpsc;
+
+/// The events that can be emitted by our custom protocol.
+#[derive(Debug)]
+pub(crate) enum ProtocolEvent {
+    Established {
+        #[allow(dead_code)]
+        direction: Direction,
+        peer_id: PeerId,
+        to_connection: mpsc::UnboundedSender<CustomCommand>,
+    },
+}
diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/handler.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/handler.rs
new file mode 100644
index 000000000..d5a35398d
--- /dev/null
+++ b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/handler.rs
@@ -0,0 +1,34 @@
+use super::event::ProtocolEvent;
+use crate::subprotocol::connection::handler::CustomRlpxConnectionHandler;
+use reth_network::protocol::ProtocolHandler;
+use reth_network_api::PeerId;
+use std::net::SocketAddr;
+use tokio::sync::mpsc;
+
+/// Protocol state is a helper struct to store the protocol events.
+#[derive(Clone, Debug)]
+pub(crate) struct ProtocolState {
+    pub(crate) events: mpsc::UnboundedSender<ProtocolEvent>,
+}
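`ProtocolState` is just a cloneable handle to a single event channel: every connection handler created below gets its own clone, while one listener drains all `Established` events (exactly what `main.rs` does with `from_peer0` and `from_peer1`). A self-contained sketch of that fan-in, using plain tokio and hypothetical names:

```rust
use tokio::sync::mpsc;

/// Simplified `ProtocolState`: a shared sender cloned into every handler.
#[derive(Clone)]
struct State {
    events: mpsc::UnboundedSender<&'static str>,
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::unbounded_channel();
    let state = State { events: tx };

    // Each incoming or outgoing connection gets its own clone of the state.
    for peer in ["peer0", "peer1"] {
        let handler_state = state.clone();
        tokio::spawn(async move {
            let _ = handler_state.events.send(peer);
        });
    }
    // Drop the original so the channel closes once all handlers are done.
    drop(state);

    // A single listener observes every connection event.
    let mut seen = Vec::new();
    while let Some(peer) = rx.recv().await {
        seen.push(peer);
    }
    seen.sort_unstable();
    assert_eq!(seen, ["peer0", "peer1"]);
}
```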
+
+/// The protocol handler takes care of incoming and outgoing connections.
+#[derive(Debug)]
+pub(crate) struct CustomRlpxProtoHandler {
+    pub state: ProtocolState,
+}
+
+impl ProtocolHandler for CustomRlpxProtoHandler {
+    type ConnectionHandler = CustomRlpxConnectionHandler;
+
+    fn on_incoming(&self, _socket_addr: SocketAddr) -> Option<Self::ConnectionHandler> {
+        Some(CustomRlpxConnectionHandler { state: self.state.clone() })
+    }
+
+    fn on_outgoing(
+        &self,
+        _socket_addr: SocketAddr,
+        _peer_id: PeerId,
+    ) -> Option<Self::ConnectionHandler> {
+        Some(CustomRlpxConnectionHandler { state: self.state.clone() })
+    }
+}
diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/mod.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/mod.rs
new file mode 100644
index 000000000..8aba9a4e3
--- /dev/null
+++ b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/mod.rs
@@ -0,0 +1,3 @@
+pub(crate) mod event;
+pub(crate) mod handler;
+pub(crate) mod proto;
diff --git a/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs
new file mode 100644
index 000000000..8b179a447
--- /dev/null
+++ b/examples/custom-rlpx-subprotocol/src/subprotocol/protocol/proto.rs
@@ -0,0 +1,113 @@
+//! Simple RLPx Ping Pong protocol that also supports sending messages,
+//! following [RLPx specs](https://github.com/ethereum/devp2p/blob/master/rlpx.md)
+
+use reth_eth_wire::{protocol::Protocol, Capability};
+use reth_primitives::{Buf, BufMut, BytesMut};
+
+#[repr(u8)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub(crate) enum CustomRlpxProtoMessageId {
+    Ping = 0x00,
+    Pong = 0x01,
+    PingMessage = 0x02,
+    PongMessage = 0x03,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(crate) enum CustomRlpxProtoMessageKind {
+    Ping,
+    Pong,
+    PingMessage(String),
+    PongMessage(String),
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(crate) struct CustomRlpxProtoMessage {
+    pub message_type: CustomRlpxProtoMessageId,
+    pub message: CustomRlpxProtoMessageKind,
+}
+
+impl CustomRlpxProtoMessage {
+    /// Returns the capability for the `custom_rlpx` protocol.
+    pub fn capability() -> Capability {
+        Capability::new_static("custom_rlpx", 1)
+    }
+
+    /// Returns the protocol for the `custom_rlpx` protocol.
+    pub fn protocol() -> Protocol {
+        Protocol::new(Self::capability(), 4)
+    }
+
+    /// Creates a ping message
+    pub fn ping_message(msg: impl Into<String>) -> Self {
+        Self {
+            message_type: CustomRlpxProtoMessageId::PingMessage,
+            message: CustomRlpxProtoMessageKind::PingMessage(msg.into()),
+        }
+    }
+    /// Creates a pong message
+    pub fn pong_message(msg: impl Into<String>) -> Self {
+        Self {
+            message_type: CustomRlpxProtoMessageId::PongMessage,
+            message: CustomRlpxProtoMessageKind::PongMessage(msg.into()),
+        }
+    }
+
+    /// Creates a ping message
+    pub fn ping() -> Self {
+        Self {
+            message_type: CustomRlpxProtoMessageId::Ping,
+            message: CustomRlpxProtoMessageKind::Ping,
+        }
+    }
+
+    /// Creates a pong message
+    pub fn pong() -> Self {
+        Self {
+            message_type: CustomRlpxProtoMessageId::Pong,
+            message: CustomRlpxProtoMessageKind::Pong,
+        }
+    }
+
+    /// Encodes the message into bytes: the message ID byte followed by the payload, if any.
+    pub fn encoded(&self) -> BytesMut {
+        let mut buf = BytesMut::new();
+        buf.put_u8(self.message_type as u8);
+        match &self.message {
+            CustomRlpxProtoMessageKind::Ping | CustomRlpxProtoMessageKind::Pong => {}
+            CustomRlpxProtoMessageKind::PingMessage(msg) |
+            CustomRlpxProtoMessageKind::PongMessage(msg) => {
+                buf.put(msg.as_bytes());
+            }
+        }
+        buf
+    }
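The wire format implemented by `encoded` above and `decode_message` below is deliberately tiny: one message-id byte followed by an optional UTF-8 payload. A self-contained round-trip sketch of that framing, using a plain `Vec<u8>` in place of `BytesMut`:

```rust
/// Encode a message as its id byte followed by the raw payload bytes.
fn encode(id: u8, payload: &str) -> Vec<u8> {
    let mut buf = vec![id];
    buf.extend_from_slice(payload.as_bytes());
    buf
}

/// Split the id byte off the front; an empty buffer has no valid message.
fn decode(buf: &[u8]) -> Option<(u8, String)> {
    let (&id, payload) = buf.split_first()?;
    Some((id, String::from_utf8_lossy(payload).into_owned()))
}

fn main() {
    // 0x02 is the `PingMessage` id in the example protocol.
    let wire = encode(0x02, "hello!");
    assert_eq!(decode(&wire), Some((0x02, "hello!".to_string())));
}
```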
+
+    /// Decodes a `CustomRlpxProtoMessage` from the given message buffer.
+    pub fn decode_message(buf: &mut &[u8]) -> Option<Self> {
+        if buf.is_empty() {
+            return None;
+        }
+        let id = buf[0];
+        buf.advance(1);
+        let message_type = match id {
+            0x00 => CustomRlpxProtoMessageId::Ping,
+            0x01 => CustomRlpxProtoMessageId::Pong,
+            0x02 => CustomRlpxProtoMessageId::PingMessage,
+            0x03 => CustomRlpxProtoMessageId::PongMessage,
+            _ => return None,
+        };
+        let message = match message_type {
+            CustomRlpxProtoMessageId::Ping => CustomRlpxProtoMessageKind::Ping,
+            CustomRlpxProtoMessageId::Pong => CustomRlpxProtoMessageKind::Pong,
+            CustomRlpxProtoMessageId::PingMessage => CustomRlpxProtoMessageKind::PingMessage(
+                String::from_utf8_lossy(&buf[..]).into_owned(),
+            ),
+            CustomRlpxProtoMessageId::PongMessage => CustomRlpxProtoMessageKind::PongMessage(
+                String::from_utf8_lossy(&buf[..]).into_owned(),
+            ),
+        };
+
+        Some(Self { message_type, message })
+    }
+}
diff --git a/examples/exex/discv5/Cargo.toml b/examples/exex/discv5/Cargo.toml
new file mode 100644
index 000000000..b1777cfa1
--- /dev/null
+++ b/examples/exex/discv5/Cargo.toml
@@ -0,0 +1,33 @@
+[package]
+name = "example-exex-discv5"
+version = "0.0.0"
+publish = false
+edition.workspace = true
+license.workspace = true
+
+[dependencies]
+discv5.workspace = true
+enr.workspace = true
+
+reth-discv5.workspace = true
+reth.workspace = true
+reth-exex.workspace = true
+reth-node-api.workspace = true
+reth-node-ethereum.workspace = true
+reth-network-peers.workspace = true
+reth-tracing.workspace = true
+futures.workspace = true
+
+clap.workspace = true
+reth-chainspec.workspace = true
+serde_json.workspace = true
+tokio.workspace = true
+tokio-stream.workspace = true
+futures-util.workspace = true
+
+tracing.workspace = true
+eyre.workspace = true
+
+[dev-dependencies]
+reth-exex-test-utils.workspace = true
+reth-testing-utils.workspace = true
diff --git a/examples/exex/discv5/src/exex/mod.rs b/examples/exex/discv5/src/exex/mod.rs
new file mode 100644
index 000000000..4631f3929
--- /dev/null
+++ b/examples/exex/discv5/src/exex/mod.rs
@@ -0,0 +1,70 @@
+use eyre::Result;
+use futures::{Future, FutureExt};
+use reth_exex::{ExExContext, ExExEvent, ExExNotification};
+use reth_node_api::FullNodeComponents;
+use reth_tracing::tracing::info;
+use std::{
+    pin::Pin,
+    task::{ready, Context, Poll},
+};
+use tracing::error;
+
+use crate::network::DiscV5ExEx;
+
+/// The ExEx struct, representing the initialization and execution of the ExEx.
+pub struct ExEx<Node: FullNodeComponents> {
+    exex: ExExContext<Node>,
+    disc_v5: DiscV5ExEx,
+}
+
+impl<Node: FullNodeComponents> ExEx<Node> {
+    pub fn new(exex: ExExContext<Node>, disc_v5: DiscV5ExEx) -> Self {
+        Self { exex, disc_v5 }
+    }
+}
+
+impl<Node: FullNodeComponents> Future for ExEx<Node> {
+    type Output = Result<()>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        // Poll the Discv5 future until it's drained
+        loop {
+            match self.disc_v5.poll_unpin(cx) {
+                Poll::Ready(Ok(())) => {
+                    info!("Discv5 task completed successfully");
+                }
+                Poll::Ready(Err(e)) => {
+                    error!(?e, "Discv5 task encountered an error");
+                    return Poll::Ready(Err(e));
+                }
+                Poll::Pending => {
+                    // Exit match and continue to poll notifications
+                    break;
+                }
+            }
+        }
+
+        // Continuously poll the ExExContext notifications
+        loop {
+            if let Some(notification) = ready!(self.exex.notifications.poll_recv(cx)) {
+                match &notification {
+                    ExExNotification::ChainCommitted { new } => {
+                        info!(committed_chain = ?new.range(), "Received commit");
+                    }
+                    ExExNotification::ChainReorged { old, new } => {
+                        info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg");
+                    }
+                    ExExNotification::ChainReverted { old } => {
+                        info!(reverted_chain = ?old.range(), "Received revert");
+                    }
+                }
+
+                if let Some(committed_chain) = notification.committed_chain() {
+                    self.exex
+                        .events
+                        .send(ExExEvent::FinishedHeight(committed_chain.tip().number))?;
+                }
+            }
+        }
+    }
+}
diff --git a/examples/exex/discv5/src/main.rs b/examples/exex/discv5/src/main.rs
new file mode 100644
index 000000000..237432605
--- /dev/null
+++ b/examples/exex/discv5/src/main.rs
@@ -0,0 +1,29 @@
+use clap::Parser;
+
+use exex::ExEx;
+use network::{cli_ext::Discv5ArgsExt, DiscV5ExEx};
+use reth_node_ethereum::EthereumNode;
+
+mod exex;
+mod network;
+
+fn main() -> eyre::Result<()> {
+    reth::cli::Cli::<Discv5ArgsExt>::parse().run(|builder, args| async move {
+        let tcp_port = args.tcp_port;
+        let udp_port = args.udp_port;
+
+        let handle = builder
+            .node(EthereumNode::default())
+            .install_exex("exex-discv5", move |ctx| async move {
+                // start Discv5 task
+                let disc_v5 = DiscV5ExEx::new(tcp_port, udp_port).await?;
+
+                // start exex task with discv5
+                Ok(ExEx::new(ctx, disc_v5))
+            })
+            .launch()
+            .await?;
+
+        handle.wait_for_node_exit().await
+    })
+}
diff --git a/examples/exex/discv5/src/network/cli_ext.rs b/examples/exex/discv5/src/network/cli_ext.rs
new file mode 100644
index 000000000..1eb864de3
--- /dev/null
+++ b/examples/exex/discv5/src/network/cli_ext.rs
@@ -0,0 +1,15 @@
+use clap::Args;
+
+pub const DEFAULT_DISCOVERY_PORT: u16 = 30304;
+pub const DEFAULT_RLPX_PORT: u16 = 30303;
+
+#[derive(Debug, Clone, Args)]
+pub(crate) struct Discv5ArgsExt {
+    /// TCP port used by RLPx
+    #[clap(long = "exex-discv5.tcp-port", default_value_t = DEFAULT_RLPX_PORT)]
+    pub tcp_port: u16,
+
+    /// UDP port used for discovery
+    #[clap(long = "exex-discv5.udp-port", default_value_t = DEFAULT_DISCOVERY_PORT)]
+    pub udp_port: u16,
+}
diff --git a/examples/exex/discv5/src/network/mod.rs b/examples/exex/discv5/src/network/mod.rs
new file mode 100644
index 000000000..ebab28342
--- /dev/null
+++ b/examples/exex/discv5/src/network/mod.rs
@@ -0,0 +1,123 @@
+#![allow(dead_code)]
+
+use discv5::{enr::secp256k1::rand, Enr, Event, ListenConfig};
+use reth::network::config::SecretKey;
+use reth_discv5::{enr::EnrCombinedKeyWrapper, Config, Discv5};
+use reth_network_peers::NodeRecord;
+use reth_tracing::tracing::info;
+use std::{
+    future::Future,
+    net::SocketAddr,
+    pin::Pin,
+    task::{ready, Context, Poll},
+};
+use tokio::sync::mpsc;
+
+pub(crate) mod cli_ext;
+
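Not part of the diff: the two flags defined in `cli_ext.rs` above are ordinary clap arguments, so their parsing can be sanity-checked in isolation. A sketch under assumptions (the `TestCli` wrapper is hypothetical, and `Discv5ArgsExt` is `pub(crate)`, so this would live inside the example crate):

```rust
use clap::Parser;

// Hypothetical wrapper, flattening the extension args the way reth's CLI does.
#[derive(Debug, Parser)]
struct TestCli {
    #[clap(flatten)]
    ext: Discv5ArgsExt,
}

fn main() {
    // Override the UDP port; the TCP port falls back to DEFAULT_RLPX_PORT.
    let cli = TestCli::parse_from(["test", "--exex-discv5.udp-port", "30310"]);
    assert_eq!(cli.ext.udp_port, 30310);
    assert_eq!(cli.ext.tcp_port, 30303);
}
```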
+/// Helper struct to manage a discovery node using discv5.
+pub(crate) struct DiscV5ExEx {
+    /// The inner discv5 instance.
+    inner: Discv5,
+    /// The node record of the discv5 instance.
+    node_record: NodeRecord,
+    /// The events stream of the discv5 instance.
+    events: mpsc::Receiver<discv5::Event>,
+}
+
+impl DiscV5ExEx {
+    /// Starts a new discv5 node.
+    pub async fn new(udp_port: u16, tcp_port: u16) -> eyre::Result<Self> {
+        let secret_key = SecretKey::new(&mut rand::thread_rng());
+
+        let discv5_addr: SocketAddr = format!("127.0.0.1:{udp_port}").parse()?;
+        let rlpx_addr: SocketAddr = format!("127.0.0.1:{tcp_port}").parse()?;
+
+        let discv5_listen_config = ListenConfig::from(discv5_addr);
+        let discv5_config = Config::builder(rlpx_addr)
+            .discv5_config(discv5::ConfigBuilder::new(discv5_listen_config).build())
+            .build();
+
+        let (discv5, events, node_record) = Discv5::start(&secret_key, discv5_config).await?;
+        Ok(Self { inner: discv5, events, node_record })
+    }
+
+    /// Adds a node to the table if it's not already present.
+    pub fn add_node(&mut self, enr: Enr) -> eyre::Result<()> {
+        let reth_enr: enr::Enr<SecretKey> = EnrCombinedKeyWrapper(enr.clone()).into();
+        self.inner.add_node(reth_enr)?;
+        Ok(())
+    }
+
+    /// Returns the local ENR of the discv5 node.
+    pub fn local_enr(&self) -> Enr {
+        self.inner.with_discv5(|discv5| discv5.local_enr())
+    }
+}
+
+impl Future for DiscV5ExEx {
+    type Output = eyre::Result<()>;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        let mut this = self.as_mut();
+        loop {
+            match ready!(this.events.poll_recv(cx)) {
+                Some(evt) => {
+                    if let Event::SessionEstablished(enr, socket_addr) = evt {
+                        info!(?enr, ?socket_addr, "Session established with a new peer.");
+                    }
+                }
+                None => return Poll::Ready(Ok(())),
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::network::DiscV5ExEx;
+    use tracing::info;
+
+    #[tokio::test]
+    async fn can_establish_discv5_session_with_peer() {
+        reth_tracing::init_test_tracing();
+        let mut node_1 = DiscV5ExEx::new(30301, 30303).await.unwrap();
+        let node_1_enr = node_1.local_enr();
+
+        let mut node_2 = DiscV5ExEx::new(30302, 30303).await.unwrap();
+
+        let node_2_enr = node_2.local_enr();
+
+        info!(?node_1_enr, ?node_2_enr, "Started discovery nodes.");
+
+        // add node_2 to node_1 table
+        node_1.add_node(node_2_enr.clone()).unwrap();
+
+        // verify node_2 is in node_1 table
+        assert!(node_1
+            .inner
+            .with_discv5(|discv5| discv5.table_entries_id().contains(&node_2_enr.node_id())));
+
+        // send ping from node_1 to node_2
+        node_1.inner.with_discv5(|discv5| discv5.send_ping(node_2_enr.clone())).await.unwrap();
+
+        // verify they both established a session
+        let event_2_v5 = node_2.events.recv().await.unwrap();
+        let event_1_v5 = node_1.events.recv().await.unwrap();
+        assert!(matches!(
+            event_1_v5,
+            discv5::Event::SessionEstablished(node, socket) if node == node_2_enr && socket == node_2_enr.udp4_socket().unwrap().into()
+        ));
+        assert!(matches!(
+            event_2_v5,
+            discv5::Event::SessionEstablished(node, socket) if node == node_1_enr && socket == node_1_enr.udp4_socket().unwrap().into()
+        ));
+
+        // verify node_1 is inserted into node_2's table
+        let event_2_v5 = node_2.events.recv().await.unwrap();
+        assert!(matches!(
+            event_2_v5,
+            discv5::Event::NodeInserted { node_id, replaced } if node_id == node_1_enr.node_id() && replaced.is_none()
+        ));
+    }
+}
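The next hunk tracks revm's change of `Database::block_hash` to take a `u64` block number instead of a `U256`. A minimal sketch of the updated trait shape, assuming a recent revm (the `NullDb` type is hypothetical):

```rust
use revm::{
    primitives::{AccountInfo, Address, Bytecode, B256, U256},
    Database,
};

struct NullDb;

impl Database for NullDb {
    type Error = std::convert::Infallible;

    fn basic(&mut self, _address: Address) -> Result<Option<AccountInfo>, Self::Error> {
        Ok(None)
    }

    fn code_by_hash(&mut self, _code_hash: B256) -> Result<Bytecode, Self::Error> {
        Ok(Bytecode::default())
    }

    fn storage(&mut self, _address: Address, _index: U256) -> Result<U256, Self::Error> {
        Ok(U256::ZERO)
    }

    // Block hashes are now keyed by u64 block number rather than U256.
    fn block_hash(&mut self, _number: u64) -> Result<B256, Self::Error> {
        Ok(B256::ZERO)
    }
}
```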
diff --git a/examples/exex/rollup/src/db.rs b/examples/exex/rollup/src/db.rs
index 2c42beafb..dcc8b435e 100644
--- a/examples/exex/rollup/src/db.rs
+++ b/examples/exex/rollup/src/db.rs
@@ -443,7 +443,7 @@ impl reth_revm::Database for Database {
         get_storage(&self.connection(), address, index.into()).map(|data| data.unwrap_or_default())
     }
 
-    fn block_hash(&mut self, number: U256) -> Result<B256, Self::Error> {
+    fn block_hash(&mut self, number: u64) -> Result<B256, Self::Error> {
         let block_hash = self.connection().query_row::<String, _, _>(
             "SELECT hash FROM block WHERE number = ?",
             (number.to_string(),),
diff --git a/examples/exex/rollup/src/execution.rs b/examples/exex/rollup/src/execution.rs
index cc5b716bd..22ec58292 100644
--- a/examples/exex/rollup/src/execution.rs
+++ b/examples/exex/rollup/src/execution.rs
@@ -10,10 +10,9 @@ use reth_primitives::{
     constants,
     eip4844::kzg_to_versioned_hash,
     keccak256,
-    revm::env::fill_tx_env,
     revm_primitives::{CfgEnvWithHandlerCfg, EVMError, ExecutionResult, ResultAndState},
-    Address, Block, BlockWithSenders, Bytes, Hardfork, Header, Receipt, TransactionSigned, TxType,
-    B256, U256,
+    Address, Block, BlockWithSenders, Bytes, EthereumHardfork, Header, Receipt, TransactionSigned,
+    TxType, B256, U256,
 };
 use reth_revm::{
     db::{states::bundle_state::BundleRetention, BundleState},
@@ -69,16 +68,17 @@ fn construct_header(db: &Database, header: &RollupContract::BlockHeader) -> eyre
     let block_number = u64::try_from(header.sequence)?;
 
     // Calculate base fee per gas for EIP-1559 transactions
-    let base_fee_per_gas = if CHAIN_SPEC.fork(Hardfork::London).transitions_at_block(block_number) {
-        constants::EIP1559_INITIAL_BASE_FEE
-    } else {
-        parent_block
-            .as_ref()
-            .ok_or(eyre::eyre!("parent block not found"))?
-            .header
-            .next_block_base_fee(CHAIN_SPEC.base_fee_params_at_block(block_number))
-            .ok_or(eyre::eyre!("failed to calculate base fee"))?
-    };
+    let base_fee_per_gas =
+        if CHAIN_SPEC.fork(EthereumHardfork::London).transitions_at_block(block_number) {
+            constants::EIP1559_INITIAL_BASE_FEE
+        } else {
+            parent_block
+                .as_ref()
+                .ok_or(eyre::eyre!("parent block not found"))?
+                .header
+                .next_block_base_fee(CHAIN_SPEC.base_fee_params_at_block(block_number))
+                .ok_or(eyre::eyre!("failed to calculate base fee"))?
+        };
 
     // Construct header
     Ok(Header {
@@ -103,17 +103,11 @@ fn configure_evm<'a>(
             .build(),
     );
     evm.db_mut().set_state_clear_flag(
-        CHAIN_SPEC.fork(Hardfork::SpuriousDragon).active_at_block(header.number),
+        CHAIN_SPEC.fork(EthereumHardfork::SpuriousDragon).active_at_block(header.number),
     );
 
     let mut cfg = CfgEnvWithHandlerCfg::new_with_spec_id(evm.cfg().clone(), evm.spec_id());
-    EthEvmConfig::fill_cfg_and_block_env(
-        &mut cfg,
-        evm.block_mut(),
-        &CHAIN_SPEC,
-        header,
-        U256::ZERO,
-    );
+    config.fill_cfg_and_block_env(&mut cfg, evm.block_mut(), &CHAIN_SPEC, header, U256::ZERO);
     *evm.cfg_mut() = cfg.cfg_env;
 
     evm
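For reference, the base-fee branch above defers to `Header::next_block_base_fee`, which implements the EIP-1559 update rule (reth parameterizes it via `BaseFeeParams`). A self-contained sketch of that rule with mainnet constants; the `next_base_fee` helper is illustrative, not reth's implementation:

```rust
/// EIP-1559 next-block base fee, using mainnet constants.
fn next_base_fee(parent_base_fee: u64, parent_gas_used: u64, parent_gas_limit: u64) -> u64 {
    const ELASTICITY_MULTIPLIER: u64 = 2;
    const BASE_FEE_MAX_CHANGE_DENOMINATOR: u128 = 8;

    let gas_target = parent_gas_limit / ELASTICITY_MULTIPLIER;
    match parent_gas_used.cmp(&gas_target) {
        std::cmp::Ordering::Equal => parent_base_fee,
        std::cmp::Ordering::Greater => {
            let delta = (parent_base_fee as u128 * (parent_gas_used - gas_target) as u128
                / gas_target as u128
                / BASE_FEE_MAX_CHANGE_DENOMINATOR) as u64;
            // The increase is at least 1 wei when the block was above target.
            parent_base_fee + delta.max(1)
        }
        std::cmp::Ordering::Less => {
            let delta = (parent_base_fee as u128 * (gas_target - parent_gas_used) as u128
                / gas_target as u128
                / BASE_FEE_MAX_CHANGE_DENOMINATOR) as u64;
            parent_base_fee.saturating_sub(delta)
        }
    }
}

fn main() {
    // A block exactly at target leaves the base fee unchanged.
    assert_eq!(next_base_fee(1_000_000_000, 15_000_000, 30_000_000), 1_000_000_000);
    // A full block raises it by 1/8 (12.5%).
    assert_eq!(next_base_fee(1_000_000_000, 30_000_000, 30_000_000), 1_125_000_000);
}
```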
@@ -216,7 +210,7 @@ fn execute_transactions(
         }
         // Execute transaction.
         // Fill revm structure.
-        fill_tx_env(evm.tx_mut(), &transaction, sender);
+        EthEvmConfig::default().fill_tx_env(evm.tx_mut(), &transaction, sender);
 
         let ResultAndState { result, state } = match evm.transact() {
             Ok(result) => result,
diff --git a/examples/manual-p2p/src/main.rs b/examples/manual-p2p/src/main.rs
index 2b89b5539..c23802d26 100644
--- a/examples/manual-p2p/src/main.rs
+++ b/examples/manual-p2p/src/main.rs
@@ -10,15 +10,15 @@ use std::time::Duration;
 
 use futures::StreamExt;
 use once_cell::sync::Lazy;
-use reth_chainspec::{net::mainnet_nodes, Chain, MAINNET};
+use reth_chainspec::{Chain, MAINNET};
 use reth_discv4::{DiscoveryUpdate, Discv4, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS};
 use reth_ecies::stream::ECIESStream;
 use reth_eth_wire::{
     EthMessage, EthStream, HelloMessage, P2PStream, Status, UnauthedEthStream, UnauthedP2PStream,
 };
 use reth_network::config::rng_secret_key;
-use reth_network_peers::{pk2id, NodeRecord};
-use reth_primitives::{Hardfork, Head, MAINNET_GENESIS_HASH};
+use reth_network_peers::{mainnet_nodes, pk2id, NodeRecord};
+use reth_primitives::{EthereumHardfork, Head, MAINNET_GENESIS_HASH};
 use secp256k1::{SecretKey, SECP256K1};
 use tokio::net::TcpStream;
 
@@ -95,14 +95,14 @@ async fn handshake_p2p(
 // Perform a ETH Wire handshake with a peer
 async fn handshake_eth(p2p_stream: AuthedP2PStream) -> eyre::Result<(AuthedEthStream, Status)> {
     let fork_filter = MAINNET.fork_filter(Head {
-        timestamp: MAINNET.fork(Hardfork::Shanghai).as_timestamp().unwrap(),
+        timestamp: MAINNET.fork(EthereumHardfork::Shanghai).as_timestamp().unwrap(),
         ..Default::default()
     });
 
     let status = Status::builder()
         .chain(Chain::mainnet())
         .genesis(MAINNET_GENESIS_HASH)
-        .forkid(MAINNET.hardfork_fork_id(Hardfork::Shanghai).unwrap())
+        .forkid(MAINNET.hardfork_fork_id(EthereumHardfork::Shanghai).unwrap())
         .build();
 
     let status = Status { version: p2p_stream.shared_capabilities().eth()?.version(), ..status };
diff --git a/examples/polygon-p2p/src/chain_cfg.rs b/examples/polygon-p2p/src/chain_cfg.rs
index b178d1349..92256a1be 100644
--- a/examples/polygon-p2p/src/chain_cfg.rs
+++ b/examples/polygon-p2p/src/chain_cfg.rs
@@ -1,8 +1,10 @@
-use reth_chainspec::{BaseFeeParams, Chain, ChainSpec, ForkCondition, Hardfork};
+use reth_chainspec::{
+    BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition,
+};
 use reth_discv4::NodeRecord;
 use reth_primitives::{b256, Head, B256};
 
-use std::{collections::BTreeMap, sync::Arc};
+use std::sync::Arc;
 
 const SHANGAI_BLOCK: u64 = 50523000;
 
@@ -15,13 +17,13 @@ pub(crate) fn polygon_chain_spec() -> Arc<ChainSpec> {
         genesis: serde_json::from_str(include_str!("./genesis.json")).expect("deserialize genesis"),
         genesis_hash: Some(GENESIS),
         paris_block_and_final_difficulty: None,
-        hardforks: BTreeMap::from([
-            (Hardfork::Petersburg, ForkCondition::Block(0)),
-            (Hardfork::Istanbul, ForkCondition::Block(3395000)),
-            (Hardfork::MuirGlacier, ForkCondition::Block(3395000)),
-            (Hardfork::Berlin, ForkCondition::Block(14750000)),
-            (Hardfork::London, ForkCondition::Block(23850000)),
-            (Hardfork::Shanghai, ForkCondition::Block(SHANGAI_BLOCK)),
+        hardforks: ChainHardforks::new(vec![
+            (EthereumHardfork::Petersburg.boxed(), ForkCondition::Block(0)),
+            (EthereumHardfork::Istanbul.boxed(), ForkCondition::Block(3395000)),
+            (EthereumHardfork::MuirGlacier.boxed(), ForkCondition::Block(3395000)),
+            (EthereumHardfork::Berlin.boxed(), ForkCondition::Block(14750000)),
+            (EthereumHardfork::London.boxed(), ForkCondition::Block(23850000)),
+            (EthereumHardfork::Shanghai.boxed(), ForkCondition::Block(SHANGAI_BLOCK)),
         ]),
         deposit_contract: None,
         base_fee_params: reth_chainspec::BaseFeeParamsKind::Constant(BaseFeeParams::ethereum()),
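An aside on the hunk above: each `ChainHardforks` entry pairs a boxed hardfork with a `ForkCondition`, and activation queries go through the condition, just as `execution.rs` does with `active_at_block`. A small sketch using only types the diff itself exercises:

```rust
use reth_chainspec::ForkCondition;

fn main() {
    // Shanghai on this chain is gated purely by block number.
    let shanghai = ForkCondition::Block(50_523_000);
    assert!(shanghai.active_at_block(50_523_000));
    assert!(!shanghai.active_at_block(50_522_999));
}
```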
diff --git a/examples/rpc-db/Cargo.toml b/examples/rpc-db/Cargo.toml
index 8bcab0e2b..007a488b8 100644
--- a/examples/rpc-db/Cargo.toml
+++ b/examples/rpc-db/Cargo.toml
@@ -13,5 +13,6 @@ reth-chainspec.workspace = true
 reth-db.workspace = true
 reth-db-api.workspace = true
 reth-node-ethereum.workspace = true
+reth-provider = { workspace = true, features = ["test-utils"] }
 tokio = { workspace = true, features = ["full"] }
 eyre.workspace = true
diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs
index 96863d4f0..30c047954 100644
--- a/examples/rpc-db/src/main.rs
+++ b/examples/rpc-db/src/main.rs
@@ -12,12 +12,14 @@
 //! cast rpc myrpcExt_customMethod
 //! ```
 
+use std::{path::Path, sync::Arc};
+
 use reth::{
     providers::{
         providers::{BlockchainProvider, StaticFileProvider},
         ProviderFactory,
     },
-    utils::db::open_db_read_only,
+    utils::open_db_read_only,
 };
 use reth_chainspec::ChainSpecBuilder;
 use reth_db::mdbx::DatabaseArguments;
@@ -25,16 +27,13 @@ use reth_db_api::models::ClientVersion;
 
 // Bringing up the RPC
 use reth::rpc::builder::{
-    RethRpcModule, RpcModuleBuilder, RpcServerConfig, TransportRpcModuleConfig,
+    EthApiBuild, RethRpcModule, RpcModuleBuilder, RpcServerConfig, TransportRpcModuleConfig,
 };
 // Configuring the network parts, ideally also wouldn't need to think about this.
 use myrpc_ext::{MyRpcExt, MyRpcExtApiServer};
-use reth::{
-    blockchain_tree::noop::NoopBlockchainTree, providers::test_utils::TestCanonStateSubscriptions,
-    tasks::TokioTaskExecutor,
-};
+use reth::{blockchain_tree::noop::NoopBlockchainTree, tasks::TokioTaskExecutor};
 use reth_node_ethereum::EthEvmConfig;
-use std::{path::Path, sync::Arc};
+use reth_provider::test_utils::TestCanonStateSubscriptions;
 
 // Custom rpc extension
 pub mod myrpc_ext;
@@ -71,7 +70,7 @@ async fn main() -> eyre::Result<()> {
     // Pick which namespaces to expose.
     let config = TransportRpcModuleConfig::default().with_http([RethRpcModule::Eth]);
 
-    let mut server = rpc_builder.build(config);
+    let mut server = rpc_builder.build(config, EthApiBuild::build);
 
     // Add a custom rpc namespace
     let custom_rpc = MyRpcExt { provider };
@@ -80,7 +79,7 @@ async fn main() -> eyre::Result<()> {
     // Start the server & keep it alive
     let server_args =
         RpcServerConfig::http(Default::default()).with_http_address("0.0.0.0:8545".parse()?);
-    let _handle = server_args.start(server).await?;
+    let _handle = server_args.start(&server).await?;
 
     futures::future::pending::<()>().await;
     Ok(())
diff --git a/examples/rpc-db/src/myrpc_ext.rs b/examples/rpc-db/src/myrpc_ext.rs
index d1898b81c..e38b6fc24 100644
--- a/examples/rpc-db/src/myrpc_ext.rs
+++ b/examples/rpc-db/src/myrpc_ext.rs
@@ -3,7 +3,7 @@ use reth::{primitives::Block, providers::BlockReaderIdExt};
 
 // Rpc related imports
 use jsonrpsee::proc_macros::rpc;
-use reth::rpc::eth::error::EthResult;
+use reth::rpc::server_types::eth::EthResult;
 
 /// trait interface for a custom rpc namespace: `MyRpc`
 ///
diff --git a/examples/stateful-precompile/Cargo.toml b/examples/stateful-precompile/Cargo.toml
index c983ef80d..2ae4656ee 100644
--- a/examples/stateful-precompile/Cargo.toml
+++ b/examples/stateful-precompile/Cargo.toml
@@ -11,7 +11,7 @@ reth-chainspec.workspace = true
 reth-node-api.workspace = true
 reth-node-core.workspace = true
 reth-primitives.workspace = true
-reth-node-ethereum.workspace = true
+reth-node-ethereum = { workspace = true, features = ["test-utils"] }
 reth-tracing.workspace = true
 alloy-genesis.workspace = true
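Before the main.rs changes below, a note on the cache shape: `PrecompileCache` keys an LRU by precompile input and gas limit via `schnellru`. A standalone sketch of that pattern (the `u64` payload here is a stand-in for `PrecompileResult`):

```rust
use schnellru::{ByLength, LruMap};

fn main() {
    // Keyed by (input bytes, gas limit), capped at 1024 entries.
    let mut cache: LruMap<(Vec<u8>, u64), u64> = LruMap::new(ByLength::new(1024));

    let key = (b"input".to_vec(), 100_000u64);
    cache.insert(key.clone(), 42);
    assert_eq!(cache.get(&key).copied(), Some(42));
}
```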
diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs
index 8eaecb29a..b595647e0 100644
--- a/examples/stateful-precompile/src/main.rs
+++ b/examples/stateful-precompile/src/main.rs
@@ -30,6 +30,12 @@ use reth_tracing::{RethTracer, Tracer};
 use schnellru::{ByLength, LruMap};
 use std::{collections::HashMap, sync::Arc};
 
+/// Type alias for the LRU cache used within the [`PrecompileCache`].
+type PrecompileLRUCache = LruMap<(Bytes, u64), PrecompileResult>;
+
+/// Type alias for the thread-safe `Arc<RwLock<PrecompileLRUCache>>` wrapper around [`PrecompileCache`].
+type CachedPrecompileResult = Arc<RwLock<PrecompileLRUCache>>;
+
 /// A cache for precompile inputs / outputs.
 ///
 /// This assumes that the precompile is a standard precompile, as in `StandardPrecompileFn`, meaning
@@ -40,8 +46,7 @@ use std::{collections::HashMap, sync::Arc};
 #[derive(Debug, Default)]
 pub struct PrecompileCache {
     /// Caches for each precompile input / output.
-    #[allow(clippy::type_complexity)]
-    cache: HashMap<(Address, SpecId), Arc<RwLock<LruMap<(Bytes, u64), PrecompileResult>>>>,
+    cache: HashMap<(Address, SpecId), CachedPrecompileResult>,
 }
 
 /// Custom EVM configuration
@@ -133,17 +138,28 @@ impl StatefulPrecompileMut for WrappedPrecompile {
 }
 
 impl ConfigureEvmEnv for MyEvmConfig {
-    fn fill_tx_env(tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) {
-        EthEvmConfig::fill_tx_env(tx_env, transaction, sender)
+    fn fill_tx_env(&self, tx_env: &mut TxEnv, transaction: &TransactionSigned, sender: Address) {
+        EthEvmConfig::default().fill_tx_env(tx_env, transaction, sender)
     }
 
     fn fill_cfg_env(
+        &self,
         cfg_env: &mut CfgEnvWithHandlerCfg,
         chain_spec: &ChainSpec,
         header: &Header,
         total_difficulty: U256,
     ) {
-        EthEvmConfig::fill_cfg_env(cfg_env, chain_spec, header, total_difficulty)
+        EthEvmConfig::default().fill_cfg_env(cfg_env, chain_spec, header, total_difficulty)
+    }
+
+    fn fill_tx_env_system_contract_call(
+        &self,
+        env: &mut Env,
+        caller: Address,
+        contract: Address,
+        data: Bytes,
+    ) {
+        EthEvmConfig::default().fill_tx_env_system_contract_call(env, caller, contract, data)
     }
 }
diff --git a/examples/txpool-tracing/src/main.rs b/examples/txpool-tracing/src/main.rs
index 85a5b795a..c9a14dee1 100644
--- a/examples/txpool-tracing/src/main.rs
+++ b/examples/txpool-tracing/src/main.rs
@@ -28,7 +28,7 @@ fn main() {
     Cli::<RethCliTxpoolExt>::parse()
         .run(|builder, args| async move {
             // launch the node
-            let NodeHandle { mut node, node_exit_future } =
+            let NodeHandle { node, node_exit_future } =
                 builder.node(EthereumNode::default()).launch().await?;
 
             // create a new subscription to pending transactions
diff --git a/op.Dockerfile b/op.Dockerfile
index a4692dad5..833ca9f69 100644
--- a/op.Dockerfile
+++ b/op.Dockerfile
@@ -43,7 +43,7 @@ FROM ubuntu AS runtime
 WORKDIR /app
 
 # Copy reth over from the build stage
-COPY --from=builder /app/bsc-reth /usr/local/bin
+COPY --from=builder /app/op-reth /usr/local/bin
 
 # Copy licenses
 COPY LICENSE-* ./
diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs
index 87c3b8df1..41642f0f9 100644
--- a/testing/ef-tests/src/cases/blockchain_test.rs
+++ b/testing/ef-tests/src/cases/blockchain_test.rs
@@ -102,7 +102,6 @@ impl Case for BlockchainTestCase {
                 )
                 .try_seal_with_senders()
                 .unwrap(),
-                None,
             )?;
 
             case.pre.write_to_db(provider.tx_ref())?;
@@ -121,7 +120,6 @@ impl Case for BlockchainTestCase {
                 let decoded = SealedBlock::decode(&mut block.rlp.as_ref())?;
                 provider.insert_historical_block(
                     decoded.clone().try_seal_with_senders().unwrap(),
-                    None,
                 )?;
                 Ok::<Option<SealedBlock>, Error>(Some(decoded))
             })?;
@@ -189,7 +187,7 @@ pub fn should_skip(path: &Path) -> bool {
         | "ValueOverflow.json"
         | "ValueOverflowParis.json"
 
-        // txbyte is of type 02 and we dont parse tx bytes for this test to fail.
+        // txbyte is of type 02 and we don't parse tx bytes for this test to fail.
         | "typeTwoBerlin.json"
 
         // Test checks if nonce overflows. We are handling this correctly but we are not parsing
diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs
index 4ef65043f..73715d0cb 100644
--- a/testing/testing-utils/src/generators.rs
+++ b/testing/testing-utils/src/generators.rs
@@ -164,6 +164,7 @@ pub fn random_block(
         body: transactions,
         ommers,
         withdrawals: None,
+        sidecars: None,
         requests: None,
     }
 }